kvm_host.h

  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (C) 2012,2013 - ARM Ltd
  4. * Author: Marc Zyngier <marc.zyngier@arm.com>
  5. *
  6. * Derived from arch/arm/include/asm/kvm_host.h:
  7. * Copyright (C) 2012 - Virtual Open Systems and Columbia University
  8. * Author: Christoffer Dall <c.dall@virtualopensystems.com>
  9. */
  10. #ifndef __ARM64_KVM_HOST_H__
  11. #define __ARM64_KVM_HOST_H__
  12. #include <linux/arm-smccc.h>
  13. #include <linux/bitmap.h>
  14. #include <linux/types.h>
  15. #include <linux/jump_label.h>
  16. #include <linux/kvm_types.h>
  17. #include <linux/maple_tree.h>
  18. #include <linux/percpu.h>
  19. #include <linux/psci.h>
  20. #include <asm/arch_gicv3.h>
  21. #include <asm/barrier.h>
  22. #include <asm/cpufeature.h>
  23. #include <asm/cputype.h>
  24. #include <asm/daifflags.h>
  25. #include <asm/fpsimd.h>
  26. #include <asm/kvm.h>
  27. #include <asm/kvm_asm.h>
  28. #include <asm/vncr_mapping.h>
  29. #define __KVM_HAVE_ARCH_INTC_INITIALIZED
  30. #define KVM_HALT_POLL_NS_DEFAULT 500000
  31. #include <kvm/arm_vgic.h>
  32. #include <kvm/arm_arch_timer.h>
  33. #include <kvm/arm_pmu.h>
  34. #define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
  35. #define KVM_VCPU_MAX_FEATURES 7
  36. #define KVM_VCPU_VALID_FEATURES (BIT(KVM_VCPU_MAX_FEATURES) - 1)
  37. #define KVM_REQ_SLEEP \
  38. KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
  39. #define KVM_REQ_IRQ_PENDING KVM_ARCH_REQ(1)
  40. #define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(2)
  41. #define KVM_REQ_RECORD_STEAL KVM_ARCH_REQ(3)
  42. #define KVM_REQ_RELOAD_GICv4 KVM_ARCH_REQ(4)
  43. #define KVM_REQ_RELOAD_PMU KVM_ARCH_REQ(5)
  44. #define KVM_REQ_SUSPEND KVM_ARCH_REQ(6)
  45. #define KVM_REQ_RESYNC_PMU_EL0 KVM_ARCH_REQ(7)
  46. #define KVM_REQ_NESTED_S2_UNMAP KVM_ARCH_REQ(8)
  47. #define KVM_DIRTY_LOG_MANUAL_CAPS (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE | \
  48. KVM_DIRTY_LOG_INITIALLY_SET)
  49. #define KVM_HAVE_MMU_RWLOCK
  50. /*
  51. * Mode of operation configurable with kvm-arm.mode early param.
  52. * See Documentation/admin-guide/kernel-parameters.txt for more information.
  53. */
  54. enum kvm_mode {
  55. KVM_MODE_DEFAULT,
  56. KVM_MODE_PROTECTED,
  57. KVM_MODE_NV,
  58. KVM_MODE_NONE,
  59. };
  60. #ifdef CONFIG_KVM
  61. enum kvm_mode kvm_get_mode(void);
  62. #else
  63. static inline enum kvm_mode kvm_get_mode(void) { return KVM_MODE_NONE; };
  64. #endif
  65. extern unsigned int __ro_after_init kvm_sve_max_vl;
  66. extern unsigned int __ro_after_init kvm_host_sve_max_vl;
  67. int __init kvm_arm_init_sve(void);
  68. u32 __attribute_const__ kvm_target_cpu(void);
  69. void kvm_reset_vcpu(struct kvm_vcpu *vcpu);
  70. void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu);
  71. struct kvm_hyp_memcache {
  72. phys_addr_t head;
  73. unsigned long nr_pages;
  74. };
  75. static inline void push_hyp_memcache(struct kvm_hyp_memcache *mc,
  76. phys_addr_t *p,
  77. phys_addr_t (*to_pa)(void *virt))
  78. {
  79. *p = mc->head;
  80. mc->head = to_pa(p);
  81. mc->nr_pages++;
  82. }
  83. static inline void *pop_hyp_memcache(struct kvm_hyp_memcache *mc,
  84. void *(*to_va)(phys_addr_t phys))
  85. {
  86. phys_addr_t *p = to_va(mc->head);
  87. if (!mc->nr_pages)
  88. return NULL;
  89. mc->head = *p;
  90. mc->nr_pages--;
  91. return p;
  92. }
  93. static inline int __topup_hyp_memcache(struct kvm_hyp_memcache *mc,
  94. unsigned long min_pages,
  95. void *(*alloc_fn)(void *arg),
  96. phys_addr_t (*to_pa)(void *virt),
  97. void *arg)
  98. {
  99. while (mc->nr_pages < min_pages) {
  100. phys_addr_t *p = alloc_fn(arg);
  101. if (!p)
  102. return -ENOMEM;
  103. push_hyp_memcache(mc, p, to_pa);
  104. }
  105. return 0;
  106. }
  107. static inline void __free_hyp_memcache(struct kvm_hyp_memcache *mc,
  108. void (*free_fn)(void *virt, void *arg),
  109. void *(*to_va)(phys_addr_t phys),
  110. void *arg)
  111. {
  112. while (mc->nr_pages)
  113. free_fn(pop_hyp_memcache(mc, to_va), arg);
  114. }
  115. void free_hyp_memcache(struct kvm_hyp_memcache *mc);
  116. int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages);
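/*
 * Illustrative sketch (not part of the original header): the memcache is a
 * stack of pages threaded through the pages themselves. push_hyp_memcache()
 * stores the current head in the first word of the new page and makes that
 * page's physical address the new head; pop_hyp_memcache() reverses this.
 * A caller that donates pages might use the __ helpers like the snippet
 * below, where example_alloc(), example_free(), example_to_pa() and
 * example_to_va() are hypothetical callbacks standing in for context-specific
 * (host vs hyp) implementations:
 *
 *	struct kvm_hyp_memcache mc = {};
 *
 *	// Stash at least 4 pages for later hypervisor-side allocations.
 *	if (__topup_hyp_memcache(&mc, 4, example_alloc, example_to_pa, NULL))
 *		return -ENOMEM;
 *	...
 *	// Return whatever is left once the cache is no longer needed.
 *	__free_hyp_memcache(&mc, example_free, example_to_va, NULL);
 */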
  117. struct kvm_vmid {
  118. atomic64_t id;
  119. };
  120. struct kvm_s2_mmu {
  121. struct kvm_vmid vmid;
  122. /*
  123. * stage2 entry level table
  124. *
  125. * Two kvm_s2_mmu structures in the same VM can point to the same
  126. * pgd here. This happens when running a guest using a
  127. * translation regime that isn't affected by its own stage-2
  128. * translation, such as a non-VHE hypervisor running at vEL2, or
  129. * for vEL1/EL0 with vHCR_EL2.VM == 0. In that case, we use the
  130. * canonical stage-2 page tables.
  131. */
  132. phys_addr_t pgd_phys;
  133. struct kvm_pgtable *pgt;
  134. /*
  135. * VTCR value used on the host. For a non-NV guest (or an NV
  136. * guest that runs in a context where its own S2 doesn't
  137. * apply), its T0SZ value reflects that of the IPA size.
  138. *
  139. * For a shadow S2 MMU, T0SZ reflects the PARange exposed to
  140. * the guest.
  141. */
  142. u64 vtcr;
  143. /* The last vcpu id that ran on each physical CPU */
  144. int __percpu *last_vcpu_ran;
  145. #define KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT 0
  146. /*
  147. * Memory cache used to split
  148. * KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE worth of huge pages. It
  149. * is used to allocate stage2 page tables while splitting huge
  150. * pages. The choice of KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE
  151. * influences both the capacity of the split page cache, and
  152. * how often KVM reschedules. Be wary of raising CHUNK_SIZE
  153. * too high.
  154. *
  155. * Protected by kvm->slots_lock.
  156. */
  157. struct kvm_mmu_memory_cache split_page_cache;
  158. uint64_t split_page_chunk_size;
  159. struct kvm_arch *arch;
  160. /*
  161. * For a shadow stage-2 MMU, the virtual vttbr used by the
  162. * host to parse the guest S2.
  163. * This either contains:
  164. * - the virtual VTTBR programmed by the guest hypervisor with
  165. * CnP cleared
  166. * - The value 1 (VMID=0, BADDR=0, CnP=1) if invalid
  167. *
  168. * We also cache the full VTCR which gets used for TLB invalidation,
  169. * taking the ARM ARM's "Any of the bits in VTCR_EL2 are permitted
  170. * to be cached in a TLB" to the letter.
  171. */
  172. u64 tlb_vttbr;
  173. u64 tlb_vtcr;
  174. /*
  175. * true when this represents a nested context where virtual
  176. * HCR_EL2.VM == 1
  177. */
  178. bool nested_stage2_enabled;
  179. /*
  180. * true when this MMU needs to be unmapped before being used for a new
  181. * purpose.
  182. */
  183. bool pending_unmap;
  184. /*
  185. * 0: Nobody is currently using this, check vttbr for validity
  186. * >0: Somebody is actively using this.
  187. */
  188. atomic_t refcnt;
  189. };
  190. struct kvm_arch_memory_slot {
  191. };
  192. /**
  193. * struct kvm_smccc_features: Descriptor of the hypercall services exposed to the guests
  194. *
  195. * @std_bmap: Bitmap of standard secure service calls
  196. * @std_hyp_bmap: Bitmap of standard hypervisor service calls
  197. * @vendor_hyp_bmap: Bitmap of vendor specific hypervisor service calls
  198. */
  199. struct kvm_smccc_features {
  200. unsigned long std_bmap;
  201. unsigned long std_hyp_bmap;
  202. unsigned long vendor_hyp_bmap;
  203. };
  204. typedef unsigned int pkvm_handle_t;
  205. struct kvm_protected_vm {
  206. pkvm_handle_t handle;
  207. struct kvm_hyp_memcache teardown_mc;
  208. bool enabled;
  209. };
  210. struct kvm_mpidr_data {
  211. u64 mpidr_mask;
  212. DECLARE_FLEX_ARRAY(u16, cmpidr_to_idx);
  213. };
  214. static inline u16 kvm_mpidr_index(struct kvm_mpidr_data *data, u64 mpidr)
  215. {
  216. unsigned long index = 0, mask = data->mpidr_mask;
  217. unsigned long aff = mpidr & MPIDR_HWID_BITMASK;
  218. bitmap_gather(&index, &aff, &mask, fls(mask));
  219. return index;
  220. }
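/*
 * Worked example (illustrative, with a hypothetical mask): if the vCPUs of a
 * VM only differ in Aff0[3:0] and Aff1[1:0], then mpidr_mask == 0x30f. For a
 * vCPU whose affinity value is 0x205 (Aff1 == 2, Aff0 == 5), bitmap_gather()
 * packs the masked bits into consecutive low bits, so the index is
 * 0x5 | (0x2 << 4) == 0x25, i.e. slot 37 of cmpidr_to_idx. fls(mask) == 10
 * bounds the gather to the bits that can actually be set.
 */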
  221. struct kvm_sysreg_masks;
  222. enum fgt_group_id {
  223. __NO_FGT_GROUP__,
  224. HFGxTR_GROUP,
  225. HDFGRTR_GROUP,
  226. HDFGWTR_GROUP = HDFGRTR_GROUP,
  227. HFGITR_GROUP,
  228. HAFGRTR_GROUP,
  229. /* Must be last */
  230. __NR_FGT_GROUP_IDS__
  231. };
  232. struct kvm_arch {
  233. struct kvm_s2_mmu mmu;
  234. /*
  235. * Fine-Grained UNDEF, mimicking the FGT layout defined by the
  236. * architecture. We track them globally, as we present the
  237. * same feature-set to all vcpus.
  238. *
  239. * Index 0 is currently spare.
  240. */
  241. u64 fgu[__NR_FGT_GROUP_IDS__];
  242. /*
  243. * Stage 2 paging state for VMs with nested S2 using a virtual
  244. * VMID.
  245. */
  246. struct kvm_s2_mmu *nested_mmus;
  247. size_t nested_mmus_size;
  248. int nested_mmus_next;
  249. /* Interrupt controller */
  250. struct vgic_dist vgic;
  251. /* Timers */
  252. struct arch_timer_vm_data timer_data;
  253. /* Mandated version of PSCI */
  254. u32 psci_version;
  255. /* Protects VM-scoped configuration data */
  256. struct mutex config_lock;
  257. /*
  258. * If we encounter a data abort without valid instruction syndrome
  259. * information, report this to user space. User space can (and
  260. * should) opt in to this feature if KVM_CAP_ARM_NISV_TO_USER is
  261. * supported.
  262. */
  263. #define KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER 0
  264. /* Memory Tagging Extension enabled for the guest */
  265. #define KVM_ARCH_FLAG_MTE_ENABLED 1
  266. /* At least one vCPU has ran in the VM */
  267. #define KVM_ARCH_FLAG_HAS_RAN_ONCE 2
  268. /* The vCPU feature set for the VM is configured */
  269. #define KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED 3
  270. /* PSCI SYSTEM_SUSPEND enabled for the guest */
  271. #define KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED 4
  272. /* VM counter offset */
  273. #define KVM_ARCH_FLAG_VM_COUNTER_OFFSET 5
  274. /* Timer PPIs made immutable */
  275. #define KVM_ARCH_FLAG_TIMER_PPIS_IMMUTABLE 6
  276. /* Initial ID reg values loaded */
  277. #define KVM_ARCH_FLAG_ID_REGS_INITIALIZED 7
  278. /* Fine-Grained UNDEF initialised */
  279. #define KVM_ARCH_FLAG_FGU_INITIALIZED 8
  280. unsigned long flags;
  281. /* VM-wide vCPU feature set */
  282. DECLARE_BITMAP(vcpu_features, KVM_VCPU_MAX_FEATURES);
  283. /* MPIDR to vcpu index mapping, optional */
  284. struct kvm_mpidr_data *mpidr_data;
  285. /*
  286. * VM-wide PMU filter, implemented as a bitmap and big enough for
  287. * up to 2^10 events (ARMv8.0) or 2^16 events (ARMv8.1+).
  288. */
  289. unsigned long *pmu_filter;
  290. struct arm_pmu *arm_pmu;
  291. cpumask_var_t supported_cpus;
  292. /* PMCR_EL0.N value for the guest */
  293. u8 pmcr_n;
  294. /* Iterator for idreg debugfs */
  295. u8 idreg_debugfs_iter;
  296. /* Hypercall features firmware registers' descriptor */
  297. struct kvm_smccc_features smccc_feat;
  298. struct maple_tree smccc_filter;
  299. /*
  300. * Emulated CPU ID registers per VM
  301. * The (Op0, Op1, CRn, CRm, Op2) encoding of the ID registers saved here
  302. * is (3, 0, 0, crm, op2), where 1 <= crm < 8 and 0 <= op2 < 8.
  303. *
  304. * These emulated idregs are VM-wide, but accessed from the context of a vCPU.
  305. * Atomic access to multiple idregs is guarded by kvm_arch.config_lock.
  306. */
  307. #define IDREG_IDX(id) (((sys_reg_CRm(id) - 1) << 3) | sys_reg_Op2(id))
  308. #define KVM_ARM_ID_REG_NUM (IDREG_IDX(sys_reg(3, 0, 0, 7, 7)) + 1)
  309. u64 id_regs[KVM_ARM_ID_REG_NUM];
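/*
 * Illustrative arithmetic (not part of the original header): ID_AA64PFR0_EL1
 * is encoded as (3, 0, 0, 4, 0), so IDREG_IDX(SYS_ID_AA64PFR0_EL1) ==
 * ((4 - 1) << 3) | 0 == 24; the last slot, sys_reg(3, 0, 0, 7, 7), lands at
 * ((7 - 1) << 3) | 7 == 55, hence KVM_ARM_ID_REG_NUM == 56.
 */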
  310. u64 ctr_el0;
  311. /* Masks for VNCR-baked sysregs */
  312. struct kvm_sysreg_masks *sysreg_masks;
  313. /*
  314. * For an untrusted host VM, 'pkvm.handle' is used to look up
  315. * the associated pKVM instance in the hypervisor.
  316. */
  317. struct kvm_protected_vm pkvm;
  318. };
  319. struct kvm_vcpu_fault_info {
  320. u64 esr_el2; /* Hyp Syndrome Register */
  321. u64 far_el2; /* Hyp Fault Address Register */
  322. u64 hpfar_el2; /* Hyp IPA Fault Address Register */
  323. u64 disr_el1; /* Deferred [SError] Status Register */
  324. };
  325. /*
  326. * VNCR() just places the VNCR-capable registers in the enum after
  327. * __VNCR_START__, with each value (after correction) being an 8-byte offset
  328. * from the VNCR base. As we don't require the enum to be otherwise ordered,
  329. * we need the terrible hack below to ensure that we correctly size the
  330. * sys_regs array, no matter what.
  331. *
  332. * The __MAX__ macro has been lifted from Sean Eron Anderson's wonderful
  333. * treasure trove of bit hacks:
  334. * https://graphics.stanford.edu/~seander/bithacks.html#IntegerMinOrMax
  335. */
  336. #define __MAX__(x,y) ((x) ^ (((x) ^ (y)) & -((x) < (y))))
  337. #define VNCR(r) \
  338. __before_##r, \
  339. r = __VNCR_START__ + ((VNCR_ ## r) / 8), \
  340. __after_##r = __MAX__(__before_##r - 1, r)
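/*
 * Illustrative expansion (not part of the original header), using a
 * hypothetical VNCR_SCTLR_EL1 offset of 0x10 (the real offsets live in
 * asm/vncr_mapping.h): VNCR(SCTLR_EL1) emits three enumerators,
 *
 *	__before_SCTLR_EL1,				// implicit "next" value
 *	SCTLR_EL1 = __VNCR_START__ + (0x10 / 8),	// pinned by the offset
 *	__after_SCTLR_EL1 = __MAX__(__before_SCTLR_EL1 - 1, SCTLR_EL1),
 *
 * so the enumerator that follows resumes from whichever of the two is larger,
 * and NR_SYS_REGS can never end up smaller than the largest VNCR-derived
 * value even though the offsets aren't sorted. __MAX__() is the branchless
 * max: when x < y, -((x) < (y)) is all ones and x ^ (x ^ y) == y; otherwise
 * the mask is zero and the result is x.
 */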
  341. enum vcpu_sysreg {
  342. __INVALID_SYSREG__, /* 0 is reserved as an invalid value */
  343. MPIDR_EL1, /* MultiProcessor Affinity Register */
  344. CLIDR_EL1, /* Cache Level ID Register */
  345. CSSELR_EL1, /* Cache Size Selection Register */
  346. TPIDR_EL0, /* Thread ID, User R/W */
  347. TPIDRRO_EL0, /* Thread ID, User R/O */
  348. TPIDR_EL1, /* Thread ID, Privileged */
  349. CNTKCTL_EL1, /* Timer Control Register (EL1) */
  350. PAR_EL1, /* Physical Address Register */
  351. MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
  352. OSLSR_EL1, /* OS Lock Status Register */
  353. DISR_EL1, /* Deferred Interrupt Status Register */
  354. /* Performance Monitors Registers */
  355. PMCR_EL0, /* Control Register */
  356. PMSELR_EL0, /* Event Counter Selection Register */
  357. PMEVCNTR0_EL0, /* Event Counter Register (0-30) */
  358. PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
  359. PMCCNTR_EL0, /* Cycle Counter Register */
  360. PMEVTYPER0_EL0, /* Event Type Register (0-30) */
  361. PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
  362. PMCCFILTR_EL0, /* Cycle Count Filter Register */
  363. PMCNTENSET_EL0, /* Count Enable Set Register */
  364. PMINTENSET_EL1, /* Interrupt Enable Set Register */
  365. PMOVSSET_EL0, /* Overflow Flag Status Set Register */
  366. PMUSERENR_EL0, /* User Enable Register */
  367. /* Pointer Authentication Registers in a strict increasing order. */
  368. APIAKEYLO_EL1,
  369. APIAKEYHI_EL1,
  370. APIBKEYLO_EL1,
  371. APIBKEYHI_EL1,
  372. APDAKEYLO_EL1,
  373. APDAKEYHI_EL1,
  374. APDBKEYLO_EL1,
  375. APDBKEYHI_EL1,
  376. APGAKEYLO_EL1,
  377. APGAKEYHI_EL1,
  378. /* Memory Tagging Extension registers */
  379. RGSR_EL1, /* Random Allocation Tag Seed Register */
  380. GCR_EL1, /* Tag Control Register */
  381. TFSRE0_EL1, /* Tag Fault Status Register (EL0) */
  382. POR_EL0, /* Permission Overlay Register 0 (EL0) */
  383. /* FP/SIMD/SVE */
  384. SVCR,
  385. FPMR,
  386. /* 32bit specific registers. */
  387. DACR32_EL2, /* Domain Access Control Register */
  388. IFSR32_EL2, /* Instruction Fault Status Register */
  389. FPEXC32_EL2, /* Floating-Point Exception Control Register */
  390. DBGVCR32_EL2, /* Debug Vector Catch Register */
  391. /* EL2 registers */
  392. SCTLR_EL2, /* System Control Register (EL2) */
  393. ACTLR_EL2, /* Auxiliary Control Register (EL2) */
  394. MDCR_EL2, /* Monitor Debug Configuration Register (EL2) */
  395. CPTR_EL2, /* Architectural Feature Trap Register (EL2) */
  396. HACR_EL2, /* Hypervisor Auxiliary Control Register */
  397. ZCR_EL2, /* SVE Control Register (EL2) */
  398. TTBR0_EL2, /* Translation Table Base Register 0 (EL2) */
  399. TTBR1_EL2, /* Translation Table Base Register 1 (EL2) */
  400. TCR_EL2, /* Translation Control Register (EL2) */
  401. SPSR_EL2, /* EL2 saved program status register */
  402. ELR_EL2, /* EL2 exception link register */
  403. AFSR0_EL2, /* Auxiliary Fault Status Register 0 (EL2) */
  404. AFSR1_EL2, /* Auxiliary Fault Status Register 1 (EL2) */
  405. ESR_EL2, /* Exception Syndrome Register (EL2) */
  406. FAR_EL2, /* Fault Address Register (EL2) */
  407. HPFAR_EL2, /* Hypervisor IPA Fault Address Register */
  408. MAIR_EL2, /* Memory Attribute Indirection Register (EL2) */
  409. AMAIR_EL2, /* Auxiliary Memory Attribute Indirection Register (EL2) */
  410. VBAR_EL2, /* Vector Base Address Register (EL2) */
  411. RVBAR_EL2, /* Reset Vector Base Address Register */
  412. CONTEXTIDR_EL2, /* Context ID Register (EL2) */
  413. CNTHCTL_EL2, /* Counter-timer Hypervisor Control register */
  414. SP_EL2, /* EL2 Stack Pointer */
  415. CNTHP_CTL_EL2,
  416. CNTHP_CVAL_EL2,
  417. CNTHV_CTL_EL2,
  418. CNTHV_CVAL_EL2,
  419. __VNCR_START__, /* Any VNCR-capable reg goes after this point */
  420. VNCR(SCTLR_EL1),/* System Control Register */
  421. VNCR(ACTLR_EL1),/* Auxiliary Control Register */
  422. VNCR(CPACR_EL1),/* Coprocessor Access Control */
  423. VNCR(ZCR_EL1), /* SVE Control */
  424. VNCR(TTBR0_EL1),/* Translation Table Base Register 0 */
  425. VNCR(TTBR1_EL1),/* Translation Table Base Register 1 */
  426. VNCR(TCR_EL1), /* Translation Control Register */
  427. VNCR(TCR2_EL1), /* Extended Translation Control Register */
  428. VNCR(ESR_EL1), /* Exception Syndrome Register */
  429. VNCR(AFSR0_EL1),/* Auxiliary Fault Status Register 0 */
  430. VNCR(AFSR1_EL1),/* Auxiliary Fault Status Register 1 */
  431. VNCR(FAR_EL1), /* Fault Address Register */
  432. VNCR(MAIR_EL1), /* Memory Attribute Indirection Register */
  433. VNCR(VBAR_EL1), /* Vector Base Address Register */
  434. VNCR(CONTEXTIDR_EL1), /* Context ID Register */
  435. VNCR(AMAIR_EL1),/* Aux Memory Attribute Indirection Register */
  436. VNCR(MDSCR_EL1),/* Monitor Debug System Control Register */
  437. VNCR(ELR_EL1),
  438. VNCR(SP_EL1),
  439. VNCR(SPSR_EL1),
  440. VNCR(TFSR_EL1), /* Tag Fault Status Register (EL1) */
  441. VNCR(VPIDR_EL2),/* Virtualization Processor ID Register */
  442. VNCR(VMPIDR_EL2),/* Virtualization Multiprocessor ID Register */
  443. VNCR(HCR_EL2), /* Hypervisor Configuration Register */
  444. VNCR(HSTR_EL2), /* Hypervisor System Trap Register */
  445. VNCR(VTTBR_EL2),/* Virtualization Translation Table Base Register */
  446. VNCR(VTCR_EL2), /* Virtualization Translation Control Register */
  447. VNCR(TPIDR_EL2),/* EL2 Software Thread ID Register */
  448. VNCR(HCRX_EL2), /* Extended Hypervisor Configuration Register */
  449. /* Permission Indirection Extension registers */
  450. VNCR(PIR_EL1), /* Permission Indirection Register 1 (EL1) */
  451. VNCR(PIRE0_EL1), /* Permission Indirection Register 0 (EL1) */
  452. VNCR(POR_EL1), /* Permission Overlay Register 1 (EL1) */
  453. VNCR(HFGRTR_EL2),
  454. VNCR(HFGWTR_EL2),
  455. VNCR(HFGITR_EL2),
  456. VNCR(HDFGRTR_EL2),
  457. VNCR(HDFGWTR_EL2),
  458. VNCR(HAFGRTR_EL2),
  459. VNCR(CNTVOFF_EL2),
  460. VNCR(CNTV_CVAL_EL0),
  461. VNCR(CNTV_CTL_EL0),
  462. VNCR(CNTP_CVAL_EL0),
  463. VNCR(CNTP_CTL_EL0),
  464. VNCR(ICH_HCR_EL2),
  465. NR_SYS_REGS /* Nothing after this line! */
  466. };
  467. struct kvm_sysreg_masks {
  468. struct {
  469. u64 res0;
  470. u64 res1;
  471. } mask[NR_SYS_REGS - __VNCR_START__];
  472. };
  473. struct kvm_cpu_context {
  474. struct user_pt_regs regs; /* sp = sp_el0 */
  475. u64 spsr_abt;
  476. u64 spsr_und;
  477. u64 spsr_irq;
  478. u64 spsr_fiq;
  479. struct user_fpsimd_state fp_regs;
  480. u64 sys_regs[NR_SYS_REGS];
  481. struct kvm_vcpu *__hyp_running_vcpu;
  482. /* This pointer has to be 4kB aligned. */
  483. u64 *vncr_array;
  484. };
  485. struct cpu_sve_state {
  486. __u64 zcr_el1;
  487. /*
  488. * Ordering is important since __sve_save_state/__sve_restore_state
  489. * rely on it.
  490. */
  491. __u32 fpsr;
  492. __u32 fpcr;
  493. /* Must be SVE_VQ_BYTES (128 bit) aligned. */
  494. __u8 sve_regs[];
  495. };
  496. /*
  497. * This structure is instantiated on a per-CPU basis, and contains
  498. * data that is:
  499. *
  500. * - tied to a single physical CPU, and
  501. * - either has a lifetime that does not extend past vcpu_put()
  502. * - or is an invariant for the lifetime of the system
  503. *
  504. * Use host_data_ptr(field) as a way to access a pointer to such a
  505. * field.
  506. */
  507. struct kvm_host_data {
  508. struct kvm_cpu_context host_ctxt;
  509. /*
  510. * All pointers in this union are hyp VA.
  511. * sve_state is only used in pKVM and if system_supports_sve().
  512. */
  513. union {
  514. struct user_fpsimd_state *fpsimd_state;
  515. struct cpu_sve_state *sve_state;
  516. };
  517. union {
  518. /* HYP VA pointer to the host storage for FPMR */
  519. u64 *fpmr_ptr;
  520. /*
  521. * Used by pKVM only, as it needs to provide storage
  522. * for the host
  523. */
  524. u64 fpmr;
  525. };
  526. /* Ownership of the FP regs */
  527. enum {
  528. FP_STATE_FREE,
  529. FP_STATE_HOST_OWNED,
  530. FP_STATE_GUEST_OWNED,
  531. } fp_owner;
  532. /*
  533. * host_debug_state contains the host registers which are
  534. * saved and restored during world switches.
  535. */
  536. struct {
  537. /* {Break,watch}point registers */
  538. struct kvm_guest_debug_arch regs;
  539. /* Statistical profiling extension */
  540. u64 pmscr_el1;
  541. /* Self-hosted trace */
  542. u64 trfcr_el1;
  543. /* Values of trap registers for the host before guest entry. */
  544. u64 mdcr_el2;
  545. } host_debug_state;
  546. };
  547. struct kvm_host_psci_config {
  548. /* PSCI version used by host. */
  549. u32 version;
  550. u32 smccc_version;
  551. /* Function IDs used by host if version is v0.1. */
  552. struct psci_0_1_function_ids function_ids_0_1;
  553. bool psci_0_1_cpu_suspend_implemented;
  554. bool psci_0_1_cpu_on_implemented;
  555. bool psci_0_1_cpu_off_implemented;
  556. bool psci_0_1_migrate_implemented;
  557. };
  558. extern struct kvm_host_psci_config kvm_nvhe_sym(kvm_host_psci_config);
  559. #define kvm_host_psci_config CHOOSE_NVHE_SYM(kvm_host_psci_config)
  560. extern s64 kvm_nvhe_sym(hyp_physvirt_offset);
  561. #define hyp_physvirt_offset CHOOSE_NVHE_SYM(hyp_physvirt_offset)
  562. extern u64 kvm_nvhe_sym(hyp_cpu_logical_map)[NR_CPUS];
  563. #define hyp_cpu_logical_map CHOOSE_NVHE_SYM(hyp_cpu_logical_map)
  564. struct vcpu_reset_state {
  565. unsigned long pc;
  566. unsigned long r0;
  567. bool be;
  568. bool reset;
  569. };
  570. struct kvm_vcpu_arch {
  571. struct kvm_cpu_context ctxt;
  572. /*
  573. * Guest floating point state
  574. *
  575. * The architecture has two main floating point extensions,
  576. * the original FPSIMD and SVE. These have overlapping
  577. * register views, with the FPSIMD V registers occupying the
  578. * low 128 bits of the SVE Z registers. When the core
  579. * floating point code saves the register state of a task it
  580. * records which view it saved in fp_type.
  581. */
  582. void *sve_state;
  583. enum fp_type fp_type;
  584. unsigned int sve_max_vl;
  585. /* Stage 2 paging state used by the hardware on next switch */
  586. struct kvm_s2_mmu *hw_mmu;
  587. /* Values of trap registers for the guest. */
  588. u64 hcr_el2;
  589. u64 hcrx_el2;
  590. u64 mdcr_el2;
  591. u64 cptr_el2;
  592. /* Exception Information */
  593. struct kvm_vcpu_fault_info fault;
  594. /* Configuration flags, set once and for all before the vcpu can run */
  595. u8 cflags;
  596. /* Input flags to the hypervisor code, potentially cleared after use */
  597. u8 iflags;
  598. /* State flags for kernel bookkeeping, unused by the hypervisor code */
  599. u8 sflags;
  600. /*
  601. * Don't run the guest (internal implementation need).
  602. *
  603. * Contrary to the flags above, this is set/cleared outside of
  604. * a vcpu context, and thus cannot be mixed with the flags
  605. * themselves (or the flag accesses need to be made atomic).
  606. */
  607. bool pause;
  608. /*
  609. * We maintain more than a single set of debug registers to support
  610. * debugging the guest from the host and to maintain separate host and
  611. * guest state during world switches. vcpu_debug_state are the debug
  612. * registers of the vcpu as the guest sees them.
  613. *
  614. * external_debug_state contains the debug values we want to use when
  615. * debugging the guest. This is set via the KVM_SET_GUEST_DEBUG ioctl.
  616. *
  617. * debug_ptr points to the set of debug registers that should be loaded
  618. * onto the hardware when running the guest.
  619. */
  620. struct kvm_guest_debug_arch *debug_ptr;
  621. struct kvm_guest_debug_arch vcpu_debug_state;
  622. struct kvm_guest_debug_arch external_debug_state;
  623. /* VGIC state */
  624. struct vgic_cpu vgic_cpu;
  625. struct arch_timer_cpu timer_cpu;
  626. struct kvm_pmu pmu;
  627. /*
  628. * Guest registers we preserve during guest debugging.
  629. *
  630. * These shadow registers are updated by the kvm_handle_sys_reg
  631. * trap handler if the guest accesses or updates them while we
  632. * are using guest debug.
  633. */
  634. struct {
  635. u32 mdscr_el1;
  636. bool pstate_ss;
  637. } guest_debug_preserved;
  638. /* vcpu power state */
  639. struct kvm_mp_state mp_state;
  640. spinlock_t mp_state_lock;
  641. /* Cache some mmu pages needed inside spinlock regions */
  642. struct kvm_mmu_memory_cache mmu_page_cache;
  643. /* Virtual SError ESR to restore when HCR_EL2.VSE is set */
  644. u64 vsesr_el2;
  645. /* Additional reset state */
  646. struct vcpu_reset_state reset_state;
  647. /* Guest PV state */
  648. struct {
  649. u64 last_steal;
  650. gpa_t base;
  651. } steal;
  652. /* Per-vcpu CCSIDR override or NULL */
  653. u32 *ccsidr;
  654. };
  655. /*
  656. * Each 'flag' is composed of a comma-separated triplet:
  657. *
  658. * - the flag-set it belongs to in the vcpu->arch structure
  659. * - the value for that flag
  660. * - the mask for that flag
  661. *
  662. * __vcpu_single_flag() builds such a triplet for a single-bit flag.
  663. * unpack_vcpu_flag() extracts the flag value from the triplet for
  664. * direct use outside of the flag accessors.
  665. */
  666. #define __vcpu_single_flag(_set, _f) _set, (_f), (_f)
  667. #define __unpack_flag(_set, _f, _m) _f
  668. #define unpack_vcpu_flag(...) __unpack_flag(__VA_ARGS__)
  669. #define __build_check_flag(v, flagset, f, m) \
  670. do { \
  671. typeof(v->arch.flagset) *_fset; \
  672. \
  673. /* Check that the flags fit in the mask */ \
  674. BUILD_BUG_ON(HWEIGHT(m) != HWEIGHT((f) | (m))); \
  675. /* Check that the flags fit in the type */ \
  676. BUILD_BUG_ON((sizeof(*_fset) * 8) <= __fls(m)); \
  677. } while (0)
  678. #define __vcpu_get_flag(v, flagset, f, m) \
  679. ({ \
  680. __build_check_flag(v, flagset, f, m); \
  681. \
  682. READ_ONCE(v->arch.flagset) & (m); \
  683. })
  684. /*
  685. * Note that the set/clear accessors must be preempt-safe in order to
  686. * avoid nesting them with load/put which also manipulate flags...
  687. */
  688. #ifdef __KVM_NVHE_HYPERVISOR__
  689. /* the nVHE hypervisor is always non-preemptible */
  690. #define __vcpu_flags_preempt_disable()
  691. #define __vcpu_flags_preempt_enable()
  692. #else
  693. #define __vcpu_flags_preempt_disable() preempt_disable()
  694. #define __vcpu_flags_preempt_enable() preempt_enable()
  695. #endif
  696. #define __vcpu_set_flag(v, flagset, f, m) \
  697. do { \
  698. typeof(v->arch.flagset) *fset; \
  699. \
  700. __build_check_flag(v, flagset, f, m); \
  701. \
  702. fset = &v->arch.flagset; \
  703. __vcpu_flags_preempt_disable(); \
  704. if (HWEIGHT(m) > 1) \
  705. *fset &= ~(m); \
  706. *fset |= (f); \
  707. __vcpu_flags_preempt_enable(); \
  708. } while (0)
  709. #define __vcpu_clear_flag(v, flagset, f, m) \
  710. do { \
  711. typeof(v->arch.flagset) *fset; \
  712. \
  713. __build_check_flag(v, flagset, f, m); \
  714. \
  715. fset = &v->arch.flagset; \
  716. __vcpu_flags_preempt_disable(); \
  717. *fset &= ~(m); \
  718. __vcpu_flags_preempt_enable(); \
  719. } while (0)
  720. #define vcpu_get_flag(v, ...) __vcpu_get_flag((v), __VA_ARGS__)
  721. #define vcpu_set_flag(v, ...) __vcpu_set_flag((v), __VA_ARGS__)
  722. #define vcpu_clear_flag(v, ...) __vcpu_clear_flag((v), __VA_ARGS__)
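/*
 * Illustrative usage (not part of the original header): a flag name such as
 * IN_WFIT below expands to its (flagset, value, mask) triplet, so
 *
 *	vcpu_set_flag(vcpu, IN_WFIT);
 *	if (vcpu_get_flag(vcpu, IN_WFIT))
 *		...;
 *	vcpu_clear_flag(vcpu, IN_WFIT);
 *
 * reads and updates vcpu->arch.sflags under the preemption guards above,
 * while the BUILD_BUG_ON()s in __build_check_flag() reject, at compile time,
 * a flag that doesn't fit its flag-set.
 */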
  723. /* SVE exposed to guest */
  724. #define GUEST_HAS_SVE __vcpu_single_flag(cflags, BIT(0))
  725. /* SVE config completed */
  726. #define VCPU_SVE_FINALIZED __vcpu_single_flag(cflags, BIT(1))
  727. /* PTRAUTH exposed to guest */
  728. #define GUEST_HAS_PTRAUTH __vcpu_single_flag(cflags, BIT(2))
  729. /* KVM_ARM_VCPU_INIT completed */
  730. #define VCPU_INITIALIZED __vcpu_single_flag(cflags, BIT(3))
  731. /* Exception pending */
  732. #define PENDING_EXCEPTION __vcpu_single_flag(iflags, BIT(0))
  733. /*
  734. * PC increment. Overlaps with EXCEPT_MASK on purpose so that it can't
  735. * be set together with an exception...
  736. */
  737. #define INCREMENT_PC __vcpu_single_flag(iflags, BIT(1))
  738. /* Target EL/MODE (not a single flag, but let's abuse the macro) */
  739. #define EXCEPT_MASK __vcpu_single_flag(iflags, GENMASK(3, 1))
  740. /* Helpers to encode exceptions with minimum fuss */
  741. #define __EXCEPT_MASK_VAL unpack_vcpu_flag(EXCEPT_MASK)
  742. #define __EXCEPT_SHIFT __builtin_ctzl(__EXCEPT_MASK_VAL)
  743. #define __vcpu_except_flags(_f) iflags, (_f << __EXCEPT_SHIFT), __EXCEPT_MASK_VAL
  744. /*
  745. * When PENDING_EXCEPTION is set, EXCEPT_MASK can take the following
  746. * values:
  747. *
  748. * For AArch32 EL1:
  749. */
  750. #define EXCEPT_AA32_UND __vcpu_except_flags(0)
  751. #define EXCEPT_AA32_IABT __vcpu_except_flags(1)
  752. #define EXCEPT_AA32_DABT __vcpu_except_flags(2)
  753. /* For AArch64: */
  754. #define EXCEPT_AA64_EL1_SYNC __vcpu_except_flags(0)
  755. #define EXCEPT_AA64_EL1_IRQ __vcpu_except_flags(1)
  756. #define EXCEPT_AA64_EL1_FIQ __vcpu_except_flags(2)
  757. #define EXCEPT_AA64_EL1_SERR __vcpu_except_flags(3)
  758. /* For AArch64 with NV: */
  759. #define EXCEPT_AA64_EL2_SYNC __vcpu_except_flags(4)
  760. #define EXCEPT_AA64_EL2_IRQ __vcpu_except_flags(5)
  761. #define EXCEPT_AA64_EL2_FIQ __vcpu_except_flags(6)
  762. #define EXCEPT_AA64_EL2_SERR __vcpu_except_flags(7)
  763. /* Guest debug is live */
  764. #define DEBUG_DIRTY __vcpu_single_flag(iflags, BIT(4))
  765. /* Save SPE context if active */
  766. #define DEBUG_STATE_SAVE_SPE __vcpu_single_flag(iflags, BIT(5))
  767. /* Save TRBE context if active */
  768. #define DEBUG_STATE_SAVE_TRBE __vcpu_single_flag(iflags, BIT(6))
  769. /* SVE enabled for host EL0 */
  770. #define HOST_SVE_ENABLED __vcpu_single_flag(sflags, BIT(0))
  771. /* SME enabled for EL0 */
  772. #define HOST_SME_ENABLED __vcpu_single_flag(sflags, BIT(1))
  773. /* Physical CPU not in supported_cpus */
  774. #define ON_UNSUPPORTED_CPU __vcpu_single_flag(sflags, BIT(2))
  775. /* WFIT instruction trapped */
  776. #define IN_WFIT __vcpu_single_flag(sflags, BIT(3))
  777. /* vcpu system registers loaded on physical CPU */
  778. #define SYSREGS_ON_CPU __vcpu_single_flag(sflags, BIT(4))
  779. /* Software step state is Active-pending */
  780. #define DBG_SS_ACTIVE_PENDING __vcpu_single_flag(sflags, BIT(5))
  781. /* PMUSERENR for the guest EL0 is on physical CPU */
  782. #define PMUSERENR_ON_CPU __vcpu_single_flag(sflags, BIT(6))
  783. /* WFI instruction trapped */
  784. #define IN_WFI __vcpu_single_flag(sflags, BIT(7))
  785. /* Pointer to the vcpu's SVE FFR for sve_{save,load}_state() */
  786. #define vcpu_sve_pffr(vcpu) (kern_hyp_va((vcpu)->arch.sve_state) + \
  787. sve_ffr_offset((vcpu)->arch.sve_max_vl))
  788. #define vcpu_sve_max_vq(vcpu) sve_vq_from_vl((vcpu)->arch.sve_max_vl)
  789. #define vcpu_sve_zcr_elx(vcpu) \
  790. (unlikely(is_hyp_ctxt(vcpu)) ? ZCR_EL2 : ZCR_EL1)
  791. #define vcpu_sve_state_size(vcpu) ({ \
  792. size_t __size_ret; \
  793. unsigned int __vcpu_vq; \
  794. \
  795. if (WARN_ON(!sve_vl_valid((vcpu)->arch.sve_max_vl))) { \
  796. __size_ret = 0; \
  797. } else { \
  798. __vcpu_vq = vcpu_sve_max_vq(vcpu); \
  799. __size_ret = SVE_SIG_REGS_SIZE(__vcpu_vq); \
  800. } \
  801. \
  802. __size_ret; \
  803. })
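/*
 * Illustrative arithmetic (not part of the original header), assuming the
 * usual SVE signal-frame layout behind SVE_SIG_REGS_SIZE(): per vector
 * granule (vq == VL / 16 bytes) there are 32 Z registers of 16 * vq bytes,
 * 16 P registers of 2 * vq bytes and one FFR of 2 * vq bytes, i.e.
 * 546 * vq bytes in total. A vcpu with sve_max_vl of 256 bytes has
 * vcpu_sve_max_vq() == 16, so vcpu_sve_state_size() evaluates to
 * 546 * 16 == 8736 bytes.
 */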
  804. #define KVM_GUESTDBG_VALID_MASK (KVM_GUESTDBG_ENABLE | \
  805. KVM_GUESTDBG_USE_SW_BP | \
  806. KVM_GUESTDBG_USE_HW | \
  807. KVM_GUESTDBG_SINGLESTEP)
  808. #define vcpu_has_sve(vcpu) (system_supports_sve() && \
  809. vcpu_get_flag(vcpu, GUEST_HAS_SVE))
  810. #ifdef CONFIG_ARM64_PTR_AUTH
  811. #define vcpu_has_ptrauth(vcpu) \
  812. ((cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH) || \
  813. cpus_have_final_cap(ARM64_HAS_GENERIC_AUTH)) && \
  814. vcpu_get_flag(vcpu, GUEST_HAS_PTRAUTH))
  815. #else
  816. #define vcpu_has_ptrauth(vcpu) false
  817. #endif
  818. #define vcpu_on_unsupported_cpu(vcpu) \
  819. vcpu_get_flag(vcpu, ON_UNSUPPORTED_CPU)
  820. #define vcpu_set_on_unsupported_cpu(vcpu) \
  821. vcpu_set_flag(vcpu, ON_UNSUPPORTED_CPU)
  822. #define vcpu_clear_on_unsupported_cpu(vcpu) \
  823. vcpu_clear_flag(vcpu, ON_UNSUPPORTED_CPU)
  824. #define vcpu_gp_regs(v) (&(v)->arch.ctxt.regs)
  825. /*
  826. * Only use __vcpu_sys_reg/ctxt_sys_reg if you know you want the
  827. * memory backed version of a register, and not the one most recently
  828. * accessed by a running VCPU. For example, for userspace access or
  829. * for system registers that are never context switched, but only
  830. * emulated.
  831. *
  832. * Don't bother with VNCR-based accesses in the nVHE code, it has no
  833. * business dealing with NV.
  834. */
  835. static inline u64 *___ctxt_sys_reg(const struct kvm_cpu_context *ctxt, int r)
  836. {
  837. #if !defined (__KVM_NVHE_HYPERVISOR__)
  838. if (unlikely(cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
  839. r >= __VNCR_START__ && ctxt->vncr_array))
  840. return &ctxt->vncr_array[r - __VNCR_START__];
  841. #endif
  842. return (u64 *)&ctxt->sys_regs[r];
  843. }
  844. #define __ctxt_sys_reg(c,r) \
  845. ({ \
  846. BUILD_BUG_ON(__builtin_constant_p(r) && \
  847. (r) >= NR_SYS_REGS); \
  848. ___ctxt_sys_reg(c, r); \
  849. })
  850. #define ctxt_sys_reg(c,r) (*__ctxt_sys_reg(c,r))
  851. u64 kvm_vcpu_sanitise_vncr_reg(const struct kvm_vcpu *, enum vcpu_sysreg);
  852. #define __vcpu_sys_reg(v,r) \
  853. (*({ \
  854. const struct kvm_cpu_context *ctxt = &(v)->arch.ctxt; \
  855. u64 *__r = __ctxt_sys_reg(ctxt, (r)); \
  856. if (vcpu_has_nv((v)) && (r) >= __VNCR_START__) \
  857. *__r = kvm_vcpu_sanitise_vncr_reg((v), (r)); \
  858. __r; \
  859. }))
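/*
 * Illustrative usage (not part of the original header): both accessors yield
 * an lvalue on the memory-backed (or VNCR-backed) copy of the register, e.g.
 *
 *	u64 mdscr = __vcpu_sys_reg(vcpu, MDSCR_EL1);
 *	__vcpu_sys_reg(vcpu, MDSCR_EL1) = mdscr | DBG_MDSCR_SS;
 *
 * That copy may be stale while the vcpu is loaded on a physical CPU; use
 * vcpu_read_sys_reg()/vcpu_write_sys_reg() below when the most recent value
 * is required.
 */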
  860. u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg);
  861. void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg);
  862. static inline bool __vcpu_read_sys_reg_from_cpu(int reg, u64 *val)
  863. {
  864. /*
  865. * *** VHE ONLY ***
  866. *
  867. * System registers listed in the switch are not saved on every
  868. * exit from the guest but are only saved on vcpu_put.
  869. *
  870. * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
  871. * should never be listed below, because the guest cannot modify its
  872. * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
  873. * thread when emulating cross-VCPU communication.
  874. */
  875. if (!has_vhe())
  876. return false;
  877. switch (reg) {
  878. case SCTLR_EL1: *val = read_sysreg_s(SYS_SCTLR_EL12); break;
  879. case CPACR_EL1: *val = read_sysreg_s(SYS_CPACR_EL12); break;
  880. case TTBR0_EL1: *val = read_sysreg_s(SYS_TTBR0_EL12); break;
  881. case TTBR1_EL1: *val = read_sysreg_s(SYS_TTBR1_EL12); break;
  882. case TCR_EL1: *val = read_sysreg_s(SYS_TCR_EL12); break;
  883. case ESR_EL1: *val = read_sysreg_s(SYS_ESR_EL12); break;
  884. case AFSR0_EL1: *val = read_sysreg_s(SYS_AFSR0_EL12); break;
  885. case AFSR1_EL1: *val = read_sysreg_s(SYS_AFSR1_EL12); break;
  886. case FAR_EL1: *val = read_sysreg_s(SYS_FAR_EL12); break;
  887. case MAIR_EL1: *val = read_sysreg_s(SYS_MAIR_EL12); break;
  888. case VBAR_EL1: *val = read_sysreg_s(SYS_VBAR_EL12); break;
  889. case CONTEXTIDR_EL1: *val = read_sysreg_s(SYS_CONTEXTIDR_EL12);break;
  890. case TPIDR_EL0: *val = read_sysreg_s(SYS_TPIDR_EL0); break;
  891. case TPIDRRO_EL0: *val = read_sysreg_s(SYS_TPIDRRO_EL0); break;
  892. case TPIDR_EL1: *val = read_sysreg_s(SYS_TPIDR_EL1); break;
  893. case AMAIR_EL1: *val = read_sysreg_s(SYS_AMAIR_EL12); break;
  894. case CNTKCTL_EL1: *val = read_sysreg_s(SYS_CNTKCTL_EL12); break;
  895. case ELR_EL1: *val = read_sysreg_s(SYS_ELR_EL12); break;
  896. case SPSR_EL1: *val = read_sysreg_s(SYS_SPSR_EL12); break;
  897. case PAR_EL1: *val = read_sysreg_par(); break;
  898. case DACR32_EL2: *val = read_sysreg_s(SYS_DACR32_EL2); break;
  899. case IFSR32_EL2: *val = read_sysreg_s(SYS_IFSR32_EL2); break;
  900. case DBGVCR32_EL2: *val = read_sysreg_s(SYS_DBGVCR32_EL2); break;
  901. case ZCR_EL1: *val = read_sysreg_s(SYS_ZCR_EL12); break;
  902. default: return false;
  903. }
  904. return true;
  905. }
  906. static inline bool __vcpu_write_sys_reg_to_cpu(u64 val, int reg)
  907. {
  908. /*
  909. * *** VHE ONLY ***
  910. *
  911. * System registers listed in the switch are not restored on every
  912. * entry to the guest but are only restored on vcpu_load.
  913. *
  914. * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
  915. * should never be listed below, because the MPIDR should only be set
  916. * once, before running the VCPU, and never changed later.
  917. */
  918. if (!has_vhe())
  919. return false;
  920. switch (reg) {
  921. case SCTLR_EL1: write_sysreg_s(val, SYS_SCTLR_EL12); break;
  922. case CPACR_EL1: write_sysreg_s(val, SYS_CPACR_EL12); break;
  923. case TTBR0_EL1: write_sysreg_s(val, SYS_TTBR0_EL12); break;
  924. case TTBR1_EL1: write_sysreg_s(val, SYS_TTBR1_EL12); break;
  925. case TCR_EL1: write_sysreg_s(val, SYS_TCR_EL12); break;
  926. case ESR_EL1: write_sysreg_s(val, SYS_ESR_EL12); break;
  927. case AFSR0_EL1: write_sysreg_s(val, SYS_AFSR0_EL12); break;
  928. case AFSR1_EL1: write_sysreg_s(val, SYS_AFSR1_EL12); break;
  929. case FAR_EL1: write_sysreg_s(val, SYS_FAR_EL12); break;
  930. case MAIR_EL1: write_sysreg_s(val, SYS_MAIR_EL12); break;
  931. case VBAR_EL1: write_sysreg_s(val, SYS_VBAR_EL12); break;
  932. case CONTEXTIDR_EL1: write_sysreg_s(val, SYS_CONTEXTIDR_EL12);break;
  933. case TPIDR_EL0: write_sysreg_s(val, SYS_TPIDR_EL0); break;
  934. case TPIDRRO_EL0: write_sysreg_s(val, SYS_TPIDRRO_EL0); break;
  935. case TPIDR_EL1: write_sysreg_s(val, SYS_TPIDR_EL1); break;
  936. case AMAIR_EL1: write_sysreg_s(val, SYS_AMAIR_EL12); break;
  937. case CNTKCTL_EL1: write_sysreg_s(val, SYS_CNTKCTL_EL12); break;
  938. case ELR_EL1: write_sysreg_s(val, SYS_ELR_EL12); break;
  939. case SPSR_EL1: write_sysreg_s(val, SYS_SPSR_EL12); break;
  940. case PAR_EL1: write_sysreg_s(val, SYS_PAR_EL1); break;
  941. case DACR32_EL2: write_sysreg_s(val, SYS_DACR32_EL2); break;
  942. case IFSR32_EL2: write_sysreg_s(val, SYS_IFSR32_EL2); break;
  943. case DBGVCR32_EL2: write_sysreg_s(val, SYS_DBGVCR32_EL2); break;
  944. case ZCR_EL1: write_sysreg_s(val, SYS_ZCR_EL12); break;
  945. default: return false;
  946. }
  947. return true;
  948. }
  949. struct kvm_vm_stat {
  950. struct kvm_vm_stat_generic generic;
  951. };
  952. struct kvm_vcpu_stat {
  953. struct kvm_vcpu_stat_generic generic;
  954. u64 hvc_exit_stat;
  955. u64 wfe_exit_stat;
  956. u64 wfi_exit_stat;
  957. u64 mmio_exit_user;
  958. u64 mmio_exit_kernel;
  959. u64 signal_exits;
  960. u64 exits;
  961. };
  962. unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
  963. int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
  964. int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
  965. int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
  966. unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu);
  967. int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices);
  968. int __kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu,
  969. struct kvm_vcpu_events *events);
  970. int __kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu,
  971. struct kvm_vcpu_events *events);
  972. void kvm_arm_halt_guest(struct kvm *kvm);
  973. void kvm_arm_resume_guest(struct kvm *kvm);
  974. #define vcpu_has_run_once(vcpu) !!rcu_access_pointer((vcpu)->pid)
  975. #ifndef __KVM_NVHE_HYPERVISOR__
  976. #define kvm_call_hyp_nvhe(f, ...) \
  977. ({ \
  978. struct arm_smccc_res res; \
  979. \
  980. arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(f), \
  981. ##__VA_ARGS__, &res); \
  982. WARN_ON(res.a0 != SMCCC_RET_SUCCESS); \
  983. \
  984. res.a1; \
  985. })
  986. /*
  987. * The couple of isb() below are there to guarantee the same behaviour
  988. * on VHE as on !VHE, where the eret to EL1 acts as a context
  989. * synchronization event.
  990. */
  991. #define kvm_call_hyp(f, ...) \
  992. do { \
  993. if (has_vhe()) { \
  994. f(__VA_ARGS__); \
  995. isb(); \
  996. } else { \
  997. kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
  998. } \
  999. } while(0)
  1000. #define kvm_call_hyp_ret(f, ...) \
  1001. ({ \
  1002. typeof(f(__VA_ARGS__)) ret; \
  1003. \
  1004. if (has_vhe()) { \
  1005. ret = f(__VA_ARGS__); \
  1006. isb(); \
  1007. } else { \
  1008. ret = kvm_call_hyp_nvhe(f, ##__VA_ARGS__); \
  1009. } \
  1010. \
  1011. ret; \
  1012. })
  1013. #else /* __KVM_NVHE_HYPERVISOR__ */
  1014. #define kvm_call_hyp(f, ...) f(__VA_ARGS__)
  1015. #define kvm_call_hyp_ret(f, ...) f(__VA_ARGS__)
  1016. #define kvm_call_hyp_nvhe(f, ...) f(__VA_ARGS__)
  1017. #endif /* __KVM_NVHE_HYPERVISOR__ */
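/*
 * Illustrative usage (not part of the original header): from regular kernel
 * code a call into the hyp code looks the same in both configurations, e.g.
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
 *
 * On VHE this is a direct function call followed by an isb(); on nVHE it
 * becomes an HVC to the hypervisor, with the function ID derived from the
 * symbol name by KVM_HOST_SMCCC_FUNC().
 */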
  1018. int handle_exit(struct kvm_vcpu *vcpu, int exception_index);
  1019. void handle_exit_early(struct kvm_vcpu *vcpu, int exception_index);
  1020. int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu);
  1021. int kvm_handle_cp14_32(struct kvm_vcpu *vcpu);
  1022. int kvm_handle_cp14_64(struct kvm_vcpu *vcpu);
  1023. int kvm_handle_cp15_32(struct kvm_vcpu *vcpu);
  1024. int kvm_handle_cp15_64(struct kvm_vcpu *vcpu);
  1025. int kvm_handle_sys_reg(struct kvm_vcpu *vcpu);
  1026. int kvm_handle_cp10_id(struct kvm_vcpu *vcpu);
  1027. void kvm_sys_regs_create_debugfs(struct kvm *kvm);
  1028. void kvm_reset_sys_regs(struct kvm_vcpu *vcpu);
  1029. int __init kvm_sys_reg_table_init(void);
  1030. struct sys_reg_desc;
  1031. int __init populate_sysreg_config(const struct sys_reg_desc *sr,
  1032. unsigned int idx);
  1033. int __init populate_nv_trap_config(void);
  1034. bool lock_all_vcpus(struct kvm *kvm);
  1035. void unlock_all_vcpus(struct kvm *kvm);
  1036. void kvm_calculate_traps(struct kvm_vcpu *vcpu);
  1037. /* MMIO helpers */
  1038. void kvm_mmio_write_buf(void *buf, unsigned int len, unsigned long data);
  1039. unsigned long kvm_mmio_read_buf(const void *buf, unsigned int len);
  1040. int kvm_handle_mmio_return(struct kvm_vcpu *vcpu);
  1041. int io_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa);
  1042. /*
  1043. * Returns true if a Performance Monitoring Interrupt (PMI), a.k.a. perf event,
  1044. * arrived in guest context. For arm64, any event that arrives while a vCPU is
  1045. * loaded is considered to be "in guest".
  1046. */
  1047. static inline bool kvm_arch_pmi_in_guest(struct kvm_vcpu *vcpu)
  1048. {
  1049. return IS_ENABLED(CONFIG_GUEST_PERF_EVENTS) && !!vcpu;
  1050. }
  1051. long kvm_hypercall_pv_features(struct kvm_vcpu *vcpu);
  1052. gpa_t kvm_init_stolen_time(struct kvm_vcpu *vcpu);
  1053. void kvm_update_stolen_time(struct kvm_vcpu *vcpu);
  1054. bool kvm_arm_pvtime_supported(void);
  1055. int kvm_arm_pvtime_set_attr(struct kvm_vcpu *vcpu,
  1056. struct kvm_device_attr *attr);
  1057. int kvm_arm_pvtime_get_attr(struct kvm_vcpu *vcpu,
  1058. struct kvm_device_attr *attr);
  1059. int kvm_arm_pvtime_has_attr(struct kvm_vcpu *vcpu,
  1060. struct kvm_device_attr *attr);
  1061. extern unsigned int __ro_after_init kvm_arm_vmid_bits;
  1062. int __init kvm_arm_vmid_alloc_init(void);
  1063. void __init kvm_arm_vmid_alloc_free(void);
  1064. bool kvm_arm_vmid_update(struct kvm_vmid *kvm_vmid);
  1065. void kvm_arm_vmid_clear_active(void);
  1066. static inline void kvm_arm_pvtime_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
  1067. {
  1068. vcpu_arch->steal.base = INVALID_GPA;
  1069. }
  1070. static inline bool kvm_arm_is_pvtime_enabled(struct kvm_vcpu_arch *vcpu_arch)
  1071. {
  1072. return (vcpu_arch->steal.base != INVALID_GPA);
  1073. }
  1074. void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
  1075. struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr);
  1076. DECLARE_KVM_HYP_PER_CPU(struct kvm_host_data, kvm_host_data);
  1077. /*
  1078. * How we access per-CPU host data depends on where we access it from,
  1079. * and the mode we're in:
  1080. *
  1081. * - VHE and nVHE hypervisor bits use their locally defined instance
  1082. *
  1083. * - the rest of the kernel uses either the VHE or nVHE one, depending on
  1084. * the mode we're running in.
  1085. *
  1086. * Unless we're in protected mode, where the host is fully deprivileged and
  1087. * the nVHE per-CPU data is exclusively accessible to the protected EL2 code.
  1088. * In this case, the EL1 code uses the *VHE* data as its private state
  1089. * (which makes sense in a way as there shouldn't be any shared state
  1090. * between the host and the hypervisor).
  1091. *
  1092. * Yes, this is all totally trivial. Shoot me now.
  1093. */
  1094. #if defined(__KVM_NVHE_HYPERVISOR__) || defined(__KVM_VHE_HYPERVISOR__)
  1095. #define host_data_ptr(f) (&this_cpu_ptr(&kvm_host_data)->f)
  1096. #else
  1097. #define host_data_ptr(f) \
  1098. (static_branch_unlikely(&kvm_protected_mode_initialized) ? \
  1099. &this_cpu_ptr(&kvm_host_data)->f : \
  1100. &this_cpu_ptr_hyp_sym(kvm_host_data)->f)
  1101. #endif
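/*
 * Illustrative usage (not part of the original header): host_data_ptr() takes
 * a (possibly nested) field of struct kvm_host_data and resolves it against
 * whichever per-CPU instance is live in the current context, e.g.
 *
 *	u64 *pmscr = host_data_ptr(host_debug_state.pmscr_el1);
 */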
  1102. /* Check whether the FP regs are owned by the guest */
  1103. static inline bool guest_owns_fp_regs(void)
  1104. {
  1105. return *host_data_ptr(fp_owner) == FP_STATE_GUEST_OWNED;
  1106. }
  1107. /* Check whether the FP regs are owned by the host */
  1108. static inline bool host_owns_fp_regs(void)
  1109. {
  1110. return *host_data_ptr(fp_owner) == FP_STATE_HOST_OWNED;
  1111. }
  1112. static inline void kvm_init_host_cpu_context(struct kvm_cpu_context *cpu_ctxt)
  1113. {
  1114. /* The host's MPIDR is immutable, so let's set it up at boot time */
  1115. ctxt_sys_reg(cpu_ctxt, MPIDR_EL1) = read_cpuid_mpidr();
  1116. }
  1117. static inline bool kvm_system_needs_idmapped_vectors(void)
  1118. {
  1119. return cpus_have_final_cap(ARM64_SPECTRE_V3A);
  1120. }
  1121. static inline void kvm_arch_sync_events(struct kvm *kvm) {}
  1122. void kvm_arm_init_debug(void);
  1123. void kvm_arm_vcpu_init_debug(struct kvm_vcpu *vcpu);
  1124. void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
  1125. void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
  1126. void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
  1127. #define kvm_vcpu_os_lock_enabled(vcpu) \
  1128. (!!(__vcpu_sys_reg(vcpu, OSLSR_EL1) & OSLSR_EL1_OSLK))
  1129. int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
  1130. struct kvm_device_attr *attr);
  1131. int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
  1132. struct kvm_device_attr *attr);
  1133. int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
  1134. struct kvm_device_attr *attr);
  1135. int kvm_vm_ioctl_mte_copy_tags(struct kvm *kvm,
  1136. struct kvm_arm_copy_mte_tags *copy_tags);
  1137. int kvm_vm_ioctl_set_counter_offset(struct kvm *kvm,
  1138. struct kvm_arm_counter_offset *offset);
  1139. int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm,
  1140. struct reg_mask_range *range);
  1141. /* Guest/host FPSIMD coordination helpers */
  1142. int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu);
  1143. void kvm_arch_vcpu_load_fp(struct kvm_vcpu *vcpu);
  1144. void kvm_arch_vcpu_ctxflush_fp(struct kvm_vcpu *vcpu);
  1145. void kvm_arch_vcpu_ctxsync_fp(struct kvm_vcpu *vcpu);
  1146. void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu);
  1147. static inline bool kvm_pmu_counter_deferred(struct perf_event_attr *attr)
  1148. {
  1149. return (!has_vhe() && attr->exclude_host);
  1150. }
  1151. /* Flags for host debug state */
  1152. void kvm_arch_vcpu_load_debug_state_flags(struct kvm_vcpu *vcpu);
  1153. void kvm_arch_vcpu_put_debug_state_flags(struct kvm_vcpu *vcpu);
  1154. #ifdef CONFIG_KVM
  1155. void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr);
  1156. void kvm_clr_pmu_events(u64 clr);
  1157. bool kvm_set_pmuserenr(u64 val);
  1158. #else
  1159. static inline void kvm_set_pmu_events(u64 set, struct perf_event_attr *attr) {}
  1160. static inline void kvm_clr_pmu_events(u64 clr) {}
  1161. static inline bool kvm_set_pmuserenr(u64 val)
  1162. {
  1163. return false;
  1164. }
  1165. #endif
  1166. void kvm_vcpu_load_vhe(struct kvm_vcpu *vcpu);
  1167. void kvm_vcpu_put_vhe(struct kvm_vcpu *vcpu);
  1168. int __init kvm_set_ipa_limit(void);
  1169. u32 kvm_get_pa_bits(struct kvm *kvm);
  1170. #define __KVM_HAVE_ARCH_VM_ALLOC
  1171. struct kvm *kvm_arch_alloc_vm(void);
  1172. #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS
  1173. #define __KVM_HAVE_ARCH_FLUSH_REMOTE_TLBS_RANGE
  1174. #define kvm_vm_is_protected(kvm) (is_protected_kvm_enabled() && (kvm)->arch.pkvm.enabled)
  1175. #define vcpu_is_protected(vcpu) kvm_vm_is_protected((vcpu)->kvm)
  1176. int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature);
  1177. bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu);
  1178. #define kvm_arm_vcpu_sve_finalized(vcpu) vcpu_get_flag(vcpu, VCPU_SVE_FINALIZED)
  1179. #define kvm_has_mte(kvm) \
  1180. (system_supports_mte() && \
  1181. test_bit(KVM_ARCH_FLAG_MTE_ENABLED, &(kvm)->arch.flags))
  1182. #define kvm_supports_32bit_el0() \
  1183. (system_supports_32bit_el0() && \
  1184. !static_branch_unlikely(&arm64_mismatched_32bit_el0))
  1185. #define kvm_vm_has_ran_once(kvm) \
  1186. (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &(kvm)->arch.flags))
  1187. static inline bool __vcpu_has_feature(const struct kvm_arch *ka, int feature)
  1188. {
  1189. return test_bit(feature, ka->vcpu_features);
  1190. }
  1191. #define vcpu_has_feature(v, f) __vcpu_has_feature(&(v)->kvm->arch, (f))
  1192. #define kvm_vcpu_initialized(v) vcpu_get_flag(vcpu, VCPU_INITIALIZED)
  1193. int kvm_trng_call(struct kvm_vcpu *vcpu);
  1194. #ifdef CONFIG_KVM
  1195. extern phys_addr_t hyp_mem_base;
  1196. extern phys_addr_t hyp_mem_size;
  1197. void __init kvm_hyp_reserve(void);
  1198. #else
  1199. static inline void kvm_hyp_reserve(void) { }
  1200. #endif
  1201. void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu);
  1202. bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu);
  1203. static inline u64 *__vm_id_reg(struct kvm_arch *ka, u32 reg)
  1204. {
  1205. switch (reg) {
  1206. case sys_reg(3, 0, 0, 1, 0) ... sys_reg(3, 0, 0, 7, 7):
  1207. return &ka->id_regs[IDREG_IDX(reg)];
  1208. case SYS_CTR_EL0:
  1209. return &ka->ctr_el0;
  1210. default:
  1211. WARN_ON_ONCE(1);
  1212. return NULL;
  1213. }
  1214. }
  1215. #define kvm_read_vm_id_reg(kvm, reg) \
  1216. ({ u64 __val = *__vm_id_reg(&(kvm)->arch, reg); __val; })
  1217. void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val);
  1218. #define __expand_field_sign_unsigned(id, fld, val) \
  1219. ((u64)SYS_FIELD_VALUE(id, fld, val))
  1220. #define __expand_field_sign_signed(id, fld, val) \
  1221. ({ \
  1222. u64 __val = SYS_FIELD_VALUE(id, fld, val); \
  1223. sign_extend64(__val, id##_##fld##_WIDTH - 1); \
  1224. })
  1225. #define get_idreg_field_unsigned(kvm, id, fld) \
  1226. ({ \
  1227. u64 __val = kvm_read_vm_id_reg((kvm), SYS_##id); \
  1228. FIELD_GET(id##_##fld##_MASK, __val); \
  1229. })
  1230. #define get_idreg_field_signed(kvm, id, fld) \
  1231. ({ \
  1232. u64 __val = get_idreg_field_unsigned(kvm, id, fld); \
  1233. sign_extend64(__val, id##_##fld##_WIDTH - 1); \
  1234. })
  1235. #define get_idreg_field_enum(kvm, id, fld) \
  1236. get_idreg_field_unsigned(kvm, id, fld)
  1237. #define kvm_cmp_feat_signed(kvm, id, fld, op, limit) \
  1238. (get_idreg_field_signed((kvm), id, fld) op __expand_field_sign_signed(id, fld, limit))
  1239. #define kvm_cmp_feat_unsigned(kvm, id, fld, op, limit) \
  1240. (get_idreg_field_unsigned((kvm), id, fld) op __expand_field_sign_unsigned(id, fld, limit))
  1241. #define kvm_cmp_feat(kvm, id, fld, op, limit) \
  1242. (id##_##fld##_SIGNED ? \
  1243. kvm_cmp_feat_signed(kvm, id, fld, op, limit) : \
  1244. kvm_cmp_feat_unsigned(kvm, id, fld, op, limit))
  1245. #define kvm_has_feat(kvm, id, fld, limit) \
  1246. kvm_cmp_feat(kvm, id, fld, >=, limit)
  1247. #define kvm_has_feat_enum(kvm, id, fld, val) \
  1248. kvm_cmp_feat_unsigned(kvm, id, fld, ==, val)
  1249. #define kvm_has_feat_range(kvm, id, fld, min, max) \
  1250. (kvm_cmp_feat(kvm, id, fld, >=, min) && \
  1251. kvm_cmp_feat(kvm, id, fld, <=, max))
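/*
 * Illustrative usage (not part of the original header): the comparisons
 * operate on the VM's sanitised view of the ID registers, so a feature check
 * reads as, e.g.,
 *
 *	if (kvm_has_feat(kvm, ID_AA64PFR0_EL1, SVE, IMP))
 *		...;
 *
 * with the signedness of the field picked up automatically from the
 * generated ID_<reg>_<field>_SIGNED definitions; kvm_has_pauth() below is a
 * composite check built on top of kvm_has_feat().
 */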
  1252. /* Check for a given level of PAuth support */
  1253. #define kvm_has_pauth(k, l) \
  1254. ({ \
  1255. bool pa, pi, pa3; \
  1256. \
  1257. pa = kvm_has_feat((k), ID_AA64ISAR1_EL1, APA, l); \
  1258. pa &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPA, IMP); \
  1259. pi = kvm_has_feat((k), ID_AA64ISAR1_EL1, API, l); \
  1260. pi &= kvm_has_feat((k), ID_AA64ISAR1_EL1, GPI, IMP); \
  1261. pa3 = kvm_has_feat((k), ID_AA64ISAR2_EL1, APA3, l); \
  1262. pa3 &= kvm_has_feat((k), ID_AA64ISAR2_EL1, GPA3, IMP); \
  1263. \
  1264. (pa + pi + pa3) == 1; \
  1265. })
  1266. #define kvm_has_fpmr(k) \
  1267. (system_supports_fpmr() && \
  1268. kvm_has_feat((k), ID_AA64PFR2_EL1, FPMR, IMP))
  1269. #endif /* __ARM64_KVM_HOST_H__ */