sys_regs.c

/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
	return false;
}

u64 vcpu_read_sys_reg(struct kvm_vcpu *vcpu, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_read;

	/*
	 * System registers listed in the switch are not saved on every
	 * exit from the guest but are only saved on vcpu_put.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the guest cannot modify its
	 * own MPIDR_EL1 and MPIDR_EL1 is accessed for VCPU A from VCPU B's
	 * thread when emulating cross-VCPU communication.
	 */
	switch (reg) {
	case CSSELR_EL1:	return read_sysreg_s(SYS_CSSELR_EL1);
	case SCTLR_EL1:		return read_sysreg_s(sctlr_EL12);
	case ACTLR_EL1:		return read_sysreg_s(SYS_ACTLR_EL1);
	case CPACR_EL1:		return read_sysreg_s(cpacr_EL12);
	case TTBR0_EL1:		return read_sysreg_s(ttbr0_EL12);
	case TTBR1_EL1:		return read_sysreg_s(ttbr1_EL12);
	case TCR_EL1:		return read_sysreg_s(tcr_EL12);
	case ESR_EL1:		return read_sysreg_s(esr_EL12);
	case AFSR0_EL1:		return read_sysreg_s(afsr0_EL12);
	case AFSR1_EL1:		return read_sysreg_s(afsr1_EL12);
	case FAR_EL1:		return read_sysreg_s(far_EL12);
	case MAIR_EL1:		return read_sysreg_s(mair_EL12);
	case VBAR_EL1:		return read_sysreg_s(vbar_EL12);
	case CONTEXTIDR_EL1:	return read_sysreg_s(contextidr_EL12);
	case TPIDR_EL0:		return read_sysreg_s(SYS_TPIDR_EL0);
	case TPIDRRO_EL0:	return read_sysreg_s(SYS_TPIDRRO_EL0);
	case TPIDR_EL1:		return read_sysreg_s(SYS_TPIDR_EL1);
	case AMAIR_EL1:		return read_sysreg_s(amair_EL12);
	case CNTKCTL_EL1:	return read_sysreg_s(cntkctl_EL12);
	case PAR_EL1:		return read_sysreg_s(SYS_PAR_EL1);
	case DACR32_EL2:	return read_sysreg_s(SYS_DACR32_EL2);
	case IFSR32_EL2:	return read_sysreg_s(SYS_IFSR32_EL2);
	case DBGVCR32_EL2:	return read_sysreg_s(SYS_DBGVCR32_EL2);
	}

immediate_read:
	return __vcpu_sys_reg(vcpu, reg);
}

void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	if (!vcpu->arch.sysregs_loaded_on_cpu)
		goto immediate_write;

	/*
	 * System registers listed in the switch are not restored on every
	 * entry to the guest but are only restored on vcpu_load.
	 *
	 * Note that MPIDR_EL1 for the guest is set by KVM via VMPIDR_EL2 but
	 * should never be listed below, because the MPIDR should only be
	 * set once, before running the VCPU, and never changed later.
	 */
	switch (reg) {
	case CSSELR_EL1:	write_sysreg_s(val, SYS_CSSELR_EL1);	return;
	case SCTLR_EL1:		write_sysreg_s(val, sctlr_EL12);	return;
	case ACTLR_EL1:		write_sysreg_s(val, SYS_ACTLR_EL1);	return;
	case CPACR_EL1:		write_sysreg_s(val, cpacr_EL12);	return;
	case TTBR0_EL1:		write_sysreg_s(val, ttbr0_EL12);	return;
	case TTBR1_EL1:		write_sysreg_s(val, ttbr1_EL12);	return;
	case TCR_EL1:		write_sysreg_s(val, tcr_EL12);		return;
	case ESR_EL1:		write_sysreg_s(val, esr_EL12);		return;
	case AFSR0_EL1:		write_sysreg_s(val, afsr0_EL12);	return;
	case AFSR1_EL1:		write_sysreg_s(val, afsr1_EL12);	return;
	case FAR_EL1:		write_sysreg_s(val, far_EL12);		return;
	case MAIR_EL1:		write_sysreg_s(val, mair_EL12);		return;
	case VBAR_EL1:		write_sysreg_s(val, vbar_EL12);		return;
	case CONTEXTIDR_EL1:	write_sysreg_s(val, contextidr_EL12);	return;
	case TPIDR_EL0:		write_sysreg_s(val, SYS_TPIDR_EL0);	return;
	case TPIDRRO_EL0:	write_sysreg_s(val, SYS_TPIDRRO_EL0);	return;
	case TPIDR_EL1:		write_sysreg_s(val, SYS_TPIDR_EL1);	return;
	case AMAIR_EL1:		write_sysreg_s(val, amair_EL12);	return;
	case CNTKCTL_EL1:	write_sysreg_s(val, cntkctl_EL12);	return;
	case PAR_EL1:		write_sysreg_s(val, SYS_PAR_EL1);	return;
	case DACR32_EL2:	write_sysreg_s(val, SYS_DACR32_EL2);	return;
	case IFSR32_EL2:	write_sysreg_s(val, SYS_IFSR32_EL2);	return;
	case DBGVCR32_EL2:	write_sysreg_s(val, SYS_DBGVCR32_EL2);	return;
	}

immediate_write:
	__vcpu_sys_reg(vcpu, reg) = val;
}
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);
	local_irq_enable();

	return ccsidr;
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val;
	int reg = r->reg;

	BUG_ON(!p->is_write);

	/* See the 32bit mapping in kvm_host.h */
	if (p->is_aarch32)
		reg = r->reg / 2;

	if (!p->is_aarch32 || !p->is_32bit) {
		val = p->regval;
	} else {
		val = vcpu_read_sys_reg(vcpu, reg);
		if (r->reg % 2)
			val = (p->regval << 32) | (u64)lower_32_bits(val);
		else
			val = ((u64)upper_32_bits(val) << 32) |
				lower_32_bits(p->regval);
	}

	vcpu_write_sys_reg(vcpu, val, reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}

/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->is_aarch32) {
		switch (p->Op1) {
		default:	/* Keep GCC quiet */
		case 0:		/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:		/* ICC_ASGI1R */
		case 2:		/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {
		switch (p->Op2) {
		default:	/* Keep GCC quiet */
		case 5:		/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:		/* ICC_ASGI1R_EL1 */
		case 7:		/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}

static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);

	p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static bool trap_undef(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}

static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = (1 << 3);
		return true;
	}
}

static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}

/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);
	}

	trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

	return true;
}

/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the KVM_ARM64_DEBUG_DIRTY flag to ensure the
 * hyp.S code switches between host and guest values in future.
 */
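/*
 * For example (derived from reg_to_dbg() below): a 32-bit guest write of
 * 0x1234 to a debug register currently holding 0xffffffff00000000 yields
 * 0xffffffff00001234 - the low word is replaced, the high word preserved.
 */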
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	u64 val = p->regval;

	if (p->is_32bit) {
		val &= 0xffffffffUL;
		val |= ((*dbg_reg >> 32) << 32);
	}

	*dbg_reg = val;
	vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       u64 *dbg_reg)
{
	p->regval = *dbg_reg;
	if (p->is_32bit)
		p->regval &= 0xffffffffUL;
}

static bool trap_bvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg] = rd->val;
}

static bool trap_bcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_bcr[rd->reg] = rd->val;
}

static bool trap_wvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write,
		       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg]);

	return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wvr[rd->reg] = rd->val;
}

static bool trap_wcr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (p->is_write)
		reg_to_dbg(vcpu, p, dbg_reg);
	else
		dbg_to_reg(vcpu, p, dbg_reg);

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_from_user(r, uaddr, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		   const struct kvm_one_reg *reg, void __user *uaddr)
{
	__u64 *r = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg];

	if (copy_to_user(uaddr, r, KVM_REG_SIZE(reg->id)) != 0)
		return -EFAULT;
	return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
		      const struct sys_reg_desc *rd)
{
	vcpu->arch.vcpu_debug_state.dbg_wcr[rd->reg] = rd->val;
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);
	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
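	/*
	 * For example, vcpu_id 0x123 becomes Aff0 = 0x3, Aff1 = 0x12 and
	 * Aff2 = 0x0, so the guest reads back MPIDR_EL1 = 0x80001203
	 * (bit 31 is RES1).
	 */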
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}

static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr, val;

	/* No PMU available, PMCR_EL0 may UNDEF... */
	if (!kvm_arm_support_pmu_v3())
		return;

	pmcr = read_sysreg(pmcr_el0);
	/*
	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to UNKNOWN
	 * except PMCR.E resetting to zero.
	 */
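	/*
	 * 0xdecafbad is just an arbitrary junk pattern: any value is an
	 * acceptable UNKNOWN for the writable bits, as long as PMCR.E is
	 * then cleared.
	 */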
	val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
	       | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
	__vcpu_sys_reg(vcpu, r->reg) = val;
}

static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}

static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/* Only update writeable bits of PMCR */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		__vcpu_sys_reg(vcpu, PMCR_EL0) = val;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = __vcpu_sys_reg(vcpu, PMCR_EL0)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}

static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & ARMV8_PMU_COUNTER_MASK;

	return true;
}

static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (!(p->Op2 & 1))
		pmceid = read_sysreg(pmceid0_el0);
	else
		pmceid = read_sysreg(pmceid1_el0);

	p->regval = pmceid;

	return true;
}

static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
	val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}

static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			      & ARMV8_PMU_COUNTER_MASK;
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		} else {
			return false;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	} else {
		return false;
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}

static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		__vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
	}

	return true;
}

static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1) {
			/* accessing PMCNTENSET_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
			kvm_pmu_enable_counter(vcpu, val);
		} else {
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
			kvm_pmu_disable_counter(vcpu, val);
		}
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
	}

	return true;
}

static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!vcpu_mode_priv(vcpu)) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
	}

	return true;
}

static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_valid_counter_mask(vcpu);

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
	}

	return true;
}

static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_valid_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);
	return true;
}

static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (!kvm_arm_pmu_v3_ready(vcpu))
		return trap_raz_wi(vcpu, p, r);

	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu)) {
			kvm_inject_undefined(vcpu);
			return false;
		}

		__vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
			       p->regval & ARMV8_PMU_USERENR_MASK;
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}

/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },		\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },		\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },		\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ SYS_DESC(SYS_PMEVCNTRn_EL0(n)),				\
	  access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ SYS_DESC(SYS_PMEVTYPERn_EL0(n)),				\
	  access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }

static bool access_cntp_tval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	u64 now = kvm_phys_timer_read();
	u64 cval;

	if (p->is_write) {
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL,
				      p->regval + now);
	} else {
		cval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);
		p->regval = cval - now;
	}

	return true;
}

static bool access_cntp_ctl(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CTL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CTL);

	return true;
}

static bool access_cntp_cval(struct kvm_vcpu *vcpu,
			     struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write)
		kvm_arm_timer_set_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL, p->regval);
	else
		p->regval = kvm_arm_timer_get_reg(vcpu, KVM_REG_ARM_PTIMER_CVAL);

	return true;
}

/* Read a sanitised cpufeature ID register by sys_reg_desc */
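/*
 * For example, ID_AA64PFR0_EL1 is encoded as Op0=3, Op1=0, CRn=0, CRm=4,
 * Op2=0; sys_reg() below rebuilds that encoding from the descriptor fields
 * so the sanitised value can be looked up with read_sanitised_ftr_reg().
 */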
  868. static u64 read_id_reg(struct sys_reg_desc const *r, bool raz)
  869. {
  870. u32 id = sys_reg((u32)r->Op0, (u32)r->Op1,
  871. (u32)r->CRn, (u32)r->CRm, (u32)r->Op2);
  872. u64 val = raz ? 0 : read_sanitised_ftr_reg(id);
  873. if (id == SYS_ID_AA64PFR0_EL1) {
  874. if (val & (0xfUL << ID_AA64PFR0_SVE_SHIFT))
  875. kvm_debug("SVE unsupported for guests, suppressing\n");
  876. val &= ~(0xfUL << ID_AA64PFR0_SVE_SHIFT);
  877. } else if (id == SYS_ID_AA64MMFR1_EL1) {
  878. if (val & (0xfUL << ID_AA64MMFR1_LOR_SHIFT))
  879. kvm_debug("LORegions unsupported for guests, suppressing\n");
  880. val &= ~(0xfUL << ID_AA64MMFR1_LOR_SHIFT);
  881. }
  882. return val;
  883. }
  884. /* cpufeature ID register access trap handlers */
  885. static bool __access_id_reg(struct kvm_vcpu *vcpu,
  886. struct sys_reg_params *p,
  887. const struct sys_reg_desc *r,
  888. bool raz)
  889. {
  890. if (p->is_write)
  891. return write_to_read_only(vcpu, p, r);
  892. p->regval = read_id_reg(r, raz);
  893. return true;
  894. }
  895. static bool access_id_reg(struct kvm_vcpu *vcpu,
  896. struct sys_reg_params *p,
  897. const struct sys_reg_desc *r)
  898. {
  899. return __access_id_reg(vcpu, p, r, false);
  900. }
  901. static bool access_raz_id_reg(struct kvm_vcpu *vcpu,
  902. struct sys_reg_params *p,
  903. const struct sys_reg_desc *r)
  904. {
  905. return __access_id_reg(vcpu, p, r, true);
  906. }
  907. static int reg_from_user(u64 *val, const void __user *uaddr, u64 id);
  908. static int reg_to_user(void __user *uaddr, const u64 *val, u64 id);
  909. static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
  910. /*
  911. * cpufeature ID register user accessors
  912. *
  913. * For now, these registers are immutable for userspace, so no values
  914. * are stored, and for set_id_reg() we don't allow the effective value
  915. * to be changed.
  916. */
  917. static int __get_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
  918. bool raz)
  919. {
  920. const u64 id = sys_reg_to_index(rd);
  921. const u64 val = read_id_reg(rd, raz);
  922. return reg_to_user(uaddr, &val, id);
  923. }
  924. static int __set_id_reg(const struct sys_reg_desc *rd, void __user *uaddr,
  925. bool raz)
  926. {
  927. const u64 id = sys_reg_to_index(rd);
  928. int err;
  929. u64 val;
  930. err = reg_from_user(&val, uaddr, id);
  931. if (err)
  932. return err;
  933. /* This is what we mean by invariant: you can't change it. */
  934. if (val != read_id_reg(rd, raz))
  935. return -EINVAL;
  936. return 0;
  937. }
  938. static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  939. const struct kvm_one_reg *reg, void __user *uaddr)
  940. {
  941. return __get_id_reg(rd, uaddr, false);
  942. }
  943. static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  944. const struct kvm_one_reg *reg, void __user *uaddr)
  945. {
  946. return __set_id_reg(rd, uaddr, false);
  947. }
  948. static int get_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  949. const struct kvm_one_reg *reg, void __user *uaddr)
  950. {
  951. return __get_id_reg(rd, uaddr, true);
  952. }
  953. static int set_raz_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
  954. const struct kvm_one_reg *reg, void __user *uaddr)
  955. {
  956. return __set_id_reg(rd, uaddr, true);
  957. }
  958. /* sys_reg_desc initialiser for known cpufeature ID registers */
  959. #define ID_SANITISED(name) { \
  960. SYS_DESC(SYS_##name), \
  961. .access = access_id_reg, \
  962. .get_user = get_id_reg, \
  963. .set_user = set_id_reg, \
  964. }
  965. /*
  966. * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
  967. * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
  968. * (1 <= crm < 8, 0 <= Op2 < 8).
  969. */
  970. #define ID_UNALLOCATED(crm, op2) { \
  971. Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2), \
  972. .access = access_raz_id_reg, \
  973. .get_user = get_raz_id_reg, \
  974. .set_user = set_raz_id_reg, \
  975. }
  976. /*
  977. * sys_reg_desc initialiser for known ID registers that we hide from guests.
  978. * For now, these are exposed just like unallocated ID regs: they appear
  979. * RAZ for the guest.
  980. */
  981. #define ID_HIDDEN(name) { \
  982. SYS_DESC(SYS_##name), \
  983. .access = access_raz_id_reg, \
  984. .get_user = get_raz_id_reg, \
  985. .set_user = set_raz_id_reg, \
  986. }
  987. /*
  988. * Architected system registers.
  989. * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
  990. *
  991. * Debug handling: We do trap most, if not all debug related system
  992. * registers. The implementation is good enough to ensure that a guest
  993. * can use these with minimal performance degradation. The drawback is
  994. * that we don't implement any of the external debug, none of the
  995. * OSlock protocol. This should be revisited if we ever encounter a
  996. * more demanding guest...
  997. */
  998. static const struct sys_reg_desc sys_reg_descs[] = {
  999. { SYS_DESC(SYS_DC_ISW), access_dcsw },
  1000. { SYS_DESC(SYS_DC_CSW), access_dcsw },
  1001. { SYS_DESC(SYS_DC_CISW), access_dcsw },
  1002. DBG_BCR_BVR_WCR_WVR_EL1(0),
  1003. DBG_BCR_BVR_WCR_WVR_EL1(1),
  1004. { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
  1005. { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
  1006. DBG_BCR_BVR_WCR_WVR_EL1(2),
  1007. DBG_BCR_BVR_WCR_WVR_EL1(3),
  1008. DBG_BCR_BVR_WCR_WVR_EL1(4),
  1009. DBG_BCR_BVR_WCR_WVR_EL1(5),
  1010. DBG_BCR_BVR_WCR_WVR_EL1(6),
  1011. DBG_BCR_BVR_WCR_WVR_EL1(7),
  1012. DBG_BCR_BVR_WCR_WVR_EL1(8),
  1013. DBG_BCR_BVR_WCR_WVR_EL1(9),
  1014. DBG_BCR_BVR_WCR_WVR_EL1(10),
  1015. DBG_BCR_BVR_WCR_WVR_EL1(11),
  1016. DBG_BCR_BVR_WCR_WVR_EL1(12),
  1017. DBG_BCR_BVR_WCR_WVR_EL1(13),
  1018. DBG_BCR_BVR_WCR_WVR_EL1(14),
  1019. DBG_BCR_BVR_WCR_WVR_EL1(15),
  1020. { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
  1021. { SYS_DESC(SYS_OSLAR_EL1), trap_raz_wi },
  1022. { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1 },
  1023. { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
  1024. { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
  1025. { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
  1026. { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
  1027. { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
  1028. { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
  1029. { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
  1030. // DBGDTR[TR]X_EL0 share the same encoding
  1031. { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
  1032. { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },
  1033. { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
  1034. /*
  1035. * ID regs: all ID_SANITISED() entries here must have corresponding
  1036. * entries in arm64_ftr_regs[].
  1037. */
  1038. /* AArch64 mappings of the AArch32 ID registers */
  1039. /* CRm=1 */
  1040. ID_SANITISED(ID_PFR0_EL1),
  1041. ID_SANITISED(ID_PFR1_EL1),
  1042. ID_SANITISED(ID_DFR0_EL1),
  1043. ID_HIDDEN(ID_AFR0_EL1),
  1044. ID_SANITISED(ID_MMFR0_EL1),
  1045. ID_SANITISED(ID_MMFR1_EL1),
  1046. ID_SANITISED(ID_MMFR2_EL1),
  1047. ID_SANITISED(ID_MMFR3_EL1),
  1048. /* CRm=2 */
  1049. ID_SANITISED(ID_ISAR0_EL1),
  1050. ID_SANITISED(ID_ISAR1_EL1),
  1051. ID_SANITISED(ID_ISAR2_EL1),
  1052. ID_SANITISED(ID_ISAR3_EL1),
  1053. ID_SANITISED(ID_ISAR4_EL1),
  1054. ID_SANITISED(ID_ISAR5_EL1),
  1055. ID_SANITISED(ID_MMFR4_EL1),
  1056. ID_UNALLOCATED(2,7),
  1057. /* CRm=3 */
  1058. ID_SANITISED(MVFR0_EL1),
  1059. ID_SANITISED(MVFR1_EL1),
  1060. ID_SANITISED(MVFR2_EL1),
  1061. ID_UNALLOCATED(3,3),
  1062. ID_UNALLOCATED(3,4),
  1063. ID_UNALLOCATED(3,5),
  1064. ID_UNALLOCATED(3,6),
  1065. ID_UNALLOCATED(3,7),
  1066. /* AArch64 ID registers */
  1067. /* CRm=4 */
  1068. ID_SANITISED(ID_AA64PFR0_EL1),
  1069. ID_SANITISED(ID_AA64PFR1_EL1),
  1070. ID_UNALLOCATED(4,2),
  1071. ID_UNALLOCATED(4,3),
  1072. ID_UNALLOCATED(4,4),
  1073. ID_UNALLOCATED(4,5),
  1074. ID_UNALLOCATED(4,6),
  1075. ID_UNALLOCATED(4,7),
  1076. /* CRm=5 */
  1077. ID_SANITISED(ID_AA64DFR0_EL1),
  1078. ID_SANITISED(ID_AA64DFR1_EL1),
  1079. ID_UNALLOCATED(5,2),
  1080. ID_UNALLOCATED(5,3),
  1081. ID_HIDDEN(ID_AA64AFR0_EL1),
  1082. ID_HIDDEN(ID_AA64AFR1_EL1),
  1083. ID_UNALLOCATED(5,6),
  1084. ID_UNALLOCATED(5,7),
  1085. /* CRm=6 */
  1086. ID_SANITISED(ID_AA64ISAR0_EL1),
  1087. ID_SANITISED(ID_AA64ISAR1_EL1),
  1088. ID_UNALLOCATED(6,2),
  1089. ID_UNALLOCATED(6,3),
  1090. ID_UNALLOCATED(6,4),
  1091. ID_UNALLOCATED(6,5),
  1092. ID_UNALLOCATED(6,6),
  1093. ID_UNALLOCATED(6,7),
  1094. /* CRm=7 */
  1095. ID_SANITISED(ID_AA64MMFR0_EL1),
  1096. ID_SANITISED(ID_AA64MMFR1_EL1),
  1097. ID_SANITISED(ID_AA64MMFR2_EL1),
  1098. ID_UNALLOCATED(7,3),
  1099. ID_UNALLOCATED(7,4),
  1100. ID_UNALLOCATED(7,5),
  1101. ID_UNALLOCATED(7,6),
  1102. ID_UNALLOCATED(7,7),
  1103. { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
  1104. { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
  1105. { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
  1106. { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
  1107. { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
  1108. { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
  1109. { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
  1110. { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
  1111. { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
  1112. { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
  1113. { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
  1114. { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
  1115. { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
  1116. { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
  1117. { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
  1118. { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
  1119. { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
  1120. { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
  1121. { SYS_DESC(SYS_PMINTENSET_EL1), access_pminten, reset_unknown, PMINTENSET_EL1 },
  1122. { SYS_DESC(SYS_PMINTENCLR_EL1), access_pminten, NULL, PMINTENSET_EL1 },
  1123. { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
  1124. { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
  1125. { SYS_DESC(SYS_LORSA_EL1), trap_undef },
  1126. { SYS_DESC(SYS_LOREA_EL1), trap_undef },
  1127. { SYS_DESC(SYS_LORN_EL1), trap_undef },
  1128. { SYS_DESC(SYS_LORC_EL1), trap_undef },
  1129. { SYS_DESC(SYS_LORID_EL1), trap_undef },
  1130. { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
  1131. { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
  1132. { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
  1133. { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
  1134. { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
  1135. { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
  1136. { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
  1137. { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
  1138. { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
  1139. { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
  1140. { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
  1141. { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
  1142. { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
  1143. { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
  1144. { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
  1145. { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
  1146. { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
  1147. { SYS_DESC(SYS_CSSELR_EL1), NULL, reset_unknown, CSSELR_EL1 },
	{ SYS_DESC(SYS_PMCR_EL0), access_pmcr, reset_pmcr, PMCR_EL0 },
	{ SYS_DESC(SYS_PMCNTENSET_EL0), access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMCNTENCLR_EL0), access_pmcnten, NULL, PMCNTENSET_EL0 },
	{ SYS_DESC(SYS_PMOVSCLR_EL0), access_pmovs, NULL, PMOVSSET_EL0 },
	{ SYS_DESC(SYS_PMSWINC_EL0), access_pmswinc, reset_unknown, PMSWINC_EL0 },
	{ SYS_DESC(SYS_PMSELR_EL0), access_pmselr, reset_unknown, PMSELR_EL0 },
	{ SYS_DESC(SYS_PMCEID0_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCEID1_EL0), access_pmceid },
	{ SYS_DESC(SYS_PMCCNTR_EL0), access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
	{ SYS_DESC(SYS_PMXEVTYPER_EL0), access_pmu_evtyper },
	{ SYS_DESC(SYS_PMXEVCNTR_EL0), access_pmu_evcntr },
	/*
	 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMUSERENR_EL0), access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
	{ SYS_DESC(SYS_PMOVSSET_EL0), access_pmovs, reset_unknown, PMOVSSET_EL0 },

	{ SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
	{ SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },

	{ SYS_DESC(SYS_CNTP_TVAL_EL0), access_cntp_tval },
	{ SYS_DESC(SYS_CNTP_CTL_EL0), access_cntp_ctl },
	{ SYS_DESC(SYS_CNTP_CVAL_EL0), access_cntp_cval },

	/* PMEVCNTRn_EL0 */
	PMU_PMEVCNTR_EL0(0),
	PMU_PMEVCNTR_EL0(1),
	PMU_PMEVCNTR_EL0(2),
	PMU_PMEVCNTR_EL0(3),
	PMU_PMEVCNTR_EL0(4),
	PMU_PMEVCNTR_EL0(5),
	PMU_PMEVCNTR_EL0(6),
	PMU_PMEVCNTR_EL0(7),
	PMU_PMEVCNTR_EL0(8),
	PMU_PMEVCNTR_EL0(9),
	PMU_PMEVCNTR_EL0(10),
	PMU_PMEVCNTR_EL0(11),
	PMU_PMEVCNTR_EL0(12),
	PMU_PMEVCNTR_EL0(13),
	PMU_PMEVCNTR_EL0(14),
	PMU_PMEVCNTR_EL0(15),
	PMU_PMEVCNTR_EL0(16),
	PMU_PMEVCNTR_EL0(17),
	PMU_PMEVCNTR_EL0(18),
	PMU_PMEVCNTR_EL0(19),
	PMU_PMEVCNTR_EL0(20),
	PMU_PMEVCNTR_EL0(21),
	PMU_PMEVCNTR_EL0(22),
	PMU_PMEVCNTR_EL0(23),
	PMU_PMEVCNTR_EL0(24),
	PMU_PMEVCNTR_EL0(25),
	PMU_PMEVCNTR_EL0(26),
	PMU_PMEVCNTR_EL0(27),
	PMU_PMEVCNTR_EL0(28),
	PMU_PMEVCNTR_EL0(29),
	PMU_PMEVCNTR_EL0(30),
	/* PMEVTYPERn_EL0 */
	PMU_PMEVTYPER_EL0(0),
	PMU_PMEVTYPER_EL0(1),
	PMU_PMEVTYPER_EL0(2),
	PMU_PMEVTYPER_EL0(3),
	PMU_PMEVTYPER_EL0(4),
	PMU_PMEVTYPER_EL0(5),
	PMU_PMEVTYPER_EL0(6),
	PMU_PMEVTYPER_EL0(7),
	PMU_PMEVTYPER_EL0(8),
	PMU_PMEVTYPER_EL0(9),
	PMU_PMEVTYPER_EL0(10),
	PMU_PMEVTYPER_EL0(11),
	PMU_PMEVTYPER_EL0(12),
	PMU_PMEVTYPER_EL0(13),
	PMU_PMEVTYPER_EL0(14),
	PMU_PMEVTYPER_EL0(15),
	PMU_PMEVTYPER_EL0(16),
	PMU_PMEVTYPER_EL0(17),
	PMU_PMEVTYPER_EL0(18),
	PMU_PMEVTYPER_EL0(19),
	PMU_PMEVTYPER_EL0(20),
	PMU_PMEVTYPER_EL0(21),
	PMU_PMEVTYPER_EL0(22),
	PMU_PMEVTYPER_EL0(23),
	PMU_PMEVTYPER_EL0(24),
	PMU_PMEVTYPER_EL0(25),
	PMU_PMEVTYPER_EL0(26),
	PMU_PMEVTYPER_EL0(27),
	PMU_PMEVTYPER_EL0(28),
	PMU_PMEVTYPER_EL0(29),
	PMU_PMEVTYPER_EL0(30),
	/*
	 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
	 * in 32bit mode. Here we choose to reset it as zero for consistency.
	 */
	{ SYS_DESC(SYS_PMCCFILTR_EL0), access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },

	{ SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
	{ SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
	{ SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
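
/*
 * Synthesize a 32-bit DBGIDR value for the guest from the sanitised
 * AArch64 ID registers: the breakpoint, watchpoint and context-aware
 * breakpoint counts come from ID_AA64DFR0_EL1, the debug version field
 * is hard-coded to 6, and two feature bits reflect whether EL3 is
 * implemented (ID_AA64PFR0_EL1). Writes are ignored.
 */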
static bool trap_dbgidr(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
		u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
		u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL3_SHIFT);

		p->regval = ((((dfr >> ID_AA64DFR0_WRPS_SHIFT) & 0xf) << 28) |
			     (((dfr >> ID_AA64DFR0_BRPS_SHIFT) & 0xf) << 24) |
			     (((dfr >> ID_AA64DFR0_CTX_CMPS_SHIFT) & 0xf) << 20)
			     | (6 << 16) | (el3 << 14) | (el3 << 12));
		return true;
	}
}
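
/*
 * Generic handler for the 32-bit debug registers shadowed in the vcpu's
 * cp14 array: a write updates the shadow copy and flags the guest debug
 * state as dirty so it is synchronised with the hardware; a read simply
 * returns the shadow copy.
 */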
static bool trap_debug32(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write) {
		vcpu_cp14(vcpu, r->reg) = p->regval;
		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = vcpu_cp14(vcpu, r->reg);
	}

	return true;
}

/* AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * All control registers and watchpoint value registers are mapped to
 * the lower 32 bits of their AArch64 equivalents. We share the trap
 * handlers with the above AArch64 code which checks what mode the
 * system is in.
 */
static bool trap_xvr(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *p,
		     const struct sys_reg_desc *rd)
{
	u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->reg];

	if (p->is_write) {
		u64 val = *dbg_reg;

		val &= 0xffffffffUL;
		val |= p->regval << 32;
		*dbg_reg = val;

		vcpu->arch.flags |= KVM_ARM64_DEBUG_DIRTY;
	} else {
		p->regval = *dbg_reg >> 32;
	}

	trace_trap_reg(__func__, rd->reg, p->is_write, *dbg_reg);

	return true;
}

#define DBG_BCR_BVR_WCR_WVR(n)						\
	/* DBGBVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n },	\
	/* DBGBCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },	\
	/* DBGWVRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },	\
	/* DBGWCRn */							\
	{ Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)							\
	{ Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_xvr, NULL, n }

/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug registers, on the principle that they don't really make sense
 * to a guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
	/* DBGIDR */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgidr },
	/* DBGDTRRXext */
	{ Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

	DBG_BCR_BVR_WCR_WVR(0),
	/* DBGDSCRint */
	{ Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(1),
	/* DBGDCCINT */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug32, NULL, cp14_DBGDCCINT },
	/* DBGDSCRext */
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug32, NULL, cp14_DBGDSCRext },
	DBG_BCR_BVR_WCR_WVR(2),
	/* DBGDTR[RT]Xint */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
	/* DBGDTR[RT]Xext */
	{ Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(3),
	DBG_BCR_BVR_WCR_WVR(4),
	DBG_BCR_BVR_WCR_WVR(5),
	/* DBGWFAR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
	/* DBGOSECCR */
	{ Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
	DBG_BCR_BVR_WCR_WVR(6),
	/* DBGVCR */
	{ Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug32, NULL, cp14_DBGVCR },
	DBG_BCR_BVR_WCR_WVR(7),
	DBG_BCR_BVR_WCR_WVR(8),
	DBG_BCR_BVR_WCR_WVR(9),
	DBG_BCR_BVR_WCR_WVR(10),
	DBG_BCR_BVR_WCR_WVR(11),
	DBG_BCR_BVR_WCR_WVR(12),
	DBG_BCR_BVR_WCR_WVR(13),
	DBG_BCR_BVR_WCR_WVR(14),
	DBG_BCR_BVR_WCR_WVR(15),

	/* DBGDRAR (32bit) */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

	DBGBXVR(0),
	/* DBGOSLAR */
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_raz_wi },
	DBGBXVR(1),
	/* DBGOSLSR */
	{ Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1 },
	DBGBXVR(2),
	DBGBXVR(3),
	/* DBGOSDLR */
	{ Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
	DBGBXVR(4),
	/* DBGPRCR */
	{ Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
	DBGBXVR(5),
	DBGBXVR(6),
	DBGBXVR(7),
	DBGBXVR(8),
	DBGBXVR(9),
	DBGBXVR(10),
	DBGBXVR(11),
	DBGBXVR(12),
	DBGBXVR(13),
	DBGBXVR(14),
	DBGBXVR(15),

	/* DBGDSAR (32bit) */
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

	/* DBGDEVID2 */
	{ Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
	/* DBGDEVID1 */
	{ Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
	/* DBGDEVID */
	{ Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
	/* DBGCLAIMSET */
	{ Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
	/* DBGCLAIMCLR */
	{ Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
	/* DBGAUTHSTATUS */
	{ Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};

/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
	/* DBGDRAR (64bit) */
	{ Op1( 0), CRm( 1), .access = trap_raz_wi },

	/* DBGDSAR (64bit) */
	{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
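
/*
 * PMEVCNTRn/PMEVTYPERn live in the (Op1 0, c14) space: bits n[4:3] of
 * the counter index select CRm (0b10xx for event counters, 0b11xx for
 * event type registers) and bits n[2:0] select Op2, as expanded by the
 * macros below.
 */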
/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)							\
	/* PMEVCNTRn */							\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)						\
	/* PMEVTYPERn */						\
	{ Op1(0), CRn(0b1110),						\
	  CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)),		\
	  access_pmu_evtyper }

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, c2_TTBCR2 },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	/* PMU */
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	/* CNTP_TVAL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 0), access_cntp_tval },
	/* CNTP_CTL */
	{ Op1( 0), CRn(14), CRm( 2), Op2( 1), access_cntp_ctl },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};

static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ Op1( 2), CRn( 0), CRm(14), Op2( 0), access_cntp_cval },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}
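
/*
 * Pack the (Op0, Op1, CRn, CRm, Op2) encoding of an access or descriptor
 * into a single integer key, so that tables sorted by encoding can be
 * searched with bsearch() (see match_sys_reg() and find_reg() below).
 */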
#define reg_to_match_value(x)						\
	({								\
		unsigned long val;					\
		val = (x)->Op0 << 14;					\
		val |= (x)->Op1 << 11;					\
		val |= (x)->CRn << 7;					\
		val |= (x)->CRm << 3;					\
		val |= (x)->Op2;					\
		val;							\
	 })

static int match_sys_reg(const void *key, const void *elt)
{
	const unsigned long pval = (unsigned long)key;
	const struct sys_reg_desc *r = elt;

	return pval - reg_to_match_value(r);
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned long pval = reg_to_match_value(params);

	return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
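
/* LDC/STC accesses to cp14 are not emulated: inject an UNDEF into the guest. */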
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *		 call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return 0 if the access has been handled, and -1 if not.
 */
static int emulate_cp(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *params,
		      const struct sys_reg_desc *table,
		      size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return -1;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return 0;
	}

	/* Not handled */
	return -1;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 hsr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (hsr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	kvm_err("Unsupported guest CP%d access at: %08lx\n",
		cp, *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}
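
/*
 * For an MCRR/MRRC trap, the HSR ISS encodes Op1, CRm, the two GPR
 * indices (Rt/Rt2) and the access direction; CRn and Op2 have no
 * meaning for a 64-bit coprocessor access and are forced to zero below.
 */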
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (hsr >> 10) & 0x1f;

	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * Try to emulate the coprocessor access using the target
	 * specific table first, and using the global table afterwards.
	 * If either of the tables contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
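
/*
 * For an MCR/MRC trap, the HSR ISS encodes Opc1, Opc2, CRn, CRm, the GPR
 * index (Rt) and the access direction, which are decoded straight into a
 * struct sys_reg_params below.
 */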
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global,
			    const struct sys_reg_desc *target_specific,
			    size_t nr_specific)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	if (!emulate_cp(vcpu, &params, target_specific, nr_specific) ||
	    !emulate_cp(vcpu, &params, global, nr_global)) {
		if (!params.is_write)
			vcpu_set_reg(vcpu, Rt, params.regval);
		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_64(vcpu,
				cp15_64_regs, ARRAY_SIZE(cp15_64_regs),
				target_specific, num);
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	const struct sys_reg_desc *target_specific;
	size_t num;

	target_specific = get_target_table(vcpu->arch.target, false, &num);
	return kvm_handle_cp_32(vcpu,
				cp15_regs, ARRAY_SIZE(cp15_regs),
				target_specific, num);
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_64(vcpu,
				cp14_64_regs, ARRAY_SIZE(cp14_64_regs),
				NULL, 0);
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	return kvm_handle_cp_32(vcpu,
				cp14_regs, ARRAY_SIZE(cp14_regs),
				NULL, 0);
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
		kvm_inject_undefined(vcpu);
	}
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num,
				unsigned long *bmap)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset) {
			int reg = table[i].reg;

			table[i].reset(vcpu, &table[i]);
			if (reg > 0 && reg < NR_SYS_REGS)
				set_bit(reg, bmap);
		}
}
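
/*
 * For an MSR/MRS trap, ESR_ELx carries the full (Op0, Op1, CRn, CRm, Op2)
 * encoding of the system register, the GPR index and the access
 * direction; emulate_sys_reg() then looks the register up in the
 * target-specific and generic tables.
 */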
/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int ret;

	trace_kvm_handle_sys_reg(esr);

	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.regval = vcpu_get_reg(vcpu, Rt);
	params.is_write = !(esr & 1);

	ret = emulate_sys_reg(vcpu, &params);

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);
	return ret;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/
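
/*
 * Decode a KVM_REG_ARM64_SYSREG register id back into its
 * (Op0, Op1, CRn, CRm, Op2) encoding. Only 64-bit ids with no stray bits
 * set are accepted.
 */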
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

const struct sys_reg_desc *find_reg_by_id(u64 id,
					  struct sys_reg_params *params,
					  const struct sys_reg_desc table[],
					  unsigned int num)
{
	if (!index_to_params(id, params))
		return NULL;

	return find_reg(params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && !(r->reg || r->get_user))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};
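
/* Copy a register value to/from userspace, sized according to the register id. */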
static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	r = find_reg_by_id(id, &params, invariant_sys_regs,
			   ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}
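
/*
 * A CSSELR-style value selects a cache: bit 0 is the instruction/data
 * bit and bits [3:1] are the level. Validity is checked against the
 * cache hierarchy described by cache_levels (the cleaned-up CLIDR, see
 * kvm_sys_reg_table_init()).
 */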
static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
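
/*
 * "Demux" registers expose the per-cache CCSIDR values to userspace as
 * 32-bit registers. They are effectively read-only: a set must match
 * what the host reports.
 */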
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	if (r->get_user)
		return (r->get_user)(vcpu, r, reg, uaddr);

	return reg_to_user(uaddr, &__vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	if (r->set_user)
		return (r->set_user)(vcpu, r, reg, uaddr);

	return reg_from_user(&__vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}
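
/*
 * Build the KVM_{GET,SET}_ONE_REG id for a descriptor; this is the
 * inverse of index_to_params() above.
 */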
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;
	int err;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0)
			err = walk_one_sys_reg(i1, &uind, &total);
		else
			err = walk_one_sys_reg(i2, &uind, &total);

		if (err)
			return err;

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
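
/*
 * Sanity-check that a table is strictly sorted by encoding: find_reg()
 * relies on bsearch() and walk_sys_regs() relies on an ordered merge.
 */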
static int check_sysreg_table(const struct sys_reg_desc *table, unsigned int n)
{
	unsigned int i;

	for (i = 1; i < n; i++) {
		if (cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %p out of order (%d)\n", table, i - 1);
			return 1;
		}
	}

	return 0;
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	BUG_ON(check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs)));
	BUG_ON(check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs)));
	BUG_ON(check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs)));
	BUG_ON(check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
	BUG_ON(check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs)));
	BUG_ON(check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs)));

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;
	DECLARE_BITMAP(bmap, NR_SYS_REGS) = { 0, };

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs), bmap);

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num, bmap);

	for (num = 1; num < NR_SYS_REGS; num++) {
		if (WARN(!test_bit(num, bmap),
			 "Didn't reset __vcpu_sys_reg(%zi)\n", num))
			break;
	}
}