/* arch/powerpc/kvm/book3s_hv.h */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Privileged (non-hypervisor) host registers to save.
  4. */
  5. #include "asm/guest-state-buffer.h"
/*
 * Snapshot of host SPR values taken around P9 guest entry so they can be
 * restored on exit (see save_p9_host_os_sprs()/restore_p9_host_os_sprs()
 * below). Field names follow the Power ISA SPR names.
 */
struct p9_host_os_sprs {
	unsigned long iamr;	/* Instruction Authority Mask Register */
	unsigned long amr;	/* Authority Mask Register */
	/* Performance monitor counters PMC1-PMC6 (32-bit SPRs) */
	unsigned int pmc1;
	unsigned int pmc2;
	unsigned int pmc3;
	unsigned int pmc4;
	unsigned int pmc5;
	unsigned int pmc6;
	/* Monitor Mode Control Registers */
	unsigned long mmcr0;
	unsigned long mmcr1;
	unsigned long mmcr2;
	unsigned long mmcr3;
	unsigned long mmcra;
	unsigned long siar;	/* Sampled Instruction Address Register */
	/* Sampled Instruction Event Registers */
	unsigned long sier1;
	unsigned long sier2;
	unsigned long sier3;
	unsigned long sdar;	/* Sampled Data Address Register */
};
  26. static inline bool nesting_enabled(struct kvm *kvm)
  27. {
  28. return kvm->arch.nested_enable && kvm_is_radix(kvm);
  29. }
/* Load/store guest vcpu register state around P9 guest entry/exit. */
bool load_vcpu_state(struct kvm_vcpu *vcpu,
		     struct p9_host_os_sprs *host_os_sprs);
void store_vcpu_state(struct kvm_vcpu *vcpu);

/* Save/restore the host SPR snapshot defined above. */
void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs);
void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
			     struct p9_host_os_sprs *host_os_sprs);

/* Switch the performance-monitor unit between host and guest context. */
void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
			 struct p9_host_os_sprs *host_os_sprs);
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
			struct p9_host_os_sprs *host_os_sprs);
#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next);
/* Start accumulating time into 'next'. */
#define start_timing(vcpu, next) accumulate_time(vcpu, next)
/* A NULL 'next' ends the current accumulation period. */
#define end_timing(vcpu) accumulate_time(vcpu, NULL)
#else
/* Timing support compiled out: all hooks become no-ops. */
#define accumulate_time(vcpu, next) do {} while (0)
#define start_timing(vcpu, next) do {} while (0)
#define end_timing(vcpu) do {} while (0)
#endif
/*
 * Set the guest MSR in the shared register area and mark its
 * guest-state-buffer ID dirty (for the nestedv2 path).
 */
static inline void __kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu->arch.shregs.msr = val;
	kvmhv_nestedv2_mark_dirty(vcpu, KVMPPC_GSID_MSR);
}
/*
 * Read the guest MSR, first reloading the cached value (nestedv2 path).
 * A failed reload is unexpected, hence the WARN_ON.
 */
static inline u64 __kvmppc_get_msr_hv(struct kvm_vcpu *vcpu)
{
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, KVMPPC_GSID_MSR) < 0);
	return vcpu->arch.shregs.msr;
}
/*
 * Generate kvmppc_set_<reg>_hv(): store the value into vcpu->arch.<reg>
 * and mark the register's guest-state ID 'iden' dirty via
 * kvmhv_nestedv2_mark_dirty().
 */
#define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, u##size val) \
{ \
	vcpu->arch.reg = val; \
	kvmhv_nestedv2_mark_dirty(vcpu, iden); \
}
  65. #define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
  66. static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu) \
  67. { \
  68. kvmhv_nestedv2_cached_reload(vcpu, iden); \
  69. return vcpu->arch.reg; \
  70. }
  71. #define KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(reg, size, iden) \
  72. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_SET(reg, size, iden) \
  73. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR_GET(reg, size, iden) \
/*
 * Array variant of the setter: store into vcpu->arch.<reg>[i] and mark
 * the per-index guest-state ID iden(i) dirty.
 */
#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
static inline void kvmppc_set_##reg ##_hv(struct kvm_vcpu *vcpu, int i, u##size val) \
{ \
	vcpu->arch.reg[i] = val; \
	kvmhv_nestedv2_mark_dirty(vcpu, iden(i)); \
}
/*
 * Array variant of the getter: reload the cached value for the per-index
 * guest-state ID iden(i) (warning on failure), then return
 * vcpu->arch.<reg>[i].
 */
#define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
static inline u##size kvmppc_get_##reg ##_hv(struct kvm_vcpu *vcpu, int i) \
{ \
	WARN_ON(kvmhv_nestedv2_cached_reload(vcpu, iden(i)) < 0); \
	return vcpu->arch.reg[i]; \
}
  86. #define KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(reg, size, iden) \
  87. KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_SET(reg, size, iden) \
  88. KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR_GET(reg, size, iden) \
  89. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(mmcra, 64, KVMPPC_GSID_MMCRA)
  90. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hfscr, 64, KVMPPC_GSID_HFSCR)
  91. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(fscr, 64, KVMPPC_GSID_FSCR)
  92. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dscr, 64, KVMPPC_GSID_DSCR)
  93. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(purr, 64, KVMPPC_GSID_PURR)
  94. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(spurr, 64, KVMPPC_GSID_SPURR)
  95. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(amr, 64, KVMPPC_GSID_AMR)
  96. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(uamor, 64, KVMPPC_GSID_UAMOR)
  97. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(siar, 64, KVMPPC_GSID_SIAR)
  98. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(sdar, 64, KVMPPC_GSID_SDAR)
  99. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(iamr, 64, KVMPPC_GSID_IAMR)
  100. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr0, 64, KVMPPC_GSID_DAWR0)
  101. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawr1, 64, KVMPPC_GSID_DAWR1)
  102. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx0, 64, KVMPPC_GSID_DAWRX0)
  103. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dawrx1, 64, KVMPPC_GSID_DAWRX1)
  104. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(dexcr, 64, KVMPPC_GSID_DEXCR)
  105. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashkeyr, 64, KVMPPC_GSID_HASHKEYR)
  106. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(hashpkeyr, 64, KVMPPC_GSID_HASHPKEYR)
  107. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ciabr, 64, KVMPPC_GSID_CIABR)
  108. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(wort, 64, KVMPPC_GSID_WORT)
  109. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ppr, 64, KVMPPC_GSID_PPR)
  110. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(ctrl, 64, KVMPPC_GSID_CTRL);
  111. KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(mmcr, 64, KVMPPC_GSID_MMCR)
  112. KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(sier, 64, KVMPPC_GSID_SIER)
  113. KVMPPC_BOOK3S_HV_VCPU_ARRAY_ACCESSOR(pmc, 32, KVMPPC_GSID_PMC)
  114. KVMPPC_BOOK3S_HV_VCPU_ACCESSOR(pspb, 32, KVMPPC_GSID_PSPB)