/* arch/x86/kvm/mmu/tdp_mmu.h — web-capture artifacts (filename, size, line-number run) removed */
// SPDX-License-Identifier: GPL-2.0
#ifndef __KVM_X86_MMU_TDP_MMU_H
#define __KVM_X86_MMU_TDP_MMU_H

#include <linux/kvm_host.h>

#include "spte.h"

/* Per-VM TDP MMU setup and teardown. */
void kvm_mmu_init_tdp_mmu(struct kvm *kvm);
void kvm_mmu_uninit_tdp_mmu(struct kvm *kvm);

/* Allocate a TDP MMU root for @vcpu; returns 0 on success (negative errno presumed — confirm in .c). */
int kvm_tdp_mmu_alloc_root(struct kvm_vcpu *vcpu);
  9. __must_check static inline bool kvm_tdp_mmu_get_root(struct kvm_mmu_page *root)
  10. {
  11. return refcount_inc_not_zero(&root->tdp_mmu_root_count);
  12. }
/* Drop a reference previously obtained with kvm_tdp_mmu_get_root(). */
void kvm_tdp_mmu_put_root(struct kvm *kvm, struct kvm_mmu_page *root);

/*
 * Zapping operations: remove mappings for a GFN range, a single shadow
 * page, or the entire VM.  NOTE(review): the "flush" bool presumably
 * tracks whether a TLB flush is still pending — confirm against the
 * definitions in tdp_mmu.c.
 */
bool kvm_tdp_mmu_zap_leafs(struct kvm *kvm, gfn_t start, gfn_t end, bool flush);
bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp);
void kvm_tdp_mmu_zap_all(struct kvm *kvm);
void kvm_tdp_mmu_invalidate_all_roots(struct kvm *kvm);
void kvm_tdp_mmu_zap_invalidated_roots(struct kvm *kvm);

/* Resolve a page fault by populating the TDP page tables. */
int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault);

/* GFN-range hooks (unmap / age / test-age) over kvm_gfn_range. */
bool kvm_tdp_mmu_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range,
				 bool flush);
bool kvm_tdp_mmu_age_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_tdp_mmu_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);

/* Write protection and dirty logging over a memslot (or a single GFN). */
bool kvm_tdp_mmu_wrprot_slot(struct kvm *kvm,
			     const struct kvm_memory_slot *slot, int min_level);
bool kvm_tdp_mmu_clear_dirty_slot(struct kvm *kvm,
				  const struct kvm_memory_slot *slot);
void kvm_tdp_mmu_clear_dirty_pt_masked(struct kvm *kvm,
				       struct kvm_memory_slot *slot,
				       gfn_t gfn, unsigned long mask,
				       bool wrprot);
void kvm_tdp_mmu_zap_collapsible_sptes(struct kvm *kvm,
				       const struct kvm_memory_slot *slot);
bool kvm_tdp_mmu_write_protect_gfn(struct kvm *kvm,
				   struct kvm_memory_slot *slot, gfn_t gfn,
				   int min_level);

/* Split huge pages in [@start, @end) down to @target_level. */
void kvm_tdp_mmu_try_split_huge_pages(struct kvm *kvm,
				      const struct kvm_memory_slot *slot,
				      gfn_t start, gfn_t end,
				      int target_level, bool shared);
/*
 * Begin a lockless walk of the TDP page tables by entering an RCU
 * read-side critical section.  Must be paired with
 * kvm_tdp_mmu_walk_lockless_end().
 */
static inline void kvm_tdp_mmu_walk_lockless_begin(void)
{
	rcu_read_lock();
}
/*
 * End a lockless walk started with kvm_tdp_mmu_walk_lockless_begin():
 * leaves the RCU read-side critical section.
 */
static inline void kvm_tdp_mmu_walk_lockless_end(void)
{
	rcu_read_unlock();
}
/*
 * Walk the TDP page tables for @addr, filling @sptes and @root_level.
 * NOTE(review): exact fill contract (count returned, ordering) is
 * defined in tdp_mmu.c — confirm there before relying on it.
 */
int kvm_tdp_mmu_get_walk(struct kvm_vcpu *vcpu, u64 addr, u64 *sptes,
			 int *root_level);

/* Fast-page-fault helper: returns a pointer to the last SPTE for @gfn, storing its value in *@spte. */
u64 *kvm_tdp_mmu_fast_pf_get_last_sptep(struct kvm_vcpu *vcpu, gfn_t gfn,
					u64 *spte);
  53. #ifdef CONFIG_X86_64
  54. static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return sp->tdp_mmu_page; }
  55. #else
  56. static inline bool is_tdp_mmu_page(struct kvm_mmu_page *sp) { return false; }
  57. #endif
  58. #endif /* __KVM_X86_MMU_TDP_MMU_H */