kvm_vcpu_pmu.h 4.5 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132
  1. /* SPDX-License-Identifier: GPL-2.0-only */
  2. /*
  3. * Copyright (c) 2023 Rivos Inc
  4. *
  5. * Authors:
  6. * Atish Patra <atishp@rivosinc.com>
  7. */
  8. #ifndef __KVM_VCPU_RISCV_PMU_H
  9. #define __KVM_VCPU_RISCV_PMU_H
  10. #include <linux/perf/riscv_pmu.h>
  11. #include <asm/kvm_vcpu_insn.h>
  12. #include <asm/sbi.h>
  13. #ifdef CONFIG_RISCV_PMU_SBI
/* Maximum number of firmware (SBI FW event) counters exposed to the guest */
#define RISCV_KVM_MAX_FW_CTRS 32
/* Maximum number of hardware performance counters exposed to the guest */
#define RISCV_KVM_MAX_HW_CTRS 32
#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
/* The pmc_in_use/pmc_overflown bitmaps assume the total fits in 64 bits */
static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
/*
 * Per-vcpu state of one firmware-managed event: these are counted by KVM
 * in software (see kvm_riscv_vcpu_pmu_incr_fw) rather than by a hw counter.
 */
struct kvm_fw_event {
	/* Current value of the event */
	u64 value;
	/* Event monitoring status: true while the guest has this event started */
	bool started;
};
  24. /* Per virtual pmu counter data */
/* Per virtual pmu counter data */
struct kvm_pmc {
	/* Index of this counter within kvm_pmu::pmc[] */
	u8 idx;
	/* Backing host perf event (NULL when none is attached — TODO confirm) */
	struct perf_event *perf_event;
	/* Saved counter value (e.g. across stop/start) */
	u64 counter_val;
	/* SBI PMU counter-info word reported to the guest for this counter */
	union sbi_pmu_ctr_info cinfo;
	/* Event monitoring status */
	bool started;
	/* Monitoring event ID */
	unsigned long event_idx;
	/* Back-pointer to the owning vcpu */
	struct kvm_vcpu *vcpu;
};
  36. /* PMU data structure per vcpu */
  37. struct kvm_pmu {
  38. struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
  39. struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
  40. /* Number of the virtual firmware counters available */
  41. int num_fw_ctrs;
  42. /* Number of the virtual hardware counters available */
  43. int num_hw_ctrs;
  44. /* A flag to indicate that pmu initialization is done */
  45. bool init_done;
  46. /* Bit map of all the virtual counter used */
  47. DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
  48. /* Bit map of all the virtual counter overflown */
  49. DECLARE_BITMAP(pmc_overflown, RISCV_KVM_MAX_COUNTERS);
  50. /* The address of the counter snapshot area (guest physical address) */
  51. gpa_t snapshot_addr;
  52. /* The actual data of the snapshot */
  53. struct riscv_pmu_snapshot_data *sdata;
  54. };
/* Convert between a vcpu and the PMU context embedded in its arch state */
#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context))

/*
 * CSR emulation table entries covering the unprivileged counter CSR range.
 * On 32-bit, guests also access the high-word aliases starting at CSR_CYCLEH,
 * so a second 32-entry range is needed; both dispatch to the same handler.
 */
#if defined(CONFIG_32BIT)
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLEH, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
#else
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE, .count = 32, .func = kvm_riscv_vcpu_pmu_read_hpm },
#endif
/* Increment the software-counted firmware event identified by @fid */
int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
/* CSR read/modify handler for the emulated hpmcounter CSR range */
int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
				unsigned long *val, unsigned long new_val,
				unsigned long wr_mask);
/* SBI PMU: report the total number of virtual counters */
int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
/* SBI PMU: return the counter-info word for counter @cidx */
int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
				struct kvm_vcpu_sbi_return *retdata);
/* SBI PMU: start the counters selected by @ctr_base/@ctr_mask */
int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				 unsigned long ctr_mask, unsigned long flags, u64 ival,
				 struct kvm_vcpu_sbi_return *retdata);
/* SBI PMU: stop the counters selected by @ctr_base/@ctr_mask */
int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				unsigned long ctr_mask, unsigned long flags,
				struct kvm_vcpu_sbi_return *retdata);
/* SBI PMU: find/configure a counter matching event @eidx */
int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
				     unsigned long ctr_mask, unsigned long flags,
				     unsigned long eidx, u64 evtdata,
				     struct kvm_vcpu_sbi_return *retdata);
/* SBI PMU: read the value of firmware counter @cidx */
int kvm_riscv_vcpu_pmu_fw_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
				   struct kvm_vcpu_sbi_return *retdata);
/* SBI PMU: read the upper 32 bits of firmware counter @cidx (32-bit guests) */
int kvm_riscv_vcpu_pmu_fw_ctr_read_hi(struct kvm_vcpu *vcpu, unsigned long cidx,
				      struct kvm_vcpu_sbi_return *retdata);
/* Set up the per-vcpu virtual PMU state */
void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
/* SBI PMU: set (or disable) the counter snapshot shared-memory area */
int kvm_riscv_vcpu_pmu_snapshot_set_shmem(struct kvm_vcpu *vcpu, unsigned long saddr_low,
					  unsigned long saddr_high, unsigned long flags,
					  struct kvm_vcpu_sbi_return *retdata);
/* Tear down the per-vcpu virtual PMU state */
void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
/* Reset the virtual PMU (e.g. on vcpu reset) */
void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
  92. #else
/* Empty PMU context when CONFIG_RISCV_PMU_SBI is disabled: no state to keep */
struct kvm_pmu {
};
  95. static inline int kvm_riscv_vcpu_pmu_read_legacy(struct kvm_vcpu *vcpu, unsigned int csr_num,
  96. unsigned long *val, unsigned long new_val,
  97. unsigned long wr_mask)
  98. {
  99. if (csr_num == CSR_CYCLE || csr_num == CSR_INSTRET) {
  100. *val = 0;
  101. return KVM_INSN_CONTINUE_NEXT_SEPC;
  102. } else {
  103. return KVM_INSN_ILLEGAL_TRAP;
  104. }
  105. }
/* Only emulate the 3 base counter CSRs starting at CSR_CYCLE as reading zero */
#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
{.base = CSR_CYCLE, .count = 3, .func = kvm_riscv_vcpu_pmu_read_legacy },

/* No-op stubs so callers need not be conditional on CONFIG_RISCV_PMU_SBI */
static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
{
	return 0;
}
static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
  115. #endif /* CONFIG_RISCV_PMU_SBI */
  116. #endif /* !__KVM_VCPU_RISCV_PMU_H */