  1. // SPDX-License-Identifier: GPL-2.0
  2. /*
  3. * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
  4. */
  5. #include <linux/kvm_host.h>
  6. #include <asm/kvm_csr.h>
  7. #include <asm/kvm_vcpu.h>
  8. /*
  9. * ktime_to_tick() - Scale ktime_t to timer tick value.
  10. */
  11. static inline u64 ktime_to_tick(struct kvm_vcpu *vcpu, ktime_t now)
  12. {
  13. u64 delta;
  14. delta = ktime_to_ns(now);
  15. return div_u64(delta * vcpu->arch.timer_mhz, MNSEC_PER_SEC);
  16. }
  17. static inline u64 tick_to_ns(struct kvm_vcpu *vcpu, u64 tick)
  18. {
  19. return div_u64(tick * MNSEC_PER_SEC, vcpu->arch.timer_mhz);
  20. }
  21. /* Low level hrtimer wake routine */
  22. enum hrtimer_restart kvm_swtimer_wakeup(struct hrtimer *timer)
  23. {
  24. struct kvm_vcpu *vcpu;
  25. vcpu = container_of(timer, struct kvm_vcpu, arch.swtimer);
  26. kvm_queue_irq(vcpu, INT_TI);
  27. rcuwait_wake_up(&vcpu->wait);
  28. return HRTIMER_NORESTART;
  29. }
  30. /*
  31. * Initialise the timer to the specified frequency, zero it
  32. */
  33. void kvm_init_timer(struct kvm_vcpu *vcpu, unsigned long timer_hz)
  34. {
  35. vcpu->arch.timer_mhz = timer_hz >> 20;
  36. /* Starting at 0 */
  37. kvm_write_sw_gcsr(vcpu->arch.csr, LOONGARCH_CSR_TVAL, 0);
  38. }
  39. /*
  40. * Restore soft timer state from saved context.
  41. */
  42. void kvm_restore_timer(struct kvm_vcpu *vcpu)
  43. {
  44. unsigned long cfg, estat;
  45. unsigned long ticks, delta, period;
  46. ktime_t expire, now;
  47. struct loongarch_csrs *csr = vcpu->arch.csr;
  48. /*
  49. * Set guest stable timer cfg csr
  50. * Disable timer before restore estat CSR register, avoid to
  51. * get invalid timer interrupt for old timer cfg
  52. */
  53. cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
  54. write_gcsr_timercfg(0);
  55. kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
  56. kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
  57. if (!(cfg & CSR_TCFG_EN)) {
  58. /* Guest timer is disabled, just restore timer registers */
  59. kvm_restore_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
  60. return;
  61. }
  62. /*
  63. * Freeze the soft-timer and sync the guest stable timer with it.
  64. */
  65. if (kvm_vcpu_is_blocking(vcpu))
  66. hrtimer_cancel(&vcpu->arch.swtimer);
  67. /*
  68. * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
  69. * If oneshot timer is fired, CSR TVAL will be -1, there are two
  70. * conditions:
  71. * 1) timer is fired during exiting to host
  72. * 2) timer is fired and vm is doing timer irq, and then exiting to
  73. * host. Host should not inject timer irq to avoid spurious
  74. * timer interrupt again
  75. */
  76. ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
  77. estat = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_ESTAT);
  78. if (!(cfg & CSR_TCFG_PERIOD) && (ticks > cfg)) {
  79. /*
  80. * Writing 0 to LOONGARCH_CSR_TVAL will inject timer irq
  81. * and set CSR TVAL with -1
  82. */
  83. write_gcsr_timertick(0);
  84. /*
  85. * Writing CSR_TINTCLR_TI to LOONGARCH_CSR_TINTCLR will clear
  86. * timer interrupt, and CSR TVAL keeps unchanged with -1, it
  87. * avoids spurious timer interrupt
  88. */
  89. if (!(estat & CPU_TIMER))
  90. gcsr_write(CSR_TINTCLR_TI, LOONGARCH_CSR_TINTCLR);
  91. return;
  92. }
  93. /*
  94. * Set remainder tick value if not expired
  95. */
  96. delta = 0;
  97. now = ktime_get();
  98. expire = vcpu->arch.expire;
  99. if (ktime_before(now, expire))
  100. delta = ktime_to_tick(vcpu, ktime_sub(expire, now));
  101. else if (cfg & CSR_TCFG_PERIOD) {
  102. period = cfg & CSR_TCFG_VAL;
  103. delta = ktime_to_tick(vcpu, ktime_sub(now, expire));
  104. delta = period - (delta % period);
  105. /*
  106. * Inject timer here though sw timer should inject timer
  107. * interrupt async already, since sw timer may be cancelled
  108. * during injecting intr async
  109. */
  110. kvm_queue_irq(vcpu, INT_TI);
  111. }
  112. write_gcsr_timertick(delta);
  113. }
  114. /*
  115. * Save guest timer state and switch to software emulation of guest
  116. * timer. The hard timer must already be in use, so preemption should be
  117. * disabled.
  118. */
  119. static void _kvm_save_timer(struct kvm_vcpu *vcpu)
  120. {
  121. unsigned long ticks, delta, cfg;
  122. ktime_t expire;
  123. struct loongarch_csrs *csr = vcpu->arch.csr;
  124. cfg = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG);
  125. ticks = kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TVAL);
  126. /*
  127. * From LoongArch Reference Manual Volume 1 Chapter 7.6.2
  128. * If period timer is fired, CSR TVAL will be reloaded from CSR TCFG
  129. * If oneshot timer is fired, CSR TVAL will be -1
  130. * Here judge one-shot timer fired by checking whether TVAL is larger
  131. * than TCFG
  132. */
  133. if (ticks < cfg)
  134. delta = tick_to_ns(vcpu, ticks);
  135. else
  136. delta = 0;
  137. expire = ktime_add_ns(ktime_get(), delta);
  138. vcpu->arch.expire = expire;
  139. if (kvm_vcpu_is_blocking(vcpu)) {
  140. /*
  141. * HRTIMER_MODE_PINNED_HARD is suggested since vcpu may run in
  142. * the same physical cpu in next time, and the timer should run
  143. * in hardirq context even in the PREEMPT_RT case.
  144. */
  145. hrtimer_start(&vcpu->arch.swtimer, expire, HRTIMER_MODE_ABS_PINNED_HARD);
  146. }
  147. }
  148. /*
  149. * Save guest timer state and switch to soft guest timer if hard timer was in
  150. * use.
  151. */
  152. void kvm_save_timer(struct kvm_vcpu *vcpu)
  153. {
  154. struct loongarch_csrs *csr = vcpu->arch.csr;
  155. preempt_disable();
  156. /* Save hard timer state */
  157. kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TCFG);
  158. kvm_save_hw_gcsr(csr, LOONGARCH_CSR_TVAL);
  159. if (kvm_read_sw_gcsr(csr, LOONGARCH_CSR_TCFG) & CSR_TCFG_EN)
  160. _kvm_save_timer(vcpu);
  161. /* Save timer-related state to vCPU context */
  162. kvm_save_hw_gcsr(csr, LOONGARCH_CSR_ESTAT);
  163. preempt_enable();
  164. }