dirty_ring.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM dirty ring implementation
 *
 * Copyright 2019 Red Hat, Inc.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/vmalloc.h>
#include <linux/kvm_dirty_ring.h>
#include <trace/events/kvm.h>
#include "kvm_mm.h"

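/*
 * Each vcpu owns a dirty ring: a power-of-two array of struct
 * kvm_dirty_gfn that userspace maps at KVM_DIRTY_LOG_PAGE_OFFSET of
 * the vcpu mmap.  KVM publishes dirtied (slot, offset) pairs into the
 * ring; userspace harvests them and hands the entries back with the
 * KVM_RESET_DIRTY_RINGS ioctl.
 *
 * Illustrative harvest loop as userspace might write it (a sketch
 * only, not part of KVM; only the KVM_* names below come from the
 * UAPI in <linux/kvm.h>, the rest are made up for the example):
 *
 *	while (ring[next % nr].flags & KVM_DIRTY_GFN_F_DIRTY) {
 *		collect_page(ring[next % nr].slot, ring[next % nr].offset);
 *		ring[next % nr].flags = KVM_DIRTY_GFN_F_RESET;
 *		next++;
 *	}
 *	ioctl(vm_fd, KVM_RESET_DIRTY_RINGS, 0);
 */

/*
 * Number of dirty GFNs the CPU itself may still buffer and flush into
 * the ring (e.g. PML on Intel CPUs).  The weak default is 0; arches
 * with such hardware assistance override this so the buffered entries
 * fit within the reserved part of the ring.
 */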
int __weak kvm_cpu_dirty_log_size(void)
{
	return 0;
}

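/*
 * Entries kept free beyond the soft limit: a fixed headroom
 * (KVM_DIRTY_RING_RSVD_ENTRIES) plus whatever the CPU may still dump
 * into the ring after the soft-full event has been raised.
 */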
u32 kvm_dirty_ring_get_rsvd_entries(void)
{
	return KVM_DIRTY_RING_RSVD_ENTRIES + kvm_cpu_dirty_log_size();
}

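/*
 * The dirty bitmap is still consulted when the ring is disabled, or
 * when the ring is paired with a bitmap
 * (KVM_CAP_DIRTY_LOG_RING_WITH_BITMAP) to cover writes that cannot be
 * attributed to a running vcpu.
 */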
bool kvm_use_dirty_bitmap(struct kvm *kvm)
{
	lockdep_assert_held(&kvm->slots_lock);

	return !kvm->dirty_ring_size || kvm->dirty_ring_with_bitmap;
}

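/*
 * Default: nothing may dirty guest memory without a running vcpu.
 * Architectures that select CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
 * have such writers and provide their own implementation.
 */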
#ifndef CONFIG_NEED_KVM_DIRTY_RING_WITH_BITMAP
bool kvm_arch_allow_write_without_running_vcpu(struct kvm *kvm)
{
	return false;
}
#endif

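/*
 * dirty_index and reset_index are free-running counters; because the
 * ring size is a power of two, their low bits index dirty_gfns[].
 * The number of entries still awaiting harvest/reset is simply their
 * difference.
 */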
static u32 kvm_dirty_ring_used(struct kvm_dirty_ring *ring)
{
	return READ_ONCE(ring->dirty_index) - READ_ONCE(ring->reset_index);
}

static bool kvm_dirty_ring_soft_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->soft_limit;
}

static bool kvm_dirty_ring_full(struct kvm_dirty_ring *ring)
{
	return kvm_dirty_ring_used(ring) >= ring->size;
}

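/*
 * Re-enable dirty logging for up to BITS_PER_LONG pages: @mask selects
 * pages starting at @offset in the memslot identified by @slot, whose
 * upper 16 bits carry the address space id and lower 16 bits the slot
 * id.
 */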
static void kvm_reset_dirty_gfn(struct kvm *kvm, u32 slot, u64 offset, u64 mask)
{
	struct kvm_memory_slot *memslot;
	int as_id, id;

	if (!mask)
		return;

	as_id = slot >> 16;
	id = (u16)slot;

	if (as_id >= kvm_arch_nr_memslot_as_ids(kvm) || id >= KVM_USER_MEM_SLOTS)
		return;

	memslot = id_to_memslot(__kvm_memslots(kvm, as_id), id);

	if (!memslot || (offset + __fls(mask)) >= memslot->npages)
		return;

	KVM_MMU_LOCK(kvm);
	kvm_arch_mmu_enable_log_dirty_pt_masked(kvm, memslot, offset, mask);
	KVM_MMU_UNLOCK(kvm);
}

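/*
 * @size is in bytes and is expected to hold a power-of-two number of
 * entries, since ring indexing masks with (ring->size - 1).  vzalloc()
 * is used so the pages can later be handed to userspace one by one via
 * kvm_dirty_ring_get_page().
 */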
int kvm_dirty_ring_alloc(struct kvm_dirty_ring *ring, int index, u32 size)
{
	ring->dirty_gfns = vzalloc(size);
	if (!ring->dirty_gfns)
		return -ENOMEM;

	ring->size = size / sizeof(struct kvm_dirty_gfn);
	ring->soft_limit = ring->size - kvm_dirty_ring_get_rsvd_entries();
	ring->dirty_index = 0;
	ring->reset_index = 0;
	ring->index = index;

	return 0;
}

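/*
 * Entry handshake on the flags field: the kernel publishes an entry by
 * setting KVM_DIRTY_GFN_F_DIRTY, userspace acknowledges the harvest by
 * setting KVM_DIRTY_GFN_F_RESET, and the kernel clears the flags again
 * in the reset path, making the entry reusable.  The acquire/release
 * accessors order the flag checks against the slot/offset words shared
 * with userspace.
 */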
static inline void kvm_dirty_gfn_set_invalid(struct kvm_dirty_gfn *gfn)
{
	smp_store_release(&gfn->flags, 0);
}

static inline void kvm_dirty_gfn_set_dirtied(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_DIRTY;
}

static inline bool kvm_dirty_gfn_harvested(struct kvm_dirty_gfn *gfn)
{
	return smp_load_acquire(&gfn->flags) & KVM_DIRTY_GFN_F_RESET;
}

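/*
 * Called from the KVM_RESET_DIRTY_RINGS ioctl with kvm->slots_lock
 * held.  Walks the ring from reset_index for as long as the entries
 * have been harvested by userspace, coalesces neighbouring offsets of
 * the same slot into a word-wide mask, and re-protects those pages.
 * Returns the number of entries given back to the ring.
 */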
int kvm_dirty_ring_reset(struct kvm *kvm, struct kvm_dirty_ring *ring)
{
	u32 cur_slot, next_slot;
	u64 cur_offset, next_offset;
	unsigned long mask;
	int count = 0;
	struct kvm_dirty_gfn *entry;
	bool first_round = true;

	/* This is only needed to make compilers happy */
	cur_slot = cur_offset = mask = 0;

	while (true) {
		entry = &ring->dirty_gfns[ring->reset_index & (ring->size - 1)];

		if (!kvm_dirty_gfn_harvested(entry))
			break;

		next_slot = READ_ONCE(entry->slot);
		next_offset = READ_ONCE(entry->offset);

		/* Update the flags to reflect that this GFN is reset */
		kvm_dirty_gfn_set_invalid(entry);

		ring->reset_index++;
		count++;
		/*
		 * Try to coalesce the reset operations when the guest is
		 * scanning pages in the same slot.
		 */
		if (!first_round && next_slot == cur_slot) {
			s64 delta = next_offset - cur_offset;

			if (delta >= 0 && delta < BITS_PER_LONG) {
				mask |= 1ull << delta;
				continue;
			}

			/* Backwards visit, careful about overflows! */
			if (delta > -BITS_PER_LONG && delta < 0 &&
			    (mask << -delta >> -delta) == mask) {
				cur_offset = next_offset;
				mask = (mask << -delta) | 1;
				continue;
			}
		}

		kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);
		cur_slot = next_slot;
		cur_offset = next_offset;
		mask = 1;
		first_round = false;
	}

	kvm_reset_dirty_gfn(kvm, cur_slot, cur_offset, mask);

	/*
	 * The request KVM_REQ_DIRTY_RING_SOFT_FULL will be cleared
	 * by the VCPU thread next time when it enters the guest.
	 */

	trace_kvm_dirty_ring_reset(ring);

	return count;
}

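/*
 * Publish one dirty (slot, offset) pair into the vcpu's ring.  Runs in
 * vcpu context, so at least one free entry is guaranteed by the
 * soft-full back-pressure applied in kvm_dirty_ring_check_request().
 */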
void kvm_dirty_ring_push(struct kvm_vcpu *vcpu, u32 slot, u64 offset)
{
	struct kvm_dirty_ring *ring = &vcpu->dirty_ring;
	struct kvm_dirty_gfn *entry;

	/* It should never get full */
	WARN_ON_ONCE(kvm_dirty_ring_full(ring));

	entry = &ring->dirty_gfns[ring->dirty_index & (ring->size - 1)];

	entry->slot = slot;
	entry->offset = offset;
	/*
	 * Make sure the data is filled in before we publish this to
	 * the userspace program.  There's no paired kernel-side reader.
	 */
	smp_wmb();
	kvm_dirty_gfn_set_dirtied(entry);
	ring->dirty_index++;
	trace_kvm_dirty_ring_push(ring, slot, offset);

	if (kvm_dirty_ring_soft_full(ring))
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
}

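/*
 * Called from the vcpu run loop before entering the guest.  While the
 * ring remains soft-full, keep the request pending and bounce out to
 * userspace with KVM_EXIT_DIRTY_RING_FULL so it can harvest and reset
 * the rings.
 */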
bool kvm_dirty_ring_check_request(struct kvm_vcpu *vcpu)
{
	/*
	 * The VCPU isn't runnable when the dirty ring becomes soft full.
	 * The KVM_REQ_DIRTY_RING_SOFT_FULL event is always set to prevent
	 * the VCPU from running until the dirty pages are harvested and
	 * the dirty ring is reset by userspace.
	 */
	if (kvm_check_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu) &&
	    kvm_dirty_ring_soft_full(&vcpu->dirty_ring)) {
		kvm_make_request(KVM_REQ_DIRTY_RING_SOFT_FULL, vcpu);
		vcpu->run->exit_reason = KVM_EXIT_DIRTY_RING_FULL;
		trace_kvm_dirty_ring_exit(vcpu);
		return true;
	}

	return false;
}

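/*
 * Return the vmalloc'ed page backing the given page offset of the
 * ring, for the vcpu mmap path that exposes the ring to userspace at
 * KVM_DIRTY_LOG_PAGE_OFFSET.
 */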
struct page *kvm_dirty_ring_get_page(struct kvm_dirty_ring *ring, u32 offset)
{
	return vmalloc_to_page((void *)ring->dirty_gfns + offset * PAGE_SIZE);
}

void kvm_dirty_ring_free(struct kvm_dirty_ring *ring)
{
	vfree(ring->dirty_gfns);
	ring->dirty_gfns = NULL;
}