  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * vMTRR implementation
  4. *
  5. * Copyright (C) 2006 Qumranet, Inc.
  6. * Copyright 2010 Red Hat, Inc. and/or its affiliates.
  7. * Copyright(C) 2015 Intel Corporation.
  8. *
  9. * Authors:
  10. * Yaniv Kamay <yaniv@qumranet.com>
  11. * Avi Kivity <avi@qumranet.com>
  12. * Marcelo Tosatti <mtosatti@redhat.com>
  13. * Paolo Bonzini <pbonzini@redhat.com>
  14. * Xiao Guangrong <guangrong.xiao@linux.intel.com>
  15. */
  16. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
  17. #include <linux/kvm_host.h>
  18. #include <asm/mtrr.h>
  19. #include "cpuid.h"
  20. static u64 *find_mtrr(struct kvm_vcpu *vcpu, unsigned int msr)
  21. {
  22. int index;
  23. switch (msr) {
  24. case MTRRphysBase_MSR(0) ... MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1):
  25. index = msr - MTRRphysBase_MSR(0);
  26. return &vcpu->arch.mtrr_state.var[index];
  27. case MSR_MTRRfix64K_00000:
  28. return &vcpu->arch.mtrr_state.fixed_64k;
  29. case MSR_MTRRfix16K_80000:
  30. case MSR_MTRRfix16K_A0000:
  31. index = msr - MSR_MTRRfix16K_80000;
  32. return &vcpu->arch.mtrr_state.fixed_16k[index];
  33. case MSR_MTRRfix4K_C0000:
  34. case MSR_MTRRfix4K_C8000:
  35. case MSR_MTRRfix4K_D0000:
  36. case MSR_MTRRfix4K_D8000:
  37. case MSR_MTRRfix4K_E0000:
  38. case MSR_MTRRfix4K_E8000:
  39. case MSR_MTRRfix4K_F0000:
  40. case MSR_MTRRfix4K_F8000:
  41. index = msr - MSR_MTRRfix4K_C0000;
  42. return &vcpu->arch.mtrr_state.fixed_4k[index];
  43. case MSR_MTRRdefType:
  44. return &vcpu->arch.mtrr_state.deftype;
  45. default:
  46. break;
  47. }
  48. return NULL;
  49. }
  50. static bool valid_mtrr_type(unsigned t)
  51. {
  52. return t < 8 && (1 << t) & 0x73; /* 0, 1, 4, 5, 6 */
  53. }
  54. static bool kvm_mtrr_valid(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  55. {
  56. int i;
  57. u64 mask;
  58. if (msr == MSR_MTRRdefType) {
  59. if (data & ~0xcff)
  60. return false;
  61. return valid_mtrr_type(data & 0xff);
  62. } else if (msr >= MSR_MTRRfix64K_00000 && msr <= MSR_MTRRfix4K_F8000) {
  63. for (i = 0; i < 8 ; i++)
  64. if (!valid_mtrr_type((data >> (i * 8)) & 0xff))
  65. return false;
  66. return true;
  67. }
  68. /* variable MTRRs */
  69. if (WARN_ON_ONCE(!(msr >= MTRRphysBase_MSR(0) &&
  70. msr <= MTRRphysMask_MSR(KVM_NR_VAR_MTRR - 1))))
  71. return false;
  72. mask = kvm_vcpu_reserved_gpa_bits_raw(vcpu);
  73. if ((msr & 1) == 0) {
  74. /* MTRR base */
  75. if (!valid_mtrr_type(data & 0xff))
  76. return false;
  77. mask |= 0xf00;
  78. } else {
  79. /* MTRR mask */
  80. mask |= 0x7ff;
  81. }
  82. return (data & mask) == 0;
  83. }
  84. int kvm_mtrr_set_msr(struct kvm_vcpu *vcpu, u32 msr, u64 data)
  85. {
  86. u64 *mtrr;
  87. mtrr = find_mtrr(vcpu, msr);
  88. if (!mtrr)
  89. return 1;
  90. if (!kvm_mtrr_valid(vcpu, msr, data))
  91. return 1;
  92. *mtrr = data;
  93. return 0;
  94. }
  95. int kvm_mtrr_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
  96. {
  97. u64 *mtrr;
  98. /* MSR_MTRRcap is a readonly MSR. */
  99. if (msr == MSR_MTRRcap) {
  100. /*
  101. * SMRR = 0
  102. * WC = 1
  103. * FIX = 1
  104. * VCNT = KVM_NR_VAR_MTRR
  105. */
  106. *pdata = 0x500 | KVM_NR_VAR_MTRR;
  107. return 0;
  108. }
  109. mtrr = find_mtrr(vcpu, msr);
  110. if (!mtrr)
  111. return 1;
  112. *pdata = *mtrr;
  113. return 0;
  114. }