/*
 * enlighten_hvm.c - Xen HVM guest enlightenment support.
 */
  1. #include <linux/acpi.h>
  2. #include <linux/cpu.h>
  3. #include <linux/kexec.h>
  4. #include <linux/memblock.h>
  5. #include <xen/features.h>
  6. #include <xen/events.h>
  7. #include <xen/interface/memory.h>
  8. #include <asm/cpu.h>
  9. #include <asm/smp.h>
  10. #include <asm/reboot.h>
  11. #include <asm/setup.h>
  12. #include <asm/hypervisor.h>
  13. #include <asm/e820/api.h>
  14. #include <asm/early_ioremap.h>
  15. #include <asm/xen/cpuid.h>
  16. #include <asm/xen/hypervisor.h>
  17. #include <asm/xen/page.h>
  18. #include "xen-ops.h"
  19. #include "mmu.h"
  20. #include "smp.h"
  21. static unsigned long shared_info_pfn;
  22. void xen_hvm_init_shared_info(void)
  23. {
  24. struct xen_add_to_physmap xatp;
  25. xatp.domid = DOMID_SELF;
  26. xatp.idx = 0;
  27. xatp.space = XENMAPSPACE_shared_info;
  28. xatp.gpfn = shared_info_pfn;
  29. if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp))
  30. BUG();
  31. }
/*
 * Find and reserve a free low RAM page to back the shared_info
 * mapping, record its PFN in shared_info_pfn, and establish an early
 * (pre-direct-map) mapping via early_memremap().
 */
static void __init reserve_shared_info(void)
{
	u64 pa;

	/*
	 * Search for a free page starting at 4kB physical address.
	 * Low memory is preferred to avoid an EPT large page split up
	 * by the mapping.
	 * Starting below X86_RESERVE_LOW (usually 64kB) is fine as
	 * the BIOS used for HVM guests is well behaved and won't
	 * clobber memory other than the first 4kB.
	 */
	for (pa = PAGE_SIZE;
	     !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) ||
	     memblock_is_reserved(pa);
	     pa += PAGE_SIZE)
		;

	shared_info_pfn = PHYS_PFN(pa);

	memblock_reserve(pa, PAGE_SIZE);
	/* Temporary mapping; switched to __va() in xen_hvm_init_mem_mapping(). */
	HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE);
}
/*
 * Once the direct map is available, drop the early_memremap() mapping
 * of the shared_info page and switch HYPERVISOR_shared_info to the
 * permanent direct-map address.
 */
static void __init xen_hvm_init_mem_mapping(void)
{
	early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE);
	HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn));

	/*
	 * The virtual address of the shared_info page has changed, so
	 * the vcpu_info pointer for VCPU 0 is now stale.
	 *
	 * The prepare_boot_cpu callback will re-initialize it via
	 * xen_vcpu_setup, but we can't rely on that to be called for
	 * old Xen versions (xen_have_vector_callback == 0).
	 *
	 * It is, in any case, bad to have a stale vcpu_info pointer
	 * so reset it now.
	 */
	xen_vcpu_info_reset(0);
}
  69. static void __init init_hvm_pv_info(void)
  70. {
  71. int major, minor;
  72. uint32_t eax, ebx, ecx, edx, base;
  73. base = xen_cpuid_base();
  74. eax = cpuid_eax(base + 1);
  75. major = eax >> 16;
  76. minor = eax & 0xffff;
  77. printk(KERN_INFO "Xen version %d.%d.\n", major, minor);
  78. xen_domain_type = XEN_HVM_DOMAIN;
  79. /* PVH set up hypercall page in xen_prepare_pvh(). */
  80. if (xen_pvh_domain())
  81. pv_info.name = "Xen PVH";
  82. else {
  83. u64 pfn;
  84. uint32_t msr;
  85. pv_info.name = "Xen HVM";
  86. msr = cpuid_ebx(base + 2);
  87. pfn = __pa(hypercall_page);
  88. wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32));
  89. }
  90. xen_setup_features();
  91. cpuid(base + 4, &eax, &ebx, &ecx, &edx);
  92. if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT)
  93. this_cpu_write(xen_vcpu_id, ebx);
  94. else
  95. this_cpu_write(xen_vcpu_id, smp_processor_id());
  96. }
#ifdef CONFIG_KEXEC_CORE
/*
 * Shutdown hook for kexec: after the native shutdown, ask Xen for a
 * soft reset so the new kernel starts from a clean hypervisor state.
 */
static void xen_hvm_shutdown(void)
{
	native_machine_shutdown();
	if (kexec_in_progress)
		xen_reboot(SHUTDOWN_soft_reset);
}

/* Crash-kexec variant: always soft-reset after the crash shutdown. */
static void xen_hvm_crash_shutdown(struct pt_regs *regs)
{
	native_machine_crash_shutdown(regs);
	xen_reboot(SHUTDOWN_soft_reset);
}
#endif
  110. static int xen_cpu_up_prepare_hvm(unsigned int cpu)
  111. {
  112. int rc = 0;
  113. /*
  114. * This can happen if CPU was offlined earlier and
  115. * offlining timed out in common_cpu_die().
  116. */
  117. if (cpu_report_state(cpu) == CPU_DEAD_FROZEN) {
  118. xen_smp_intr_free(cpu);
  119. xen_uninit_lock_cpu(cpu);
  120. }
  121. if (cpu_acpi_id(cpu) != U32_MAX)
  122. per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu);
  123. else
  124. per_cpu(xen_vcpu_id, cpu) = cpu;
  125. rc = xen_vcpu_setup(cpu);
  126. if (rc)
  127. return rc;
  128. if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
  129. xen_setup_timer(cpu);
  130. rc = xen_smp_intr_init(cpu);
  131. if (rc) {
  132. WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n",
  133. cpu, rc);
  134. }
  135. return rc;
  136. }
  137. static int xen_cpu_dead_hvm(unsigned int cpu)
  138. {
  139. xen_smp_intr_free(cpu);
  140. if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock))
  141. xen_teardown_timer(cpu);
  142. return 0;
  143. }
/*
 * Platform-init entry point for Xen HVM guests: set up the shared_info
 * page, vcpu info, event channels, SMP/time/MMU ops and (with kexec)
 * the shutdown hooks.  Bails out early on PV domains, which are
 * initialized elsewhere.
 */
static void __init xen_hvm_guest_init(void)
{
	if (xen_pv_domain())
		return;

	init_hvm_pv_info();

	reserve_shared_info();
	xen_hvm_init_shared_info();

	/*
	 * xen_vcpu is a pointer to the vcpu_info struct in the shared_info
	 * page, we use it in the event channel upcall and in some pvclock
	 * related functions.
	 */
	xen_vcpu_info_reset(0);

	xen_panic_handler_init();

	if (xen_feature(XENFEAT_hvm_callback_vector))
		xen_have_vector_callback = 1;

	xen_hvm_smp_init();
	WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm));
	xen_unplug_emulated_devices();
	x86_init.irqs.intr_init = xen_init_IRQ;
	xen_hvm_init_time_ops();
	xen_hvm_init_mmu_ops();

#ifdef CONFIG_KEXEC_CORE
	machine_ops.shutdown = xen_hvm_shutdown;
	machine_ops.crash_shutdown = xen_hvm_crash_shutdown;
#endif
}
/* Set by the "xen_nopv" kernel parameter to disable PV extensions. */
static bool xen_nopv;

/* early_param handler: any "xen_nopv" on the command line enables it. */
static __init int xen_parse_nopv(char *arg)
{
	xen_nopv = true;
	return 0;
}
early_param("xen_nopv", xen_parse_nopv);
  178. bool xen_hvm_need_lapic(void)
  179. {
  180. if (xen_nopv)
  181. return false;
  182. if (xen_pv_domain())
  183. return false;
  184. if (!xen_hvm_domain())
  185. return false;
  186. if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback)
  187. return false;
  188. return true;
  189. }
  190. EXPORT_SYMBOL_GPL(xen_hvm_need_lapic);
  191. static uint32_t __init xen_platform_hvm(void)
  192. {
  193. if (xen_pv_domain() || xen_nopv)
  194. return 0;
  195. return xen_cpuid_base();
  196. }
/*
 * Late guest init: detect a PVH domain that was not identified via the
 * PVH boot path (by the absence of legacy RTC/VGA in the ACPI tables)
 * and finish PVH-specific setup.  No-op unless CONFIG_XEN_PVH.
 */
static __init void xen_hvm_guest_late_init(void)
{
#ifdef CONFIG_XEN_PVH
	/* Test for PVH domain (PVH boot path taken overrides ACPI flags). */
	if (!xen_pvh &&
	    (x86_platform.legacy.rtc || !x86_platform.legacy.no_vga))
		return;

	/* PVH detected. */
	xen_pvh = true;

	/* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */
	if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC)
		acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;

	machine_ops.emergency_restart = xen_emergency_restart;
	pv_info.name = "Xen PVH";
#endif
}
/* Hypervisor descriptor registering the Xen HVM detection/init hooks. */
const __initconst struct hypervisor_x86 x86_hyper_xen_hvm = {
	.name                   = "Xen HVM",
	.detect                 = xen_platform_hvm,
	.type                   = X86_HYPER_XEN_HVM,
	.init.init_platform     = xen_hvm_guest_init,
	.init.x2apic_available  = xen_x2apic_para_available,
	.init.init_mem_mapping  = xen_hvm_init_mem_mapping,
	.init.guest_late_init   = xen_hvm_guest_late_init,
	.runtime.pin_vcpu       = xen_pin_vcpu,
};