  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * AMD Memory Encryption Support
  4. *
  5. * Copyright (C) 2016 Advanced Micro Devices, Inc.
  6. *
  7. * Author: Tom Lendacky <thomas.lendacky@amd.com>
  8. */
  9. #define DISABLE_BRANCH_PROFILING
  10. /*
  11. * Since we're dealing with identity mappings, physical and virtual
  12. * addresses are the same, so override these defines which are ultimately
  13. * used by the headers in misc.h.
  14. */
  15. #define __pa(x) ((unsigned long)(x))
  16. #define __va(x) ((void *)((unsigned long)(x)))
  17. /*
  18. * Special hack: we have to be careful, because no indirections are
  19. * allowed here, and paravirt_ops is a kind of one. As it will only run in
  20. * baremetal anyway, we just keep it from happening. (This list needs to
  21. * be extended when new paravirt and debugging variants are added.)
  22. */
  23. #undef CONFIG_PARAVIRT
  24. #undef CONFIG_PARAVIRT_XXL
  25. #undef CONFIG_PARAVIRT_SPINLOCKS
  26. /*
  27. * This code runs before CPU feature bits are set. By default, the
  28. * pgtable_l5_enabled() function uses bit X86_FEATURE_LA57 to determine if
  29. * 5-level paging is active, so that won't work here. USE_EARLY_PGTABLE_L5
  30. * is provided to handle this situation and, instead, use a variable that
  31. * has been set by the early boot code.
  32. */
  33. #define USE_EARLY_PGTABLE_L5
  34. #include <linux/kernel.h>
  35. #include <linux/mm.h>
  36. #include <linux/mem_encrypt.h>
  37. #include <linux/cc_platform.h>
  38. #include <asm/init.h>
  39. #include <asm/setup.h>
  40. #include <asm/sections.h>
  41. #include <asm/coco.h>
  42. #include <asm/sev.h>
  43. #include "mm_internal.h"
  44. #define PGD_FLAGS _KERNPG_TABLE_NOENC
  45. #define P4D_FLAGS _KERNPG_TABLE_NOENC
  46. #define PUD_FLAGS _KERNPG_TABLE_NOENC
  47. #define PMD_FLAGS _KERNPG_TABLE_NOENC
  48. #define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
  49. #define PMD_FLAGS_DEC PMD_FLAGS_LARGE
  50. #define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
  51. (_PAGE_PAT_LARGE | _PAGE_PWT))
  52. #define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
  53. #define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
  54. #define PTE_FLAGS_DEC PTE_FLAGS
  55. #define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
  56. (_PAGE_PAT | _PAGE_PWT))
  57. #define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC)
/*
 * Shared state for the sme_populate_pgd*()/__sme_map_range*() helpers.
 * One instance describes a single mapping operation: which pagetable to
 * populate, where to allocate new pagetable pages from, the attributes
 * to use and the physical/virtual range being mapped.
 */
struct sme_populate_pgd_data {
	void	*pgtable_area;	/* bump allocator: next free byte for new pagetable pages */
	pgd_t	*pgd;		/* root of the pagetable hierarchy being populated */

	pmdval_t pmd_flags;	/* attributes for 2MB (PMD-level) mappings */
	pteval_t pte_flags;	/* attributes for 4KB (PTE-level) mappings */
	unsigned long paddr;	/* current physical address being mapped */

	unsigned long vaddr;	/* current virtual address being mapped */
	unsigned long vaddr_end; /* end of the virtual range (exclusive) */
};
/*
 * This work area lives in the .init.scratch section, which lives outside of
 * the kernel proper. It is sized to hold the intermediate copy buffer and
 * more than enough pagetable pages.
 *
 * By using this section, the kernel can be encrypted in place and it
 * avoids any possibility of boot parameters or initramfs images being
 * placed such that the in-place encryption logic overwrites them.  This
 * section is 2MB aligned to allow for simple pagetable setup using only
 * PMD entries (see vmlinux.lds.S).
 */
static char sme_workarea[2 * PMD_SIZE] __section(".init.scratch");
/*
 * Remove the mappings for ppd->vaddr through ppd->vaddr_end by zeroing
 * the PGD entries that cover the range.  Clearing at PGD granularity is
 * sufficient here: the range is torn down wholesale, everything beneath
 * the cleared entries becomes unreachable.
 */
static void __head sme_clear_pgd(struct sme_populate_pgd_data *ppd)
{
	unsigned long pgd_start, pgd_end, pgd_size;
	pgd_t *pgd_p;

	/* Round both ends down to PGD boundaries ... */
	pgd_start = ppd->vaddr & PGDIR_MASK;
	pgd_end = ppd->vaddr_end & PGDIR_MASK;

	/* ... and compute the byte size of the spanned entries, inclusive. */
	pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);

	pgd_p = ppd->pgd + pgd_index(ppd->vaddr);

	memset(pgd_p, 0, pgd_size);
}
/*
 * Walk (and build, as needed) the pagetable hierarchy for ppd->vaddr down
 * to the PUD level.  Missing P4D/PUD/PMD tables are carved out of the
 * ppd->pgtable_area bump allocator, zeroed, and linked in with the
 * no-encryption table flags defined above.
 *
 * Returns the PUD entry covering ppd->vaddr, or NULL if that entry is
 * already a leaf (1GB) mapping and so cannot be descended into.
 */
static pud_t __head *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		/* Allocate and install a new P4D table. */
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		/* Allocate and install a new PUD table. */
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		/* Allocate and install a new PMD table. */
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	/* A 1GB leaf mapping is already in place - nothing to descend into. */
	if (pud_leaf(*pud))
		return NULL;

	return pud;
}
/*
 * Map ppd->vaddr to ppd->paddr with a single 2MB PMD entry using
 * ppd->pmd_flags, building intermediate pagetables as needed.  An
 * existing leaf mapping (1GB PUD or 2MB PMD) at this address is left
 * untouched.
 */
static void __head sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_leaf(*pmd))
		return;

	set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
}
/*
 * Map ppd->vaddr to ppd->paddr with a single 4KB PTE entry using
 * ppd->pte_flags, building intermediate pagetables (including the PTE
 * page itself, from the ppd->pgtable_area bump allocator) as needed.
 * Existing leaf mappings (1GB PUD, 2MB PMD, or a populated PTE) at this
 * address are left untouched.
 */
static void __head sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		/* Allocate and install a new PTE page. */
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	/* A 2MB leaf mapping is already in place - nothing to descend into. */
	if (pmd_leaf(*pmd))
		return;

	pte = pte_offset_kernel(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}
  153. static void __head __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
  154. {
  155. while (ppd->vaddr < ppd->vaddr_end) {
  156. sme_populate_pgd_large(ppd);
  157. ppd->vaddr += PMD_SIZE;
  158. ppd->paddr += PMD_SIZE;
  159. }
  160. }
  161. static void __head __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
  162. {
  163. while (ppd->vaddr < ppd->vaddr_end) {
  164. sme_populate_pgd(ppd);
  165. ppd->vaddr += PAGE_SIZE;
  166. ppd->paddr += PAGE_SIZE;
  167. }
  168. }
/*
 * Map the range described by ppd with the given PMD/PTE attributes.
 * The range is split into three pieces: an unaligned head mapped with
 * 4KB PTEs, a 2MB-aligned body mapped with 2MB PMDs, and an unaligned
 * tail mapped with 4KB PTEs.  The helpers advance ppd->vaddr/paddr as
 * they go, so each phase resumes where the previous one stopped; only
 * ppd->vaddr_end needs to be restaged between phases.
 */
static void __head __sme_map_range(struct sme_populate_pgd_data *ppd,
				   pmdval_t pmd_flags, pteval_t pte_flags)
{
	unsigned long vaddr_end;

	ppd->pmd_flags = pmd_flags;
	ppd->pte_flags = pte_flags;

	/* Save original end value since we modify the struct value */
	vaddr_end = ppd->vaddr_end;

	/* If start is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_SIZE);
	__sme_map_range_pte(ppd);

	/* Create PMD entries */
	ppd->vaddr_end = vaddr_end & PMD_MASK;
	__sme_map_range_pmd(ppd);

	/* If end is not 2MB aligned, create PTE entries */
	ppd->vaddr_end = vaddr_end;
	__sme_map_range_pte(ppd);
}
/* Map the range with the encryption bit set (see PMD/PTE_FLAGS_ENC). */
static void __head sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}
/* Map the range without the encryption bit (see PMD/PTE_FLAGS_DEC). */
static void __head sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}
/*
 * Map the range decrypted and write-protected (PAT/PWT set, see
 * PMD/PTE_FLAGS_DEC_WP) - used for the source side of the in-place
 * encryption copy.
 */
static void __head sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
/*
 * Conservatively estimate the number of bytes of pagetable pages needed
 * to map @len bytes of address space with the helpers above.  The result
 * intentionally over-allocates: every level rounds up and adds one extra
 * table to absorb ranges that straddle entry boundaries.
 */
static unsigned long __head sme_pgtable_calc(unsigned long len)
{
	unsigned long entries = 0, tables = 0;

	/*
	 * Perform a relatively simplistic calculation of the pagetable
	 * entries that are needed. Those mappings will be covered mostly
	 * by 2MB PMD entries so we can conservatively calculate the required
	 * number of P4D, PUD and PMD structures needed to perform the
	 * mappings.  For mappings that are not 2MB aligned, PTE mappings
	 * would be needed for the start and end portion of the address range
	 * that fall outside of the 2MB alignment.  This results in, at most,
	 * two extra pages to hold PTE entries for each range that is mapped.
	 * Incrementing the count for each covers the case where the addresses
	 * cross entries.
	 */

	/* PGDIR_SIZE is equal to P4D_SIZE on 4-level machine. */
	if (PTRS_PER_P4D > 1)
		entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
	entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
	entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
	entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;

	/*
	 * Now calculate the added pagetable structures needed to populate
	 * the new pagetables.
	 */
	if (PTRS_PER_P4D > 1)
		tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
	tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
	tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;

	return entries + tables;
}
/*
 * Encrypt the kernel image (and initrd, if present) in place when SME is
 * active.  The sequence is:
 *
 *   1. Map the workarea into the *current* pagetables so it can be used.
 *   2. Build a fresh pagetable hierarchy in the workarea containing an
 *      encrypted identity mapping of the kernel/initrd plus a decrypted,
 *      write-protected alias of the same memory at decrypted_base.
 *   3. Call sme_encrypt_execute() to copy each region through the
 *      intermediate buffer, reading via the decrypted alias and writing
 *      via the encrypted mapping.
 *   4. Tear down the decrypted aliases and flush the TLB.
 *
 * Does nothing when SME is not enabled or when running as a SEV guest
 * (a SEV guest's memory is already encrypted by the hypervisor/hardware).
 */
void __head sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	/*
	 * This is early code, use an open coded check for SME instead of
	 * using cc_platform_has(). This eliminates worries about removing
	 * instrumentation or checking boot_cpu_data in the cc_platform_has()
	 * function.
	 */
	if (!sme_get_me_mask() ||
	    RIP_REL_REF(sev_status) & MSR_AMD64_SEV_ENABLED)
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 *   One range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as encrypted.
	 *
	 *   Another range of virtual addresses will map the memory occupied
	 *   by the kernel and initrd as decrypted and write-protected.
	 *
	 *     The use of write-protect attribute will prevent any of the
	 *     memory from being cached.
	 */

	kernel_start = (unsigned long)RIP_REL_REF(_text);
	kernel_end = ALIGN((unsigned long)RIP_REL_REF(_end), PMD_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	/* The 64-bit initrd address is split between the legacy header
	 * fields and the ext_* extension fields. */
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start = (unsigned long)RIP_REL_REF(sme_workarea);
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);
		/*
		 * Add decrypted, write-protected initrd (non-identity) mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted.  Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}
/*
 * Detect SME/SEV support via CPUID and the SEV/SYSCFG MSRs and, if memory
 * encryption is active, record the encryption mask (sme_me_mask), strip
 * the encryption bit from physical_mask, and register AMD as the
 * confidential-computing vendor.  Returns silently (leaving encryption
 * disabled) when support is absent or not enabled by the platform.
 */
void __head sme_enable(struct boot_params *bp)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	unsigned long me_mask;
	bool snp;
	u64 msr;

	snp = snp_init(bp);

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* Check whether SEV or SME is supported */
	if (!(eax & (AMD_SEV_BIT | AMD_SME_BIT)))
		return;

	/* EBX[5:0] is the position of the encryption ("C") bit in the PTE. */
	me_mask = 1UL << (ebx & 0x3f);

	/* Check the SEV MSR whether SEV or SME is enabled */
	RIP_REL_REF(sev_status) = msr = __rdmsr(MSR_AMD64_SEV);
	feature_mask = (msr & MSR_AMD64_SEV_ENABLED) ? AMD_SEV_BIT : AMD_SME_BIT;

	/* The SEV-SNP CC blob should never be present unless SEV-SNP is enabled. */
	if (snp && !(msr & MSR_AMD64_SEV_SNP_ENABLED))
		snp_abort();

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		if (!(bp->hdr.xloadflags & XLF_MEM_ENCRYPTION))
			return;

		/*
		 * No SME if Hypervisor bit is set. This check is here to
		 * prevent a guest from trying to enable SME. For running as a
		 * KVM guest the MSR_AMD64_SYSCFG will be sufficient, but there
		 * might be other hypervisors which emulate that MSR as non-zero
		 * or even pass it through to the guest.
		 * A malicious hypervisor can still trick a guest into this
		 * path, but there is no way to protect against that.
		 */
		eax = 1;
		ecx = 0;
		native_cpuid(&eax, &ebx, &ecx, &edx);
		if (ecx & BIT(31))
			return;

		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_AMD64_SYSCFG);
		if (!(msr & MSR_AMD64_SYSCFG_MEM_ENCRYPT))
			return;
	}

	/* Encryption is active: publish the mask and claim the CC vendor. */
	RIP_REL_REF(sme_me_mask) = me_mask;
	physical_mask &= ~me_mask;
	cc_vendor = CC_VENDOR_AMD;
	cc_set_mask(me_mask);
}