mem_encrypt_identity.c 16 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567
  1. /*
  2. * AMD Memory Encryption Support
  3. *
  4. * Copyright (C) 2016 Advanced Micro Devices, Inc.
  5. *
  6. * Author: Tom Lendacky <thomas.lendacky@amd.com>
  7. *
  8. * This program is free software; you can redistribute it and/or modify
  9. * it under the terms of the GNU General Public License version 2 as
  10. * published by the Free Software Foundation.
  11. */
  12. #define DISABLE_BRANCH_PROFILING
  13. /*
  14. * Since we're dealing with identity mappings, physical and virtual
  15. * addresses are the same, so override these defines which are ultimately
  16. * used by the headers in misc.h.
  17. */
  18. #define __pa(x) ((unsigned long)(x))
  19. #define __va(x) ((void *)((unsigned long)(x)))
  20. /*
  21. * Special hack: we have to be careful, because no indirections are
  22. * allowed here, and paravirt_ops is a kind of one. As it will only run in
  23. * baremetal anyway, we just keep it from happening. (This list needs to
  24. * be extended when new paravirt and debugging variants are added.)
  25. */
  26. #undef CONFIG_PARAVIRT
  27. #undef CONFIG_PARAVIRT_SPINLOCKS
  28. #include <linux/kernel.h>
  29. #include <linux/mm.h>
  30. #include <linux/mem_encrypt.h>
  31. #include <asm/setup.h>
  32. #include <asm/sections.h>
  33. #include <asm/cmdline.h>
  34. #include "mm_internal.h"
  35. #define PGD_FLAGS _KERNPG_TABLE_NOENC
  36. #define P4D_FLAGS _KERNPG_TABLE_NOENC
  37. #define PUD_FLAGS _KERNPG_TABLE_NOENC
  38. #define PMD_FLAGS _KERNPG_TABLE_NOENC
  39. #define PMD_FLAGS_LARGE (__PAGE_KERNEL_LARGE_EXEC & ~_PAGE_GLOBAL)
  40. #define PMD_FLAGS_DEC PMD_FLAGS_LARGE
  41. #define PMD_FLAGS_DEC_WP ((PMD_FLAGS_DEC & ~_PAGE_LARGE_CACHE_MASK) | \
  42. (_PAGE_PAT_LARGE | _PAGE_PWT))
  43. #define PMD_FLAGS_ENC (PMD_FLAGS_LARGE | _PAGE_ENC)
  44. #define PTE_FLAGS (__PAGE_KERNEL_EXEC & ~_PAGE_GLOBAL)
  45. #define PTE_FLAGS_DEC PTE_FLAGS
  46. #define PTE_FLAGS_DEC_WP ((PTE_FLAGS_DEC & ~_PAGE_CACHE_MASK) | \
  47. (_PAGE_PAT | _PAGE_PWT))
  48. #define PTE_FLAGS_ENC (PTE_FLAGS | _PAGE_ENC)
/*
 * State carried through the sme_populate_pgd*()/sme_map_range*() helpers.
 * New pagetable pages are carved linearly out of pgtable_area, so the
 * routines need no real allocator this early in boot.
 */
struct sme_populate_pgd_data {
	void	*pgtable_area;		/* next free byte for new pagetable pages */
	pgd_t	*pgd;			/* top-level pagetable being populated */

	pmdval_t pmd_flags;		/* flags used for 2MB PMD mappings */
	pteval_t pte_flags;		/* flags used for 4KB PTE mappings */
	unsigned long paddr;		/* current physical address to map */

	unsigned long vaddr;		/* current virtual address to map */
	unsigned long vaddr_end;	/* end (exclusive) of the range */
};
/*
 * mem_encrypt=on|off command line pieces.  These live in __initdata and
 * are referenced via RIP-relative inline asm in sme_enable(), since fixups
 * have not been applied to phys_base when that code runs.
 */
static char sme_cmdline_arg[] __initdata = "mem_encrypt";
static char sme_cmdline_on[] __initdata = "on";
static char sme_cmdline_off[] __initdata = "off";
  61. static void __init sme_clear_pgd(struct sme_populate_pgd_data *ppd)
  62. {
  63. unsigned long pgd_start, pgd_end, pgd_size;
  64. pgd_t *pgd_p;
  65. pgd_start = ppd->vaddr & PGDIR_MASK;
  66. pgd_end = ppd->vaddr_end & PGDIR_MASK;
  67. pgd_size = (((pgd_end - pgd_start) / PGDIR_SIZE) + 1) * sizeof(pgd_t);
  68. pgd_p = ppd->pgd + pgd_index(ppd->vaddr);
  69. memset(pgd_p, 0, pgd_size);
  70. }
/*
 * Walk (and, where missing, build) the pagetable hierarchy for ppd->vaddr
 * down to the PUD level, carving new P4D/PUD/PMD pages out of
 * ppd->pgtable_area.  Returns the PUD entry to continue with, or NULL if
 * the address is already covered by a large PUD mapping.
 */
static pud_t __init *sme_prepare_pgd(struct sme_populate_pgd_data *ppd)
{
	pgd_t *pgd;
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;

	pgd = ppd->pgd + pgd_index(ppd->vaddr);
	if (pgd_none(*pgd)) {
		/* Bump-allocate a P4D page from the workarea and hook it up */
		p4d = ppd->pgtable_area;
		memset(p4d, 0, sizeof(*p4d) * PTRS_PER_P4D);
		ppd->pgtable_area += sizeof(*p4d) * PTRS_PER_P4D;
		set_pgd(pgd, __pgd(PGD_FLAGS | __pa(p4d)));
	}

	p4d = p4d_offset(pgd, ppd->vaddr);
	if (p4d_none(*p4d)) {
		/* Bump-allocate a PUD page */
		pud = ppd->pgtable_area;
		memset(pud, 0, sizeof(*pud) * PTRS_PER_PUD);
		ppd->pgtable_area += sizeof(*pud) * PTRS_PER_PUD;
		set_p4d(p4d, __p4d(P4D_FLAGS | __pa(pud)));
	}

	pud = pud_offset(p4d, ppd->vaddr);
	if (pud_none(*pud)) {
		/* Bump-allocate a PMD page */
		pmd = ppd->pgtable_area;
		memset(pmd, 0, sizeof(*pmd) * PTRS_PER_PMD);
		ppd->pgtable_area += sizeof(*pmd) * PTRS_PER_PMD;
		set_pud(pud, __pud(PUD_FLAGS | __pa(pmd)));
	}

	/* A large PUD page already maps this address - nothing to add */
	if (pud_large(*pud))
		return NULL;

	return pud;
}
  102. static void __init sme_populate_pgd_large(struct sme_populate_pgd_data *ppd)
  103. {
  104. pud_t *pud;
  105. pmd_t *pmd;
  106. pud = sme_prepare_pgd(ppd);
  107. if (!pud)
  108. return;
  109. pmd = pmd_offset(pud, ppd->vaddr);
  110. if (pmd_large(*pmd))
  111. return;
  112. set_pmd(pmd, __pmd(ppd->paddr | ppd->pmd_flags));
  113. }
/*
 * Install a 4KB PTE mapping of ppd->paddr at ppd->vaddr using
 * ppd->pte_flags, carving the intermediate PTE page out of the workarea if
 * needed.  Addresses already covered by a large mapping, or by an existing
 * PTE, are left untouched.
 */
static void __init sme_populate_pgd(struct sme_populate_pgd_data *ppd)
{
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pud = sme_prepare_pgd(ppd);
	if (!pud)
		return;

	pmd = pmd_offset(pud, ppd->vaddr);
	if (pmd_none(*pmd)) {
		/* Bump-allocate a PTE page from the workarea and hook it up */
		pte = ppd->pgtable_area;
		memset(pte, 0, sizeof(*pte) * PTRS_PER_PTE);
		ppd->pgtable_area += sizeof(*pte) * PTRS_PER_PTE;
		set_pmd(pmd, __pmd(PMD_FLAGS | __pa(pte)));
	}

	/* A 2MB page already maps this address - nothing to add */
	if (pmd_large(*pmd))
		return;

	pte = pte_offset_map(pmd, ppd->vaddr);
	if (pte_none(*pte))
		set_pte(pte, __pte(ppd->paddr | ppd->pte_flags));
}
  135. static void __init __sme_map_range_pmd(struct sme_populate_pgd_data *ppd)
  136. {
  137. while (ppd->vaddr < ppd->vaddr_end) {
  138. sme_populate_pgd_large(ppd);
  139. ppd->vaddr += PMD_PAGE_SIZE;
  140. ppd->paddr += PMD_PAGE_SIZE;
  141. }
  142. }
  143. static void __init __sme_map_range_pte(struct sme_populate_pgd_data *ppd)
  144. {
  145. while (ppd->vaddr < ppd->vaddr_end) {
  146. sme_populate_pgd(ppd);
  147. ppd->vaddr += PAGE_SIZE;
  148. ppd->paddr += PAGE_SIZE;
  149. }
  150. }
  151. static void __init __sme_map_range(struct sme_populate_pgd_data *ppd,
  152. pmdval_t pmd_flags, pteval_t pte_flags)
  153. {
  154. unsigned long vaddr_end;
  155. ppd->pmd_flags = pmd_flags;
  156. ppd->pte_flags = pte_flags;
  157. /* Save original end value since we modify the struct value */
  158. vaddr_end = ppd->vaddr_end;
  159. /* If start is not 2MB aligned, create PTE entries */
  160. ppd->vaddr_end = ALIGN(ppd->vaddr, PMD_PAGE_SIZE);
  161. __sme_map_range_pte(ppd);
  162. /* Create PMD entries */
  163. ppd->vaddr_end = vaddr_end & PMD_PAGE_MASK;
  164. __sme_map_range_pmd(ppd);
  165. /* If end is not 2MB aligned, create PTE entries */
  166. ppd->vaddr_end = vaddr_end;
  167. __sme_map_range_pte(ppd);
  168. }
/* Map the ppd range as encrypted (entries carry _PAGE_ENC). */
static void __init sme_map_range_encrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_ENC, PTE_FLAGS_ENC);
}

/* Map the ppd range as decrypted (no encryption mask). */
static void __init sme_map_range_decrypted(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC, PTE_FLAGS_DEC);
}

/* Map the ppd range as decrypted and write-protected (PAT|PWT set). */
static void __init sme_map_range_decrypted_wp(struct sme_populate_pgd_data *ppd)
{
	__sme_map_range(ppd, PMD_FLAGS_DEC_WP, PTE_FLAGS_DEC_WP);
}
  181. static unsigned long __init sme_pgtable_calc(unsigned long len)
  182. {
  183. unsigned long entries = 0, tables = 0;
  184. /*
  185. * Perform a relatively simplistic calculation of the pagetable
  186. * entries that are needed. Those mappings will be covered mostly
  187. * by 2MB PMD entries so we can conservatively calculate the required
  188. * number of P4D, PUD and PMD structures needed to perform the
  189. * mappings. For mappings that are not 2MB aligned, PTE mappings
  190. * would be needed for the start and end portion of the address range
  191. * that fall outside of the 2MB alignment. This results in, at most,
  192. * two extra pages to hold PTE entries for each range that is mapped.
  193. * Incrementing the count for each covers the case where the addresses
  194. * cross entries.
  195. */
  196. /* PGDIR_SIZE is equal to P4D_SIZE on 4-level machine. */
  197. if (PTRS_PER_P4D > 1)
  198. entries += (DIV_ROUND_UP(len, PGDIR_SIZE) + 1) * sizeof(p4d_t) * PTRS_PER_P4D;
  199. entries += (DIV_ROUND_UP(len, P4D_SIZE) + 1) * sizeof(pud_t) * PTRS_PER_PUD;
  200. entries += (DIV_ROUND_UP(len, PUD_SIZE) + 1) * sizeof(pmd_t) * PTRS_PER_PMD;
  201. entries += 2 * sizeof(pte_t) * PTRS_PER_PTE;
  202. /*
  203. * Now calculate the added pagetable structures needed to populate
  204. * the new pagetables.
  205. */
  206. if (PTRS_PER_P4D > 1)
  207. tables += DIV_ROUND_UP(entries, PGDIR_SIZE) * sizeof(p4d_t) * PTRS_PER_P4D;
  208. tables += DIV_ROUND_UP(entries, P4D_SIZE) * sizeof(pud_t) * PTRS_PER_PUD;
  209. tables += DIV_ROUND_UP(entries, PUD_SIZE) * sizeof(pmd_t) * PTRS_PER_PMD;
  210. return entries + tables;
  211. }
/*
 * Encrypt the kernel (and initrd, if present) in place under SME.
 *
 * Builds a 2MB-aligned workarea immediately after the kernel containing an
 * execution area and a bump-allocated pagetable area, constructs a fresh
 * PGD that maps the kernel/initrd both encrypted (identity) and decrypted+
 * write-protected (offset by decrypted_base), then calls
 * sme_encrypt_execute() to copy the data through the two mappings.  Runs
 * very early, while still on identity mappings (see header comment above).
 */
void __init sme_encrypt_kernel(struct boot_params *bp)
{
	unsigned long workarea_start, workarea_end, workarea_len;
	unsigned long execute_start, execute_end, execute_len;
	unsigned long kernel_start, kernel_end, kernel_len;
	unsigned long initrd_start, initrd_end, initrd_len;
	struct sme_populate_pgd_data ppd;
	unsigned long pgtable_area_len;
	unsigned long decrypted_base;

	if (!sme_active())
		return;

	/*
	 * Prepare for encrypting the kernel and initrd by building new
	 * pagetables with the necessary attributes needed to encrypt the
	 * kernel in place.
	 *
	 * One range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as encrypted.
	 *
	 * Another range of virtual addresses will map the memory occupied
	 * by the kernel and initrd as decrypted and write-protected.
	 *
	 * The use of write-protect attribute will prevent any of the
	 * memory from being cached.
	 */

	/* Physical addresses gives us the identity mapped virtual addresses */
	kernel_start = __pa_symbol(_text);
	kernel_end = ALIGN(__pa_symbol(_end), PMD_PAGE_SIZE);
	kernel_len = kernel_end - kernel_start;

	initrd_start = 0;
	initrd_end = 0;
	initrd_len = 0;
#ifdef CONFIG_BLK_DEV_INITRD
	/* initrd location/size come from boot_params (split 32-bit fields) */
	initrd_len = (unsigned long)bp->hdr.ramdisk_size |
		     ((unsigned long)bp->ext_ramdisk_size << 32);
	if (initrd_len) {
		initrd_start = (unsigned long)bp->hdr.ramdisk_image |
			       ((unsigned long)bp->ext_ramdisk_image << 32);
		initrd_end = PAGE_ALIGN(initrd_start + initrd_len);
		initrd_len = initrd_end - initrd_start;
	}
#endif

	/* Set the encryption workarea to be immediately after the kernel */
	workarea_start = kernel_end;

	/*
	 * Calculate required number of workarea bytes needed:
	 *   executable encryption area size:
	 *     stack page (PAGE_SIZE)
	 *     encryption routine page (PAGE_SIZE)
	 *     intermediate copy buffer (PMD_PAGE_SIZE)
	 *   pagetable structures for the encryption of the kernel
	 *   pagetable structures for workarea (in case not currently mapped)
	 */
	execute_start = workarea_start;
	execute_end = execute_start + (PAGE_SIZE * 2) + PMD_PAGE_SIZE;
	execute_len = execute_end - execute_start;

	/*
	 * One PGD for both encrypted and decrypted mappings and a set of
	 * PUDs and PMDs for each of the encrypted and decrypted mappings.
	 */
	pgtable_area_len = sizeof(pgd_t) * PTRS_PER_PGD;
	pgtable_area_len += sme_pgtable_calc(execute_end - kernel_start) * 2;
	if (initrd_len)
		pgtable_area_len += sme_pgtable_calc(initrd_len) * 2;

	/* PUDs and PMDs needed in the current pagetables for the workarea */
	pgtable_area_len += sme_pgtable_calc(execute_len + pgtable_area_len);

	/*
	 * The total workarea includes the executable encryption area and
	 * the pagetable area. The start of the workarea is already 2MB
	 * aligned, align the end of the workarea on a 2MB boundary so that
	 * we don't try to create/allocate PTE entries from the workarea
	 * before it is mapped.
	 */
	workarea_len = execute_len + pgtable_area_len;
	workarea_end = ALIGN(workarea_start + workarea_len, PMD_PAGE_SIZE);

	/*
	 * Set the address to the start of where newly created pagetable
	 * structures (PGDs, PUDs and PMDs) will be allocated. New pagetable
	 * structures are created when the workarea is added to the current
	 * pagetables and when the new encrypted and decrypted kernel
	 * mappings are populated.
	 */
	ppd.pgtable_area = (void *)execute_end;

	/*
	 * Make sure the current pagetable structure has entries for
	 * addressing the workarea.
	 */
	ppd.pgd = (pgd_t *)native_read_cr3_pa();
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());

	/*
	 * A new pagetable structure is being built to allow for the kernel
	 * and initrd to be encrypted. It starts with an empty PGD that will
	 * then be populated with new PUDs and PMDs as the encrypted and
	 * decrypted kernel mappings are created.
	 */
	ppd.pgd = ppd.pgtable_area;
	memset(ppd.pgd, 0, sizeof(pgd_t) * PTRS_PER_PGD);
	ppd.pgtable_area += sizeof(pgd_t) * PTRS_PER_PGD;

	/*
	 * A different PGD index/entry must be used to get different
	 * pagetable entries for the decrypted mapping. Choose the next
	 * PGD index and convert it to a virtual address to be used as
	 * the base of the mapping.
	 */
	decrypted_base = (pgd_index(workarea_end) + 1) & (PTRS_PER_PGD - 1);
	if (initrd_len) {
		unsigned long check_base;

		/* Make sure the decrypted base also clears the initrd */
		check_base = (pgd_index(initrd_end) + 1) & (PTRS_PER_PGD - 1);
		decrypted_base = max(decrypted_base, check_base);
	}
	decrypted_base <<= PGDIR_SHIFT;

	/* Add encrypted kernel (identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start;
	ppd.vaddr_end = kernel_end;
	sme_map_range_encrypted(&ppd);

	/* Add decrypted, write-protected kernel (non-identity) mappings */
	ppd.paddr = kernel_start;
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_map_range_decrypted_wp(&ppd);

	if (initrd_len) {
		/* Add encrypted initrd (identity) mappings */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start;
		ppd.vaddr_end = initrd_end;
		sme_map_range_encrypted(&ppd);
		/*
		 * Add decrypted, write-protected initrd (non-identity) mappings
		 */
		ppd.paddr = initrd_start;
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_map_range_decrypted_wp(&ppd);
	}

	/* Add decrypted workarea mappings to both kernel mappings */
	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start;
	ppd.vaddr_end = workarea_end;
	sme_map_range_decrypted(&ppd);

	ppd.paddr = workarea_start;
	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_map_range_decrypted(&ppd);

	/* Perform the encryption */
	sme_encrypt_execute(kernel_start, kernel_start + decrypted_base,
			    kernel_len, workarea_start, (unsigned long)ppd.pgd);

	if (initrd_len)
		sme_encrypt_execute(initrd_start, initrd_start + decrypted_base,
				    initrd_len, workarea_start,
				    (unsigned long)ppd.pgd);

	/*
	 * At this point we are running encrypted. Remove the mappings for
	 * the decrypted areas - all that is needed for this is to remove
	 * the PGD entry/entries.
	 */
	ppd.vaddr = kernel_start + decrypted_base;
	ppd.vaddr_end = kernel_end + decrypted_base;
	sme_clear_pgd(&ppd);

	if (initrd_len) {
		ppd.vaddr = initrd_start + decrypted_base;
		ppd.vaddr_end = initrd_end + decrypted_base;
		sme_clear_pgd(&ppd);
	}

	ppd.vaddr = workarea_start + decrypted_base;
	ppd.vaddr_end = workarea_end + decrypted_base;
	sme_clear_pgd(&ppd);

	/* Flush the TLB - no globals so cr3 is enough */
	native_write_cr3(__native_read_cr3());
}
/*
 * Detect SME/SEV support via CPUID/MSRs and set sme_me_mask accordingly.
 *
 * SEV: state comes solely from MSR_AMD64_SEV; the mem_encrypt= command
 * line option is not consulted (see comment below).
 * SME: requires BIOS enablement via MSR_K8_SYSCFG, then activation is
 * decided by mem_encrypt=on/off, defaulting per
 * CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT.
 */
void __init sme_enable(struct boot_params *bp)
{
	const char *cmdline_ptr, *cmdline_arg, *cmdline_on, *cmdline_off;
	unsigned int eax, ebx, ecx, edx;
	unsigned long feature_mask;
	bool active_by_default;
	unsigned long me_mask;
	char buffer[16];
	u64 msr;

	/* Check for the SME/SEV support leaf */
	eax = 0x80000000;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (eax < 0x8000001f)
		return;

#define AMD_SME_BIT	BIT(0)
#define AMD_SEV_BIT	BIT(1)
	/*
	 * Set the feature mask (SME or SEV) based on whether we are
	 * running under a hypervisor.
	 */
	eax = 1;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	/* CPUID Fn0000_0001[ECX] bit 31: hypervisor present */
	feature_mask = (ecx & BIT(31)) ? AMD_SEV_BIT : AMD_SME_BIT;

	/*
	 * Check for the SME/SEV feature:
	 *   CPUID Fn8000_001F[EAX]
	 *   - Bit 0 - Secure Memory Encryption support
	 *   - Bit 1 - Secure Encrypted Virtualization support
	 *   CPUID Fn8000_001F[EBX]
	 *   - Bits 5:0 - Pagetable bit position used to indicate encryption
	 */
	eax = 0x8000001f;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	if (!(eax & feature_mask))
		return;

	/* Encryption mask: the pagetable bit position reported by CPUID */
	me_mask = 1UL << (ebx & 0x3f);

	/* Check if memory encryption is enabled */
	if (feature_mask == AMD_SME_BIT) {
		/* For SME, check the SYSCFG MSR */
		msr = __rdmsr(MSR_K8_SYSCFG);
		if (!(msr & MSR_K8_SYSCFG_MEM_ENCRYPT))
			return;
	} else {
		/* For SEV, check the SEV MSR */
		msr = __rdmsr(MSR_AMD64_SEV);
		if (!(msr & MSR_AMD64_SEV_ENABLED))
			return;

		/* SEV state cannot be controlled by a command line option */
		sme_me_mask = me_mask;
		sev_enabled = true;
		physical_mask &= ~sme_me_mask;
		return;
	}

	/*
	 * Fixups have not been applied to phys_base yet and we're running
	 * identity mapped, so we must obtain the address to the SME command
	 * line argument data using rip-relative addressing.
	 */
	asm ("lea sme_cmdline_arg(%%rip), %0"
	     : "=r" (cmdline_arg)
	     : "p" (sme_cmdline_arg));
	asm ("lea sme_cmdline_on(%%rip), %0"
	     : "=r" (cmdline_on)
	     : "p" (sme_cmdline_on));
	asm ("lea sme_cmdline_off(%%rip), %0"
	     : "=r" (cmdline_off)
	     : "p" (sme_cmdline_off));

	if (IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT))
		active_by_default = true;
	else
		active_by_default = false;

	/* Command line pointer is split across two 32-bit boot_params fields */
	cmdline_ptr = (const char *)((u64)bp->hdr.cmd_line_ptr |
				     ((u64)bp->ext_cmd_line_ptr << 32));

	cmdline_find_option(cmdline_ptr, cmdline_arg, buffer, sizeof(buffer));

	if (!strncmp(buffer, cmdline_on, sizeof(buffer)))
		sme_me_mask = me_mask;
	else if (!strncmp(buffer, cmdline_off, sizeof(buffer)))
		sme_me_mask = 0;
	else
		sme_me_mask = active_by_default ? me_mask : 0;

	physical_mask &= ~sme_me_mask;
}