mem_encrypt.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * Memory Encryption Support Common Code
 *
 * Copyright (C) 2016 Advanced Micro Devices, Inc.
 *
 * Author: Tom Lendacky <thomas.lendacky@amd.com>
 */

#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/swiotlb.h>
#include <linux/cc_platform.h>
#include <linux/mem_encrypt.h>
#include <linux/virtio_anchor.h>

#include <asm/sev.h>

/* Override for DMA direct allocation check - ARCH_HAS_FORCE_DMA_UNENCRYPTED */
bool force_dma_unencrypted(struct device *dev)
{
	/*
	 * For SEV, all DMA must be to unencrypted addresses.
	 */
	if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return true;

	/*
	 * For SME, all DMA must be to unencrypted addresses if the
	 * device does not support DMA to addresses that include the
	 * encryption mask.
	 */
	if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
		u64 dma_enc_mask = DMA_BIT_MASK(__ffs64(sme_me_mask));
		u64 dma_dev_mask = min_not_zero(dev->coherent_dma_mask,
						dev->bus_dma_limit);
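
		/*
		 * Worked example, assuming the common case of the C-bit at
		 * bit 47: dma_enc_mask = DMA_BIT_MASK(47), so a device whose
		 * effective DMA mask covers only 32 or 40 bits can never
		 * address the encryption bit, and its DMA must therefore go
		 * to unencrypted memory.
		 */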
		if (dma_dev_mask <= dma_enc_mask)
			return true;
	}

	return false;
}
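
/*
 * Emit one dmesg line listing the active memory encryption features, e.g.
 * "Memory Encryption Features active: AMD SEV SEV-ES SEV-SNP" in an SNP
 * guest, "Memory Encryption Features active: AMD SME" on a bare-metal SME
 * host, or "Memory Encryption Features active: Intel TDX" in a TDX guest.
 */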
static void print_mem_encrypt_feature_info(void)
{
	pr_info("Memory Encryption Features active: ");

	switch (cc_vendor) {
	case CC_VENDOR_INTEL:
		pr_cont("Intel TDX\n");
		break;
	case CC_VENDOR_AMD:
		pr_cont("AMD");

		/* Secure Memory Encryption */
		if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT)) {
			/*
			 * SME is mutually exclusive with any of the SEV
			 * features below.
			 */
			pr_cont(" SME\n");
			return;
		}

		/* Secure Encrypted Virtualization */
		if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
			pr_cont(" SEV");

		/* Encrypted Register State */
		if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT))
			pr_cont(" SEV-ES");

		/* Secure Nested Paging */
		if (cc_platform_has(CC_ATTR_GUEST_SEV_SNP))
			pr_cont(" SEV-SNP");

		pr_cont("\n");

		sev_show_status();

		break;
	default:
		pr_cont("Unknown\n");
	}
}

/* Architecture __weak replacement functions */
void __init mem_encrypt_init(void)
{
	if (!cc_platform_has(CC_ATTR_MEM_ENCRYPT))
		return;

	/*
	 * Call into SWIOTLB to update the SWIOTLB DMA buffers: the bounce
	 * buffer memory is marked decrypted/shared so devices can reach it.
	 */
	swiotlb_update_mem_attributes();

	print_mem_encrypt_feature_info();
}
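
/*
 * Arch setup hook (run during setup_arch()): apply SNP RMP-related e820
 * fixups on the host and, for encrypted guests, size the SWIOTLB bounce
 * buffer before it is allocated.
 */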
void __init mem_encrypt_setup_arch(void)
{
	phys_addr_t total_mem = memblock_phys_mem_size();
	unsigned long size;

	/*
	 * Do RMP table fixups after the e820 tables have been set up by
	 * e820__memory_setup().
	 */
	if (cc_platform_has(CC_ATTR_HOST_SEV_SNP))
		snp_fixup_e820_tables();

	if (!cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
		return;

	/*
	 * For SEV and TDX, all DMA has to occur via shared/unencrypted pages.
	 * The kernel uses SWIOTLB to make this happen without changing device
	 * drivers. However, depending on the workload being run, the default
	 * 64MB of SWIOTLB may not be enough and SWIOTLB may run out of
	 * buffers for DMA, resulting in I/O errors and/or performance
	 * degradation, especially with high I/O workloads.
	 *
	 * Adjust the default size of SWIOTLB using a percentage of guest
	 * memory for SWIOTLB buffers. Also, as the SWIOTLB bounce buffer
	 * memory is allocated from low memory, ensure that the adjusted size
	 * is within the limits of low available memory.
	 *
	 * The 6% of guest memory used here for SWIOTLB buffers approximates
	 * the earlier static adjustment: 64MB for guests below 1GB and
	 * roughly 128MB to 256MB for guests between 1GB and 4GB.
	 */
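	/*
	 * For example: a 512MB guest computes ~31MB and is raised to
	 * IO_TLB_DEFAULT_SIZE (64MB), a 16GB guest gets ~983MB, and a 64GB
	 * guest computes ~3.9GB and is capped at SZ_1G.
	 */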
	size = total_mem * 6 / 100;
	size = clamp_val(size, IO_TLB_DEFAULT_SIZE, SZ_1G);
	swiotlb_adjust_size(size);

	/*
	 * Set restricted memory access for virtio: virtio devices must go
	 * through the DMA API, so their buffers are bounced via the
	 * unencrypted SWIOTLB memory.
	 */
	virtio_set_mem_acc_cb(virtio_require_restricted_mem_acc);
}