nested.c

// SPDX-License-Identifier: GPL-2.0
/*
 * nested.c - nested mode translation support
 *
 * Copyright (C) 2023 Intel Corporation
 *
 * Author: Lu Baolu <baolu.lu@linux.intel.com>
 *         Jacob Pan <jacob.jun.pan@linux.intel.com>
 *         Yi Liu <yi.l.liu@intel.com>
 */

#define pr_fmt(fmt) "DMAR: " fmt

#include <linux/iommu.h>
#include <linux/pci.h>
#include <linux/pci-ats.h>

#include "iommu.h"
#include "pasid.h"
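
/*
 * Attach @dev to a nested (stage-1) domain. Translation for the device is
 * blocked first; the stage-2 parent must be compatible with the attaching
 * IOMMU before the nested PASID entry for the device is set up.
 */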
static int intel_nested_attach_dev(struct iommu_domain *domain,
                                   struct device *dev)
{
        struct device_domain_info *info = dev_iommu_priv_get(dev);
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct intel_iommu *iommu = info->iommu;
        unsigned long flags;
        int ret = 0;

        device_block_translation(dev);

        if (iommu->agaw < dmar_domain->s2_domain->agaw) {
                dev_err_ratelimited(dev, "Adjusted guest address width not compatible\n");
                return -ENODEV;
        }

        /*
         * Stage-1 domain cannot work alone, it is nested on a s2_domain.
         * The s2_domain will be used in nested translation, hence needs
         * to ensure the s2_domain is compatible with this IOMMU.
         */
        ret = prepare_domain_attach_device(&dmar_domain->s2_domain->domain, dev);
        if (ret) {
                dev_err_ratelimited(dev, "s2 domain is not compatible\n");
                return ret;
        }

        ret = domain_attach_iommu(dmar_domain, iommu);
        if (ret) {
                dev_err_ratelimited(dev, "Failed to attach domain to iommu\n");
                return ret;
        }

        ret = cache_tag_assign_domain(dmar_domain, dev, IOMMU_NO_PASID);
        if (ret)
                goto detach_iommu;

        ret = intel_pasid_setup_nested(iommu, dev,
                                       IOMMU_NO_PASID, dmar_domain);
        if (ret)
                goto unassign_tag;

        info->domain = dmar_domain;
        info->domain_attached = true;
        spin_lock_irqsave(&dmar_domain->lock, flags);
        list_add(&info->link, &dmar_domain->devices);
        spin_unlock_irqrestore(&dmar_domain->lock, flags);

        return 0;

unassign_tag:
        cache_tag_unassign_domain(dmar_domain, dev, IOMMU_NO_PASID);
detach_iommu:
        domain_detach_iommu(dmar_domain, iommu);

        return ret;
}
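
/*
 * Free a nested domain: unlink it from the stage-2 parent's s1_domains
 * list, then release the domain's qi_batch buffer and the domain itself.
 */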
static void intel_nested_domain_free(struct iommu_domain *domain)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct dmar_domain *s2_domain = dmar_domain->s2_domain;

        spin_lock(&s2_domain->s1_lock);
        list_del(&dmar_domain->s2_link);
        spin_unlock(&s2_domain->s1_lock);
        kfree(dmar_domain->qi_batch);
        kfree(dmar_domain);
}
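
/*
 * Flush caches for a nested domain on request from user space. Each entry
 * in @array describes one stage-1 invalidation; on return, array->entry_num
 * is updated to the number of entries actually processed.
 */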
static int intel_nested_cache_invalidate_user(struct iommu_domain *domain,
                                              struct iommu_user_data_array *array)
{
        struct dmar_domain *dmar_domain = to_dmar_domain(domain);
        struct iommu_hwpt_vtd_s1_invalidate inv_entry;
        u32 index, processed = 0;
        int ret = 0;

        if (array->type != IOMMU_HWPT_INVALIDATE_DATA_VTD_S1) {
                ret = -EINVAL;
                goto out;
        }
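
        /*
         * Validate and process each invalidation entry: only the LEAF flag
         * is supported, reserved fields must be zero, addr must be 4K
         * aligned, and npages == U64_MAX (invalidate the whole address
         * space) requires addr to be zero.
         */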
        for (index = 0; index < array->entry_num; index++) {
                ret = iommu_copy_struct_from_user_array(&inv_entry, array,
                                                        IOMMU_HWPT_INVALIDATE_DATA_VTD_S1,
                                                        index, __reserved);
                if (ret)
                        break;

                if ((inv_entry.flags & ~IOMMU_VTD_INV_FLAGS_LEAF) ||
                    inv_entry.__reserved) {
                        ret = -EOPNOTSUPP;
                        break;
                }

                if (!IS_ALIGNED(inv_entry.addr, VTD_PAGE_SIZE) ||
                    ((inv_entry.npages == U64_MAX) && inv_entry.addr)) {
                        ret = -EINVAL;
                        break;
                }

                cache_tag_flush_range(dmar_domain, inv_entry.addr,
                                      inv_entry.addr + nrpages_to_size(inv_entry.npages) - 1,
                                      inv_entry.flags & IOMMU_VTD_INV_FLAGS_LEAF);
                processed++;
        }

out:
        array->entry_num = processed;
        return ret;
}
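
/* Domain ops for nested (stage-1) domains. */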
static const struct iommu_domain_ops intel_nested_domain_ops = {
        .attach_dev             = intel_nested_attach_dev,
        .free                   = intel_nested_domain_free,
        .cache_invalidate_user  = intel_nested_cache_invalidate_user,
};
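
/*
 * Allocate a nested (stage-1) domain on top of @parent. @parent must be a
 * stage-2 domain created as a nested parent, and @user_data must carry a
 * struct iommu_hwpt_vtd_s1 describing the stage-1 configuration.
 */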
struct iommu_domain *intel_nested_domain_alloc(struct iommu_domain *parent,
                                               const struct iommu_user_data *user_data)
{
        struct dmar_domain *s2_domain = to_dmar_domain(parent);
        struct iommu_hwpt_vtd_s1 vtd;
        struct dmar_domain *domain;
        int ret;

        /* Must be nested domain */
        if (user_data->type != IOMMU_HWPT_DATA_VTD_S1)
                return ERR_PTR(-EOPNOTSUPP);
        if (parent->ops != intel_iommu_ops.default_domain_ops ||
            !s2_domain->nested_parent)
                return ERR_PTR(-EINVAL);

        ret = iommu_copy_struct_from_user(&vtd, user_data,
                                          IOMMU_HWPT_DATA_VTD_S1, __reserved);
        if (ret)
                return ERR_PTR(ret);

        domain = kzalloc(sizeof(*domain), GFP_KERNEL_ACCOUNT);
        if (!domain)
                return ERR_PTR(-ENOMEM);

        domain->use_first_level = true;
        domain->s2_domain = s2_domain;
        domain->s1_pgtbl = vtd.pgtbl_addr;
        domain->s1_cfg = vtd;
        domain->domain.ops = &intel_nested_domain_ops;
        domain->domain.type = IOMMU_DOMAIN_NESTED;
        INIT_LIST_HEAD(&domain->devices);
        INIT_LIST_HEAD(&domain->dev_pasids);
        INIT_LIST_HEAD(&domain->cache_tags);
        spin_lock_init(&domain->lock);
        spin_lock_init(&domain->cache_lock);
        xa_init(&domain->iommu_array);
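
        /* Make the new stage-1 domain visible on the parent's s1_domains list. */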
        spin_lock(&s2_domain->s1_lock);
        list_add(&domain->s2_link, &s2_domain->s1_domains);
        spin_unlock(&s2_domain->s1_lock);

        return &domain->domain;
}