/* pci-dma.c — x86 PCI DMA / IOMMU setup */
  1. // SPDX-License-Identifier: GPL-2.0
  2. #include <linux/dma-direct.h>
  3. #include <linux/dma-debug.h>
  4. #include <linux/dmar.h>
  5. #include <linux/export.h>
  6. #include <linux/bootmem.h>
  7. #include <linux/gfp.h>
  8. #include <linux/pci.h>
  9. #include <asm/proto.h>
  10. #include <asm/dma.h>
  11. #include <asm/iommu.h>
  12. #include <asm/gart.h>
  13. #include <asm/calgary.h>
  14. #include <asm/x86_init.h>
  15. #include <asm/iommu_table.h>
/* Set by "iommu=usedac"; suppresses the VIA DAC-disable quirk below. */
static bool disable_dac_quirk __read_mostly;

/* Default DMA ops: direct mapping. IOMMU drivers replace this at init. */
const struct dma_map_ops *dma_ops = &dma_direct_ops;
EXPORT_SYMBOL(dma_ops);

#ifdef CONFIG_IOMMU_DEBUG
int panic_on_overflow __read_mostly = 1;
int force_iommu __read_mostly = 1;
#else
int panic_on_overflow __read_mostly = 0;
int force_iommu __read_mostly = 0;
#endif

int iommu_merge __read_mostly = 0;

int no_iommu __read_mostly;

/* Set this to 1 if there is a HW IOMMU in the system */
int iommu_detected __read_mostly = 0;

/*
 * This variable becomes 1 if iommu=pt is passed on the kernel command line.
 * If this variable is 1, IOMMU implementations do no DMA translation for
 * devices and allow every device to access the whole of physical memory.
 * This is useful if a user wants to use an IOMMU only for KVM device
 * assignment to guests and not for driver dma translation.
 * It is also possible to disable by default in kernel config, and enable with
 * iommu=nopt at boot time.
 */
#ifdef CONFIG_IOMMU_DEFAULT_PASSTHROUGH
int iommu_pass_through __read_mostly = 1;
#else
int iommu_pass_through __read_mostly;
#endif

/* Linker-provided bounds of the IOMMU detection table (see .iommu_table). */
extern struct iommu_table_entry __iommu_table[], __iommu_table_end[];

/* Dummy device used for NULL arguments (normally ISA). */
struct device x86_dma_fallback_dev = {
	.init_name = "fallback device",
	.coherent_dma_mask = ISA_DMA_BIT_MASK,
	.dma_mask = &x86_dma_fallback_dev.coherent_dma_mask,
};
EXPORT_SYMBOL(x86_dma_fallback_dev);
  52. void __init pci_iommu_alloc(void)
  53. {
  54. struct iommu_table_entry *p;
  55. sort_iommu_table(__iommu_table, __iommu_table_end);
  56. check_iommu_entries(__iommu_table, __iommu_table_end);
  57. for (p = __iommu_table; p < __iommu_table_end; p++) {
  58. if (p && p->detect && p->detect() > 0) {
  59. p->flags |= IOMMU_DETECTED;
  60. if (p->early_init)
  61. p->early_init();
  62. if (p->flags & IOMMU_FINISH_IF_DETECTED)
  63. break;
  64. }
  65. }
  66. }
  67. bool arch_dma_alloc_attrs(struct device **dev)
  68. {
  69. if (!*dev)
  70. *dev = &x86_dma_fallback_dev;
  71. if (!is_device_dma_capable(*dev))
  72. return false;
  73. return true;
  74. }
  75. EXPORT_SYMBOL(arch_dma_alloc_attrs);
/*
 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
 * parameter documentation.
 *
 * Parses the comma-separated "iommu=" option list. Note the checks below
 * are independent prefix matches, not an else-if chain: a token can
 * trigger more than one branch (e.g. "forcesac" also matches "force",
 * "nopanic" does not match "panic" but "noforce" is checked after
 * "force" and matches neither). This prefix-match behavior is
 * long-standing and deliberate; do not reorder the checks.
 */
static __init int iommu_setup(char *p)
{
	iommu_merge = 1;

	if (!p)
		return -EINVAL;

	while (*p) {
		if (!strncmp(p, "off", 3))
			no_iommu = 1;
		/* gart_parse_options has more force support */
		if (!strncmp(p, "force", 5))
			force_iommu = 1;
		if (!strncmp(p, "noforce", 7)) {
			iommu_merge = 0;
			force_iommu = 0;
		}
		if (!strncmp(p, "biomerge", 8)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "panic", 5))
			panic_on_overflow = 1;
		if (!strncmp(p, "nopanic", 7))
			panic_on_overflow = 0;
		if (!strncmp(p, "merge", 5)) {
			iommu_merge = 1;
			force_iommu = 1;
		}
		if (!strncmp(p, "nomerge", 7))
			iommu_merge = 0;
		/* Historical DAC/SAC knobs are accepted but no longer act. */
		if (!strncmp(p, "forcesac", 8))
			pr_warn("forcesac option ignored.\n");
		if (!strncmp(p, "allowdac", 8))
			pr_warn("allowdac option ignored.\n");
		if (!strncmp(p, "nodac", 5))
			pr_warn("nodac option ignored.\n");
		if (!strncmp(p, "usedac", 6)) {
			disable_dac_quirk = true;
			/*
			 * Early return: remaining tokens (and
			 * gart_parse_options) are intentionally skipped.
			 */
			return 1;
		}
#ifdef CONFIG_SWIOTLB
		if (!strncmp(p, "soft", 4))
			swiotlb = 1;
#endif
		if (!strncmp(p, "pt", 2))
			iommu_pass_through = 1;
		if (!strncmp(p, "nopt", 4))
			iommu_pass_through = 0;

		/* GART gets a look at every token for its own options. */
		gart_parse_options(p);

#ifdef CONFIG_CALGARY_IOMMU
		if (!strncmp(p, "calgary", 7))
			use_calgary = 1;
#endif /* CONFIG_CALGARY_IOMMU */

		/* Advance to the next comma-separated token. */
		p += strcspn(p, ",");
		if (*p == ',')
			++p;
	}
	return 0;
}
early_param("iommu", iommu_setup);
  139. static int __init pci_iommu_init(void)
  140. {
  141. struct iommu_table_entry *p;
  142. x86_init.iommu.iommu_init();
  143. for (p = __iommu_table; p < __iommu_table_end; p++) {
  144. if (p && (p->flags & IOMMU_DETECTED) && p->late_init)
  145. p->late_init();
  146. }
  147. return 0;
  148. }
  149. /* Must execute after PCI subsystem */
  150. rootfs_initcall(pci_iommu_init);
  151. #ifdef CONFIG_PCI
  152. /* Many VIA bridges seem to corrupt data for DAC. Disable it here */
/*
 * pci_walk_bus() callback: cap one device's bus DMA mask to 32 bits,
 * effectively disabling dual-address-cycle (DAC, >4GB) transfers for it.
 * Always returns 0 so the bus walk continues to the next device.
 */
static int via_no_dac_cb(struct pci_dev *pdev, void *data)
{
	pdev->dev.bus_dma_mask = DMA_BIT_MASK(32);
	return 0;
}
  158. static void via_no_dac(struct pci_dev *dev)
  159. {
  160. if (!disable_dac_quirk) {
  161. dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n");
  162. pci_walk_bus(dev->subordinate, via_no_dac_cb, NULL);
  163. }
  164. }
  165. DECLARE_PCI_FIXUP_CLASS_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID,
  166. PCI_CLASS_BRIDGE_PCI, 8, via_no_dac);
  167. #endif