  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * RISC-V Specific Low-Level ACPI Boot Support
  4. *
  5. * Copyright (C) 2013-2014, Linaro Ltd.
  6. * Author: Al Stone <al.stone@linaro.org>
  7. * Author: Graeme Gregory <graeme.gregory@linaro.org>
  8. * Author: Hanjun Guo <hanjun.guo@linaro.org>
  9. * Author: Tomasz Nowicki <tomasz.nowicki@linaro.org>
  10. * Author: Naresh Bhat <naresh.bhat@linaro.org>
  11. *
  12. * Copyright (C) 2021-2023, Ventana Micro Systems Inc.
  13. * Author: Sunil V L <sunilvl@ventanamicro.com>
  14. */
  15. #include <linux/acpi.h>
  16. #include <linux/efi.h>
  17. #include <linux/io.h>
  18. #include <linux/memblock.h>
  19. #include <linux/of_fdt.h>
  20. #include <linux/pci.h>
  21. #include <linux/serial_core.h>
/*
 * ACPI starts out fully disabled; acpi_boot_table_init() flips these at
 * boot once the tables have been located and validated (or acpi=on/force
 * was given on the command line).
 */
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
EXPORT_SYMBOL(acpi_disabled);
int acpi_pci_disabled = 1; /* skip ACPI PCI scan and IRQ initialization */
EXPORT_SYMBOL(acpi_pci_disabled);

/* Command-line state from "acpi=off|on|force"; consumed during early boot. */
static bool param_acpi_off __initdata;
static bool param_acpi_on __initdata;
static bool param_acpi_force __initdata;

/* Per-CPU cache of the MADT RINTC entries; filled by acpi_init_rintc_map(). */
static struct acpi_madt_rintc cpu_madt_rintc[NR_CPUS];
  31. static int __init parse_acpi(char *arg)
  32. {
  33. if (!arg)
  34. return -EINVAL;
  35. /* "acpi=off" disables both ACPI table parsing and interpreter */
  36. if (strcmp(arg, "off") == 0)
  37. param_acpi_off = true;
  38. else if (strcmp(arg, "on") == 0) /* prefer ACPI over DT */
  39. param_acpi_on = true;
  40. else if (strcmp(arg, "force") == 0) /* force ACPI to be enabled */
  41. param_acpi_force = true;
  42. else
  43. return -EINVAL; /* Core will print when we return error */
  44. return 0;
  45. }
  46. early_param("acpi", parse_acpi);
  47. /*
  48. * acpi_fadt_sanity_check() - Check FADT presence and carry out sanity
  49. * checks on it
  50. *
  51. * Return 0 on success, <0 on failure
  52. */
  53. static int __init acpi_fadt_sanity_check(void)
  54. {
  55. struct acpi_table_header *table;
  56. struct acpi_table_fadt *fadt;
  57. acpi_status status;
  58. int ret = 0;
  59. /*
  60. * FADT is required on riscv; retrieve it to check its presence
  61. * and carry out revision and ACPI HW reduced compliancy tests
  62. */
  63. status = acpi_get_table(ACPI_SIG_FADT, 0, &table);
  64. if (ACPI_FAILURE(status)) {
  65. const char *msg = acpi_format_exception(status);
  66. pr_err("Failed to get FADT table, %s\n", msg);
  67. return -ENODEV;
  68. }
  69. fadt = (struct acpi_table_fadt *)table;
  70. /*
  71. * The revision in the table header is the FADT's Major revision. The
  72. * FADT also has a minor revision, which is stored in the FADT itself.
  73. *
  74. * TODO: Currently, we check for 6.5 as the minimum version to check
  75. * for HW_REDUCED flag. However, once RISC-V updates are released in
  76. * the ACPI spec, we need to update this check for exact minor revision
  77. */
  78. if (table->revision < 6 || (table->revision == 6 && fadt->minor_revision < 5))
  79. pr_err(FW_BUG "Unsupported FADT revision %d.%d, should be 6.5+\n",
  80. table->revision, fadt->minor_revision);
  81. if (!(fadt->flags & ACPI_FADT_HW_REDUCED)) {
  82. pr_err("FADT not ACPI hardware reduced compliant\n");
  83. ret = -EINVAL;
  84. }
  85. /*
  86. * acpi_get_table() creates FADT table mapping that
  87. * should be released after parsing and before resuming boot
  88. */
  89. acpi_put_table(table);
  90. return ret;
  91. }
  92. /*
  93. * acpi_boot_table_init() called from setup_arch(), always.
  94. * 1. find RSDP and get its address, and then find XSDT
  95. * 2. extract all tables and checksums them all
  96. * 3. check ACPI FADT HW reduced flag
  97. *
  98. * We can parse ACPI boot-time tables such as MADT after
  99. * this function is called.
  100. *
  101. * On return ACPI is enabled if either:
  102. *
  103. * - ACPI tables are initialized and sanity checks passed
  104. * - acpi=force was passed in the command line and ACPI was not disabled
  105. * explicitly through acpi=off command line parameter
  106. *
  107. * ACPI is disabled on function return otherwise
  108. */
  109. void __init acpi_boot_table_init(void)
  110. {
  111. /*
  112. * Enable ACPI instead of device tree unless
  113. * - ACPI has been disabled explicitly (acpi=off), or
  114. * - firmware has not populated ACPI ptr in EFI system table
  115. * and ACPI has not been [force] enabled (acpi=on|force)
  116. */
  117. if (param_acpi_off ||
  118. (!param_acpi_on && !param_acpi_force &&
  119. efi.acpi20 == EFI_INVALID_TABLE_ADDR))
  120. goto done;
  121. /*
  122. * ACPI is disabled at this point. Enable it in order to parse
  123. * the ACPI tables and carry out sanity checks
  124. */
  125. enable_acpi();
  126. /*
  127. * If ACPI tables are initialized and FADT sanity checks passed,
  128. * leave ACPI enabled and carry on booting; otherwise disable ACPI
  129. * on initialization error.
  130. * If acpi=force was passed on the command line it forces ACPI
  131. * to be enabled even if its initialization failed.
  132. */
  133. if (acpi_table_init() || acpi_fadt_sanity_check()) {
  134. pr_err("Failed to init ACPI tables\n");
  135. if (!param_acpi_force)
  136. disable_acpi();
  137. }
  138. done:
  139. if (acpi_disabled) {
  140. if (earlycon_acpi_spcr_enable)
  141. early_init_dt_scan_chosen_stdout();
  142. } else {
  143. acpi_parse_spcr(earlycon_acpi_spcr_enable, true);
  144. }
  145. }
  146. static int acpi_parse_madt_rintc(union acpi_subtable_headers *header, const unsigned long end)
  147. {
  148. struct acpi_madt_rintc *rintc = (struct acpi_madt_rintc *)header;
  149. int cpuid;
  150. if (!(rintc->flags & ACPI_MADT_ENABLED))
  151. return 0;
  152. cpuid = riscv_hartid_to_cpuid(rintc->hart_id);
  153. /*
  154. * When CONFIG_SMP is disabled, mapping won't be created for
  155. * all cpus.
  156. * CPUs more than num_possible_cpus, will be ignored.
  157. */
  158. if (cpuid >= 0 && cpuid < num_possible_cpus())
  159. cpu_madt_rintc[cpuid] = *rintc;
  160. return 0;
  161. }
  162. /*
  163. * Instead of parsing (and freeing) the ACPI table, cache
  164. * the RINTC structures since they are frequently used
  165. * like in cpuinfo.
  166. */
  167. void __init acpi_init_rintc_map(void)
  168. {
  169. if (acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_madt_rintc, 0) <= 0) {
  170. pr_err("No valid RINTC entries exist\n");
  171. BUG();
  172. }
  173. }
  174. struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
  175. {
  176. return &cpu_madt_rintc[cpu];
  177. }
  178. /*
  179. * __acpi_map_table() will be called before paging_init(), so early_ioremap()
  180. * or early_memremap() should be called here to for ACPI table mapping.
  181. */
  182. void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
  183. {
  184. if (!size)
  185. return NULL;
  186. return early_memremap(phys, size);
  187. }
  188. void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
  189. {
  190. if (!map || !size)
  191. return;
  192. early_memunmap(map, size);
  193. }
/*
 * acpi_os_ioremap() - map a physical region for ACPICA / AML access.
 *
 * Looks the region up in the EFI memory map to choose safe mapping
 * attributes, refusing requests that span multiple EFI regions or that
 * cover memory already in the kernel's linear map. Returns the mapping,
 * or NULL on refusal / missing EFI memory map.
 */
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
	efi_memory_desc_t *md, *region = NULL;
	pgprot_t prot;

	/* Without an EFI memory map the attributes cannot be validated. */
	if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
		return NULL;

	/* Find the single EFI descriptor that fully contains [phys, phys+size). */
	for_each_efi_memory_desc(md) {
		u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);

		if (phys < md->phys_addr || phys >= end)
			continue;

		if (phys + size > end) {
			pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
			return NULL;
		}
		region = md;
		break;
	}

	/*
	 * It is fine for AML to remap regions that are not represented in the
	 * EFI memory map at all, as it only describes normal memory, and MMIO
	 * regions that require a virtual mapping to make them accessible to
	 * the EFI runtime services.
	 */
	prot = PAGE_KERNEL_IO;	/* default for regions absent from the memmap */
	if (region) {
		switch (region->type) {
		case EFI_LOADER_CODE:
		case EFI_LOADER_DATA:
		case EFI_BOOT_SERVICES_CODE:
		case EFI_BOOT_SERVICES_DATA:
		case EFI_CONVENTIONAL_MEMORY:
		case EFI_PERSISTENT_MEMORY:
			/* Refuse memory the kernel already maps or that is not RAM. */
			if (memblock_is_map_memory(phys) ||
			    !memblock_is_region_memory(phys, size)) {
				pr_warn(FW_BUG "requested region covers kernel memory\n");
				return NULL;
			}

			/*
			 * Mapping kernel memory is permitted if the region in
			 * question is covered by a single memblock with the
			 * NOMAP attribute set: this enables the use of ACPI
			 * table overrides passed via initramfs.
			 * This particular use case only requires read access.
			 */
			fallthrough;

		case EFI_RUNTIME_SERVICES_CODE:
			/*
			 * This would be unusual, but not problematic per se,
			 * as long as we take care not to create a writable
			 * mapping for executable code.
			 */
			prot = PAGE_KERNEL_RO;
			break;

		case EFI_ACPI_RECLAIM_MEMORY:
			/*
			 * ACPI reclaim memory is used to pass firmware tables
			 * and other data that is intended for consumption by
			 * the OS only, which may decide it wants to reclaim
			 * that memory and use it for something else. We never
			 * do that, but we usually add it to the linear map
			 * anyway, in which case we should use the existing
			 * mapping.
			 */
			if (memblock_is_map_memory(phys))
				return (void __iomem *)__va(phys);
			fallthrough;

		default:
			/* Honour the firmware's cacheability attributes. */
			if (region->attribute & EFI_MEMORY_WB)
				prot = PAGE_KERNEL;
			else if ((region->attribute & EFI_MEMORY_WC) ||
				 (region->attribute & EFI_MEMORY_WT))
				prot = pgprot_writecombine(PAGE_KERNEL);
		}
	}

	return ioremap_prot(phys, size, pgprot_val(prot));
}
  270. #ifdef CONFIG_PCI
  271. /*
  272. * raw_pci_read/write - Platform-specific PCI config space access.
  273. */
  274. int raw_pci_read(unsigned int domain, unsigned int bus,
  275. unsigned int devfn, int reg, int len, u32 *val)
  276. {
  277. struct pci_bus *b = pci_find_bus(domain, bus);
  278. if (!b)
  279. return PCIBIOS_DEVICE_NOT_FOUND;
  280. return b->ops->read(b, devfn, reg, len, val);
  281. }
  282. int raw_pci_write(unsigned int domain, unsigned int bus,
  283. unsigned int devfn, int reg, int len, u32 val)
  284. {
  285. struct pci_bus *b = pci_find_bus(domain, bus);
  286. if (!b)
  287. return PCIBIOS_DEVICE_NOT_FOUND;
  288. return b->ops->write(b, devfn, reg, len, val);
  289. }
  290. #endif /* CONFIG_PCI */