cpu.c 8.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (C) 2012 Regents of the University of California
  4. */
  5. #include <linux/acpi.h>
  6. #include <linux/cpu.h>
  7. #include <linux/ctype.h>
  8. #include <linux/init.h>
  9. #include <linux/seq_file.h>
  10. #include <linux/of.h>
  11. #include <asm/acpi.h>
  12. #include <asm/cpufeature.h>
  13. #include <asm/csr.h>
  14. #include <asm/hwcap.h>
  15. #include <asm/sbi.h>
  16. #include <asm/smp.h>
  17. #include <asm/pgtable.h>
  18. #include <asm/vendor_extensions.h>
  19. bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
  20. {
  21. return phys_id == cpuid_to_hartid_map(cpu);
  22. }
  23. /*
  24. * Returns the hart ID of the given device tree node, or -ENODEV if the node
  25. * isn't an enabled and valid RISC-V hart node.
  26. */
  27. int riscv_of_processor_hartid(struct device_node *node, unsigned long *hart)
  28. {
  29. int cpu;
  30. *hart = (unsigned long)of_get_cpu_hwid(node, 0);
  31. if (*hart == ~0UL) {
  32. pr_warn("Found CPU without hart ID\n");
  33. return -ENODEV;
  34. }
  35. cpu = riscv_hartid_to_cpuid(*hart);
  36. if (cpu < 0)
  37. return cpu;
  38. if (!cpu_possible(cpu))
  39. return -ENODEV;
  40. return 0;
  41. }
  42. int __init riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hart)
  43. {
  44. const char *isa;
  45. if (!of_device_is_compatible(node, "riscv")) {
  46. pr_warn("Found incompatible CPU\n");
  47. return -ENODEV;
  48. }
  49. *hart = (unsigned long)of_get_cpu_hwid(node, 0);
  50. if (*hart == ~0UL) {
  51. pr_warn("Found CPU without hart ID\n");
  52. return -ENODEV;
  53. }
  54. if (!of_device_is_available(node))
  55. return -ENODEV;
  56. if (of_property_read_string(node, "riscv,isa-base", &isa))
  57. goto old_interface;
  58. if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32i", 5)) {
  59. pr_warn("CPU with hartid=%lu does not support rv32i", *hart);
  60. return -ENODEV;
  61. }
  62. if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64i", 5)) {
  63. pr_warn("CPU with hartid=%lu does not support rv64i", *hart);
  64. return -ENODEV;
  65. }
  66. if (!of_property_present(node, "riscv,isa-extensions"))
  67. return -ENODEV;
  68. if (of_property_match_string(node, "riscv,isa-extensions", "i") < 0 ||
  69. of_property_match_string(node, "riscv,isa-extensions", "m") < 0 ||
  70. of_property_match_string(node, "riscv,isa-extensions", "a") < 0) {
  71. pr_warn("CPU with hartid=%lu does not support ima", *hart);
  72. return -ENODEV;
  73. }
  74. return 0;
  75. old_interface:
  76. if (!riscv_isa_fallback) {
  77. pr_warn("CPU with hartid=%lu is invalid: this kernel does not parse \"riscv,isa\"",
  78. *hart);
  79. return -ENODEV;
  80. }
  81. if (of_property_read_string(node, "riscv,isa", &isa)) {
  82. pr_warn("CPU with hartid=%lu has no \"riscv,isa-base\" or \"riscv,isa\" property\n",
  83. *hart);
  84. return -ENODEV;
  85. }
  86. if (IS_ENABLED(CONFIG_32BIT) && strncasecmp(isa, "rv32ima", 7)) {
  87. pr_warn("CPU with hartid=%lu does not support rv32ima", *hart);
  88. return -ENODEV;
  89. }
  90. if (IS_ENABLED(CONFIG_64BIT) && strncasecmp(isa, "rv64ima", 7)) {
  91. pr_warn("CPU with hartid=%lu does not support rv64ima", *hart);
  92. return -ENODEV;
  93. }
  94. return 0;
  95. }
  96. /*
  97. * Find hart ID of the CPU DT node under which given DT node falls.
  98. *
  99. * To achieve this, we walk up the DT tree until we find an active
  100. * RISC-V core (HART) node and extract the cpuid from it.
  101. */
  102. int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid)
  103. {
  104. for (; node; node = node->parent) {
  105. if (of_device_is_compatible(node, "riscv")) {
  106. *hartid = (unsigned long)of_get_cpu_hwid(node, 0);
  107. if (*hartid == ~0UL) {
  108. pr_warn("Found CPU without hart ID\n");
  109. return -ENODEV;
  110. }
  111. return 0;
  112. }
  113. }
  114. return -1;
  115. }
  116. unsigned long __init riscv_get_marchid(void)
  117. {
  118. struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
  119. #if IS_ENABLED(CONFIG_RISCV_SBI)
  120. ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
  121. #elif IS_ENABLED(CONFIG_RISCV_M_MODE)
  122. ci->marchid = csr_read(CSR_MARCHID);
  123. #else
  124. ci->marchid = 0;
  125. #endif
  126. return ci->marchid;
  127. }
  128. unsigned long __init riscv_get_mvendorid(void)
  129. {
  130. struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);
  131. #if IS_ENABLED(CONFIG_RISCV_SBI)
  132. ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
  133. #elif IS_ENABLED(CONFIG_RISCV_M_MODE)
  134. ci->mvendorid = csr_read(CSR_MVENDORID);
  135. #else
  136. ci->mvendorid = 0;
  137. #endif
  138. return ci->mvendorid;
  139. }
/* Per-CPU cache of the machine ID registers, filled by riscv_cpuinfo_starting(). */
DEFINE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
  141. unsigned long riscv_cached_mvendorid(unsigned int cpu_id)
  142. {
  143. struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
  144. return ci->mvendorid;
  145. }
  146. EXPORT_SYMBOL(riscv_cached_mvendorid);
  147. unsigned long riscv_cached_marchid(unsigned int cpu_id)
  148. {
  149. struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
  150. return ci->marchid;
  151. }
  152. EXPORT_SYMBOL(riscv_cached_marchid);
  153. unsigned long riscv_cached_mimpid(unsigned int cpu_id)
  154. {
  155. struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
  156. return ci->mimpid;
  157. }
  158. EXPORT_SYMBOL(riscv_cached_mimpid);
/*
 * CPU hotplug "starting" callback: cache this hart's machine ID values
 * (mvendorid/marchid/mimpid) in the per-CPU riscv_cpuinfo.
 *
 * mvendorid/marchid may already have been populated early by
 * riscv_get_mvendorid()/riscv_get_marchid(); only fill them if still zero.
 * mimpid is always (re)read here.
 */
static int riscv_cpuinfo_starting(unsigned int cpu)
{
	struct riscv_cpuinfo *ci = this_cpu_ptr(&riscv_cpuinfo);

#if IS_ENABLED(CONFIG_RISCV_SBI)
	/* SBI 0.1 has no machine-ID queries, so report 0 there. */
	if (!ci->mvendorid)
		ci->mvendorid = sbi_spec_is_0_1() ? 0 : sbi_get_mvendorid();
	if (!ci->marchid)
		ci->marchid = sbi_spec_is_0_1() ? 0 : sbi_get_marchid();
	ci->mimpid = sbi_spec_is_0_1() ? 0 : sbi_get_mimpid();
#elif IS_ENABLED(CONFIG_RISCV_M_MODE)
	/* M-mode can read the machine CSRs directly. */
	if (!ci->mvendorid)
		ci->mvendorid = csr_read(CSR_MVENDORID);
	if (!ci->marchid)
		ci->marchid = csr_read(CSR_MARCHID);
	ci->mimpid = csr_read(CSR_MIMPID);
#else
	/* Neither SBI nor M-mode: machine IDs are unavailable. */
	ci->mvendorid = 0;
	ci->marchid = 0;
	ci->mimpid = 0;
#endif

	return 0;
}
  181. static int __init riscv_cpuinfo_init(void)
  182. {
  183. int ret;
  184. ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "riscv/cpuinfo:starting",
  185. riscv_cpuinfo_starting, NULL);
  186. if (ret < 0) {
  187. pr_err("cpuinfo: failed to register hotplug callbacks.\n");
  188. return ret;
  189. }
  190. return 0;
  191. }
  192. arch_initcall(riscv_cpuinfo_init);
  193. #ifdef CONFIG_PROC_FS
/* Sentinel cpu id: select the ISA bitmap common to all harts, not one hart's. */
#define ALL_CPUS -1
  195. static void print_vendor_isa(struct seq_file *f, int cpu)
  196. {
  197. struct riscv_isavendorinfo *vendor_bitmap;
  198. struct riscv_isa_vendor_ext_data_list *ext_list;
  199. const struct riscv_isa_ext_data *ext_data;
  200. for (int i = 0; i < riscv_isa_vendor_ext_list_size; i++) {
  201. ext_list = riscv_isa_vendor_ext_list[i];
  202. ext_data = riscv_isa_vendor_ext_list[i]->ext_data;
  203. if (cpu == ALL_CPUS)
  204. vendor_bitmap = &ext_list->all_harts_isa_bitmap;
  205. else
  206. vendor_bitmap = &ext_list->per_hart_isa_bitmap[cpu];
  207. for (int j = 0; j < ext_list->ext_data_count; j++) {
  208. if (!__riscv_isa_extension_available(vendor_bitmap->isa, ext_data[j].id))
  209. continue;
  210. seq_printf(f, "_%s", ext_data[j].name);
  211. }
  212. }
  213. }
  214. static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap, int cpu)
  215. {
  216. if (IS_ENABLED(CONFIG_32BIT))
  217. seq_write(f, "rv32", 4);
  218. else
  219. seq_write(f, "rv64", 4);
  220. for (int i = 0; i < riscv_isa_ext_count; i++) {
  221. if (!__riscv_isa_extension_available(isa_bitmap, riscv_isa_ext[i].id))
  222. continue;
  223. /* Only multi-letter extensions are split by underscores */
  224. if (strnlen(riscv_isa_ext[i].name, 2) != 1)
  225. seq_puts(f, "_");
  226. seq_printf(f, "%s", riscv_isa_ext[i].name);
  227. }
  228. print_vendor_isa(f, cpu);
  229. seq_puts(f, "\n");
  230. }
/* Emit the "mmu" line of /proc/cpuinfo: the virtual addressing mode in use. */
static void print_mmu(struct seq_file *f)
{
	const char *sv_type;

#ifdef CONFIG_MMU
#if defined(CONFIG_32BIT)
	sv_type = "sv32";
#elif defined(CONFIG_64BIT)
	/* Report the deepest page-table layout that was actually enabled. */
	if (pgtable_l5_enabled)
		sv_type = "sv57";
	else if (pgtable_l4_enabled)
		sv_type = "sv48";
	else
		sv_type = "sv39";
#endif
#else
	/* !CONFIG_MMU builds run without address translation. */
	sv_type = "none";
#endif /* CONFIG_MMU */
	seq_printf(f, "mmu\t\t: %s\n", sv_type);
}
  250. static void *c_start(struct seq_file *m, loff_t *pos)
  251. {
  252. if (*pos == nr_cpu_ids)
  253. return NULL;
  254. *pos = cpumask_next(*pos - 1, cpu_online_mask);
  255. if ((*pos) < nr_cpu_ids)
  256. return (void *)(uintptr_t)(1 + *pos);
  257. return NULL;
  258. }
  259. static void *c_next(struct seq_file *m, void *v, loff_t *pos)
  260. {
  261. (*pos)++;
  262. return c_start(m, pos);
  263. }
/* seq_file iterator stop: nothing to release, state lives entirely in *pos. */
static void c_stop(struct seq_file *m, void *v)
{
}
/*
 * Emit one CPU's /proc/cpuinfo record: processor/hart numbers, common and
 * per-hart ISA strings, MMU mode, optional DT "uarch", and machine IDs.
 */
static int c_show(struct seq_file *m, void *v)
{
	unsigned long cpu_id = (unsigned long)v - 1;	/* c_start() stored cpu + 1 */
	struct riscv_cpuinfo *ci = per_cpu_ptr(&riscv_cpuinfo, cpu_id);
	struct device_node *node;
	const char *compat;

	seq_printf(m, "processor\t: %lu\n", cpu_id);
	seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));

	/*
	 * For historical raisins, the isa: line is limited to the lowest common
	 * denominator of extensions supported across all harts. A true list of
	 * extensions supported on this hart is printed later in the hart isa:
	 * line.
	 */
	seq_puts(m, "isa\t\t: ");
	print_isa(m, NULL, ALL_CPUS);
	print_mmu(m);

	/* The "uarch" line comes from DT; skip it when booted via ACPI. */
	if (acpi_disabled) {
		node = of_get_cpu_node(cpu_id, NULL);
		/* Only print a compatible more specific than plain "riscv". */
		if (!of_property_read_string(node, "compatible", &compat) &&
		    strcmp(compat, "riscv"))
			seq_printf(m, "uarch\t\t: %s\n", compat);
		of_node_put(node);
	}

	seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
	seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
	seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);

	/*
	 * Print the ISA extensions specific to this hart, which may show
	 * additional extensions not present across all harts.
	 */
	seq_puts(m, "hart isa\t: ");
	print_isa(m, hart_isa[cpu_id].isa, cpu_id);
	seq_puts(m, "\n");

	return 0;
}
/* seq_file operations backing /proc/cpuinfo on RISC-V. */
const struct seq_operations cpuinfo_op = {
	.start = c_start,
	.next = c_next,
	.stop = c_stop,
	.show = c_show
};
  309. #endif /* CONFIG_PROC_FS */