sys_hwprobe.c

// SPDX-License-Identifier: GPL-2.0-only
/*
 * The hwprobe interface, for allowing userspace to probe to see which features
 * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
 * more details.
 */
#include <linux/syscalls.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/hwprobe.h>
#include <asm/processor.h>
#include <asm/delay.h>
#include <asm/sbi.h>
#include <asm/switch_to.h>
#include <asm/uaccess.h>
#include <asm/unistd.h>
#include <asm/vector.h>
#include <vdso/vsyscall.h>
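
/*
 * Answer a machine ID query (mvendorid, marchid or mimpid) for a set of
 * CPUs: pair->value is the common value if every CPU in the mask agrees,
 * or -1 if the value differs between CPUs.
 */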
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
			    const struct cpumask *cpus)
{
	u64 id = -1ULL;
	bool first = true;
	int cpu;

	for_each_cpu(cpu, cpus) {
		u64 cpu_id;

		switch (pair->key) {
		case RISCV_HWPROBE_KEY_MVENDORID:
			cpu_id = riscv_cached_mvendorid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MIMPID:
			cpu_id = riscv_cached_mimpid(cpu);
			break;
		case RISCV_HWPROBE_KEY_MARCHID:
			cpu_id = riscv_cached_marchid(cpu);
			break;
		}

		if (first) {
			id = cpu_id;
			first = false;
		}

		/*
		 * If there's a mismatch for the given set, return -1 in the
		 * value.
		 */
		if (id != cpu_id) {
			id = -1ULL;
			break;
		}
	}

	pair->value = id;
}
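
/*
 * Build the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmap for a set of CPUs. An
 * extension bit is reported only when every CPU in the mask has it; bits
 * present on some but not all CPUs are accumulated in "missing" and
 * cleared from the result at the end.
 */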
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	int cpu;
	u64 missing = 0;

	pair->value = 0;
	if (has_fpu())
		pair->value |= RISCV_HWPROBE_IMA_FD;

	if (riscv_isa_extension_available(NULL, c))
		pair->value |= RISCV_HWPROBE_IMA_C;

	if (has_vector() && riscv_isa_extension_available(NULL, v))
		pair->value |= RISCV_HWPROBE_IMA_V;

	/*
	 * Loop through and record extensions that 1) anyone has, and 2) anyone
	 * doesn't have.
	 */
	for_each_cpu(cpu, cpus) {
		struct riscv_isainfo *isainfo = &hart_isa[cpu];

#define EXT_KEY(ext)									\
	do {										\
		if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
			pair->value |= RISCV_HWPROBE_EXT_##ext;				\
		else									\
			missing |= RISCV_HWPROBE_EXT_##ext;				\
	} while (false)

		/*
		 * Only use EXT_KEY() for extensions which can be exposed to userspace,
		 * regardless of the kernel's configuration, as no other checks, besides
		 * presence in the hart_isa bitmap, are made.
		 */
		EXT_KEY(ZACAS);
		EXT_KEY(ZAWRS);
		EXT_KEY(ZBA);
		EXT_KEY(ZBB);
		EXT_KEY(ZBC);
		EXT_KEY(ZBKB);
		EXT_KEY(ZBKC);
		EXT_KEY(ZBKX);
		EXT_KEY(ZBS);
		EXT_KEY(ZCA);
		EXT_KEY(ZCB);
		EXT_KEY(ZCMOP);
		EXT_KEY(ZICBOZ);
		EXT_KEY(ZICOND);
		EXT_KEY(ZIHINTNTL);
		EXT_KEY(ZIHINTPAUSE);
		EXT_KEY(ZIMOP);
		EXT_KEY(ZKND);
		EXT_KEY(ZKNE);
		EXT_KEY(ZKNH);
		EXT_KEY(ZKSED);
		EXT_KEY(ZKSH);
		EXT_KEY(ZKT);
		EXT_KEY(ZTSO);

		/*
		 * All the following extensions must depend on the kernel
		 * support of V.
		 */
		if (has_vector()) {
			EXT_KEY(ZVBB);
			EXT_KEY(ZVBC);
			EXT_KEY(ZVE32F);
			EXT_KEY(ZVE32X);
			EXT_KEY(ZVE64D);
			EXT_KEY(ZVE64F);
			EXT_KEY(ZVE64X);
			EXT_KEY(ZVFH);
			EXT_KEY(ZVFHMIN);
			EXT_KEY(ZVKB);
			EXT_KEY(ZVKG);
			EXT_KEY(ZVKNED);
			EXT_KEY(ZVKNHA);
			EXT_KEY(ZVKNHB);
			EXT_KEY(ZVKSED);
			EXT_KEY(ZVKSH);
			EXT_KEY(ZVKT);
		}

		if (has_fpu()) {
			EXT_KEY(ZCD);
			EXT_KEY(ZCF);
			EXT_KEY(ZFA);
			EXT_KEY(ZFH);
			EXT_KEY(ZFHMIN);
		}
#undef EXT_KEY
	}

	/* Now turn off reporting features if any CPU is missing it. */
	pair->value &= ~missing;
}
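
/*
 * Helper: true if every CPU in the mask supports the given IMA_EXT_0
 * extension bit(s).
 */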
static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
{
	struct riscv_hwprobe pair;

	hwprobe_isa_ext0(&pair, cpus);
	return (pair.value & ext);
}
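
/*
 * Report misaligned scalar access performance for a set of CPUs. With
 * CONFIG_RISCV_PROBE_UNALIGNED_ACCESS the boot-time probed per-CPU values
 * are used (UNKNOWN when they disagree or the mask is empty); otherwise
 * the answer comes purely from the kernel configuration.
 */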
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	int cpu;
	u64 perf = -1ULL;

	for_each_cpu(cpu, cpus) {
		int this_perf = per_cpu(misaligned_access_speed, cpu);

		if (perf == -1ULL)
			perf = this_perf;

		if (perf != this_perf) {
			perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
			break;
		}
	}

	if (perf == -1ULL)
		return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

	return perf;
}
#else
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
	if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
		return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

	if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
		return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

	return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif
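
/*
 * Answer a single key/value query for the given set of CPUs.
 */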
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
			     const struct cpumask *cpus)
{
	switch (pair->key) {
	case RISCV_HWPROBE_KEY_MVENDORID:
	case RISCV_HWPROBE_KEY_MARCHID:
	case RISCV_HWPROBE_KEY_MIMPID:
		hwprobe_arch_id(pair, cpus);
		break;
	/*
	 * The kernel already assumes that the base single-letter ISA
	 * extensions are supported on all harts, and only supports the
	 * IMA base, so just cheat a bit here and tell that to
	 * userspace.
	 */
	case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
		pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
		break;

	case RISCV_HWPROBE_KEY_IMA_EXT_0:
		hwprobe_isa_ext0(pair, cpus);
		break;

	case RISCV_HWPROBE_KEY_CPUPERF_0:
	case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
		pair->value = hwprobe_misaligned(cpus);
		break;

	case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
		pair->value = 0;
		if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
			pair->value = riscv_cboz_block_size;
		break;
	case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
		pair->value = user_max_virt_addr();
		break;
	case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
		pair->value = riscv_timebase;
		break;

	/*
	 * For forward compatibility, unknown keys don't fail the whole
	 * call, but get their element key set to -1 and value set to 0
	 * indicating they're unrecognized.
	 */
	default:
		pair->key = -1;
		pair->value = 0;
		break;
	}
}
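
/*
 * The plain flavor of the syscall (flags == 0): for each key supplied by
 * userspace, fill in a value that is consistent across the requested CPU
 * mask.
 */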
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
			      size_t pair_count, size_t cpusetsize,
			      unsigned long __user *cpus_user,
			      unsigned int flags)
{
	size_t out;
	int ret;
	cpumask_t cpus;

	/* Check the reserved flags. */
	if (flags != 0)
		return -EINVAL;

	/*
	 * The interface supports taking in a CPU mask, and returns values that
	 * are consistent across that mask. Allow userspace to specify NULL and
	 * 0 as a shortcut to all online CPUs.
	 */
	cpumask_clear(&cpus);
	if (!cpusetsize && !cpus_user) {
		cpumask_copy(&cpus, cpu_online_mask);
	} else {
		if (cpusetsize > cpumask_size())
			cpusetsize = cpumask_size();

		ret = copy_from_user(&cpus, cpus_user, cpusetsize);
		if (ret)
			return -EFAULT;

		/*
		 * Userspace must provide at least one online CPU, without that
		 * there's no way to define what is supported.
		 */
		cpumask_and(&cpus, &cpus, cpu_online_mask);
		if (cpumask_empty(&cpus))
			return -EINVAL;
	}

	for (out = 0; out < pair_count; out++, pairs++) {
		struct riscv_hwprobe pair;

		if (get_user(pair.key, &pairs->key))
			return -EFAULT;

		pair.value = 0;
		hwprobe_one_pair(&pair, &cpus);
		ret = put_user(pair.key, &pairs->key);
		if (ret == 0)
			ret = put_user(pair.value, &pairs->value);

		if (ret)
			return -EFAULT;
	}

	return 0;
}
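
/*
 * The RISCV_HWPROBE_WHICH_CPUS flavor: userspace supplies key/value pairs,
 * and the CPU mask is narrowed down to the CPUs whose answers match every
 * pair. Any invalid key flags that pair with key = -1 and empties the
 * returned mask.
 */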
static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	cpumask_t cpus, one_cpu;
	bool clear_all = false;
	size_t i;
	int ret;

	if (flags != RISCV_HWPROBE_WHICH_CPUS)
		return -EINVAL;

	if (!cpusetsize || !cpus_user)
		return -EINVAL;

	if (cpusetsize > cpumask_size())
		cpusetsize = cpumask_size();

	ret = copy_from_user(&cpus, cpus_user, cpusetsize);
	if (ret)
		return -EFAULT;

	if (cpumask_empty(&cpus))
		cpumask_copy(&cpus, cpu_online_mask);

	cpumask_and(&cpus, &cpus, cpu_online_mask);

	cpumask_clear(&one_cpu);

	for (i = 0; i < pair_count; i++) {
		struct riscv_hwprobe pair, tmp;
		int cpu;

		ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
		if (ret)
			return -EFAULT;

		if (!riscv_hwprobe_key_is_valid(pair.key)) {
			clear_all = true;
			pair = (struct riscv_hwprobe){ .key = -1, };
			ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
			if (ret)
				return -EFAULT;
		}

		if (clear_all)
			continue;

		tmp = (struct riscv_hwprobe){ .key = pair.key, };
		for_each_cpu(cpu, &cpus) {
			cpumask_set_cpu(cpu, &one_cpu);

			hwprobe_one_pair(&tmp, &one_cpu);

			if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
				cpumask_clear_cpu(cpu, &cpus);

			cpumask_clear_cpu(cpu, &one_cpu);
		}
	}

	if (clear_all)
		cpumask_clear(&cpus);

	ret = copy_to_user(cpus_user, &cpus, cpusetsize);
	if (ret)
		return -EFAULT;

	return 0;
}
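
/*
 * Dispatch on the flags to one of the two flavors above.
 */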
static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
			    size_t pair_count, size_t cpusetsize,
			    unsigned long __user *cpus_user,
			    unsigned int flags)
{
	if (flags & RISCV_HWPROBE_WHICH_CPUS)
		return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
					cpus_user, flags);

	return hwprobe_get_values(pairs, pair_count, cpusetsize,
				  cpus_user, flags);
}

#ifdef CONFIG_MMU
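
/*
 * Precompute the "all online CPUs" answers into the vDSO data page, so the
 * vDSO function can satisfy common queries without entering the kernel.
 */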
static int __init init_hwprobe_vdso_data(void)
{
	struct vdso_data *vd = __arch_get_k_vdso_data();
	struct arch_vdso_data *avd = &vd->arch_data;
	u64 id_bitsmash = 0;
	struct riscv_hwprobe pair;
	int key;

	/*
	 * Initialize vDSO data with the answers for the "all CPUs" case, to
	 * save a syscall in the common case.
	 */
	for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
		pair.key = key;
		hwprobe_one_pair(&pair, cpu_online_mask);

		WARN_ON_ONCE(pair.key < 0);

		avd->all_cpu_hwprobe_values[key] = pair.value;
		/*
		 * Smash together the vendor, arch, and impl IDs to see if
		 * they're all 0 or any negative.
		 */
		if (key <= RISCV_HWPROBE_KEY_MIMPID)
			id_bitsmash |= pair.value;
	}

	/*
	 * If the arch, vendor, and implementation ID are all the same across
	 * all harts, then assume all CPUs are the same, and allow the vDSO to
	 * answer queries for arbitrary masks. However if all values are 0 (not
	 * populated) or any value returns -1 (varies across CPUs), then the
	 * vDSO should defer to the kernel for exotic cpu masks.
	 */
	avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
	return 0;
}

arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */
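
/*
 * The syscall entry point; the userspace ABI is described in
 * Documentation/arch/riscv/hwprobe.rst.
 */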
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
		size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
		cpus, unsigned int, flags)
{
	return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
				cpus, flags);
}
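
/*
 * Example userspace usage (an illustrative sketch, not part of this file):
 * probe the base behavior and misaligned scalar access performance across
 * all online CPUs by passing a NULL/0 cpuset. Assumes uapi headers recent
 * enough to define __NR_riscv_hwprobe and these keys; error handling is
 * kept minimal.
 *
 *	#include <asm/hwprobe.h>
 *	#include <stdio.h>
 *	#include <sys/syscall.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		struct riscv_hwprobe pairs[] = {
 *			{ .key = RISCV_HWPROBE_KEY_BASE_BEHAVIOR },
 *			{ .key = RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF },
 *		};
 *
 *		// cpusetsize == 0 and cpus == NULL select all online CPUs
 *		if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
 *			return 1;
 *
 *		printf("base: %llx, misaligned perf: %llx\n",
 *		       (unsigned long long)pairs[0].value,
 *		       (unsigned long long)pairs[1].value);
 *		return 0;
 *	}
 */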