/* sys_hwprobe.c */
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * The hwprobe interface, for allowing userspace to probe to see which features
  4. * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for
  5. * more details.
  6. */
  7. #include <linux/syscalls.h>
  8. #include <asm/cacheflush.h>
  9. #include <asm/cpufeature.h>
  10. #include <asm/hwprobe.h>
  11. #include <asm/processor.h>
  12. #include <asm/delay.h>
  13. #include <asm/sbi.h>
  14. #include <asm/switch_to.h>
  15. #include <asm/uaccess.h>
  16. #include <asm/unistd.h>
  17. #include <asm/vector.h>
  18. #include <vdso/vsyscall.h>
  19. static void hwprobe_arch_id(struct riscv_hwprobe *pair,
  20. const struct cpumask *cpus)
  21. {
  22. u64 id = -1ULL;
  23. bool first = true;
  24. int cpu;
  25. if (pair->key != RISCV_HWPROBE_KEY_MVENDORID &&
  26. pair->key != RISCV_HWPROBE_KEY_MIMPID &&
  27. pair->key != RISCV_HWPROBE_KEY_MARCHID)
  28. goto out;
  29. for_each_cpu(cpu, cpus) {
  30. u64 cpu_id;
  31. switch (pair->key) {
  32. case RISCV_HWPROBE_KEY_MVENDORID:
  33. cpu_id = riscv_cached_mvendorid(cpu);
  34. break;
  35. case RISCV_HWPROBE_KEY_MIMPID:
  36. cpu_id = riscv_cached_mimpid(cpu);
  37. break;
  38. case RISCV_HWPROBE_KEY_MARCHID:
  39. cpu_id = riscv_cached_marchid(cpu);
  40. break;
  41. }
  42. if (first) {
  43. id = cpu_id;
  44. first = false;
  45. }
  46. /*
  47. * If there's a mismatch for the given set, return -1 in the
  48. * value.
  49. */
  50. if (id != cpu_id) {
  51. id = -1ULL;
  52. break;
  53. }
  54. }
  55. out:
  56. pair->value = id;
  57. }
/*
 * Compute the RISCV_HWPROBE_KEY_IMA_EXT_0 bitmap for a CPU set: the set of
 * ISA extensions supported by *every* CPU in @cpus.  Bits are accumulated
 * per CPU, and any extension missing on at least one CPU is cleared at the
 * end.
 */
static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
                             const struct cpumask *cpus)
{
        int cpu;
        u64 missing = 0;

        pair->value = 0;
        /* FD/C/V are global properties, not tracked per hart in hart_isa. */
        if (has_fpu())
                pair->value |= RISCV_HWPROBE_IMA_FD;

        if (riscv_isa_extension_available(NULL, c))
                pair->value |= RISCV_HWPROBE_IMA_C;

        if (has_vector() && riscv_isa_extension_available(NULL, v))
                pair->value |= RISCV_HWPROBE_IMA_V;

        /*
         * Loop through and record extensions that 1) anyone has, and 2) anyone
         * doesn't have.
         */
        for_each_cpu(cpu, cpus) {
                struct riscv_isainfo *isainfo = &hart_isa[cpu];

/*
 * Set the hwprobe bit if this hart has the extension, otherwise remember
 * that some hart in the set lacks it (so it is cleared below).
 */
#define EXT_KEY(ext)                                                            \
        do {                                                                    \
                if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
                        pair->value |= RISCV_HWPROBE_EXT_##ext;                 \
                else                                                            \
                        missing |= RISCV_HWPROBE_EXT_##ext;                     \
        } while (false)

                /*
                 * Only use EXT_KEY() for extensions which can be exposed to userspace,
                 * regardless of the kernel's configuration, as no other checks, besides
                 * presence in the hart_isa bitmap, are made.
                 */
                EXT_KEY(ZACAS);
                EXT_KEY(ZAWRS);
                EXT_KEY(ZBA);
                EXT_KEY(ZBB);
                EXT_KEY(ZBC);
                EXT_KEY(ZBKB);
                EXT_KEY(ZBKC);
                EXT_KEY(ZBKX);
                EXT_KEY(ZBS);
                EXT_KEY(ZCA);
                EXT_KEY(ZCB);
                EXT_KEY(ZCMOP);
                EXT_KEY(ZICBOZ);
                EXT_KEY(ZICOND);
                EXT_KEY(ZIHINTNTL);
                EXT_KEY(ZIHINTPAUSE);
                EXT_KEY(ZIMOP);
                EXT_KEY(ZKND);
                EXT_KEY(ZKNE);
                EXT_KEY(ZKNH);
                EXT_KEY(ZKSED);
                EXT_KEY(ZKSH);
                EXT_KEY(ZKT);
                EXT_KEY(ZTSO);

                /*
                 * All the following extensions must depend on the kernel
                 * support of V.
                 */
                if (has_vector()) {
                        EXT_KEY(ZVBB);
                        EXT_KEY(ZVBC);
                        EXT_KEY(ZVE32F);
                        EXT_KEY(ZVE32X);
                        EXT_KEY(ZVE64D);
                        EXT_KEY(ZVE64F);
                        EXT_KEY(ZVE64X);
                        EXT_KEY(ZVFH);
                        EXT_KEY(ZVFHMIN);
                        EXT_KEY(ZVKB);
                        EXT_KEY(ZVKG);
                        EXT_KEY(ZVKNED);
                        EXT_KEY(ZVKNHA);
                        EXT_KEY(ZVKNHB);
                        EXT_KEY(ZVKSED);
                        EXT_KEY(ZVKSH);
                        EXT_KEY(ZVKT);
                }

                /* These FP extensions are only reported with kernel FPU support. */
                if (has_fpu()) {
                        EXT_KEY(ZCD);
                        EXT_KEY(ZCF);
                        EXT_KEY(ZFA);
                        EXT_KEY(ZFH);
                        EXT_KEY(ZFHMIN);
                }
#undef EXT_KEY
        }

        /* Now turn off reporting features if any CPU is missing it. */
        pair->value &= ~missing;
}
  147. static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext)
  148. {
  149. struct riscv_hwprobe pair;
  150. hwprobe_isa_ext0(&pair, cpus);
  151. return (pair.value & ext);
  152. }
#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
/*
 * Report the scalar misaligned-access performance for a CPU set using the
 * boot-time probed per-CPU values.  If the CPUs in the set disagree, or the
 * set is empty, report UNKNOWN.
 */
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
        int cpu;
        u64 perf = -1ULL;       /* -1 = no CPU inspected yet */

        for_each_cpu(cpu, cpus) {
                int this_perf = per_cpu(misaligned_access_speed, cpu);

                if (perf == -1ULL)
                        perf = this_perf;

                /* Mixed answers across the set: fall back to UNKNOWN. */
                if (perf != this_perf) {
                        perf = RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;
                        break;
                }
        }

        /* Empty CPU set: nothing was probed. */
        if (perf == -1ULL)
                return RISCV_HWPROBE_MISALIGNED_SCALAR_UNKNOWN;

        return perf;
}
#else
/*
 * Without runtime probing the answer is fixed by Kconfig: FAST when the
 * platform guarantees efficient unaligned access, EMULATED when traps are
 * emulated (and emulation control is available), otherwise SLOW.
 */
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
        if (IS_ENABLED(CONFIG_RISCV_EFFICIENT_UNALIGNED_ACCESS))
                return RISCV_HWPROBE_MISALIGNED_SCALAR_FAST;

        if (IS_ENABLED(CONFIG_RISCV_EMULATED_UNALIGNED_ACCESS) && unaligned_ctl_available())
                return RISCV_HWPROBE_MISALIGNED_SCALAR_EMULATED;

        return RISCV_HWPROBE_MISALIGNED_SCALAR_SLOW;
}
#endif
/*
 * Answer a single hwprobe key/value pair for the given CPU set, dispatching
 * to the appropriate helper per key.  On return pair->value holds the
 * answer; unrecognized keys get key = -1, value = 0.
 */
static void hwprobe_one_pair(struct riscv_hwprobe *pair,
                             const struct cpumask *cpus)
{
        switch (pair->key) {
        case RISCV_HWPROBE_KEY_MVENDORID:
        case RISCV_HWPROBE_KEY_MARCHID:
        case RISCV_HWPROBE_KEY_MIMPID:
                hwprobe_arch_id(pair, cpus);
                break;
        /*
         * The kernel already assumes that the base single-letter ISA
         * extensions are supported on all harts, and only supports the
         * IMA base, so just cheat a bit here and tell that to
         * userspace.
         */
        case RISCV_HWPROBE_KEY_BASE_BEHAVIOR:
                pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA;
                break;

        case RISCV_HWPROBE_KEY_IMA_EXT_0:
                hwprobe_isa_ext0(pair, cpus);
                break;

        /* CPUPERF_0 is the deprecated alias of MISALIGNED_SCALAR_PERF. */
        case RISCV_HWPROBE_KEY_CPUPERF_0:
        case RISCV_HWPROBE_KEY_MISALIGNED_SCALAR_PERF:
                pair->value = hwprobe_misaligned(cpus);
                break;

        /* Block size is only meaningful when all CPUs have Zicboz. */
        case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
                pair->value = 0;
                if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
                        pair->value = riscv_cboz_block_size;
                break;

        case RISCV_HWPROBE_KEY_HIGHEST_VIRT_ADDRESS:
                pair->value = user_max_virt_addr();
                break;

        case RISCV_HWPROBE_KEY_TIME_CSR_FREQ:
                pair->value = riscv_timebase;
                break;

        /*
         * For forward compatibility, unknown keys don't fail the whole
         * call, but get their element key set to -1 and value set to 0
         * indicating they're unrecognized.
         */
        default:
                pair->key = -1;
                pair->value = 0;
                break;
        }
}
/*
 * Handle the default (flags == 0) form of the syscall: fill in the value of
 * each requested key in @pairs, answering consistently for the CPU set in
 * @cpus_user (or all online CPUs when no set is given).
 *
 * Returns 0 on success, -EINVAL on bad flags or an empty/offline CPU set,
 * -EFAULT on any user-memory access failure.
 */
static int hwprobe_get_values(struct riscv_hwprobe __user *pairs,
                              size_t pair_count, size_t cpusetsize,
                              unsigned long __user *cpus_user,
                              unsigned int flags)
{
        size_t out;
        int ret;
        cpumask_t cpus;

        /* Check the reserved flags. */
        if (flags != 0)
                return -EINVAL;

        /*
         * The interface supports taking in a CPU mask, and returns values that
         * are consistent across that mask. Allow userspace to specify NULL and
         * 0 as a shortcut to all online CPUs.
         */
        /* Clear first: the user copy below may fill only a prefix of the mask. */
        cpumask_clear(&cpus);
        if (!cpusetsize && !cpus_user) {
                cpumask_copy(&cpus, cpu_online_mask);
        } else {
                if (cpusetsize > cpumask_size())
                        cpusetsize = cpumask_size();
                ret = copy_from_user(&cpus, cpus_user, cpusetsize);
                if (ret)
                        return -EFAULT;

                /*
                 * Userspace must provide at least one online CPU, without that
                 * there's no way to define what is supported.
                 */
                cpumask_and(&cpus, &cpus, cpu_online_mask);
                if (cpumask_empty(&cpus))
                        return -EINVAL;
        }

        for (out = 0; out < pair_count; out++, pairs++) {
                struct riscv_hwprobe pair;

                if (get_user(pair.key, &pairs->key))
                        return -EFAULT;

                pair.value = 0;
                hwprobe_one_pair(&pair, &cpus);
                /* Write back both fields: unknown keys are rewritten to -1. */
                ret = put_user(pair.key, &pairs->key);
                if (ret == 0)
                        ret = put_user(pair.value, &pairs->value);

                if (ret)
                        return -EFAULT;
        }

        return 0;
}
  275. static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs,
  276. size_t pair_count, size_t cpusetsize,
  277. unsigned long __user *cpus_user,
  278. unsigned int flags)
  279. {
  280. cpumask_t cpus, one_cpu;
  281. bool clear_all = false;
  282. size_t i;
  283. int ret;
  284. if (flags != RISCV_HWPROBE_WHICH_CPUS)
  285. return -EINVAL;
  286. if (!cpusetsize || !cpus_user)
  287. return -EINVAL;
  288. if (cpusetsize > cpumask_size())
  289. cpusetsize = cpumask_size();
  290. ret = copy_from_user(&cpus, cpus_user, cpusetsize);
  291. if (ret)
  292. return -EFAULT;
  293. if (cpumask_empty(&cpus))
  294. cpumask_copy(&cpus, cpu_online_mask);
  295. cpumask_and(&cpus, &cpus, cpu_online_mask);
  296. cpumask_clear(&one_cpu);
  297. for (i = 0; i < pair_count; i++) {
  298. struct riscv_hwprobe pair, tmp;
  299. int cpu;
  300. ret = copy_from_user(&pair, &pairs[i], sizeof(pair));
  301. if (ret)
  302. return -EFAULT;
  303. if (!riscv_hwprobe_key_is_valid(pair.key)) {
  304. clear_all = true;
  305. pair = (struct riscv_hwprobe){ .key = -1, };
  306. ret = copy_to_user(&pairs[i], &pair, sizeof(pair));
  307. if (ret)
  308. return -EFAULT;
  309. }
  310. if (clear_all)
  311. continue;
  312. tmp = (struct riscv_hwprobe){ .key = pair.key, };
  313. for_each_cpu(cpu, &cpus) {
  314. cpumask_set_cpu(cpu, &one_cpu);
  315. hwprobe_one_pair(&tmp, &one_cpu);
  316. if (!riscv_hwprobe_pair_cmp(&tmp, &pair))
  317. cpumask_clear_cpu(cpu, &cpus);
  318. cpumask_clear_cpu(cpu, &one_cpu);
  319. }
  320. }
  321. if (clear_all)
  322. cpumask_clear(&cpus);
  323. ret = copy_to_user(cpus_user, &cpus, cpusetsize);
  324. if (ret)
  325. return -EFAULT;
  326. return 0;
  327. }
  328. static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs,
  329. size_t pair_count, size_t cpusetsize,
  330. unsigned long __user *cpus_user,
  331. unsigned int flags)
  332. {
  333. if (flags & RISCV_HWPROBE_WHICH_CPUS)
  334. return hwprobe_get_cpus(pairs, pair_count, cpusetsize,
  335. cpus_user, flags);
  336. return hwprobe_get_values(pairs, pair_count, cpusetsize,
  337. cpus_user, flags);
  338. }
#ifdef CONFIG_MMU

/*
 * Pre-compute the "all online CPUs" answer for every known hwprobe key and
 * stash it in the vDSO data page, so common queries never need to enter the
 * kernel.  Also decide whether the vDSO may answer arbitrary-mask queries
 * itself (only when the system looks homogeneous).
 */
static int __init init_hwprobe_vdso_data(void)
{
        struct vdso_data *vd = __arch_get_k_vdso_data();
        struct arch_vdso_data *avd = &vd->arch_data;
        u64 id_bitsmash = 0;
        struct riscv_hwprobe pair;
        int key;

        /*
         * Initialize vDSO data with the answers for the "all CPUs" case, to
         * save a syscall in the common case.
         */
        for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) {
                pair.key = key;
                hwprobe_one_pair(&pair, cpu_online_mask);

                /* Every key up to MAX_KEY should be recognized. */
                WARN_ON_ONCE(pair.key < 0);

                avd->all_cpu_hwprobe_values[key] = pair.value;
                /*
                 * Smash together the vendor, arch, and impl IDs to see if
                 * they're all 0 or any negative.
                 */
                if (key <= RISCV_HWPROBE_KEY_MIMPID)
                        id_bitsmash |= pair.value;
        }

        /*
         * If the arch, vendor, and implementation ID are all the same across
         * all harts, then assume all CPUs are the same, and allow the vDSO to
         * answer queries for arbitrary masks. However if all values are 0 (not
         * populated) or any value returns -1 (varies across CPUs), then the
         * vDSO should defer to the kernel for exotic cpu masks.
         */
        avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1;
        return 0;
}

/* _sync: must run after the arch_initcall that caches the machine IDs. */
arch_initcall_sync(init_hwprobe_vdso_data);

#endif /* CONFIG_MMU */
/*
 * riscv_hwprobe(2) entry point; see Documentation/arch/riscv/hwprobe.rst.
 * All validation and the flag-based dispatch happen in do_riscv_hwprobe().
 */
SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs,
                size_t, pair_count, size_t, cpusetsize, unsigned long __user *,
                cpus, unsigned int, flags)
{
        return do_riscv_hwprobe(pairs, pair_count, cpusetsize,
                                cpus, flags);
}