  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
 * SBI initialization and all extension implementations.
  4. *
  5. * Copyright (c) 2020 Western Digital Corporation or its affiliates.
  6. */
  7. #include <linux/bits.h>
  8. #include <linux/init.h>
  9. #include <linux/mm.h>
  10. #include <linux/pm.h>
  11. #include <linux/reboot.h>
  12. #include <asm/sbi.h>
  13. #include <asm/smp.h>
  14. #include <asm/tlbflush.h>
/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
EXPORT_SYMBOL(sbi_spec_version);

/*
 * Backend operations installed by sbi_init() at boot: each points at the
 * v0.2+ extension implementation when the firmware supports it, otherwise
 * at the v0.1 legacy fallback (or a warning stub without CONFIG_RISCV_SBI_V01).
 */
static void (*__sbi_set_timer)(uint64_t stime) __ro_after_init;
static void (*__sbi_send_ipi)(unsigned int cpu) __ro_after_init;
static int (*__sbi_rfence)(int fid, const struct cpumask *cpu_mask,
			   unsigned long start, unsigned long size,
			   unsigned long arg4, unsigned long arg5) __ro_after_init;
  23. #ifdef CONFIG_RISCV_SBI_V01
  24. static unsigned long __sbi_v01_cpumask_to_hartmask(const struct cpumask *cpu_mask)
  25. {
  26. unsigned long cpuid, hartid;
  27. unsigned long hmask = 0;
  28. /*
  29. * There is no maximum hartid concept in RISC-V and NR_CPUS must not be
  30. * associated with hartid. As SBI v0.1 is only kept for backward compatibility
  31. * and will be removed in the future, there is no point in supporting hartid
  32. * greater than BITS_PER_LONG (32 for RV32 and 64 for RV64). Ideally, SBI v0.2
  33. * should be used for platforms with hartid greater than BITS_PER_LONG.
  34. */
  35. for_each_cpu(cpuid, cpu_mask) {
  36. hartid = cpuid_to_hartid_map(cpuid);
  37. if (hartid >= BITS_PER_LONG) {
  38. pr_warn("Unable to send any request to hartid > BITS_PER_LONG for SBI v0.1\n");
  39. break;
  40. }
  41. hmask |= BIT(hartid);
  42. }
  43. return hmask;
  44. }
/**
 * sbi_console_putchar() - Writes given character to the console device.
 * @ch: The data to be written to the console.
 *
 * Uses the legacy (SBI v0.1) console call; any error is ignored.
 *
 * Return: None
 */
void sbi_console_putchar(int ch)
{
	sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_console_putchar);
/**
 * sbi_console_getchar() - Reads a byte from console device.
 *
 * The legacy (SBI v0.1) call reports the result in a0, which sbi_ecall()
 * exposes as the error field — hence ret.error is the character read
 * (or a negative value when nothing is pending).
 *
 * Returns the value read from console.
 */
int sbi_console_getchar(void)
{
	struct sbiret ret;

	ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0);

	return ret.error;
}
EXPORT_SYMBOL(sbi_console_getchar);
/**
 * sbi_shutdown() - Remove all the harts from executing supervisor code.
 *
 * Legacy (SBI v0.1) shutdown request; if the firmware honors it, this
 * call is not expected to return.
 *
 * Return: None
 */
void sbi_shutdown(void)
{
	sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_shutdown);
/**
 * __sbi_set_timer_v01() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Legacy (SBI v0.1) backend. On RV32 the 64-bit deadline is split across
 * two argument registers (low word first).
 *
 * Return: None
 */
static void __sbi_set_timer_v01(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0);
#endif
}
/*
 * Legacy (SBI v0.1) IPI backend: build a one-hart bitmask for @cpu and
 * pass it to the firmware by (virtual) address, as the v0.1 call expects
 * a pointer to the hart mask rather than the mask itself.
 */
static void __sbi_send_ipi_v01(unsigned int cpu)
{
	unsigned long hart_mask =
		__sbi_v01_cpumask_to_hartmask(cpumask_of(cpu));

	sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)(&hart_mask),
		  0, 0, 0, 0, 0);
}
  100. static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
  101. unsigned long start, unsigned long size,
  102. unsigned long arg4, unsigned long arg5)
  103. {
  104. int result = 0;
  105. unsigned long hart_mask;
  106. if (!cpu_mask || cpumask_empty(cpu_mask))
  107. cpu_mask = cpu_online_mask;
  108. hart_mask = __sbi_v01_cpumask_to_hartmask(cpu_mask);
  109. /* v0.2 function IDs are equivalent to v0.1 extension IDs */
  110. switch (fid) {
  111. case SBI_EXT_RFENCE_REMOTE_FENCE_I:
  112. sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
  113. (unsigned long)&hart_mask, 0, 0, 0, 0, 0);
  114. break;
  115. case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
  116. sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
  117. (unsigned long)&hart_mask, start, size,
  118. 0, 0, 0);
  119. break;
  120. case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
  121. sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
  122. (unsigned long)&hart_mask, start, size,
  123. arg4, 0, 0);
  124. break;
  125. default:
  126. pr_err("SBI call [%d]not supported in SBI v0.1\n", fid);
  127. result = -EINVAL;
  128. }
  129. return result;
  130. }
/* With SBI v0.1 built in, the legacy shutdown call backs pm_power_off. */
static void sbi_set_power_off(void)
{
	pm_power_off = sbi_shutdown;
}
  135. #else
/* CONFIG_RISCV_SBI_V01=n stub: no legacy fallback, just warn. */
static void __sbi_set_timer_v01(uint64_t stime_value)
{
	pr_warn("Timer extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}
/* CONFIG_RISCV_SBI_V01=n stub: no legacy fallback, just warn. */
static void __sbi_send_ipi_v01(unsigned int cpu)
{
	pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());
}
/*
 * CONFIG_RISCV_SBI_V01=n stub: warn and report success so callers
 * treat the fence as a no-op rather than an error.
 */
static int __sbi_rfence_v01(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	pr_warn("remote fence extension is not available in SBI v%lu.%lu\n",
		sbi_major_version(), sbi_minor_version());

	return 0;
}
/* Without SBI v0.1 there is no legacy shutdown call to hook up. */
static void sbi_set_power_off(void) {}
  155. #endif /* CONFIG_RISCV_SBI_V01 */
/*
 * SBI v0.2+ TIME extension backend for programming the next timer event.
 * On RV32 the 64-bit deadline is split across two argument registers.
 */
static void __sbi_set_timer_v02(uint64_t stime_value)
{
#if __riscv_xlen == 32
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value,
		  stime_value >> 32, 0, 0, 0, 0);
#else
	sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0,
		  0, 0, 0, 0);
#endif
}
  166. static void __sbi_send_ipi_v02(unsigned int cpu)
  167. {
  168. int result;
  169. struct sbiret ret = {0};
  170. ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
  171. 1UL, cpuid_to_hartid_map(cpu), 0, 0, 0, 0);
  172. if (ret.error) {
  173. result = sbi_err_map_linux_errno(ret.error);
  174. pr_err("%s: hbase = [%lu] failed (error [%d])\n",
  175. __func__, cpuid_to_hartid_map(cpu), result);
  176. }
  177. }
  178. static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask,
  179. unsigned long hbase, unsigned long start,
  180. unsigned long size, unsigned long arg4,
  181. unsigned long arg5)
  182. {
  183. struct sbiret ret = {0};
  184. int ext = SBI_EXT_RFENCE;
  185. int result = 0;
  186. switch (fid) {
  187. case SBI_EXT_RFENCE_REMOTE_FENCE_I:
  188. ret = sbi_ecall(ext, fid, hmask, hbase, 0, 0, 0, 0);
  189. break;
  190. case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
  191. ret = sbi_ecall(ext, fid, hmask, hbase, start,
  192. size, 0, 0);
  193. break;
  194. case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
  195. ret = sbi_ecall(ext, fid, hmask, hbase, start,
  196. size, arg4, 0);
  197. break;
  198. case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
  199. ret = sbi_ecall(ext, fid, hmask, hbase, start,
  200. size, 0, 0);
  201. break;
  202. case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
  203. ret = sbi_ecall(ext, fid, hmask, hbase, start,
  204. size, arg4, 0);
  205. break;
  206. case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
  207. ret = sbi_ecall(ext, fid, hmask, hbase, start,
  208. size, 0, 0);
  209. break;
  210. case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
  211. ret = sbi_ecall(ext, fid, hmask, hbase, start,
  212. size, arg4, 0);
  213. break;
  214. default:
  215. pr_err("unknown function ID [%lu] for SBI extension [%d]\n",
  216. fid, ext);
  217. result = -EINVAL;
  218. }
  219. if (ret.error) {
  220. result = sbi_err_map_linux_errno(ret.error);
  221. pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
  222. __func__, hbase, hmask, result);
  223. }
  224. return result;
  225. }
/*
 * SBI v0.2+ remote-fence backend: batch the target harts into
 * (hmask, hbase) windows spanning at most BITS_PER_LONG hartids and
 * issue one __sbi_rfence_v02_call() per window. Note the cpumask
 * iteration order is by cpuid, which need not be sorted by hartid.
 */
static int __sbi_rfence_v02(int fid, const struct cpumask *cpu_mask,
			    unsigned long start, unsigned long size,
			    unsigned long arg4, unsigned long arg5)
{
	unsigned long hartid, cpuid, hmask = 0, hbase = 0, htop = 0;
	int result;

	/* NULL or empty mask means "all online CPUs". */
	if (!cpu_mask || cpumask_empty(cpu_mask))
		cpu_mask = cpu_online_mask;

	for_each_cpu(cpuid, cpu_mask) {
		hartid = cpuid_to_hartid_map(cpuid);
		if (hmask) {
			/*
			 * This hartid cannot share a BITS_PER_LONG-wide
			 * window with the pending [hbase, htop] range:
			 * flush the pending call and start a new window.
			 */
			if (hartid + BITS_PER_LONG <= htop ||
			    hbase + BITS_PER_LONG <= hartid) {
				result = __sbi_rfence_v02_call(fid, hmask,
						hbase, start, size, arg4, arg5);
				if (result)
					return result;
				hmask = 0;
			} else if (hartid < hbase) {
				/* shift the mask to fit lower hartid */
				hmask <<= hbase - hartid;
				hbase = hartid;
			}
		}

		if (!hmask) {
			/* Open a fresh window anchored at this hartid. */
			hbase = hartid;
			htop = hartid;
		} else if (hartid > htop) {
			htop = hartid;
		}

		hmask |= BIT(hartid - hbase);
	}

	/* Flush the final, partially-accumulated window. */
	if (hmask) {
		result = __sbi_rfence_v02_call(fid, hmask, hbase,
					       start, size, arg4, arg5);
		if (result)
			return result;
	}

	return 0;
}
/**
 * sbi_set_timer() - Program the timer for next timer event.
 * @stime_value: The value after which next timer event should fire.
 *
 * Dispatches to the backend selected by sbi_init() (v0.2 TIME extension
 * or the v0.1 legacy call).
 *
 * Return: None.
 */
void sbi_set_timer(uint64_t stime_value)
{
	__sbi_set_timer(stime_value);
}
/**
 * sbi_send_ipi() - Send an IPI to any hart.
 * @cpu: Logical id of the target CPU.
 *
 * Dispatches to the backend selected by sbi_init() (v0.2 IPI extension
 * or the v0.1 legacy call).
 */
void sbi_send_ipi(unsigned int cpu)
{
	__sbi_send_ipi(cpu);
}
EXPORT_SYMBOL(sbi_send_ipi);
/**
 * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
 * @cpu_mask: A cpu mask containing all the target harts.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_fence_i(const struct cpumask *cpu_mask)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
			    cpu_mask, 0, 0, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_fence_i);
  297. /**
  298. * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
  299. * remote harts for a virtual address range belonging to a specific ASID or not.
  300. *
  301. * @cpu_mask: A cpu mask containing all the target harts.
  302. * @start: Start of the virtual address
  303. * @size: Total size of the virtual address range.
  304. * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
  305. * for flushing all address spaces.
  306. *
  307. * Return: 0 on success, appropriate linux error code otherwise.
  308. */
  309. int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
  310. unsigned long start,
  311. unsigned long size,
  312. unsigned long asid)
  313. {
  314. if (asid == FLUSH_TLB_NO_ASID)
  315. return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
  316. cpu_mask, start, size, 0, 0);
  317. else
  318. return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
  319. cpu_mask, start, size, asid, 0);
  320. }
  321. EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
/**
 * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
 * harts for the specified guest physical address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);
/**
 * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
 * remote harts for a guest physical address range belonging to a specific VMID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the guest physical address
 * @size: Total size of the guest physical address range.
 * @vmid: The value of guest ID (VMID).
 *
 * Return: 0 if success, Error otherwise.
 */
int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long vmid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
			    cpu_mask, start, size, vmid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);
/**
 * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
 * harts for the current guest virtual address range.
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
			   unsigned long start,
			   unsigned long size)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
			    cpu_mask, start, size, 0, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma);
/**
 * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given
 * remote harts for current guest virtual address range belonging to a specific
 * ASID.
 *
 * @cpu_mask: A cpu mask containing all the target harts.
 * @start: Start of the current guest virtual address
 * @size: Total size of the current guest virtual address range.
 * @asid: The value of address space identifier (ASID).
 *
 * Return: 0 on success, appropriate linux error code otherwise.
 */
int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
				unsigned long start,
				unsigned long size,
				unsigned long asid)
{
	return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
			    cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);
/*
 * Request a system reset via the SRST extension. If the firmware accepts
 * the request, the ecall does not come back to us — so reaching the
 * pr_warn() below means the reset attempt failed.
 */
static void sbi_srst_reset(unsigned long type, unsigned long reason)
{
	sbi_ecall(SBI_EXT_SRST, SBI_EXT_SRST_RESET, type, reason,
		  0, 0, 0, 0);
	pr_warn("%s: type=0x%lx reason=0x%lx failed\n",
		__func__, type, reason);
}
  404. static int sbi_srst_reboot(struct notifier_block *this,
  405. unsigned long mode, void *cmd)
  406. {
  407. sbi_srst_reset((mode == REBOOT_WARM || mode == REBOOT_SOFT) ?
  408. SBI_SRST_RESET_TYPE_WARM_REBOOT :
  409. SBI_SRST_RESET_TYPE_COLD_REBOOT,
  410. SBI_SRST_RESET_REASON_NONE);
  411. return NOTIFY_DONE;
  412. }
/* Registered with register_restart_handler() in sbi_init() when SRST exists. */
static struct notifier_block sbi_srst_reboot_nb;

/* pm_power_off hook: shut the system down through the SRST extension. */
static void sbi_srst_power_off(void)
{
	sbi_srst_reset(SBI_SRST_RESET_TYPE_SHUTDOWN,
		       SBI_SRST_RESET_REASON_NONE);
}
  419. /**
  420. * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
  421. * @extid: The extension ID to be probed.
  422. *
  423. * Return: 1 or an extension specific nonzero value if yes, 0 otherwise.
  424. */
  425. long sbi_probe_extension(int extid)
  426. {
  427. struct sbiret ret;
  428. ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
  429. 0, 0, 0, 0, 0);
  430. if (!ret.error)
  431. return ret.value;
  432. return 0;
  433. }
  434. EXPORT_SYMBOL(sbi_probe_extension);
/* Ask the firmware which SBI specification version it implements. */
static inline long sbi_get_spec_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
}

/* SBI implementation (firmware) ID. */
static inline long sbi_get_firmware_id(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
}

/* SBI implementation (firmware) version. */
static inline long sbi_get_firmware_version(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
}
/* Machine vendor ID (mvendorid CSR) reported via the SBI base extension. */
long sbi_get_mvendorid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MVENDORID);
}
EXPORT_SYMBOL_GPL(sbi_get_mvendorid);

/* Machine architecture ID (marchid CSR) reported via the SBI base extension. */
long sbi_get_marchid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MARCHID);
}
EXPORT_SYMBOL_GPL(sbi_get_marchid);

/* Machine implementation ID (mimpid CSR) reported via the SBI base extension. */
long sbi_get_mimpid(void)
{
	return __sbi_base_ecall(SBI_EXT_BASE_GET_MIMPID);
}
EXPORT_SYMBOL_GPL(sbi_get_mimpid);
  462. bool sbi_debug_console_available;
  463. int sbi_debug_console_write(const char *bytes, unsigned int num_bytes)
  464. {
  465. phys_addr_t base_addr;
  466. struct sbiret ret;
  467. if (!sbi_debug_console_available)
  468. return -EOPNOTSUPP;
  469. if (is_vmalloc_addr(bytes))
  470. base_addr = page_to_phys(vmalloc_to_page(bytes)) +
  471. offset_in_page(bytes);
  472. else
  473. base_addr = __pa(bytes);
  474. if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
  475. num_bytes = PAGE_SIZE - offset_in_page(bytes);
  476. if (IS_ENABLED(CONFIG_32BIT))
  477. ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
  478. num_bytes, lower_32_bits(base_addr),
  479. upper_32_bits(base_addr), 0, 0, 0);
  480. else
  481. ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE,
  482. num_bytes, base_addr, 0, 0, 0, 0);
  483. if (ret.error == SBI_ERR_FAILURE)
  484. return -EIO;
  485. return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
  486. }
  487. int sbi_debug_console_read(char *bytes, unsigned int num_bytes)
  488. {
  489. phys_addr_t base_addr;
  490. struct sbiret ret;
  491. if (!sbi_debug_console_available)
  492. return -EOPNOTSUPP;
  493. if (is_vmalloc_addr(bytes))
  494. base_addr = page_to_phys(vmalloc_to_page(bytes)) +
  495. offset_in_page(bytes);
  496. else
  497. base_addr = __pa(bytes);
  498. if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes))
  499. num_bytes = PAGE_SIZE - offset_in_page(bytes);
  500. if (IS_ENABLED(CONFIG_32BIT))
  501. ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
  502. num_bytes, lower_32_bits(base_addr),
  503. upper_32_bits(base_addr), 0, 0, 0);
  504. else
  505. ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ,
  506. num_bytes, base_addr, 0, 0, 0, 0);
  507. if (ret.error == SBI_ERR_FAILURE)
  508. return -EIO;
  509. return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value;
  510. }
/*
 * Probe the SBI implementation and install the timer/IPI/rfence backends.
 *
 * If the firmware reports a v0.2+ specification, each extension is probed
 * individually, falling back to the v0.1 legacy call (or its warning stub
 * when CONFIG_RISCV_SBI_V01 is disabled) if it is not implemented. A pure
 * v0.1 firmware gets the legacy backends for everything.
 */
void __init sbi_init(void)
{
	int ret;

	/* May be overridden below if the SRST extension is available. */
	sbi_set_power_off();
	ret = sbi_get_spec_version();
	if (ret > 0)
		sbi_spec_version = ret;

	pr_info("SBI specification v%lu.%lu detected\n",
		sbi_major_version(), sbi_minor_version());

	if (!sbi_spec_is_0_1()) {
		pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
			sbi_get_firmware_id(), sbi_get_firmware_version());
		if (sbi_probe_extension(SBI_EXT_TIME)) {
			__sbi_set_timer = __sbi_set_timer_v02;
			pr_info("SBI TIME extension detected\n");
		} else {
			__sbi_set_timer = __sbi_set_timer_v01;
		}
		if (sbi_probe_extension(SBI_EXT_IPI)) {
			__sbi_send_ipi = __sbi_send_ipi_v02;
			pr_info("SBI IPI extension detected\n");
		} else {
			__sbi_send_ipi = __sbi_send_ipi_v01;
		}
		if (sbi_probe_extension(SBI_EXT_RFENCE)) {
			__sbi_rfence = __sbi_rfence_v02;
			pr_info("SBI RFENCE extension detected\n");
		} else {
			__sbi_rfence = __sbi_rfence_v01;
		}
		/* SRST was introduced with spec v0.3. */
		if ((sbi_spec_version >= sbi_mk_version(0, 3)) &&
		    sbi_probe_extension(SBI_EXT_SRST)) {
			pr_info("SBI SRST extension detected\n");
			pm_power_off = sbi_srst_power_off;
			sbi_srst_reboot_nb.notifier_call = sbi_srst_reboot;
			sbi_srst_reboot_nb.priority = 192;
			register_restart_handler(&sbi_srst_reboot_nb);
		}
		/* DBCN is gated on spec v2.0. */
		if ((sbi_spec_version >= sbi_mk_version(2, 0)) &&
		    (sbi_probe_extension(SBI_EXT_DBCN) > 0)) {
			pr_info("SBI DBCN extension detected\n");
			sbi_debug_console_available = true;
		}
	} else {
		__sbi_set_timer = __sbi_set_timer_v01;
		__sbi_send_ipi = __sbi_send_ipi_v01;
		__sbi_rfence = __sbi_rfence_v01;
	}
}