suspend.c 4.4 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183
  1. // SPDX-License-Identifier: GPL-2.0-only
  2. /*
  3. * Copyright (c) 2021 Western Digital Corporation or its affiliates.
  4. * Copyright (c) 2022 Ventana Micro Systems Inc.
  5. */
  6. #define pr_fmt(fmt) "suspend: " fmt
  7. #include <linux/ftrace.h>
  8. #include <linux/suspend.h>
  9. #include <asm/csr.h>
  10. #include <asm/sbi.h>
  11. #include <asm/suspend.h>
/*
 * Save the CSRs that must survive a suspend/resume cycle into @context,
 * to be reinstated later by suspend_restore_csrs().
 */
void suspend_save_csrs(struct suspend_context *context)
{
	/* envcfg is only saved where the extension is known to exist */
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
		context->envcfg = csr_read(CSR_ENVCFG);
	context->tvec = csr_read(CSR_TVEC);
	context->ie = csr_read(CSR_IE);

	/*
	 * No need to save/restore IP CSR (i.e. MIP or SIP) because:
	 *
	 * 1. For no-MMU (M-mode) kernel, the bits in MIP are set by
	 *    external devices (such as interrupt controller, timer, etc).
	 * 2. For MMU (S-mode) kernel, the bits in SIP are set by
	 *    M-mode firmware and external devices (such as interrupt
	 *    controller, etc).
	 */

#ifdef CONFIG_MMU
	/* Page-table base/ASID register only exists on MMU kernels. */
	context->satp = csr_read(CSR_SATP);
#endif
}
/*
 * Restore the CSRs previously captured by suspend_save_csrs() from
 * @context after resume.
 */
void suspend_restore_csrs(struct suspend_context *context)
{
	/* Clear the scratch CSR; it is not part of the saved context. */
	csr_write(CSR_SCRATCH, 0);
	if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG))
		csr_write(CSR_ENVCFG, context->envcfg);
	csr_write(CSR_TVEC, context->tvec);
	csr_write(CSR_IE, context->ie);

#ifdef CONFIG_MMU
	csr_write(CSR_SATP, context->satp);
#endif
}
/*
 * Suspend the calling CPU.
 *
 * @arg:    opaque value forwarded unchanged to @finish
 * @finish: low-level suspend finisher; receives @arg, the physical
 *          address of the resume entry point, and the address of the
 *          saved context. It is expected not to return on success.
 *
 * Returns 0 after a successful suspend/resume cycle, -EINVAL when
 * @finish is NULL, -EOPNOTSUPP if the finisher returned without error,
 * or the finisher's error code otherwise.
 */
int cpu_suspend(unsigned long arg,
		int (*finish)(unsigned long arg,
			      unsigned long entry,
			      unsigned long context))
{
	int rc = 0;
	struct suspend_context context = { 0 };

	/* Finisher should be non-NULL */
	if (!finish)
		return -EINVAL;

	/* Save additional CSRs */
	suspend_save_csrs(&context);

	/*
	 * Function graph tracer state gets inconsistent when the kernel
	 * calls functions that never return (aka finishers) hence disable
	 * graph tracing during their execution.
	 */
	pause_graph_tracing();

	/* Save context on stack */
	if (__cpu_suspend_enter(&context)) {
		/* Call the finisher */
		rc = finish(arg, __pa_symbol(__cpu_resume_enter),
			    (ulong)&context);

		/*
		 * Should never reach here, unless the suspend finisher
		 * fails. Successful cpu_suspend() should return from
		 * __cpu_resume_enter()
		 */
		if (!rc)
			rc = -EOPNOTSUPP;
	}

	/* Enable function graph tracer */
	unpause_graph_tracing();

	/* Restore additional CSRs */
	suspend_restore_csrs(&context);

	return rc;
}
  79. #ifdef CONFIG_RISCV_SBI
  80. static int sbi_system_suspend(unsigned long sleep_type,
  81. unsigned long resume_addr,
  82. unsigned long opaque)
  83. {
  84. struct sbiret ret;
  85. ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND,
  86. sleep_type, resume_addr, opaque, 0, 0, 0);
  87. if (ret.error)
  88. return sbi_err_map_linux_errno(ret.error);
  89. return ret.value;
  90. }
/*
 * platform_suspend_ops.enter hook. @state is intentionally unused:
 * the .valid hook (suspend_valid_only_mem) restricts entry to
 * suspend-to-RAM, which is the only sleep type requested here.
 */
static int sbi_system_suspend_enter(suspend_state_t state)
{
	return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend);
}
/* Generic suspend-framework hooks backed by the SBI SUSP extension. */
static const struct platform_suspend_ops sbi_system_suspend_ops = {
	.valid = suspend_valid_only_mem,	/* suspend-to-RAM only */
	.enter = sbi_system_suspend_enter,
};
  99. static int __init sbi_system_suspend_init(void)
  100. {
  101. if (sbi_spec_version >= sbi_mk_version(2, 0) &&
  102. sbi_probe_extension(SBI_EXT_SUSP) > 0) {
  103. pr_info("SBI SUSP extension detected\n");
  104. if (IS_ENABLED(CONFIG_SUSPEND))
  105. suspend_set_ops(&sbi_system_suspend_ops);
  106. }
  107. return 0;
  108. }
  109. arch_initcall(sbi_system_suspend_init);
  110. static int sbi_suspend_finisher(unsigned long suspend_type,
  111. unsigned long resume_addr,
  112. unsigned long opaque)
  113. {
  114. struct sbiret ret;
  115. ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_SUSPEND,
  116. suspend_type, resume_addr, opaque, 0, 0, 0);
  117. return (ret.error) ? sbi_err_map_linux_errno(ret.error) : 0;
  118. }
  119. int riscv_sbi_hart_suspend(u32 state)
  120. {
  121. if (state & SBI_HSM_SUSP_NON_RET_BIT)
  122. return cpu_suspend(state, sbi_suspend_finisher);
  123. else
  124. return sbi_suspend_finisher(state, 0, 0);
  125. }
  126. bool riscv_sbi_suspend_state_is_valid(u32 state)
  127. {
  128. if (state > SBI_HSM_SUSPEND_RET_DEFAULT &&
  129. state < SBI_HSM_SUSPEND_RET_PLATFORM)
  130. return false;
  131. if (state > SBI_HSM_SUSPEND_NON_RET_DEFAULT &&
  132. state < SBI_HSM_SUSPEND_NON_RET_PLATFORM)
  133. return false;
  134. return true;
  135. }
  136. bool riscv_sbi_hsm_is_supported(void)
  137. {
  138. /*
  139. * The SBI HSM suspend function is only available when:
  140. * 1) SBI version is 0.3 or higher
  141. * 2) SBI HSM extension is available
  142. */
  143. if (sbi_spec_version < sbi_mk_version(0, 3) ||
  144. !sbi_probe_extension(SBI_EXT_HSM)) {
  145. pr_info("HSM suspend not available\n");
  146. return false;
  147. }
  148. return true;
  149. }
  150. #endif /* CONFIG_RISCV_SBI */