
/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
/*
 * PowerPC specific definitions for NOLIBC
 * Copyright (C) 2023 Zhangjin Wu <falcon@tinylab.org>
 */

#ifndef _NOLIBC_ARCH_POWERPC_H
#define _NOLIBC_ARCH_POWERPC_H

#include "compiler.h"
#include "crt.h"
/* Syscalls for PowerPC:
 *   - stack is 16-byte aligned
 *   - syscall number is passed in r0
 *   - arguments are in r3, r4, r5, r6, r7, r8, r9
 *   - the system call is performed by calling "sc"
 *   - syscall return comes in r3, and the summary overflow bit is checked
 *     to know if an error occurred, in which case errno is in r3
 *     (a caller-side sketch of this convention follows my_syscall6 below)
 *   - the arguments are cast to long and assigned into the target
 *     registers which are then simply passed as registers to the asm code,
 *     so that we don't have to experience issues with register constraints.
 */
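
/* For illustration only: a typical wrapper built on the my_syscallN macros
 * defined below would look like the sketch here (sys_write is a hypothetical
 * name in this context; __NR_write comes from the kernel headers):
 *
 *	static long sys_write(int fd, const void *buf, unsigned long count)
 *	{
 *		return my_syscall3(__NR_write, fd, buf, count);
 *	}
 *
 * On failure the macros return the negated errno value, consistent with the
 * other nolibc architectures.
 */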

#define _NOLIBC_SYSCALL_CLOBBERLIST \
	"memory", "cr0", "r12", "r11", "r10", "r9"

#define my_syscall0(num) \
({ \
	register long _ret __asm__ ("r3"); \
	register long _num __asm__ ("r0") = (num); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num) \
		: \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5", "r4" \
	); \
	_ret; \
})

#define my_syscall1(num, arg1) \
({ \
	register long _ret __asm__ ("r3"); \
	register long _num __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5", "r4" \
	); \
	_ret; \
})

#define my_syscall2(num, arg1, arg2) \
({ \
	register long _ret __asm__ ("r3"); \
	register long _num __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6", "r5" \
	); \
	_ret; \
})

#define my_syscall3(num, arg1, arg2, arg3) \
({ \
	register long _ret __asm__ ("r3"); \
	register long _num __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	register long _arg3 __asm__ ("r5") = (long)(arg3); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7", "r6" \
	); \
	_ret; \
})

#define my_syscall4(num, arg1, arg2, arg3, arg4) \
({ \
	register long _ret __asm__ ("r3"); \
	register long _num __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	register long _arg3 __asm__ ("r5") = (long)(arg3); \
	register long _arg4 __asm__ ("r6") = (long)(arg4); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
		  "+r"(_arg4) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8", "r7" \
	); \
	_ret; \
})

#define my_syscall5(num, arg1, arg2, arg3, arg4, arg5) \
({ \
	register long _ret __asm__ ("r3"); \
	register long _num __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	register long _arg3 __asm__ ("r5") = (long)(arg3); \
	register long _arg4 __asm__ ("r6") = (long)(arg4); \
	register long _arg5 __asm__ ("r7") = (long)(arg5); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
		  "+r"(_arg4), "+r"(_arg5) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST, "r8" \
	); \
	_ret; \
})

#define my_syscall6(num, arg1, arg2, arg3, arg4, arg5, arg6) \
({ \
	register long _ret __asm__ ("r3"); \
	register long _num __asm__ ("r0") = (num); \
	register long _arg1 __asm__ ("r3") = (long)(arg1); \
	register long _arg2 __asm__ ("r4") = (long)(arg2); \
	register long _arg3 __asm__ ("r5") = (long)(arg3); \
	register long _arg4 __asm__ ("r6") = (long)(arg4); \
	register long _arg5 __asm__ ("r7") = (long)(arg5); \
	register long _arg6 __asm__ ("r8") = (long)(arg6); \
	\
	__asm__ volatile ( \
		" sc\n" \
		" bns+ 1f\n" \
		" neg %0, %0\n" \
		"1:\n" \
		: "=r"(_ret), "+r"(_num), "+r"(_arg2), "+r"(_arg3), \
		  "+r"(_arg4), "+r"(_arg5), "+r"(_arg6) \
		: "0"(_arg1) \
		: _NOLIBC_SYSCALL_CLOBBERLIST \
	); \
	_ret; \
})
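
/* Illustrative only: nolibc's generic code is what turns these raw returns
 * into the usual libc errno convention.  A minimal caller-side sketch,
 * assuming an errno variable is in scope (the helper name is hypothetical,
 * not part of this header):
 *
 *	static long sysret_sketch(long ret)
 *	{
 *		if (ret < 0 && ret >= -4095) {
 *			errno = -ret;	// macros return -errno on failure
 *			return -1;
 *		}
 *		return ret;		// success: pass the result through
 *	}
 */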

#if !defined(__powerpc64__) && !defined(__clang__)
/* FIXME: For 32-bit PowerPC, with newer gcc compilers (e.g. gcc 13.1.0),
 * "omit-frame-pointer" fails with __attribute__((no_stack_protector)) but
 * works with __attribute__((__optimize__("-fno-stack-protector")))
 */
#ifdef __no_stack_protector
#undef __no_stack_protector
#define __no_stack_protector __attribute__((__optimize__("-fno-stack-protector")))
#endif
#endif /* !__powerpc64__ */

/* startup code */
void __attribute__((weak, noreturn)) __nolibc_entrypoint __no_stack_protector _start(void)
{
#ifdef __powerpc64__
#if _CALL_ELF == 2
	/* with -mabi=elfv2, save the TOC/GOT pointer to r2; r12 holds the
	 * global entry point address, from which the TOC is computed
	 * https://www.llvm.org/devmtg/2014-04/PDFs/Talks/Euro-LLVM-2014-Weigand.pdf
	 * https://refspecs.linuxfoundation.org/ELF/ppc64/PPC-elf64abi.pdf
	 */
	__asm__ volatile (
		"addis  2, 12, .TOC. - _start@ha\n"
		"addi   2,  2, .TOC. - _start@l\n"
	);
#endif /* _CALL_ELF == 2 */

	__asm__ volatile (
		"mr     3, 1\n"      /* save stack pointer to r3, as arg1 of _start_c */
		"clrrdi 1, 1, 4\n"   /* align the stack to 16 bytes                   */
		"li     0, 0\n"      /* zero the frame pointer                        */
		"stdu   1, -32(1)\n" /* the initial stack frame                       */
		"bl     _start_c\n"  /* transfer to the C runtime                     */
	);
#else
	__asm__ volatile (
		"mr     3, 1\n"      /* save stack pointer to r3, as arg1 of _start_c */
		"clrrwi 1, 1, 4\n"   /* align the stack to 16 bytes                   */
		"li     0, 0\n"      /* zero the frame pointer                        */
		"stwu   1, -16(1)\n" /* the initial stack frame                       */
		"bl     _start_c\n"  /* transfer to the C runtime                     */
	);
#endif
	__nolibc_entrypoint_epilogue();
}
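
/* For reference, the hand-off target _start_c() is declared in crt.h.  A
 * minimal sketch of what it does with the stack pointer passed in r3, where
 * the layout of argc/argv/envp on the initial stack follows the ELF ABI:
 *
 *	void _start_c(long *sp)
 *	{
 *		long   argc = *sp;
 *		char **argv = (void *)(sp + 1);
 *		char **envp = argv + argc + 1;
 *
 *		exit(main(argc, argv, envp));
 *	}
 */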

#endif /* _NOLIBC_ARCH_POWERPC_H */