/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 ARM Ltd.
 */
#ifndef __ASM_FP_H
#define __ASM_FP_H

#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/sigcontext.h>
#include <asm/sysreg.h>

#ifndef __ASSEMBLY__

#include <linux/bitmap.h>
#include <linux/build_bug.h>
#include <linux/bug.h>
#include <linux/cache.h>
#include <linux/init.h>
#include <linux/stddef.h>
#include <linux/types.h>

/* Masks for extracting the FPSR and FPCR from the FPSCR */
#define VFP_FPSCR_STAT_MASK	0xf800009f
#define VFP_FPSCR_CTRL_MASK	0x07f79f00
/*
 * The VFP state has 32x64-bit registers and a single 32-bit
 * control/status register, so it occupies (32 * 8) + 4 = 260 bytes.
 */
#define VFP_STATE_SIZE		((32 * 8) + 4)
static inline unsigned long cpacr_save_enable_kernel_sve(void)
{
	unsigned long old = read_sysreg(cpacr_el1);
	unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_ZEN_EL1EN;

	write_sysreg(old | set, cpacr_el1);
	isb();
	return old;
}

static inline unsigned long cpacr_save_enable_kernel_sme(void)
{
	unsigned long old = read_sysreg(cpacr_el1);
	unsigned long set = CPACR_EL1_FPEN_EL1EN | CPACR_EL1_SMEN_EL1EN;

	write_sysreg(old | set, cpacr_el1);
	isb();
	return old;
}

static inline void cpacr_restore(unsigned long cpacr)
{
	write_sysreg(cpacr, cpacr_el1);
	isb();
}
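
/*
 * Illustrative usage only (a sketch, not taken from this file):
 * callers that need SVE usable at EL1 for a short critical section
 * save/enable CPACR_EL1 and then restore it afterwards, e.g.:
 *
 *	unsigned long cpacr = cpacr_save_enable_kernel_sve();
 *	// ... kernel-mode SVE work ...
 *	cpacr_restore(cpacr);
 */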
/*
 * When we defined the maximum SVE vector length we defined the ABI so
 * that the maximum vector length included all the bits in ZCR that
 * are reserved for future expansion, rather than just those currently
 * defined by the architecture. Allocating worst-case buffers at that
 * length results in excessively large allocations, an effect that is
 * even more pronounced for SME due to ZA. Define more suitable VLs
 * for these situations.
 */
#define ARCH_SVE_VQ_MAX ((ZCR_ELx_LEN_MASK >> ZCR_ELx_LEN_SHIFT) + 1)
#define SME_VQ_MAX	((SMCR_ELx_LEN_MASK >> SMCR_ELx_LEN_SHIFT) + 1)
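
/*
 * Illustrative only (hypothetical allocation): a worst-case buffer
 * for an SVE register dump can be sized with the architectural limit
 * rather than the much larger ABI limit SVE_VQ_MAX, e.g.:
 *
 *	kzalloc(SVE_SIG_REGS_SIZE(ARCH_SVE_VQ_MAX), GFP_KERNEL);
 */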
struct task_struct;

extern void fpsimd_save_state(struct user_fpsimd_state *state);
extern void fpsimd_load_state(struct user_fpsimd_state *state);

extern void fpsimd_thread_switch(struct task_struct *next);
extern void fpsimd_flush_thread(void);

extern void fpsimd_signal_preserve_current_state(void);
extern void fpsimd_preserve_current_state(void);
extern void fpsimd_restore_current_state(void);
extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
extern void fpsimd_kvm_prepare(void);
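
/*
 * Descriptive note (added commentary, not normative): cpu_fp_state
 * gathers the pointers and metadata that fpsimd_bind_state_to_cpu()
 * needs to associate an FP/SVE/SME state (e.g. a KVM guest's) with
 * the current CPU, so later save/restore knows where to operate.
 */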
struct cpu_fp_state {
	struct user_fpsimd_state *st;
	void *sve_state;
	void *sme_state;
	u64 *svcr;
	u64 *fpmr;
	unsigned int sve_vl;
	unsigned int sme_vl;
	enum fp_type *fp_type;
	enum fp_type to_save;
};

extern void fpsimd_bind_state_to_cpu(struct cpu_fp_state *fp_state);

extern void fpsimd_flush_task_state(struct task_struct *target);
extern void fpsimd_save_and_flush_cpu_state(void);
static inline bool thread_sm_enabled(struct thread_struct *thread)
{
	return system_supports_sme() && (thread->svcr & SVCR_SM_MASK);
}

static inline bool thread_za_enabled(struct thread_struct *thread)
{
	return system_supports_sme() && (thread->svcr & SVCR_ZA_MASK);
}

/* Maximum VL that SVE/SME VL-agnostic software can transparently support */
#define VL_ARCH_MAX 0x100

/* Offset of FFR in the SVE register dump */
static inline size_t sve_ffr_offset(int vl)
{
	return SVE_SIG_FFR_OFFSET(sve_vq_from_vl(vl)) - SVE_SIG_REGS_OFFSET;
}
static inline void *sve_pffr(struct thread_struct *thread)
{
	unsigned int vl;

	if (system_supports_sme() && thread_sm_enabled(thread))
		vl = thread_get_sme_vl(thread);
	else
		vl = thread_get_sve_vl(thread);

	return (char *)thread->sve_state + sve_ffr_offset(vl);
}

static inline void *thread_zt_state(struct thread_struct *thread)
{
	/* The ZT register state is stored immediately after the ZA state */
	unsigned int sme_vq = sve_vq_from_vl(thread_get_sme_vl(thread));

	return thread->sme_state + ZA_SIG_REGS_SIZE(sme_vq);
}
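
/*
 * Illustrative layout sketch (added commentary, not normative):
 *
 *	sme_state: [ ZA: ZA_SIG_REGS_SIZE(vq) | ZT0: ZT_SIG_REG_SIZE ]
 *
 * where vq is derived from the task's SME vector length; the ZT0
 * area is only meaningful where SME2 is supported.
 */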
extern void sve_save_state(void *state, u32 *pfpsr, int save_ffr);
extern void sve_load_state(void const *state, u32 const *pfpsr,
			   int restore_ffr);
extern void sve_flush_live(bool flush_ffr, unsigned long vq_minus_1);
extern unsigned int sve_get_vl(void);
extern void sve_set_vq(unsigned long vq_minus_1);
extern void sme_set_vq(unsigned long vq_minus_1);
extern void sme_save_state(void *state, int zt);
extern void sme_load_state(void const *state, int zt);

struct arm64_cpu_capabilities;
extern void cpu_enable_fpsimd(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sve(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sme(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_sme2(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fa64(const struct arm64_cpu_capabilities *__unused);
extern void cpu_enable_fpmr(const struct arm64_cpu_capabilities *__unused);
/*
 * Helpers to translate bit indices in sve_vq_map to VQ values (and
 * vice versa). This allows find_next_bit() to be used to find the
 * _maximum_ VQ not exceeding a certain value.
 */
static inline unsigned int __vq_to_bit(unsigned int vq)
{
	return SVE_VQ_MAX - vq;
}

static inline unsigned int __bit_to_vq(unsigned int bit)
{
	return SVE_VQ_MAX - bit;
}
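
/*
 * Illustrative only: because bits are stored in reverse VQ order,
 * find_next_bit() starting at __vq_to_bit(vq) yields the largest
 * supported VQ not exceeding vq, e.g. (sketch):
 *
 *	unsigned int bit = find_next_bit(info->vq_map, SVE_VQ_MAX,
 *					 __vq_to_bit(vq));
 *	if (bit < SVE_VQ_MAX)
 *		vq = __bit_to_vq(bit);
 */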
struct vl_info {
	enum vec_type type;
	const char *name;		/* For display purposes */

	/* Minimum supported vector length across all CPUs */
	int min_vl;

	/* Maximum supported vector length across all CPUs */
	int max_vl;
	int max_virtualisable_vl;

	/*
	 * Set of available vector lengths,
	 * where length vq is encoded as bit __vq_to_bit(vq):
	 */
	DECLARE_BITMAP(vq_map, SVE_VQ_MAX);

	/* Set of vector lengths present on at least one cpu: */
	DECLARE_BITMAP(vq_partial_map, SVE_VQ_MAX);
};
#ifdef CONFIG_ARM64_SVE

extern void sve_alloc(struct task_struct *task, bool flush);
extern void fpsimd_release_task(struct task_struct *task);
extern void fpsimd_sync_to_sve(struct task_struct *task);
extern void fpsimd_force_sync_to_sve(struct task_struct *task);
extern void sve_sync_to_fpsimd(struct task_struct *task);
extern void sve_sync_from_fpsimd_zeropad(struct task_struct *task);

extern int vec_set_vector_length(struct task_struct *task, enum vec_type type,
				 unsigned long vl, unsigned long flags);

extern int sve_set_current_vl(unsigned long arg);
extern int sve_get_current_vl(void);

static inline void sve_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_ZEN_EL0EN, 0);
}

static inline void sve_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_ZEN_EL0EN);
}

#define sve_cond_update_zcr_vq(val, reg)		\
	do {						\
		u64 __zcr = read_sysreg_s((reg));	\
		u64 __new = __zcr & ~ZCR_ELx_LEN_MASK;	\
		__new |= (val) & ZCR_ELx_LEN_MASK;	\
		if (__zcr != __new)			\
			write_sysreg_s(__new, (reg));	\
	} while (0)
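
/*
 * Illustrative only: the conditional update skips the register write
 * (and its synchronisation cost) when the LEN field is already
 * correct, e.g. (sketch):
 *
 *	sve_cond_update_zcr_vq(sve_vq_from_vl(vl) - 1, SYS_ZCR_EL1);
 */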
/*
 * Probing and setup functions.
 * Calls to these functions must be serialised with one another.
 */
enum vec_type;

extern void __init vec_init_vq_map(enum vec_type type);
extern void vec_update_vq_map(enum vec_type type);
extern int vec_verify_vq_map(enum vec_type type);
extern void __init sve_setup(void);

extern __ro_after_init struct vl_info vl_info[ARM64_VEC_MAX];
static inline void write_vl(enum vec_type type, u64 val)
{
	u64 tmp;

	switch (type) {
#ifdef CONFIG_ARM64_SVE
	case ARM64_VEC_SVE:
		tmp = read_sysreg_s(SYS_ZCR_EL1) & ~ZCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_ZCR_EL1);
		break;
#endif
#ifdef CONFIG_ARM64_SME
	case ARM64_VEC_SME:
		tmp = read_sysreg_s(SYS_SMCR_EL1) & ~SMCR_ELx_LEN_MASK;
		write_sysreg_s(tmp | val, SYS_SMCR_EL1);
		break;
#endif
	default:
		WARN_ON_ONCE(1);
		break;
	}
}
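
/*
 * Illustrative only: val is the raw LEN field value, i.e. the vector
 * quadword count minus one, so a caller would write, e.g. (sketch,
 * assuming vq is valid for the given type):
 *
 *	write_vl(ARM64_VEC_SVE, vq - 1);
 */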
static inline int vec_max_vl(enum vec_type type)
{
	return vl_info[type].max_vl;
}

static inline int vec_max_virtualisable_vl(enum vec_type type)
{
	return vl_info[type].max_virtualisable_vl;
}

static inline int sve_max_vl(void)
{
	return vec_max_vl(ARM64_VEC_SVE);
}

static inline int sve_max_virtualisable_vl(void)
{
	return vec_max_virtualisable_vl(ARM64_VEC_SVE);
}

/* Ensure vq >= SVE_VQ_MIN && vq <= SVE_VQ_MAX before calling this function */
static inline bool vq_available(enum vec_type type, unsigned int vq)
{
	return test_bit(__vq_to_bit(vq), vl_info[type].vq_map);
}

static inline bool sve_vq_available(unsigned int vq)
{
	return vq_available(ARM64_VEC_SVE, vq);
}

size_t sve_state_size(struct task_struct const *task);
#else /* ! CONFIG_ARM64_SVE */

static inline void sve_alloc(struct task_struct *task, bool flush) { }
static inline void fpsimd_release_task(struct task_struct *task) { }
static inline void sve_sync_to_fpsimd(struct task_struct *task) { }
static inline void sve_sync_from_fpsimd_zeropad(struct task_struct *task) { }

static inline int sve_max_virtualisable_vl(void)
{
	return 0;
}

static inline int sve_set_current_vl(unsigned long arg)
{
	return -EINVAL;
}

static inline int sve_get_current_vl(void)
{
	return -EINVAL;
}

static inline int sve_max_vl(void)
{
	return -EINVAL;
}

static inline bool sve_vq_available(unsigned int vq) { return false; }

static inline void sve_user_disable(void) { BUILD_BUG(); }
static inline void sve_user_enable(void) { BUILD_BUG(); }

#define sve_cond_update_zcr_vq(val, reg) do { } while (0)

static inline void vec_init_vq_map(enum vec_type t) { }
static inline void vec_update_vq_map(enum vec_type t) { }
static inline int vec_verify_vq_map(enum vec_type t) { return 0; }
static inline void sve_setup(void) { }

static inline size_t sve_state_size(struct task_struct const *task)
{
	return 0;
}

#endif /* ! CONFIG_ARM64_SVE */
#ifdef CONFIG_ARM64_SME

static inline void sme_user_disable(void)
{
	sysreg_clear_set(cpacr_el1, CPACR_EL1_SMEN_EL0EN, 0);
}

static inline void sme_user_enable(void)
{
	sysreg_clear_set(cpacr_el1, 0, CPACR_EL1_SMEN_EL0EN);
}

static inline void sme_smstart_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTART_SM_EL0, "xzr"));
}

static inline void sme_smstop_sm(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SM_EL0, "xzr"));
}

static inline void sme_smstop(void)
{
	asm volatile(__msr_s(SYS_SVCR_SMSTOP_SMZA_EL0, "xzr"));
}
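
/*
 * Illustrative only: a common pattern before kernel-mode FPSIMD use
 * is to force a known non-streaming, ZA-off state, e.g. (sketch):
 *
 *	if (system_supports_sme())
 *		sme_smstop();
 */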
extern void __init sme_setup(void);

static inline int sme_max_vl(void)
{
	return vec_max_vl(ARM64_VEC_SME);
}

static inline int sme_max_virtualisable_vl(void)
{
	return vec_max_virtualisable_vl(ARM64_VEC_SME);
}

extern void sme_alloc(struct task_struct *task, bool flush);
extern unsigned int sme_get_vl(void);
extern int sme_set_current_vl(unsigned long arg);
extern int sme_get_current_vl(void);
extern void sme_suspend_exit(void);
/*
 * Return the number of bytes of memory required to store the full
 * SME-specific state for @task, given the task's currently configured
 * SME vector length.
 */
static inline size_t sme_state_size(struct task_struct const *task)
{
	unsigned int vl = task_get_sme_vl(task);
	size_t size;

	size = ZA_SIG_REGS_SIZE(sve_vq_from_vl(vl));

	if (system_supports_sme2())
		size += ZT_SIG_REG_SIZE;

	return size;
}
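
/*
 * Illustrative arithmetic (added commentary, not normative): for an
 * SME VL of 64 bytes (vq = 4), ZA needs 64 * 64 = 4096 bytes; with
 * SME2, the 512-bit ZT0 adds a further 64 bytes, 4160 bytes in total.
 */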
#else /* ! CONFIG_ARM64_SME */

static inline void sme_user_disable(void) { BUILD_BUG(); }
static inline void sme_user_enable(void) { BUILD_BUG(); }

static inline void sme_smstart_sm(void) { }
static inline void sme_smstop_sm(void) { }
static inline void sme_smstop(void) { }

static inline void sme_alloc(struct task_struct *task, bool flush) { }
static inline void sme_setup(void) { }
static inline unsigned int sme_get_vl(void) { return 0; }
static inline int sme_max_vl(void) { return 0; }
static inline int sme_max_virtualisable_vl(void) { return 0; }
static inline int sme_set_current_vl(unsigned long arg) { return -EINVAL; }
static inline int sme_get_current_vl(void) { return -EINVAL; }
static inline void sme_suspend_exit(void) { }

static inline size_t sme_state_size(struct task_struct const *task)
{
	return 0;
}

#endif /* ! CONFIG_ARM64_SME */
/* For use by EFI runtime services calls only */
extern void __efi_fpsimd_begin(void);
extern void __efi_fpsimd_end(void);

#endif /* ! __ASSEMBLY__ */

#endif /* __ASM_FP_H */