  1. /* SPDX-License-Identifier: GPL-2.0 */
  2. #ifndef __ASM_SH_UACCESS_H
  3. #define __ASM_SH_UACCESS_H
  4. #include <asm/segment.h>
  5. #include <asm/extable.h>
/*
 * A single address is OK when it lies below the current task's
 * address limit (kernel and user share one address space on SuperH).
 */
#define __addr_ok(addr) \
	((unsigned long __force)(addr) < current_thread_info()->addr_limit.seg)

/*
 * __access_ok: Check if address with size is OK or not.
 *
 * Uhhuh, this needs 33-bit arithmetic. We have a carry..
 *
 * sum := addr + size; carry? --> flag = true;
 * if (sum >= addr_limit) flag = true;
 *
 * __ao_end is the address of the LAST byte of the range
 * (addr + size - 1); the "- !!__ao_b" term makes a zero-sized range
 * degenerate to checking addr itself instead of wrapping to addr - 1.
 * "__ao_end >= __ao_a" rejects ranges whose sum carried past 2^32.
 */
#define __access_ok(addr, size) ({ \
	unsigned long __ao_a = (addr), __ao_b = (size); \
	unsigned long __ao_end = __ao_a + __ao_b - !!__ao_b; \
	__ao_end >= __ao_a && __addr_ok(__ao_end); })

/*
 * access_ok - validate a user-space range before __get_user/__put_user.
 * @type is accepted for API compatibility (VERIFY_READ/VERIFY_WRITE)
 * but not used; __chk_user_ptr() is a sparse-only annotation check.
 */
#define access_ok(type, addr, size)	\
	(__chk_user_ptr(addr),		\
	 __access_ok((unsigned long __force)(addr), (size)))

/* Highest user-space address for this task. */
#define user_addr_max()	(current_thread_info()->addr_limit.seg)
  24. /*
  25. * Uh, these should become the main single-value transfer routines ...
  26. * They automatically use the right size if we just have the right
  27. * pointer type ...
  28. *
  29. * As SuperH uses the same address space for kernel and user data, we
  30. * can just do these as direct assignments.
  31. *
  32. * Careful to not
  33. * (a) re-use the arguments for side effects (sizeof is ok)
  34. * (b) require any knowledge of processes at this stage
  35. */
/*
 * put_user/get_user: checked single-value transfer to/from user space.
 * The access width is selected by sizeof(*(ptr)); the address range is
 * validated with access_ok() by the *_check helpers below.  They
 * evaluate to 0 on success or -EFAULT on failure.
 */
#define put_user(x,ptr)		__put_user_check((x), (ptr), sizeof(*(ptr)))
#define get_user(x,ptr)		__get_user_check((x), (ptr), sizeof(*(ptr)))

/*
 * The "__xxx" versions do not do address space checking, useful when
 * doing multiple accesses to the same area (the user has to do the
 * checks by hand with "access_ok()")
 */
#define __put_user(x,ptr)	__put_user_nocheck((x), (ptr), sizeof(*(ptr)))
#define __get_user(x,ptr)	__get_user_nocheck((x), (ptr), sizeof(*(ptr)))
/*
 * Oversized dummy type for user pointers handed to the low-level
 * accessors (presumably the asm helpers in uaccess_32.h/uaccess_64.h
 * use __m() for their memory operands so the compiler treats the whole
 * region as touched — TODO confirm against those headers).
 */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
/*
 * Unchecked user-space load: no access_ok() validation, caller must
 * have done it.  Fetches sizeof(*(ptr)) bytes into @x via
 * __get_user_size() and evaluates to 0 on success or the error code
 * produced by the fixup path.  @ptr is evaluated once into __gu_addr
 * so side effects are not repeated.
 */
#define __get_user_nocheck(x,ptr,size) \
({ \
	long __gu_err; \
	unsigned long __gu_val; \
	const __typeof__(*(ptr)) __user *__gu_addr = (ptr); \
	__chk_user_ptr(ptr); \
	__get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
	(x) = (__force __typeof__(*(ptr)))__gu_val; \
	__gu_err; \
})
  57. #define __get_user_check(x,ptr,size) \
  58. ({ \
  59. long __gu_err = -EFAULT; \
  60. unsigned long __gu_val = 0; \
  61. const __typeof__(*(ptr)) *__gu_addr = (ptr); \
  62. if (likely(access_ok(VERIFY_READ, __gu_addr, (size)))) \
  63. __get_user_size(__gu_val, __gu_addr, (size), __gu_err); \
  64. (x) = (__force __typeof__(*(ptr)))__gu_val; \
  65. __gu_err; \
  66. })
  67. #define __put_user_nocheck(x,ptr,size) \
  68. ({ \
  69. long __pu_err; \
  70. __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
  71. __typeof__(*(ptr)) __pu_val = x; \
  72. __chk_user_ptr(ptr); \
  73. __put_user_size(__pu_val, __pu_addr, (size), __pu_err); \
  74. __pu_err; \
  75. })
  76. #define __put_user_check(x,ptr,size) \
  77. ({ \
  78. long __pu_err = -EFAULT; \
  79. __typeof__(*(ptr)) __user *__pu_addr = (ptr); \
  80. __typeof__(*(ptr)) __pu_val = x; \
  81. if (likely(access_ok(VERIFY_WRITE, __pu_addr, size))) \
  82. __put_user_size(__pu_val, __pu_addr, (size), \
  83. __pu_err); \
  84. __pu_err; \
  85. })
  86. #ifdef CONFIG_SUPERH32
  87. # include <asm/uaccess_32.h>
  88. #else
  89. # include <asm/uaccess_64.h>
  90. #endif
  91. extern long strncpy_from_user(char *dest, const char __user *src, long count);
  92. extern __must_check long strnlen_user(const char __user *str, long n);
  93. /* Generic arbitrary sized copy. */
  94. /* Return the number of bytes NOT copied */
  95. __kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
  96. static __always_inline unsigned long
  97. raw_copy_from_user(void *to, const void __user *from, unsigned long n)
  98. {
  99. return __copy_user(to, (__force void *)from, n);
  100. }
  101. static __always_inline unsigned long __must_check
  102. raw_copy_to_user(void __user *to, const void *from, unsigned long n)
  103. {
  104. return __copy_user((__force void *)to, from, n);
  105. }
  106. #define INLINE_COPY_FROM_USER
  107. #define INLINE_COPY_TO_USER
  108. /*
  109. * Clear the area and return remaining number of bytes
  110. * (on failure. Usually it's 0.)
  111. */
  112. __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
/*
 * clear_user - zero @n bytes of user memory at @addr.
 * Evaluates to the number of bytes NOT cleared: 0 on full success, or
 * the whole @n untouched when the access_ok() check fails or @n is 0
 * (a zero size trivially "clears" nothing and returns 0).
 */
#define clear_user(addr,n) \
({ \
	void __user * __cl_addr = (addr); \
	unsigned long __cl_size = (n); \
\
	if (__cl_size && access_ok(VERIFY_WRITE, \
		((unsigned long)(__cl_addr)), __cl_size)) \
		__cl_size = __clear_user(__cl_addr, __cl_size); \
\
	__cl_size; \
})
  124. extern void *set_exception_table_vec(unsigned int vec, void *handler);
  125. static inline void *set_exception_table_evt(unsigned int evt, void *handler)
  126. {
  127. return set_exception_table_vec(evt >> 5, handler);
  128. }
/*
 * Copy-callback pair handed to handle_unaligned_access(): @from copies
 * from user space, @to copies to user space.  Signatures match
 * raw_copy_from_user()/raw_copy_to_user() above, so both presumably
 * return the number of bytes left uncopied — confirm against callers.
 */
struct mem_access {
	unsigned long (*from)(void *dst, const void __user *src, unsigned long cnt);
	unsigned long (*to)(void __user *dst, const void *src, unsigned long cnt);
};
  133. int handle_unaligned_access(insn_size_t instruction, struct pt_regs *regs,
  134. struct mem_access *ma, int, unsigned long address);
  135. #endif /* __ASM_SH_UACCESS_H */