usercopy_32.c

// SPDX-License-Identifier: GPL-2.0
/*
 * User address space access functions.
 * The non inlined parts of asm-i386/uaccess.h are here.
 *
 * Copyright 1997 Andi Kleen <ak@muc.de>
 * Copyright 1997 Linus Torvalds
 */
#include <linux/export.h>
#include <linux/uaccess.h>
#include <asm/mmx.h>
#include <asm/asm.h>

#ifdef CONFIG_X86_INTEL_USERCOPY
/*
 * Alignment at which movsl is preferred for bulk memory copies.
 */
struct movsl_mask movsl_mask __read_mostly;
#endif

static inline int __movsl_is_ok(unsigned long a1, unsigned long a2, unsigned long n)
{
#ifdef CONFIG_X86_INTEL_USERCOPY
        if (n >= 64 && ((a1 ^ a2) & movsl_mask.mask))
                return 0;
#endif
        return 1;
}
#define movsl_is_ok(a1, a2, n) \
        __movsl_is_ok((unsigned long)(a1), (unsigned long)(a2), (n))

/*
 * Zero Userspace
 */

#define __do_clear_user(addr,size) \
do { \
        int __d0; \
        might_fault(); \
        __asm__ __volatile__( \
                ASM_STAC "\n" \
                "0: rep; stosl\n" \
                " movl %2,%0\n" \
                "1: rep; stosb\n" \
                "2: " ASM_CLAC "\n" \
                ".section .fixup,\"ax\"\n" \
                "3: lea 0(%2,%0,4),%0\n" \
                " jmp 2b\n" \
                ".previous\n" \
                _ASM_EXTABLE(0b,3b) \
                _ASM_EXTABLE(1b,2b) \
                : "=&c"(size), "=&D" (__d0) \
                : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \
} while (0)

/**
 * clear_user: - Zero a block of memory in user space.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
clear_user(void __user *to, unsigned long n)
{
        might_fault();
        if (access_ok(VERIFY_WRITE, to, n))
                __do_clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(clear_user);
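
/*
 * Illustrative sketch (not part of the original file): one way a caller
 * might use clear_user() and act on its return value. The helper name
 * zero_user_buffer() is hypothetical; -EFAULT comes from <linux/errno.h>.
 * Wrapped in #if 0 so it serves as documentation only and is never compiled.
 */
#if 0
static int zero_user_buffer(void __user *buf, unsigned long len)
{
        unsigned long left;

        /* clear_user() performs its own access_ok() check and returns the
         * number of bytes it could NOT zero; zero means full success. */
        left = clear_user(buf, len);

        return left ? -EFAULT : 0;
}
#endif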
/**
 * __clear_user: - Zero a block of memory in user space, with less checking.
 * @to: Destination address, in user space.
 * @n: Number of bytes to zero.
 *
 * Zero a block of memory in user space. Caller must check
 * the specified block with access_ok() before calling this function.
 *
 * Returns number of bytes that could not be cleared.
 * On success, this will be zero.
 */
unsigned long
__clear_user(void __user *to, unsigned long n)
{
        __do_clear_user(to, n);
        return n;
}
EXPORT_SYMBOL(__clear_user);
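
/*
 * Illustrative sketch (not part of the original file): __clear_user() skips
 * the access_ok() check, so the caller must validate the range first. The
 * helper name zero_checked_range() is hypothetical; #if 0 keeps it from
 * being compiled.
 */
#if 0
static unsigned long zero_checked_range(void __user *buf, unsigned long len)
{
        if (!access_ok(VERIFY_WRITE, buf, len))
                return len;                     /* nothing was cleared */
        return __clear_user(buf, len);          /* bytes left uncleared */
}
#endif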

#ifdef CONFIG_X86_INTEL_USERCOPY
static unsigned long
__copy_user_intel(void __user *to, const void *from, unsigned long size)
{
        int d0, d1;
        __asm__ __volatile__(
                " .align 2,0x90\n"
                "1: movl 32(%4), %%eax\n"
                " cmpl $67, %0\n"
                " jbe 3f\n"
                "2: movl 64(%4), %%eax\n"
                " .align 2,0x90\n"
                "3: movl 0(%4), %%eax\n"
                "4: movl 4(%4), %%edx\n"
                "5: movl %%eax, 0(%3)\n"
                "6: movl %%edx, 4(%3)\n"
                "7: movl 8(%4), %%eax\n"
                "8: movl 12(%4),%%edx\n"
                "9: movl %%eax, 8(%3)\n"
                "10: movl %%edx, 12(%3)\n"
                "11: movl 16(%4), %%eax\n"
                "12: movl 20(%4), %%edx\n"
                "13: movl %%eax, 16(%3)\n"
                "14: movl %%edx, 20(%3)\n"
                "15: movl 24(%4), %%eax\n"
                "16: movl 28(%4), %%edx\n"
                "17: movl %%eax, 24(%3)\n"
                "18: movl %%edx, 28(%3)\n"
                "19: movl 32(%4), %%eax\n"
                "20: movl 36(%4), %%edx\n"
                "21: movl %%eax, 32(%3)\n"
                "22: movl %%edx, 36(%3)\n"
                "23: movl 40(%4), %%eax\n"
                "24: movl 44(%4), %%edx\n"
                "25: movl %%eax, 40(%3)\n"
                "26: movl %%edx, 44(%3)\n"
                "27: movl 48(%4), %%eax\n"
                "28: movl 52(%4), %%edx\n"
                "29: movl %%eax, 48(%3)\n"
                "30: movl %%edx, 52(%3)\n"
                "31: movl 56(%4), %%eax\n"
                "32: movl 60(%4), %%edx\n"
                "33: movl %%eax, 56(%3)\n"
                "34: movl %%edx, 60(%3)\n"
                " addl $-64, %0\n"
                " addl $64, %4\n"
                " addl $64, %3\n"
                " cmpl $63, %0\n"
                " ja 1b\n"
                "35: movl %0, %%eax\n"
                " shrl $2, %0\n"
                " andl $3, %%eax\n"
                " cld\n"
                "99: rep; movsl\n"
                "36: movl %%eax, %0\n"
                "37: rep; movsb\n"
                "100:\n"
                ".section .fixup,\"ax\"\n"
                "101: lea 0(%%eax,%0,4),%0\n"
                " jmp 100b\n"
                ".previous\n"
                _ASM_EXTABLE(1b,100b)
                _ASM_EXTABLE(2b,100b)
                _ASM_EXTABLE(3b,100b)
                _ASM_EXTABLE(4b,100b)
                _ASM_EXTABLE(5b,100b)
                _ASM_EXTABLE(6b,100b)
                _ASM_EXTABLE(7b,100b)
                _ASM_EXTABLE(8b,100b)
                _ASM_EXTABLE(9b,100b)
                _ASM_EXTABLE(10b,100b)
                _ASM_EXTABLE(11b,100b)
                _ASM_EXTABLE(12b,100b)
                _ASM_EXTABLE(13b,100b)
                _ASM_EXTABLE(14b,100b)
                _ASM_EXTABLE(15b,100b)
                _ASM_EXTABLE(16b,100b)
                _ASM_EXTABLE(17b,100b)
                _ASM_EXTABLE(18b,100b)
                _ASM_EXTABLE(19b,100b)
                _ASM_EXTABLE(20b,100b)
                _ASM_EXTABLE(21b,100b)
                _ASM_EXTABLE(22b,100b)
                _ASM_EXTABLE(23b,100b)
                _ASM_EXTABLE(24b,100b)
                _ASM_EXTABLE(25b,100b)
                _ASM_EXTABLE(26b,100b)
                _ASM_EXTABLE(27b,100b)
                _ASM_EXTABLE(28b,100b)
                _ASM_EXTABLE(29b,100b)
                _ASM_EXTABLE(30b,100b)
                _ASM_EXTABLE(31b,100b)
                _ASM_EXTABLE(32b,100b)
                _ASM_EXTABLE(33b,100b)
                _ASM_EXTABLE(34b,100b)
                _ASM_EXTABLE(35b,100b)
                _ASM_EXTABLE(36b,100b)
                _ASM_EXTABLE(37b,100b)
                _ASM_EXTABLE(99b,101b)
                : "=&c"(size), "=&D" (d0), "=&S" (d1)
                : "1"(to), "2"(from), "0"(size)
                : "eax", "edx", "memory");
        return size;
}

static unsigned long __copy_user_intel_nocache(void *to,
                                const void __user *from, unsigned long size)
{
        int d0, d1;
        __asm__ __volatile__(
                " .align 2,0x90\n"
                "0: movl 32(%4), %%eax\n"
                " cmpl $67, %0\n"
                " jbe 2f\n"
                "1: movl 64(%4), %%eax\n"
                " .align 2,0x90\n"
                "2: movl 0(%4), %%eax\n"
                "21: movl 4(%4), %%edx\n"
                " movnti %%eax, 0(%3)\n"
                " movnti %%edx, 4(%3)\n"
                "3: movl 8(%4), %%eax\n"
                "31: movl 12(%4),%%edx\n"
                " movnti %%eax, 8(%3)\n"
                " movnti %%edx, 12(%3)\n"
                "4: movl 16(%4), %%eax\n"
                "41: movl 20(%4), %%edx\n"
                " movnti %%eax, 16(%3)\n"
                " movnti %%edx, 20(%3)\n"
                "10: movl 24(%4), %%eax\n"
                "51: movl 28(%4), %%edx\n"
                " movnti %%eax, 24(%3)\n"
                " movnti %%edx, 28(%3)\n"
                "11: movl 32(%4), %%eax\n"
                "61: movl 36(%4), %%edx\n"
                " movnti %%eax, 32(%3)\n"
                " movnti %%edx, 36(%3)\n"
                "12: movl 40(%4), %%eax\n"
                "71: movl 44(%4), %%edx\n"
                " movnti %%eax, 40(%3)\n"
                " movnti %%edx, 44(%3)\n"
                "13: movl 48(%4), %%eax\n"
                "81: movl 52(%4), %%edx\n"
                " movnti %%eax, 48(%3)\n"
                " movnti %%edx, 52(%3)\n"
                "14: movl 56(%4), %%eax\n"
                "91: movl 60(%4), %%edx\n"
                " movnti %%eax, 56(%3)\n"
                " movnti %%edx, 60(%3)\n"
                " addl $-64, %0\n"
                " addl $64, %4\n"
                " addl $64, %3\n"
                " cmpl $63, %0\n"
                " ja 0b\n"
                " sfence \n"
                "5: movl %0, %%eax\n"
                " shrl $2, %0\n"
                " andl $3, %%eax\n"
                " cld\n"
                "6: rep; movsl\n"
                " movl %%eax,%0\n"
                "7: rep; movsb\n"
                "8:\n"
                ".section .fixup,\"ax\"\n"
                "9: lea 0(%%eax,%0,4),%0\n"
                "16: jmp 8b\n"
                ".previous\n"
                _ASM_EXTABLE(0b,16b)
                _ASM_EXTABLE(1b,16b)
                _ASM_EXTABLE(2b,16b)
                _ASM_EXTABLE(21b,16b)
                _ASM_EXTABLE(3b,16b)
                _ASM_EXTABLE(31b,16b)
                _ASM_EXTABLE(4b,16b)
                _ASM_EXTABLE(41b,16b)
                _ASM_EXTABLE(10b,16b)
                _ASM_EXTABLE(51b,16b)
                _ASM_EXTABLE(11b,16b)
                _ASM_EXTABLE(61b,16b)
                _ASM_EXTABLE(12b,16b)
                _ASM_EXTABLE(71b,16b)
                _ASM_EXTABLE(13b,16b)
                _ASM_EXTABLE(81b,16b)
                _ASM_EXTABLE(14b,16b)
                _ASM_EXTABLE(91b,16b)
                _ASM_EXTABLE(6b,9b)
                _ASM_EXTABLE(7b,16b)
                : "=&c"(size), "=&D" (d0), "=&S" (d1)
                : "1"(to), "2"(from), "0"(size)
                : "eax", "edx", "memory");
        return size;
}

#else
/*
 * Leave these declared but undefined.  There should not be any
 * references to them.
 */
unsigned long __copy_user_intel(void __user *to, const void *from,
                                unsigned long size);
#endif /* CONFIG_X86_INTEL_USERCOPY */

/* Generic arbitrary sized copy. */
#define __copy_user(to, from, size) \
do { \
        int __d0, __d1, __d2; \
        __asm__ __volatile__( \
                " cmp $7,%0\n" \
                " jbe 1f\n" \
                " movl %1,%0\n" \
                " negl %0\n" \
                " andl $7,%0\n" \
                " subl %0,%3\n" \
                "4: rep; movsb\n" \
                " movl %3,%0\n" \
                " shrl $2,%0\n" \
                " andl $3,%3\n" \
                " .align 2,0x90\n" \
                "0: rep; movsl\n" \
                " movl %3,%0\n" \
                "1: rep; movsb\n" \
                "2:\n" \
                ".section .fixup,\"ax\"\n" \
                "5: addl %3,%0\n" \
                " jmp 2b\n" \
                "3: lea 0(%3,%0,4),%0\n" \
                " jmp 2b\n" \
                ".previous\n" \
                _ASM_EXTABLE(4b,5b) \
                _ASM_EXTABLE(0b,3b) \
                _ASM_EXTABLE(1b,2b) \
                : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \
                : "3"(size), "0"(size), "1"(to), "2"(from) \
                : "memory"); \
} while (0)

unsigned long __copy_user_ll(void *to, const void *from, unsigned long n)
{
        __uaccess_begin_nospec();
        if (movsl_is_ok(to, from, n))
                __copy_user(to, from, n);
        else
                n = __copy_user_intel(to, from, n);
        __uaccess_end();
        return n;
}
EXPORT_SYMBOL(__copy_user_ll);

unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *from,
                                        unsigned long n)
{
        __uaccess_begin_nospec();
#ifdef CONFIG_X86_INTEL_USERCOPY
        if (n > 64 && static_cpu_has(X86_FEATURE_XMM2))
                n = __copy_user_intel_nocache(to, from, n);
        else
                __copy_user(to, from, n);
#else
        __copy_user(to, from, n);
#endif
        __uaccess_end();
        return n;
}
EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);