/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Normally compiler builtins are used, but sometimes the compiler calls out
 * of line code. Based on asm-i386/string.h.
 *
 * This assembly file is re-written from the memmove_64.c file.
 *	- Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
 */
#include <linux/linkage.h>
#include <asm/cpufeatures.h>
#include <asm/alternative-asm.h>
#include <asm/export.h>

#undef memmove
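/*
 * The #undef above matters because the string headers may define
 * memmove as a macro (e.g. to redirect callers under instrumentation);
 * the symbol defined below needs the plain name.
 */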

/*
 * Implement memmove(). This can handle overlap between source and
 * destination.
 *
 * Input:
 * rdi: dest
 * rsi: src
 * rdx: count
 *
 * Output:
 * rax: dest
 */
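
/*
 * For reference, a minimal C sketch of the overlap rule the code below
 * implements (an illustration only, not the kernel's memmove_64.c; the
 * name memmove_sketch is made up for this comment):
 *
 *	#include <stddef.h>
 *
 *	void *memmove_sketch(void *dest, const void *src, size_t count)
 *	{
 *		unsigned char *d = dest;
 *		const unsigned char *s = src;
 *
 *		if (d <= s || d >= s + count) {
 *			while (count--)		// forward copy is safe
 *				*d++ = *s++;
 *		} else {
 *			while (count--)		// dest inside src: copy backward
 *				d[count] = s[count];
 *		}
 *		return dest;
 *	}
 */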
.weak memmove

.p2align 4, 0x90
memmove:
ENTRY(__memmove)
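
	/*
	 * memmove is weak so that an instrumented implementation (for
	 * example KASAN's) can supply the strong definition of the
	 * name; __memmove stays available as the raw copy underneath.
	 */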

	/*
	 * Copies of 32 bytes or more are handled by the loops below;
	 * anything shorter jumps straight to the tail code at 1:.
	 */
	mov %rdi, %rax	/* return value is dest */
	cmp $0x20, %rdx
	jb 1f

	/* Decide forward/backward copy mode */
	cmp %rdi, %rsi
	jge .Lmemmove_begin_forward
	mov %rsi, %r8
	add %rdx, %r8
	cmp %rdi, %r8
	jg 2f
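
	/*
	 * Forward copy is safe when src >= dest (the jge above) or
	 * when the regions are disjoint (src + count <= dest, the
	 * fall-through here). Only src < dest < src + count takes the
	 * backward path at 2:, so bytes are never overwritten before
	 * they are read.
	 */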

.Lmemmove_begin_forward:
	ALTERNATIVE "", "movq %rdx, %rcx; rep movsb; retq", X86_FEATURE_ERMS
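
	/*
	 * On CPUs with Enhanced REP MOVSB (X86_FEATURE_ERMS) the
	 * ALTERNATIVE above is patched in at boot: a single rep movsb
	 * does the whole forward copy and returns, and none of the
	 * open-coded paths below run.
	 */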

	/*
	 * The movsq instruction has a high startup latency, so we
	 * handle small sizes with general-purpose registers instead.
	 */
	cmp $680, %rdx
	jb 3f
	/*
	 * movsq is only a win when source and destination are equally
	 * aligned (the low bytes of the two pointers match).
	 */
	cmpb %dil, %sil
	je 4f
3:
	sub $0x20, %rdx
	/*
	 * We gobble 32 bytes forward in each loop.
	 */
5:
	sub $0x20, %rdx
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq 2*8(%rsi), %r9
	movq 3*8(%rsi), %r8
	leaq 4*8(%rsi), %rsi

	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, 2*8(%rdi)
	movq %r8, 3*8(%rdi)
	leaq 4*8(%rdi), %rdi
	jae 5b
	addq $0x20, %rdx
	jmp 1f
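
	/*
	 * The extra sub at 3: biases %rdx by 0x20, so the sub at the
	 * top of the loop sets CF on the iteration that handles the
	 * last full 32-byte chunk (mov and lea leave the flags
	 * untouched). jae keeps looping while full chunks remain, and
	 * the addq afterwards restores the 0..31-byte remainder for
	 * the shared tail code at 1:.
	 */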

	/*
	 * Handle data forward by movsq.
	 */
	.p2align 4
4:
	movq %rdx, %rcx
	movq -8(%rsi, %rdx), %r11
	lea -8(%rdi, %rdx), %r10
	shrq $3, %rcx
	rep movsq
	movq %r11, (%r10)
	jmp 13f
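
	/*
	 * rep movsq moves count/8 quadwords, which can leave up to 7
	 * tail bytes uncopied. The quadword covering those bytes was
	 * loaded into %r11 before the copy, because an overlapping
	 * rep movsq may clobber the source tail, and is stored last
	 * through %r10.
	 */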
.Lmemmove_end_forward:

	/*
	 * Handle data backward by movsq.
	 */
	.p2align 4
7:
	movq %rdx, %rcx
	movq (%rsi), %r11
	movq %rdi, %r10
	leaq -8(%rsi, %rdx), %rsi
	leaq -8(%rdi, %rdx), %rdi
	shrq $3, %rcx
	std
	rep movsq
	cld
	movq %r11, (%r10)
	jmp 13f
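
	/*
	 * This mirrors 4: above. std makes rep movsq walk from high
	 * addresses to low, cld restores the direction flag, and the
	 * first quadword of the source (saved in %r11 before any
	 * store) is written to the head of the destination last to
	 * cover the count%8 remainder.
	 */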

	/*
	 * Start to prepare for backward copy.
	 */
	.p2align 4
2:
	cmp $680, %rdx
	jb 6f
	cmp %dil, %sil
	je 7b
6:
	/*
	 * Advance both pointers to the tails of the regions.
	 */
	addq %rdx, %rsi
	addq %rdx, %rdi
	subq $0x20, %rdx
	/*
	 * We gobble 32 bytes backward in each loop.
	 */
8:
	subq $0x20, %rdx
	movq -1*8(%rsi), %r11
	movq -2*8(%rsi), %r10
	movq -3*8(%rsi), %r9
	movq -4*8(%rsi), %r8
	leaq -4*8(%rsi), %rsi

	movq %r11, -1*8(%rdi)
	movq %r10, -2*8(%rdi)
	movq %r9, -3*8(%rdi)
	movq %r8, -4*8(%rdi)
	leaq -4*8(%rdi), %rdi
	jae 8b
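	/*
	 * Same biased-%rdx/CF termination as the forward loop at 5:.
	 */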
	/*
	 * Step both pointers back to the heads of the regions.
	 */
	addq $0x20, %rdx
	subq %rdx, %rsi
	subq %rdx, %rdi
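
	/*
	 * %rsi/%rdi point at the lowest byte the loop copied; stepping
	 * them back by the 0..31-byte remainder lets the shared tail
	 * code at 1: finish from the true start. This is safe: every
	 * write so far landed at dest + %rdx or above, past the source
	 * bytes the tail still has to read.
	 */
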
1:
	cmpq $16, %rdx
	jb 9f
	/*
	 * Move 16 to 31 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq 1*8(%rsi), %r10
	movq -2*8(%rsi, %rdx), %r9
	movq -1*8(%rsi, %rdx), %r8
	movq %r11, 0*8(%rdi)
	movq %r10, 1*8(%rdi)
	movq %r9, -2*8(%rdi, %rdx)
	movq %r8, -1*8(%rdi, %rdx)
	jmp 13f
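
	/*
	 * This and the smaller cases below load both ends of the
	 * region into registers before storing anything, so the two
	 * possibly overlapping store pairs always write the original
	 * bytes.
	 */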
	.p2align 4
9:
	cmpq $8, %rdx
	jb 10f
	/*
	 * Move 8 to 15 bytes.
	 */
	movq 0*8(%rsi), %r11
	movq -1*8(%rsi, %rdx), %r10
	movq %r11, 0*8(%rdi)
	movq %r10, -1*8(%rdi, %rdx)
	jmp 13f
10:
	cmpq $4, %rdx
	jb 11f
	/*
	 * Move 4 to 7 bytes.
	 */
	movl (%rsi), %r11d
	movl -4(%rsi, %rdx), %r10d
	movl %r11d, (%rdi)
	movl %r10d, -4(%rdi, %rdx)
	jmp 13f
11:
	cmp $2, %rdx
	jb 12f
	/*
	 * Move 2 or 3 bytes.
	 */
	movw (%rsi), %r11w
	movw -2(%rsi, %rdx), %r10w
	movw %r11w, (%rdi)
	movw %r10w, -2(%rdi, %rdx)
	jmp 13f
12:
	cmp $1, %rdx
	jb 13f
	/*
	 * Move a single byte.
	 */
	movb (%rsi), %r11b
	movb %r11b, (%rdi)
13:
	retq
ENDPROC(__memmove)
ENDPROC(memmove)
EXPORT_SYMBOL(__memmove)
EXPORT_SYMBOL(memmove)