avx2.c
/* -*- linux-c -*- ------------------------------------------------------- *
 *
 *   Copyright (C) 2012 Intel Corporation
 *   Author: Yuanhan Liu <yuanhan.liu@linux.intel.com>
 *
 *   Based on sse2.c: Copyright 2002 H. Peter Anvin - All Rights Reserved
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation, Inc., 59 Temple Place Ste 330,
 *   Boston MA 02111-1307, USA; either version 2 of the License, or
 *   (at your option) any later version; incorporated herein by reference.
 *
 * ----------------------------------------------------------------------- */

/*
 * AVX2 implementation of RAID-6 syndrome functions
 *
 */

#ifdef CONFIG_AS_AVX2

#include <linux/raid/pq.h>
#include "x86.h"

static const struct raid6_avx2_constants {
	u64 x1d[4];
} raid6_avx2_constants __aligned(32) = {
	{ 0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,
	  0x1d1d1d1d1d1d1d1dULL, 0x1d1d1d1d1d1d1d1dULL,},
};
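
/*
 * Note on the constant: 0x1d is the reduction byte of the RAID-6
 * generator polynomial x^8 + x^4 + x^3 + x^2 + 1.  Multiplying a
 * GF(2^8) element by 2 is a left shift by one, followed by an XOR
 * with 0x1d when the bit shifted out was set.  Per byte this is
 * roughly (illustrative sketch, not part of the hot path):
 *
 *	mask = (v & 0x80) ? 0xff : 0x00;
 *	v    = (u8)(v << 1) ^ (mask & 0x1d);
 *
 * The vpcmpgtb/vpaddb/vpand/vpxor sequences below perform exactly
 * this on 32 bytes at a time per ymm register: vpcmpgtb against a
 * zero register builds the 0xff/0x00 mask from the sign bit, vpaddb
 * of a register with itself is the byte-wise shift, vpand selects
 * 0x1d where needed, and vpxor folds it in.
 */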
static int raid6_have_avx2(void)
{
	return boot_cpu_has(X86_FEATURE_AVX2) && boot_cpu_has(X86_FEATURE_AVX);
}

/*
 * Plain AVX2 implementation
 */
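/*
 * gen_syndrome computes both parities over all data disks for one
 * stripe: P is the plain XOR of the data blocks, and Q is evaluated
 * Horner-style from the highest data disk down (Q = 2*Q + D[z] in
 * GF(2^8)), so disk z ends up weighted by 2^z.  Register roles in
 * this variant: ymm0 holds the 0x1d mask, ymm3 is the zero register,
 * ymm2 accumulates P, ymm4 accumulates Q, ymm5 is the high-bit mask
 * and ymm6 holds the data block being folded in.
 */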
static void raid6_avx21_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
	asm volatile("vpxor %ymm3,%ymm3,%ymm3");	/* Zero temp */

	for (d = 0; d < bytes; d += 32) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
		asm volatile("prefetchnta %0" : : "m" (dptr[z0-1][d]));
		asm volatile("vmovdqa %ymm2,%ymm4");/* Q[0] */
		asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z0-1][d]));
		for (z = z0-2; z >= 0; z--) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm6,%ymm2,%ymm2");
			asm volatile("vpxor %ymm6,%ymm4,%ymm4");
			asm volatile("vmovdqa %0,%%ymm6" : : "m" (dptr[z][d]));
		}
		asm volatile("vpcmpgtb %ymm4,%ymm3,%ymm5");
		asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
		asm volatile("vpand %ymm0,%ymm5,%ymm5");
		asm volatile("vpxor %ymm5,%ymm4,%ymm4");
		asm volatile("vpxor %ymm6,%ymm2,%ymm2");
		asm volatile("vpxor %ymm6,%ymm4,%ymm4");

		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vpxor %ymm2,%ymm2,%ymm2");
		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vpxor %ymm4,%ymm4,%ymm4");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
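
/*
 * xor_syndrome updates existing P/Q in place from the data blocks in
 * the range [start, stop] only.  P simply gets those blocks XOR'd in.
 * For Q, the same Horner loop runs from stop down to start, and the
 * trailing "left side" loop keeps multiplying the accumulator by 2
 * once per remaining lower-numbered disk, so each block's contribution
 * lands at its correct power of the generator before being XOR'd into
 * the stored Q.
 */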
static void raid6_avx21_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 32) {
		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
		}
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
		}
		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
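
/*
 * Descriptor exported to the generic RAID-6 code; the algorithm
 * selection in lib/raid6/algos.c benchmarks the valid routines at
 * boot and picks the fastest one.
 */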
const struct raid6_calls raid6_avx2x1 = {
	raid6_avx21_gen_syndrome,
	raid6_avx21_xor_syndrome,
	raid6_have_avx2,
	"avx2x1",
	1			/* Has cache hints */
};

/*
 * Unrolled-by-2 AVX2 implementation
 */
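/*
 * Same algorithm as above, but each iteration handles 64 bytes using
 * two independent accumulator pairs (P in ymm2/ymm3, Q in ymm4/ymm6),
 * which interleaves two dependency chains and hides instruction
 * latency better than the single-register variant.
 */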
static void raid6_avx22_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
	asm volatile("vpxor %ymm1,%ymm1,%ymm1"); /* Zero temp */

	/* We uniformly assume a single prefetch covers at least 32 bytes */
	for (d = 0; d < bytes; d += 64) {
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d]));
		asm volatile("prefetchnta %0" : : "m" (dptr[z0][d+32]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (dptr[z0][d]));/* P[0] */
		asm volatile("vmovdqa %0,%%ymm3" : : "m" (dptr[z0][d+32]));/* P[1] */
		asm volatile("vmovdqa %ymm2,%ymm4"); /* Q[0] */
		asm volatile("vmovdqa %ymm3,%ymm6"); /* Q[1] */
		for (z = z0-1; z >= 0; z--) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
			asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
			asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
		}
		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
static void raid6_avx22_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 64) {
		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
		asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
		asm volatile("vpxor %ymm6,%ymm3,%ymm3");
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
			asm volatile("vmovdqa %0,%%ymm7"
				     :: "m" (dptr[z][d+32]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
		}
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
		}
		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
		asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
		/* Don't use movntdq for r/w memory area < cache line */
		asm volatile("vmovdqa %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vmovdqa %%ymm6,%0" : "=m" (q[d+32]));
		asm volatile("vmovdqa %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vmovdqa %%ymm3,%0" : "=m" (p[d+32]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
const struct raid6_calls raid6_avx2x2 = {
	raid6_avx22_gen_syndrome,
	raid6_avx22_xor_syndrome,
	raid6_have_avx2,
	"avx2x2",
	1			/* Has cache hints */
};

#ifdef CONFIG_X86_64

/*
 * Unrolled-by-4 AVX2 implementation
 */
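/*
 * This variant needs ymm8-ymm15, which only exist in 64-bit mode,
 * hence the CONFIG_X86_64 guard.  Each iteration covers 128 bytes
 * with four P accumulators (ymm2/ymm3/ymm10/ymm11) and four Q
 * accumulators (ymm4/ymm6/ymm12/ymm14).  In gen_syndrome the
 * accumulators are zeroed once up front and again right after each
 * non-temporal store, so they carry no state between stripes.
 */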
static void raid6_avx24_gen_syndrome(int disks, size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = disks - 3;		/* Highest data disk */
	p = dptr[z0+1];		/* XOR parity */
	q = dptr[z0+2];		/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" : : "m" (raid6_avx2_constants.x1d[0]));
	asm volatile("vpxor %ymm1,%ymm1,%ymm1");	/* Zero temp */
	asm volatile("vpxor %ymm2,%ymm2,%ymm2");	/* P[0] */
	asm volatile("vpxor %ymm3,%ymm3,%ymm3");	/* P[1] */
	asm volatile("vpxor %ymm4,%ymm4,%ymm4");	/* Q[0] */
	asm volatile("vpxor %ymm6,%ymm6,%ymm6");	/* Q[1] */
	asm volatile("vpxor %ymm10,%ymm10,%ymm10");	/* P[2] */
	asm volatile("vpxor %ymm11,%ymm11,%ymm11");	/* P[3] */
	asm volatile("vpxor %ymm12,%ymm12,%ymm12");	/* Q[2] */
	asm volatile("vpxor %ymm14,%ymm14,%ymm14");	/* Q[3] */

	for (d = 0; d < bytes; d += 128) {
		for (z = z0; z >= 0; z--) {
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+32]));
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+64]));
			asm volatile("prefetchnta %0" : : "m" (dptr[z][d+96]));
			asm volatile("vpcmpgtb %ymm4,%ymm1,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm1,%ymm7");
			asm volatile("vpcmpgtb %ymm12,%ymm1,%ymm13");
			asm volatile("vpcmpgtb %ymm14,%ymm1,%ymm15");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpand %ymm0,%ymm13,%ymm13");
			asm volatile("vpand %ymm0,%ymm15,%ymm15");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
			asm volatile("vmovdqa %0,%%ymm5" : : "m" (dptr[z][d]));
			asm volatile("vmovdqa %0,%%ymm7" : : "m" (dptr[z][d+32]));
			asm volatile("vmovdqa %0,%%ymm13" : : "m" (dptr[z][d+64]));
			asm volatile("vmovdqa %0,%%ymm15" : : "m" (dptr[z][d+96]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
			asm volatile("vpxor %ymm13,%ymm10,%ymm10");
			asm volatile("vpxor %ymm15,%ymm11,%ymm11");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
		}
		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vpxor %ymm2,%ymm2,%ymm2");
		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
		asm volatile("vpxor %ymm3,%ymm3,%ymm3");
		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
		asm volatile("vpxor %ymm10,%ymm10,%ymm10");
		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
		asm volatile("vpxor %ymm11,%ymm11,%ymm11");
		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vpxor %ymm4,%ymm4,%ymm4");
		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
		asm volatile("vpxor %ymm6,%ymm6,%ymm6");
		asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
		asm volatile("vpxor %ymm12,%ymm12,%ymm12");
		asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
		asm volatile("vpxor %ymm14,%ymm14,%ymm14");
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
static void raid6_avx24_xor_syndrome(int disks, int start, int stop,
				     size_t bytes, void **ptrs)
{
	u8 **dptr = (u8 **)ptrs;
	u8 *p, *q;
	int d, z, z0;

	z0 = stop;		/* P/Q right side optimization */
	p = dptr[disks-2];	/* XOR parity */
	q = dptr[disks-1];	/* RS syndrome */

	kernel_fpu_begin();

	asm volatile("vmovdqa %0,%%ymm0" :: "m" (raid6_avx2_constants.x1d[0]));

	for (d = 0 ; d < bytes ; d += 128) {
		asm volatile("vmovdqa %0,%%ymm4" :: "m" (dptr[z0][d]));
		asm volatile("vmovdqa %0,%%ymm6" :: "m" (dptr[z0][d+32]));
		asm volatile("vmovdqa %0,%%ymm12" :: "m" (dptr[z0][d+64]));
		asm volatile("vmovdqa %0,%%ymm14" :: "m" (dptr[z0][d+96]));
		asm volatile("vmovdqa %0,%%ymm2" : : "m" (p[d]));
		asm volatile("vmovdqa %0,%%ymm3" : : "m" (p[d+32]));
		asm volatile("vmovdqa %0,%%ymm10" : : "m" (p[d+64]));
		asm volatile("vmovdqa %0,%%ymm11" : : "m" (p[d+96]));
		asm volatile("vpxor %ymm4,%ymm2,%ymm2");
		asm volatile("vpxor %ymm6,%ymm3,%ymm3");
		asm volatile("vpxor %ymm12,%ymm10,%ymm10");
		asm volatile("vpxor %ymm14,%ymm11,%ymm11");
		/* P/Q data pages */
		for (z = z0-1 ; z >= start ; z--) {
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d]));
			asm volatile("prefetchnta %0" :: "m" (dptr[z][d+64]));
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
			asm volatile("vpxor %ymm13,%ymm13,%ymm13");
			asm volatile("vpxor %ymm15,%ymm15,%ymm15");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
			asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
			asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpand %ymm0,%ymm13,%ymm13");
			asm volatile("vpand %ymm0,%ymm15,%ymm15");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
			asm volatile("vmovdqa %0,%%ymm5" :: "m" (dptr[z][d]));
			asm volatile("vmovdqa %0,%%ymm7"
				     :: "m" (dptr[z][d+32]));
			asm volatile("vmovdqa %0,%%ymm13"
				     :: "m" (dptr[z][d+64]));
			asm volatile("vmovdqa %0,%%ymm15"
				     :: "m" (dptr[z][d+96]));
			asm volatile("vpxor %ymm5,%ymm2,%ymm2");
			asm volatile("vpxor %ymm7,%ymm3,%ymm3");
			asm volatile("vpxor %ymm13,%ymm10,%ymm10");
			asm volatile("vpxor %ymm15,%ymm11,%ymm11");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
		}
		asm volatile("prefetchnta %0" :: "m" (q[d]));
		asm volatile("prefetchnta %0" :: "m" (q[d+64]));
		/* P/Q left side optimization */
		for (z = start-1 ; z >= 0 ; z--) {
			asm volatile("vpxor %ymm5,%ymm5,%ymm5");
			asm volatile("vpxor %ymm7,%ymm7,%ymm7");
			asm volatile("vpxor %ymm13,%ymm13,%ymm13");
			asm volatile("vpxor %ymm15,%ymm15,%ymm15");
			asm volatile("vpcmpgtb %ymm4,%ymm5,%ymm5");
			asm volatile("vpcmpgtb %ymm6,%ymm7,%ymm7");
			asm volatile("vpcmpgtb %ymm12,%ymm13,%ymm13");
			asm volatile("vpcmpgtb %ymm14,%ymm15,%ymm15");
			asm volatile("vpaddb %ymm4,%ymm4,%ymm4");
			asm volatile("vpaddb %ymm6,%ymm6,%ymm6");
			asm volatile("vpaddb %ymm12,%ymm12,%ymm12");
			asm volatile("vpaddb %ymm14,%ymm14,%ymm14");
			asm volatile("vpand %ymm0,%ymm5,%ymm5");
			asm volatile("vpand %ymm0,%ymm7,%ymm7");
			asm volatile("vpand %ymm0,%ymm13,%ymm13");
			asm volatile("vpand %ymm0,%ymm15,%ymm15");
			asm volatile("vpxor %ymm5,%ymm4,%ymm4");
			asm volatile("vpxor %ymm7,%ymm6,%ymm6");
			asm volatile("vpxor %ymm13,%ymm12,%ymm12");
			asm volatile("vpxor %ymm15,%ymm14,%ymm14");
		}
		asm volatile("vmovntdq %%ymm2,%0" : "=m" (p[d]));
		asm volatile("vmovntdq %%ymm3,%0" : "=m" (p[d+32]));
		asm volatile("vmovntdq %%ymm10,%0" : "=m" (p[d+64]));
		asm volatile("vmovntdq %%ymm11,%0" : "=m" (p[d+96]));
		asm volatile("vpxor %0,%%ymm4,%%ymm4" : : "m" (q[d]));
		asm volatile("vpxor %0,%%ymm6,%%ymm6" : : "m" (q[d+32]));
		asm volatile("vpxor %0,%%ymm12,%%ymm12" : : "m" (q[d+64]));
		asm volatile("vpxor %0,%%ymm14,%%ymm14" : : "m" (q[d+96]));
		asm volatile("vmovntdq %%ymm4,%0" : "=m" (q[d]));
		asm volatile("vmovntdq %%ymm6,%0" : "=m" (q[d+32]));
		asm volatile("vmovntdq %%ymm12,%0" : "=m" (q[d+64]));
		asm volatile("vmovntdq %%ymm14,%0" : "=m" (q[d+96]));
	}

	asm volatile("sfence" : : : "memory");
	kernel_fpu_end();
}
const struct raid6_calls raid6_avx2x4 = {
	raid6_avx24_gen_syndrome,
	raid6_avx24_xor_syndrome,
	raid6_have_avx2,
	"avx2x4",
	1			/* Has cache hints */
};

#endif
#endif /* CONFIG_AS_AVX2 */