/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_XOR_64_H
#define _ASM_X86_XOR_64_H

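/*
 * Register the plain SSE routines as one benchmark candidate.  For
 * reference, the template type (defined in include/linux/raid/xor.h)
 * looks roughly like the sketch below; the exact prototypes vary
 * between kernel versions, so treat this as illustrative only:
 *
 *	struct xor_block_template {
 *		struct xor_block_template *next;
 *		const char *name;
 *		int speed;
 *		void (*do_2)(unsigned long bytes,
 *			     unsigned long *p1, unsigned long *p2);
 *		// do_3/do_4/do_5 take one extra source pointer each
 *	};
 *
 * do_N() xors N - 1 source blocks into p1, 'bytes' bytes per block.
 */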
static struct xor_block_template xor_block_sse = {
	.name = "generic_sse",
	.do_2 = xor_sse_2,
	.do_3 = xor_sse_3,
	.do_4 = xor_sse_4,
	.do_5 = xor_sse_5,
};

/* Also try the AVX routines */
#include <asm/xor_avx.h>

/* We force the use of the SSE xor block because it can write around L2.
   We may also be able to load only into the L1 cache, depending on how
   the CPU deals with a load to a line that is being prefetched. */
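/*
 * "Write around L2" refers to non-temporal stores (movnti/movntdq and
 * friends), which bypass the cache hierarchy on the way to memory.  A
 * minimal userspace-style sketch of the idea using SSE2 intrinsics
 * (illustrative only, not kernel code; the real xor_sse_* routines are
 * inline assembly in asm/xor.h):
 *
 *	#include <emmintrin.h>
 *
 *	static void xor_nt(__m128i *p1, const __m128i *p2, size_t n)
 *	{
 *		for (size_t i = 0; i < n; i++) {
 *			__m128i v = _mm_xor_si128(_mm_load_si128(&p1[i]),
 *						  _mm_load_si128(&p2[i]));
 *			_mm_stream_si128(&p1[i], v);  // bypasses the caches
 *		}
 *		_mm_sfence();  // order the non-temporal stores
 *	}
 */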
#undef XOR_TRY_TEMPLATES
#define XOR_TRY_TEMPLATES			\
do {						\
	AVX_XOR_SPEED;				\
	xor_speed(&xor_block_sse_pf64);		\
	xor_speed(&xor_block_sse);		\
} while (0)
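/*
 * XOR_TRY_TEMPLATES is expanded by the generic calibration code in
 * crypto/xor.c, which times each candidate and keeps the fastest one.
 * Simplified sketch of that consumer (see calibrate_xor_blocks() for
 * the real logic; details differ across kernel versions):
 *
 *	#define xor_speed(templ)	do_xor_speed((templ), b1, b2)
 *
 *	XOR_TRY_TEMPLATES;	// benchmarks AVX, sse_pf64, generic_sse
 *	// the template with the highest measured ->speed becomes
 *	// active_template, which xor_blocks() dispatches through
 */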

#endif /* _ASM_X86_XOR_64_H */