/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Shared glue code for 128bit block ciphers, AVX2 assembler macros
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 */

/*
 * Load 16 consecutive 128-bit blocks from src into x0..x7
 * (8 ymm registers, two blocks per register).
 */
#define load_16way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu (0*32)(src), x0; \
	vmovdqu (1*32)(src), x1; \
	vmovdqu (2*32)(src), x2; \
	vmovdqu (3*32)(src), x3; \
	vmovdqu (4*32)(src), x4; \
	vmovdqu (5*32)(src), x5; \
	vmovdqu (6*32)(src), x6; \
	vmovdqu (7*32)(src), x7;

/*
 * Store the 16 blocks held in x0..x7 to 16 consecutive 128-bit blocks
 * at dst.
 */
#define store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu x0, (0*32)(dst); \
	vmovdqu x1, (1*32)(dst); \
	vmovdqu x2, (2*32)(dst); \
	vmovdqu x3, (3*32)(dst); \
	vmovdqu x4, (4*32)(dst); \
	vmovdqu x5, (5*32)(dst); \
	vmovdqu x6, (6*32)(dst); \
	vmovdqu x7, (7*32)(dst);

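/*
 * Usage sketch (illustrative only): a hypothetical 16-way ECB routine
 * would typically bracket its cipher body with the load/store macros
 * above.  The symbol __example_enc_16way and the register assignments
 * below are assumptions for illustration, not taken from any in-tree
 * cipher:
 *
 *	vzeroupper;
 *	load_16way(%rdx, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5,
 *		   %ymm6, %ymm7);
 *	call __example_enc_16way;
 *	store_16way(%rsi, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5,
 *		    %ymm6, %ymm7);
 *	vzeroupper;
 */
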
/*
 * CBC output store: XOR the decrypted blocks in x0..x7 with the
 * preceding ciphertext blocks read back from src, then store the
 * result to dst.  The low 128-bit lane of x0 (the very first block)
 * is XORed with zero here; chaining it with the IV is left to the
 * caller.  t0 is clobbered.
 */
#define store_cbc_16way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7, t0) \
	vpxor t0, t0, t0; \
	vinserti128 $1, (src), t0, t0; \
	vpxor t0, x0, x0; \
	vpxor (0*32+16)(src), x1, x1; \
	vpxor (1*32+16)(src), x2, x2; \
	vpxor (2*32+16)(src), x3, x3; \
	vpxor (3*32+16)(src), x4, x4; \
	vpxor (4*32+16)(src), x5, x5; \
	vpxor (5*32+16)(src), x6, x6; \
	vpxor (6*32+16)(src), x7, x7; \
	store_16way(dst, x0, x1, x2, x3, x4, x5, x6, x7);

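/*
 * Usage sketch (illustrative only): a hypothetical 16-way CBC
 * decryption path might load the ciphertext, run the cipher's 16-way
 * decryption body, and then chain with store_cbc_16way.  The symbol
 * __example_dec_16way and the register choices are assumptions for
 * illustration:
 *
 *	load_16way(%rdx, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4, %ymm5,
 *		   %ymm6, %ymm7);
 *	call __example_dec_16way;
 *	store_cbc_16way(%rdx, %rsi, %ymm0, %ymm1, %ymm2, %ymm3, %ymm4,
 *			%ymm5, %ymm6, %ymm7, %ymm8);
 *
 * The caller is still expected to XOR the very first output block with
 * the IV, since store_cbc_16way leaves the low lane of x0 unchanged.
 */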