/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Shared glue code for 128bit block ciphers, AVX assembler macros
 * (arch/x86/crypto/glue_helper-asm-avx.S)
 *
 * Copyright © 2012-2013 Jussi Kivilinna <jussi.kivilinna@iki.fi>
 */
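/*
 * load_8way: load eight consecutive 16-byte blocks from src into the
 * eight given registers. vmovdqu performs unaligned loads, so src does
 * not need to be 16-byte aligned.
 */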
#define load_8way(src, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu (0*16)(src), x0; \
	vmovdqu (1*16)(src), x1; \
	vmovdqu (2*16)(src), x2; \
	vmovdqu (3*16)(src), x3; \
	vmovdqu (4*16)(src), x4; \
	vmovdqu (5*16)(src), x5; \
	vmovdqu (6*16)(src), x6; \
	vmovdqu (7*16)(src), x7;
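/*
 * store_8way: store the eight given registers to eight consecutive
 * 16-byte blocks at dst, again with no alignment requirement.
 */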
#define store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vmovdqu x0, (0*16)(dst); \
	vmovdqu x1, (1*16)(dst); \
	vmovdqu x2, (2*16)(dst); \
	vmovdqu x3, (3*16)(dst); \
	vmovdqu x4, (4*16)(dst); \
	vmovdqu x5, (5*16)(dst); \
	vmovdqu x6, (6*16)(dst); \
	vmovdqu x7, (7*16)(dst);
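/*
 * store_cbc_8way: write out eight blocks of CBC-decrypted plaintext.
 * Each decrypted block x1..x7 is XORed with the preceding ciphertext
 * block, which is still intact in memory at src. x0 is stored as-is:
 * its preceding ciphertext block (or the IV) lies before src, so that
 * final XOR is left to the caller.
 */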
#define store_cbc_8way(src, dst, x0, x1, x2, x3, x4, x5, x6, x7) \
	vpxor (0*16)(src), x1, x1; \
	vpxor (1*16)(src), x2, x2; \
	vpxor (2*16)(src), x3, x3; \
	vpxor (3*16)(src), x4, x4; \
	vpxor (4*16)(src), x5, x5; \
	vpxor (5*16)(src), x6, x6; \
	vpxor (6*16)(src), x7, x7; \
	store_8way(dst, x0, x1, x2, x3, x4, x5, x6, x7);
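
/*
 * Usage sketch (illustrative, not from this file): an AVX cipher
 * implementation that #includes these macros might perform 8-way CBC
 * decryption as below, where __cipher_dec_blk8 is a hypothetical
 * decryption core that transforms %xmm0..%xmm7 in place, and %rsi/%rdx
 * hold dst/src per the kernel's x86-64 calling convention:
 *
 *	load_8way(%rdx, %xmm0, %xmm1, %xmm2, %xmm3,
 *		  %xmm4, %xmm5, %xmm6, %xmm7);
 *	call __cipher_dec_blk8;
 *	store_cbc_8way(%rdx, %rsi, %xmm0, %xmm1, %xmm2, %xmm3,
 *		       %xmm4, %xmm5, %xmm6, %xmm7);
 */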