/* SPDX-License-Identifier: GPL-2.0 */
/* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */

#include <linux/export.h>
#include <linux/linkage.h>
#include <linux/cfi_types.h>
#include <asm/cpufeatures.h>
#include <asm/alternative.h>

/*
 * Some CPUs run faster using the string copy instructions (sane microcode).
 * It is also a lot simpler. Use this when possible. But don't use the
 * string copy unless the CPU indicates X86_FEATURE_REP_GOOD; fall back
 * to the open-coded register copy otherwise. The prefetch distance could
 * be varied based on SMP/UP.
 */
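
/*
 * void copy_page(void *to, void *from)
 *
 * Per the x86-64 calling convention, %rdi is the destination and %rsi
 * the source; both point to a full 4096-byte page.
 */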
	ALIGN
SYM_TYPED_FUNC_START(copy_page)
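	/*
	 * Boot-time alternatives patching NOPs out the "jmp copy_page_regs"
	 * on CPUs that advertise X86_FEATURE_REP_GOOD, so they fall through
	 * to the rep movsq below, which copies 4096/8 = 512 qwords. All
	 * other CPUs take the jump to the open-coded register copy.
	 */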
	ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
	movl	$4096/8, %ecx
	rep	movsq
	RET
SYM_FUNC_END(copy_page)
EXPORT_SYMBOL(copy_page)

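/*
 * Fallback for CPUs without X86_FEATURE_REP_GOOD: copy the page one
 * 64-byte cache line per iteration through general-purpose registers.
 * %rbx and %r12 are callee-saved, so save them on the stack first.
 */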
SYM_FUNC_START_LOCAL(copy_page_regs)
	subq	$2*8, %rsp
	movq	%rbx, (%rsp)
	movq	%r12, 1*8(%rsp)

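	/*
	 * 4096/64 = 64 cache lines per page; peel off the last 5
	 * iterations into .Loop2 below so the prefetcht0, which runs
	 * 5 lines ahead, never reaches beyond the source page.
	 */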
	movl	$(4096/64)-5, %ecx
	.p2align 4
.Loop64:
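	/*
	 * dec sets ZF here; none of the movs, the prefetch, or the leas
	 * below touch the flags, so the jnz at the bottom of the loop
	 * still consumes this result.
	 */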
	dec	%rcx
	movq	0x8*0(%rsi), %rax
	movq	0x8*1(%rsi), %rbx
	movq	0x8*2(%rsi), %rdx
	movq	0x8*3(%rsi), %r8
	movq	0x8*4(%rsi), %r9
	movq	0x8*5(%rsi), %r10
	movq	0x8*6(%rsi), %r11
	movq	0x8*7(%rsi), %r12

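	/* Prefetch the source line 5 iterations (5*64 bytes) ahead. */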
	prefetcht0 5*64(%rsi)

	movq	%rax, 0x8*0(%rdi)
	movq	%rbx, 0x8*1(%rdi)
	movq	%rdx, 0x8*2(%rdi)
	movq	%r8, 0x8*3(%rdi)
	movq	%r9, 0x8*4(%rdi)
	movq	%r10, 0x8*5(%rdi)
	movq	%r11, 0x8*6(%rdi)
	movq	%r12, 0x8*7(%rdi)

	leaq	64(%rsi), %rsi
	leaq	64(%rdi), %rdi

	jnz	.Loop64

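	/* Final 5 cache lines: same copy loop, minus the prefetch. */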
	movl	$5, %ecx
	.p2align 4
.Loop2:
	decl	%ecx

	movq	0x8*0(%rsi), %rax
	movq	0x8*1(%rsi), %rbx
	movq	0x8*2(%rsi), %rdx
	movq	0x8*3(%rsi), %r8
	movq	0x8*4(%rsi), %r9
	movq	0x8*5(%rsi), %r10
	movq	0x8*6(%rsi), %r11
	movq	0x8*7(%rsi), %r12

	movq	%rax, 0x8*0(%rdi)
	movq	%rbx, 0x8*1(%rdi)
	movq	%rdx, 0x8*2(%rdi)
	movq	%r8, 0x8*3(%rdi)
	movq	%r9, 0x8*4(%rdi)
	movq	%r10, 0x8*5(%rdi)
	movq	%r11, 0x8*6(%rdi)
	movq	%r12, 0x8*7(%rdi)

	leaq	64(%rdi), %rdi
	leaq	64(%rsi), %rsi
	jnz	.Loop2

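	/* Restore the callee-saved registers and release the save area. */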
	movq	(%rsp), %rbx
	movq	1*8(%rsp), %r12
	addq	$2*8, %rsp
	RET
SYM_FUNC_END(copy_page_regs)