1/* SPDX-License-Identifier: GPL-2.0-only */
2/*
3 * User memory copy functions for kernel
4 *
5 * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved.
6 */
7
8/*
9 * The right way to do this involves valignb
10 * The easy way to do this is only speed up src/dest similar alignment.
11 */
12
13/*
14 * Copy to/from user are the same, except that for packets with a load and
15 * a store, I don't know how to tell which kind of exception we got.
16 * Therefore, we duplicate the function, and handle faulting addresses
17 * differently for each function
18 */
19
20/*
21 * copy from user: loads can fault
22 */
/*
 * Register aliases consumed by copy_user_template.S.
 *
 * src_sav/dst_sav hold the post-copy-loop src/dst values so a fault
 * handler can compute how many bytes were NOT copied (see the
 * r2 += sub(src_sav,src) fixups below).  d_dbuf/w_dbuf are the data
 * buffers a load lands in before the matching store is issued.
 */
#define src_sav r13
#define dst_sav r12
#define src_dst_sav r13:12
#define d_dbuf r15:14
#define w_dbuf r15

/*
 * Hexagon ABI argument registers for
 * unsigned long raw_copy_from_user(void *dst, const void *src, unsigned long n):
 * r0 = dst, r1 = src, r2 = byte count (also the return value: bytes NOT copied).
 */
#define dst r0
#define src r1
#define bytes r2
#define loopcount r5

/* The shared template emits the copy loops under this function name. */
#define FUNCNAME raw_copy_from_user
#include "copy_user_template.S"
36
	/* LOAD FAULTS from COPY_FROM_USER */

	/*
	 * Fault fixup handlers.  The __ex_table below routes a faulting
	 * load inside the template's copy loops to one of these numeric
	 * labels.  Each handler returns, in r0, the number of bytes that
	 * were NOT copied — the raw_copy_from_user() return contract.
	 */

	/* Alignment loop. r2 has been updated. Return it. */
	.falign
1009:
2009:
4009:
	{
		r0 = r2		/* r2 already = bytes remaining */
		jumpr r31
	}
	/* Normal copy loops. Do epilog. Use src-src_sav to compute distance */
	/* X - (A - B) == X + B - A */
	/*
	 * In the main loops src has advanced past src_sav by however many
	 * bytes were prefetched but not yet stored, so adding
	 * (src_sav - src) to r2 backs that overshoot out of the count.
	 * The pending store of the already-loaded buffer is still issued
	 * first so those bytes count as copied.
	 */
	.falign
8089:
	{
		memd(dst) = d_dbuf	/* commit last successfully loaded doubleword */
		r2 += sub(src_sav,src)	/* r2 = r2 + src_sav - src */
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
4089:
	{
		memw(dst) = w_dbuf	/* commit last successfully loaded word */
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
2089:
	{
		memh(dst) = w_dbuf	/* commit last successfully loaded halfword */
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
	.falign
1089:
	{
		memb(dst) = w_dbuf	/* commit last successfully loaded byte */
		r2 += sub(src_sav,src)
	}
	{
		r0 = r2
		jumpr r31
	}
90
	/* COPY FROM USER: only loads can fail */

	/*
	 * Exception table: (faulting-instruction, fixup) address pairs.
	 * The 1000/2000/4000 (alignment loops) and 1080/2080/4080/8080
	 * (main loops) labels are emitted by copy_user_template.S; the
	 * fixup targets are the local labels defined above.
	 */
	.section __ex_table,"a"
	.long 1000b,1009b
	.long 2000b,2009b
	.long 4000b,4009b
	.long 8080b,8089b
	.long 4080b,4089b
	.long 2080b,2089b
	.long 1080b,1089b
	.previous
102

/* source code of linux/arch/hexagon/mm/copy_from_user.S */