1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* |
3 | * Traceprobe fetch helper inlines |
4 | */ |
5 | |
6 | static nokprobe_inline void |
7 | fetch_store_raw(unsigned long val, struct fetch_insn *code, void *buf) |
8 | { |
9 | switch (code->size) { |
10 | case 1: |
11 | *(u8 *)buf = (u8)val; |
12 | break; |
13 | case 2: |
14 | *(u16 *)buf = (u16)val; |
15 | break; |
16 | case 4: |
17 | *(u32 *)buf = (u32)val; |
18 | break; |
19 | case 8: |
20 | //TBD: 32bit signed |
21 | *(u64 *)buf = (u64)val; |
22 | break; |
23 | default: |
24 | *(unsigned long *)buf = val; |
25 | } |
26 | } |
27 | |
/*
 * Extract a bitfield from the value already stored in @buf, in place.
 * The left shift discards bits above the field, the (logical, unsigned)
 * right shift discards bits below it and aligns the field at bit 0.
 * code->basesize selects the container width in bytes; an unknown
 * basesize leaves @buf untouched.
 */
static nokprobe_inline void
fetch_apply_bitfield(struct fetch_insn *code, void *buf)
{
	switch (code->basesize) {
	case 1:
		*(u8 *)buf <<= code->lshift;
		*(u8 *)buf >>= code->rshift;
		break;
	case 2:
		*(u16 *)buf <<= code->lshift;
		*(u16 *)buf >>= code->rshift;
		break;
	case 4:
		*(u32 *)buf <<= code->lshift;
		*(u32 *)buf >>= code->rshift;
		break;
	case 8:
		*(u64 *)buf <<= code->lshift;
		*(u64 *)buf >>= code->rshift;
		break;
	}
}
50 | |
51 | /* |
52 | * These functions must be defined for each callsite. |
53 | * Return consumed dynamic data size (>= 0), or error (< 0). |
54 | * If dest is NULL, don't store result and return required dynamic data size. |
55 | */ |
56 | static int |
57 | process_fetch_insn(struct fetch_insn *code, void *rec, void *edata, |
58 | void *dest, void *base); |
59 | static nokprobe_inline int fetch_store_strlen(unsigned long addr); |
60 | static nokprobe_inline int |
61 | fetch_store_string(unsigned long addr, void *dest, void *base); |
62 | static nokprobe_inline int fetch_store_strlen_user(unsigned long addr); |
63 | static nokprobe_inline int |
64 | fetch_store_string_user(unsigned long addr, void *dest, void *base); |
65 | static nokprobe_inline int |
66 | probe_mem_read(void *dest, void *src, size_t size); |
67 | static nokprobe_inline int |
68 | probe_mem_read_user(void *dest, void *src, size_t size); |
69 | |
70 | static nokprobe_inline int |
71 | fetch_store_symstrlen(unsigned long addr) |
72 | { |
73 | char namebuf[KSYM_SYMBOL_LEN]; |
74 | int ret; |
75 | |
76 | ret = sprint_symbol(buffer: namebuf, address: addr); |
77 | if (ret < 0) |
78 | return 0; |
79 | |
80 | return ret + 1; |
81 | } |
82 | |
83 | /* |
84 | * Fetch a null-terminated symbol string + offset. Caller MUST set *(u32 *)buf |
85 | * with max length and relative data location. |
86 | */ |
87 | static nokprobe_inline int |
88 | fetch_store_symstring(unsigned long addr, void *dest, void *base) |
89 | { |
90 | int maxlen = get_loc_len(*(u32 *)dest); |
91 | void *__dest; |
92 | |
93 | if (unlikely(!maxlen)) |
94 | return -ENOMEM; |
95 | |
96 | __dest = get_loc_data(dl: dest, ent: base); |
97 | |
98 | return sprint_symbol(buffer: __dest, address: addr); |
99 | } |
100 | |
101 | /* common part of process_fetch_insn*/ |
102 | static nokprobe_inline int |
103 | process_common_fetch_insn(struct fetch_insn *code, unsigned long *val) |
104 | { |
105 | switch (code->op) { |
106 | case FETCH_OP_IMM: |
107 | *val = code->immediate; |
108 | break; |
109 | case FETCH_OP_COMM: |
110 | *val = (unsigned long)current->comm; |
111 | break; |
112 | case FETCH_OP_DATA: |
113 | *val = (unsigned long)code->data; |
114 | break; |
115 | default: |
116 | return -EILSEQ; |
117 | } |
118 | return 0; |
119 | } |
120 | |
121 | /* From the 2nd stage, routine is same */ |
122 | static nokprobe_inline int |
123 | process_fetch_insn_bottom(struct fetch_insn *code, unsigned long val, |
124 | void *dest, void *base) |
125 | { |
126 | struct fetch_insn *s3 = NULL; |
127 | int total = 0, ret = 0, i = 0; |
128 | u32 loc = 0; |
129 | unsigned long lval = val; |
130 | |
131 | stage2: |
132 | /* 2nd stage: dereference memory if needed */ |
133 | do { |
134 | if (code->op == FETCH_OP_DEREF) { |
135 | lval = val; |
136 | ret = probe_mem_read(dest: &val, src: (void *)val + code->offset, |
137 | size: sizeof(val)); |
138 | } else if (code->op == FETCH_OP_UDEREF) { |
139 | lval = val; |
140 | ret = probe_mem_read_user(dest: &val, |
141 | src: (void *)val + code->offset, size: sizeof(val)); |
142 | } else |
143 | break; |
144 | if (ret) |
145 | return ret; |
146 | code++; |
147 | } while (1); |
148 | |
149 | s3 = code; |
150 | stage3: |
151 | /* 3rd stage: store value to buffer */ |
152 | if (unlikely(!dest)) { |
153 | switch (code->op) { |
154 | case FETCH_OP_ST_STRING: |
155 | ret = fetch_store_strlen(addr: val + code->offset); |
156 | code++; |
157 | goto array; |
158 | case FETCH_OP_ST_USTRING: |
159 | ret = fetch_store_strlen_user(addr: val + code->offset); |
160 | code++; |
161 | goto array; |
162 | case FETCH_OP_ST_SYMSTR: |
163 | ret = fetch_store_symstrlen(addr: val + code->offset); |
164 | code++; |
165 | goto array; |
166 | default: |
167 | return -EILSEQ; |
168 | } |
169 | } |
170 | |
171 | switch (code->op) { |
172 | case FETCH_OP_ST_RAW: |
173 | fetch_store_raw(val, code, buf: dest); |
174 | break; |
175 | case FETCH_OP_ST_MEM: |
176 | probe_mem_read(dest, src: (void *)val + code->offset, size: code->size); |
177 | break; |
178 | case FETCH_OP_ST_UMEM: |
179 | probe_mem_read_user(dest, src: (void *)val + code->offset, size: code->size); |
180 | break; |
181 | case FETCH_OP_ST_STRING: |
182 | loc = *(u32 *)dest; |
183 | ret = fetch_store_string(addr: val + code->offset, dest, base); |
184 | break; |
185 | case FETCH_OP_ST_USTRING: |
186 | loc = *(u32 *)dest; |
187 | ret = fetch_store_string_user(addr: val + code->offset, dest, base); |
188 | break; |
189 | case FETCH_OP_ST_SYMSTR: |
190 | loc = *(u32 *)dest; |
191 | ret = fetch_store_symstring(addr: val + code->offset, dest, base); |
192 | break; |
193 | default: |
194 | return -EILSEQ; |
195 | } |
196 | code++; |
197 | |
198 | /* 4th stage: modify stored value if needed */ |
199 | if (code->op == FETCH_OP_MOD_BF) { |
200 | fetch_apply_bitfield(code, buf: dest); |
201 | code++; |
202 | } |
203 | |
204 | array: |
205 | /* the last stage: Loop on array */ |
206 | if (code->op == FETCH_OP_LP_ARRAY) { |
207 | if (ret < 0) |
208 | ret = 0; |
209 | total += ret; |
210 | if (++i < code->param) { |
211 | code = s3; |
212 | if (s3->op != FETCH_OP_ST_STRING && |
213 | s3->op != FETCH_OP_ST_USTRING) { |
214 | dest += s3->size; |
215 | val += s3->size; |
216 | goto stage3; |
217 | } |
218 | code--; |
219 | val = lval + sizeof(char *); |
220 | if (dest) { |
221 | dest += sizeof(u32); |
222 | *(u32 *)dest = update_data_loc(loc, consumed: ret); |
223 | } |
224 | goto stage2; |
225 | } |
226 | code++; |
227 | ret = total; |
228 | } |
229 | |
230 | return code->op == FETCH_OP_END ? ret : -EILSEQ; |
231 | } |
232 | |
233 | /* Sum up total data length for dynamic arrays (strings) */ |
234 | static nokprobe_inline int |
235 | __get_data_size(struct trace_probe *tp, struct pt_regs *regs, void *edata) |
236 | { |
237 | struct probe_arg *arg; |
238 | int i, len, ret = 0; |
239 | |
240 | for (i = 0; i < tp->nr_args; i++) { |
241 | arg = tp->args + i; |
242 | if (unlikely(arg->dynamic)) { |
243 | len = process_fetch_insn(code: arg->code, rec: regs, edata, NULL, NULL); |
244 | if (len > 0) |
245 | ret += len; |
246 | } |
247 | } |
248 | |
249 | return ret; |
250 | } |
251 | |
252 | /* Store the value of each argument */ |
253 | static nokprobe_inline void |
254 | store_trace_args(void *data, struct trace_probe *tp, void *rec, void *edata, |
255 | int , int maxlen) |
256 | { |
257 | struct probe_arg *arg; |
258 | void *base = data - header_size; |
259 | void *dyndata = data + tp->size; |
260 | u32 *dl; /* Data location */ |
261 | int ret, i; |
262 | |
263 | for (i = 0; i < tp->nr_args; i++) { |
264 | arg = tp->args + i; |
265 | dl = data + arg->offset; |
266 | /* Point the dynamic data area if needed */ |
267 | if (unlikely(arg->dynamic)) |
268 | *dl = make_data_loc(maxlen, dyndata - base); |
269 | ret = process_fetch_insn(code: arg->code, rec, edata, dest: dl, base); |
270 | if (arg->dynamic && likely(ret > 0)) { |
271 | dyndata += ret; |
272 | maxlen -= ret; |
273 | } |
274 | } |
275 | } |
276 | |