1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Test cases for memcpy(), memmove(), and memset(). |
4 | */ |
5 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
6 | |
7 | #include <kunit/test.h> |
8 | #include <linux/device.h> |
9 | #include <linux/init.h> |
10 | #include <linux/kernel.h> |
11 | #include <linux/mm.h> |
12 | #include <linux/module.h> |
13 | #include <linux/overflow.h> |
14 | #include <linux/slab.h> |
15 | #include <linux/types.h> |
16 | #include <linux/vmalloc.h> |
17 | |
18 | struct some_bytes { |
19 | union { |
20 | u8 data[32]; |
21 | struct { |
22 | u32 one; |
23 | u16 two; |
24 | u8 three; |
25 | /* 1 byte hole */ |
26 | u32 four[4]; |
27 | }; |
28 | }; |
29 | }; |
30 | |
31 | #define check(instance, v) do { \ |
32 | BUILD_BUG_ON(sizeof(instance.data) != 32); \ |
33 | for (size_t i = 0; i < sizeof(instance.data); i++) { \ |
34 | KUNIT_ASSERT_EQ_MSG(test, instance.data[i], v, \ |
35 | "line %d: '%s' not initialized to 0x%02x @ %d (saw 0x%02x)\n", \ |
36 | __LINE__, #instance, v, i, instance.data[i]); \ |
37 | } \ |
38 | } while (0) |
39 | |
40 | #define compare(name, one, two) do { \ |
41 | BUILD_BUG_ON(sizeof(one) != sizeof(two)); \ |
42 | for (size_t i = 0; i < sizeof(one); i++) { \ |
43 | KUNIT_EXPECT_EQ_MSG(test, one.data[i], two.data[i], \ |
44 | "line %d: %s.data[%d] (0x%02x) != %s.data[%d] (0x%02x)\n", \ |
45 | __LINE__, #one, i, one.data[i], #two, i, two.data[i]); \ |
46 | } \ |
47 | kunit_info(test, "ok: " TEST_OP "() " name "\n"); \ |
48 | } while (0) |
49 | |
50 | static void memcpy_test(struct kunit *test) |
51 | { |
52 | #define TEST_OP "memcpy" |
53 | struct some_bytes control = { |
54 | .data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
55 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
56 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
57 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
58 | }, |
59 | }; |
60 | struct some_bytes zero = { }; |
61 | struct some_bytes middle = { |
62 | .data = { 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
63 | 0x20, 0x20, 0x20, 0x20, 0x00, 0x00, 0x00, 0x00, |
64 | 0x00, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, |
65 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
66 | }, |
67 | }; |
68 | struct some_bytes three = { |
69 | .data = { 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
70 | 0x20, 0x00, 0x00, 0x20, 0x20, 0x20, 0x20, 0x20, |
71 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
72 | 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, 0x20, |
73 | }, |
74 | }; |
75 | struct some_bytes dest = { }; |
76 | int count; |
77 | u8 *ptr; |
78 | |
79 | /* Verify static initializers. */ |
80 | check(control, 0x20); |
81 | check(zero, 0); |
82 | compare("static initializers" , dest, zero); |
83 | |
84 | /* Verify assignment. */ |
85 | dest = control; |
86 | compare("direct assignment" , dest, control); |
87 | |
88 | /* Verify complete overwrite. */ |
89 | memcpy(dest.data, zero.data, sizeof(dest.data)); |
90 | compare("complete overwrite" , dest, zero); |
91 | |
92 | /* Verify middle overwrite. */ |
93 | dest = control; |
94 | memcpy(dest.data + 12, zero.data, 7); |
95 | compare("middle overwrite" , dest, middle); |
96 | |
97 | /* Verify argument side-effects aren't repeated. */ |
98 | dest = control; |
99 | ptr = dest.data; |
100 | count = 1; |
101 | memcpy(ptr++, zero.data, count++); |
102 | ptr += 8; |
103 | memcpy(ptr++, zero.data, count++); |
104 | compare("argument side-effects" , dest, three); |
105 | #undef TEST_OP |
106 | } |
107 | |
108 | static unsigned char larger_array [2048]; |
109 | |
110 | static void memmove_test(struct kunit *test) |
111 | { |
112 | #define TEST_OP "memmove" |
113 | struct some_bytes control = { |
114 | .data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
115 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
116 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
117 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
118 | }, |
119 | }; |
120 | struct some_bytes zero = { }; |
121 | struct some_bytes middle = { |
122 | .data = { 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
123 | 0x99, 0x99, 0x99, 0x99, 0x00, 0x00, 0x00, 0x00, |
124 | 0x00, 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99, |
125 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
126 | }, |
127 | }; |
128 | struct some_bytes five = { |
129 | .data = { 0x00, 0x00, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
130 | 0x99, 0x99, 0x00, 0x00, 0x00, 0x99, 0x99, 0x99, |
131 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
132 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
133 | }, |
134 | }; |
135 | struct some_bytes overlap = { |
136 | .data = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, |
137 | 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, |
138 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
139 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
140 | }, |
141 | }; |
142 | struct some_bytes overlap_expected = { |
143 | .data = { 0x00, 0x01, 0x00, 0x01, 0x02, 0x03, 0x04, 0x07, |
144 | 0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D, 0x0E, 0x0F, |
145 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
146 | 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, 0x99, |
147 | }, |
148 | }; |
149 | struct some_bytes dest = { }; |
150 | int count; |
151 | u8 *ptr; |
152 | |
153 | /* Verify static initializers. */ |
154 | check(control, 0x99); |
155 | check(zero, 0); |
156 | compare("static initializers" , zero, dest); |
157 | |
158 | /* Verify assignment. */ |
159 | dest = control; |
160 | compare("direct assignment" , dest, control); |
161 | |
162 | /* Verify complete overwrite. */ |
163 | memmove(dest.data, zero.data, sizeof(dest.data)); |
164 | compare("complete overwrite" , dest, zero); |
165 | |
166 | /* Verify middle overwrite. */ |
167 | dest = control; |
168 | memmove(dest.data + 12, zero.data, 7); |
169 | compare("middle overwrite" , dest, middle); |
170 | |
171 | /* Verify argument side-effects aren't repeated. */ |
172 | dest = control; |
173 | ptr = dest.data; |
174 | count = 2; |
175 | memmove(ptr++, zero.data, count++); |
176 | ptr += 9; |
177 | memmove(ptr++, zero.data, count++); |
178 | compare("argument side-effects" , dest, five); |
179 | |
180 | /* Verify overlapping overwrite is correct. */ |
181 | ptr = &overlap.data[2]; |
182 | memmove(ptr, overlap.data, 5); |
183 | compare("overlapping write" , overlap, overlap_expected); |
184 | |
185 | /* Verify larger overlapping moves. */ |
186 | larger_array[256] = 0xAAu; |
187 | /* |
188 | * Test a backwards overlapping memmove first. 256 and 1024 are |
189 | * important for i386 to use rep movsl. |
190 | */ |
191 | memmove(larger_array, larger_array + 256, 1024); |
192 | KUNIT_ASSERT_EQ(test, larger_array[0], 0xAAu); |
193 | KUNIT_ASSERT_EQ(test, larger_array[256], 0x00); |
194 | KUNIT_ASSERT_NULL(test, |
195 | memchr(larger_array + 1, 0xaa, ARRAY_SIZE(larger_array) - 1)); |
196 | /* Test a forwards overlapping memmove. */ |
197 | larger_array[0] = 0xBBu; |
198 | memmove(larger_array + 256, larger_array, 1024); |
199 | KUNIT_ASSERT_EQ(test, larger_array[0], 0xBBu); |
200 | KUNIT_ASSERT_EQ(test, larger_array[256], 0xBBu); |
201 | KUNIT_ASSERT_NULL(test, memchr(larger_array + 1, 0xBBu, 256 - 1)); |
202 | KUNIT_ASSERT_NULL(test, |
203 | memchr(larger_array + 257, 0xBBu, ARRAY_SIZE(larger_array) - 257)); |
204 | #undef TEST_OP |
205 | } |
206 | |
207 | static void memset_test(struct kunit *test) |
208 | { |
209 | #define TEST_OP "memset" |
210 | struct some_bytes control = { |
211 | .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
212 | 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
213 | 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
214 | 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
215 | }, |
216 | }; |
217 | struct some_bytes complete = { |
218 | .data = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, |
219 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, |
220 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, |
221 | 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, |
222 | }, |
223 | }; |
224 | struct some_bytes middle = { |
225 | .data = { 0x30, 0x30, 0x30, 0x30, 0x31, 0x31, 0x31, 0x31, |
226 | 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, 0x31, |
227 | 0x31, 0x31, 0x31, 0x31, 0x30, 0x30, 0x30, 0x30, |
228 | 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
229 | }, |
230 | }; |
231 | struct some_bytes three = { |
232 | .data = { 0x60, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
233 | 0x30, 0x61, 0x61, 0x30, 0x30, 0x30, 0x30, 0x30, |
234 | 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
235 | 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
236 | }, |
237 | }; |
238 | struct some_bytes after = { |
239 | .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x72, |
240 | 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, |
241 | 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, |
242 | 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, 0x72, |
243 | }, |
244 | }; |
245 | struct some_bytes startat = { |
246 | .data = { 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, 0x30, |
247 | 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, |
248 | 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, |
249 | 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, 0x79, |
250 | }, |
251 | }; |
252 | struct some_bytes dest = { }; |
253 | int count, value; |
254 | u8 *ptr; |
255 | |
256 | /* Verify static initializers. */ |
257 | check(control, 0x30); |
258 | check(dest, 0); |
259 | |
260 | /* Verify assignment. */ |
261 | dest = control; |
262 | compare("direct assignment" , dest, control); |
263 | |
264 | /* Verify complete overwrite. */ |
265 | memset(dest.data, 0xff, sizeof(dest.data)); |
266 | compare("complete overwrite" , dest, complete); |
267 | |
268 | /* Verify middle overwrite. */ |
269 | dest = control; |
270 | memset(dest.data + 4, 0x31, 16); |
271 | compare("middle overwrite" , dest, middle); |
272 | |
273 | /* Verify argument side-effects aren't repeated. */ |
274 | dest = control; |
275 | ptr = dest.data; |
276 | value = 0x60; |
277 | count = 1; |
278 | memset(ptr++, value++, count++); |
279 | ptr += 8; |
280 | memset(ptr++, value++, count++); |
281 | compare("argument side-effects" , dest, three); |
282 | |
283 | /* Verify memset_after() */ |
284 | dest = control; |
285 | memset_after(&dest, 0x72, three); |
286 | compare("memset_after()" , dest, after); |
287 | |
288 | /* Verify memset_startat() */ |
289 | dest = control; |
290 | memset_startat(&dest, 0x79, four); |
291 | compare("memset_startat()" , dest, startat); |
292 | #undef TEST_OP |
293 | } |
294 | |
295 | static u8 large_src[1024]; |
296 | static u8 large_dst[2048]; |
297 | static const u8 large_zero[2048]; |
298 | |
299 | static void set_random_nonzero(struct kunit *test, u8 *byte) |
300 | { |
301 | int failed_rng = 0; |
302 | |
303 | while (*byte == 0) { |
304 | get_random_bytes(buf: byte, len: 1); |
305 | KUNIT_ASSERT_LT_MSG(test, failed_rng++, 100, |
306 | "Is the RNG broken?" ); |
307 | } |
308 | } |
309 | |
310 | static void init_large(struct kunit *test) |
311 | { |
312 | if (!IS_ENABLED(CONFIG_MEMCPY_SLOW_KUNIT_TEST)) |
313 | kunit_skip(test, "Slow test skipped. Enable with CONFIG_MEMCPY_SLOW_KUNIT_TEST=y" ); |
314 | |
315 | /* Get many bit patterns. */ |
316 | get_random_bytes(buf: large_src, ARRAY_SIZE(large_src)); |
317 | |
318 | /* Make sure we have non-zero edges. */ |
319 | set_random_nonzero(test, byte: &large_src[0]); |
320 | set_random_nonzero(test, byte: &large_src[ARRAY_SIZE(large_src) - 1]); |
321 | |
322 | /* Explicitly zero the entire destination. */ |
323 | memset(large_dst, 0, ARRAY_SIZE(large_dst)); |
324 | } |
325 | |
326 | /* |
327 | * Instead of an indirect function call for "copy" or a giant macro, |
328 | * use a bool to pick memcpy or memmove. |
329 | */ |
330 | static void copy_large_test(struct kunit *test, bool use_memmove) |
331 | { |
332 | init_large(test); |
333 | |
334 | /* Copy a growing number of non-overlapping bytes ... */ |
335 | for (int bytes = 1; bytes <= ARRAY_SIZE(large_src); bytes++) { |
336 | /* Over a shifting destination window ... */ |
337 | for (int offset = 0; offset < ARRAY_SIZE(large_src); offset++) { |
338 | int right_zero_pos = offset + bytes; |
339 | int right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos; |
340 | |
341 | /* Copy! */ |
342 | if (use_memmove) |
343 | memmove(large_dst + offset, large_src, bytes); |
344 | else |
345 | memcpy(large_dst + offset, large_src, bytes); |
346 | |
347 | /* Did we touch anything before the copy area? */ |
348 | KUNIT_ASSERT_EQ_MSG(test, |
349 | memcmp(large_dst, large_zero, offset), 0, |
350 | "with size %d at offset %d" , bytes, offset); |
351 | /* Did we touch anything after the copy area? */ |
352 | KUNIT_ASSERT_EQ_MSG(test, |
353 | memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0, |
354 | "with size %d at offset %d" , bytes, offset); |
355 | |
356 | /* Are we byte-for-byte exact across the copy? */ |
357 | KUNIT_ASSERT_EQ_MSG(test, |
358 | memcmp(large_dst + offset, large_src, bytes), 0, |
359 | "with size %d at offset %d" , bytes, offset); |
360 | |
361 | /* Zero out what we copied for the next cycle. */ |
362 | memset(large_dst + offset, 0, bytes); |
363 | } |
364 | /* Avoid stall warnings if this loop gets slow. */ |
365 | cond_resched(); |
366 | } |
367 | } |
368 | |
369 | static void memcpy_large_test(struct kunit *test) |
370 | { |
371 | copy_large_test(test, use_memmove: false); |
372 | } |
373 | |
374 | static void memmove_large_test(struct kunit *test) |
375 | { |
376 | copy_large_test(test, use_memmove: true); |
377 | } |
378 | |
379 | /* |
380 | * On the assumption that boundary conditions are going to be the most |
381 | * sensitive, instead of taking a full step (inc) each iteration, |
382 | * take single index steps for at least the first "inc"-many indexes |
383 | * from the "start" and at least the last "inc"-many indexes before |
384 | * the "end". When in the middle, take full "inc"-wide steps. For |
385 | * example, calling next_step(idx, 1, 15, 3) with idx starting at 0 |
386 | * would see the following pattern: 1 2 3 4 7 10 11 12 13 14 15. |
387 | */ |
388 | static int next_step(int idx, int start, int end, int inc) |
389 | { |
390 | start += inc; |
391 | end -= inc; |
392 | |
393 | if (idx < start || idx + inc > end) |
394 | inc = 1; |
395 | return idx + inc; |
396 | } |
397 | |
398 | static void inner_loop(struct kunit *test, int bytes, int d_off, int s_off) |
399 | { |
400 | int left_zero_pos, left_zero_size; |
401 | int right_zero_pos, right_zero_size; |
402 | int src_pos, src_orig_pos, src_size; |
403 | int pos; |
404 | |
405 | /* Place the source in the destination buffer. */ |
406 | memcpy(&large_dst[s_off], large_src, bytes); |
407 | |
408 | /* Copy to destination offset. */ |
409 | memmove(&large_dst[d_off], &large_dst[s_off], bytes); |
410 | |
411 | /* Make sure destination entirely matches. */ |
412 | KUNIT_ASSERT_EQ_MSG(test, memcmp(&large_dst[d_off], large_src, bytes), 0, |
413 | "with size %d at src offset %d and dest offset %d" , |
414 | bytes, s_off, d_off); |
415 | |
416 | /* Calculate the expected zero spans. */ |
417 | if (s_off < d_off) { |
418 | left_zero_pos = 0; |
419 | left_zero_size = s_off; |
420 | |
421 | right_zero_pos = d_off + bytes; |
422 | right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos; |
423 | |
424 | src_pos = s_off; |
425 | src_orig_pos = 0; |
426 | src_size = d_off - s_off; |
427 | } else { |
428 | left_zero_pos = 0; |
429 | left_zero_size = d_off; |
430 | |
431 | right_zero_pos = s_off + bytes; |
432 | right_zero_size = ARRAY_SIZE(large_dst) - right_zero_pos; |
433 | |
434 | src_pos = d_off + bytes; |
435 | src_orig_pos = src_pos - s_off; |
436 | src_size = right_zero_pos - src_pos; |
437 | } |
438 | |
439 | /* Check non-overlapping source is unchanged.*/ |
440 | KUNIT_ASSERT_EQ_MSG(test, |
441 | memcmp(&large_dst[src_pos], &large_src[src_orig_pos], src_size), 0, |
442 | "with size %d at src offset %d and dest offset %d" , |
443 | bytes, s_off, d_off); |
444 | |
445 | /* Check leading buffer contents are zero. */ |
446 | KUNIT_ASSERT_EQ_MSG(test, |
447 | memcmp(&large_dst[left_zero_pos], large_zero, left_zero_size), 0, |
448 | "with size %d at src offset %d and dest offset %d" , |
449 | bytes, s_off, d_off); |
450 | /* Check trailing buffer contents are zero. */ |
451 | KUNIT_ASSERT_EQ_MSG(test, |
452 | memcmp(&large_dst[right_zero_pos], large_zero, right_zero_size), 0, |
453 | "with size %d at src offset %d and dest offset %d" , |
454 | bytes, s_off, d_off); |
455 | |
456 | /* Zero out everything not already zeroed.*/ |
457 | pos = left_zero_pos + left_zero_size; |
458 | memset(&large_dst[pos], 0, right_zero_pos - pos); |
459 | } |
460 | |
461 | static void memmove_overlap_test(struct kunit *test) |
462 | { |
463 | /* |
464 | * Running all possible offset and overlap combinations takes a |
465 | * very long time. Instead, only check up to 128 bytes offset |
466 | * into the destination buffer (which should result in crossing |
467 | * cachelines), with a step size of 1 through 7 to try to skip some |
468 | * redundancy. |
469 | */ |
470 | static const int offset_max = 128; /* less than ARRAY_SIZE(large_src); */ |
471 | static const int bytes_step = 7; |
472 | static const int window_step = 7; |
473 | |
474 | static const int bytes_start = 1; |
475 | static const int bytes_end = ARRAY_SIZE(large_src) + 1; |
476 | |
477 | init_large(test); |
478 | |
479 | /* Copy a growing number of overlapping bytes ... */ |
480 | for (int bytes = bytes_start; bytes < bytes_end; |
481 | bytes = next_step(idx: bytes, start: bytes_start, end: bytes_end, inc: bytes_step)) { |
482 | |
483 | /* Over a shifting destination window ... */ |
484 | for (int d_off = 0; d_off < offset_max; d_off++) { |
485 | int s_start = max(d_off - bytes, 0); |
486 | int s_end = min_t(int, d_off + bytes, ARRAY_SIZE(large_src)); |
487 | |
488 | /* Over a shifting source window ... */ |
489 | for (int s_off = s_start; s_off < s_end; |
490 | s_off = next_step(idx: s_off, start: s_start, end: s_end, inc: window_step)) |
491 | inner_loop(test, bytes, d_off, s_off); |
492 | |
493 | /* Avoid stall warnings. */ |
494 | cond_resched(); |
495 | } |
496 | } |
497 | } |
498 | |
499 | static void strtomem_test(struct kunit *test) |
500 | { |
501 | static const char input[sizeof(unsigned long)] = "hi" ; |
502 | static const char truncate[] = "this is too long" ; |
503 | struct { |
504 | unsigned long canary1; |
505 | unsigned char output[sizeof(unsigned long)] __nonstring; |
506 | unsigned long canary2; |
507 | } wrap; |
508 | |
509 | memset(&wrap, 0xFF, sizeof(wrap)); |
510 | KUNIT_EXPECT_EQ_MSG(test, wrap.canary1, ULONG_MAX, |
511 | "bad initial canary value" ); |
512 | KUNIT_EXPECT_EQ_MSG(test, wrap.canary2, ULONG_MAX, |
513 | "bad initial canary value" ); |
514 | |
515 | /* Check unpadded copy leaves surroundings untouched. */ |
516 | strtomem(wrap.output, input); |
517 | KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); |
518 | KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); |
519 | KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); |
520 | for (size_t i = 2; i < sizeof(wrap.output); i++) |
521 | KUNIT_EXPECT_EQ(test, wrap.output[i], 0xFF); |
522 | KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); |
523 | |
524 | /* Check truncated copy leaves surroundings untouched. */ |
525 | memset(&wrap, 0xFF, sizeof(wrap)); |
526 | strtomem(wrap.output, truncate); |
527 | KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); |
528 | for (size_t i = 0; i < sizeof(wrap.output); i++) |
529 | KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); |
530 | KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); |
531 | |
532 | /* Check padded copy leaves only string padded. */ |
533 | memset(&wrap, 0xFF, sizeof(wrap)); |
534 | strtomem_pad(wrap.output, input, 0xAA); |
535 | KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); |
536 | KUNIT_EXPECT_EQ(test, wrap.output[0], input[0]); |
537 | KUNIT_EXPECT_EQ(test, wrap.output[1], input[1]); |
538 | for (size_t i = 2; i < sizeof(wrap.output); i++) |
539 | KUNIT_EXPECT_EQ(test, wrap.output[i], 0xAA); |
540 | KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); |
541 | |
542 | /* Check truncated padded copy has no padding. */ |
543 | memset(&wrap, 0xFF, sizeof(wrap)); |
544 | strtomem(wrap.output, truncate); |
545 | KUNIT_EXPECT_EQ(test, wrap.canary1, ULONG_MAX); |
546 | for (size_t i = 0; i < sizeof(wrap.output); i++) |
547 | KUNIT_EXPECT_EQ(test, wrap.output[i], truncate[i]); |
548 | KUNIT_EXPECT_EQ(test, wrap.canary2, ULONG_MAX); |
549 | } |
550 | |
551 | static struct kunit_case memcpy_test_cases[] = { |
552 | KUNIT_CASE(memset_test), |
553 | KUNIT_CASE(memcpy_test), |
554 | KUNIT_CASE_SLOW(memcpy_large_test), |
555 | KUNIT_CASE_SLOW(memmove_test), |
556 | KUNIT_CASE_SLOW(memmove_large_test), |
557 | KUNIT_CASE_SLOW(memmove_overlap_test), |
558 | KUNIT_CASE(strtomem_test), |
559 | {} |
560 | }; |
561 | |
562 | static struct kunit_suite memcpy_test_suite = { |
563 | .name = "memcpy" , |
564 | .test_cases = memcpy_test_cases, |
565 | }; |
566 | |
567 | kunit_test_suite(memcpy_test_suite); |
568 | |
569 | MODULE_LICENSE("GPL" ); |
570 | |