1 | // Trying to satisfy clippy here is hopeless |
2 | #![allow (clippy::style)] |
3 | |
// Local stand-in for C's `int`: 16 bits on 16-bit pointer-width targets
// (e.g. MSP430/AVR-class), 32 bits everywhere else.
// NOTE(review): equating `int` width with pointer width is an approximation —
// confirm against the set of targets this crate is built for.
#[allow (warnings)]
#[cfg (target_pointer_width = "16" )]
type c_int = i16;
#[allow (warnings)]
#[cfg (not(target_pointer_width = "16" ))]
type c_int = i32;
10 | |
11 | use core::intrinsics::{atomic_load_unordered, atomic_store_unordered, exact_div}; |
12 | use core::mem; |
13 | use core::ops::{BitOr, Shl}; |
14 | |
// memcpy/memmove/memset have optimized implementations on some architectures.
// On x86_64 (unless the "no-asm" feature is enabled) the module is redirected
// to `x86_64.rs`; otherwise `mod impls;` resolves to the default `impls.rs`
// sibling file with the generic implementations.
#[cfg_attr (
    all(not(feature = "no-asm" ), target_arch = "x86_64" ),
    path = "x86_64.rs"
)]
mod impls;
21 | |
intrinsics! {
    // These symbols are emitted `weak` everywhere except windows-gnu, so a
    // real libc definition wins at link time when one is present.
    // NOTE(review): the windows-gnu exclusion presumably works around weak
    // symbol handling in the MinGW toolchain — confirm.
    #[cfg_attr(not(all(target_os = "windows" , target_env = "gnu" )), weak)]
    #[mem_builtin]
    pub unsafe extern "C" fn memcpy(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
        // C `memcpy`: copies `n` bytes forward and returns `dest`.
        // Caller must guarantee the buffers do not overlap.
        impls::copy_forward(dest, src, n);
        dest
    }

    #[cfg_attr(not(all(target_os = "windows" , target_env = "gnu" )), weak)]
    #[mem_builtin]
    pub unsafe extern "C" fn memmove(dest: *mut u8, src: *const u8, n: usize) -> *mut u8 {
        // One wrapping subtraction decides the safe copy direction for
        // both overlapping and non-overlapping buffers.
        let delta = (dest as usize).wrapping_sub(src as usize);
        if delta >= n {
            // We can copy forwards because either dest is far enough ahead of src,
            // or src is ahead of dest (and delta overflowed).
            impls::copy_forward(dest, src, n);
        } else {
            // Ranges overlap with dest inside the source range: copy backward
            // so unread source bytes are not clobbered.
            impls::copy_backward(dest, src, n);
        }
        dest
    }

    #[cfg_attr(not(all(target_os = "windows" , target_env = "gnu" )), weak)]
    #[mem_builtin]
    pub unsafe extern "C" fn memset(s: *mut u8, c: crate::mem::c_int, n: usize) -> *mut u8 {
        // `c` is `c_int` to match the C prototype, but only its low byte
        // is written (`c as u8`).
        impls::set_bytes(s, c as u8, n);
        s
    }

    #[cfg_attr(not(all(target_os = "windows" , target_env = "gnu" )), weak)]
    #[mem_builtin]
    pub unsafe extern "C" fn memcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
        // Lexicographic byte comparison; sign of the result follows the
        // first differing byte pair.
        impls::compare_bytes(s1, s2, n)
    }

    #[cfg_attr(not(all(target_os = "windows" , target_env = "gnu" )), weak)]
    #[mem_builtin]
    pub unsafe extern "C" fn bcmp(s1: *const u8, s2: *const u8, n: usize) -> i32 {
        // `bcmp` only needs zero/non-zero, so delegating to `memcmp` is a
        // valid (if slightly stronger) implementation.
        memcmp(s1, s2, n)
    }

    #[cfg_attr(not(all(target_os = "windows" , target_env = "gnu" )), weak)]
    #[mem_builtin]
    pub unsafe extern "C" fn strlen(s: *const core::ffi::c_char) -> usize {
        // Length of a NUL-terminated C string, excluding the terminator.
        impls::c_string_length(s)
    }
}
69 | |
70 | // `bytes` must be a multiple of `mem::size_of::<T>()` |
71 | #[cfg_attr (not(target_has_atomic_load_store = "8" ), allow(dead_code))] |
72 | fn memcpy_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) { |
73 | unsafe { |
74 | let n: usize = exact_div(bytes, mem::size_of::<T>()); |
75 | let mut i: usize = 0; |
76 | while i < n { |
77 | atomic_store_unordered(dest.add(count:i), atomic_load_unordered(src.add(count:i))); |
78 | i += 1; |
79 | } |
80 | } |
81 | } |
82 | |
83 | // `bytes` must be a multiple of `mem::size_of::<T>()` |
84 | #[cfg_attr (not(target_has_atomic_load_store = "8" ), allow(dead_code))] |
85 | fn memmove_element_unordered_atomic<T: Copy>(dest: *mut T, src: *const T, bytes: usize) { |
86 | unsafe { |
87 | let n: usize = exact_div(bytes, mem::size_of::<T>()); |
88 | if src < dest as *const T { |
89 | // copy from end |
90 | let mut i: usize = n; |
91 | while i != 0 { |
92 | i -= 1; |
93 | atomic_store_unordered(dest.add(count:i), atomic_load_unordered(src.add(count:i))); |
94 | } |
95 | } else { |
96 | // copy from beginning |
97 | let mut i: usize = 0; |
98 | while i < n { |
99 | atomic_store_unordered(dest.add(count:i), atomic_load_unordered(src.add(count:i))); |
100 | i += 1; |
101 | } |
102 | } |
103 | } |
104 | } |
105 | |
106 | // `T` must be a primitive integer type, and `bytes` must be a multiple of `mem::size_of::<T>()` |
107 | #[cfg_attr (not(target_has_atomic_load_store = "8" ), allow(dead_code))] |
108 | fn memset_element_unordered_atomic<T>(s: *mut T, c: u8, bytes: usize) |
109 | where |
110 | T: Copy + From<u8> + Shl<u32, Output = T> + BitOr<T, Output = T>, |
111 | { |
112 | unsafe { |
113 | let n: usize = exact_div(bytes, mem::size_of::<T>()); |
114 | |
115 | // Construct a value of type `T` consisting of repeated `c` |
116 | // bytes, to let us ensure we write each `T` atomically. |
117 | let mut x = T::from(c); |
118 | let mut i: i32 = 1; |
119 | while i < mem::size_of::<T>() { |
120 | x = x << 8 | T::from(c); |
121 | i += 1; |
122 | } |
123 | |
124 | // Write it to `s` |
125 | let mut i: usize = 0; |
126 | while i < n { |
127 | atomic_store_unordered(s.add(count:i), x); |
128 | i += 1; |
129 | } |
130 | } |
131 | } |
132 | |
intrinsics! {
    // Monomorphized shims whose symbol names match LLVM's element-wise
    // unordered-atomic memory intrinsics (`__llvm_mem{cpy,move,set}_element_
    // unordered_atomic_N`), one per element size 1/2/4/8/16 bytes. Each is
    // gated on the target supporting atomic load/store at that width.
    #[cfg (target_has_atomic_load_store = "8" )]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg (target_has_atomic_load_store = "16" )]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg (target_has_atomic_load_store = "32" )]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg (target_has_atomic_load_store = "64" )]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg (target_has_atomic_load_store = "128" )]
    pub unsafe extern "C" fn __llvm_memcpy_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
        memcpy_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg (target_has_atomic_load_store = "8" )]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_1(dest: *mut u8, src: *const u8, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg (target_has_atomic_load_store = "16" )]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_2(dest: *mut u16, src: *const u16, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg (target_has_atomic_load_store = "32" )]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_4(dest: *mut u32, src: *const u32, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg (target_has_atomic_load_store = "64" )]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_8(dest: *mut u64, src: *const u64, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }
    #[cfg (target_has_atomic_load_store = "128" )]
    pub unsafe extern "C" fn __llvm_memmove_element_unordered_atomic_16(dest: *mut u128, src: *const u128, bytes: usize) -> () {
        memmove_element_unordered_atomic(dest, src, bytes);
    }

    #[cfg (target_has_atomic_load_store = "8" )]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_1(s: *mut u8, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg (target_has_atomic_load_store = "16" )]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_2(s: *mut u16, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg (target_has_atomic_load_store = "32" )]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_4(s: *mut u32, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg (target_has_atomic_load_store = "64" )]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_8(s: *mut u64, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
    #[cfg (target_has_atomic_load_store = "128" )]
    pub unsafe extern "C" fn __llvm_memset_element_unordered_atomic_16(s: *mut u128, c: u8, bytes: usize) -> () {
        memset_element_unordered_atomic(s, c, bytes);
    }
}
197 | |