1 | //! Rust bindings to the `jemalloc` C library. |
2 | //! |
//! `jemalloc` is a general purpose memory allocator; its documentation
4 | //! can be found here: |
5 | //! |
6 | //! * [API documentation][jemalloc_docs] |
7 | //! * [Wiki][jemalloc_wiki] (design documents, presentations, profiling, debugging, tuning, ...) |
8 | //! |
9 | //! `jemalloc` exposes both a standard and a non-standard API. |
10 | //! |
11 | //! # Standard API |
12 | //! |
//! The standard API includes [`malloc`], [`calloc`], [`realloc`], and
//! [`free`], which conform to ISO/IEC 9899:1990 (“ISO C90”);
//! [`posix_memalign`], which conforms to POSIX.1-2016; and
//! [`aligned_alloc`].
17 | //! |
//! Note that these standards leave some details as _implementation defined_.
//! These docs describe `jemalloc`'s behavior, but keep in mind that other
//! standard-conforming implementations of these functions in other allocators
//! might behave slightly differently.
22 | //! |
23 | //! # Non-Standard API |
24 | //! |
25 | //! The non-standard API includes: [`mallocx`], [`rallocx`], [`xallocx`], |
26 | //! [`sallocx`], [`dallocx`], [`sdallocx`], and [`nallocx`]. These functions all |
27 | //! have a `flags` argument that can be used to specify options. Use bitwise or |
28 | //! `|` to specify one or more of the following: [`MALLOCX_LG_ALIGN`], |
29 | //! [`MALLOCX_ALIGN`], [`MALLOCX_ZERO`], [`MALLOCX_TCACHE`], |
30 | //! [`MALLOCX_TCACHE_NONE`], and [`MALLOCX_ARENA`]. |
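//!
//! For example, a `flags` value requesting a 64-byte-aligned, zero-initialized
//! allocation could be built as follows (a minimal sketch; the crate name
//! `jemalloc_sys` is assumed here and in the examples below):
//!
//! ```ignore
//! use jemalloc_sys::{MALLOCX_ALIGN, MALLOCX_ZERO};
//!
//! // Combine flags with bitwise or; the result is passed to `mallocx` and friends.
//! let flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO;
//! ```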
31 | //! |
32 | //! # Environment variables |
33 | //! |
34 | //! The `MALLOC_CONF` environment variable affects the execution of the allocation functions. |
35 | //! |
//! For the documentation of the `MALLCTL` namespace, see the [jemalloc
//! documentation][jemalloc_mallctl].
38 | //! |
39 | //! [jemalloc_docs]: http://jemalloc.net/jemalloc.3.html |
40 | //! [jemalloc_wiki]: https://github.com/jemalloc/jemalloc/wiki |
41 | //! [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace |
42 | #![no_std ] |
43 | #![allow (non_snake_case, non_camel_case_types)] |
44 | #![cfg_attr ( |
45 | feature = "cargo-clippy" , |
46 | allow(clippy::cast_possible_truncation, clippy::cast_possible_wrap) |
47 | )] |
48 | // TODO: rename the following lint on next minor bump |
49 | #![allow (renamed_and_removed_lints)] |
50 | #![deny (missing_docs, broken_intra_doc_links)] |
51 | |
52 | use libc::{c_char, c_int, c_uint, c_void, size_t}; |
53 | |
// jemalloc uses `stdbool.h` to define `bool`, for which the Rust equivalent is `bool`.
// However, jemalloc also has its own `stdbool.h` that it uses when compiling with MSVC,
// and this header defines `bool` as `BOOL`, which in turn is `int`.
57 | #[cfg (target_env = "msvc" )] |
58 | type c_bool = c_int; |
59 | #[cfg (not(target_env = "msvc" ))] |
60 | type c_bool = bool; |
61 | |
62 | /// Align the memory allocation to start at an address that is a |
63 | /// multiple of `1 << la`. |
64 | /// |
65 | /// # Safety |
66 | /// |
67 | /// It does not validate that `la` is within the valid range. |
68 | #[inline ] |
69 | pub const fn MALLOCX_LG_ALIGN(la: usize) -> c_int { |
70 | la as c_int |
71 | } |
72 | |
73 | /// Align the memory allocation to start at an address that is a multiple of `align`, |
/// where `align` is a power of two.
75 | /// |
76 | /// # Safety |
77 | /// |
/// This function does not validate that `align` is a power of two.
79 | #[inline ] |
pub const fn MALLOCX_ALIGN(align: usize) -> c_int {
    align.trailing_zeros() as c_int
82 | } |
83 | |
84 | /// Initialize newly allocated memory to contain zero bytes. |
85 | /// |
86 | /// In the growing reallocation case, the real size prior to reallocation |
87 | /// defines the boundary between untouched bytes and those that are initialized |
88 | /// to contain zero bytes. |
89 | /// |
90 | /// If this option is not set, newly allocated memory is uninitialized. |
91 | pub const MALLOCX_ZERO: c_int = 0x40; |
92 | |
93 | /// Use the thread-specific cache (_tcache_) specified by the identifier `tc`. |
94 | /// |
95 | /// # Safety |
96 | /// |
/// `tc` must have been acquired via the `tcache.create` mallctl. This function
98 | /// does not validate that `tc` specifies a valid identifier. |
99 | #[inline ] |
100 | pub const fn MALLOCX_TCACHE(tc: usize) -> c_int { |
101 | tc.wrapping_add(2).wrapping_shl(8) as c_int |
102 | } |
103 | |
104 | /// Do not use a thread-specific cache (_tcache_). |
105 | /// |
106 | /// Unless `MALLOCX_TCACHE(tc)` or `MALLOCX_TCACHE_NONE` is specified, an |
107 | /// automatically managed _tcache_ will be used under many circumstances. |
108 | /// |
109 | /// # Safety |
110 | /// |
111 | /// This option cannot be used in the same `flags` argument as |
112 | /// `MALLOCX_TCACHE(tc)`. |
113 | // FIXME: This should just be a const. |
114 | pub const MALLOCX_TCACHE_NONE: c_int = MALLOCX_TCACHE((-1isize) as usize); |
115 | |
116 | /// Use the arena specified by the index `a`. |
117 | /// |
118 | /// This option has no effect for regions that were allocated via an arena other |
119 | /// than the one specified. |
120 | /// |
121 | /// # Safety |
122 | /// |
123 | /// This function does not validate that `a` specifies an arena index in the |
124 | /// valid range. |
125 | #[inline ] |
126 | pub const fn MALLOCX_ARENA(a: usize) -> c_int { |
127 | (a as c_int).wrapping_add(1).wrapping_shl(20) |
128 | } |
129 | |
130 | extern "C" { |
131 | /// Allocates `size` bytes of uninitialized memory. |
132 | /// |
133 | /// It returns a pointer to the start (lowest byte address) of the allocated |
134 | /// space. This pointer is suitably aligned so that it may be assigned to a |
135 | /// pointer to any type of object and then used to access such an object in |
136 | /// the space allocated until the space is explicitly deallocated. Each |
137 | /// yielded pointer points to an object disjoint from any other object. |
138 | /// |
139 | /// If the `size` of the space requested is zero, either a null pointer is |
140 | /// returned, or the behavior is as if the `size` were some nonzero value, |
141 | /// except that the returned pointer shall not be used to access an object. |
142 | /// |
143 | /// # Errors |
144 | /// |
145 | /// If the space cannot be allocated, a null pointer is returned and `errno` |
146 | /// is set to `ENOMEM`. |
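    ///
    /// # Examples
    ///
    /// A minimal allocate-and-free sketch (not compiled as a doctest; it
    /// assumes the crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use jemalloc_sys::{malloc, free};
    ///
    /// unsafe {
    ///     // Request 64 bytes of uninitialized memory.
    ///     let ptr = malloc(64);
    ///     if !ptr.is_null() {
    ///         // ... use the allocation ...
    ///         free(ptr);
    ///     }
    /// }
    /// ```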
147 | #[cfg_attr (prefixed, link_name = "_rjem_malloc" )] |
148 | pub fn malloc(size: size_t) -> *mut c_void; |
149 | /// Allocates zero-initialized space for an array of `number` objects, each |
150 | /// of whose size is `size`. |
151 | /// |
152 | /// The result is identical to calling [`malloc`] with an argument of |
153 | /// `number * size`, with the exception that the allocated memory is |
154 | /// explicitly initialized to _zero_ bytes. |
155 | /// |
156 | /// Note: zero-initialized memory need not be the same as the |
157 | /// representation of floating-point zero or a null pointer constant. |
158 | #[cfg_attr (prefixed, link_name = "_rjem_calloc" )] |
159 | pub fn calloc(number: size_t, size: size_t) -> *mut c_void; |
160 | |
161 | /// Allocates `size` bytes of memory at an address which is a multiple of |
162 | /// `alignment` and is placed in `*ptr`. |
163 | /// |
164 | /// If `size` is zero, then the value placed in `*ptr` is either null, or |
165 | /// the behavior is as if the `size` were some nonzero value, except that |
166 | /// the returned pointer shall not be used to access an object. |
167 | /// |
168 | /// # Errors |
169 | /// |
170 | /// On success, it returns zero. On error, the value of `errno` is _not_ set, |
171 | /// `*ptr` is not modified, and the return values can be: |
172 | /// |
173 | /// - `EINVAL`: the `alignment` argument was not a power-of-two or was not a multiple of |
174 | /// `mem::size_of::<*const c_void>()`. |
175 | /// - `ENOMEM`: there was insufficient memory to fulfill the allocation request. |
176 | /// |
177 | /// # Safety |
178 | /// |
179 | /// The behavior is _undefined_ if: |
180 | /// |
181 | /// * `ptr` is null. |
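    ///
    /// # Examples
    ///
    /// A sketch of requesting a 4096-byte-aligned region (not compiled as a
    /// doctest; it assumes the crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use core::ptr;
    /// use libc::c_void;
    /// use jemalloc_sys::{posix_memalign, free};
    ///
    /// unsafe {
    ///     let mut ptr: *mut c_void = ptr::null_mut();
    ///     // `alignment` must be a power of two and a multiple of `size_of::<*const c_void>()`.
    ///     if posix_memalign(&mut ptr, 4096, 1024) == 0 {
    ///         // ... use the 4096-byte-aligned allocation ...
    ///         free(ptr);
    ///     }
    /// }
    /// ```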
182 | #[cfg_attr (prefixed, link_name = "_rjem_posix_memalign" )] |
183 | pub fn posix_memalign(ptr: *mut *mut c_void, alignment: size_t, size: size_t) -> c_int; |
184 | |
185 | /// Allocates `size` bytes of memory at an address which is a multiple of |
186 | /// `alignment`. |
187 | /// |
188 | /// If the `size` of the space requested is zero, either a null pointer is |
189 | /// returned, or the behavior is as if the `size` were some nonzero value, |
190 | /// except that the returned pointer shall not be used to access an object. |
191 | /// |
192 | /// # Errors |
193 | /// |
194 | /// Returns null if the request fails. |
195 | /// |
196 | /// # Safety |
197 | /// |
198 | /// The behavior is _undefined_ if: |
199 | /// |
200 | /// * `alignment` is not a power-of-two |
201 | /// * `size` is not an integral multiple of `alignment` |
202 | #[cfg_attr (prefixed, link_name = "_rjem_aligned_alloc" )] |
203 | pub fn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void; |
204 | |
205 | /// Resizes the previously-allocated memory region referenced by `ptr` to |
206 | /// `size` bytes. |
207 | /// |
208 | /// Deallocates the old object pointed to by `ptr` and returns a pointer to |
209 | /// a new object that has the size specified by `size`. The contents of the |
210 | /// new object are the same as that of the old object prior to deallocation, |
211 | /// up to the lesser of the new and old sizes. |
212 | /// |
213 | /// The memory in the new object beyond the size of the old object is |
214 | /// uninitialized. |
215 | /// |
216 | /// The returned pointer to a new object may have the same value as a |
217 | /// pointer to the old object, but [`realloc`] may move the memory |
218 | /// allocation, resulting in a different return value than `ptr`. |
219 | /// |
220 | /// If `ptr` is null, [`realloc`] behaves identically to [`malloc`] for the |
221 | /// specified size. |
222 | /// |
223 | /// If the size of the space requested is zero, the behavior is |
224 | /// implementation-defined: either a null pointer is returned, or the |
225 | /// behavior is as if the size were some nonzero value, except that the |
    /// returned pointer shall not be used to access an object.
227 | /// |
228 | /// # Errors |
229 | /// |
230 | /// If memory for the new object cannot be allocated, the old object is not |
231 | /// deallocated, its value is unchanged, [`realloc`] returns null, and |
232 | /// `errno` is set to `ENOMEM`. |
233 | /// |
234 | /// # Safety |
235 | /// |
236 | /// The behavior is _undefined_ if: |
237 | /// |
238 | /// * `ptr` does not match a pointer previously returned by the memory |
239 | /// allocation functions of this crate, or |
240 | /// * the memory region referenced by `ptr` has been deallocated. |
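    ///
    /// # Examples
    ///
    /// A sketch of growing an allocation (not compiled as a doctest; it
    /// assumes the crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use jemalloc_sys::{malloc, realloc, free};
    ///
    /// unsafe {
    ///     let small = malloc(16);
    ///     if !small.is_null() {
    ///         // `realloc` may move the allocation; only the returned pointer is valid afterwards.
    ///         let big = realloc(small, 4096);
    ///         if big.is_null() {
    ///             // Growing failed: `small` is still valid and must still be freed.
    ///             free(small);
    ///         } else {
    ///             free(big);
    ///         }
    ///     }
    /// }
    /// ```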
241 | #[cfg_attr (prefixed, link_name = "_rjem_realloc" )] |
242 | pub fn realloc(ptr: *mut c_void, size: size_t) -> *mut c_void; |
243 | |
    /// Deallocates the previously-allocated memory region referenced by `ptr`.
245 | /// |
246 | /// This makes the space available for future allocations. |
247 | /// |
248 | /// If `ptr` is null, no action occurs. |
249 | /// |
250 | /// # Safety |
251 | /// |
252 | /// The behavior is _undefined_ if: |
253 | /// |
254 | /// * `ptr` does not match a pointer earlier returned by the memory |
255 | /// allocation functions of this crate, or |
256 | /// * the memory region referenced by `ptr` has been deallocated. |
257 | #[cfg_attr (prefixed, link_name = "_rjem_free" )] |
258 | pub fn free(ptr: *mut c_void); |
259 | |
260 | /// Allocates at least `size` bytes of memory according to `flags`. |
261 | /// |
262 | /// It returns a pointer to the start (lowest byte address) of the allocated |
263 | /// space. This pointer is suitably aligned so that it may be assigned to a |
264 | /// pointer to any type of object and then used to access such an object in |
265 | /// the space allocated until the space is explicitly deallocated. Each |
266 | /// yielded pointer points to an object disjoint from any other object. |
267 | /// |
268 | /// # Errors |
269 | /// |
270 | /// On success it returns a non-null pointer. A null pointer return value |
271 | /// indicates that insufficient contiguous memory was available to service |
272 | /// the allocation request. |
273 | /// |
274 | /// # Safety |
275 | /// |
276 | /// The behavior is _undefined_ if `size == 0`. |
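    ///
    /// # Examples
    ///
    /// A sketch of an aligned, zeroed allocation (not compiled as a doctest;
    /// it assumes the crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use jemalloc_sys::{mallocx, dallocx, MALLOCX_ALIGN, MALLOCX_ZERO};
    ///
    /// unsafe {
    ///     let flags = MALLOCX_ALIGN(64) | MALLOCX_ZERO;
    ///     // `size` must be non-zero.
    ///     let ptr = mallocx(256, flags);
    ///     if !ptr.is_null() {
    ///         dallocx(ptr, flags);
    ///     }
    /// }
    /// ```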
277 | #[cfg_attr (prefixed, link_name = "_rjem_mallocx" )] |
278 | pub fn mallocx(size: size_t, flags: c_int) -> *mut c_void; |
279 | |
280 | /// Resizes the previously-allocated memory region referenced by `ptr` to be |
281 | /// at least `size` bytes. |
282 | /// |
283 | /// Deallocates the old object pointed to by `ptr` and returns a pointer to |
284 | /// a new object that has the size specified by `size`. The contents of the |
285 | /// new object are the same as that of the old object prior to deallocation, |
286 | /// up to the lesser of the new and old sizes. |
287 | /// |
    /// The memory in the new object beyond the size of the old object is
289 | /// obtained according to `flags` (it might be uninitialized). |
290 | /// |
291 | /// The returned pointer to a new object may have the same value as a |
292 | /// pointer to the old object, but [`rallocx`] may move the memory |
293 | /// allocation, resulting in a different return value than `ptr`. |
294 | /// |
295 | /// # Errors |
296 | /// |
297 | /// On success it returns a non-null pointer. A null pointer return value |
298 | /// indicates that insufficient contiguous memory was available to service |
299 | /// the allocation request. In this case, the old object is not |
300 | /// deallocated, and its value is unchanged. |
301 | /// |
302 | /// # Safety |
303 | /// |
    /// The behavior is _undefined_ if:
305 | /// |
306 | /// * `size == 0`, or |
307 | /// * `ptr` does not match a pointer earlier returned by |
308 | /// the memory allocation functions of this crate, or |
309 | /// * the memory region referenced by `ptr` has been deallocated. |
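    ///
    /// # Examples
    ///
    /// A sketch of growing an allocation with zeroed new bytes (not compiled
    /// as a doctest; it assumes the crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use jemalloc_sys::{mallocx, rallocx, dallocx, MALLOCX_ZERO};
    ///
    /// unsafe {
    ///     let ptr = mallocx(64, 0);
    ///     if !ptr.is_null() {
    ///         // With `MALLOCX_ZERO`, the bytes beyond the old real size are zero-initialized.
    ///         let grown = rallocx(ptr, 256, MALLOCX_ZERO);
    ///         if grown.is_null() {
    ///             // Reallocation failed: `ptr` is still valid.
    ///             dallocx(ptr, 0);
    ///         } else {
    ///             dallocx(grown, 0);
    ///         }
    ///     }
    /// }
    /// ```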
310 | #[cfg_attr (prefixed, link_name = "_rjem_rallocx" )] |
311 | pub fn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; |
312 | |
313 | /// Resizes the previously-allocated memory region referenced by `ptr` _in |
314 | /// place_ to be at least `size` bytes, returning the real size of the |
315 | /// allocation. |
316 | /// |
317 | /// Deallocates the old object pointed to by `ptr` and sets `ptr` to a new |
    /// object that has the size returned; the old and new objects share the same
319 | /// base address. The contents of the new object are the same as that of the |
320 | /// old object prior to deallocation, up to the lesser of the new and old |
321 | /// sizes. |
322 | /// |
323 | /// If `extra` is non-zero, an attempt is made to resize the allocation to |
324 | /// be at least `size + extra` bytes. Inability to allocate the `extra` |
325 | /// bytes will not by itself result in failure to resize. |
326 | /// |
327 | /// The memory in the new object beyond the size of the old object is |
328 | /// obtained according to `flags` (it might be uninitialized). |
329 | /// |
330 | /// # Errors |
331 | /// |
332 | /// If the allocation cannot be adequately grown in place up to `size`, the |
333 | /// size returned is smaller than `size`. |
334 | /// |
335 | /// Note: |
336 | /// |
337 | /// * the size value returned can be larger than the size requested during |
338 | /// allocation |
339 | /// * when shrinking an allocation, use the size returned to determine |
340 | /// whether the allocation was shrunk sufficiently or not. |
341 | /// |
342 | /// # Safety |
343 | /// |
344 | /// The behavior is _undefined_ if: |
345 | /// |
346 | /// * `size == 0`, or |
347 | /// * `size + extra > size_t::max_value()`, or |
348 | /// * `ptr` does not match a pointer earlier returned by the memory |
349 | /// allocation functions of this crate, or |
350 | /// * the memory region referenced by `ptr` has been deallocated. |
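    ///
    /// # Examples
    ///
    /// A sketch of attempting an in-place grow (not compiled as a doctest; it
    /// assumes the crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use jemalloc_sys::{mallocx, xallocx, dallocx};
    ///
    /// unsafe {
    ///     let ptr = mallocx(64, 0);
    ///     if !ptr.is_null() {
    ///         // Try to grow to at least 128 bytes without moving `ptr`.
    ///         let real_size = xallocx(ptr, 128, 0, 0);
    ///         if real_size >= 128 {
    ///             // The in-place grow succeeded; `real_size` bytes are usable.
    ///         }
    ///         dallocx(ptr, 0);
    ///     }
    /// }
    /// ```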
351 | #[cfg_attr (prefixed, link_name = "_rjem_xallocx" )] |
352 | pub fn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; |
353 | |
354 | /// Returns the real size of the previously-allocated memory region |
355 | /// referenced by `ptr`. |
356 | /// |
357 | /// The value may be larger than the size requested on allocation. |
358 | /// |
359 | /// # Safety |
360 | /// |
361 | /// The behavior is _undefined_ if: |
362 | /// |
363 | /// * `ptr` does not match a pointer earlier returned by the memory |
364 | /// allocation functions of this crate, or |
365 | /// * the memory region referenced by `ptr` has been deallocated. |
366 | #[cfg_attr (prefixed, link_name = "_rjem_sallocx" )] |
367 | pub fn sallocx(ptr: *const c_void, flags: c_int) -> size_t; |
368 | |
    /// Deallocates the previously-allocated memory region referenced by `ptr`.
370 | /// |
371 | /// This makes the space available for future allocations. |
372 | /// |
373 | /// # Safety |
374 | /// |
375 | /// The behavior is _undefined_ if: |
376 | /// |
377 | /// * `ptr` does not match a pointer earlier returned by the memory |
378 | /// allocation functions of this crate, or |
379 | /// * `ptr` is null, or |
380 | /// * the memory region referenced by `ptr` has been deallocated. |
381 | #[cfg_attr (prefixed, link_name = "_rjem_dallocx" )] |
382 | pub fn dallocx(ptr: *mut c_void, flags: c_int); |
383 | |
    /// Deallocates the previously-allocated memory region referenced by `ptr` with
    /// a `size` hint.
386 | /// |
387 | /// This makes the space available for future allocations. |
388 | /// |
389 | /// # Safety |
390 | /// |
391 | /// The behavior is _undefined_ if: |
392 | /// |
393 | /// * `size` is not in range `[req_size, alloc_size]`, where `req_size` is |
394 | /// the size requested when performing the allocation, and `alloc_size` is |
395 | /// the allocation size returned by [`nallocx`], [`sallocx`], or |
396 | /// [`xallocx`], |
397 | /// * `ptr` does not match a pointer earlier returned by the memory |
398 | /// allocation functions of this crate, or |
399 | /// * `ptr` is null, or |
400 | /// * the memory region referenced by `ptr` has been deallocated. |
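    ///
    /// # Examples
    ///
    /// A sketch of a size-aware deallocation (not compiled as a doctest; it
    /// assumes the crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use jemalloc_sys::{mallocx, sdallocx};
    ///
    /// unsafe {
    ///     let requested = 100;
    ///     let ptr = mallocx(requested, 0);
    ///     if !ptr.is_null() {
    ///         // Any size in `[requested, real_size]` is a valid hint; the
    ///         // requested size is the simplest choice.
    ///         sdallocx(ptr, requested, 0);
    ///     }
    /// }
    /// ```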
401 | #[cfg_attr (prefixed, link_name = "_rjem_sdallocx" )] |
402 | pub fn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); |
403 | |
404 | /// Returns the real size of the allocation that would result from a |
405 | /// [`mallocx`] function call with the same arguments. |
406 | /// |
407 | /// # Errors |
408 | /// |
409 | /// If the inputs exceed the maximum supported size class and/or alignment |
410 | /// it returns zero. |
411 | /// |
412 | /// # Safety |
413 | /// |
414 | /// The behavior is _undefined_ if `size == 0`. |
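    ///
    /// # Examples
    ///
    /// A sketch of querying the real allocation size without allocating (not
    /// compiled as a doctest; it assumes the crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use jemalloc_sys::{nallocx, MALLOCX_ALIGN};
    ///
    /// unsafe {
    ///     // Real size of a 100-byte, 64-byte-aligned allocation.
    ///     let real_size = nallocx(100, MALLOCX_ALIGN(64));
    ///     assert!(real_size == 0 || real_size >= 100);
    /// }
    /// ```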
415 | #[cfg_attr (prefixed, link_name = "_rjem_nallocx" )] |
416 | pub fn nallocx(size: size_t, flags: c_int) -> size_t; |
417 | |
418 | /// Returns the real size of the previously-allocated memory region |
419 | /// referenced by `ptr`. |
420 | /// |
421 | /// The value may be larger than the size requested on allocation. |
422 | /// |
423 | /// Although the excess bytes can be overwritten by the application without |
424 | /// ill effects, this is not good programming practice: the number of excess |
425 | /// bytes in an allocation depends on the underlying implementation. |
426 | /// |
427 | /// The main use of this function is for debugging and introspection. |
428 | /// |
429 | /// # Errors |
430 | /// |
431 | /// If `ptr` is null, 0 is returned. |
432 | /// |
433 | /// # Safety |
434 | /// |
435 | /// The behavior is _undefined_ if: |
436 | /// |
437 | /// * `ptr` does not match a pointer earlier returned by the memory |
438 | /// allocation functions of this crate, or |
439 | /// * the memory region referenced by `ptr` has been deallocated. |
440 | #[cfg_attr (prefixed, link_name = "_rjem_malloc_usable_size" )] |
441 | pub fn malloc_usable_size(ptr: *const c_void) -> size_t; |
442 | |
443 | /// General interface for introspecting the memory allocator, as well as |
444 | /// setting modifiable parameters and triggering actions. |
445 | /// |
446 | /// The period-separated name argument specifies a location in a |
447 | /// tree-structured namespace ([see jemalloc's `MALLCTL` |
448 | /// documentation][jemalloc_mallctl]). |
449 | /// |
450 | /// To read a value, pass a pointer via `oldp` to adequate space to contain |
    /// the value, and a pointer to its length via `oldlenp`; otherwise pass
452 | /// null and null. Similarly, to write a value, pass a pointer to the value |
453 | /// via `newp`, and its length via `newlen`; otherwise pass null and 0. |
454 | /// |
455 | /// # Errors |
456 | /// |
457 | /// Returns `0` on success, otherwise returns: |
458 | /// |
459 | /// * `EINVAL`: if `newp` is not null, and `newlen` is too large or too |
460 | /// small. Alternatively, `*oldlenp` is too large or too small; in this case |
461 | /// as much data as possible are read despite the error. |
462 | /// |
    /// * `ENOENT`: `name` or `mib` specifies an unknown/invalid value.
464 | /// |
465 | /// * `EPERM`: Attempt to read or write void value, or attempt to write read-only value. |
466 | /// |
467 | /// * `EAGAIN`: A memory allocation failure occurred. |
468 | /// |
469 | /// * `EFAULT`: An interface with side effects failed in some way not |
470 | /// directly related to `mallctl` read/write processing. |
471 | /// |
472 | /// [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace |
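    ///
    /// # Examples
    ///
    /// A sketch of reading the number of arenas via the `"arenas.narenas"`
    /// mallctl (not compiled as a doctest; it assumes the crate is linked as
    /// `jemalloc_sys`):
    ///
    /// ```ignore
    /// use core::{mem, ptr};
    /// use libc::{c_char, c_uint, c_void};
    /// use jemalloc_sys::mallctl;
    ///
    /// unsafe {
    ///     let mut narenas: c_uint = 0;
    ///     let mut len = mem::size_of::<c_uint>();
    ///     let ret = mallctl(
    ///         b"arenas.narenas\0".as_ptr() as *const c_char,
    ///         &mut narenas as *mut _ as *mut c_void,
    ///         &mut len,
    ///         ptr::null_mut(),
    ///         0,
    ///     );
    ///     assert_eq!(ret, 0);
    /// }
    /// ```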
473 | #[cfg_attr (prefixed, link_name = "_rjem_mallctl" )] |
474 | pub fn mallctl( |
475 | name: *const c_char, |
476 | oldp: *mut c_void, |
477 | oldlenp: *mut size_t, |
478 | newp: *mut c_void, |
479 | newlen: size_t, |
480 | ) -> c_int; |
481 | /// Translates a name to a “Management Information Base” (MIB) that can be |
482 | /// passed repeatedly to [`mallctlbymib`]. |
483 | /// |
484 | /// This avoids repeated name lookups for applications that repeatedly query |
485 | /// the same portion of the namespace. |
486 | /// |
487 | /// On success, `mibp` contains an array of `*miblenp` integers, where |
488 | /// `*miblenp` is the lesser of the number of components in name and the |
489 | /// input value of `*miblenp`. Thus it is possible to pass a `*miblenp` that is |
490 | /// smaller than the number of period-separated name components, which |
491 | /// results in a partial MIB that can be used as the basis for constructing |
    /// a complete MIB. For name components that are integers (e.g. the `2` in
    /// `arenas.bin.2.size`), the corresponding MIB component will always be that
494 | /// integer. |
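    ///
    /// # Examples
    ///
    /// A sketch of resolving `"arenas.narenas"` to a MIB once and then reading
    /// it through [`mallctlbymib`] (not compiled as a doctest; it assumes the
    /// crate is linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use core::{mem, ptr};
    /// use libc::{c_char, c_uint, c_void, size_t};
    /// use jemalloc_sys::{mallctlbymib, mallctlnametomib};
    ///
    /// unsafe {
    ///     // "arenas.narenas" has two period-separated components.
    ///     let mut mib = [0 as size_t; 2];
    ///     let mut miblen = mib.len();
    ///     assert_eq!(
    ///         mallctlnametomib(
    ///             b"arenas.narenas\0".as_ptr() as *const c_char,
    ///             mib.as_mut_ptr(),
    ///             &mut miblen,
    ///         ),
    ///         0
    ///     );
    ///
    ///     let mut narenas: c_uint = 0;
    ///     let mut len = mem::size_of::<c_uint>();
    ///     assert_eq!(
    ///         mallctlbymib(
    ///             mib.as_ptr(),
    ///             miblen,
    ///             &mut narenas as *mut _ as *mut c_void,
    ///             &mut len,
    ///             ptr::null_mut(),
    ///             0,
    ///         ),
    ///         0
    ///     );
    /// }
    /// ```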
495 | #[cfg_attr (prefixed, link_name = "_rjem_mallctlnametomib" )] |
496 | pub fn mallctlnametomib(name: *const c_char, mibp: *mut size_t, miblenp: *mut size_t) -> c_int; |
497 | |
498 | /// Like [`mallctl`] but taking a `mib` as input instead of a name. |
499 | #[cfg_attr (prefixed, link_name = "_rjem_mallctlbymib" )] |
500 | pub fn mallctlbymib( |
501 | mib: *const size_t, |
502 | miblen: size_t, |
503 | oldp: *mut c_void, |
        oldlenp: *mut size_t,
505 | newp: *mut c_void, |
506 | newlen: size_t, |
507 | ) -> c_int; |
508 | |
509 | /// Writes summary statistics via the `write_cb` callback function pointer |
510 | /// and `cbopaque` data passed to `write_cb`, or [`malloc_message`] if `write_cb` |
511 | /// is null. |
512 | /// |
513 | /// The statistics are presented in human-readable form unless “J” |
514 | /// is specified as a character within the opts string, in which case the |
515 | /// statistics are presented in JSON format. |
516 | /// |
517 | /// This function can be called repeatedly. |
518 | /// |
519 | /// General information that never changes during execution can be omitted |
520 | /// by specifying `g` as a character within the opts string. |
521 | /// |
522 | /// Note that [`malloc_message`] uses the `mallctl*` functions internally, |
523 | /// so inconsistent statistics can be reported if multiple threads use these |
524 | /// functions simultaneously. |
525 | /// |
526 | /// If the Cargo feature `stats` is enabled, `m`, `d`, and `a` can be |
527 | /// specified to omit merged arena, destroyed merged arena, and per arena |
528 | /// statistics, respectively; `b` and `l` can be specified to omit per size |
529 | /// class statistics for bins and large objects, respectively; `x` can be |
530 | /// specified to omit all mutex statistics. Unrecognized characters are |
531 | /// silently ignored. |
532 | /// |
533 | /// Note that thread caching may prevent some statistics from being |
534 | /// completely up to date, since extra locking would be required to merge |
535 | /// counters that track thread cache operations. |
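    ///
    /// # Examples
    ///
    /// A sketch of printing the default human-readable report through
    /// [`malloc_message`] (not compiled as a doctest; it assumes the crate is
    /// linked as `jemalloc_sys`):
    ///
    /// ```ignore
    /// use core::ptr;
    /// use jemalloc_sys::malloc_stats_print;
    ///
    /// unsafe {
    ///     // No callback, no opaque data, and no options: use the defaults.
    ///     malloc_stats_print(None, ptr::null_mut(), ptr::null());
    /// }
    /// ```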
536 | #[cfg_attr (prefixed, link_name = "_rjem_malloc_stats_print" )] |
537 | pub fn malloc_stats_print( |
538 | write_cb: Option<unsafe extern "C" fn(*mut c_void, *const c_char)>, |
539 | cbopaque: *mut c_void, |
540 | opts: *const c_char, |
541 | ); |
542 | |
543 | /// Allows overriding the function which emits the text strings forming the |
544 | /// errors and warnings if for some reason the `STDERR_FILENO` file descriptor |
545 | /// is not suitable for this. |
546 | /// |
547 | /// [`malloc_message`] takes the `cbopaque` pointer argument that is null, |
548 | /// unless overridden by the arguments in a call to [`malloc_stats_print`], |
549 | /// followed by a string pointer. |
550 | /// |
551 | /// Please note that doing anything which tries to allocate memory in this |
552 | /// function is likely to result in a crash or deadlock. |
553 | #[cfg_attr (prefixed, link_name = "_rjem_malloc_message" )] |
554 | pub static mut malloc_message: |
555 | Option<unsafe extern "C" fn(cbopaque: *mut c_void, s: *const c_char)>; |
556 | |
557 | /// Compile-time string of configuration options. |
558 | /// |
559 | /// Once, when the first call is made to one of the memory allocation |
560 | /// routines, the allocator initializes its internals based in part on |
561 | /// various options that can be specified at compile- or run-time. |
562 | /// |
563 | /// The string specified via `--with-malloc-conf`, the string pointed to by |
564 | /// the global variable `malloc_conf`, the “name” of the file referenced by |
565 | /// the symbolic link named `/etc/malloc.conf`, and the value of the |
566 | /// environment variable `MALLOC_CONF`, will be interpreted, in that order, |
567 | /// from left to right as options. Note that `malloc_conf` may be read |
568 | /// before `main()` is entered, so the declaration of `malloc_conf` should |
569 | /// specify an initializer that contains the final value to be read by |
570 | /// `jemalloc`. |
571 | /// |
572 | /// `--with-malloc-conf` and `malloc_conf` are compile-time mechanisms, whereas |
573 | /// `/etc/malloc.conf` and `MALLOC_CONF` can be safely set any time prior to |
574 | /// program invocation. |
575 | /// |
576 | /// An options string is a comma-separated list of `option:value` pairs. |
577 | /// There is one key corresponding to each `opt.* mallctl` (see the `MALLCTL |
578 | /// NAMESPACE` section for options documentation). For example, |
579 | /// `abort:true,narenas:1` sets the `opt.abort` and `opt.narenas` options. |
580 | /// Some options have boolean values (`true`/`false`), others have integer |
581 | /// values (base `8`, `10`, or `16`, depending on prefix), and yet others |
582 | /// have raw string values. |
583 | #[cfg_attr (prefixed, link_name = "_rjem_malloc_conf" )] |
584 | pub static malloc_conf: Option<&'static c_char>; |
585 | } |
586 | |
587 | /// Extent lifetime management functions. |
588 | pub type extent_hooks_t = extent_hooks_s; |
589 | |
590 | // note: there are two structs here, one is used when compiling the crate normally, |
591 | // and the other one is behind the `--cfg jemallocator_docs` flag and used only |
592 | // when generating docs. |
593 | // |
// For the docs we want to use type aliases here, but `ctest` does not see through
595 | // them when generating the code to verify the FFI bindings, and it needs to |
596 | // be able to tell that these are `fn` types so that `Option<fn>` gets lowered |
597 | // to C function pointers. |
598 | |
599 | #[repr (C)] |
600 | #[cfg (not(jemallocator_docs))] |
601 | #[derive (Copy, Clone, Default)] |
602 | #[doc (hidden)] |
603 | #[allow (missing_docs)] |
604 | pub struct extent_hooks_s { |
605 | pub alloc: Option< |
606 | unsafe extern "C" fn( |
607 | *mut extent_hooks_t, |
608 | *mut c_void, |
609 | size_t, |
610 | size_t, |
611 | *mut c_bool, |
612 | *mut c_bool, |
613 | c_uint, |
614 | ) -> *mut c_void, |
615 | >, |
616 | pub dalloc: Option< |
617 | unsafe extern "C" fn(*mut extent_hooks_t, *mut c_void, size_t, c_bool, c_uint) -> c_bool, |
618 | >, |
619 | pub destroy: |
620 | Option<unsafe extern "C" fn(*mut extent_hooks_t, *mut c_void, size_t, c_bool, c_uint)>, |
621 | pub commit: Option< |
622 | unsafe extern "C" fn( |
623 | *mut extent_hooks_t, |
624 | *mut c_void, |
625 | size_t, |
626 | size_t, |
627 | size_t, |
628 | c_uint, |
629 | ) -> c_bool, |
630 | >, |
631 | pub decommit: Option< |
632 | unsafe extern "C" fn( |
633 | *mut extent_hooks_t, |
634 | *mut c_void, |
635 | size_t, |
636 | size_t, |
637 | size_t, |
638 | c_uint, |
639 | ) -> c_bool, |
640 | >, |
641 | pub purge_lazy: Option< |
642 | unsafe extern "C" fn( |
643 | *mut extent_hooks_t, |
644 | *mut c_void, |
645 | size_t, |
646 | size_t, |
647 | size_t, |
648 | c_uint, |
649 | ) -> c_bool, |
650 | >, |
651 | pub purge_forced: Option< |
652 | unsafe extern "C" fn( |
653 | *mut extent_hooks_t, |
654 | *mut c_void, |
655 | size_t, |
656 | size_t, |
657 | size_t, |
658 | c_uint, |
659 | ) -> c_bool, |
660 | >, |
661 | pub split: Option< |
662 | unsafe extern "C" fn( |
663 | *mut extent_hooks_t, |
664 | *mut c_void, |
665 | size_t, |
666 | size_t, |
667 | size_t, |
668 | c_bool, |
669 | c_uint, |
670 | ) -> c_bool, |
671 | >, |
672 | pub merge: Option< |
673 | unsafe extern "C" fn( |
674 | *mut extent_hooks_t, |
675 | *mut c_void, |
676 | size_t, |
677 | *mut c_void, |
678 | size_t, |
679 | c_bool, |
680 | c_uint, |
681 | ) -> c_bool, |
682 | >, |
683 | } |
684 | |
685 | /// Extent lifetime management functions. |
686 | /// |
687 | /// The extent_hooks_t structure comprises function pointers which are described |
688 | /// individually below. `jemalloc` uses these functions to manage extent lifetime, |
689 | /// which starts off with allocation of mapped committed memory, in the simplest |
690 | /// case followed by deallocation. However, there are performance and platform |
691 | /// reasons to retain extents for later reuse. Cleanup attempts cascade from |
692 | /// deallocation to decommit to forced purging to lazy purging, which gives the |
693 | /// extent management functions opportunities to reject the most permanent |
694 | /// cleanup operations in favor of less permanent (and often less costly) |
695 | /// operations. All operations except allocation can be universally opted out of |
696 | /// by setting the hook pointers to `NULL`, or selectively opted out of by |
697 | /// returning failure. Note that once the extent hook is set, the structure is |
698 | /// accessed directly by the associated arenas, so it must remain valid for the |
699 | /// entire lifetime of the arenas. |
700 | #[repr (C)] |
701 | #[cfg (jemallocator_docs)] |
702 | #[derive (Copy, Clone, Default)] |
703 | pub struct extent_hooks_s { |
704 | #[allow (missing_docs)] |
705 | pub alloc: Option<extent_alloc_t>, |
706 | #[allow (missing_docs)] |
707 | pub dalloc: Option<extent_dalloc_t>, |
708 | #[allow (missing_docs)] |
709 | pub destroy: Option<extent_destroy_t>, |
710 | #[allow (missing_docs)] |
711 | pub commit: Option<extent_commit_t>, |
712 | #[allow (missing_docs)] |
713 | pub decommit: Option<extent_decommit_t>, |
714 | #[allow (missing_docs)] |
715 | pub purge_lazy: Option<extent_purge_t>, |
716 | #[allow (missing_docs)] |
717 | pub purge_forced: Option<extent_purge_t>, |
718 | #[allow (missing_docs)] |
719 | pub split: Option<extent_split_t>, |
720 | #[allow (missing_docs)] |
721 | pub merge: Option<extent_merge_t>, |
722 | } |
723 | |
724 | /// Extent allocation function. |
725 | /// |
726 | /// On success returns a pointer to `size` bytes of mapped memory on behalf of |
727 | /// arena `arena_ind` such that the extent's base address is a multiple of |
728 | /// `alignment`, as well as setting `*zero` to indicate whether the extent is |
729 | /// zeroed and `*commit` to indicate whether the extent is committed. |
730 | /// |
731 | /// Zeroing is mandatory if `*zero` is `true` upon function entry. Committing is mandatory if |
/// `*commit` is `true` upon function entry. If `new_addr` is not null, the returned
733 | /// pointer must be `new_addr` on success or null on error. |
734 | /// |
735 | /// Committed memory may be committed in absolute terms as on a system that does |
736 | /// not overcommit, or in implicit terms as on a system that overcommits and |
737 | /// satisfies physical memory needs on demand via soft page faults. Note that |
738 | /// replacing the default extent allocation function makes the arena's |
739 | /// `arena.<i>.dss` setting irrelevant. |
740 | /// |
741 | /// # Errors |
742 | /// |
743 | /// On error the function returns null and leaves `*zero` and `*commit` unmodified. |
744 | /// |
745 | /// # Safety |
746 | /// |
747 | /// The behavior is _undefined_ if: |
748 | /// |
749 | /// * the `size` parameter is not a multiple of the page size |
750 | /// * the `alignment` parameter is not a power of two at least as large as the page size |
751 | pub type extent_alloc_t = unsafe extern "C" fn( |
752 | extent_hooks: *mut extent_hooks_t, |
753 | new_addr: *mut c_void, |
754 | size: size_t, |
755 | alignment: size_t, |
756 | zero: *mut c_bool, |
757 | commit: *mut c_bool, |
758 | arena_ind: c_uint, |
759 | ) -> *mut c_void; |
760 | |
761 | /// Extent deallocation function. |
762 | /// |
/// Deallocates an extent at given `addr` and `size` with `committed`/decommitted
764 | /// memory as indicated, on behalf of arena `arena_ind`, returning `false` upon |
765 | /// success. |
766 | /// |
767 | /// If the function returns `true`, this indicates opt-out from deallocation; |
768 | /// the virtual memory mapping associated with the extent remains mapped, in the |
769 | /// same commit state, and available for future use, in which case it will be |
770 | /// automatically retained for later reuse. |
771 | pub type extent_dalloc_t = unsafe extern "C" fn( |
772 | extent_hooks: *mut extent_hooks_t, |
773 | addr: *mut c_void, |
774 | size: size_t, |
775 | committed: c_bool, |
776 | arena_ind: c_uint, |
777 | ) -> c_bool; |
778 | |
779 | /// Extent destruction function. |
780 | /// |
781 | /// Unconditionally destroys an extent at given `addr` and `size` with |
/// `committed`/decommitted memory as indicated, on behalf of arena `arena_ind`.
783 | /// |
784 | /// This function may be called to destroy retained extents during arena |
785 | /// destruction (see `arena.<i>.destroy`). |
786 | pub type extent_destroy_t = unsafe extern "C" fn( |
787 | extent_hooks: *mut extent_hooks_t, |
788 | addr: *mut c_void, |
789 | size: size_t, |
790 | committed: c_bool, |
791 | arena_ind: c_uint, |
792 | ); |
793 | |
794 | /// Extent commit function. |
795 | /// |
796 | /// Commits zeroed physical memory to back pages within an extent at given |
797 | /// `addr` and `size` at `offset` bytes, extending for `length` on behalf of |
798 | /// arena `arena_ind`, returning `false` upon success. |
799 | /// |
800 | /// Committed memory may be committed in absolute terms as on a system that does |
801 | /// not overcommit, or in implicit terms as on a system that overcommits and |
802 | /// satisfies physical memory needs on demand via soft page faults. If the |
803 | /// function returns `true`, this indicates insufficient physical memory to |
804 | /// satisfy the request. |
805 | pub type extent_commit_t = unsafe extern "C" fn( |
806 | extent_hooks: *mut extent_hooks_t, |
807 | addr: *mut c_void, |
808 | size: size_t, |
809 | offset: size_t, |
810 | length: size_t, |
811 | arena_ind: c_uint, |
812 | ) -> c_bool; |
813 | |
814 | /// Extent decommit function. |
815 | /// |
816 | /// Decommits any physical memory that is backing pages within an extent at |
817 | /// given `addr` and `size` at `offset` bytes, extending for `length` on behalf of arena |
818 | /// `arena_ind`, returning `false` upon success, in which case the pages will be |
819 | /// committed via the extent commit function before being reused. |
820 | /// |
821 | /// If the function returns `true`, this indicates opt-out from decommit; the |
822 | /// memory remains committed and available for future use, in which case it will |
823 | /// be automatically retained for later reuse. |
824 | pub type extent_decommit_t = unsafe extern "C" fn( |
825 | extent_hooks: *mut extent_hooks_t, |
826 | addr: *mut c_void, |
827 | size: size_t, |
828 | offset: size_t, |
829 | length: size_t, |
830 | arena_ind: c_uint, |
831 | ) -> c_bool; |
832 | |
833 | /// Extent purge function. |
834 | /// |
835 | /// Discards physical pages within the virtual memory mapping associated with an |
836 | /// extent at given `addr` and `size` at `offset` bytes, extending for `length` on |
837 | /// behalf of arena `arena_ind`. |
838 | /// |
839 | /// A lazy extent purge function (e.g. implemented via `madvise(...MADV_FREE)`) |
840 | /// can delay purging indefinitely and leave the pages within the purged virtual |
/// memory range in an indeterminate state, whereas a forced extent purge
842 | /// function immediately purges, and the pages within the virtual memory range |
843 | /// will be zero-filled the next time they are accessed. If the function returns |
844 | /// `true`, this indicates failure to purge. |
845 | pub type extent_purge_t = unsafe extern "C" fn( |
846 | extent_hooks: *mut extent_hooks_t, |
847 | addr: *mut c_void, |
848 | size: size_t, |
849 | offset: size_t, |
850 | length: size_t, |
851 | arena_ind: c_uint, |
852 | ) -> c_bool; |
853 | |
854 | /// Extent split function. |
855 | /// |
856 | /// Optionally splits an extent at given `addr` and `size` into two adjacent |
857 | /// extents, the first of `size_a` bytes, and the second of `size_b` bytes, |
858 | /// operating on `committed`/decommitted memory as indicated, on behalf of arena |
859 | /// `arena_ind`, returning `false` upon success. |
860 | /// |
861 | /// If the function returns `true`, this indicates that the extent remains |
862 | /// unsplit and therefore should continue to be operated on as a whole. |
863 | pub type extent_split_t = unsafe extern "C" fn( |
864 | extent_hooks: *mut extent_hooks_t, |
865 | addr: *mut c_void, |
866 | size: size_t, |
867 | size_a: size_t, |
868 | size_b: size_t, |
869 | committed: c_bool, |
870 | arena_ind: c_uint, |
871 | ) -> c_bool; |
872 | |
873 | /// Extent merge function. |
874 | /// |
875 | /// Optionally merges adjacent extents, at given `addr_a` and `size_a` with given |
876 | /// `addr_b` and `size_b` into one contiguous extent, operating on |
877 | /// `committed`/decommitted memory as indicated, on behalf of arena `arena_ind`, |
878 | /// returning `false` upon success. |
879 | /// |
880 | /// If the function returns `true`, this indicates that the extents remain |
881 | /// distinct mappings and therefore should continue to be operated on |
882 | /// independently. |
883 | pub type extent_merge_t = unsafe extern "C" fn( |
884 | extent_hooks: *mut extent_hooks_t, |
885 | addr_a: *mut c_void, |
886 | size_a: size_t, |
887 | addr_b: *mut c_void, |
888 | size_b: size_t, |
889 | committed: c_bool, |
890 | arena_ind: c_uint, |
891 | ) -> c_bool; |
892 | |
// These symbols are used by jemalloc on Android, but the really old Android
// version we're building against doesn't have them defined, so just make sure
// the symbols are available.
896 | #[no_mangle ] |
897 | #[cfg (target_os = "android" )] |
898 | #[doc (hidden)] |
899 | pub extern "C" fn pthread_atfork( |
900 | _prefork: *mut u8, |
901 | _postfork_parent: *mut u8, |
902 | _postfork_child: *mut u8, |
903 | ) -> i32 { |
904 | 0 |
905 | } |
906 | |
907 | #[allow (missing_docs)] |
908 | mod env; |
909 | |
910 | pub use env::*; |
911 | |