1 | //! Rust bindings to the `jemalloc` C library. |
2 | //! |
//! `jemalloc` is a general purpose memory allocator; its documentation
4 | //! can be found here: |
5 | //! |
6 | //! * [API documentation][jemalloc_docs] |
7 | //! * [Wiki][jemalloc_wiki] (design documents, presentations, profiling, debugging, tuning, ...) |
8 | //! |
9 | //! `jemalloc` exposes both a standard and a non-standard API. |
10 | //! |
11 | //! # Standard API |
12 | //! |
//! The standard API includes: the [`malloc`], [`calloc`], [`realloc`], and
//! [`free`] functions, which conform to ISO/IEC 9899:1990 (“ISO C90”),
//! [`posix_memalign`], which conforms to POSIX.1-2016, and
//! [`aligned_alloc`].
17 | //! |
//! Note that these standards leave some details as _implementation defined_.
//! These docs document this behavior for `jemalloc`, but keep in mind that
//! other standard-conforming implementations of these functions in other
//! allocators might behave slightly differently.
22 | //! |
23 | //! # Non-Standard API |
24 | //! |
25 | //! The non-standard API includes: [`mallocx`], [`rallocx`], [`xallocx`], |
26 | //! [`sallocx`], [`dallocx`], [`sdallocx`], and [`nallocx`]. These functions all |
27 | //! have a `flags` argument that can be used to specify options. Use bitwise or |
28 | //! `|` to specify one or more of the following: [`MALLOCX_LG_ALIGN`], |
29 | //! [`MALLOCX_ALIGN`], [`MALLOCX_ZERO`], [`MALLOCX_TCACHE`], |
30 | //! [`MALLOCX_TCACHE_NONE`], and [`MALLOCX_ARENA`]. |
31 | //! |
32 | //! # Environment variables |
33 | //! |
34 | //! The `MALLOC_CONF` environment variable affects the execution of the allocation functions. |
35 | //! |
//! For the documentation of the [`MALLCTL` namespace visit the jemalloc
//! documentation][jemalloc_mallctl].
38 | //! |
39 | //! [jemalloc_docs]: http://jemalloc.net/jemalloc.3.html |
40 | //! [jemalloc_wiki]: https://github.com/jemalloc/jemalloc/wiki |
41 | //! [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace |
42 | #![no_std ] |
43 | #![allow (non_snake_case, non_camel_case_types)] |
44 | // TODO: rename the following lint on next minor bump |
45 | #![allow (renamed_and_removed_lints)] |
46 | #![deny (missing_docs, broken_intra_doc_links)] |
47 | |
48 | use libc::{c_char, c_int, c_uint, c_void, size_t}; |
49 | |
// jemalloc uses `stdbool.h` to define `bool` for which the Rust equivalent is `bool`.
// However jemalloc also has its own `stdbool.h` that it uses when compiling with MSVC,
// and this header defines `bool` as `BOOL` which in turn is `int`.
//
// `c_bool` is used below in the `extent_hooks_s` function-pointer signatures,
// so its representation must match whatever the compiled jemalloc used.
#[cfg (target_env = "msvc" )]
type c_bool = c_int;
#[cfg (not(target_env = "msvc" ))]
type c_bool = bool;
57 | |
/// Align the memory allocation to start at an address that is a
/// multiple of `1 << la`.
///
/// # Safety
///
/// It does not validate that `la` is within the valid range.
#[inline]
pub const fn MALLOCX_LG_ALIGN(la: usize) -> c_int {
    // jemalloc encodes the base-2 logarithm of the alignment directly in the
    // low bits of the `flags` word, so no further transformation is needed.
    let lg_align = la;
    lg_align as c_int
}
68 | |
/// Align the memory allocation to start at an address that is a multiple of
/// `align`, where `align` is a power of two.
///
/// # Safety
///
/// This function does not validate that `align` is a power of 2.
#[inline]
pub const fn MALLOCX_ALIGN(align: usize) -> c_int {
    // For a power of two, the number of trailing zeros equals log2(align),
    // which is the log-alignment encoding jemalloc expects in `flags`.
    align.trailing_zeros() as c_int
}
79 | |
/// Initialize newly allocated memory to contain zero bytes.
///
/// In the growing reallocation case, the real size prior to reallocation
/// defines the boundary between untouched bytes and those that are initialized
/// to contain zero bytes.
///
/// If this option is not set, newly allocated memory is uninitialized.
///
/// (This flag occupies bit `0x40` of the `flags` argument.)
pub const MALLOCX_ZERO: c_int = 0x40;
88 | |
/// Use the thread-specific cache (_tcache_) specified by the identifier `tc`.
///
/// # Safety
///
/// `tc` must have been acquired via the `tcache.create mallctl`. This function
/// does not validate that `tc` specifies a valid identifier.
#[inline]
pub const fn MALLOCX_TCACHE(tc: usize) -> c_int {
    // The identifier is biased by 2 and then shifted into its bit position
    // (bits 8 and above) within the flags word; wrapping arithmetic keeps the
    // `tc == -1` encoding used by `MALLOCX_TCACHE_NONE` well-defined.
    let biased = tc.wrapping_add(2);
    biased.wrapping_shl(8) as c_int
}
99 | |
100 | /// Do not use a thread-specific cache (_tcache_). |
101 | /// |
102 | /// Unless `MALLOCX_TCACHE(tc)` or `MALLOCX_TCACHE_NONE` is specified, an |
103 | /// automatically managed _tcache_ will be used under many circumstances. |
104 | /// |
105 | /// # Safety |
106 | /// |
107 | /// This option cannot be used in the same `flags` argument as |
108 | /// `MALLOCX_TCACHE(tc)`. |
109 | // FIXME: This should just be a const. |
110 | pub const MALLOCX_TCACHE_NONE: c_int = MALLOCX_TCACHE((-1isize) as usize); |
111 | |
/// Use the arena specified by the index `a`.
///
/// This option has no effect for regions that were allocated via an arena other
/// than the one specified.
///
/// # Safety
///
/// This function does not validate that `a` specifies an arena index in the
/// valid range.
#[inline]
pub const fn MALLOCX_ARENA(a: usize) -> c_int {
    // The arena index is stored biased by 1 in bits 20 and above of the flags
    // word; wrapping arithmetic avoids overflow panics in const evaluation.
    let biased = (a as c_int).wrapping_add(1);
    biased.wrapping_shl(20)
}
125 | |
126 | unsafeextern "C" { |
127 | /// Allocates `size` bytes of uninitialized memory. |
128 | /// |
129 | /// It returns a pointer to the start (lowest byte address) of the allocated |
130 | /// space. This pointer is suitably aligned so that it may be assigned to a |
131 | /// pointer to any type of object and then used to access such an object in |
132 | /// the space allocated until the space is explicitly deallocated. Each |
133 | /// yielded pointer points to an object disjoint from any other object. |
134 | /// |
135 | /// If the `size` of the space requested is zero, either a null pointer is |
136 | /// returned, or the behavior is as if the `size` were some nonzero value, |
137 | /// except that the returned pointer shall not be used to access an object. |
138 | /// |
139 | /// # Errors |
140 | /// |
141 | /// If the space cannot be allocated, a null pointer is returned and `errno` |
142 | /// is set to `ENOMEM`. |
143 | #[cfg_attr (prefixed, link_name = "_rjem_malloc" )] |
144 | pub unsafefn malloc(size: size_t) -> *mut c_void; |
145 | /// Allocates zero-initialized space for an array of `number` objects, each |
146 | /// of whose size is `size`. |
147 | /// |
148 | /// The result is identical to calling [`malloc`] with an argument of |
149 | /// `number * size`, with the exception that the allocated memory is |
150 | /// explicitly initialized to _zero_ bytes. |
151 | /// |
152 | /// Note: zero-initialized memory need not be the same as the |
153 | /// representation of floating-point zero or a null pointer constant. |
154 | #[cfg_attr (prefixed, link_name = "_rjem_calloc" )] |
155 | pub unsafefn calloc(number: size_t, size: size_t) -> *mut c_void; |
156 | |
157 | /// Allocates `size` bytes of memory at an address which is a multiple of |
158 | /// `alignment` and is placed in `*ptr`. |
159 | /// |
160 | /// If `size` is zero, then the value placed in `*ptr` is either null, or |
161 | /// the behavior is as if the `size` were some nonzero value, except that |
162 | /// the returned pointer shall not be used to access an object. |
163 | /// |
164 | /// # Errors |
165 | /// |
166 | /// On success, it returns zero. On error, the value of `errno` is _not_ set, |
167 | /// `*ptr` is not modified, and the return values can be: |
168 | /// |
169 | /// - `EINVAL`: the `alignment` argument was not a power-of-two or was not a multiple of |
170 | /// `mem::size_of::<*const c_void>()`. |
171 | /// - `ENOMEM`: there was insufficient memory to fulfill the allocation request. |
172 | /// |
173 | /// # Safety |
174 | /// |
175 | /// The behavior is _undefined_ if: |
176 | /// |
177 | /// * `ptr` is null. |
178 | #[cfg_attr (prefixed, link_name = "_rjem_posix_memalign" )] |
179 | pub unsafefn posix_memalign(ptr: *mut *mut c_void, alignment: size_t, size: size_t) -> c_int; |
180 | |
181 | /// Allocates `size` bytes of memory at an address which is a multiple of |
182 | /// `alignment`. |
183 | /// |
184 | /// If the `size` of the space requested is zero, either a null pointer is |
185 | /// returned, or the behavior is as if the `size` were some nonzero value, |
186 | /// except that the returned pointer shall not be used to access an object. |
187 | /// |
188 | /// # Errors |
189 | /// |
190 | /// Returns null if the request fails. |
191 | /// |
192 | /// # Safety |
193 | /// |
194 | /// The behavior is _undefined_ if: |
195 | /// |
196 | /// * `alignment` is not a power-of-two |
197 | /// * `size` is not an integral multiple of `alignment` |
198 | #[cfg_attr (prefixed, link_name = "_rjem_aligned_alloc" )] |
199 | pub unsafefn aligned_alloc(alignment: size_t, size: size_t) -> *mut c_void; |
200 | |
201 | /// Resizes the previously-allocated memory region referenced by `ptr` to |
202 | /// `size` bytes. |
203 | /// |
204 | /// Deallocates the old object pointed to by `ptr` and returns a pointer to |
205 | /// a new object that has the size specified by `size`. The contents of the |
206 | /// new object are the same as that of the old object prior to deallocation, |
207 | /// up to the lesser of the new and old sizes. |
208 | /// |
209 | /// The memory in the new object beyond the size of the old object is |
210 | /// uninitialized. |
211 | /// |
212 | /// The returned pointer to a new object may have the same value as a |
213 | /// pointer to the old object, but [`realloc`] may move the memory |
214 | /// allocation, resulting in a different return value than `ptr`. |
215 | /// |
216 | /// If `ptr` is null, [`realloc`] behaves identically to [`malloc`] for the |
217 | /// specified size. |
218 | /// |
219 | /// If the size of the space requested is zero, the behavior is |
220 | /// implementation-defined: either a null pointer is returned, or the |
221 | /// behavior is as if the size were some nonzero value, except that the |
222 | /// returned pointer shall not be used to access an object # Errors |
223 | /// |
224 | /// # Errors |
225 | /// |
226 | /// If memory for the new object cannot be allocated, the old object is not |
227 | /// deallocated, its value is unchanged, [`realloc`] returns null, and |
228 | /// `errno` is set to `ENOMEM`. |
229 | /// |
230 | /// # Safety |
231 | /// |
232 | /// The behavior is _undefined_ if: |
233 | /// |
234 | /// * `ptr` does not match a pointer previously returned by the memory |
235 | /// allocation functions of this crate, or |
236 | /// * the memory region referenced by `ptr` has been deallocated. |
237 | #[cfg_attr (prefixed, link_name = "_rjem_realloc" )] |
238 | pub unsafefn realloc(ptr: *mut c_void, size: size_t) -> *mut c_void; |
239 | |
240 | /// Deallocates previously-allocated memory region referenced by `ptr`. |
241 | /// |
242 | /// This makes the space available for future allocations. |
243 | /// |
244 | /// If `ptr` is null, no action occurs. |
245 | /// |
246 | /// # Safety |
247 | /// |
248 | /// The behavior is _undefined_ if: |
249 | /// |
250 | /// * `ptr` does not match a pointer earlier returned by the memory |
251 | /// allocation functions of this crate, or |
252 | /// * the memory region referenced by `ptr` has been deallocated. |
253 | #[cfg_attr (prefixed, link_name = "_rjem_free" )] |
254 | pub unsafefn free(ptr: *mut c_void); |
255 | |
256 | /// Allocates at least `size` bytes of memory according to `flags`. |
257 | /// |
258 | /// It returns a pointer to the start (lowest byte address) of the allocated |
259 | /// space. This pointer is suitably aligned so that it may be assigned to a |
260 | /// pointer to any type of object and then used to access such an object in |
261 | /// the space allocated until the space is explicitly deallocated. Each |
262 | /// yielded pointer points to an object disjoint from any other object. |
263 | /// |
264 | /// # Errors |
265 | /// |
266 | /// On success it returns a non-null pointer. A null pointer return value |
267 | /// indicates that insufficient contiguous memory was available to service |
268 | /// the allocation request. |
269 | /// |
270 | /// # Safety |
271 | /// |
272 | /// The behavior is _undefined_ if `size == 0`. |
273 | #[cfg_attr (prefixed, link_name = "_rjem_mallocx" )] |
274 | pub unsafefn mallocx(size: size_t, flags: c_int) -> *mut c_void; |
275 | |
276 | /// Resizes the previously-allocated memory region referenced by `ptr` to be |
277 | /// at least `size` bytes. |
278 | /// |
279 | /// Deallocates the old object pointed to by `ptr` and returns a pointer to |
280 | /// a new object that has the size specified by `size`. The contents of the |
281 | /// new object are the same as that of the old object prior to deallocation, |
282 | /// up to the lesser of the new and old sizes. |
283 | /// |
284 | /// The the memory in the new object beyond the size of the old object is |
285 | /// obtained according to `flags` (it might be uninitialized). |
286 | /// |
287 | /// The returned pointer to a new object may have the same value as a |
288 | /// pointer to the old object, but [`rallocx`] may move the memory |
289 | /// allocation, resulting in a different return value than `ptr`. |
290 | /// |
291 | /// # Errors |
292 | /// |
293 | /// On success it returns a non-null pointer. A null pointer return value |
294 | /// indicates that insufficient contiguous memory was available to service |
295 | /// the allocation request. In this case, the old object is not |
296 | /// deallocated, and its value is unchanged. |
297 | /// |
298 | /// # Safety |
299 | /// |
300 | /// The behavior is _undefiend_ if: |
301 | /// |
302 | /// * `size == 0`, or |
303 | /// * `ptr` does not match a pointer earlier returned by |
304 | /// the memory allocation functions of this crate, or |
305 | /// * the memory region referenced by `ptr` has been deallocated. |
306 | #[cfg_attr (prefixed, link_name = "_rjem_rallocx" )] |
307 | pub unsafefn rallocx(ptr: *mut c_void, size: size_t, flags: c_int) -> *mut c_void; |
308 | |
309 | /// Resizes the previously-allocated memory region referenced by `ptr` _in |
310 | /// place_ to be at least `size` bytes, returning the real size of the |
311 | /// allocation. |
312 | /// |
313 | /// Deallocates the old object pointed to by `ptr` and sets `ptr` to a new |
314 | /// object that has the size returned; the old a new objects share the same |
315 | /// base address. The contents of the new object are the same as that of the |
316 | /// old object prior to deallocation, up to the lesser of the new and old |
317 | /// sizes. |
318 | /// |
319 | /// If `extra` is non-zero, an attempt is made to resize the allocation to |
320 | /// be at least `size + extra` bytes. Inability to allocate the `extra` |
321 | /// bytes will not by itself result in failure to resize. |
322 | /// |
323 | /// The memory in the new object beyond the size of the old object is |
324 | /// obtained according to `flags` (it might be uninitialized). |
325 | /// |
326 | /// # Errors |
327 | /// |
328 | /// If the allocation cannot be adequately grown in place up to `size`, the |
329 | /// size returned is smaller than `size`. |
330 | /// |
331 | /// Note: |
332 | /// |
333 | /// * the size value returned can be larger than the size requested during |
334 | /// allocation |
335 | /// * when shrinking an allocation, use the size returned to determine |
336 | /// whether the allocation was shrunk sufficiently or not. |
337 | /// |
338 | /// # Safety |
339 | /// |
340 | /// The behavior is _undefined_ if: |
341 | /// |
342 | /// * `size == 0`, or |
343 | /// * `size + extra > size_t::max_value()`, or |
344 | /// * `ptr` does not match a pointer earlier returned by the memory |
345 | /// allocation functions of this crate, or |
346 | /// * the memory region referenced by `ptr` has been deallocated. |
347 | #[cfg_attr (prefixed, link_name = "_rjem_xallocx" )] |
348 | pub unsafefn xallocx(ptr: *mut c_void, size: size_t, extra: size_t, flags: c_int) -> size_t; |
349 | |
350 | /// Returns the real size of the previously-allocated memory region |
351 | /// referenced by `ptr`. |
352 | /// |
353 | /// The value may be larger than the size requested on allocation. |
354 | /// |
355 | /// # Safety |
356 | /// |
357 | /// The behavior is _undefined_ if: |
358 | /// |
359 | /// * `ptr` does not match a pointer earlier returned by the memory |
360 | /// allocation functions of this crate, or |
361 | /// * the memory region referenced by `ptr` has been deallocated. |
362 | #[cfg_attr (prefixed, link_name = "_rjem_sallocx" )] |
363 | pub unsafefn sallocx(ptr: *const c_void, flags: c_int) -> size_t; |
364 | |
365 | /// Deallocates previously-allocated memory region referenced by `ptr`. |
366 | /// |
367 | /// This makes the space available for future allocations. |
368 | /// |
369 | /// # Safety |
370 | /// |
371 | /// The behavior is _undefined_ if: |
372 | /// |
373 | /// * `ptr` does not match a pointer earlier returned by the memory |
374 | /// allocation functions of this crate, or |
375 | /// * `ptr` is null, or |
376 | /// * the memory region referenced by `ptr` has been deallocated. |
377 | #[cfg_attr (prefixed, link_name = "_rjem_dallocx" )] |
378 | pub unsafefn dallocx(ptr: *mut c_void, flags: c_int); |
379 | |
380 | /// Deallocates previously-allocated memory region referenced by `ptr` with |
381 | /// `size` hint. |
382 | /// |
383 | /// This makes the space available for future allocations. |
384 | /// |
385 | /// # Safety |
386 | /// |
387 | /// The behavior is _undefined_ if: |
388 | /// |
389 | /// * `size` is not in range `[req_size, alloc_size]`, where `req_size` is |
390 | /// the size requested when performing the allocation, and `alloc_size` is |
391 | /// the allocation size returned by [`nallocx`], [`sallocx`], or |
392 | /// [`xallocx`], |
393 | /// * `ptr` does not match a pointer earlier returned by the memory |
394 | /// allocation functions of this crate, or |
395 | /// * `ptr` is null, or |
396 | /// * the memory region referenced by `ptr` has been deallocated. |
397 | #[cfg_attr (prefixed, link_name = "_rjem_sdallocx" )] |
398 | pub unsafefn sdallocx(ptr: *mut c_void, size: size_t, flags: c_int); |
399 | |
400 | /// Returns the real size of the allocation that would result from a |
401 | /// [`mallocx`] function call with the same arguments. |
402 | /// |
403 | /// # Errors |
404 | /// |
405 | /// If the inputs exceed the maximum supported size class and/or alignment |
406 | /// it returns zero. |
407 | /// |
408 | /// # Safety |
409 | /// |
410 | /// The behavior is _undefined_ if `size == 0`. |
411 | #[cfg_attr (prefixed, link_name = "_rjem_nallocx" )] |
412 | pub unsafefn nallocx(size: size_t, flags: c_int) -> size_t; |
413 | |
414 | /// Returns the real size of the previously-allocated memory region |
415 | /// referenced by `ptr`. |
416 | /// |
417 | /// The value may be larger than the size requested on allocation. |
418 | /// |
419 | /// Although the excess bytes can be overwritten by the application without |
420 | /// ill effects, this is not good programming practice: the number of excess |
421 | /// bytes in an allocation depends on the underlying implementation. |
422 | /// |
423 | /// The main use of this function is for debugging and introspection. |
424 | /// |
425 | /// # Errors |
426 | /// |
427 | /// If `ptr` is null, 0 is returned. |
428 | /// |
429 | /// # Safety |
430 | /// |
431 | /// The behavior is _undefined_ if: |
432 | /// |
433 | /// * `ptr` does not match a pointer earlier returned by the memory |
434 | /// allocation functions of this crate, or |
435 | /// * the memory region referenced by `ptr` has been deallocated. |
436 | #[cfg_attr (prefixed, link_name = "_rjem_malloc_usable_size" )] |
437 | pub unsafefn malloc_usable_size(ptr: *const c_void) -> size_t; |
438 | |
439 | /// General interface for introspecting the memory allocator, as well as |
440 | /// setting modifiable parameters and triggering actions. |
441 | /// |
442 | /// The period-separated name argument specifies a location in a |
443 | /// tree-structured namespace ([see jemalloc's `MALLCTL` |
444 | /// documentation][jemalloc_mallctl]). |
445 | /// |
446 | /// To read a value, pass a pointer via `oldp` to adequate space to contain |
447 | /// the value, and a pointer to its length via `oldlenp``; otherwise pass |
448 | /// null and null. Similarly, to write a value, pass a pointer to the value |
449 | /// via `newp`, and its length via `newlen`; otherwise pass null and 0. |
450 | /// |
451 | /// # Errors |
452 | /// |
453 | /// Returns `0` on success, otherwise returns: |
454 | /// |
455 | /// * `EINVAL`: if `newp` is not null, and `newlen` is too large or too |
456 | /// small. Alternatively, `*oldlenp` is too large or too small; in this case |
457 | /// as much data as possible are read despite the error. |
458 | /// |
459 | /// * `ENOENT`: `name` or mib specifies an unknown/invalid value. |
460 | /// |
461 | /// * `EPERM`: Attempt to read or write void value, or attempt to write read-only value. |
462 | /// |
463 | /// * `EAGAIN`: A memory allocation failure occurred. |
464 | /// |
465 | /// * `EFAULT`: An interface with side effects failed in some way not |
466 | /// directly related to `mallctl` read/write processing. |
467 | /// |
468 | /// [jemalloc_mallctl]: http://jemalloc.net/jemalloc.3.html#mallctl_namespace |
469 | #[cfg_attr (prefixed, link_name = "_rjem_mallctl" )] |
470 | pub unsafefn mallctl( |
471 | name: *const c_char, |
472 | oldp: *mut c_void, |
473 | oldlenp: *mut size_t, |
474 | newp: *mut c_void, |
475 | newlen: size_t, |
476 | ) -> c_int; |
477 | /// Translates a name to a “Management Information Base” (MIB) that can be |
478 | /// passed repeatedly to [`mallctlbymib`]. |
479 | /// |
480 | /// This avoids repeated name lookups for applications that repeatedly query |
481 | /// the same portion of the namespace. |
482 | /// |
483 | /// On success, `mibp` contains an array of `*miblenp` integers, where |
484 | /// `*miblenp` is the lesser of the number of components in name and the |
485 | /// input value of `*miblenp`. Thus it is possible to pass a `*miblenp` that is |
486 | /// smaller than the number of period-separated name components, which |
487 | /// results in a partial MIB that can be used as the basis for constructing |
488 | /// a complete MIB. For name components that are integers (e.g. the 2 in |
489 | /// arenas.bin.2.size), the corresponding MIB component will always be that |
490 | /// integer. |
491 | #[cfg_attr (prefixed, link_name = "_rjem_mallctlnametomib" )] |
492 | pub unsafefn mallctlnametomib(name: *const c_char, mibp: *mut size_t, miblenp: *mut size_t) -> c_int; |
493 | |
494 | /// Like [`mallctl`] but taking a `mib` as input instead of a name. |
495 | #[cfg_attr (prefixed, link_name = "_rjem_mallctlbymib" )] |
496 | pub unsafefn mallctlbymib( |
497 | mib: *const size_t, |
498 | miblen: size_t, |
499 | oldp: *mut c_void, |
500 | oldpenp: *mut size_t, |
501 | newp: *mut c_void, |
502 | newlen: size_t, |
503 | ) -> c_int; |
504 | |
505 | /// Writes summary statistics via the `write_cb` callback function pointer |
506 | /// and `cbopaque` data passed to `write_cb`, or [`malloc_message`] if `write_cb` |
507 | /// is null. |
508 | /// |
509 | /// The statistics are presented in human-readable form unless “J” |
510 | /// is specified as a character within the opts string, in which case the |
511 | /// statistics are presented in JSON format. |
512 | /// |
513 | /// This function can be called repeatedly. |
514 | /// |
515 | /// General information that never changes during execution can be omitted |
516 | /// by specifying `g` as a character within the opts string. |
517 | /// |
518 | /// Note that [`malloc_message`] uses the `mallctl*` functions internally, |
519 | /// so inconsistent statistics can be reported if multiple threads use these |
520 | /// functions simultaneously. |
521 | /// |
522 | /// If the Cargo feature `stats` is enabled, `m`, `d`, and `a` can be |
523 | /// specified to omit merged arena, destroyed merged arena, and per arena |
524 | /// statistics, respectively; `b` and `l` can be specified to omit per size |
525 | /// class statistics for bins and large objects, respectively; `x` can be |
526 | /// specified to omit all mutex statistics. Unrecognized characters are |
527 | /// silently ignored. |
528 | /// |
529 | /// Note that thread caching may prevent some statistics from being |
530 | /// completely up to date, since extra locking would be required to merge |
531 | /// counters that track thread cache operations. |
532 | #[cfg_attr (prefixed, link_name = "_rjem_malloc_stats_print" )] |
533 | pub unsafefn malloc_stats_print( |
534 | write_cb: Option<unsafe extern "C" fn(*mut c_void, *const c_char)>, |
535 | cbopaque: *mut c_void, |
536 | opts: *const c_char, |
537 | ); |
538 | |
539 | /// Allows overriding the function which emits the text strings forming the |
540 | /// errors and warnings if for some reason the `STDERR_FILENO` file descriptor |
541 | /// is not suitable for this. |
542 | /// |
543 | /// [`malloc_message`] takes the `cbopaque` pointer argument that is null, |
544 | /// unless overridden by the arguments in a call to [`malloc_stats_print`], |
545 | /// followed by a string pointer. |
546 | /// |
547 | /// Please note that doing anything which tries to allocate memory in this |
548 | /// function is likely to result in a crash or deadlock. |
549 | #[cfg_attr (prefixed, link_name = "_rjem_malloc_message" )] |
550 | pub unsafestatic mut malloc_message: |
551 | Option<unsafe extern "C" fn(cbopaque: *mut c_void, s: *const c_char)>; |
552 | |
553 | /// Compile-time string of configuration options. |
554 | /// |
555 | /// Once, when the first call is made to one of the memory allocation |
556 | /// routines, the allocator initializes its internals based in part on |
557 | /// various options that can be specified at compile- or run-time. |
558 | /// |
559 | /// The string specified via `--with-malloc-conf`, the string pointed to by |
560 | /// the global variable `malloc_conf`, the “name” of the file referenced by |
561 | /// the symbolic link named `/etc/malloc.conf`, and the value of the |
562 | /// environment variable `MALLOC_CONF`, will be interpreted, in that order, |
563 | /// from left to right as options. Note that `malloc_conf` may be read |
564 | /// before `main()` is entered, so the declaration of `malloc_conf` should |
565 | /// specify an initializer that contains the final value to be read by |
566 | /// `jemalloc`. |
567 | /// |
568 | /// `--with-malloc-conf` and `malloc_conf` are compile-time mechanisms, whereas |
569 | /// `/etc/malloc.conf` and `MALLOC_CONF` can be safely set any time prior to |
570 | /// program invocation. |
571 | /// |
572 | /// An options string is a comma-separated list of `option:value` pairs. |
573 | /// There is one key corresponding to each `opt.* mallctl` (see the `MALLCTL |
574 | /// NAMESPACE` section for options documentation). For example, |
575 | /// `abort:true,narenas:1` sets the `opt.abort` and `opt.narenas` options. |
576 | /// Some options have boolean values (`true`/`false`), others have integer |
577 | /// values (base `8`, `10`, or `16`, depending on prefix), and yet others |
578 | /// have raw string values. |
579 | #[cfg_attr (prefixed, link_name = "_rjem_malloc_conf" )] |
580 | pub unsafestatic malloc_conf: Option<&'static c_char>; |
581 | } |
582 | |
/// Extent lifetime management functions.
///
/// Alias of [`extent_hooks_s`]; see that struct for the individual hooks.
pub type extent_hooks_t = extent_hooks_s;
585 | |
586 | // note: there are two structs here, one is used when compiling the crate normally, |
587 | // and the other one is behind the `--cfg jemallocator_docs` flag and used only |
588 | // when generating docs. |
589 | // |
// For the docs we want to use type aliases here, but `ctest` does not see
// through them when generating the code to verify the FFI bindings, and it
// needs to be able to tell that these are `fn` types so that `Option<fn>`
// gets lowered to C function pointers.
594 | |
// Non-docs variant of `extent_hooks_s`: the hook types are spelled out inline
// (rather than via the `extent_*_t` aliases used by the docs variant) so that
// `ctest` can lower each `Option<fn>` field to a C function pointer.
#[repr (C)]
#[cfg (not(jemallocator_docs))]
#[derive (Copy, Clone, Default)]
#[doc (hidden)]
#[allow (missing_docs)]
pub struct extent_hooks_s {
    // Extent allocation hook; same signature as `extent_alloc_t` in the docs build.
    pub alloc: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            *mut c_bool,
            *mut c_bool,
            c_uint,
        ) -> *mut c_void,
    >,
    // Extent deallocation hook (`extent_dalloc_t` in the docs build).
    pub dalloc: Option<
        unsafe extern "C" fn(*mut extent_hooks_t, *mut c_void, size_t, c_bool, c_uint) -> c_bool,
    >,
    // Extent destruction hook (`extent_destroy_t` in the docs build).
    pub destroy:
        Option<unsafe extern "C" fn(*mut extent_hooks_t, *mut c_void, size_t, c_bool, c_uint)>,
    // Extent commit hook (`extent_commit_t` in the docs build).
    pub commit: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    // Extent decommit hook (`extent_decommit_t` in the docs build).
    pub decommit: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    // Lazy purge hook (`extent_purge_t` in the docs build).
    pub purge_lazy: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    // Forced purge hook (same `extent_purge_t` signature as `purge_lazy`).
    pub purge_forced: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_uint,
        ) -> c_bool,
    >,
    // Extent split hook (`extent_split_t` in the docs build).
    pub split: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            size_t,
            size_t,
            c_bool,
            c_uint,
        ) -> c_bool,
    >,
    // Extent merge hook (`extent_merge_t` in the docs build).
    pub merge: Option<
        unsafe extern "C" fn(
            *mut extent_hooks_t,
            *mut c_void,
            size_t,
            *mut c_void,
            size_t,
            c_bool,
            c_uint,
        ) -> c_bool,
    >,
}
680 | |
/// Extent lifetime management functions.
///
/// The extent_hooks_t structure comprises function pointers which are described
/// individually below. `jemalloc` uses these functions to manage extent lifetime,
/// which starts off with allocation of mapped committed memory, in the simplest
/// case followed by deallocation. However, there are performance and platform
/// reasons to retain extents for later reuse. Cleanup attempts cascade from
/// deallocation to decommit to forced purging to lazy purging, which gives the
/// extent management functions opportunities to reject the most permanent
/// cleanup operations in favor of less permanent (and often less costly)
/// operations. All operations except allocation can be universally opted out of
/// by setting the hook pointers to `NULL`, or selectively opted out of by
/// returning failure. Note that once the extent hook is set, the structure is
/// accessed directly by the associated arenas, so it must remain valid for the
/// entire lifetime of the arenas.
#[repr (C)]
#[cfg (jemallocator_docs)]
#[derive (Copy, Clone, Default)]
pub struct extent_hooks_s {
    /// Extent allocation hook (see [`extent_alloc_t`]).
    #[allow (missing_docs)]
    pub alloc: Option<extent_alloc_t>,
    /// Extent deallocation hook.
    #[allow (missing_docs)]
    pub dalloc: Option<extent_dalloc_t>,
    /// Extent destruction hook.
    #[allow (missing_docs)]
    pub destroy: Option<extent_destroy_t>,
    /// Extent commit hook.
    #[allow (missing_docs)]
    pub commit: Option<extent_commit_t>,
    /// Extent decommit hook.
    #[allow (missing_docs)]
    pub decommit: Option<extent_decommit_t>,
    /// Lazy purge hook.
    #[allow (missing_docs)]
    pub purge_lazy: Option<extent_purge_t>,
    /// Forced purge hook (same signature as `purge_lazy`).
    #[allow (missing_docs)]
    pub purge_forced: Option<extent_purge_t>,
    /// Extent split hook.
    #[allow (missing_docs)]
    pub split: Option<extent_split_t>,
    /// Extent merge hook.
    #[allow (missing_docs)]
    pub merge: Option<extent_merge_t>,
}
719 | |
/// Extent allocation function.
///
/// On success returns a pointer to `size` bytes of mapped memory on behalf of
/// arena `arena_ind` such that the extent's base address is a multiple of
/// `alignment`, as well as setting `*zero` to indicate whether the extent is
/// zeroed and `*commit` to indicate whether the extent is committed.
///
/// Zeroing is mandatory if `*zero` is `true` upon function entry. Committing is
/// mandatory if `*commit` is `true` upon function entry. If `new_addr` is not
/// null, the returned pointer must be `new_addr` on success or null on error.
///
/// Committed memory may be committed in absolute terms as on a system that does
/// not overcommit, or in implicit terms as on a system that overcommits and
/// satisfies physical memory needs on demand via soft page faults. Note that
/// replacing the default extent allocation function makes the arena's
/// `arena.<i>.dss` setting irrelevant.
///
/// # Errors
///
/// On error the function returns null and leaves `*zero` and `*commit` unmodified.
///
/// # Safety
///
/// The behavior is _undefined_ if:
///
/// * the `size` parameter is not a multiple of the page size
/// * the `alignment` parameter is not a power of two at least as large as the page size
pub type extent_alloc_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    new_addr: *mut c_void,
    size: size_t,
    alignment: size_t,
    zero: *mut c_bool,
    commit: *mut c_bool,
    arena_ind: c_uint,
) -> *mut c_void;
756 | |
/// Extent deallocation function.
///
/// Deallocates an extent at given `addr` and `size` with `committed`/decommitted
/// memory as indicated, on behalf of arena `arena_ind`, returning `false` upon
/// success.
///
/// If the function returns `true`, this indicates opt-out from deallocation;
/// the virtual memory mapping associated with the extent remains mapped, in the
/// same commit state, and available for future use, in which case it will be
/// automatically retained for later reuse.
pub type extent_dalloc_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;
774 | |
/// Extent destruction function.
///
/// Unconditionally destroys an extent at given `addr` and `size` with
/// `committed`/decommitted memory as indicated, on behalf of arena `arena_ind`.
///
/// This function may be called to destroy retained extents during arena
/// destruction (see `arena.<i>.destroy`).
pub type extent_destroy_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    committed: c_bool,
    arena_ind: c_uint,
);
789 | |
/// Extent commit function.
///
/// Commits zeroed physical memory to back pages within an extent at given
/// `addr` and `size` at `offset` bytes, extending for `length` on behalf of
/// arena `arena_ind`, returning `false` upon success.
///
/// Committed memory may be committed in absolute terms as on a system that does
/// not overcommit, or in implicit terms as on a system that overcommits and
/// satisfies physical memory needs on demand via soft page faults. If the
/// function returns `true`, this indicates insufficient physical memory to
/// satisfy the request.
pub type extent_commit_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;
809 | |
/// Extent decommit function.
///
/// Decommits any physical memory that is backing pages within an extent at
/// given `addr` and `size` at `offset` bytes, extending for `length` on behalf
/// of arena `arena_ind`, returning `false` upon success, in which case the
/// pages will be committed via the extent commit function before being reused.
///
/// If the function returns `true`, this indicates opt-out from decommit; the
/// memory remains committed and available for future use, in which case it will
/// be automatically retained for later reuse.
pub type extent_decommit_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;
828 | |
/// Extent purge function.
///
/// Discards physical pages within the virtual memory mapping associated with an
/// extent at given `addr` and `size` at `offset` bytes, extending for `length`
/// on behalf of arena `arena_ind`.
///
/// A lazy extent purge function (e.g. implemented via `madvise(...MADV_FREE)`)
/// can delay purging indefinitely and leave the pages within the purged virtual
/// memory range in an indeterminate state, whereas a forced extent purge
/// function immediately purges, and the pages within the virtual memory range
/// will be zero-filled the next time they are accessed. If the function returns
/// `true`, this indicates failure to purge.
pub type extent_purge_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    offset: size_t,
    length: size_t,
    arena_ind: c_uint,
) -> c_bool;
849 | |
/// Extent split function.
///
/// Optionally splits an extent at given `addr` and `size` into two adjacent
/// extents, the first of `size_a` bytes, and the second of `size_b` bytes,
/// operating on `committed`/decommitted memory as indicated, on behalf of arena
/// `arena_ind`, returning `false` upon success.
///
/// If the function returns `true`, this indicates that the extent remains
/// unsplit and therefore should continue to be operated on as a whole.
pub type extent_split_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr: *mut c_void,
    size: size_t,
    size_a: size_t,
    size_b: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;
868 | |
/// Extent merge function.
///
/// Optionally merges adjacent extents, at given `addr_a` and `size_a` with
/// given `addr_b` and `size_b` into one contiguous extent, operating on
/// `committed`/decommitted memory as indicated, on behalf of arena `arena_ind`,
/// returning `false` upon success.
///
/// If the function returns `true`, this indicates that the extents remain
/// distinct mappings and therefore should continue to be operated on
/// independently.
pub type extent_merge_t = unsafe extern "C" fn(
    extent_hooks: *mut extent_hooks_t,
    addr_a: *mut c_void,
    size_a: size_t,
    addr_b: *mut c_void,
    size_b: size_t,
    committed: c_bool,
    arena_ind: c_uint,
) -> c_bool;
888 | |
// Build-environment-dependent definitions; items are generated/selected at
// build time, hence docs are suppressed for them.
#[allow (missing_docs)]
mod env;

// Re-export everything from the `env` submodule at the crate root so callers
// need not know about the internal module layout.
pub use env::*;
893 | |