/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
 * Dependencies
 ***************************************/
#include "../common/zstd_internal.h"


/*-*************************************
 * Constants
 ***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Tables and aligned allocations are aligned to 64 bytes. */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
 * Structures
 ***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/*
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free it.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/*
 * Zstd fits all its internal data structures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate the process of allocating memory ranges
 * from this workspace for each internal data structure:
 *
 * - These different internal data structures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to be
 *     bounded, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different data structures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Aligned: these buffers are used for various purposes that require 4-byte
 *   alignment, but don't require any initialization before they're used. These
 *   buffers are each aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned/Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;      /* start of the workspace buffer */
    void* workspaceEnd;   /* one past the last byte of the workspace buffer */

    void* objectEnd;      /* end of the objects segment (grows upward) */
    void* tableEnd;       /* end of the currently allocated tables */
    void* tableValidEnd;  /* end of the table region whose values are known to be bounded */
    void* allocStart;     /* start of the aligned/buffer segment (grows downward) */

    BYTE allocFailed;     /* set to 1 when any reservation fails */
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
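
/*
 * Illustrative usage sketch (hypothetical names and sizes; error handling
 * elided). Reservations must follow the phase order documented above:
 * objects first, then buffers, then aligneds/tables.
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
 *     obj   = ZSTD_cwksp_reserve_object(&ws, objSize);       - phase: objects
 *     buf   = ZSTD_cwksp_reserve_buffer(&ws, bufSize);       - phase: buffers
 *     algn  = ZSTD_cwksp_reserve_aligned(&ws, alignedSize);  - phase: aligned
 *     table = ZSTD_cwksp_reserve_table(&ws, tableSize);      - phase: aligned
 *     if (ZSTD_cwksp_reserve_failed(&ws)) { handle the failure }
 */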

/*-*************************************
 * Functions
 ***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/*
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
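
/*
 * Worked example: with align == 64, mask == 63, so
 * ZSTD_cwksp_align(100, 64) == (100 + 63) & ~63 == 128, while an
 * already-aligned size is unchanged: ZSTD_cwksp_align(128, 64) == 128.
 */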

/*
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
    return size;
}

/*
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}

/*
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional n_1 = [1, 64] bytes
     * to align the beginning of the aligned section, as well as another n_2 = [0, 63]
     * bytes to align the beginning of the tables section.
     *
     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
     * aligneds being sized in multiples of 64 bytes.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
    return slackSpace;
}
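
/*
 * Example, following the comment above: on a freshly allocated cwksp, if
 * aligning the aligned section consumes n_1 == 24 bytes, aligning the tables
 * section consumes n_2 == 64 - 24 == 40 bytes, so
 * n_1 + n_2 == ZSTD_CWKSP_ALIGNMENT_BYTES, matching slackSpace.
 */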


/*
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
    return bytes;
}
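
/*
 * Worked example: for a pointer at address 0x1010 with alignBytes == 64,
 * (size_t)ptr & 63 == 16, so bytes == (64 - 16) & 63 == 48, and
 * 0x1010 + 48 == 0x1040, which is 64-byte aligned. For an already-aligned
 * pointer, bytes == (64 - 0) & 63 == 0.
 */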

/*
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/*
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating buffers */
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }

        /* Going from allocating buffers to allocating aligneds/tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
                size_t const bytesToAlign =
                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
                                memory_allocation, "aligned phase - alignment initial allocation failed!");
            }
            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes. */
                void* const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;  /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
        }   }   }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}
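
/*
 * Illustration with hypothetical addresses: suppose objectEnd == 0x1008 and
 * allocStart == 0x2030 when entering the aligned phase. The first step computes
 * ZSTD_cwksp_bytes_to_align_ptr(0x2030, 64) == 16 and reserves 64 - 16 == 48
 * bytes from the end, moving allocStart down to 0x2000 (64-byte aligned). The
 * second step advances objectEnd by ZSTD_cwksp_bytes_to_align_ptr(0x1008, 64)
 * == 56 bytes, up to 0x1040, where the (initially empty) table area begins.
 */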

/*
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/*
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

    return alloc;
}

/*
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/*
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}
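
/*
 * Example (hypothetical sizes): a buffer must be reserved before any aligned
 * reservation, per the phase order documented at the top of this file.
 *
 *     BYTE* lit = ZSTD_cwksp_reserve_buffer(&ws, litBufSize);
 *     U32*  tmp = (U32*)ZSTD_cwksp_reserve_aligned(&ws, tmpSize);
 *
 * ZSTD_cwksp_reserve_aligned() rounds tmpSize up to a multiple of 64 and
 * returns a 64-byte-aligned pointer (or NULL on failure).
 */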

/*
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to reuse them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc;
    void* end;
    void* top;

    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
        return NULL;
    }
    alloc = ws->tableEnd;
    end = (BYTE*)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}

/*
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

    return alloc;
}
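
/*
 * Example (hypothetical type name): objects can only be reserved while the
 * workspace is still in its first phase, i.e. before any buffer, aligned, or
 * table reservation has been made.
 *
 *     someType* obj = (someType*)ZSTD_cwksp_reserve_object(&ws, sizeof(someType));
 *
 * The size is rounded up to a multiple of sizeof(void*); once a later phase
 * has begun, further object reservations set allocFailed and return NULL.
 */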

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/*
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/*
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/*
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/*
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}
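
/*
 * Example: wrapping a caller-provided buffer (assuming it is suitably aligned
 * for pointers):
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, buffer, bufferSize, ZSTD_cwksp_static_alloc);
 *
 * For a heap-backed workspace, ZSTD_cwksp_create() below performs the
 * allocation itself and then calls ZSTD_cwksp_init() with
 * ZSTD_cwksp_dynamic_alloc.
 */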

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void* ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}

/*
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
 * Functions Checking Free Space
 ***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable
 * limit of the actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
                                                        size_t const estimatedSpace, int resizedWorkspace) {
    if (resizedWorkspace) {
        /* Resized/newly allocated wksp should have exact bounds */
        return ZSTD_cwksp_used(ws) == estimatedSpace;
    } else {
        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
         * than estimatedSpace. See the comments in zstd_cwksp.h for details.
         */
        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
    }
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
                ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}


#endif /* ZSTD_CWKSP_H */