/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't be, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned
 * 4. Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
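
/* A minimal usage sketch, added for illustration (not part of the original
 * source). It shows the allocation-order discipline documented above; the
 * buffer, the sizes, and the `wksp` variable are hypothetical, and a real
 * caller (e.g. ZSTD_CCtx) sizes the workspace via ZSTD_cwksp_alloc_size()
 * before reserving anything. Note that the aligned and table sizes must be
 * multiples of sizeof(U32).
 *
 *     ZSTD_cwksp wksp;
 *     ZSTD_cwksp_init(&wksp, buffer, bufferSize, ZSTD_cwksp_static_alloc);
 *
 *     void* obj     = ZSTD_cwksp_reserve_object(&wksp, objectBytes);   // 1. objects
 *     BYTE* buf     = ZSTD_cwksp_reserve_buffer(&wksp, bufferBytes);   // 2. buffers
 *     void* aligned = ZSTD_cwksp_reserve_aligned(&wksp, alignedBytes); // 3. aligned
 *     void* table   = ZSTD_cwksp_reserve_table(&wksp, tableBytes);     // 4. tables
 *
 *     if (ZSTD_cwksp_reserve_failed(&wksp)) {
 *         // not enough room in the workspace
 *     }
 */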

/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
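/* Worked examples, added for illustration (not part of the original source):
 * ZSTD_cwksp_align(13, 8) == 16, ZSTD_cwksp_align(16, 8) == 16, and
 * ZSTD_cwksp_align(0, 4) == 0. */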

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}
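/* Illustrative arithmetic, assuming the default ZSTD_CWKSP_ASAN_REDZONE_SIZE
 * of 128 (not part of the original source): under ASAN,
 * ZSTD_cwksp_alloc_size(100) is 100 + 2*128 == 356; without ASAN it is simply
 * 100, and a size of 0 always yields 0. */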

MEM_STATIC void ZSTD_cwksp_internal_advance_phase(
        ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase) {
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            /* If unaligned allocations down from a too-large top have left us
             * unaligned, we need to realign our alloc ptr. Technically, this
             * can consume space that is unaccounted for in the neededSpace
             * calculation. However, I believe this can only happen when the
             * workspace is too large, and specifically when it is too large
             * by a larger margin than the space that will be consumed. */
            /* TODO: cleaner, compiler warning friendly way to do this??? */
            ws->allocStart = (BYTE*)ws->allocStart - ((size_t)ws->allocStart & (sizeof(U32)-1));
            if (ws->allocStart < ws->tableValidEnd) {
                ws->tableValidEnd = ws->allocStart;
            }
        }
        ws->phase = phase;
    }
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr) {
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_internal(
        ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase) {
    void* alloc;
    void* bottom = ws->tableEnd;
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    alloc = (BYTE *)ws->allocStart - bytes;

    if (bytes == 0)
        return NULL;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    alloc = (BYTE *)alloc - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes) {
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on sizeof(unsigned).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes) {
    assert((bytes & (sizeof(U32)-1)) == 0);
    return ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, sizeof(U32)), ZSTD_cwksp_alloc_aligned);
}

/**
 * Aligned on sizeof(unsigned). These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes) {
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc = ws->tableEnd;
    void* end = (BYTE *)alloc + bytes;
    void* top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_internal_advance_phase(ws, phase);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

/**
 * Aligned on sizeof(void*).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes) {
    size_t roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(5,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert(((size_t)alloc & (sizeof(void*)-1)) == 0);
    assert((bytes & (sizeof(void*)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(4, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}
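
/* An illustrative sketch (not part of the original source) of how the
 * table-validity helpers above compose. The call sites are hypothetical; in
 * the real library, the compressor's reset and copy paths drive these calls.
 *
 *     void* table = ZSTD_cwksp_reserve_table(ws, tableBytes);
 *
 *     // If something writes unconstrained values into the table area (e.g. a
 *     // raw copy from another context), the "all values bounded" guarantee is
 *     // lost, so record that:
 *     ZSTD_cwksp_mark_tables_dirty(ws);
 *
 *     // Before the next compression relies on that guarantee again, zero only
 *     // the still-dirty portion and mark everything clean:
 *     ZSTD_cwksp_clean_tables(ws);
 */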

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
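
/* A hedged end-to-end sketch (not part of the original source) of the dynamic
 * workspace lifecycle: create with a custom allocator, reserve, then free.
 * `neededBytes` and `objectBytes` are hypothetical; a real caller derives them
 * from its compression parameters, typically by summing ZSTD_cwksp_alloc_size()
 * over the objects it intends to reserve.
 *
 *     ZSTD_cwksp wksp;
 *     size_t const err = ZSTD_cwksp_create(&wksp, neededBytes, ZSTD_defaultCMem);
 *     if (ZSTD_isError(err)) return err;                  // allocation failed
 *     void* obj = ZSTD_cwksp_reserve_object(&wksp, objectBytes);
 *     // ... reserve buffers / aligned memory / tables, run the compression ...
 *     ZSTD_cwksp_free(&wksp, ZSTD_defaultCMem);           // one single free
 */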

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */