Lines matching defs:ws
(Matched uses of the workspace pointer ws in zstd's ZSTD_cwksp workspace allocator, zstd_cwksp.h. The leading number on each line is its line number in that source file; non-matching lines are omitted.)

161 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
163 MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
164 (void)ws;
165 assert(ws->workspace <= ws->objectEnd);
166 assert(ws->objectEnd <= ws->tableEnd);
167 assert(ws->objectEnd <= ws->tableValidEnd);
168 assert(ws->tableEnd <= ws->allocStart);
169 assert(ws->tableValidEnd <= ws->allocStart);
170 assert(ws->allocStart <= ws->workspaceEnd);
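
The asserts above pin down the arena layout. A sketch of the map they imply (my reading of the invariants, not a diagram copied from the header):

    /*
     *  workspace    objectEnd      tableEnd           allocStart    workspaceEnd
     *     |-- objects --|-- tables --|  ...free...  |-- aligned/buffers --|
     *
     * Objects, then tables, grow upward from the start; buffers and aligned
     * allocations grow downward from the end. tableValidEnd lies between
     * objectEnd and allocStart and records how much of the table area is
     * known to hold initialized values.
     */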
245 ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
247 void* const alloc = (BYTE*)ws->allocStart - bytes;
248 void* const bottom = ws->tableEnd;
250 alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
251 ZSTD_cwksp_assert_internal_consistency(ws);
255 ws->allocFailed = 1;
260 if (alloc < ws->tableValidEnd) {
261 ws->tableValidEnd = alloc;
263 ws->allocStart = alloc;
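
Reconstructed as a standalone toy (the toy_* names are mine, not zstd's), the downward bump allocation these lines perform looks like this; the real function additionally clamps tableValidEnd when the new block overlaps it:

    #include <stddef.h>

    typedef struct {
        char* tableEnd;    /* top of the upward-growing (table) region */
        char* allocStart;  /* bottom of the downward-growing region */
        int   allocFailed;
    } toy_wksp;

    /* Carve `bytes` off the top free space: move allocStart down, and latch
     * allocFailed if that would cross into the table area. */
    static void* toy_reserve_from_top(toy_wksp* ws, size_t bytes) {
        char* const alloc = ws->allocStart - bytes;
        if (alloc < ws->tableEnd) {
            ws->allocFailed = 1;
            return NULL;
        }
        ws->allocStart = alloc;
        return alloc;
    }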
273 ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
275 assert(phase >= ws->phase);
276 if (phase > ws->phase) {
278 if (ws->phase < ZSTD_cwksp_alloc_buffers &&
280 ws->tableValidEnd = ws->objectEnd;
284 if (ws->phase < ZSTD_cwksp_alloc_aligned &&
288 ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
291 RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
295 void* const alloc = ws->objectEnd;
299 RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
301 ws->objectEnd = objectEnd;
302 ws->tableEnd = objectEnd; /* table area starts being empty */
303 if (ws->tableValidEnd < ws->tableEnd) {
304 ws->tableValidEnd = ws->tableEnd;
306 ws->phase = phase;
307 ZSTD_cwksp_assert_internal_consistency(ws);
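
The arithmetic at source line 288 makes sense if ZSTD_cwksp_bytes_to_align_ptr returns the distance from a pointer up to the next alignment boundary (an assumption on my part, inferred from how it is used): since allocStart grows downward, aligning it costs the complement of that distance.

    #include <stdint.h>
    #include <stddef.h>

    /* Bytes needed to round `ptr` UP to a power-of-two boundary: in [0, align-1]. */
    static size_t toy_bytes_to_align_up(const void* ptr, size_t align) {
        return (align - ((uintptr_t)ptr & (align - 1))) & (align - 1);
    }

    /* For the downward-growing allocStart, the padding reserved above is
     * align - toy_bytes_to_align_up(ptr, align), a value in [1, align]. */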
315 MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
317 return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
324 ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
327 if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
332 alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
341 MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
343 return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
349 MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
351 void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
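
reserve_buffer hands back raw bytes, while reserve_aligned first rounds the request up to the alignment unit, so the downward cursor stays 64-byte aligned once the aligned phase has begun. A toy version of that rounding (assuming ZSTD_cwksp_align rounds a size up to a power-of-two multiple):

    /* Round `size` up to a multiple of a power-of-two `align`. */
    static size_t toy_align_up(size_t size, size_t align) {
        size_t const mask = align - 1;
        return (size + mask) & ~mask;
    }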
362 MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
369 if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
372 alloc = ws->tableEnd;
374 top = ws->allocStart;
377 alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
379 ZSTD_cwksp_assert_internal_consistency(ws);
383 ws->allocFailed = 1;
386 ws->tableEnd = end;
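
Tables are the one allocation type taken from the bottom region. Extending the toy_wksp sketch above, the upward mirror image of toy_reserve_from_top is:

    /* Tables grow UP from tableEnd toward allocStart; fail (and latch
     * allocFailed) if the new end would collide with the top region. */
    static void* toy_reserve_table(toy_wksp* ws, size_t bytes) {
        char* const alloc = ws->tableEnd;
        char* const end   = alloc + bytes;
        if (end > ws->allocStart) {
            ws->allocFailed = 1;
            return NULL;
        }
        ws->tableEnd = end;
        return alloc;
    }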
398 MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
401 void* alloc = ws->objectEnd;
407 alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
410 ZSTD_cwksp_assert_internal_consistency(ws);
412 if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
414 ws->allocFailed = 1;
417 ws->objectEnd = end;
418 ws->tableEnd = end;
419 ws->tableValidEnd = end;
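
Objects are carved first, at the very bottom of the arena, and advancing objectEnd drags tableEnd and tableValidEnd along so the (still empty) table area begins right after the last object. A toy paraphrase (toy_wksp_obj is my name; the real code also rounds bytes up to a multiple of sizeof(void*) and checks the phase):

    typedef struct {
        char *objectEnd, *tableEnd, *tableValidEnd, *workspaceEnd;
        int   allocFailed;
    } toy_wksp_obj;

    static void* toy_reserve_object(toy_wksp_obj* ws, size_t bytes) {
        char* const alloc = ws->objectEnd;
        char* const end   = alloc + bytes;
        if (end > ws->workspaceEnd) {
            ws->allocFailed = 1;
            return NULL;
        }
        ws->objectEnd = ws->tableEnd = ws->tableValidEnd = end;
        return alloc;
    }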
425 MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
430 assert(ws->tableValidEnd >= ws->objectEnd);
431 assert(ws->tableValidEnd <= ws->allocStart);
432 ws->tableValidEnd = ws->objectEnd;
433 ZSTD_cwksp_assert_internal_consistency(ws);
436 MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
438 assert(ws->tableValidEnd >= ws->objectEnd);
439 assert(ws->tableValidEnd <= ws->allocStart);
440 if (ws->tableValidEnd < ws->tableEnd) {
441 ws->tableValidEnd = ws->tableEnd;
443 ZSTD_cwksp_assert_internal_consistency(ws);
449 MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
451 assert(ws->tableValidEnd >= ws->objectEnd);
452 assert(ws->tableValidEnd <= ws->allocStart);
453 if (ws->tableValidEnd < ws->tableEnd) {
454 ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
456 ZSTD_cwksp_mark_tables_clean(ws);
463 MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
467 ws->tableEnd = ws->objectEnd;
468 ZSTD_cwksp_assert_internal_consistency(ws);
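
Together, source lines 425-468 implement lazy zeroing: tableValidEnd is a high-water mark of table bytes known to be initialized, so a reset only has to memset the stale tail rather than the whole table area. A toy paraphrase using the toy_wksp_obj fields above:

    #include <string.h>

    /* After outside code may have scribbled on the arena, nothing past the
     * objects can be trusted any more. */
    static void toy_mark_tables_dirty(toy_wksp_obj* ws) {
        ws->tableValidEnd = ws->objectEnd;
    }

    /* Zero only the table bytes not already known-valid, then record that
     * the whole current table area is valid. */
    static void toy_clean_tables(toy_wksp_obj* ws) {
        if (ws->tableValidEnd < ws->tableEnd) {
            memset(ws->tableValidEnd, 0,
                   (size_t)(ws->tableEnd - ws->tableValidEnd));
            ws->tableValidEnd = ws->tableEnd;
        }
    }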
475 MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
480 ws->tableEnd = ws->objectEnd;
481 ws->allocStart = ws->workspaceEnd;
482 ws->allocFailed = 0;
483 if (ws->phase > ZSTD_cwksp_alloc_buffers) {
484 ws->phase = ZSTD_cwksp_alloc_buffers;
486 ZSTD_cwksp_assert_internal_consistency(ws);
494 MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
497 ws->workspace = start;
498 ws->workspaceEnd = (BYTE*)start + size;
499 ws->objectEnd = ws->workspace;
500 ws->tableValidEnd = ws->objectEnd;
501 ws->phase = ZSTD_cwksp_alloc_objects;
502 ws->isStatic = isStatic;
503 ZSTD_cwksp_clear(ws);
504 ws->workspaceOversizedDuration = 0;
505 ZSTD_cwksp_assert_internal_consistency(ws);
508 MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
512 ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
516 MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
517 void *ptr = ws->workspace;
519 ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
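
Putting the lifecycle together, a hypothetical caller inside the zstd source tree (the include path, the sizes, and demo_cwksp itself are my assumptions, and buf is assumed suitably aligned; ZSTD_cwksp_static_alloc means the caller, not the workspace, owns the buffer):

    #include "zstd_cwksp.h"  /* internal header, assumed reachable */

    static int demo_cwksp(void* buf, size_t bufSize) {
        ZSTD_cwksp ws;
        ZSTD_cwksp_init(&ws, buf, bufSize, ZSTD_cwksp_static_alloc);

        /* Phase order matters: objects first, then buffers, then aligned/tables. */
        {   void* obj = ZSTD_cwksp_reserve_object(&ws, 64);
            BYTE* b   = ZSTD_cwksp_reserve_buffer(&ws, 1024);
            (void)obj; (void)b;
        }
        if (ZSTD_cwksp_reserve_failed(&ws)) return -1;

        ZSTD_cwksp_clear(&ws);  /* pops tables/buffers/aligned, keeps objects */
        return 0;
    }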
532 MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
533 return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
536 MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
537 return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
538 + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
541 MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
542 return ws->allocFailed;
553 MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
557 return ZSTD_cwksp_used(ws) == estimatedSpace;
562 return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
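
A note on the 63-byte slack, inferred from the alignment logic above rather than stated in these lines:

    /* Entering the aligned phase can consume up to ZSTD_CWKSP_ALIGNMENT_BYTES
     * (64) of padding that a size estimate made without the runtime pointer
     * values cannot predict, so reuse may land up to 63 bytes on either side
     * of the estimate. */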
567 MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
568 return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
571 MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
572 return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
575 MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
577 ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
580 MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
581 return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
582 && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
586 ZSTD_cwksp* ws, size_t additionalNeededSpace) {
587 if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
588 ws->workspaceOversizedDuration++;
590 ws->workspaceOversizedDuration = 0;
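
The last three functions form a shrink heuristic: the workspace counts as too large while it could still hold ZSTD_WORKSPACETOOLARGE_FACTOR times the needed space, and as wasteful once that has persisted for more than ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses. A standalone paraphrase that folds the bump and the check into one call (the constants are stand-ins; the real values are defined elsewhere in zstd):

    #include <stddef.h>

    #define TOY_TOOLARGE_FACTOR      3    /* stand-in value */
    #define TOY_TOOLARGE_MAXDURATION 128  /* stand-in value */

    /* Count consecutive uses where capacity dwarfs the need; once the streak
     * outlasts MAXDURATION, recommend reallocating a smaller workspace. */
    static int toy_check_wasteful(size_t available, size_t needed,
                                  unsigned* oversizedDuration) {
        int const tooLarge = available >= needed * TOY_TOOLARGE_FACTOR;
        if (tooLarge) { (*oversizedDuration)++; }
        else          { *oversizedDuration = 0; }
        return tooLarge && *oversizedDuration > TOY_TOOLARGE_MAXDURATION;
    }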