Lines Matching refs:bytes

36 /* Set our tables and aligneds to align by 64 bytes */
125 * buffers are each aligned to 64 bytes.
201 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
202 * Used to determine the number of bytes required for a given "aligned".
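Note: ZSTD_cwksp_align() (lines 201-202) is the round-up helper used throughout this file. A minimal standalone sketch of that round-up arithmetic, assuming the usual power-of-two masking (align_up is a hypothetical stand-in, not the zstd function):

    #include <assert.h>
    #include <stddef.h>

    /* Round size up to the nearest multiple of align (align must be a power of two). */
    static size_t align_up(size_t size, size_t align)
    {
        size_t const mask = align - 1;
        assert((align & mask) == 0);            /* power of two */
        return (size + mask) & ~mask;
    }

    int main(void)
    {
        assert(align_up( 1, 64) ==  64);
        assert(align_up(64, 64) ==  64);
        assert(align_up(65, 64) == 128);
        return 0;
    }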
213 /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
214 * to align the beginning of the aligned section, as well as another n_2=[0, 63] bytes
217 * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
218 * aligneds being sized in multiples of 64 bytes.
226 * Return the number of additional bytes required to align a pointer to the given number of bytes.
231 size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
233 assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
234 return bytes;
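Note: the expression at line 231 is the distance from ptr up to the next alignBytes boundary. A self-contained sketch of the same computation with illustrative asserts (bytes_to_align_ptr here is a local stand-in, not the zstd symbol):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Bytes needed to advance ptr to the next multiple of alignBytes (a power of two).
     * Returns 0 when ptr is already aligned. */
    static size_t bytes_to_align_ptr(void* ptr, size_t const alignBytes)
    {
        size_t const alignBytesMask = alignBytes - 1;
        size_t const bytes = (alignBytes - ((size_t)ptr & alignBytesMask)) & alignBytesMask;
        assert((alignBytes & alignBytesMask) == 0);
        assert(bytes < alignBytes);
        return bytes;
    }

    int main(void)
    {
        char buf[256];
        uintptr_t const base = ((uintptr_t)(void*)buf + 63) & ~(uintptr_t)63;  /* 64-aligned */
        assert(bytes_to_align_ptr((void*)base,        64) ==  0);
        assert(bytes_to_align_ptr((void*)(base +  1), 64) == 63);
        assert(bytes_to_align_ptr((void*)(base + 63), 64) ==  1);
        return 0;
    }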
239 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
245 ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
247 void* const alloc = (BYTE*)ws->allocStart - bytes;
249 DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
250 alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
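Note: line 247 shows the key move for buffers and aligneds: the allocation is carved downward from allocStart. A minimal two-pointer bump-allocator sketch of that idea, leaving out the cwksp bookkeeping (phases, table area, redzones) shown elsewhere in the file; mini_wksp and mini_reserve_top are hypothetical names:

    #include <assert.h>
    #include <stddef.h>

    typedef struct {
        char* workspace;    /* lowest address of the arena */
        char* allocStart;   /* top of the free region; moves down on each reserve */
    } mini_wksp;

    /* Reserve `bytes` from the top of the arena (alloc = allocStart - bytes);
     * returns NULL when the arena is exhausted. */
    static void* mini_reserve_top(mini_wksp* ws, size_t bytes)
    {
        if (bytes > (size_t)(ws->allocStart - ws->workspace))
            return NULL;
        ws->allocStart -= bytes;
        return ws->allocStart;
    }

    int main(void)
    {
        static char arena[1024];
        mini_wksp ws = { arena, arena + sizeof(arena) };
        void* a = mini_reserve_top(&ws, 100);
        void* b = mini_reserve_top(&ws, 200);
        assert(a != NULL && b != NULL);
        assert((char*)b + 200 == (char*)a);   /* later reservations sit below earlier ones */
        return 0;
    }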
286 { /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
294 { /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
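Note: together with the comment at lines 213-218, these two blocks pad each section start to a 64-byte boundary: per the comments here, the aligned section consumes [1, 64] padding bytes (presumably because a full ZSTD_CWKSP_ALIGNMENT_BYTES unit is reserved even when already aligned) while the tables section consumes [0, 63]. A small self-checking sketch of those ranges, assuming both section starts share the same offset mod 64 in a fresh workspace (pad_up is a hypothetical helper):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define ALIGNMENT_BYTES 64   /* stands in for ZSTD_CWKSP_ALIGNMENT_BYTES */

    /* Padding needed to bring addr up to the next 64-byte boundary: range [0, 63]. */
    static size_t pad_up(uintptr_t addr)
    {
        return (ALIGNMENT_BYTES - (addr & (ALIGNMENT_BYTES - 1))) & (ALIGNMENT_BYTES - 1);
    }

    int main(void)
    {
        uintptr_t addr;
        for (addr = 0; addr < 256; addr++) {
            size_t const n2 = pad_up(addr);                    /* tables:   [0, 63] */
            size_t const n1 = ALIGNMENT_BYTES - pad_up(addr);  /* aligneds: [1, 64] */
            assert(n2 <= 63);
            assert(n1 >= 1 && n1 <= 64);
            /* In a fresh cwksp both section starts share the same offset mod 64
             * (tables and aligneds are sized in multiples of 64), hence n1 + n2 == 64. */
            assert(n1 + n2 == ALIGNMENT_BYTES);
        }
        return 0;
    }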
324 ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
327 if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
332 alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);
341 MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
343 return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
347 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
349 MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
351 void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
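Note: line 351 rounds the requested size up to a multiple of ZSTD_CWKSP_ALIGNMENT_BYTES before reserving. A short sketch of why that matters: once the cursor is 64-aligned, handing out only 64-byte-multiple sizes keeps every subsequent aligned allocation 64-aligned (align_up and the cursor arithmetic are illustrative, not the zstd code):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    #define ALIGNMENT_BYTES 64

    static size_t align_up(size_t size, size_t align)
    {
        return (size + align - 1) & ~(align - 1);
    }

    int main(void)
    {
        uintptr_t cursor = 64 * 1000;                  /* a 64-aligned top-of-arena cursor */
        size_t const requests[] = { 5, 100, 64, 129 };
        size_t i;
        for (i = 0; i < sizeof(requests) / sizeof(requests[0]); i++) {
            cursor -= align_up(requests[i], ALIGNMENT_BYTES);  /* reserve from the top */
            assert(cursor % ALIGNMENT_BYTES == 0);             /* result stays 64-aligned */
        }
        return 0;
    }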
358 * Aligned on 64 bytes. These buffers have the special property that
362 MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
373 end = (BYTE *)alloc + bytes;
376 DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
377 alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
378 assert((bytes & (sizeof(U32)-1)) == 0);
389 assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
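Note: lines 373-389 show the other end of the arena: tables grow upward (end = alloc + bytes) and their sizes are asserted to be whole multiples of sizeof(U32) and of 64 bytes. A minimal sketch of that bottom-up side with the same two size asserts; mini_wksp and mini_reserve_table are hypothetical names and the collision check is a simplification:

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef struct {
        char* tableEnd;     /* grows upward as tables are reserved */
        char* allocStart;   /* top of free space (buffers grow down from here) */
    } mini_wksp;

    /* Reserve a table of `bytes` from the bottom: end = alloc + bytes. */
    static void* mini_reserve_table(mini_wksp* ws, size_t bytes)
    {
        char* const alloc = ws->tableEnd;
        assert((bytes & (sizeof(uint32_t) - 1)) == 0);   /* whole number of U32 cells */
        assert((bytes & (64 - 1)) == 0);                 /* multiple of 64 bytes */
        if (bytes > (size_t)(ws->allocStart - alloc))
            return NULL;                                 /* would collide with the top side */
        ws->tableEnd = alloc + bytes;
        return alloc;
    }

    int main(void)
    {
        static char arena[4096];
        mini_wksp ws = { arena, arena + sizeof(arena) };
        void* t1 = mini_reserve_table(&ws, 1024);
        void* t2 = mini_reserve_table(&ws, 2048);
        assert(t1 != NULL && t2 != NULL);
        assert((char*)t2 == (char*)t1 + 1024);           /* tables are packed upward */
        return 0;
    }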
398 MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
400 size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
406 "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
407 alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
409 assert(bytes % ZSTD_ALIGNOF(void*) == 0);
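Note: line 400 rounds object sizes up to a multiple of sizeof(void*) so consecutive objects stay pointer-aligned, which is also what the assert at line 409 expects of the requested size. A tiny sketch of that rounding (round_to_ptr_size is illustrative, not the zstd helper):

    #include <assert.h>
    #include <stddef.h>

    /* Round an object size up to a multiple of sizeof(void*), as at line 400. */
    static size_t round_to_ptr_size(size_t bytes)
    {
        size_t const a = sizeof(void*);
        return (bytes + a - 1) / a * a;
    }

    int main(void)
    {
        assert(round_to_ptr_size(1)  % sizeof(void*) == 0);
        assert(round_to_ptr_size(13) >= 13);
        assert(round_to_ptr_size(sizeof(void*)) == sizeof(void*));
        return 0;
    }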
495 DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
510 DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
559 /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
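Note: a worked sketch of where the 63-byte figure can come from, assuming the drift is the address-mod-64-dependent padding described at lines 213-218: section starts that differ mod 64 can need paddings that differ by at most 63 bytes (pad_up is a hypothetical helper):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>

    /* Padding to the next 64-byte boundary, range [0, 63]. */
    static size_t pad_up(uintptr_t addr)
    {
        return (64 - (addr & 63)) & 63;
    }

    int main(void)
    {
        /* The same reservations, started at offsets that differ mod 64,
         * can need paddings that differ by at most 63 bytes. */
        size_t maxDelta = 0;
        uintptr_t a, b;
        for (a = 0; a < 64; a++) {
            for (b = 0; b < 64; b++) {
                size_t const pa = pad_up(a), pb = pad_up(b);
                size_t const d  = pa > pb ? pa - pb : pb - pa;
                if (d > maxDelta) maxDelta = d;
            }
        }
        assert(maxDelta == 63);
        return 0;
    }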