Lines matching refs: cSize (zstd, lib/compress/zstdmt_compress.c)
606 unsigned jobID, size_t cSize) in ZSTDMT_serialState_ensureFinished() argument
610 assert(ZSTD_isError(cSize)); (void)cSize; in ZSTDMT_serialState_ensureFinished()
633 size_t cSize; /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */ member
655 job->cSize = e; \
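
Line 655 is the body of the JOB_ERROR macro: a failed worker stores the error code directly into job->cSize, leaning on zstd's convention that a size_t result doubles as an error code testable with ZSTD_isError(). A minimal standalone sketch of that convention using only the public API (the undersized dst buffer is contrived to force an error):

    #include <stdio.h>
    #include <string.h>
    #include <zstd.h>

    int main(void)
    {
        char dst[8];                          /* deliberately too small */
        const char src[] = "hello hello hello hello hello";
        size_t const r = ZSTD_compress(dst, sizeof(dst), src, strlen(src), 1);
        if (ZSTD_isError(r)) {
            /* the same size_t that would carry a compressed size now
             * carries an error code -- exactly what JOB_ERROR stores
             * into job->cSize */
            printf("failed: %s\n", ZSTD_getErrorName(r));
        } else {
            printf("compressed to %zu bytes\n", r);
        }
        return 0;
    }
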
726 assert(job->cSize == 0); in ZSTDMT_compressionJob()
728 size_t const cSize = ZSTD_compressContinue(cctx, op, oend-op, ip, chunkSize); in ZSTDMT_compressionJob() local
729 if (ZSTD_isError(cSize)) JOB_ERROR(cSize); in ZSTDMT_compressionJob()
731 op += cSize; assert(op < oend); in ZSTDMT_compressionJob()
734 job->cSize += cSize; in ZSTDMT_compressionJob()
737 (U32)cSize, (U32)job->cSize); in ZSTDMT_compressionJob()
747 size_t const cSize = (job->lastJob) ? in ZSTDMT_compressionJob() local
750 if (ZSTD_isError(cSize)) JOB_ERROR(cSize); in ZSTDMT_compressionJob()
751 lastCBlockSize = cSize; in ZSTDMT_compressionJob()
755 ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize); in ZSTDMT_compressionJob()
764 if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0); in ZSTDMT_compressionJob()
765 job->cSize += lastCBlockSize; in ZSTDMT_compressionJob()
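
Lines 726-765 trace the worker's accumulation pattern: each chunk is compressed with ZSTD_compressContinue(), the returned size is added to the shared job->cSize, and the final job closes the frame via ZSTD_compressEnd() (the ternary at line 747). Below is a single-threaded sketch of the same pattern through the buffer-less ZSTD_STATIC_LINKING_ONLY API; buffer and chunk sizes are illustrative, not taken from zstdmt:

    #define ZSTD_STATIC_LINKING_ONLY     /* ZSTD_compressBegin/Continue/End */
    #include <stdio.h>
    #include <string.h>
    #include <zstd.h>

    int main(void)
    {
        const char src[] = "an input split into fixed-size chunks, repeated for effect";
        size_t const chunkSize = 16;
        char dst[256];
        char* op = dst;
        char* const oend = dst + sizeof(dst);
        size_t totalCSize = 0;               /* plays the role of job->cSize */

        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        if (ZSTD_isError(ZSTD_compressBegin(cctx, 1))) { ZSTD_freeCCtx(cctx); return 1; }

        {   const char* ip = src;
            const char* const iend = src + strlen(src);
            while (ip < iend) {
                size_t const n = ((size_t)(iend - ip) < chunkSize) ? (size_t)(iend - ip) : chunkSize;
                int const lastChunk = (ip + n == iend);
                size_t const cSize = lastChunk ?
                        ZSTD_compressEnd     (cctx, op, (size_t)(oend - op), ip, n) :
                        ZSTD_compressContinue(cctx, op, (size_t)(oend - op), ip, n);
                if (ZSTD_isError(cSize)) { ZSTD_freeCCtx(cctx); return 1; }
                op += cSize; ip += n;
                totalCSize += cSize;         /* mirrors job->cSize += cSize */
            }
        }
        printf("compressed %zu -> %zu bytes\n", strlen(src), totalCSize);
        ZSTD_freeCCtx(cctx);
        return 0;
    }
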
1070 { size_t const cResult = jobPtr->cSize; in ZSTDMT_getFrameProgression()
1098 { size_t const cResult = jobPtr->cSize; in ZSTDMT_toFlushNow()
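
Lines 1070 and 1098 snapshot the shared cSize into a local cResult before computing progress and flushable bytes. The public surface for those numbers is ZSTD_getFrameProgression() (static-linking-only API); a sketch of polling it, here on a fresh context where every counter is still zero:

    #define ZSTD_STATIC_LINKING_ONLY     /* ZSTD_getFrameProgression */
    #include <stdio.h>
    #include <zstd.h>

    int main(void)
    {
        ZSTD_CCtx* const cctx = ZSTD_createCCtx();
        ZSTD_frameProgression const fp = ZSTD_getFrameProgression(cctx);
        /* 'produced' is fed by the per-job cSize snapshots taken at line 1070;
         * 'flushed' subtracts what ZSTDMT_flushProduced has already written out */
        printf("ingested=%llu consumed=%llu produced=%llu flushed=%llu\n",
               (unsigned long long)fp.ingested, (unsigned long long)fp.consumed,
               (unsigned long long)fp.produced, (unsigned long long)fp.flushed);
        ZSTD_freeCCtx(cctx);
        return 0;
    }
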
1311 job->cSize = ERROR(memory_allocation); in ZSTDMT_writeLastEmptyBlock()
1316 job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity); in ZSTDMT_writeLastEmptyBlock()
1317 assert(!ZSTD_isError(job->cSize)); in ZSTDMT_writeLastEmptyBlock()
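
Lines 1311-1317 cover a last job that carries no input: job->cSize ends up either as ERROR(memory_allocation) or as the size of a last empty block, which per RFC 8878 is a 3-byte block header with the last-block bit set, type Raw, size 0. The function below is a hypothetical re-creation for illustration, not the internal ZSTD_writeLastEmptyBlock():

    #include <assert.h>
    #include <stdio.h>

    enum { kBlockHeaderSize = 3 };           /* zstd block header size, RFC 8878 */

    /* local stand-in for the internal ZSTD_writeLastEmptyBlock() */
    static size_t writeLastEmptyBlock(unsigned char* dst, size_t dstCapacity)
    {
        assert(dstCapacity >= kBlockHeaderSize); (void)dstCapacity;
        /* little-endian 24-bit header: bit 0 lastBlock=1, bits 1-2 type=Raw(0),
         * remaining bits blockSize=0  =>  bytes 01 00 00 */
        dst[0] = 1; dst[1] = 0; dst[2] = 0;
        return kBlockHeaderSize;
    }

    int main(void)
    {
        unsigned char buf[8];
        size_t const n = writeLastEmptyBlock(buf, sizeof(buf));
        printf("last empty block: %zu bytes: %02X %02X %02X\n", n, buf[0], buf[1], buf[2]);
        return 0;
    }
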
1341 mtctx->jobs[jobID].cSize = 0; in ZSTDMT_createCompressionJob()
1416 assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize); in ZSTDMT_flushProduced()
1417 while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) { /* nothing to flush */ in ZSTDMT_flushProduced()
1429 { size_t cSize = mtctx->jobs[wJobID].cSize; /* shared */ in ZSTDMT_flushProduced() local
1433 if (ZSTD_isError(cSize)) { in ZSTDMT_flushProduced()
1435 mtctx->doneJobID, ZSTD_getErrorName(cSize)); in ZSTDMT_flushProduced()
1438 return cSize; in ZSTDMT_flushProduced()
1446 MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum); in ZSTDMT_flushProduced()
1447 cSize += 4; in ZSTDMT_flushProduced()
1448 mtctx->jobs[wJobID].cSize += 4; /* can write this shared value, as worker is no longer active */ in ZSTDMT_flushProduced()
1452 if (cSize > 0) { /* compression is ongoing or completed */ in ZSTDMT_flushProduced()
1453 size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos); in ZSTDMT_flushProduced()
1455 (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize); in ZSTDMT_flushProduced()
1457 assert(cSize >= mtctx->jobs[wJobID].dstFlushed); in ZSTDMT_flushProduced()
1468 && (mtctx->jobs[wJobID].dstFlushed == cSize) ) { /* output buffer fully flushed => free this job position */ in ZSTDMT_flushProduced()
1474 mtctx->jobs[wJobID].cSize = 0; /* ensure this job slot is considered "not started" in future checks */ in ZSTDMT_flushProduced()
1476 mtctx->produced += cSize; in ZSTDMT_flushProduced()
1481 if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed); in ZSTDMT_flushProduced()
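
Taken together, the flush path gives cSize three meanings: 0 marks a job slot as not started (reset at lines 1341 and 1474), an error code marks a failed worker (line 1433), and any other value counts bytes produced so far, growing by 4 once the frame checksum is appended (lines 1446-1448). That checksum is the low 32 bits of XXH64 (seed 0) over the uncompressed content, stored little-endian, per RFC 8878. A sketch, with writeLE32() as a local stand-in for zstd's internal MEM_writeLE32() and a contrived 10-byte offset; assumes xxhash.h is available (zstd vendors it):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>
    #include <xxhash.h>

    static void writeLE32(unsigned char* p, uint32_t v)   /* stand-in for MEM_writeLE32 */
    {
        p[0] = (unsigned char)(v);
        p[1] = (unsigned char)(v >> 8);
        p[2] = (unsigned char)(v >> 16);
        p[3] = (unsigned char)(v >> 24);
    }

    int main(void)
    {
        const char content[] = "the uncompressed payload";
        unsigned char frame[64] = {0};
        size_t cSize = 10;     /* pretend 10 compressed bytes already sit in frame */

        uint32_t const checksum = (uint32_t)XXH64(content, strlen(content), 0);
        writeLE32(frame + cSize, checksum);
        cSize += 4;            /* mirrors line 1448 */
        printf("appended checksum %08X, cSize now %zu\n", (unsigned)checksum, cSize);
        return 0;
    }
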