
Searched refs:avail (Results 1 – 24 of 24) sorted by relevance

/fs/squashfs/
zlib_wrapper.c
70 int avail; in zlib_uncompress() local
78 avail = min(length, ((int)bvec->bv_len) - offset); in zlib_uncompress()
80 length -= avail; in zlib_uncompress()
82 stream->avail_in = avail; in zlib_uncompress()
lzo_wrapper.c
77 int avail = min(bytes, ((int)bvec->bv_len) - offset); in lzo_uncompress() local
80 memcpy(buff, data + offset, avail); in lzo_uncompress()
81 buff += avail; in lzo_uncompress()
82 bytes -= avail; in lzo_uncompress()
zstd_wrapper.c
89 int avail; in zstd_uncompress() local
96 avail = min(length, ((int)bvec->bv_len) - offset); in zstd_uncompress()
98 length -= avail; in zstd_uncompress()
100 in_buf.size = avail; in zstd_uncompress()
lz4_wrapper.c
102 int avail = min(bytes, ((int)bvec->bv_len) - offset); in lz4_uncompress() local
105 memcpy(buff, data + offset, avail); in lz4_uncompress()
106 buff += avail; in lz4_uncompress()
107 bytes -= avail; in lz4_uncompress()
xz_wrapper.c
140 int avail; in squashfs_xz_uncompress() local
148 avail = min(length, ((int)bvec->bv_len) - offset); in squashfs_xz_uncompress()
150 length -= avail; in squashfs_xz_uncompress()
152 stream->buf.in_size = avail; in squashfs_xz_uncompress()
file.c
364 …d squashfs_fill_page(struct page *page, struct squashfs_cache_entry *buffer, int offset, int avail) in squashfs_fill_page() argument
370 copied = squashfs_copy_data(pageaddr, buffer, offset, avail); in squashfs_fill_page()
375 if (copied == avail) in squashfs_fill_page()
399 int avail = buffer ? min_t(int, bytes, PAGE_SIZE) : 0; in squashfs_copy_cache() local
401 TRACE("bytes %d, i %d, available_bytes %d\n", bytes, i, avail); in squashfs_copy_cache()
412 squashfs_fill_page(push_page, buffer, offset, avail); in squashfs_copy_cache()
file_direct.c
160 int avail = min_t(int, bytes, PAGE_SIZE); in squashfs_read_cache() local
165 squashfs_fill_page(page[n], buffer, offset, avail); in squashfs_read_cache()
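All of the squashfs decompressor wrappers above (zlib, lzo, zstd, lz4, xz) use avail for the same thing: the number of bytes that can be taken from the current bio_vec segment, clamped to what is still needed. A rough userspace sketch of that gather loop, using plain buffers instead of the kernel's struct bvec_iter_all (the struct segment type and the gather() name are illustrative, not kernel API):

#include <stddef.h>
#include <string.h>

struct segment {			/* stand-in for one bio_vec */
	const unsigned char *data;
	size_t len;
};

/*
 * Copy "length" bytes spread across segments into "out", starting
 * "offset" bytes into the first segment; each iteration consumes
 * avail = min(remaining, segment length - offset) bytes.
 */
static size_t gather(unsigned char *out, size_t length,
		     const struct segment *seg, size_t nseg, size_t offset)
{
	size_t copied = 0;

	for (size_t i = 0; i < nseg && length > 0; i++) {
		size_t avail = seg[i].len > offset ? seg[i].len - offset : 0;

		if (avail > length)
			avail = length;		/* clamp, like min() above */
		memcpy(out + copied, seg[i].data + offset, avail);
		copied += avail;
		length -= avail;
		offset = 0;			/* only the first segment is offset */
	}
	return copied;
}

In the real wrappers only lzo and lz4 actually memcpy into a bounce buffer; the zlib, zstd and xz paths instead feed the clamped chunk to the compressor's input descriptor (stream->avail_in, in_buf.size, buf.in_size).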
/fs/xfs/libxfs/
xfs_ag_resv.c
73 xfs_extlen_t avail; in xfs_ag_resv_critical() local
78 avail = pag->pagf_freeblks - pag->pag_rmapbt_resv.ar_reserved; in xfs_ag_resv_critical()
82 avail = pag->pagf_freeblks + pag->pagf_flcount - in xfs_ag_resv_critical()
91 trace_xfs_ag_resv_critical(pag, type, avail); in xfs_ag_resv_critical()
94 return XFS_TEST_ERROR(avail < orig / 10 || avail < XFS_BTREE_MAXLEVELS, in xfs_ag_resv_critical()
xfs_bmap.c
4719 xfs_filblks_t avail) /* stealable blocks */ in xfs_bmap_split_indlen() argument
4731 if (ores < nres && avail) in xfs_bmap_split_indlen()
4732 stolen = XFS_FILBLKS_MIN(nres - ores, avail); in xfs_bmap_split_indlen()
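In xfs_bmap_split_indlen() above, avail is the number of blocks that may be stolen when the two new indirect reservations (nres) add up to more than the original one (ores). The arithmetic reduces to a capped difference; a minimal sketch (steal_blocks and FILBLKS_MIN are illustrative names, not the kernel's):

#include <stdint.h>

typedef uint64_t xfs_filblks_t;

#define FILBLKS_MIN(a, b)	((a) < (b) ? (a) : (b))

/*
 * If the new reservations (nres) need more blocks than the original
 * extent held (ores), steal at most "avail" blocks to cover the shortfall.
 */
static xfs_filblks_t steal_blocks(xfs_filblks_t ores, xfs_filblks_t nres,
				  xfs_filblks_t avail)
{
	xfs_filblks_t stolen = 0;

	if (ores < nres && avail)
		stolen = FILBLKS_MIN(nres - ores, avail);
	return stolen;
}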
/fs/ubifs/
tnc_commit.c
432 int lnum, offs, len, next_len, buf_len, buf_offs, used, avail; in layout_in_empty_space() local
445 avail = buf_len; in layout_in_empty_space()
466 avail = buf_len; in layout_in_empty_space()
512 avail -= ALIGN(len, 8); in layout_in_empty_space()
516 avail > 0) in layout_in_empty_space()
519 if (avail <= 0 && next_len && in layout_in_empty_space()
539 avail = buf_len - used; in layout_in_empty_space()
829 int avail, wlen, err, lnum_pos = 0, blen, nxt_offs; in write_index() local
845 avail = buf_len; in write_index()
912 avail = buf_len; in write_index()
[all …]
io.c
560 ubifs_assert(c, !(wbuf->avail & 7)); in ubifs_wbuf_sync_nolock()
602 wbuf->avail = wbuf->size; in ubifs_wbuf_sync_nolock()
643 wbuf->avail = wbuf->size; in ubifs_wbuf_seek_nolock()
741 ubifs_assert(c, wbuf->avail > 0 && wbuf->avail <= wbuf->size); in ubifs_wbuf_write_nolock()
761 if (aligned_len <= wbuf->avail) { in ubifs_wbuf_write_nolock()
772 if (aligned_len == wbuf->avail) { in ubifs_wbuf_write_nolock()
786 wbuf->avail = wbuf->size; in ubifs_wbuf_write_nolock()
792 wbuf->avail -= aligned_len; in ubifs_wbuf_write_nolock()
810 memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); in ubifs_wbuf_write_nolock()
817 len -= wbuf->avail; in ubifs_wbuf_write_nolock()
[all …]
orphan.c
274 int avail_lebs, avail, gap; in avail_orphs() local
277 avail = avail_lebs * in avail_orphs()
281 avail += (gap - UBIFS_ORPH_NODE_SZ) / sizeof(__le64); in avail_orphs()
282 return avail; in avail_orphs()
294 int avail_lebs, avail; in tot_avail_orphs() local
297 avail = avail_lebs * in tot_avail_orphs()
299 return avail / 2; in tot_avail_orphs()
486 int avail, atomic = 0, err; in commit_orphans() local
489 avail = avail_orphs(c); in commit_orphans()
490 if (avail < c->cmt_orphans) { in commit_orphans()
gc.c
352 int avail, moved = 0; in move_nodes() local
357 avail = c->leb_size - wbuf->offs - wbuf->used - in move_nodes()
359 if (snod->len > avail) in move_nodes()
379 avail = c->leb_size - wbuf->offs - wbuf->used - in move_nodes()
381 if (avail < min) in move_nodes()
384 if (snod->len > avail) { in move_nodes()
journal.c
100 int err = 0, err1, retries = 0, avail, lnum, offs, squeeze; in reserve_space() local
118 avail = c->leb_size - wbuf->offs - wbuf->used; in reserve_space()
119 if (wbuf->lnum != -1 && avail >= len) in reserve_space()
167 avail = c->leb_size - wbuf->offs - wbuf->used; in reserve_space()
169 if (wbuf->lnum != -1 && avail >= len) { in reserve_space()
ubifs.h
688 int avail; member
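The UBIFS hits cluster around the write-buffer: wbuf->avail is the free space left in the buffer (wbuf->size - wbuf->used, kept 8-byte aligned per the assert above), and ubifs_wbuf_write_nolock() either appends to the buffer or tops it up, flushes it, and resets avail to the full size. A much-simplified userspace sketch of that accounting, assuming a fixed buffer size and ignoring alignment, syncing and timers (all names here are illustrative):

#include <stddef.h>
#include <stdio.h>
#include <string.h>

#define WBUF_SIZE 512			/* illustrative; UBIFS uses the LEB I/O size */

struct wbuf {
	unsigned char buf[WBUF_SIZE];
	size_t used;			/* bytes buffered so far */
	size_t avail;			/* invariant: avail == WBUF_SIZE - used */
};

static void wbuf_init(struct wbuf *w)
{
	w->used = 0;
	w->avail = WBUF_SIZE;
}

static void wbuf_flush(struct wbuf *w)
{
	/* A real implementation would write w->buf to the device here. */
	printf("flush %zu bytes\n", w->used);
	w->used = 0;
	w->avail = WBUF_SIZE;
}

/*
 * Same decision shape as ubifs_wbuf_write_nolock(): data that fits is
 * buffered; otherwise the buffer is topped up with exactly avail bytes,
 * flushed, and avail resets to the full buffer size.
 */
static void wbuf_write(struct wbuf *w, const unsigned char *data, size_t len)
{
	while (len > w->avail) {
		memcpy(w->buf + w->used, data, w->avail);
		data += w->avail;
		len -= w->avail;
		w->used = WBUF_SIZE;
		wbuf_flush(w);
	}
	memcpy(w->buf + w->used, data, len);
	w->used += len;
	w->avail -= len;
}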
/fs/btrfs/
space-info.c
303 u64 avail; in calc_available_free_space() local
311 avail = atomic64_read(&fs_info->free_chunk_space); in calc_available_free_space()
320 avail = div_u64(avail, factor); in calc_available_free_space()
328 avail >>= 3; in calc_available_free_space()
330 avail >>= 1; in calc_available_free_space()
331 return avail; in calc_available_free_space()
338 u64 avail; in btrfs_can_overcommit() local
346 avail = calc_available_free_space(fs_info, space_info, flush); in btrfs_can_overcommit()
348 if (used + bytes < space_info->total_bytes + avail) in btrfs_can_overcommit()
757 u64 avail; in btrfs_calc_reclaim_metadata_size() local
[all …]
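In btrfs, calc_available_free_space() turns unallocated chunk space into a cautious estimate: divide by the RAID space factor, then trust only a fraction of the result (one eighth or one half, per the shifts above); btrfs_can_overcommit() then allows a reservation whenever used + bytes still fits under total_bytes plus that estimate. A sketch of the two steps (function and parameter names are illustrative):

#include <stdbool.h>
#include <stdint.h>

/*
 * Scale raw unallocated chunk space down to what we are willing to count
 * on: divide by the RAID space factor, then keep 1/8 of it in the cautious
 * case, 1/2 otherwise (the >>3 / >>1 shifts above).
 */
static uint64_t scaled_avail(uint64_t free_chunk_space, uint64_t raid_factor,
			     bool cautious)
{
	uint64_t avail = free_chunk_space / raid_factor;	/* raid_factor > 0 */

	return cautious ? avail >> 3 : avail >> 1;
}

/* Overcommit is allowed while used + bytes still fits under total + avail. */
static bool can_overcommit(uint64_t used, uint64_t bytes,
			   uint64_t total_bytes, uint64_t avail)
{
	return used + bytes < total_bytes + avail;
}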
/fs/jffs2/
nodemgmt.c
26 uint32_t avail; in jffs2_rp_can_write() local
29 avail = c->dirty_size + c->free_size + c->unchecked_size + in jffs2_rp_can_write()
33 if (avail < 2 * opts->rp_size) in jffs2_rp_can_write()
39 c->nr_erasing_blocks, avail, c->nospc_dirty_size); in jffs2_rp_can_write()
41 if (avail > opts->rp_size) in jffs2_rp_can_write()
101 uint32_t dirty, avail; in jffs2_reserve_space() local
140 avail = c->free_size + c->dirty_size + c->erasing_size + c->unchecked_size; in jffs2_reserve_space()
141 if ( (avail / c->sector_size) <= blocksneeded) { in jffs2_reserve_space()
149 avail, blocksneeded * c->sector_size); in jffs2_reserve_space()
fs.c
212 unsigned long avail; in jffs2_statfs() local
224 avail = c->dirty_size + c->free_size; in jffs2_statfs()
225 if (avail > c->sector_size * c->resv_blocks_write) in jffs2_statfs()
226 avail -= c->sector_size * c->resv_blocks_write; in jffs2_statfs()
228 avail = 0; in jffs2_statfs()
231 buf->f_bavail = buf->f_bfree = avail >> PAGE_SHIFT; in jffs2_statfs()
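jffs2_statfs() reports dirty plus free space, minus the erase blocks held back for writes, clamped at zero and converted to PAGE_SIZE units for f_bfree/f_bavail. A sketch with made-up constants (SECTOR_SIZE and RESV_BLOCKS_WRITE stand in for c->sector_size and c->resv_blocks_write):

#include <stdint.h>

#define SECTOR_SIZE		65536UL	/* stands in for c->sector_size */
#define RESV_BLOCKS_WRITE	2UL	/* stands in for c->resv_blocks_write */
#define PAGE_SHIFT		12

/*
 * Free space as reported by statfs: dirty + free space, minus the erase
 * blocks reserved for writes, clamped at zero, in PAGE_SIZE units.
 */
static unsigned long statfs_avail(uint32_t dirty_size, uint32_t free_size)
{
	unsigned long avail = (unsigned long)dirty_size + free_size;
	unsigned long resv = SECTOR_SIZE * RESV_BLOCKS_WRITE;

	if (avail > resv)
		avail -= resv;
	else
		avail = 0;
	return avail >> PAGE_SHIFT;
}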
/fs/
aio.c
940 int old, avail = atomic_read(&ctx->reqs_available); in __get_reqs_available() local
943 if (avail < ctx->req_batch) in __get_reqs_available()
946 old = avail; in __get_reqs_available()
947 avail = atomic_cmpxchg(&ctx->reqs_available, in __get_reqs_available()
948 avail, avail - ctx->req_batch); in __get_reqs_available()
949 } while (avail != old); in __get_reqs_available()
1223 long avail; in aio_read_events_ring() local
1227 avail = (head <= tail ? tail : ctx->nr_events) - head; in aio_read_events_ring()
1235 avail = min(avail, nr - ret); in aio_read_events_ring()
1236 avail = min_t(long, avail, AIO_EVENTS_PER_PAGE - pos); in aio_read_events_ring()
[all …]
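__get_reqs_available() claims request slots a batch at a time with a lock-free compare-and-swap retry loop: re-read avail, give up if fewer than a whole batch remain, otherwise try to subtract the batch atomically. The same shape in portable C11 atomics (a sketch, not the kernel's atomic_t API):

#include <stdatomic.h>
#include <stdbool.h>

/*
 * Claim req_batch slots from a shared counter without a lock: give up if
 * fewer than a whole batch remain, otherwise retry the compare-and-swap
 * until the subtraction lands (the weak CAS reloads avail on failure).
 */
static bool get_reqs_available(atomic_int *reqs_available, int req_batch)
{
	int avail = atomic_load(reqs_available);

	do {
		if (avail < req_batch)
			return false;
	} while (!atomic_compare_exchange_weak(reqs_available, &avail,
					       avail - req_batch));
	return true;
}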
/fs/ceph/
debugfs.c
227 int total, avail, used, reserved, min, i; in caps_show() local
230 ceph_reservation_status(fsc, &total, &avail, &used, &reserved, &min); in caps_show()
236 total, avail, used, reserved, min); in caps_show()
super.h
756 int *total, int *avail, int *used,
caps.c
407 int *total, int *avail, int *used, int *reserved, in ceph_reservation_status() argument
416 if (avail) in ceph_reservation_status()
417 *avail = mdsc->caps_avail_count; in ceph_reservation_status()
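ceph_reservation_status() simply copies the capability counters out through optional pointers, so callers such as caps_show() can ask for any subset. A trivial sketch of that convention (the struct and function names are illustrative):

struct cap_counts {
	int total, avail, used, reserved, min;
};

/* Every output pointer is optional; only the counters asked for are filled. */
static void reservation_status(const struct cap_counts *c,
			       int *total, int *avail, int *used,
			       int *reserved, int *min)
{
	if (total)
		*total = c->total;
	if (avail)
		*avail = c->avail;
	if (used)
		*used = c->used;
	if (reserved)
		*reserved = c->reserved;
	if (min)
		*min = c->min;
}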
/fs/nfsd/
nfs4xdr.c
163 unsigned int avail = (char *)argp->end - (char *)argp->p; in read_buf() local
171 avail = vec->iov_len; in read_buf()
173 argp->end = vec->iov_base + avail; in read_buf()
176 if (avail < nbytes) in read_buf()
184 if (avail + argp->pagelen < nbytes) in read_buf()
186 if (avail + PAGE_SIZE < nbytes) /* need more than a page !! */ in read_buf()
203 memcpy(p, argp->p, avail); in read_buf()
205 memcpy(((char*)p)+avail, argp->p, (nbytes - avail)); in read_buf()
206 argp->p += XDR_QUADLEN(nbytes - avail); in read_buf()
266 int avail; in svcxdr_construct_vector() local
[all …]
nfs4state.c
1663 unsigned long avail, total_avail; in nfsd4_get_drc_mem() local
1676 avail = min((unsigned long)NFSD_MAX_MEM_PER_SESSION, total_avail); in nfsd4_get_drc_mem()
1688 avail = clamp_t(unsigned long, avail, slotsize, in nfsd4_get_drc_mem()
1690 num = min_t(int, num, avail / slotsize); in nfsd4_get_drc_mem()
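nfsd4_get_drc_mem() sizes a session's reply cache: avail is capped per session, clamped so it covers at least one slot, and then divided by the slot size to bound the slot count the client asked for. A sketch of that arithmetic only (names and the exact clamp bounds are illustrative; the real code also tracks a global DRC budget):

/*
 * avail is capped per session, forced to cover at least one slot, and then
 * converted into a slot count that also bounds what the client requested.
 */
static unsigned int drc_slots(unsigned long total_avail, unsigned long session_cap,
			      unsigned long slotsize, unsigned int requested)
{
	unsigned long avail = total_avail < session_cap ? total_avail : session_cap;
	unsigned int num;

	if (avail < slotsize)
		avail = slotsize;
	num = (unsigned int)(avail / slotsize);
	return requested < num ? requested : num;
}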