/fs/cachefiles/ |
D | bind.c |
     31  int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)    in cachefiles_daemon_bind() argument
     34  cache->frun_percent,    in cachefiles_daemon_bind()
     35  cache->fcull_percent,    in cachefiles_daemon_bind()
     36  cache->fstop_percent,    in cachefiles_daemon_bind()
     37  cache->brun_percent,    in cachefiles_daemon_bind()
     38  cache->bcull_percent,    in cachefiles_daemon_bind()
     39  cache->bstop_percent,    in cachefiles_daemon_bind()
     43  ASSERT(cache->fstop_percent >= 0 &&    in cachefiles_daemon_bind()
     44  cache->fstop_percent < cache->fcull_percent &&    in cachefiles_daemon_bind()
     45  cache->fcull_percent < cache->frun_percent &&    in cachefiles_daemon_bind()
    [all …]
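The bind.c hits check that the culling thresholds are strictly ordered (fstop < fcull < frun) before the cache is brought online. A minimal user-space sketch of that ordering check follows; the struct and helper are illustrative stand-ins, not the kernel's validation code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative analogue of the file-space thresholds checked in
     * cachefiles_daemon_bind(); the struct is hypothetical. */
    struct cache_limits {
        unsigned frun_percent;   /* stop culling above this much free space */
        unsigned fcull_percent;  /* start culling below this much free space */
        unsigned fstop_percent;  /* refuse new writes below this much free space */
    };

    static bool limits_are_sane(const struct cache_limits *l)
    {
        /* Same ordering the ASSERT()s at bind.c lines 43-45 enforce,
         * plus an upper bound added here as an extra sanity check. */
        return l->fstop_percent < l->fcull_percent &&
               l->fcull_percent < l->frun_percent &&
               l->frun_percent <= 100;
    }

    int main(void)
    {
        struct cache_limits l = { .frun_percent = 7, .fcull_percent = 5,
                                  .fstop_percent = 1 };
        printf("limits %s\n", limits_are_sane(&l) ? "ok" : "bad");
        return 0;
    }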
|
D | daemon.c |
     63  int (*handler)(struct cachefiles_cache *cache, char *args);
     89  struct cachefiles_cache *cache;    in cachefiles_daemon_open() local
    102  cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);    in cachefiles_daemon_open()
    103  if (!cache) {    in cachefiles_daemon_open()
    108  mutex_init(&cache->daemon_mutex);    in cachefiles_daemon_open()
    109  cache->active_nodes = RB_ROOT;    in cachefiles_daemon_open()
    110  rwlock_init(&cache->active_lock);    in cachefiles_daemon_open()
    111  init_waitqueue_head(&cache->daemon_pollwq);    in cachefiles_daemon_open()
    118  cache->frun_percent = 7;    in cachefiles_daemon_open()
    119  cache->fcull_percent = 5;    in cachefiles_daemon_open()
    [all …]
|
D | interface.c |
     32  struct cachefiles_cache *cache;    in cachefiles_alloc_object() local
     38  cache = container_of(_cache, struct cachefiles_cache, cache);    in cachefiles_alloc_object()
     40  _enter("{%s},%p,", cache->cache.identifier, cookie);    in cachefiles_alloc_object()
     56  fscache_object_init(&object->fscache, cookie, &cache->cache);    in cachefiles_alloc_object()
    105  fscache_object_destroyed(&cache->cache);    in cachefiles_alloc_object()
    121  struct cachefiles_cache *cache;    in cachefiles_lookup_object() local
    127  cache = container_of(_object->cache, struct cachefiles_cache, cache);    in cachefiles_lookup_object()
    136  cachefiles_begin_secure(cache, &saved_cred);    in cachefiles_lookup_object()
    140  cachefiles_end_secure(cache, saved_cred);    in cachefiles_lookup_object()
    202  struct cachefiles_cache *cache;    in cachefiles_update_object() local
    [all …]
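The interface.c hits (lines 38 and 127) recover the cachefiles_cache wrapper from the embedded fscache_cache with container_of(). A minimal user-space sketch of that idiom follows; the struct layouts are toy stand-ins, only the macro matches the kernel's definition in spirit.

    #include <stddef.h>
    #include <stdio.h>

    /* Userspace rendering of the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    /* Toy stand-ins for struct fscache_cache embedded in struct
     * cachefiles_cache; the fields are illustrative only. */
    struct fscache_cache { const char *identifier; };
    struct cachefiles_cache {
        struct fscache_cache cache;   /* embedded "base" record */
        unsigned frun_percent;
    };

    int main(void)
    {
        struct cachefiles_cache cc = { .cache = { "mycache" }, .frun_percent = 7 };
        struct fscache_cache *_cache = &cc.cache;   /* what fscache hands back */

        /* Recover the wrapper, as interface.c does at lines 38 and 127. */
        struct cachefiles_cache *cache =
            container_of(_cache, struct cachefiles_cache, cache);

        printf("%s frun=%u%%\n", cache->cache.identifier, cache->frun_percent);
        return 0;
    }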
|
D | namei.c |
    100  static void cachefiles_mark_object_buried(struct cachefiles_cache *cache,    in cachefiles_mark_object_buried() argument
    109  write_lock(&cache->active_lock);    in cachefiles_mark_object_buried()
    111  p = cache->active_nodes.rb_node;    in cachefiles_mark_object_buried()
    122  write_unlock(&cache->active_lock);    in cachefiles_mark_object_buried()
    142  write_unlock(&cache->active_lock);    in cachefiles_mark_object_buried()
    149  static int cachefiles_mark_object_active(struct cachefiles_cache *cache,    in cachefiles_mark_object_active() argument
    159  write_lock(&cache->active_lock);    in cachefiles_mark_object_active()
    168  _p = &cache->active_nodes.rb_node;    in cachefiles_mark_object_active()
    185  rb_insert_color(&object->active_node, &cache->active_nodes);    in cachefiles_mark_object_active()
    187  write_unlock(&cache->active_lock);    in cachefiles_mark_object_active()
    [all …]
|
D | security.c |
     20  int cachefiles_get_security_ID(struct cachefiles_cache *cache)    in cachefiles_get_security_ID() argument
     25  _enter("{%s}", cache->secctx);    in cachefiles_get_security_ID()
     33  if (cache->secctx) {    in cachefiles_get_security_ID()
     34  ret = set_security_override_from_ctx(new, cache->secctx);    in cachefiles_get_security_ID()
     43  cache->cache_cred = new;    in cachefiles_get_security_ID()
     53  static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,    in cachefiles_check_cache_dir() argument
     79  int cachefiles_determine_cache_security(struct cachefiles_cache *cache,    in cachefiles_determine_cache_security() argument
     94  cachefiles_end_secure(cache, *_saved_cred);    in cachefiles_determine_cache_security()
    101  cachefiles_begin_secure(cache, _saved_cred);    in cachefiles_determine_cache_security()
    106  put_cred(cache->cache_cred);    in cachefiles_determine_cache_security()
    [all …]
|
D | internal.h |
     60  struct fscache_cache cache; /* FS-Cache record */    member
    130  static inline void cachefiles_state_changed(struct cachefiles_cache *cache)    in cachefiles_state_changed() argument
    132  set_bit(CACHEFILES_STATE_CHANGED, &cache->flags);    in cachefiles_state_changed()
    133  wake_up_all(&cache->daemon_pollwq);    in cachefiles_state_changed()
    139  extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args);
    140  extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache);
    147  extern int cachefiles_has_space(struct cachefiles_cache *cache,
    163  extern void cachefiles_mark_object_inactive(struct cachefiles_cache *cache,
    166  extern int cachefiles_delete_object(struct cachefiles_cache *cache,
    172  extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
    [all …]
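cachefiles_state_changed() (internal.h lines 130-133) is a "set a flag, wake every poller" helper. A rough user-space analogue using a mutex and condition variable is sketched below; this is an assumption-laden translation of the pattern, not the kernel's atomic set_bit()/wake_up_all() machinery.

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Userspace analogue of cachefiles_state_changed(): set a state flag and
     * wake everyone sleeping on the daemon poll queue. Names are illustrative. */
    struct cache_state {
        pthread_mutex_t lock;
        pthread_cond_t  pollwq;
        bool            state_changed;
    };

    static void cache_state_changed(struct cache_state *c)
    {
        pthread_mutex_lock(&c->lock);
        c->state_changed = true;            /* set_bit(CACHEFILES_STATE_CHANGED, ...) */
        pthread_cond_broadcast(&c->pollwq); /* wake_up_all(&cache->daemon_pollwq)     */
        pthread_mutex_unlock(&c->lock);
    }

    static void cache_state_wait(struct cache_state *c)
    {
        pthread_mutex_lock(&c->lock);
        while (!c->state_changed)
            pthread_cond_wait(&c->pollwq, &c->lock);
        c->state_changed = false;           /* consume the event */
        pthread_mutex_unlock(&c->lock);
    }

    int main(void)
    {
        struct cache_state c = { PTHREAD_MUTEX_INITIALIZER,
                                 PTHREAD_COND_INITIALIZER, false };
        cache_state_changed(&c);
        cache_state_wait(&c);   /* returns immediately: the flag is already set */
        puts("woken");
        return 0;
    }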
|
D | rdwr.c |
    402  struct cachefiles_cache *cache;    in cachefiles_read_or_alloc_page() local
    410  cache = container_of(object->fscache.cache,    in cachefiles_read_or_alloc_page()
    411  struct cachefiles_cache, cache);    in cachefiles_read_or_alloc_page()
    448  } else if (cachefiles_has_space(cache, 0, 1) == 0) {    in cachefiles_read_or_alloc_page()
    694  struct cachefiles_cache *cache;    in cachefiles_read_or_alloc_pages() local
    704  cache = container_of(object->fscache.cache,    in cachefiles_read_or_alloc_pages()
    705  struct cachefiles_cache, cache);    in cachefiles_read_or_alloc_pages()
    715  if (cachefiles_has_space(cache, 0, *nr_pages) < 0)    in cachefiles_read_or_alloc_pages()
    808  struct cachefiles_cache *cache;    in cachefiles_allocate_page() local
    813  cache = container_of(object->fscache.cache,    in cachefiles_allocate_page()
    [all …]
|
/fs/btrfs/tests/ |
D | free-space-tests.c |
     32  static int test_extents(struct btrfs_block_group_cache *cache)    in test_extents() argument
     39  ret = btrfs_add_free_space(cache, 0, SZ_4M);    in test_extents()
     45  ret = btrfs_remove_free_space(cache, 0, SZ_4M);    in test_extents()
     51  if (test_check_exists(cache, 0, SZ_4M)) {    in test_extents()
     57  ret = btrfs_add_free_space(cache, 0, SZ_4M);    in test_extents()
     63  ret = btrfs_remove_free_space(cache, 3 * SZ_1M, SZ_1M);    in test_extents()
     69  ret = btrfs_remove_free_space(cache, 0, SZ_1M);    in test_extents()
     75  ret = btrfs_remove_free_space(cache, SZ_2M, 4096);    in test_extents()
     81  if (test_check_exists(cache, 0, SZ_1M)) {    in test_extents()
     86  if (test_check_exists(cache, SZ_2M, 4096)) {    in test_extents()
    [all …]
|
D | free-space-tree-tests.c |
     33  struct btrfs_block_group_cache *cache,    in __check_free_space_extents() argument
     46  info = search_free_space_info(trans, fs_info, cache, path, 0);    in __check_free_space_extents()
     63  end = cache->key.objectid + cache->key.offset;    in __check_free_space_extents()
     71  bit = free_space_test_bit(cache, path, offset);    in __check_free_space_extents()
    122  struct btrfs_block_group_cache *cache,    in check_free_space_extents() argument
    131  info = search_free_space_info(trans, fs_info, cache, path, 0);    in check_free_space_extents()
    140  ret = __check_free_space_extents(trans, fs_info, cache, path, extents,    in check_free_space_extents()
    147  ret = convert_free_space_to_extents(trans, fs_info, cache, path);    in check_free_space_extents()
    153  ret = convert_free_space_to_bitmaps(trans, fs_info, cache, path);    in check_free_space_extents()
    159  return __check_free_space_extents(trans, fs_info, cache, path, extents,    in check_free_space_extents()
    [all …]
|
D | btrfs-tests.c |
    198  struct btrfs_block_group_cache *cache;    in btrfs_alloc_dummy_block_group() local
    200  cache = kzalloc(sizeof(*cache), GFP_KERNEL);    in btrfs_alloc_dummy_block_group()
    201  if (!cache)    in btrfs_alloc_dummy_block_group()
    203  cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),    in btrfs_alloc_dummy_block_group()
    205  if (!cache->free_space_ctl) {    in btrfs_alloc_dummy_block_group()
    206  kfree(cache);    in btrfs_alloc_dummy_block_group()
    210  cache->key.objectid = 0;    in btrfs_alloc_dummy_block_group()
    211  cache->key.offset = length;    in btrfs_alloc_dummy_block_group()
    212  cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;    in btrfs_alloc_dummy_block_group()
    213  cache->full_stripe_len = fs_info->sectorsize;    in btrfs_alloc_dummy_block_group()
    [all …]
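btrfs_alloc_dummy_block_group() allocates the block group, then its owned free_space_ctl, and frees the first allocation if the second fails. A user-space sketch of that two-step allocate-and-unwind pattern follows; the struct names are simplified stand-ins for the btrfs types.

    #include <stdlib.h>

    struct free_space_ctl { int dummy; };
    struct block_group {
        struct free_space_ctl *free_space_ctl;
        unsigned long long objectid, offset;
    };

    static struct block_group *alloc_dummy_block_group(unsigned long long length)
    {
        struct block_group *cache;

        cache = calloc(1, sizeof(*cache));            /* kzalloc(..., GFP_KERNEL) */
        if (!cache)
            return NULL;

        cache->free_space_ctl = calloc(1, sizeof(*cache->free_space_ctl));
        if (!cache->free_space_ctl) {
            free(cache);                              /* undo the first allocation */
            return NULL;
        }

        cache->objectid = 0;
        cache->offset = length;                       /* cache->key.offset = length */
        return cache;
    }

    int main(void)
    {
        struct block_group *bg = alloc_dummy_block_group(1024 * 1024);
        if (bg) {
            free(bg->free_space_ctl);
            free(bg);
        }
        return 0;
    }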
|
/fs/fscache/ |
D | cache.c |
     99  struct fscache_cache *cache;    in fscache_select_cache_for_object() local
    117  cache = object->cache;    in fscache_select_cache_for_object()
    119  test_bit(FSCACHE_IOERROR, &cache->flags))    in fscache_select_cache_for_object()
    120  cache = NULL;    in fscache_select_cache_for_object()
    123  _leave(" = %p [parent]", cache);    in fscache_select_cache_for_object()
    124  return cache;    in fscache_select_cache_for_object()
    151  if (!tag->cache) {    in fscache_select_cache_for_object()
    156  if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))    in fscache_select_cache_for_object()
    159  _leave(" = %p [specific]", tag->cache);    in fscache_select_cache_for_object()
    160  return tag->cache;    in fscache_select_cache_for_object()
    [all …]
|
D | cookie.c |
     25  static int fscache_alloc_object(struct fscache_cache *cache,
    194  struct fscache_cache *cache;    in fscache_acquire_non_index_cookie() local
    213  cache = fscache_select_cache_for_object(cookie->parent);    in fscache_acquire_non_index_cookie()
    214  if (!cache) {    in fscache_acquire_non_index_cookie()
    221  _debug("cache %s", cache->tag->name);    in fscache_acquire_non_index_cookie()
    227  ret = fscache_alloc_object(cache, cookie);    in fscache_acquire_non_index_cookie()
    278  static int fscache_alloc_object(struct fscache_cache *cache,    in fscache_alloc_object() argument
    284  _enter("%p,%p{%s}", cache, cookie, cookie->def->name);    in fscache_alloc_object()
    289  if (object->cache == cache)    in fscache_alloc_object()
    297  object = cache->ops->alloc_object(cache, cookie);    in fscache_alloc_object()
    [all …]
|
D | object.c |
    298  struct fscache_cache *cache)    in fscache_object_init() argument
    302  atomic_inc(&cache->object_count);    in fscache_object_init()
    319  object->cache = cache;    in fscache_object_init()
    400  object->cache->ops->grab_object(object)) {    in fscache_initialise_object()
    464  test_bit(FSCACHE_IOERROR, &object->cache->flags) ||    in fscache_look_up_object()
    471  cookie->def->name, object->cache->tag->name);    in fscache_look_up_object()
    475  ret = object->cache->ops->lookup_object(object);    in fscache_look_up_object()
    590  object->cache->ops->lookup_complete(object);    in fscache_object_available()
    626  object->cache->ops->lookup_complete(object);    in fscache_lookup_failure()
    698  struct fscache_cache *cache = object->cache;    in fscache_drop_object() local
    [all …]
|
D | operation.c |
    482  struct fscache_cache *cache;    in fscache_put_operation() local
    520  cache = object->cache;    in fscache_put_operation()
    521  spin_lock(&cache->op_gc_list_lock);    in fscache_put_operation()
    522  list_add_tail(&op->pend_link, &cache->op_gc_list);    in fscache_put_operation()
    523  spin_unlock(&cache->op_gc_list_lock);    in fscache_put_operation()
    524  schedule_work(&cache->op_gc);    in fscache_put_operation()
    549  struct fscache_cache *cache =    in fscache_operation_gc() local
    556  spin_lock(&cache->op_gc_list_lock);    in fscache_operation_gc()
    557  if (list_empty(&cache->op_gc_list)) {    in fscache_operation_gc()
    558  spin_unlock(&cache->op_gc_list_lock);    in fscache_operation_gc()
    [all …]
|
/fs/ |
D | mbcache.c |
     46  static unsigned long mb_cache_shrink(struct mb_cache *cache,
     49  static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,    in mb_cache_entry_head() argument
     52  return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];    in mb_cache_entry_head()
     73  int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,    in mb_cache_entry_create() argument
     81  if (cache->c_entry_count >= cache->c_max_entries)    in mb_cache_entry_create()
     82  schedule_work(&cache->c_shrink_work);    in mb_cache_entry_create()
     84  if (cache->c_entry_count >= 2*cache->c_max_entries)    in mb_cache_entry_create()
     85  mb_cache_shrink(cache, SYNC_SHRINK_BATCH);    in mb_cache_entry_create()
     98  head = mb_cache_entry_head(cache, key);    in mb_cache_entry_create()
    110  spin_lock(&cache->c_list_lock);    in mb_cache_entry_create()
    [all …]
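mb_cache_entry_head() picks a hash bucket with hash_32(key, c_bucket_bits). A user-space sketch of that multiplicative bucket selection follows; the golden-ratio constant and helper mirror the kernel's hash.h in spirit but are re-implemented here, so treat them as an approximation.

    #include <stdint.h>
    #include <stdio.h>

    /* Multiplicative hash: multiply by a 32-bit golden-ratio constant and keep
     * the top 'bits' bits, as mb_cache_entry_head() does to pick a bucket. */
    #define GOLDEN_RATIO_32 0x61C88647u

    static inline uint32_t hash_32(uint32_t val, unsigned bits)
    {
        return (val * GOLDEN_RATIO_32) >> (32 - bits);
    }

    int main(void)
    {
        unsigned bucket_bits = 6;            /* 64 buckets, like c_bucket_bits */
        uint32_t key = 0xdeadbeefu;

        /* &cache->c_hash[hash_32(key, cache->c_bucket_bits)] in mbcache.c */
        printf("key %#x -> bucket %u of %u\n",
               (unsigned)key, (unsigned)hash_32(key, bucket_bits),
               1u << bucket_bits);
        return 0;
    }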
|
/fs/squashfs/ |
D | cache.c |
     66  struct squashfs_cache *cache, u64 block, int length)    in squashfs_cache_get() argument
     71  spin_lock(&cache->lock);    in squashfs_cache_get()
     74  for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {    in squashfs_cache_get()
     75  if (cache->entry[i].block == block) {    in squashfs_cache_get()
     76  cache->curr_blk = i;    in squashfs_cache_get()
     79  i = (i + 1) % cache->entries;    in squashfs_cache_get()
     82  if (n == cache->entries) {    in squashfs_cache_get()
     87  if (cache->unused == 0) {    in squashfs_cache_get()
     88  cache->num_waiters++;    in squashfs_cache_get()
     89  spin_unlock(&cache->lock);    in squashfs_cache_get()
    [all …]
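squashfs_cache_get() scans the entry array circularly, starting from the slot that last hit (curr_blk), so repeated reads of the same metadata block match on the first probe. A user-space sketch of just that lookup loop follows; the structures are simplified stand-ins and the locking and miss handling are omitted.

    #include <stdint.h>
    #include <stdio.h>

    struct entry { uint64_t block; };
    struct cache {
        struct entry entry[8];
        int entries;
        int curr_blk;   /* slot of the most recent hit */
    };

    static int cache_lookup(struct cache *cache, uint64_t block)
    {
        int i, n;

        for (i = cache->curr_blk, n = 0; n < cache->entries; n++) {
            if (cache->entry[i].block == block) {
                cache->curr_blk = i;            /* remember where we hit */
                return i;
            }
            i = (i + 1) % cache->entries;       /* wrap around the array */
        }
        return -1;                              /* miss: caller must fill a slot */
    }

    int main(void)
    {
        struct cache c = { .entries = 8, .curr_blk = 0 };
        c.entry[5].block = 4096;
        printf("hit slot %d\n", cache_lookup(&c, 4096));
        printf("again    %d\n", cache_lookup(&c, 4096));  /* first probe now hits */
        return 0;
    }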
|
/fs/overlayfs/ |
D | readdir.c |
     57  struct ovl_dir_cache *cache;    member
    227  struct ovl_dir_cache *cache = ovl_dir_cache(inode);    in ovl_dir_cache_free() local
    229  if (cache) {    in ovl_dir_cache_free()
    230  ovl_cache_free(&cache->entries);    in ovl_dir_cache_free()
    231  kfree(cache);    in ovl_dir_cache_free()
    237  struct ovl_dir_cache *cache = od->cache;    in ovl_cache_put() local
    239  WARN_ON(cache->refcount <= 0);    in ovl_cache_put()
    240  cache->refcount--;    in ovl_cache_put()
    241  if (!cache->refcount) {    in ovl_cache_put()
    242  if (ovl_dir_cache(d_inode(dentry)) == cache)    in ovl_cache_put()
    [all …]
|
/fs/btrfs/ |
D | extent-tree.c |
     94  static int btrfs_add_reserved_bytes(struct btrfs_block_group_cache *cache,
     96  static int btrfs_free_reserved_bytes(struct btrfs_block_group_cache *cache,
    113  block_group_cache_done(struct btrfs_block_group_cache *cache)    in block_group_cache_done() argument
    116  return cache->cached == BTRFS_CACHE_FINISHED ||    in block_group_cache_done()
    117  cache->cached == BTRFS_CACHE_ERROR;    in block_group_cache_done()
    120  static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)    in block_group_bits() argument
    122  return (cache->flags & bits) == bits;    in block_group_bits()
    125  void btrfs_get_block_group(struct btrfs_block_group_cache *cache)    in btrfs_get_block_group() argument
    127  atomic_inc(&cache->count);    in btrfs_get_block_group()
    130  void btrfs_put_block_group(struct btrfs_block_group_cache *cache)    in btrfs_put_block_group() argument
    [all …]
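btrfs_get_block_group()/btrfs_put_block_group() are a plain atomic reference count around the block group cache. A user-space sketch of that get/put pattern with C11 atomics follows; the struct is a simplified stand-in and the free-on-last-put detail is assumed from the usual kernel convention, not copied from extent-tree.c.

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct block_group {
        atomic_int count;
        unsigned long long flags;
    };

    static void get_block_group(struct block_group *cache)
    {
        atomic_fetch_add(&cache->count, 1);        /* atomic_inc(&cache->count) */
    }

    static void put_block_group(struct block_group *cache)
    {
        /* Free only when the last reference is dropped. */
        if (atomic_fetch_sub(&cache->count, 1) == 1)
            free(cache);
    }

    int main(void)
    {
        struct block_group *bg = calloc(1, sizeof(*bg));
        atomic_init(&bg->count, 1);    /* creator holds the initial reference */
        get_block_group(bg);           /* e.g. a scanner takes a reference    */
        put_block_group(bg);           /* scanner done                        */
        put_block_group(bg);           /* last put frees the block group      */
        puts("done");
        return 0;
    }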
|
D | relocation.c |
    198  static void remove_backref_node(struct backref_cache *cache,
    209  static void backref_cache_init(struct backref_cache *cache)    in backref_cache_init() argument
    212  cache->rb_root = RB_ROOT;    in backref_cache_init()
    214  INIT_LIST_HEAD(&cache->pending[i]);    in backref_cache_init()
    215  INIT_LIST_HEAD(&cache->changed);    in backref_cache_init()
    216  INIT_LIST_HEAD(&cache->detached);    in backref_cache_init()
    217  INIT_LIST_HEAD(&cache->leaves);    in backref_cache_init()
    220  static void backref_cache_cleanup(struct backref_cache *cache)    in backref_cache_cleanup() argument
    225  while (!list_empty(&cache->detached)) {    in backref_cache_cleanup()
    226  node = list_entry(cache->detached.next,    in backref_cache_cleanup()
    [all …]
|
/fs/fat/ |
D | cache.c |
     41  struct fat_cache *cache = (struct fat_cache *)foo;    in init_once() local
     43  INIT_LIST_HEAD(&cache->cache_list);    in init_once()
     67  static inline void fat_cache_free(struct fat_cache *cache)    in fat_cache_free() argument
     69  BUG_ON(!list_empty(&cache->cache_list));    in fat_cache_free()
     70  kmem_cache_free(fat_cache_cachep, cache);    in fat_cache_free()
     74  struct fat_cache *cache)    in fat_cache_update_lru() argument
     76  if (MSDOS_I(inode)->cache_lru.next != &cache->cache_list)    in fat_cache_update_lru()
     77  list_move(&cache->cache_list, &MSDOS_I(inode)->cache_lru);    in fat_cache_update_lru()
    136  struct fat_cache *cache, *tmp;    in fat_cache_add() local
    146  cache = fat_cache_merge(inode, new);    in fat_cache_add()
    [all …]
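fat_cache_update_lru() moves an entry to the front of the per-inode LRU list unless it is already there. A user-space sketch of that move-to-front idiom follows; the list helpers are minimal re-implementations of the kernel list_head primitives, not the kernel's own, and the fat_cache struct is a stand-in.

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    static void list_init(struct list_head *h) { h->next = h->prev = h; }

    static void list_del(struct list_head *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    static void list_add(struct list_head *e, struct list_head *head)
    {
        e->next = head->next;
        e->prev = head;
        head->next->prev = e;
        head->next = e;
    }

    /* list_move(): unlink and re-insert right after head (most recently used). */
    static void list_move(struct list_head *e, struct list_head *head)
    {
        list_del(e);
        list_add(e, head);
    }

    struct fat_cache { struct list_head cache_list; int id; };

    static void fat_cache_update_lru(struct list_head *lru, struct fat_cache *cache)
    {
        if (lru->next != &cache->cache_list)   /* already MRU? then it's a no-op */
            list_move(&cache->cache_list, lru);
    }

    int main(void)
    {
        struct list_head lru;
        struct fat_cache a = { .id = 1 }, b = { .id = 2 };

        list_init(&lru);
        list_add(&a.cache_list, &lru);
        list_add(&b.cache_list, &lru);         /* LRU order is now: b, a */

        fat_cache_update_lru(&lru, &a);        /* a becomes most recently used */
        printf("front of LRU is %s\n", lru.next == &a.cache_list ? "a" : "b");
        return 0;
    }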
|
/fs/nilfs2/ |
D | alloc.c |
    272  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;    in nilfs_palloc_get_desc_block() local
    277  bhp, &cache->prev_desc, &cache->lock);    in nilfs_palloc_get_desc_block()
    291  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;    in nilfs_palloc_get_bitmap_block() local
    296  &cache->prev_bitmap, &cache->lock);    in nilfs_palloc_get_bitmap_block()
    307  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;    in nilfs_palloc_delete_bitmap_block() local
    312  &cache->prev_bitmap, &cache->lock);    in nilfs_palloc_delete_bitmap_block()
    325  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;    in nilfs_palloc_get_entry_block() local
    330  &cache->prev_entry, &cache->lock);    in nilfs_palloc_get_entry_block()
    340  struct nilfs_palloc_cache *cache = NILFS_MDT(inode)->mi_palloc_cache;    in nilfs_palloc_delete_entry_block() local
    344  &cache->prev_entry, &cache->lock);    in nilfs_palloc_delete_entry_block()
    [all …]
|
/fs/afs/ |
D | file.c |
    160  ret = fscache_read_or_alloc_page(vnode->cache,    in __afs_page_filler()
    212  fscache_uncache_page(vnode->cache, page);    in __afs_page_filler()
    229  fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {    in __afs_page_filler()
    230  fscache_uncache_page(vnode->cache, page);    in __afs_page_filler()
    300  fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {    in afs_readpages_page_done()
    301  fscache_uncache_page(vnode->cache, page);    in afs_readpages_page_done()
    363  fscache_uncache_page(vnode->cache, page);    in afs_readpages_one()
    398  fscache_uncache_page(vnode->cache, page);    in afs_readpages_one()
    432  ret = fscache_read_or_alloc_pages(vnode->cache,    in afs_readpages()
    501  fscache_wait_on_page_write(vnode->cache, page);    in afs_invalidatepage()
    [all …]
|
/fs/ecryptfs/ |
D | main.c |
    660  struct kmem_cache **cache;    member
    667  .cache = &ecryptfs_auth_tok_list_item_cache,
    672  .cache = &ecryptfs_file_info_cache,
    677  .cache = &ecryptfs_dentry_info_cache,
    682  .cache = &ecryptfs_inode_info_cache,
    689  .cache = &ecryptfs_sb_info_cache,
    694  .cache = &ecryptfs_header_cache,
    699  .cache = &ecryptfs_xattr_cache,
    704  .cache = &ecryptfs_key_record_cache,
    709  .cache = &ecryptfs_key_sig_cache,
    [all …]
|
/fs/nfs/ |
D | dir.c |
    2149  struct nfs_access_entry *cache;    in nfs_access_free_list() local
    2152  cache = list_entry(head->next, struct nfs_access_entry, lru);    in nfs_access_free_list()
    2153  list_del(&cache->lru);    in nfs_access_free_list()
    2154  nfs_access_free_entry(cache);    in nfs_access_free_list()
    2163  struct nfs_access_entry *cache;    in nfs_do_access_cache_scan() local
    2176  cache = list_entry(nfsi->access_cache_entry_lru.next,    in nfs_do_access_cache_scan()
    2178  list_move(&cache->lru, &head);    in nfs_do_access_cache_scan()
    2179  rb_erase(&cache->rb_node, &nfsi->access_cache);    in nfs_do_access_cache_scan()
    2287  struct nfs_access_entry *cache;    in nfs_access_get_cached() local
    2295  cache = nfs_access_search_rbtree(inode, cred);    in nfs_access_get_cached()
    [all …]
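The dir.c hits split cache teardown in two: nfs_do_access_cache_scan() detaches victim entries onto a private list while the structures are still protected, and nfs_access_free_list() walks that private list and frees each entry afterwards. A much-simplified user-space sketch of that "detach, then free" pattern follows; the singly linked list and struct are illustrative only, and no real locking is shown.

    #include <stdlib.h>
    #include <stdio.h>

    struct access_entry {
        struct access_entry *next;
        int id;
    };

    /* Phase 1: "scan" - unlink everything onto a local head
     * (in the kernel this happens with the cache lock held). */
    static struct access_entry *detach_all(struct access_entry **cache_lru)
    {
        struct access_entry *head = *cache_lru;
        *cache_lru = NULL;
        return head;
    }

    /* Phase 2: "free list" - no lock needed, the entries are private now. */
    static void access_free_list(struct access_entry *head)
    {
        while (head) {
            struct access_entry *cache = head;
            head = head->next;
            printf("freeing entry %d\n", cache->id);
            free(cache);
        }
    }

    int main(void)
    {
        struct access_entry *lru = NULL;
        for (int i = 0; i < 3; i++) {
            struct access_entry *e = malloc(sizeof(*e));
            e->id = i;
            e->next = lru;
            lru = e;
        }
        access_free_list(detach_all(&lru));
        return 0;
    }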
|
/fs/9p/ |
D | vfs_inode_dotl.c |
    335  if ((v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) &&    in v9fs_vfs_atomic_open_dotl()
    359  if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE)    in v9fs_vfs_atomic_open_dotl()
    437  if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {    in v9fs_vfs_mkdir_dotl()
    484  if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {    in v9fs_vfs_getattr_dotl()
    718  if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {    in v9fs_vfs_symlink_dotl()
    794  if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {    in v9fs_vfs_link_dotl()
    870  if (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) {    in v9fs_vfs_mknod_dotl()
    954  flags = (v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE) ?    in v9fs_refresh_inode_dotl()
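Every vfs_inode_dotl.c hit repeats the same predicate: "is the session in a loose or fscache-backed cache mode?". The small helper below is a hypothetical refactor for illustration only, it does not exist in the 9p code, and the enum values are stand-ins for the real p9 cache modes.

    #include <stdbool.h>
    #include <stdio.h>

    /* Stand-in for the 9p session cache modes; values are illustrative. */
    enum p9_cache_modes { CACHE_NONE, CACHE_MTIME, CACHE_LOOSE, CACHE_FSCACHE };

    struct v9fs_session_info { enum p9_cache_modes cache; };

    /* Hypothetical helper factoring out the repeated check from the call sites. */
    static inline bool v9fs_cache_is_loose(const struct v9fs_session_info *v9ses)
    {
        return v9ses->cache == CACHE_LOOSE || v9ses->cache == CACHE_FSCACHE;
    }

    int main(void)
    {
        struct v9fs_session_info s = { .cache = CACHE_LOOSE };
        printf("loose/fscache? %s\n", v9fs_cache_is_loose(&s) ? "yes" : "no");
        return 0;
    }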
|