Searched refs:gfp (Results 1 – 25 of 27) sorted by relevance

/fs/9p/
cache.h:30 extern int __v9fs_fscache_release_page(struct page *page, gfp_t gfp);
43 gfp_t gfp) in v9fs_fscache_release_page() argument
45 return __v9fs_fscache_release_page(page, gfp); in v9fs_fscache_release_page()
103 gfp_t gfp) { in v9fs_fscache_release_page() argument
vfs_addr.c:123 static int v9fs_release_page(struct page *page, gfp_t gfp) in v9fs_release_page() argument
127 return v9fs_fscache_release_page(page, gfp); in v9fs_release_page()
cache.c:203 int __v9fs_fscache_release_page(struct page *page, gfp_t gfp) in __v9fs_fscache_release_page() argument
210 return fscache_maybe_release_page(v9inode->fscache, page, gfp); in __v9fs_fscache_release_page()
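
The 9p hits above all funnel the caller's gfp mask into fscache_maybe_release_page(). Below is a minimal sketch of that releasepage-style wrapper, not copied from fs/9p: my_release_page() is a hypothetical name and takes the cookie directly instead of digging it out of the inode as the real code does.

#include <linux/fscache.h>

/* hypothetical wrapper; the real 9p code looks the cookie up via its inode */
static int my_release_page(struct fscache_cookie *cookie,
                           struct page *page, gfp_t gfp)
{
        if (!PageFsCache(page))
                return 1;       /* the cache has no interest in this page */

        /* false means fscache still needs the page, so refuse the release */
        return fscache_maybe_release_page(cookie, page, gfp);
}
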
/fs/notify/fanotify/
fanotify.c:214 struct inode *inode, gfp_t gfp, in fanotify_encode_fid() argument
232 fid->ext_fh = kmalloc(bytes, gfp); in fanotify_encode_fid()
285 gfp_t gfp = GFP_KERNEL_ACCOUNT; in fanotify_alloc_event() local
295 gfp |= __GFP_NOFAIL; in fanotify_alloc_event()
297 gfp |= __GFP_RETRY_MAYFAIL; in fanotify_alloc_event()
305 pevent = kmem_cache_alloc(fanotify_perm_event_cachep, gfp); in fanotify_alloc_event()
313 event = kmem_cache_alloc(fanotify_event_cachep, gfp); in fanotify_alloc_event()
326 event->fh_type = fanotify_encode_fid(event, id, gfp, fsid); in fanotify_alloc_event()
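
The fanotify hits build the allocation mask incrementally: an accounted base, then either __GFP_NOFAIL or __GFP_RETRY_MAYFAIL depending on whether the event may be dropped. A small sketch of that pattern follows; the cache pointer and the must_succeed condition are illustrative stand-ins, not fanotify's own names.

#include <linux/gfp.h>
#include <linux/slab.h>

/* illustrative only: cache and must_succeed are stand-ins */
static void *alloc_event(struct kmem_cache *cache, bool must_succeed)
{
        gfp_t gfp = GFP_KERNEL_ACCOUNT;         /* charge to the memcg */

        if (must_succeed)
                gfp |= __GFP_NOFAIL;            /* may block, never returns NULL */
        else
                gfp |= __GFP_RETRY_MAYFAIL;     /* try hard, but may fail */

        return kmem_cache_alloc(cache, gfp);
}
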
/fs/fscache/
page.c:64 gfp_t gfp) in __fscache_maybe_release_page() argument
69 _enter("%p,%p,%x", cookie, page, gfp); in __fscache_maybe_release_page()
128 if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS)) { in __fscache_maybe_release_page()
138 gfp &= ~__GFP_DIRECT_RECLAIM; in __fscache_maybe_release_page()
433 gfp_t gfp) in __fscache_read_or_alloc_page() argument
499 ret = object->cache->ops->allocate_page(op, page, gfp); in __fscache_read_or_alloc_page()
505 ret = object->cache->ops->read_or_alloc_page(op, page, gfp); in __fscache_read_or_alloc_page()
564 gfp_t gfp) in __fscache_read_or_alloc_pages() argument
627 op, pages, nr_pages, gfp); in __fscache_read_or_alloc_pages()
632 op, pages, nr_pages, gfp); in __fscache_read_or_alloc_pages()
[all …]
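
The page.c hits show __fscache_maybe_release_page() blocking only when the caller's mask allows it, and clearing __GFP_DIRECT_RECLAIM before retrying. A reduced, illustrative version of that gate is sketched below; store_in_progress stands in for the real per-page state.

#include <linux/gfp.h>
#include <linux/types.h>

/* illustrative gate only; the real function also waits and clears flags */
static int my_maybe_release_page(bool store_in_progress, gfp_t gfp)
{
        if (!store_in_progress)
                return 1;                       /* nothing to wait for */

        /* atomic and GFP_NOFS callers must not block or re-enter the FS */
        if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
                return 0;

        /* a full version would wait for the store, as page.c does */
        return 1;
}
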
/fs/ceph/
cache.h:53 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) in ceph_release_fscache_page() argument
57 return fscache_maybe_release_page(ci->fscache, page, gfp); in ceph_release_fscache_page()
154 static inline int ceph_release_fscache_page(struct page *page, gfp_t gfp) in ceph_release_fscache_page() argument
/fs/ramfs/
file-nommu.c:69 gfp_t gfp = mapping_gfp_mask(inode->i_mapping); in ramfs_nommu_expand_for_mapping() local
84 pages = alloc_pages(gfp, order); in ramfs_nommu_expand_for_mapping()
108 gfp); in ramfs_nommu_expand_for_mapping()
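
The file-nommu.c hits borrow the mapping's stored gfp policy for a higher-order allocation. A sketch of that idea, with an invented helper name:

#include <linux/gfp.h>
#include <linux/pagemap.h>

/* invented name; mirrors the file-nommu.c hits above */
static struct page *alloc_for_mapping(struct address_space *mapping,
                                      unsigned int order)
{
        gfp_t gfp = mapping_gfp_mask(mapping);  /* the mapping's stored policy */

        return alloc_pages(gfp, order);         /* 2^order contiguous pages */
}
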
/fs/afs/
dir_edit.c:197 gfp_t gfp; in afs_edit_dir_add() local
209 gfp = vnode->vfs_inode.i_mapping->gfp_mask; in afs_edit_dir_add()
210 page0 = find_or_create_page(vnode->vfs_inode.i_mapping, 0, gfp); in afs_edit_dir_add()
241 gfp = vnode->vfs_inode.i_mapping->gfp_mask; in afs_edit_dir_add()
243 index, gfp); in afs_edit_dir_add()
rxrpc.c:139 gfp_t gfp) in afs_alloc_call() argument
144 call = kzalloc(sizeof(*call), gfp); in afs_alloc_call()
362 void afs_make_call(struct afs_addr_cursor *ac, struct afs_call *call, gfp_t gfp) in afs_make_call() argument
411 tx_total_len, gfp, in afs_make_call()
dir.c:271 gfp_t gfp = dvnode->vfs_inode.i_mapping->gfp_mask; in afs_read_dir() local
277 req->pages[i] = __page_cache_alloc(gfp); in afs_read_dir()
282 i, gfp); in afs_read_dir()
/fs/cifs/
fscache.h:66 extern int cifs_fscache_release_page(struct page *page, gfp_t gfp);
133 static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp) in cifs_fscache_release_page() argument
fscache.c:219 int cifs_fscache_release_page(struct page *page, gfp_t gfp) in cifs_fscache_release_page() argument
227 if (!fscache_maybe_release_page(cifsi->fscache, page, gfp)) in cifs_fscache_release_page()
/fs/
buffer.c:819 gfp_t gfp = GFP_NOFS | __GFP_ACCOUNT; in alloc_page_buffers() local
824 gfp |= __GFP_NOFAIL; in alloc_page_buffers()
832 bh = alloc_buffer_head(gfp); in alloc_page_buffers()
931 pgoff_t index, int size, int sizebits, gfp_t gfp) in grow_dev_page() argument
940 gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp; in grow_dev_page()
994 grow_buffers(struct block_device *bdev, sector_t block, int size, gfp_t gfp) in grow_buffers() argument
1019 return grow_dev_page(bdev, block, index, size, sizebits, gfp); in grow_buffers()
1024 unsigned size, gfp_t gfp) in __getblk_slow() argument
1046 ret = grow_buffers(bdev, block, size, gfp); in __getblk_slow()
1317 unsigned size, gfp_t gfp) in __getblk_gfp() argument
[all …]
mpage.c:212 gfp_t gfp; in do_mpage_readpage() local
216 gfp = readahead_gfp_mask(page->mapping); in do_mpage_readpage()
219 gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); in do_mpage_readpage()
345 gfp); in do_mpage_readpage()
posix_acl.c:519 __posix_acl_create(struct posix_acl **acl, gfp_t gfp, umode_t *mode_p) in __posix_acl_create() argument
521 struct posix_acl *clone = posix_acl_clone(*acl, gfp); in __posix_acl_create()
537 __posix_acl_chmod(struct posix_acl **acl, gfp_t gfp, umode_t mode) in __posix_acl_chmod() argument
539 struct posix_acl *clone = posix_acl_clone(*acl, gfp); in __posix_acl_chmod()
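
The buffer.c and mpage.c hits in this block derive their masks from the mapping: mapping_gfp_constraint() intersects the mapping's policy with a restriction such as ~__GFP_FS, and readahead_gfp_mask() adds __GFP_NORETRY | __GFP_NOWARN on top. A hypothetical helper in the style of the grow_dev_page() hit, written as a sketch rather than taken from fs/buffer.c:

#include <linux/gfp.h>
#include <linux/pagemap.h>

/* hypothetical helper, modelled on the grow_dev_page() hit above */
static gfp_t buffer_page_gfp(struct address_space *mapping, gfp_t extra)
{
        /*
         * Intersect the mapping's own policy with ~__GFP_FS so the page
         * allocation cannot re-enter the filesystem, then OR in caller
         * extras such as __GFP_NOFAIL.
         */
        return mapping_gfp_constraint(mapping, ~__GFP_FS) | extra;
}
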
/fs/erofs/
zdata.c:547 gfp_t gfp) in __stagingpage_alloc() argument
549 struct page *page = erofs_allocpage(pagepool, gfp, true); in __stagingpage_alloc()
995 gfp_t gfp) in pickup_page_for_submission() argument
1096 page = __stagingpage_alloc(pagepool, gfp); in pickup_page_for_submission()
1104 if (add_to_page_cache_lru(page, mc, index + nr, gfp)) { in pickup_page_for_submission()
1377 gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL); in z_erofs_vle_normalaccess_readpages() local
1399 if (add_to_page_cache_lru(page, mapping, page->index, gfp)) { in z_erofs_vle_normalaccess_readpages()
utils.c:10 struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail) in erofs_allocpage() argument
19 page = alloc_pages(gfp | (nofail ? __GFP_NOFAIL : 0), 0); in erofs_allocpage()
data.c:290 gfp_t gfp = readahead_gfp_mask(mapping); in erofs_raw_access_readpages() local
301 if (!add_to_page_cache_lru(page, mapping, page->index, gfp)) { in erofs_raw_access_readpages()
internal.h:385 struct page *erofs_allocpage(struct list_head *pool, gfp_t gfp, bool nofail);
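
The erofs hits wrap alloc_pages() so callers can request a must-succeed page by flag rather than by building the mask themselves. A sketch written from the utils.c excerpt above, not copied from the tree, with an invented function name:

#include <linux/gfp.h>
#include <linux/mm.h>

/* invented name; written from the utils.c excerpt, not copied from it */
static struct page *allocpage_maybe_nofail(gfp_t gfp, bool nofail)
{
        if (nofail)
                gfp |= __GFP_NOFAIL;    /* caller cannot tolerate failure */

        return alloc_pages(gfp, 0);     /* single order-0 page */
}
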
/fs/cachefiles/
rdwr.c:394 gfp_t gfp) in cachefiles_read_or_alloc_page() argument
685 gfp_t gfp) in cachefiles_read_or_alloc_pages() argument
799 gfp_t gfp) in cachefiles_allocate_page() argument
837 gfp_t gfp) in cachefiles_allocate_pages() argument
/fs/nfs/
fscache.c:331 int nfs_fscache_release_page(struct page *page, gfp_t gfp) in nfs_fscache_release_page() argument
340 if (!fscache_maybe_release_page(cookie, page, gfp)) in nfs_fscache_release_page()
fscache.h:204 static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp) in nfs_fscache_release_page() argument
file.c:430 static int nfs_release_page(struct page *page, gfp_t gfp) in nfs_release_page() argument
437 return nfs_fscache_release_page(page, gfp); in nfs_release_page()
/fs/ext4/
mballoc.c:805 static int ext4_mb_init_cache(struct page *page, char *incore, gfp_t gfp) in ext4_mb_init_cache() argument
838 bh = kzalloc(i, gfp); in ext4_mb_init_cache()
973 ext4_group_t group, struct ext4_buddy *e4b, gfp_t gfp) in ext4_mb_get_buddy_page_lock() argument
992 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_get_buddy_page_lock()
1006 page = find_or_create_page(inode->i_mapping, pnum, gfp); in ext4_mb_get_buddy_page_lock()
1032 int ext4_mb_init_group(struct super_block *sb, ext4_group_t group, gfp_t gfp) in ext4_mb_init_group() argument
1052 ret = ext4_mb_get_buddy_page_lock(sb, group, &e4b, gfp); in ext4_mb_init_group()
1062 ret = ext4_mb_init_cache(page, NULL, gfp); in ext4_mb_init_group()
1081 ret = ext4_mb_init_cache(page, e4b.bd_bitmap, gfp); in ext4_mb_init_group()
1100 struct ext4_buddy *e4b, gfp_t gfp) in ext4_mb_load_buddy_gfp() argument
[all …]
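
The ext4 mballoc hits thread a single gfp_t parameter down through the buddy-cache helpers so the top-level caller decides between GFP_NOFS and anything stricter. Two invented helpers illustrating only that plumbing, not ext4's actual logic:

#include <linux/pagemap.h>
#include <linux/slab.h>

/* invented helper names; they only show the gfp plumbing */
static struct page *get_group_page(struct address_space *mapping,
                                   pgoff_t index, gfp_t gfp)
{
        /* the page-cache insertion honours the caller's gfp choice */
        return find_or_create_page(mapping, index, gfp);
}

static void *get_scratch(size_t bytes, gfp_t gfp)
{
        /* and so does the scratch buffer, cf. the kzalloc(i, gfp) hit */
        return kzalloc(bytes, gfp);
}
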
/fs/iomap/
buffered-io.c:260 gfp_t gfp = mapping_gfp_constraint(page->mapping, GFP_KERNEL); in iomap_readpage_actor() local
267 gfp |= __GFP_NORETRY | __GFP_NOWARN; in iomap_readpage_actor()
268 ctx->bio = bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs)); in iomap_readpage_actor()
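
The iomap hit constrains GFP_KERNEL by the mapping and then relaxes it for readahead before sizing the bio. An illustrative reconstruction follows; is_readahead is a stand-in for the real context test, and the function name is invented.

#include <linux/bio.h>
#include <linux/gfp.h>
#include <linux/kernel.h>
#include <linux/pagemap.h>

/* illustrative reconstruction; is_readahead is a stand-in condition */
static struct bio *alloc_read_bio(struct address_space *mapping,
                                  bool is_readahead, int nr_vecs)
{
        gfp_t gfp = mapping_gfp_constraint(mapping, GFP_KERNEL);

        if (is_readahead)
                gfp |= __GFP_NORETRY | __GFP_NOWARN;    /* fail quietly and cheaply */

        return bio_alloc(gfp, min(BIO_MAX_PAGES, nr_vecs));
}
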
