Search results for refs:gfp_mask (results 1 – 25 of 38), sorted by relevance

/fs/nfs/blocklayout/
dev.c
231 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask);
236 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_simple() argument
242 dev = bl_resolve_deviceid(server, v, gfp_mask); in bl_parse_simple()
353 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_scsi() argument
402 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_slice() argument
407 ret = bl_parse_deviceid(server, d, volumes, v->slice.volume, gfp_mask); in bl_parse_slice()
418 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_concat() argument
431 volumes, v->concat.volumes[i], gfp_mask); in bl_parse_concat()
447 struct pnfs_block_volume *volumes, int idx, gfp_t gfp_mask) in bl_parse_stripe() argument
460 volumes, v->stripe.volumes[i], gfp_mask); in bl_parse_stripe()
[all …]
blocklayout.c
588 gfp_t gfp_mask) in bl_find_get_deviceid() argument
594 node = nfs4_find_get_deviceid(server, id, cred, gfp_mask); in bl_find_get_deviceid()
613 gfp_t gfp_mask) in bl_alloc_extent() argument
632 lo->plh_lc_cred, gfp_mask); in bl_alloc_extent()
669 gfp_t gfp_mask) in bl_alloc_lseg() argument
689 lseg = kzalloc(sizeof(*lseg), gfp_mask); in bl_alloc_lseg()
694 scratch = alloc_page(gfp_mask); in bl_alloc_lseg()
715 status = bl_alloc_extent(&xdr, lo, &lv, &extents, gfp_mask); in bl_alloc_lseg()
blocklayout.h
176 struct pnfs_device *pdev, gfp_t gfp_mask);
193 struct pnfs_block_volume *b, gfp_t gfp_mask);
rpc_pipefs.c
54 gfp_t gfp_mask) in bl_resolve_deviceid() argument
77 msg->data = kzalloc(msg->len, gfp_mask); in bl_resolve_deviceid()
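
Side note on the hits above: in the blocklayout code the caller's gfp_t is threaded unchanged from bl_alloc_lseg() through the recursive bl_parse_*() helpers down to kzalloc() and alloc_page(), so the outermost context decides how restrictive every allocation must be. A minimal sketch of that convention follows; demo_parse_one() and demo_parse_tree() are purely illustrative names, not kernel functions.

    #include <linux/slab.h>

    struct demo_node {
            struct demo_node *child;
    };

    /* Leaf helper: allocates with exactly the mask the caller chose. */
    static struct demo_node *demo_parse_one(gfp_t gfp_mask)
    {
            return kzalloc(sizeof(struct demo_node), gfp_mask);
    }

    /* Recursive helper: passes gfp_mask down untouched, as bl_parse_*() do. */
    static struct demo_node *demo_parse_tree(int depth, gfp_t gfp_mask)
    {
            struct demo_node *node = demo_parse_one(gfp_mask);

            if (node && depth > 0)
                    node->child = demo_parse_tree(depth - 1, gfp_mask);
            return node;
    }

A caller on a writeback or layout-recall path would typically pass GFP_NOFS here, while an unconstrained caller could pass GFP_KERNEL.
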
/fs/btrfs/
ulist.h
48 struct ulist *ulist_alloc(gfp_t gfp_mask);
50 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask);
52 u64 *old_aux, gfp_t gfp_mask);
57 void **old_aux, gfp_t gfp_mask) in ulist_add_merge_ptr() argument
61 int ret = ulist_add_merge(ulist, val, (uintptr_t)aux, &old64, gfp_mask); in ulist_add_merge_ptr()
65 return ulist_add_merge(ulist, val, (u64)aux, (u64 *)old_aux, gfp_mask); in ulist_add_merge_ptr()
ulist.c
92 struct ulist *ulist_alloc(gfp_t gfp_mask) in ulist_alloc() argument
94 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); in ulist_alloc()
186 int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask) in ulist_add() argument
188 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask); in ulist_add()
192 u64 *old_aux, gfp_t gfp_mask) in ulist_add_merge() argument
203 node = kmalloc(sizeof(*node), gfp_mask); in ulist_add_merge()
backref.c
338 struct share_check *sc, gfp_t gfp_mask) in add_prelim_ref() argument
345 ref = kmem_cache_alloc(btrfs_prelim_ref_cache, gfp_mask); in add_prelim_ref()
391 struct share_check *sc, gfp_t gfp_mask) in add_direct_ref() argument
394 parent, wanted_disk_byte, count, sc, gfp_mask); in add_direct_ref()
402 struct share_check *sc, gfp_t gfp_mask) in add_indirect_ref() argument
409 wanted_disk_byte, count, sc, gfp_mask); in add_indirect_ref()
/fs/ntfs/
malloc.h
28 static inline void *__ntfs_malloc(unsigned long size, gfp_t gfp_mask) in __ntfs_malloc() argument
33 return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM); in __ntfs_malloc()
37 return __vmalloc(size, gfp_mask, PAGE_KERNEL); in __ntfs_malloc()
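
The __ntfs_malloc() hit above shows a common small-vs-large fallback: requests up to a page go through kmalloc() with __GFP_HIGHMEM masked off (kmalloc() cannot return highmem pages), and larger requests fall back to __vmalloc() with the same gfp_mask. A hedged reconstruction of that shape; demo_malloc() is an illustrative name, and the three-argument __vmalloc() matches the kernel version indexed here (current kernels drop the pgprot_t argument).

    #include <linux/mm.h>
    #include <linux/slab.h>
    #include <linux/vmalloc.h>

    static inline void *demo_malloc(unsigned long size, gfp_t gfp_mask)
    {
            /* Small requests: always hand out a full page, as ntfs does,
             * and strip __GFP_HIGHMEM since kmalloc() cannot honour it. */
            if (likely(size <= PAGE_SIZE))
                    return kmalloc(PAGE_SIZE, gfp_mask & ~__GFP_HIGHMEM);
            /* Larger buffers: virtually contiguous allocation, same mask. */
            return __vmalloc(size, gfp_mask, PAGE_KERNEL);
    }
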
/fs/crypto/
inline_crypt.c
221 u64 first_lblk, gfp_t gfp_mask) in fscrypt_set_bio_crypt_ctx() argument
233 bio_crypt_set_ctx(bio, &ci->ci_key.blk_key->base, dun, gfp_mask); in fscrypt_set_bio_crypt_ctx()
273 gfp_t gfp_mask) in fscrypt_set_bio_crypt_ctx_bh() argument
279 fscrypt_set_bio_crypt_ctx(bio, inode, first_lblk, gfp_mask); in fscrypt_set_bio_crypt_ctx_bh()
/fs/nfs/
nfs4session.c
105 u32 slotid, u32 seq_init, gfp_t gfp_mask) in nfs4_new_slot() argument
109 slot = kzalloc(sizeof(*slot), gfp_mask); in nfs4_new_slot()
121 u32 slotid, u32 seq_init, gfp_t gfp_mask) in nfs4_find_or_create_slot() argument
129 seq_init, gfp_mask); in nfs4_find_or_create_slot()
pnfs_dev.c
188 gfp_t gfp_mask) in nfs4_find_get_deviceid() argument
197 new = nfs4_get_device_info(server, id, cred, gfp_mask); in nfs4_find_get_deviceid()
nfs4_fs.h
299 extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait);
497 extern struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask);
nfs4state.c
777 fmode_t fmode, gfp_t gfp_mask, int wait) in __nfs4_close() argument
818 nfs4_do_close(state, gfp_mask, wait); in __nfs4_close()
1073 struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter, gfp_t gfp_mask) in nfs_alloc_seqid() argument
1077 new = kmalloc(sizeof(*new), gfp_mask); in nfs_alloc_seqid()
/fs/jbd2/
transaction.c
299 gfp_t gfp_mask) in start_this_handle() argument
330 if ((gfp_mask & __GFP_FS) == 0) in start_this_handle()
331 gfp_mask |= __GFP_NOFAIL; in start_this_handle()
333 gfp_mask); in start_this_handle()
441 gfp_t gfp_mask, unsigned int type, in jbd2__journal_start() argument
472 err = start_this_handle(journal, handle, gfp_mask); in jbd2__journal_start()
675 int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) in jbd2__journal_restart() argument
724 ret = start_this_handle(journal, handle, gfp_mask); in jbd2__journal_restart()
2069 struct page *page, gfp_t gfp_mask) in jbd2_journal_try_to_free_buffers() argument
revoke.c
141 gfp_t gfp_mask = GFP_NOFS; in insert_revoke_hash() local
144 gfp_mask |= __GFP_NOFAIL; in insert_revoke_hash()
145 record = kmem_cache_alloc(jbd2_revoke_record_cache, gfp_mask); in insert_revoke_hash()
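
The jbd2 hits show two uses of the same idea: start_this_handle() adds __GFP_NOFAIL whenever the caller's mask lacks __GFP_FS, and insert_revoke_hash() starts from GFP_NOFS and may OR in __GFP_NOFAIL before kmem_cache_alloc(). A minimal sketch of that "filesystem-safe, optionally must-not-fail" allocation; must_succeed is a stand-in for the real journal_oom_retry logic and demo_alloc_record() is an illustrative name.

    #include <linux/slab.h>

    static void *demo_alloc_record(struct kmem_cache *cache, bool must_succeed)
    {
            gfp_t gfp_mask = GFP_NOFS;      /* no FS recursion during reclaim here */

            if (must_succeed)
                    gfp_mask |= __GFP_NOFAIL;       /* allocator retries instead of failing */

            return kmem_cache_alloc(cache, gfp_mask);
    }
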
/fs/nilfs2/
mdt.h
80 int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz);
mdt.c
444 int nilfs_mdt_init(struct inode *inode, gfp_t gfp_mask, size_t objsz) in nilfs_mdt_init() argument
456 mapping_set_gfp_mask(inode->i_mapping, gfp_mask); in nilfs_mdt_init()
/fs/jfs/
jfs_metapage.c
173 static inline struct metapage *alloc_metapage(gfp_t gfp_mask) in alloc_metapage() argument
175 struct metapage *mp = mempool_alloc(metapage_mempool, gfp_mask); in alloc_metapage()
528 static int metapage_releasepage(struct page *page, gfp_t gfp_mask) in metapage_releasepage() argument
/fs/gfs2/
inode.h
15 extern int gfs2_releasepage(struct page *page, gfp_t gfp_mask);
/fs/afs/
dir_edit.c
209 gfp = vnode->vfs_inode.i_mapping->gfp_mask; in afs_edit_dir_add()
241 gfp = vnode->vfs_inode.i_mapping->gfp_mask; in afs_edit_dir_add()
/fs/xfs/
xfs_buf.c
344 gfp_t gfp_mask = xb_to_gfp(flags); in xfs_buf_allocate_memory() local
355 gfp_mask |= __GFP_ZERO; in xfs_buf_allocate_memory()
404 page = alloc_page(gfp_mask); in xfs_buf_allocate_memory()
422 __func__, gfp_mask); in xfs_buf_allocate_memory()
xfs_iops.c
1268 gfp_t gfp_mask; in xfs_setup_inode() local
1304 gfp_mask = mapping_gfp_mask(inode->i_mapping); in xfs_setup_inode()
1305 mapping_set_gfp_mask(inode->i_mapping, (gfp_mask & ~(__GFP_FS))); in xfs_setup_inode()
xfs_aops.c
1129 gfp_t gfp_mask) in xfs_vm_releasepage() argument
1132 return iomap_releasepage(page, gfp_mask); in xfs_vm_releasepage()
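
The xfs_setup_inode() hit works in the opposite direction: instead of passing a mask down a call chain, it edits the default mask stored on the inode's address_space, so every later page cache allocation for that mapping has __GFP_FS cleared. A short sketch of that manipulation; demo_setup_mapping() is an illustrative name.

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    static void demo_setup_mapping(struct inode *inode)
    {
            gfp_t gfp_mask = mapping_gfp_mask(inode->i_mapping);

            /* page cache allocations for this inode may not re-enter the FS */
            mapping_set_gfp_mask(inode->i_mapping, gfp_mask & ~__GFP_FS);
    }
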
/fs/
buffer.c
938 gfp_t gfp_mask; in grow_dev_page() local
940 gfp_mask = mapping_gfp_constraint(inode->i_mapping, ~__GFP_FS) | gfp; in grow_dev_page()
948 gfp_mask |= __GFP_NOFAIL; in grow_dev_page()
950 page = find_or_create_page(inode->i_mapping, index, gfp_mask); in grow_dev_page()
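
grow_dev_page() combines both mechanisms: it constrains the mapping's stored mask with mapping_gfp_constraint() (dropping __GFP_FS), merges the caller's extra flags, then adds __GFP_NOFAIL before find_or_create_page(). A compressed sketch of that combination; demo_grab_page() is an illustrative name, and the listing only shows __GFP_NOFAIL being set, so the sketch applies it unconditionally.

    #include <linux/pagemap.h>

    static struct page *demo_grab_page(struct address_space *mapping,
                                       pgoff_t index, gfp_t extra_gfp)
    {
            gfp_t gfp_mask;

            /* mapping's default mask, minus __GFP_FS, plus caller's flags */
            gfp_mask = mapping_gfp_constraint(mapping, ~__GFP_FS) | extra_gfp;
            gfp_mask |= __GFP_NOFAIL;       /* caller cannot handle failure */

            return find_or_create_page(mapping, index, gfp_mask);
    }
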
/fs/erofs/
super.c
296 static int erofs_managed_cache_releasepage(struct page *page, gfp_t gfp_mask) in erofs_managed_cache_releasepage() argument
