/fs/ocfs2/cluster/ |
D | tcp.c |
    297   static u8 o2net_num_from_nn(struct o2net_node *nn)    in o2net_num_from_nn() argument
    299   BUG_ON(nn == NULL);    in o2net_num_from_nn()
    300   return nn - o2net_nodes;    in o2net_num_from_nn()
    305   static int o2net_prep_nsw(struct o2net_node *nn, struct o2net_status_wait *nsw)    in o2net_prep_nsw() argument
    310   if (!idr_pre_get(&nn->nn_status_idr, GFP_ATOMIC)) {    in o2net_prep_nsw()
    314   spin_lock(&nn->nn_lock);    in o2net_prep_nsw()
    315   ret = idr_get_new(&nn->nn_status_idr, nsw, &nsw->ns_id);    in o2net_prep_nsw()
    318   &nn->nn_status_list);    in o2net_prep_nsw()
    319   spin_unlock(&nn->nn_lock);    in o2net_prep_nsw()
    331   static void o2net_complete_nsw_locked(struct o2net_node *nn,    in o2net_complete_nsw_locked() argument
    [all …]
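The o2net_prep_nsw() lines above use the old two-step IDR interface (removed in Linux 3.9): preallocate with idr_pre_get() outside the lock, then take the id with idr_get_new() under the spinlock. A minimal sketch of that pattern, assuming hypothetical names (my_table, my_lock, my_alloc_id) rather than the ocfs2 ones:

    #include <linux/idr.h>
    #include <linux/spinlock.h>
    #include <linux/gfp.h>
    #include <linux/errno.h>

    static DEFINE_SPINLOCK(my_lock);
    static DEFINE_IDR(my_table);

    /* Allocate an id for obj using the pre-3.9 two-step IDR interface. */
    static int my_alloc_id(void *obj, int *id)
    {
    	int ret;

    	/* Preallocate; GFP_ATOMIC because the caller may hold locks. */
    	if (!idr_pre_get(&my_table, GFP_ATOMIC))
    		return -ENOMEM;

    	spin_lock(&my_lock);
    	ret = idr_get_new(&my_table, obj, id);	/* 0 on success, -EAGAIN on race */
    	spin_unlock(&my_lock);

    	return ret;
    }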
|
/fs/nfs/ |
D | client.c |
    68    struct nfs_net *nn = net_generic(clp->net, nfs_net_id);    in nfs_get_cb_ident_idr() local
    73    if (!idr_pre_get(&nn->cb_ident_idr, GFP_KERNEL))    in nfs_get_cb_ident_idr()
    75    spin_lock(&nn->nfs_client_lock);    in nfs_get_cb_ident_idr()
    76    ret = idr_get_new(&nn->cb_ident_idr, clp, &clp->cl_cb_ident);    in nfs_get_cb_ident_idr()
    77    spin_unlock(&nn->nfs_client_lock);    in nfs_get_cb_ident_idr()
    243   struct nfs_net *nn = net_generic(net, nfs_net_id);    in nfs_cleanup_cb_ident_idr() local
    245   idr_destroy(&nn->cb_ident_idr);    in nfs_cleanup_cb_ident_idr()
    251   struct nfs_net *nn = net_generic(clp->net, nfs_net_id);    in nfs_cb_idr_remove_locked() local
    254   idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident);    in nfs_cb_idr_remove_locked()
    318   struct nfs_net *nn;    in nfs_put_client() local
    [all …]
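Nearly every fs/nfs fragment in this listing starts with nn = net_generic(net, nfs_net_id); the id comes from a pernet_operations registration that also sizes the per-namespace structure. A sketch of that setup, assuming hypothetical names (my_net, my_net_id, my_pernet_ops), not the actual fs/nfs code:

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <net/net_namespace.h>
    #include <net/netns/generic.h>

    struct my_net {
    	spinlock_t	 lock;
    	struct list_head clients;
    };

    static int my_net_id;

    /* Called once per network namespace; net_generic() returns this slot later. */
    static __net_init int my_net_init(struct net *net)
    {
    	struct my_net *nn = net_generic(net, my_net_id);

    	spin_lock_init(&nn->lock);
    	INIT_LIST_HEAD(&nn->clients);
    	return 0;
    }

    static struct pernet_operations my_pernet_ops = {
    	.init = my_net_init,
    	.id   = &my_net_id,
    	.size = sizeof(struct my_net),	/* allocated per namespace by the core */
    };

    /* At module init: register_pernet_subsys(&my_pernet_ops); */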
|
D | dns_resolve.c |
    335   struct nfs_net *nn = net_generic(net, nfs_net_id);    in nfs_dns_resolve_name() local
    337   ret = do_cache_lookup_wait(nn->nfs_dns_resolve, &key, &item);    in nfs_dns_resolve_name()
    344   cache_put(&item->h, nn->nfs_dns_resolve);    in nfs_dns_resolve_name()
    353   struct nfs_net *nn = net_generic(net, nfs_net_id);    in nfs_dns_resolver_cache_init() local
    383   nn->nfs_dns_resolve = cd;    in nfs_dns_resolver_cache_init()
    397   struct nfs_net *nn = net_generic(net, nfs_net_id);    in nfs_dns_resolver_cache_destroy() local
    398   struct cache_detail *cd = nn->nfs_dns_resolve;    in nfs_dns_resolver_cache_destroy()
    411   struct nfs_net *nn = net_generic(net, nfs_net_id);    in rpc_pipefs_event() local
    412   struct cache_detail *cd = nn->nfs_dns_resolve;    in rpc_pipefs_event()
|
D | idmap.c |
    549   struct nfs_net *nn = net_generic(net, nfs_net_id);    in nfs_get_client_for_event() local
    553   spin_lock(&nn->nfs_client_lock);    in nfs_get_client_for_event()
    554   list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {    in nfs_get_client_for_event()
    562   spin_unlock(&nn->nfs_client_lock);    in nfs_get_client_for_event()
    565   spin_unlock(&nn->nfs_client_lock);    in nfs_get_client_for_event()
|
/fs/nfs/blocklayout/ |
D | blocklayoutdm.c |
    52    struct nfs_net *nn = net_generic(net, nfs_net_id);    in dev_remove() local
    56    bl_pipe_msg.bl_wq = &nn->bl_wq;    in dev_remove()
    71    add_wait_queue(&nn->bl_wq, &wq);    in dev_remove()
    72    if (rpc_queue_upcall(nn->bl_device_pipe, msg) < 0) {    in dev_remove()
    73    remove_wait_queue(&nn->bl_wq, &wq);    in dev_remove()
    80    remove_wait_queue(&nn->bl_wq, &wq);    in dev_remove()
|
D | blocklayoutdev.c |
    85    struct nfs_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,    in bl_pipe_downcall() local
    91    if (copy_from_user(&nn->bl_mount_reply, src, mlen) != 0)    in bl_pipe_downcall()
    94    wake_up(&nn->bl_wq);    in bl_pipe_downcall()
    127   struct nfs_net *nn = net_generic(net, nfs_net_id);    in nfs4_blk_decode_device() local
    128   struct bl_dev_msg *reply = &nn->bl_mount_reply;    in nfs4_blk_decode_device()
    134   bl_pipe_msg.bl_wq = &nn->bl_wq;    in nfs4_blk_decode_device()
    155   add_wait_queue(&nn->bl_wq, &wq);    in nfs4_blk_decode_device()
    156   rc = rpc_queue_upcall(nn->bl_device_pipe, msg);    in nfs4_blk_decode_device()
    158   remove_wait_queue(&nn->bl_wq, &wq);    in nfs4_blk_decode_device()
    166   remove_wait_queue(&nn->bl_wq, &wq);    in nfs4_blk_decode_device()
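The dev_remove() and nfs4_blk_decode_device() fragments queue an rpc_pipefs upcall and sleep on nn->bl_wq until bl_pipe_downcall() copies the userspace reply into nn->bl_mount_reply and calls wake_up(). A rough sketch of that handshake, mirroring the ordering visible above; the function name and parameters are illustrative and all error handling beyond the queue failure is omitted:

    #include <linux/sched.h>
    #include <linux/wait.h>
    #include <linux/sunrpc/rpc_pipe_fs.h>

    static int queue_upcall_and_wait(struct rpc_pipe *pipe,
    				 wait_queue_head_t *reply_wq,
    				 struct rpc_pipe_msg *msg)
    {
    	DECLARE_WAITQUEUE(wq, current);
    	int rc;

    	/* Park on the reply queue first, so the downcall's wake_up() finds us. */
    	add_wait_queue(reply_wq, &wq);

    	rc = rpc_queue_upcall(pipe, msg);
    	if (rc < 0) {
    		remove_wait_queue(reply_wq, &wq);
    		return rc;
    	}

    	/* Sleep until the downcall handler stores the reply and wakes the queue. */
    	set_current_state(TASK_UNINTERRUPTIBLE);
    	schedule();
    	__set_current_state(TASK_RUNNING);
    	remove_wait_queue(reply_wq, &wq);

    	return 0;
    }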
|
D | blocklayout.c |
    1204  struct nfs_net *nn = net_generic(net, nfs_net_id);    in rpc_pipefs_event() local
    1211  if (nn->bl_device_pipe == NULL) {    in rpc_pipefs_event()
    1218  dentry = nfs4blocklayout_register_sb(sb, nn->bl_device_pipe);    in rpc_pipefs_event()
    1223  nn->bl_device_pipe->dentry = dentry;    in rpc_pipefs_event()
    1226  if (nn->bl_device_pipe->dentry)    in rpc_pipefs_event()
    1227  nfs4blocklayout_unregister_sb(sb, nn->bl_device_pipe);    in rpc_pipefs_event()
    1269  struct nfs_net *nn = net_generic(net, nfs_net_id);    in nfs4blocklayout_net_init() local
    1272  init_waitqueue_head(&nn->bl_wq);    in nfs4blocklayout_net_init()
    1273  nn->bl_device_pipe = rpc_mkpipe_data(&bl_upcall_ops, 0);    in nfs4blocklayout_net_init()
    1274  if (IS_ERR(nn->bl_device_pipe))    in nfs4blocklayout_net_init()
    [all …]
|
/fs/ubifs/ |
D | tnc.c |
    613   int nn = *n;    in tnc_next() local
    615   nn += 1;    in tnc_next()
    616   if (nn < znode->child_cnt) {    in tnc_next()
    617   *n = nn;    in tnc_next()
    626   nn = znode->iip + 1;    in tnc_next()
    628   if (nn < znode->child_cnt) {    in tnc_next()
    629   znode = get_znode(c, znode, nn);    in tnc_next()
    637   nn = 0;    in tnc_next()
    642   *n = nn;    in tnc_next()
    658   int nn = *n;    in tnc_prev() local
    [all …]
|
D | file.c |
    611   int i = 0, nn = *n, offs = bu->zbranch[0].offs, hole = 0, read = 0;    in populate_page() local
    634   if (nn >= bu->cnt) {    in populate_page()
    637   } else if (key_block(c, &bu->zbranch[nn].key) == page_block) {    in populate_page()
    640   dn = bu->buf + (bu->zbranch[nn].offs - offs);    in populate_page()
    659   nn += 1;    in populate_page()
    661   } else if (key_block(c, &bu->zbranch[nn].key) < page_block) {    in populate_page()
    662   nn += 1;    in populate_page()
    691   *n = nn;    in populate_page()
|
D | lpt.c |
    2224  struct ubifs_nnode *nnode, *nn;    in dbg_check_lpt_nodes() local
    2243  nn = (struct ubifs_nnode *)cnode;    in dbg_check_lpt_nodes()
    2245  cn = nn->nbranch[iip].cnode;    in dbg_check_lpt_nodes()
|
/fs/nfsd/ |
D | nfs4recover.c |
    575   struct nfsd_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,    in cld_pipe_downcall() local
    577   struct cld_net *cn = nn->cld_net;    in cld_pipe_downcall()
    688   struct nfsd_net *nn = net_generic(net, nfsd_net_id);    in nfsd4_init_cld_pipe() local
    691   if (nn->cld_net)    in nfsd4_init_cld_pipe()
    715   nn->cld_net = cn;    in nfsd4_init_cld_pipe()
    730   struct nfsd_net *nn = net_generic(net, nfsd_net_id);    in nfsd4_remove_cld_pipe() local
    731   struct cld_net *cn = nn->cld_net;    in nfsd4_remove_cld_pipe()
    735   kfree(nn->cld_net);    in nfsd4_remove_cld_pipe()
    736   nn->cld_net = NULL;    in nfsd4_remove_cld_pipe()
    788   struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);    in nfsd4_cld_create() local
    [all …]
|
/fs/gfs2/ |
D | trace_gfs2.h |
    18    #define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }    argument
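dlm_state_name() pastes its argument onto DLM_LOCK_ and stringifies it, yielding the { value, "name" } pairs that __print_symbolic() expects in a tracepoint format string. An illustrative expansion (the surrounding tracepoint definition is not shown here):

    #include <linux/dlm.h>

    #define dlm_state_name(nn) { DLM_LOCK_##nn, #nn }

    /* dlm_state_name(EX) expands to: { DLM_LOCK_EX, "EX" } */
    /*
     * so a TP_printk() can render a numeric lock state symbolically, e.g.
     *	__print_symbolic(state, dlm_state_name(NL), dlm_state_name(EX))
     */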
|
/fs/ocfs2/dlm/ |
D | dlmdomain.c |
    70    unsigned int nn;    in byte_copymap() local
    76    for (nn = 0 ; nn < sz; nn++)    in byte_copymap()
    77    if (test_bit(nn, smap))    in byte_copymap()
    78    byte_set_bit(nn, dmap);    in byte_copymap()
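byte_copymap() walks an unsigned-long kernel bitmap bit by bit and mirrors the set bits into a byte-granular map; byte_set_bit() is an ocfs2 helper. A self-contained sketch of the same loop, with the helper open-coded as an assumption about its effect rather than the real implementation:

    #include <linux/bitops.h>
    #include <linux/string.h>
    #include <linux/types.h>

    static void copy_bitmap_to_bytemap(u8 *dmap, const unsigned long *smap,
    				   unsigned int sz)
    {
    	unsigned int nn;

    	memset(dmap, 0, (sz + 7) / 8);
    	for (nn = 0; nn < sz; nn++)
    		if (test_bit(nn, smap))
    			dmap[nn >> 3] |= 1 << (nn & 7);	/* stand-in for byte_set_bit(nn, dmap) */
    }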
|
D | dlmmaster.c |
    1879  int nn = -1;    in dlm_assert_master_handler() local
    1889  while ((nn = find_next_bit (mle->response_map, O2NM_MAX_NODES,    in dlm_assert_master_handler()
    1890  nn+1)) < O2NM_MAX_NODES) {    in dlm_assert_master_handler()
    1891  if (nn != dlm->node_num && nn != assert->node_idx)    in dlm_assert_master_handler()
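The dlm_assert_master_handler() fragment is the standard idiom for walking the set bits of a bitmap: find_next_bit() returns the next set bit at or after the given offset, or the size argument (O2NM_MAX_NODES there) when none remain. A generic sketch of that walk; the function and parameter names are made up for illustration:

    #include <linux/bitops.h>

    static void visit_responding_nodes(const unsigned long *response_map,
    				   unsigned int max_nodes)
    {
    	int nn = -1;

    	/* Each iteration yields the index of the next set bit after nn. */
    	while ((nn = find_next_bit(response_map, max_nodes, nn + 1)) < max_nodes) {
    		/* nn is a node number whose bit is set in response_map */
    	}
    }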
|