/kernel/linux/linux-5.10/fs/gfs2/ |
D | glock.c |
    54   struct gfs2_glock *gl; /* current glock struct */    member
    58   typedef void (*glock_examiner) (struct gfs2_glock * gl);
    60   static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
    116  static void wake_up_glock(struct gfs2_glock *gl)    in wake_up_glock() argument
    118  wait_queue_head_t *wq = glock_waitqueue(&gl->gl_name);    in wake_up_glock()
    121  __wake_up(wq, TASK_NORMAL, 1, &gl->gl_name);    in wake_up_glock()
    126  struct gfs2_glock *gl = container_of(rcu, struct gfs2_glock, gl_rcu);    in gfs2_glock_dealloc() local
    128  kfree(gl->gl_lksb.sb_lvbptr);    in gfs2_glock_dealloc()
    129  if (gl->gl_ops->go_flags & GLOF_ASPACE)    in gfs2_glock_dealloc()
    130  kmem_cache_free(gfs2_glock_aspace_cachep, gl);    in gfs2_glock_dealloc()
    [all …]
|
D | glock.h |
    128  void (*lm_put_lock) (struct gfs2_glock *gl);
    129  int (*lm_lock) (struct gfs2_glock *gl, unsigned int req_state,
    131  void (*lm_cancel) (struct gfs2_glock *gl);
    136  static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)    in gfs2_glock_is_locked_by_me() argument
    142  spin_lock(&gl->gl_lockref.lock);    in gfs2_glock_is_locked_by_me()
    144  list_for_each_entry(gh, &gl->gl_holders, gh_list) {    in gfs2_glock_is_locked_by_me()
    152  spin_unlock(&gl->gl_lockref.lock);    in gfs2_glock_is_locked_by_me()
    157  static inline int gfs2_glock_is_held_excl(struct gfs2_glock *gl)    in gfs2_glock_is_held_excl() argument
    159  return gl->gl_state == LM_ST_EXCLUSIVE;    in gfs2_glock_is_held_excl()
    162  static inline int gfs2_glock_is_held_dfrd(struct gfs2_glock *gl)    in gfs2_glock_is_held_dfrd() argument
    [all …]
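
Note: the gfs2_glock_is_locked_by_me() hits above show a recurring pattern: take gl->gl_lockref.lock, walk the gl_holders list, and return the holder owned by the current task, if any. Below is a minimal userspace sketch of that walk, assuming a plain pthread mutex and a singly linked holder list stand in for the glock internals; lock_obj, holder and locked_by_me() are illustrative names, not GFS2 API.

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-in for a glock with a list of lock holders. */
struct holder {
	struct holder *next;
	pthread_t owner;        /* who queued this holder */
};

struct lock_obj {
	pthread_mutex_t lock;   /* stands in for gl->gl_lockref.lock */
	struct holder *holders; /* stands in for gl->gl_holders */
};

/* Return the holder owned by the calling thread, or NULL.  The kernel
 * helper also filters on holder state, not just on the owner. */
static struct holder *locked_by_me(struct lock_obj *lo)
{
	struct holder *h, *mine = NULL;

	pthread_mutex_lock(&lo->lock);
	for (h = lo->holders; h; h = h->next) {
		if (pthread_equal(h->owner, pthread_self())) {
			mine = h;
			break;
		}
	}
	pthread_mutex_unlock(&lo->lock);
	return mine;
}

int main(void)
{
	struct holder me = { .next = NULL, .owner = pthread_self() };
	struct lock_obj lo = { .lock = PTHREAD_MUTEX_INITIALIZER, .holders = &me };

	printf("held by me: %s\n", locked_by_me(&lo) ? "yes" : "no");
	return 0;
}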
|
D | glops.c |
    34   static void gfs2_ail_error(struct gfs2_glock *gl, const struct buffer_head *bh)    in gfs2_ail_error() argument
    36   fs_err(gl->gl_name.ln_sbd,    in gfs2_ail_error()
    41   fs_err(gl->gl_name.ln_sbd, "AIL glock %u:%llu mapping %p\n",    in gfs2_ail_error()
    42   gl->gl_name.ln_type, gl->gl_name.ln_number,    in gfs2_ail_error()
    43   gfs2_glock2aspace(gl));    in gfs2_ail_error()
    44   gfs2_lm(gl->gl_name.ln_sbd, "AIL error\n");    in gfs2_ail_error()
    45   gfs2_withdraw(gl->gl_name.ln_sbd);    in gfs2_ail_error()
    56   static void __gfs2_ail_flush(struct gfs2_glock *gl, bool fsync,    in __gfs2_ail_flush() argument
    59   struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;    in __gfs2_ail_flush()
    60   struct list_head *head = &gl->gl_ail_list;    in __gfs2_ail_flush()
    [all …]
|
D | lock_dlm.c |
    72   static inline void gfs2_update_reply_times(struct gfs2_glock *gl)    in gfs2_update_reply_times() argument
    75   const unsigned gltype = gl->gl_name.ln_type;    in gfs2_update_reply_times()
    76   unsigned index = test_bit(GLF_BLOCKING, &gl->gl_flags) ?    in gfs2_update_reply_times()
    81   rtt = ktime_to_ns(ktime_sub(ktime_get_real(), gl->gl_dstamp));    in gfs2_update_reply_times()
    82   lks = this_cpu_ptr(gl->gl_name.ln_sbd->sd_lkstats);    in gfs2_update_reply_times()
    83   gfs2_update_stats(&gl->gl_stats, index, rtt); /* Local */    in gfs2_update_reply_times()
    87   trace_gfs2_glock_lock_time(gl, rtt);    in gfs2_update_reply_times()
    99   static inline void gfs2_update_request_times(struct gfs2_glock *gl)    in gfs2_update_request_times() argument
    102  const unsigned gltype = gl->gl_name.ln_type;    in gfs2_update_request_times()
    107  dstamp = gl->gl_dstamp;    in gfs2_update_request_times()
    [all …]
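
Note: gfs2_update_reply_times() above timestamps the DLM round trip (now minus gl_dstamp) and folds it into one of two per-glock stat buckets depending on GLF_BLOCKING. Below is a rough userspace sketch of that bookkeeping, assuming CLOCK_MONOTONIC and a crude exponential average in place of gfs2_update_stats(); all names are illustrative, and the kernel version also tracks variance.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Two smoothed round-trip-time buckets, mirroring the blocking vs.
 * non-blocking split chosen via GLF_BLOCKING. */
enum { RTT_NONBLOCKING, RTT_BLOCKING, RTT_NR };

static int64_t srtt_ns[RTT_NR];

static int64_t now_ns(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (int64_t)ts.tv_sec * 1000000000 + ts.tv_nsec;
}

/* Fold a new reply-time sample into a simple exponential average. */
static void update_reply_time(int64_t dstamp_ns, bool blocking)
{
	int idx = blocking ? RTT_BLOCKING : RTT_NONBLOCKING;
	int64_t rtt = now_ns() - dstamp_ns;

	srtt_ns[idx] += (rtt - srtt_ns[idx]) / 8;
}

int main(void)
{
	int64_t start = now_ns();

	/* ... issue a (pretend) lock request and wait for the reply ... */
	update_reply_time(start, false);
	printf("srtt (non-blocking) = %lld ns\n",
	       (long long)srtt_ns[RTT_NONBLOCKING]);
	return 0;
}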
|
D | trace_gfs2.h |
    92   TP_PROTO(const struct gfs2_glock *gl, unsigned int new_state),
    94   TP_ARGS(gl, new_state),
    108  __entry->dev = gl->gl_name.ln_sbd->sd_vfs->s_dev;
    109  __entry->glnum = gl->gl_name.ln_number;
    110  __entry->gltype = gl->gl_name.ln_type;
    111  __entry->cur_state = glock_trace_state(gl->gl_state);
    113  __entry->tgt_state = glock_trace_state(gl->gl_target);
    114  __entry->dmt_state = glock_trace_state(gl->gl_demote_state);
    115  __entry->flags = gl->gl_flags | (gl->gl_object ? (1UL<<GLF_OBJECT) : 0);
    131  TP_PROTO(const struct gfs2_glock *gl),
    [all …]
|
D | main.c |
    53   struct gfs2_glock *gl = foo;    in gfs2_init_glock_once() local
    55   spin_lock_init(&gl->gl_lockref.lock);    in gfs2_init_glock_once()
    56   INIT_LIST_HEAD(&gl->gl_holders);    in gfs2_init_glock_once()
    57   INIT_LIST_HEAD(&gl->gl_lru);    in gfs2_init_glock_once()
    58   INIT_LIST_HEAD(&gl->gl_ail_list);    in gfs2_init_glock_once()
    59   atomic_set(&gl->gl_ail_count, 0);    in gfs2_init_glock_once()
    60   atomic_set(&gl->gl_revokes, 0);    in gfs2_init_glock_once()
    65   struct gfs2_glock *gl = foo;    in gfs2_init_gl_aspace_once() local
    66   struct address_space *mapping = (struct address_space *)(gl + 1);    in gfs2_init_gl_aspace_once()
    68   gfs2_init_glock_once(gl);    in gfs2_init_gl_aspace_once()
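
Note: gfs2_init_glock_once() is a slab constructor: it runs once when an object is first carved out of a new slab page, setting up the spinlock, list heads and counters so the hot allocation path does not have to. Below is a toy userspace analogue of that constructor-on-pool-growth idea; obj, obj_ctor() and pool_grow() are made-up names, not kernel API.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative object mirroring the fields the constructor sets up:
 * a lock, a couple of list heads and counters. */
struct obj {
	pthread_mutex_t lock;
	struct obj *holders;   /* stand-ins for the list heads */
	struct obj *lru;
	int ail_count;
	int revokes;
};

/* Constructor run once per object when the pool grows, like a
 * kmem_cache constructor. */
static void obj_ctor(void *p)
{
	struct obj *o = p;

	pthread_mutex_init(&o->lock, NULL);
	o->holders = NULL;
	o->lru = NULL;
	o->ail_count = 0;
	o->revokes = 0;
}

/* Toy pool: allocate a batch and run the constructor on each slot. */
static struct obj *pool_grow(size_t n)
{
	struct obj *batch = calloc(n, sizeof(*batch));

	for (size_t i = 0; batch && i < n; i++)
		obj_ctor(&batch[i]);
	return batch;
}

int main(void)
{
	struct obj *batch = pool_grow(8);

	printf("first object ail_count = %d\n", batch ? batch[0].ail_count : -1);
	free(batch);
	return 0;
}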
|
D | meta_io.c |
    110  struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)    in gfs2_getbuf() argument
    112  struct address_space *mapping = gfs2_glock2aspace(gl);    in gfs2_getbuf()
    113  struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;    in gfs2_getbuf()
    178  struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)    in gfs2_meta_new() argument
    181  bh = gfs2_getbuf(gl, blkno, CREATE);    in gfs2_meta_new()
    247  int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,    in gfs2_meta_read() argument
    250  struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;    in gfs2_meta_read()
    255  (!sdp->sd_jdesc || gl != sdp->sd_jinode_gl)) {    in gfs2_meta_read()
    260  *bhp = bh = gfs2_getbuf(gl, blkno, CREATE);    in gfs2_meta_read()
    273  bh = gfs2_getbuf(gl, blkno + 1, CREATE);    in gfs2_meta_read()
    [all …]
|
D | trans.c |
    128  static struct gfs2_bufdata *gfs2_alloc_bufdata(struct gfs2_glock *gl,    in gfs2_alloc_bufdata() argument
    135  bd->bd_gl = gl;    in gfs2_alloc_bufdata()
    157  void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh)    in gfs2_trans_add_data() argument
    160  struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;    in gfs2_trans_add_data()
    174  bd = gfs2_alloc_bufdata(gl, bh);    in gfs2_trans_add_data()
    180  gfs2_assert(sdp, bd->bd_gl == gl);    in gfs2_trans_add_data()
    194  void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh)    in gfs2_trans_add_meta() argument
    197  struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;    in gfs2_trans_add_meta()
    215  bd = gfs2_alloc_bufdata(gl, bh);    in gfs2_trans_add_meta()
    222  gfs2_assert(sdp, bd->bd_gl == gl);    in gfs2_trans_add_meta()
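
Note: the trans.c hits show buffers being added to a transaction: gfs2_alloc_bufdata() attaches a gfs2_bufdata that back-points to the glock (bd->bd_gl = gl), and later adds only assert that the owner still matches. Below is a small sketch of that lazy-attach-then-verify pattern in plain C; lock_obj, bufdata, buffer and trans_add_buf() are illustrative, not the GFS2 structures.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

struct lock_obj { int id; };     /* stands in for struct gfs2_glock */

/* Per-buffer journal bookkeeping, in the spirit of struct gfs2_bufdata. */
struct bufdata {
	struct lock_obj *owner;  /* back-pointer, like bd->bd_gl */
};

struct buffer {
	struct bufdata *bd;      /* attached lazily, like bh->b_private */
};

/* Attach bookkeeping on first use; on later adds only re-check that the
 * buffer still belongs to the same lock, mirroring
 * gfs2_assert(sdp, bd->bd_gl == gl). */
static struct bufdata *trans_add_buf(struct lock_obj *lo, struct buffer *b)
{
	if (!b->bd) {
		b->bd = calloc(1, sizeof(*b->bd));
		if (!b->bd)
			return NULL;
		b->bd->owner = lo;
	}
	assert(b->bd->owner == lo);
	return b->bd;
}

int main(void)
{
	struct lock_obj lo = { 1 };
	struct buffer b = { 0 };

	trans_add_buf(&lo, &b);   /* first add allocates and links */
	trans_add_buf(&lo, &b);   /* second add only re-checks the owner */
	printf("bufdata attached: %s\n", b.bd ? "yes" : "no");
	free(b.bd);
	return 0;
}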
|
D | incore.h |
    37   typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
    241  int (*go_sync) (struct gfs2_glock *gl);
    242  int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
    243  void (*go_inval) (struct gfs2_glock *gl, int flags);
    244  int (*go_demote_ok) (const struct gfs2_glock *gl);
    246  void (*go_dump)(struct seq_file *seq, struct gfs2_glock *gl,
    248  void (*go_callback)(struct gfs2_glock *gl, bool remote);
    249  void (*go_free)(struct gfs2_glock *gl);
    878  static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)    in gfs2_glstats_inc() argument
    880  gl->gl_stats.stats[which]++;    in gfs2_glstats_inc()
    [all …]
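
Note: the incore.h hits are the per-type glock operations table (go_sync, go_inval, go_demote_ok, go_dump, ...); each glock carries a pointer to the table for its type and callers dispatch through it. Below is a condensed sketch of that ops-table idiom with invented names (obj_ops, inode_obj_ops, demote()) rather than the real gfs2_glock_operations.

#include <stdbool.h>
#include <stdio.h>

struct obj;

/* Per-type operations table, patterned after the function-pointer
 * members shown above; all names here are illustrative. */
struct obj_ops {
	int  (*sync)(struct obj *o);
	void (*invalidate)(struct obj *o, int flags);
	bool (*demote_ok)(const struct obj *o);
};

struct obj {
	const struct obj_ops *ops;
	const char *name;
};

static int inode_sync(struct obj *o)
{
	printf("sync %s\n", o->name);
	return 0;
}

static void inode_invalidate(struct obj *o, int flags)
{
	printf("invalidate %s (flags %d)\n", o->name, flags);
}

static bool inode_demote_ok(const struct obj *o)
{
	(void)o;
	return true;
}

static const struct obj_ops inode_obj_ops = {
	.sync       = inode_sync,
	.invalidate = inode_invalidate,
	.demote_ok  = inode_demote_ok,
};

/* Callers go through the table, treating each hook as optional. */
static void demote(struct obj *o)
{
	if (o->ops->demote_ok && !o->ops->demote_ok(o))
		return;
	if (o->ops->sync)
		o->ops->sync(o);
	if (o->ops->invalidate)
		o->ops->invalidate(o, 0);
}

int main(void)
{
	struct obj o = { .ops = &inode_obj_ops, .name = "inode-glock" };

	demote(&o);
	return 0;
}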
|
D | meta_io.h |
    51   extern struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
    52   extern int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
    55   extern struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno,
    73   struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen);
|
D | glops.h |
    25   extern int gfs2_inode_metasync(struct gfs2_glock *gl);
    26   extern void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync);
|
D | lops.c |
    73   struct gfs2_glock *gl = bd->bd_gl;    in maybe_release_space() local
    74   struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;    in maybe_release_space()
    75   struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);    in maybe_release_space()
    76   unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;    in maybe_release_space()
    119  struct gfs2_glock *gl = bd->bd_gl;    in gfs2_unpin() local
    120  list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);    in gfs2_unpin()
    121  atomic_inc(&gl->gl_ail_count);    in gfs2_unpin()
    761  struct gfs2_glock *gl = ip->i_gl;    in buf_lo_scan_elements() local
    784  bh_ip = gfs2_meta_new(gl, blkno);    in buf_lo_scan_elements()
    884  struct gfs2_glock *gl;    in revoke_lo_after_commit() local
    [all …]
|
D | trans.h |
    41   extern void gfs2_trans_add_data(struct gfs2_glock *gl, struct buffer_head *bh);
    42   extern void gfs2_trans_add_meta(struct gfs2_glock *gl, struct buffer_head *bh);
|
D | super.c |
    970   struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;    in gfs2_drop_inode() local
    971   if (test_bit(GLF_DEMOTE, &gl->gl_flags))    in gfs2_drop_inode()
    983   struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;    in gfs2_drop_inode() local
    985   gfs2_glock_hold(gl);    in gfs2_drop_inode()
    986   if (!gfs2_queue_delete_work(gl, 0))    in gfs2_drop_inode()
    987   gfs2_glock_queue_put(gl);    in gfs2_drop_inode()
    1114  struct gfs2_glock *gl = ip->i_gl;    in gfs2_final_release_pages() local
    1119  if (atomic_read(&gl->gl_revokes) == 0) {    in gfs2_final_release_pages()
    1120  clear_bit(GLF_LFLUSH, &gl->gl_flags);    in gfs2_final_release_pages()
    1121  clear_bit(GLF_DIRTY, &gl->gl_flags);    in gfs2_final_release_pages()
    [all …]
|
D | log.c |
    96   struct gfs2_glock *gl = NULL;    in gfs2_ail1_start_one() local
    125  if (gl == bd->bd_gl)    in gfs2_ail1_start_one()
    127  gl = bd->bd_gl;    in gfs2_ail1_start_one()
    679  struct gfs2_glock *gl = bd->bd_gl;    in gfs2_add_revoke() local
    682  if (atomic_inc_return(&gl->gl_revokes) == 1)    in gfs2_add_revoke()
    683  gfs2_glock_hold(gl);    in gfs2_add_revoke()
    688  set_bit(GLF_LFLUSH, &gl->gl_flags);    in gfs2_add_revoke()
    692  void gfs2_glock_remove_revoke(struct gfs2_glock *gl)    in gfs2_glock_remove_revoke() argument
    694  if (atomic_dec_return(&gl->gl_revokes) == 0) {    in gfs2_glock_remove_revoke()
    695  clear_bit(GLF_LFLUSH, &gl->gl_flags);    in gfs2_glock_remove_revoke()
    [all …]
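
Note: gfs2_add_revoke()/gfs2_glock_remove_revoke() above piggyback a glock reference on the revoke counter: take a hold when gl_revokes goes from 0 to 1 and drop it when it falls back to 0. Below is a compact C11-atomics sketch of that hold-on-first / put-on-last idiom; obj, add_revoke() and friends are illustrative names only.

#include <stdatomic.h>
#include <stdio.h>

/* Illustrative object: a reference count plus a counter of outstanding
 * revokes, mirroring the glock refcount vs. gl_revokes. */
struct obj {
	atomic_int refs;
	atomic_int revokes;
};

static void obj_hold(struct obj *o) { atomic_fetch_add(&o->refs, 1); }
static void obj_put(struct obj *o)  { atomic_fetch_sub(&o->refs, 1); }

/* Take one object reference when the first revoke is queued.  Note that
 * atomic_fetch_add() returns the previous value, so comparing against 0
 * matches the kernel's atomic_inc_return(...) == 1. */
static void add_revoke(struct obj *o)
{
	if (atomic_fetch_add(&o->revokes, 1) == 0)
		obj_hold(o);
}

/* ... and drop it again when the last revoke is written out. */
static void remove_revoke(struct obj *o)
{
	if (atomic_fetch_sub(&o->revokes, 1) == 1)
		obj_put(o);
}

int main(void)
{
	struct obj o = { .refs = 1, .revokes = 0 };

	add_revoke(&o);     /* 0 -> 1: takes an extra reference */
	add_revoke(&o);     /* 1 -> 2: no extra reference */
	remove_revoke(&o);
	remove_revoke(&o);  /* 1 -> 0: drops the extra reference */
	printf("refs = %d, revokes = %d\n",
	       atomic_load(&o.refs), atomic_load(&o.revokes));
	return 0;
}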
|
D | rgrp.c |
    703   struct gfs2_glock *gl;    in gfs2_clear_rgrpd() local
    707   gl = rgd->rd_gl;    in gfs2_clear_rgrpd()
    711   if (gl) {    in gfs2_clear_rgrpd()
    712   if (gl->gl_state != LM_ST_UNLOCKED) {    in gfs2_clear_rgrpd()
    713   gfs2_glock_cb(gl, LM_ST_UNLOCKED);    in gfs2_clear_rgrpd()
    714   flush_delayed_work(&gl->gl_work);    in gfs2_clear_rgrpd()
    717   glock_clear_object(gl, rgd);    in gfs2_clear_rgrpd()
    718   gfs2_glock_put(gl);    in gfs2_clear_rgrpd()
    1018  struct gfs2_glock *gl = ip->i_gl;    in gfs2_rindex_update() local
    1025  if (!gfs2_glock_is_locked_by_me(gl)) {    in gfs2_rindex_update()
    [all …]
|
D | log.h |
    72   extern void gfs2_log_flush(struct gfs2_sbd *sdp, struct gfs2_glock *gl,
    80   extern void gfs2_glock_remove_revoke(struct gfs2_glock *gl);
|
D | sys.c |
    229  struct gfs2_glock *gl;    in demote_rq_store() local
    264  rv = gfs2_glock_get(sdp, glnum, glops, 0, &gl);    in demote_rq_store()
    267  gfs2_glock_cb(gl, glmode);    in demote_rq_store()
    268  gfs2_glock_put(gl);    in demote_rq_store()
|
/kernel/linux/linux-5.10/drivers/target/iscsi/cxgbit/ |
D | cxgbit_main.c |
    212  cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,    in cxgbit_copy_frags() argument
    219  __skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,    in cxgbit_copy_frags()
    220  gl->frags[0].offset + offset,    in cxgbit_copy_frags()
    221  gl->frags[0].size - offset);    in cxgbit_copy_frags()
    222  for (i = 1; i < gl->nfrags; i++)    in cxgbit_copy_frags()
    224  gl->frags[i].page,    in cxgbit_copy_frags()
    225  gl->frags[i].offset,    in cxgbit_copy_frags()
    226  gl->frags[i].size);    in cxgbit_copy_frags()
    228  skb_shinfo(skb)->nr_frags += gl->nfrags;    in cxgbit_copy_frags()
    231  get_page(gl->frags[gl->nfrags - 1].page);    in cxgbit_copy_frags()
    [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4vf/ |
D | sge.c |
    1473  const struct pkt_gl *gl,    in copy_frags() argument
    1479  __skb_fill_page_desc(skb, 0, gl->frags[0].page,    in copy_frags()
    1480  gl->frags[0].offset + offset,    in copy_frags()
    1481  gl->frags[0].size - offset);    in copy_frags()
    1482  skb_shinfo(skb)->nr_frags = gl->nfrags;    in copy_frags()
    1483  for (i = 1; i < gl->nfrags; i++)    in copy_frags()
    1484  __skb_fill_page_desc(skb, i, gl->frags[i].page,    in copy_frags()
    1485  gl->frags[i].offset,    in copy_frags()
    1486  gl->frags[i].size);    in copy_frags()
    1489  get_page(gl->frags[gl->nfrags - 1].page);    in copy_frags()
    [all …]
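
Note: the copy_frags() variants here and in cxgb4/sge.c below walk a pkt_gl gather list, trim `offset` already-consumed bytes off the first fragment, copy the remaining fragment descriptors into the skb, and take an extra page reference on the last fragment the hardware will keep reusing. Below is a userspace sketch of that descriptor copy, with plain structs standing in for pkt_gl and the skb frag array; frag, gather_list and this copy_frags() are illustrative stand-ins, not the driver types.

#include <stdio.h>

/* Illustrative fragment descriptor, like the entries of struct pkt_gl:
 * a page (here just a buffer pointer), an offset and a size. */
struct frag {
	const char   *page;
	unsigned int  offset;
	unsigned int  size;
};

struct gather_list {
	struct frag  frags[8];
	unsigned int nfrags;
};

/* Copy the gather list into a destination descriptor array, dropping
 * `offset` bytes (e.g. a consumed header) from the first fragment only.
 * The real code also takes a page reference on the last fragment
 * (get_page()) because the driver recycles that rx buffer. */
static unsigned int copy_frags(struct frag *dst, unsigned int dst_len,
			       const struct gather_list *gl,
			       unsigned int offset)
{
	unsigned int i, n = gl->nfrags < dst_len ? gl->nfrags : dst_len;

	if (!n)
		return 0;

	dst[0].page   = gl->frags[0].page;
	dst[0].offset = gl->frags[0].offset + offset;
	dst[0].size   = gl->frags[0].size - offset;
	for (i = 1; i < n; i++)
		dst[i] = gl->frags[i];

	return n;
}

int main(void)
{
	static const char page0[] = "HDRpayload-0";
	static const char page1[] = "payload-1";
	struct gather_list gl = {
		.frags  = { { page0, 0, 12 }, { page1, 0, 9 } },
		.nfrags = 2,
	};
	struct frag dst[8];
	unsigned int n = copy_frags(dst, 8, &gl, 3 /* skip a 3-byte header */);

	printf("%u frags, first starts at offset %u with %u bytes\n",
	       n, dst[0].offset, dst[0].size);
	return 0;
}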
|
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/inline_crypto/chtls/ |
D | chtls_main.c |
    339  static struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,    in copy_gl_to_skb_pkt() argument
    349  skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req)    in copy_gl_to_skb_pkt()
    353  __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req)    in copy_gl_to_skb_pkt()
    358  , gl->va + pktshift,    in copy_gl_to_skb_pkt()
    359  gl->tot_len - pktshift);    in copy_gl_to_skb_pkt()
    365  const struct pkt_gl *gl, const __be64 *rsp)    in chtls_recv_packet() argument
    371  skb = copy_gl_to_skb_pkt(gl, rsp, cdev->lldi->sge_pktshift);    in chtls_recv_packet()
    439  const struct pkt_gl *gl)    in chtls_uld_rx_handler() argument
    448  if (chtls_recv_packet(cdev, gl, rsp) < 0)    in chtls_uld_rx_handler()
    453  if (!gl)    in chtls_uld_rx_handler()
    [all …]
|
/kernel/linux/linux-5.10/drivers/infiniband/hw/cxgb4/ |
D | device.c |
    1104  static inline struct sk_buff *copy_gl_to_skb_pkt(const struct pkt_gl *gl,    in copy_gl_to_skb_pkt() argument
    1117  skb = alloc_skb(gl->tot_len + sizeof(struct cpl_pass_accept_req) +    in copy_gl_to_skb_pkt()
    1122  __skb_put(skb, gl->tot_len + sizeof(struct cpl_pass_accept_req) +    in copy_gl_to_skb_pkt()
    1137  gl->va + pktshift,    in copy_gl_to_skb_pkt()
    1138  gl->tot_len - pktshift);    in copy_gl_to_skb_pkt()
    1142  static inline int recv_rx_pkt(struct c4iw_dev *dev, const struct pkt_gl *gl,    in recv_rx_pkt() argument
    1151  skb = copy_gl_to_skb_pkt(gl , rsp, dev->rdev.lldi.sge_pktshift);    in recv_rx_pkt()
    1167  const struct pkt_gl *gl)    in c4iw_uld_rx_handler() argument
    1174  if (gl == NULL) {    in c4iw_uld_rx_handler()
    1183  } else if (gl == CXGB4_MSG_AN) {    in c4iw_uld_rx_handler()
    [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4/ |
D | sge.c |
    3313  const struct pkt_gl *gl, unsigned int offset)    in copy_frags() argument
    3318  __skb_fill_page_desc(skb, 0, gl->frags[0].page,    in copy_frags()
    3319  gl->frags[0].offset + offset,    in copy_frags()
    3320  gl->frags[0].size - offset);    in copy_frags()
    3321  skb_shinfo(skb)->nr_frags = gl->nfrags;    in copy_frags()
    3322  for (i = 1; i < gl->nfrags; i++)    in copy_frags()
    3323  __skb_fill_page_desc(skb, i, gl->frags[i].page,    in copy_frags()
    3324  gl->frags[i].offset,    in copy_frags()
    3325  gl->frags[i].size);    in copy_frags()
    3328  get_page(gl->frags[gl->nfrags - 1].page);    in copy_frags()
    [all …]
|
D | cxgb4_uld.h |
    483  const struct pkt_gl *gl);
    487  const struct pkt_gl *gl,
    529  struct sk_buff *cxgb4_pktgl_to_skb(const struct pkt_gl *gl,
|
D | cxgb4_uld.c |
    75   const struct pkt_gl *gl)    in uldrx_handler() argument
    88   rsp, gl, &q->lro_mgr,    in uldrx_handler()
    92   rsp, gl);    in uldrx_handler()
    99   if (!gl)    in uldrx_handler()
    101  else if (gl == CXGB4_MSG_AN)    in uldrx_handler()
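
Note: uldrx_handler(), like c4iw_uld_rx_handler() and chtls_uld_rx_handler() above, dispatches on the gather-list pointer itself: NULL means the payload is inline in the response descriptor, the CXGB4_MSG_AN sentinel means an async notification, and anything else is a real packet gather list. Below is a minimal sketch of that sentinel-pointer dispatch; gather_list, MSG_AN and rx_handler() are stand-ins, and the real CXGB4_MSG_AN is a magic constant rather than the address of an object.

#include <stdio.h>

struct gather_list { int nfrags; };

/* Sentinel used to flag "async notification", in the spirit of
 * CXGB4_MSG_AN. */
static struct gather_list msg_an_sentinel;
#define MSG_AN (&msg_an_sentinel)

/* Dispatch an rx event: no gather list means the payload is inline in
 * the response, the sentinel means an async notification, anything
 * else is a real packet gather list. */
static void rx_handler(const void *rsp, const struct gather_list *gl)
{
	if (!gl)
		printf("inline response, first word = %ld\n",
		       *(const long *)rsp);
	else if (gl == MSG_AN)
		printf("async notification\n");
	else
		printf("packet with %d fragment(s)\n", gl->nfrags);
}

int main(void)
{
	long rsp[4] = { 0 };
	struct gather_list pkt = { .nfrags = 2 };

	rx_handler(rsp, NULL);
	rx_handler(rsp, MSG_AN);
	rx_handler(rsp, &pkt);
	return 0;
}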
|