/fs/

splice.c
     76  buf->flags |= PIPE_BUF_FLAG_LRU;  in page_cache_pipe_buf_try_steal()
     94  buf->flags &= ~PIPE_BUF_FLAG_LRU;  in page_cache_pipe_buf_release()
    149  if (!(buf->flags & PIPE_BUF_FLAG_GIFT))  in user_page_pipe_buf_try_steal()
    152  buf->flags |= PIPE_BUF_FLAG_LRU;  in user_page_pipe_buf_try_steal()
    207  buf->flags = 0;  in splice_to_pipe()
    299  unsigned int flags)  in generic_file_splice_read()  (argument)
    354  more = (sd->flags & SPLICE_F_MORE) ? MSG_MORE : 0;  in pipe_to_sendpage()
    484  if (sd->flags & SPLICE_F_NONBLOCK)  in splice_from_pipe_next()
    581  loff_t *ppos, size_t len, unsigned int flags,  in splice_from_pipe()  (argument)
    587  .flags = flags,  in splice_from_pipe()
    [all …]

open.c
    421  static long do_faccessat(int dfd, const char __user *filename, int mode, int flags)  in do_faccessat()  (argument)
    432  if (flags & ~(AT_EACCESS | AT_SYMLINK_NOFOLLOW | AT_EMPTY_PATH))  in do_faccessat()
    435  if (flags & AT_SYMLINK_NOFOLLOW)  in do_faccessat()
    437  if (flags & AT_EMPTY_PATH)  in do_faccessat()
    440  if (!(flags & AT_EACCESS)) {  in do_faccessat()
    499  int, flags)  in SYSCALL_DEFINE4()  (argument)
    501  return do_faccessat(dfd, filename, mode, flags);  in SYSCALL_DEFINE4()
   1018  struct file *dentry_open(const struct path *path, int flags,  in dentry_open()  (argument)
   1029  f = alloc_empty_file(flags, cred);  in dentry_open()
   1058  struct file *dentry_create(const struct path *path, int flags, umode_t mode,  in dentry_create()  (argument)
    [all …]

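The do_faccessat() lines above show the usual shape of a flags argument at a syscall boundary: reject any bit outside the accepted set first, then translate the remaining bits into internal lookup state. A minimal sketch of that pattern, using invented MYFL_*/LK_* constants rather than the real AT_* and LOOKUP_* values:

    #include <errno.h>

    /* Hypothetical flag bits standing in for the AT_* constants. */
    #define MYFL_EACCESS     0x01
    #define MYFL_NOFOLLOW    0x02
    #define MYFL_EMPTY_PATH  0x04

    /* Hypothetical internal lookup bits derived from the user flags. */
    #define LK_FOLLOW        0x10
    #define LK_EMPTY         0x20

    static int parse_access_flags(int flags, unsigned int *lookup_flags)
    {
        unsigned int lf = LK_FOLLOW;    /* follow symlinks by default */

        /* Any bit outside the supported set makes the call invalid. */
        if (flags & ~(MYFL_EACCESS | MYFL_NOFOLLOW | MYFL_EMPTY_PATH))
            return -EINVAL;

        if (flags & MYFL_NOFOLLOW)
            lf &= ~LK_FOLLOW;
        if (flags & MYFL_EMPTY_PATH)
            lf |= LK_EMPTY;

        *lookup_flags = lf;
        return 0;
    }

Rejecting unknown bits up front is what makes a new, unsupported flag fail with EINVAL rather than being silently ignored.
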
namei.c
    129  getname_flags(const char __user *filename, int flags, int *empty)  in getname_flags()  (argument)
    195  if (!(flags & LOOKUP_EMPTY)) {  in getname_flags()
    210  int flags = (uflags & AT_EMPTY_PATH) ? LOOKUP_EMPTY : 0;  in getname_uflags()  (local)
    212  return getname_flags(filename, flags, NULL);  in getname_uflags()
    571  unsigned int flags, state;  (member)
    635  nd->flags & LOOKUP_RCU ? GFP_ATOMIC : GFP_KERNEL);  in nd_alloc_stack()
    672  nd->flags &= ~LOOKUP_RCU;  in leave_rcu()
    680  if (!(nd->flags & LOOKUP_RCU)) {  in terminate_walk()
    723  if (unlikely(nd->flags & LOOKUP_CACHED)) {  in legitimize_links()
    774  BUG_ON(!(nd->flags & LOOKUP_RCU));  in try_to_unlazy()
    [all …]

ioctl.c
    116  u64 phys, u64 len, u32 flags)  in fiemap_fill_next_extent()  (argument)
    124  return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;  in fiemap_fill_next_extent()
    130  if (flags & SET_UNKNOWN_FLAGS)  in fiemap_fill_next_extent()
    131  flags |= FIEMAP_EXTENT_UNKNOWN;  in fiemap_fill_next_extent()
    132  if (flags & SET_NO_UNMOUNTED_IO_FLAGS)  in fiemap_fill_next_extent()
    133  flags |= FIEMAP_EXTENT_ENCODED;  in fiemap_fill_next_extent()
    134  if (flags & SET_NOT_ALIGNED_FLAGS)  in fiemap_fill_next_extent()
    135  flags |= FIEMAP_EXTENT_NOT_ALIGNED;  in fiemap_fill_next_extent()
    141  extent.fe_flags = flags;  in fiemap_fill_next_extent()
    150  return (flags & FIEMAP_EXTENT_LAST) ? 1 : 0;  in fiemap_fill_next_extent()
    [all …]

timerfd.c
     67  unsigned long flags;  in timerfd_triggered()  (local)
     69  spin_lock_irqsave(&ctx->wqh.lock, flags);  in timerfd_triggered()
     73  spin_unlock_irqrestore(&ctx->wqh.lock, flags);  in timerfd_triggered()
    103  unsigned long flags;  in timerfd_clock_was_set()  (local)
    109  spin_lock_irqsave(&ctx->wqh.lock, flags);  in timerfd_clock_was_set()
    115  spin_unlock_irqrestore(&ctx->wqh.lock, flags);  in timerfd_clock_was_set()
    161  static void timerfd_setup_cancel(struct timerfd_ctx *ctx, int flags)  in timerfd_setup_cancel()  (argument)
    166  (flags & TFD_TIMER_ABSTIME) && (flags & TFD_TIMER_CANCEL_ON_SET)) {  in timerfd_setup_cancel()
    191  static int timerfd_setup(struct timerfd_ctx *ctx, int flags,  in timerfd_setup()  (argument)
    198  htmode = (flags & TFD_TIMER_ABSTIME) ?  in timerfd_setup()
    [all …]

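The unsigned long flags locals in timerfd.c (and in btrfs/subpage.c further down) are not feature bitmasks: they hold the interrupt state that spin_lock_irqsave() saves and spin_unlock_irqrestore() restores. A kernel-style sketch of that idiom, using a hypothetical context structure and counter:

    #include <linux/spinlock.h>

    struct my_ctx {
        spinlock_t lock;        /* initialised elsewhere with spin_lock_init() */
        int ticks;
    };

    static void my_ctx_tick(struct my_ctx *ctx)
    {
        unsigned long flags;    /* saved IRQ state, not a flag bitmask */

        /* Disable local interrupts, take the lock, remember the old state. */
        spin_lock_irqsave(&ctx->lock, flags);
        ctx->ticks++;
        spin_unlock_irqrestore(&ctx->lock, flags);
    }

This flags is only ever handed back to spin_unlock_irqrestore(); it is unrelated to the bitmask-style flags fields elsewhere in this listing.
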
/fs/ubifs/

ioctl.c
     37  unsigned int flags = ubifs_inode(inode)->flags;  in ubifs_set_inode_flags()  (local)
     41  if (flags & UBIFS_SYNC_FL)  in ubifs_set_inode_flags()
     43  if (flags & UBIFS_APPEND_FL)  in ubifs_set_inode_flags()
     45  if (flags & UBIFS_IMMUTABLE_FL)  in ubifs_set_inode_flags()
     47  if (flags & UBIFS_DIRSYNC_FL)  in ubifs_set_inode_flags()
     49  if (flags & UBIFS_CRYPT_FL)  in ubifs_set_inode_flags()
    105  static int setflags(struct inode *inode, int flags)  in setflags()  (argument)
    118  ui->flags &= ~ioctl2ubifs(UBIFS_SETTABLE_IOCTL_FLAGS);  in setflags()
    119  ui->flags |= ioctl2ubifs(flags);  in setflags()
    136  int flags = ubifs2ioctl(ubifs_inode(inode)->flags);  in ubifs_fileattr_get()  (local)
    [all …]

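ubifs_set_inode_flags() above propagates per-inode on-disk flag bits into the VFS-visible inode flags, one conditional per bit. A hedged sketch of that propagation, with invented ODF_*/IF_* constants standing in for the UBIFS_*_FL and S_* values:

    /* Invented on-disk flag bits. */
    #define ODF_SYNC       0x01
    #define ODF_APPEND     0x02
    #define ODF_IMMUTABLE  0x04

    /* Invented in-memory (VFS-style) inode flag bits. */
    #define IF_SYNC        0x10
    #define IF_APPEND      0x20
    #define IF_IMMUTABLE   0x40

    struct my_inode {
        unsigned int disk_flags;   /* as stored on the medium */
        unsigned int i_flags;      /* as seen by the VFS layer */
    };

    static void my_set_inode_flags(struct my_inode *inode)
    {
        unsigned int flags = inode->disk_flags;

        /* Clear first so that bits dropped on disk are dropped here too. */
        inode->i_flags &= ~(IF_SYNC | IF_APPEND | IF_IMMUTABLE);
        if (flags & ODF_SYNC)
            inode->i_flags |= IF_SYNC;
        if (flags & ODF_APPEND)
            inode->i_flags |= IF_APPEND;
        if (flags & ODF_IMMUTABLE)
            inode->i_flags |= IF_IMMUTABLE;
    }
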
lprops.c
    188  lp->flags &= ~LPROPS_CAT_MASK;  in add_to_lpt_heap()
    189  lp->flags |= LPROPS_UNCAT;  in add_to_lpt_heap()
    290  lprops->flags &= ~LPROPS_CAT_MASK;  in ubifs_add_to_cat()
    291  lprops->flags |= cat;  in ubifs_add_to_cat()
    346  cat = new_lprops->flags & LPROPS_CAT_MASK;  in ubifs_replace_cat()
    375  int cat = lprops->flags & LPROPS_CAT_MASK;  in ubifs_ensure_cat()
    399  if (lprops->flags & LPROPS_TAKEN)  in ubifs_categorize_lprops()
    403  ubifs_assert(c, !(lprops->flags & LPROPS_INDEX));  in ubifs_categorize_lprops()
    408  if (lprops->flags & LPROPS_INDEX)  in ubifs_categorize_lprops()
    414  if (lprops->flags & LPROPS_INDEX) {  in ubifs_categorize_lprops()
    [all …]

find.c
     44  int n, cat = lprops->flags & LPROPS_CAT_MASK;  in valuable()
     90  if (lprops->flags & LPROPS_TAKEN)  in scan_for_dirty_cb()
     99  if (data->exclude_index && lprops->flags & LPROPS_INDEX)  in scan_for_dirty_cb()
    151  if (lprops->flags & LPROPS_TAKEN)  in scan_for_dirty()
    155  if (exclude_index && (lprops->flags & LPROPS_INDEX))  in scan_for_dirty()
    184  ubifs_assert(c, !(lprops->flags & LPROPS_TAKEN));  in scan_for_dirty()
    185  ubifs_assert(c, !exclude_index || !(lprops->flags & LPROPS_INDEX));  in scan_for_dirty()
    322  lp->lnum, lp->free, lp->dirty, lp->flags);  in ubifs_find_dirty_leb()
    325  lp->flags | LPROPS_TAKEN, 0);  in ubifs_find_dirty_leb()
    357  if (lprops->flags & LPROPS_TAKEN)  in scan_for_free_cb()
    [all …]

/fs/ocfs2/dlm/

dlmunlock.c
     56  int flags,
     85  int flags, int *call_ast,  in dlmunlock_common()  (argument)
     95  flags & LKM_VALBLK);  in dlmunlock_common()
    107  if (in_use && !(flags & LKM_CANCEL)) {  in dlmunlock_common()
    116  if (master_node && !(flags & LKM_CANCEL)) {  in dlmunlock_common()
    139  if (flags & LKM_CANCEL)  in dlmunlock_common()
    148  if (flags & LKM_VALBLK) {  in dlmunlock_common()
    153  flags |= LKM_PUT_LVB; /* let the send function  in dlmunlock_common()
    160  if (flags & LKM_CANCEL)  in dlmunlock_common()
    167  flags, owner);  in dlmunlock_common()
    [all …]

dlmconvert.c
     43  struct dlm_lock *lock, int flags,
     48  struct dlm_lock *lock, int flags, int type);
     61  struct dlm_lock *lock, int flags, int type)  in dlmconvert_master()  (argument)
     72  status = __dlmconvert_master(dlm, res, lock, flags, type,  in dlmconvert_master()
    104  struct dlm_lock *lock, int flags,  in __dlmconvert_master()  (argument)
    134  if (flags & LKM_VALBLK) {  in __dlmconvert_master()
    141  lock->lksb->flags |= DLM_LKSB_PUT_LVB;  in __dlmconvert_master()
    151  lock->lksb->flags |= DLM_LKSB_GET_LVB;  in __dlmconvert_master()
    157  flags &= ~(LKM_VALBLK);  in __dlmconvert_master()
    195  if (lock->lksb->flags & DLM_LKSB_PUT_LVB)  in __dlmconvert_master()
    [all …]

dlmlock.c
     45  struct dlm_lock *lock, int flags);
    103  struct dlm_lock *lock, int flags)  in dlmlock_master()  (argument)
    149  if (flags & LKM_NOQUEUE) {  in dlmlock_master()
    186  lock->lksb->flags &= ~DLM_LKSB_GET_LVB;  in dlm_revert_pending_lock()
    199  struct dlm_lock *lock, int flags)  in dlmlock_remote()  (argument)
    206  res->lockname.name, flags);  in dlmlock_remote()
    228  status = dlm_send_remote_lock_request(dlm, res, lock, flags);  in dlmlock_remote()
    285  struct dlm_lock *lock, int flags)  in dlm_send_remote_lock_request()  (argument)
    296  create.flags = cpu_to_be32(flags);  in dlm_send_remote_lock_request()
    389  newlock->ml.flags = 0;  in dlm_init_lock()
    [all …]

/fs/afs/

rotate.c
     52  if (op->flags & AFS_OPERATION_CUR_ONLY) {  in afs_start_fs_iteration()
     61  if (test_and_clear_bit(AFS_VNODE_CB_PROMISED, &vnode->flags))  in afs_start_fs_iteration()
     92  if (!(op->flags & AFS_OPERATION_UNINTR)) {  in afs_sleep_and_retry()
    123  if (op->flags & AFS_OPERATION_STOP) {  in afs_select_fileserver()
    139  op->flags |= AFS_OPERATION_STOP;  in afs_select_fileserver()
    154  if (op->flags & AFS_OPERATION_VNOVOL) {  in afs_select_fileserver()
    163  set_bit(AFS_VOLUME_NEEDS_UPDATE, &op->volume->flags);  in afs_select_fileserver()
    168  if (test_bit(AFS_VOLUME_DELETED, &op->volume->flags)) {  in afs_select_fileserver()
    182  op->flags |= AFS_OPERATION_VNOVOL;  in afs_select_fileserver()
    196  if (!test_and_set_bit(AFS_VOLUME_OFFLINE, &op->volume->flags)) {  in afs_select_fileserver()
    [all …]

vl_probe.c
     20  if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {  in afs_finished_vl_probe()
     22  clear_bit(AFS_VLSERVER_FL_RESPONDING, &server->flags);  in afs_finished_vl_probe()
     25  clear_bit_unlock(AFS_VLSERVER_FL_PROBING, &server->flags);  in afs_finished_vl_probe()
     26  wake_up_bit(&server->flags, AFS_VLSERVER_FL_PROBING);  in afs_finished_vl_probe()
     66  if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED)) {  in afs_vlserver_probe_result()
     76  server->probe.flags |= AFS_VLSERVER_PROBE_LOCAL_FAILURE;  in afs_vlserver_probe_result()
     93  if (!(server->probe.flags & AFS_VLSERVER_PROBE_RESPONDED) &&  in afs_vlserver_probe_result()
    107  server->probe.flags |= AFS_VLSERVER_PROBE_IS_YFS;  in afs_vlserver_probe_result()
    108  set_bit(AFS_VLSERVER_FL_IS_YFS, &server->flags);  in afs_vlserver_probe_result()
    111  server->probe.flags |= AFS_VLSERVER_PROBE_NOT_YFS;  in afs_vlserver_probe_result()
    [all …]

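vl_probe.c above manages a long-lived flags word with the atomic bit helpers, and pairs clear_bit_unlock() with wake_up_bit() so that anyone sleeping in wait_on_bit() on the same bit is woken when the probe finishes. A kernel-style sketch of that pattern on a made-up server structure (the values are bit numbers, not masks):

    #include <linux/bitops.h>
    #include <linux/errno.h>
    #include <linux/sched.h>
    #include <linux/wait_bit.h>

    #define MY_SERVER_PROBING  0    /* bit number within server->flags */

    struct my_server {
        unsigned long flags;
    };

    static int my_server_start_probe(struct my_server *server)
    {
        /* Only one prober at a time: nonzero means the bit was already set. */
        if (test_and_set_bit(MY_SERVER_PROBING, &server->flags))
            return -EBUSY;
        return 0;
    }

    static void my_server_finish_probe(struct my_server *server)
    {
        /* Release semantics on the clear, then wake any waiters. */
        clear_bit_unlock(MY_SERVER_PROBING, &server->flags);
        wake_up_bit(&server->flags, MY_SERVER_PROBING);
    }

    static int my_server_wait_for_probe(struct my_server *server)
    {
        /* Sleeps until the bit is clear; may return -EINTR on a signal. */
        return wait_on_bit(&server->flags, MY_SERVER_PROBING,
                           TASK_INTERRUPTIBLE);
    }

The delegation.c entries under /fs/nfs/ below use the same test_bit()/test_and_set_bit() helpers on their delegation->flags word.
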
/fs/nfs/

delegation.c
     42  if (!test_and_set_bit(NFS_DELEGATION_REVOKED, &delegation->flags)) {  in nfs_mark_delegation_revoked()
     45  if (!test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))  in nfs_mark_delegation_revoked()
     75  set_bit(NFS_DELEGATION_REFERENCED, &delegation->flags);  in nfs_mark_delegation_referenced()
     81  set_bit(NFS_DELEGATION_RETURN, &delegation->flags);  in nfs_mark_return_delegation()
     87  fmode_t flags)  in nfs4_is_valid_delegation()  (argument)
     89  if (delegation != NULL && (delegation->type & flags) == flags &&  in nfs4_is_valid_delegation()
     90  !test_bit(NFS_DELEGATION_REVOKED, &delegation->flags) &&  in nfs4_is_valid_delegation()
     91  !test_bit(NFS_DELEGATION_RETURNING, &delegation->flags))  in nfs4_is_valid_delegation()
    107  nfs4_do_check_delegation(struct inode *inode, fmode_t flags, bool mark)  in nfs4_do_check_delegation()  (argument)
    112  flags &= FMODE_READ|FMODE_WRITE;  in nfs4_do_check_delegation()
    [all …]

fs_context.c
    438  ctx->flags &= ~NFS_MOUNT_VER3;  in nfs_parse_version_string()
    444  ctx->flags |= NFS_MOUNT_VER3;  in nfs_parse_version_string()
    505  ctx->flags |= NFS_MOUNT_SOFT;  in nfs_fs_context_parse_param()
    506  ctx->flags &= ~NFS_MOUNT_SOFTERR;  in nfs_fs_context_parse_param()
    509  ctx->flags |= NFS_MOUNT_SOFTERR | NFS_MOUNT_SOFTREVAL;  in nfs_fs_context_parse_param()
    510  ctx->flags &= ~NFS_MOUNT_SOFT;  in nfs_fs_context_parse_param()
    513  ctx->flags &= ~(NFS_MOUNT_SOFT |  in nfs_fs_context_parse_param()
    519  ctx->flags &= ~NFS_MOUNT_SOFTREVAL;  in nfs_fs_context_parse_param()
    521  ctx->flags |= NFS_MOUNT_SOFTREVAL;  in nfs_fs_context_parse_param()
    525  ctx->flags &= ~NFS_MOUNT_POSIX;  in nfs_fs_context_parse_param()
    [all …]

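The nfs_fs_context_parse_param() lines above treat several mount behaviours as mutually exclusive within one flags word: choosing an option sets its bit and clears the bits it conflicts with. A small sketch of that option-parsing style, with invented MNT_* bits in place of the NFS_MOUNT_* flags:

    /* Hypothetical mount behaviour bits. */
    #define MNT_SOFT       0x01
    #define MNT_SOFTERR    0x02
    #define MNT_SOFTREVAL  0x04

    enum timeout_opt { OPT_SOFT, OPT_SOFTERR, OPT_HARD };

    static void apply_timeout_option(unsigned int *flags, enum timeout_opt opt)
    {
        switch (opt) {
        case OPT_SOFT:
            /* "soft" and "softerr" cannot both be active. */
            *flags |= MNT_SOFT;
            *flags &= ~MNT_SOFTERR;
            break;
        case OPT_SOFTERR:
            *flags |= MNT_SOFTERR | MNT_SOFTREVAL;
            *flags &= ~MNT_SOFT;
            break;
        case OPT_HARD:
            /* "hard" switches the soft-timeout behaviours off. */
            *flags &= ~(MNT_SOFT | MNT_SOFTERR | MNT_SOFTREVAL);
            break;
        }
    }
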
/fs/notify/fanotify/

fanotify_user.c
    949  struct path *path, unsigned int flags, __u64 mask,  in fanotify_find_path()  (argument)
    955  dfd, filename, flags);  in fanotify_find_path()
    965  if ((flags & FAN_MARK_ONLYDIR) &&  in fanotify_find_path()
    977  if (!(flags & FAN_MARK_DONT_FOLLOW))  in fanotify_find_path()
    979  if (flags & FAN_MARK_ONLYDIR)  in fanotify_find_path()
   1003  __u32 mask, unsigned int flags,  in fanotify_mark_remove_from_mask()  (argument)
   1012  if (!(flags & FANOTIFY_MARK_IGNORE_BITS)) {  in fanotify_mark_remove_from_mask()
   1032  unsigned int flags, __u32 umask)  in fanotify_remove_mark()  (argument)
   1045  removed = fanotify_mark_remove_from_mask(fsn_mark, mask, flags,  in fanotify_remove_mark()
   1062  unsigned int flags, __u32 umask)  in fanotify_remove_vfsmount_mark()  (argument)
    [all …]

/fs/iomap/

direct-io.c
     34  unsigned flags;  (member)
     89  ret = dops->end_io(iocb, dio->size, ret, dio->flags);  in iomap_dio_complete()
     95  !(dio->flags & IOMAP_DIO_WRITE))  in iomap_dio_complete()
    112  (dio->flags & IOMAP_DIO_WRITE) && inode->i_mapping->nrpages) {  in iomap_dio_complete()
    129  if (dio->flags & IOMAP_DIO_NEED_SYNC)  in iomap_dio_complete()
    160  bool should_dirty = (dio->flags & IOMAP_DIO_DIRTY);  in iomap_dio_bio_end_io()
    170  } else if (dio->flags & IOMAP_DIO_WRITE) {  in iomap_dio_bio_end_io()
    220  if (!(dio->flags & IOMAP_DIO_WRITE)) {  in iomap_dio_bio_opflags()
    221  WARN_ON_ONCE(iomap->flags & IOMAP_F_ZONE_APPEND);  in iomap_dio_bio_opflags()
    225  if (iomap->flags & IOMAP_F_ZONE_APPEND)  in iomap_dio_bio_opflags()
    [all …]

/fs/xfs/

xfs_quotaops.c
     37  tstate->flags |= QCI_SYSFILE;  in xfs_qm_fill_state()
     67  state->s_state[USRQUOTA].flags |= QCI_ACCT_ENABLED;  in xfs_fs_get_quota_state()
     69  state->s_state[USRQUOTA].flags |= QCI_LIMITS_ENFORCED;  in xfs_fs_get_quota_state()
     71  state->s_state[GRPQUOTA].flags |= QCI_ACCT_ENABLED;  in xfs_fs_get_quota_state()
     73  state->s_state[GRPQUOTA].flags |= QCI_LIMITS_ENFORCED;  in xfs_fs_get_quota_state()
     75  state->s_state[PRJQUOTA].flags |= QCI_ACCT_ENABLED;  in xfs_fs_get_quota_state()
     77  state->s_state[PRJQUOTA].flags |= QCI_LIMITS_ENFORCED;  in xfs_fs_get_quota_state()
    138  unsigned int flags = 0;  in xfs_quota_flags()  (local)
    141  flags |= XFS_UQUOTA_ACCT;  in xfs_quota_flags()
    143  flags |= XFS_PQUOTA_ACCT;  in xfs_quota_flags()
    [all …]

xfs_inode.h
    186  __xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)  in __xfs_iflags_set()  (argument)
    188  ip->i_flags |= flags;  in __xfs_iflags_set()
    192  xfs_iflags_set(xfs_inode_t *ip, unsigned short flags)  in xfs_iflags_set()  (argument)
    195  __xfs_iflags_set(ip, flags);  in xfs_iflags_set()
    200  xfs_iflags_clear(xfs_inode_t *ip, unsigned short flags)  in xfs_iflags_clear()  (argument)
    203  ip->i_flags &= ~flags;  in xfs_iflags_clear()
    208  __xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)  in __xfs_iflags_test()  (argument)
    210  return (ip->i_flags & flags);  in __xfs_iflags_test()
    214  xfs_iflags_test(xfs_inode_t *ip, unsigned short flags)  in xfs_iflags_test()  (argument)
    218  ret = __xfs_iflags_test(ip, flags);  in xfs_iflags_test()
    [all …]

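xfs_inode.h above splits each flag helper into a double-underscore variant that touches ip->i_flags directly and a public wrapper that takes the protecting lock around it. A generic sketch of that layering, assuming the flags word is guarded by a spinlock kept next to it:

    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct my_inode {
        spinlock_t     i_flags_lock;   /* protects i_flags */
        unsigned short i_flags;
    };

    /* __ variants: caller must already hold i_flags_lock. */
    static inline void __my_iflags_set(struct my_inode *ip, unsigned short flags)
    {
        ip->i_flags |= flags;
    }

    static inline bool __my_iflags_test(struct my_inode *ip, unsigned short flags)
    {
        return (ip->i_flags & flags) != 0;
    }

    /* Public wrappers take the lock themselves. */
    static inline void my_iflags_set(struct my_inode *ip, unsigned short flags)
    {
        spin_lock(&ip->i_flags_lock);
        __my_iflags_set(ip, flags);
        spin_unlock(&ip->i_flags_lock);
    }

    static inline void my_iflags_clear(struct my_inode *ip, unsigned short flags)
    {
        spin_lock(&ip->i_flags_lock);
        ip->i_flags &= ~flags;
        spin_unlock(&ip->i_flags_lock);
    }

    static inline bool my_iflags_test(struct my_inode *ip, unsigned short flags)
    {
        bool ret;

        spin_lock(&ip->i_flags_lock);
        ret = __my_iflags_test(ip, flags);
        spin_unlock(&ip->i_flags_lock);
        return ret;
    }

Callers that already hold the lock (for instance to check and change several bits atomically) use the __ variants; everything else goes through the locking wrappers.
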
xfs_qm_syscalls.c
     25  uint flags)  in xfs_qm_scall_quotaoff()  (argument)
     32  if ((mp->m_qflags & flags) == 0)  in xfs_qm_scall_quotaoff()
     39  if (flags & XFS_ALL_QUOTA_ACCT)  in xfs_qm_scall_quotaoff()
     43  mp->m_qflags &= ~(flags & XFS_ALL_QUOTA_ENFD);  in xfs_qm_scall_quotaoff()
    104  uint flags)  in xfs_qm_scall_trunc_qfiles()  (argument)
    108  if (!xfs_has_quota(mp) || flags == 0 ||  in xfs_qm_scall_trunc_qfiles()
    109  (flags & ~XFS_QMOPT_QUOTALL)) {  in xfs_qm_scall_trunc_qfiles()
    111  __func__, flags, mp->m_qflags);  in xfs_qm_scall_trunc_qfiles()
    115  if (flags & XFS_QMOPT_UQUOTA) {  in xfs_qm_scall_trunc_qfiles()
    120  if (flags & XFS_QMOPT_GQUOTA) {  in xfs_qm_scall_trunc_qfiles()
    [all …]

xfs_iomap.c
     89  iomap->flags = iomap_flags;  in xfs_bmbt_to_iomap()
     93  iomap->flags |= IOMAP_F_DIRTY;  in xfs_bmbt_to_iomap()
    197  unsigned int flags,  in xfs_iomap_write_direct()  (argument)
    239  if (flags & IOMAP_DAX) {  in xfs_iomap_write_direct()
    623  unsigned flags,  in imap_needs_alloc()  (argument)
    628  if (flags & IOMAP_ZERO)  in imap_needs_alloc()
    635  if ((flags & IOMAP_DAX) && imap->br_state == XFS_EXT_UNWRITTEN)  in imap_needs_alloc()
    643  unsigned int flags,  in imap_needs_cow()  (argument)
    651  if (flags & IOMAP_ZERO) {  in imap_needs_cow()
    664  unsigned flags,  in xfs_ilock_for_iomap()  (argument)
    [all …]

/fs/jfs/

ioctl.c
     42  static long jfs_map_ext2(unsigned long flags, int from)  in jfs_map_ext2()  (argument)
     49  if (jfs_map[index].ext2_flag & flags)  in jfs_map_ext2()
     52  if (jfs_map[index].jfs_flag & flags)  in jfs_map_ext2()
     63  unsigned int flags = jfs_inode->mode2 & JFS_FL_USER_VISIBLE;  in jfs_fileattr_get()  (local)
     68  fileattr_fill_flags(fa, jfs_map_ext2(flags, 0));  in jfs_fileattr_get()
     78  unsigned int flags;  in jfs_fileattr_set()  (local)
     86  flags = jfs_map_ext2(fa->flags, 1);  in jfs_fileattr_set()
     88  flags &= ~JFS_DIRSYNC_FL;  in jfs_fileattr_set()
     94  flags = flags & JFS_FL_USER_MODIFIABLE;  in jfs_fileattr_set()
     95  flags |= jfs_inode->mode2 & ~JFS_FL_USER_MODIFIABLE;  in jfs_fileattr_set()
    [all …]

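jfs_map_ext2() above converts a whole flags word between the generic ext2-style attribute bits and JFS's own bits by walking a small mapping table in either direction. A hedged sketch of that table-driven translation, with invented GEN_*/DISK_* values:

    #include <stddef.h>

    /* Invented flag values for illustration only. */
    #define GEN_SYNC_FL     0x0008
    #define GEN_APPEND_FL   0x0020
    #define GEN_NODUMP_FL   0x0040

    #define DISK_SYNC_FL    0x0001
    #define DISK_APPEND_FL  0x0002
    #define DISK_NODUMP_FL  0x0004

    static const struct {
        unsigned long generic_flag;
        unsigned long disk_flag;
    } flag_map[] = {
        { GEN_SYNC_FL,   DISK_SYNC_FL   },
        { GEN_APPEND_FL, DISK_APPEND_FL },
        { GEN_NODUMP_FL, DISK_NODUMP_FL },
    };

    /* Translate a whole flags word; 'to_disk' selects the direction. */
    static unsigned long map_flags(unsigned long flags, int to_disk)
    {
        unsigned long mapped = 0;
        size_t i;

        for (i = 0; i < sizeof(flag_map) / sizeof(flag_map[0]); i++) {
            if (to_disk) {
                if (flag_map[i].generic_flag & flags)
                    mapped |= flag_map[i].disk_flag;
            } else {
                if (flag_map[i].disk_flag & flags)
                    mapped |= flag_map[i].generic_flag;
            }
        }
        return mapped;
    }
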
/fs/fscache/

cookie.c
     42  cookie->flags,  in fscache_print_cookie()
     66  if (WARN_ON_ONCE(test_bit(FSCACHE_COOKIE_IS_HASHED, &cookie->flags))) {  in fscache_free_cookie()
    108  set_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags);  in fscache_init_access_gate()
    131  !test_bit(FSCACHE_COOKIE_NO_ACCESS_WAKE, &cookie->flags))  in fscache_end_cookie_access()
    181  if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags))  in fscache_begin_cookie_access()
    184  if (!test_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags) ||  in fscache_begin_cookie_access()
    235  set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);  in fscache_cookie_lookup_negative()
    264  clear_bit(FSCACHE_COOKIE_IS_CACHING, &cookie->flags);  in fscache_caching_failed()
    346  __set_bit(FSCACHE_COOKIE_NO_DATA_TO_READ, &cookie->flags);  in fscache_alloc_cookie()
    414  if (!test_bit(FSCACHE_COOKIE_RELINQUISHED, &cursor->flags))  in fscache_hash_cookie()
    [all …]

/fs/btrfs/

subpage.c
    417  unsigned long flags;  in btrfs_subpage_set_uptodate()  (local)
    419  spin_lock_irqsave(&subpage->lock, flags);  in btrfs_subpage_set_uptodate()
    423  spin_unlock_irqrestore(&subpage->lock, flags);  in btrfs_subpage_set_uptodate()
    432  unsigned long flags;  in btrfs_subpage_clear_uptodate()  (local)
    434  spin_lock_irqsave(&subpage->lock, flags);  in btrfs_subpage_clear_uptodate()
    437  spin_unlock_irqrestore(&subpage->lock, flags);  in btrfs_subpage_clear_uptodate()
    446  unsigned long flags;  in btrfs_subpage_set_error()  (local)
    448  spin_lock_irqsave(&subpage->lock, flags);  in btrfs_subpage_set_error()
    451  spin_unlock_irqrestore(&subpage->lock, flags);  in btrfs_subpage_set_error()
    460  unsigned long flags;  in btrfs_subpage_clear_error()  (local)
    [all …]

/fs/dlm/

ast.c
     29  lkb->lkb_last_bast.flags,  in dlm_dump_lkb_callbacks()
     37  lkb->lkb_last_cast.flags,  in dlm_dump_lkb_callbacks()
     46  lkb->lkb_callbacks[i].flags,  in dlm_dump_lkb_callbacks()
     53  int dlm_add_lkb_callback(struct dlm_lkb *lkb, uint32_t flags, int mode,  in dlm_add_lkb_callback()  (argument)
     72  if ((i > 0) && (flags & DLM_CB_BAST) &&  in dlm_add_lkb_callback()
     73  (lkb->lkb_callbacks[i-1].flags & DLM_CB_BAST)) {  in dlm_add_lkb_callback()
     94  lkb->lkb_callbacks[i].flags = flags;  in dlm_add_lkb_callback()
    105  flags, mode, status, sbflags);  in dlm_add_lkb_callback()
    145  if ((cb->flags & DLM_CB_BAST) && lkb->lkb_last_cast.seq) {  in dlm_rem_lkb_callback()
    147  cb->flags |= DLM_CB_SKIP;  in dlm_rem_lkb_callback()
    [all …]