/fs/notify/fanotify/ |
D | fanotify.c |
    54   static int fanotify_merge(struct list_head *list, struct fsnotify_event *event)   in fanotify_merge() argument
    59   pr_debug("%s: list=%p event=%p\n", __func__, list, event);   in fanotify_merge()
    60   new = FANOTIFY_E(event);   in fanotify_merge()
    71   if (should_merge(test_event, event)) {   in fanotify_merge()
    88   struct fanotify_perm_event *event,   in fanotify_get_response() argument
    93   pr_debug("%s: group=%p event=%p\n", __func__, group, event);   in fanotify_get_response()
    96   event->state == FAN_EVENT_ANSWERED);   in fanotify_get_response()
    101  if (event->state == FAN_EVENT_REPORTED) {   in fanotify_get_response()
    103  event->state = FAN_EVENT_CANCELED;   in fanotify_get_response()
    108  if (event->state == FAN_EVENT_INIT)   in fanotify_get_response()
    [all …]
|
D | fanotify.h |
    89   static inline bool fanotify_event_has_path(struct fanotify_event *event)   in fanotify_event_has_path() argument
    91   return event->fh_type == FILEID_ROOT;   in fanotify_event_has_path()
    94   static inline bool fanotify_event_has_fid(struct fanotify_event *event)   in fanotify_event_has_fid() argument
    96   return event->fh_type != FILEID_ROOT &&   in fanotify_event_has_fid()
    97   event->fh_type != FILEID_INVALID;   in fanotify_event_has_fid()
    100  static inline bool fanotify_event_has_ext_fh(struct fanotify_event *event)   in fanotify_event_has_ext_fh() argument
    102  return fanotify_event_has_fid(event) &&   in fanotify_event_has_ext_fh()
    103  event->fh_len > FANOTIFY_INLINE_FH_LEN;   in fanotify_event_has_ext_fh()
    106  static inline void *fanotify_event_fh(struct fanotify_event *event)   in fanotify_event_fh() argument
    108  return fanotify_fid_fh(&event->fid, event->fh_len);   in fanotify_event_fh()
|
D | fanotify_user.c |
    54   static int fanotify_event_info_len(struct fanotify_event *event)   in fanotify_event_info_len() argument
    56   if (!fanotify_event_has_fid(event))   in fanotify_event_info_len()
    60   sizeof(struct file_handle) + event->fh_len,   in fanotify_event_info_len()
    100  struct fanotify_event *event,   in create_fd() argument
    106  pr_debug("%s: group=%p event=%p\n", __func__, group, event);   in create_fd()
    118  if (event->path.dentry && event->path.mnt)   in create_fd()
    119  new_file = dentry_open(&event->path,   in create_fd()
    146  struct fanotify_perm_event *event,   in finish_permission_event() argument
    153  event->response = response;   in finish_permission_event()
    154  if (event->state == FAN_EVENT_CANCELED)   in finish_permission_event()
    [all …]
|
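The fanotify entries above are the kernel half of the permission-event round trip: fanotify_get_response() parks the opener until userspace answers, and finish_permission_event() records that answer. A minimal userspace sketch of the other half follows; it assumes a kernel with CONFIG_FANOTIFY_ACCESS_PERMISSIONS, CAP_SYS_ADMIN, and an arbitrarily chosen mount point of "/tmp", with most error handling trimmed.

/*
 * Hedged sketch: FAN_OPEN_PERM listener on the mount containing /tmp
 * (path chosen arbitrarily); most error handling omitted.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/fanotify.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    ssize_t len;
    int fd;

    /* FAN_CLASS_CONTENT (or PRE_CONTENT) is required for permission events. */
    fd = fanotify_init(FAN_CLASS_CONTENT, O_RDONLY);
    if (fd < 0 || fanotify_mark(fd, FAN_MARK_ADD | FAN_MARK_MOUNT,
                                FAN_OPEN_PERM, AT_FDCWD, "/tmp") < 0) {
        perror("fanotify");
        return 1;
    }

    while ((len = read(fd, buf, sizeof(buf))) > 0) {
        struct fanotify_event_metadata *md = (void *)buf;

        for (; FAN_EVENT_OK(md, len); md = FAN_EVENT_NEXT(md, len)) {
            if (md->mask & FAN_OPEN_PERM) {
                /* The opener sleeps in fanotify_get_response() until
                 * this answer is written back. */
                struct fanotify_response resp = {
                    .fd = md->fd,
                    .response = FAN_ALLOW,
                };
                write(fd, &resp, sizeof(resp));
            }
            if (md->fd >= 0)
                close(md->fd);
        }
    }
    return 0;
}

Until the response is written back (or the group fd is closed, which releases any pending permission events), the open() that raised the event stays blocked in the kernel.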
D | Kconfig | 11 the event.
|
/fs/notify/ |
D | notification.c |
    58   struct fsnotify_event *event)   in fsnotify_destroy_event() argument
    61   if (!event || event == group->overflow_event)   in fsnotify_destroy_event()
    69   if (!list_empty(&event->list)) {   in fsnotify_destroy_event()
    71   WARN_ON(!list_empty(&event->list));   in fsnotify_destroy_event()
    74   group->ops->free_event(event);   in fsnotify_destroy_event()
    85   struct fsnotify_event *event,   in fsnotify_add_event() argument
    92   pr_debug("%s: group=%p event=%p\n", __func__, group, event);   in fsnotify_add_event()
    101  if (event == group->overflow_event ||   in fsnotify_add_event()
    109  event = group->overflow_event;   in fsnotify_add_event()
    114  ret = merge(list, event);   in fsnotify_add_event()
    [all …]
|
/fs/fscache/ |
D | object.c |
    173  int event = -1;   in fscache_object_sm_dispatcher() local
    196  event = fls(events & t->events) - 1;   in fscache_object_sm_dispatcher()
    197  __clear_bit(event, &object->oob_event_mask);   in fscache_object_sm_dispatcher()
    198  clear_bit(event, &object->events);   in fscache_object_sm_dispatcher()
    211  event = fls(events & t->events) - 1;   in fscache_object_sm_dispatcher()
    213  true, false, event);   in fscache_object_sm_dispatcher()
    214  clear_bit(event, &object->events);   in fscache_object_sm_dispatcher()
    216  object->debug_id, event,   in fscache_object_sm_dispatcher()
    233  trace_fscache_osm(object, state, false, oob, event);   in fscache_object_sm_dispatcher()
    234  new_state = state->work(object, event);   in fscache_object_sm_dispatcher()
    [all …]
|
D | internal.h |
    307  unsigned event)   in fscache_raise_event() argument
    309  BUG_ON(event >= NR_FSCACHE_OBJECT_EVENTS);   in fscache_raise_event()
    312  object->debug_id, object->event_mask, (1 << event));   in fscache_raise_event()
    314  if (!test_and_set_bit(event, &object->events) &&   in fscache_raise_event()
    315  test_bit(event, &object->event_mask))   in fscache_raise_event()
|
/fs/notify/inotify/ |
D | inotify_fsnotify.c |
    50   struct fsnotify_event *event)   in inotify_merge() argument
    55   return event_compare(last_event, event);   in inotify_merge()
    66   struct inotify_event_info *event;   in inotify_handle_event() local
    99   event = kmalloc(alloc_len, GFP_KERNEL_ACCOUNT | __GFP_RETRY_MAYFAIL);   in inotify_handle_event()
    102  if (unlikely(!event)) {   in inotify_handle_event()
    120  fsn_event = &event->fse;   in inotify_handle_event()
    122  event->mask = mask;   in inotify_handle_event()
    123  event->wd = i_mark->wd;   in inotify_handle_event()
    124  event->sync_cookie = cookie;   in inotify_handle_event()
    125  event->name_len = len;   in inotify_handle_event()
    [all …]
|
D | inotify_user.c |
    117  struct inotify_event_info *event;   in round_event_name_len() local
    119  event = INOTIFY_E(fsn_event);   in round_event_name_len()
    120  if (!event->name_len)   in round_event_name_len()
    122  return roundup(event->name_len + 1, sizeof(struct inotify_event));   in round_event_name_len()
    136  struct fsnotify_event *event;   in get_one_event() local
    141  event = fsnotify_peek_first_event(group);   in get_one_event()
    143  pr_debug("%s: group=%p event=%p\n", __func__, group, event);   in get_one_event()
    145  event_size += round_event_name_len(event);   in get_one_event()
    153  return event;   in get_one_event()
    167  struct inotify_event_info *event;   in copy_event_to_user() local
    [all …]
|
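For comparison with the inotify_handle_event()/copy_event_to_user() pair listed above, here is a hedged userspace sketch of the consumer side; the watched path "/tmp" and the event mask are arbitrary choices.

/*
 * Hedged sketch: watch /tmp (arbitrary path) for file creation and
 * deletion; this consumes the inotify_event_info records queued by
 * inotify_handle_event() above.
 */
#include <stdio.h>
#include <sys/inotify.h>
#include <unistd.h>

int main(void)
{
    char buf[4096] __attribute__((aligned(__alignof__(struct inotify_event))));
    ssize_t len;
    int fd;

    fd = inotify_init1(IN_CLOEXEC);
    if (fd < 0 || inotify_add_watch(fd, "/tmp", IN_CREATE | IN_DELETE) < 0) {
        perror("inotify");
        return 1;
    }

    while ((len = read(fd, buf, sizeof(buf))) > 0) {
        char *p = buf;

        /* One read() may return several variable-length events. */
        while (p < buf + len) {
            struct inotify_event *ev = (struct inotify_event *)p;

            printf("wd=%d mask=%#x name=%s\n", ev->wd, ev->mask,
                   ev->len ? ev->name : "");
            p += sizeof(*ev) + ev->len;
        }
    }
    return 0;
}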
/fs/ |
D | eventpoll.c |
    174   struct epoll_event event;   member
    891   pt->_key = epi->event.events;   in ep_item_poll()
    893   return vfs_poll(epi->ffd.file, pt) & epi->event.events;   in ep_item_poll()
    901   locked) & epi->event.events;   in ep_item_poll()
    960   epi->ffd.fd, epi->event.events,   in ep_show_fdinfo()
    961   (long long)epi->event.data,   in ep_show_fdinfo()
    1227  if (!(epi->event.events & ~EP_PRIVATE_BITS))   in ep_poll_callback()
    1236  if (pollflags && !(pollflags & epi->event.events))   in ep_poll_callback()
    1263  if ((epi->event.events & EPOLLEXCLUSIVE) &&   in ep_poll_callback()
    1267  if (epi->event.events & EPOLLIN)   in ep_poll_callback()
    [all …]
|
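The eventpoll.c hits all revolve around epi->event, the struct epoll_event that userspace hands to epoll_ctl() and that ep_poll_callback() tests on every wakeup. A small, hedged sketch of that registration path, watching stdin purely for illustration:

/*
 * Hedged sketch: register stdin with an epoll instance; the struct
 * epoll_event passed to epoll_ctl() here is what the kernel stores in
 * epi->event and checks in ep_poll_callback() above.
 */
#include <stdio.h>
#include <sys/epoll.h>
#include <unistd.h>

int main(void)
{
    struct epoll_event ev = { .events = EPOLLIN, .data.fd = STDIN_FILENO };
    struct epoll_event ready[8];
    int epfd, n, i;

    epfd = epoll_create1(0);
    if (epfd < 0 || epoll_ctl(epfd, EPOLL_CTL_ADD, STDIN_FILENO, &ev) < 0) {
        perror("epoll");
        return 1;
    }

    n = epoll_wait(epfd, ready, 8, -1);   /* block until stdin is readable */
    for (i = 0; i < n; i++)
        printf("fd %d ready, events %#x\n", ready[i].data.fd, ready[i].events);

    close(epfd);
    return 0;
}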
D | proc_namespace.c |
    27   int event;   in mounts_poll() local
    31   event = READ_ONCE(ns->event);   in mounts_poll()
    32   if (m->poll_event != event) {   in mounts_poll()
    33   m->poll_event = event;   in mounts_poll()
    280  m->poll_event = ns->event;   in mounts_open_common()
|
D | userfaultfd.c |
    197   msg.event = UFFD_EVENT_PAGEFAULT;   in userfault_msg()
    604   if (ewq->msg.event == 0)   in userfaultfd_event_wait_completion()
    615   if (ewq->msg.event == UFFD_EVENT_FORK) {   in userfaultfd_event_wait_completion()
    666   ewq->msg.event = 0;   in userfaultfd_event_complete()
    727   ewq.msg.event = UFFD_EVENT_FORK;   in dup_fctx()
    782   ewq.msg.event = UFFD_EVENT_REMAP;   in mremap_userfaultfd_complete()
    807   ewq.msg.event = UFFD_EVENT_REMOVE;   in userfaultfd_remove()
    864   ewq.msg.event = UFFD_EVENT_UNMAP;   in userfaultfd_unmap_complete()
    1108  if (uwq->msg.event == UFFD_EVENT_FORK) {   in userfaultfd_ctx_read()
    1147  if (!ret && msg->event == UFFD_EVENT_FORK) {   in userfaultfd_ctx_read()
|
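userfaultfd.c fills msg.event (UFFD_EVENT_PAGEFAULT, UFFD_EVENT_FORK, and so on) for delivery through userfaultfd_ctx_read(). Below is a condensed, hedged userspace sketch in the spirit of the userfaultfd(2) man-page example: one page, one fault, resolved with UFFDIO_ZEROPAGE. Build with -pthread; newer kernels may additionally require CAP_SYS_PTRACE or vm.unprivileged_userfaultfd=1, and error handling is trimmed.

/*
 * Hedged sketch: a monitor thread reads struct uffd_msg, checks
 * msg.event == UFFD_EVENT_PAGEFAULT (set in userfault_msg() above) and
 * resolves the fault with UFFDIO_ZEROPAGE.
 */
#include <fcntl.h>
#include <linux/userfaultfd.h>
#include <pthread.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/syscall.h>
#include <unistd.h>

static long page_size;

static void *fault_handler(void *arg)
{
    int uffd = (int)(long)arg;
    struct uffd_msg msg;

    if (read(uffd, &msg, sizeof(msg)) != sizeof(msg))
        return NULL;
    if (msg.event == UFFD_EVENT_PAGEFAULT) {
        struct uffdio_zeropage zp = {
            .range.start = msg.arg.pagefault.address & ~(__u64)(page_size - 1),
            .range.len = page_size,
        };
        ioctl(uffd, UFFDIO_ZEROPAGE, &zp);   /* wakes the faulting thread */
    }
    return NULL;
}

int main(void)
{
    struct uffdio_api api = { .api = UFFD_API };
    struct uffdio_register reg;
    pthread_t thr;
    char *area;
    int uffd;

    page_size = sysconf(_SC_PAGE_SIZE);
    uffd = syscall(SYS_userfaultfd, O_CLOEXEC);
    ioctl(uffd, UFFDIO_API, &api);

    area = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    reg.range.start = (unsigned long)area;
    reg.range.len = page_size;
    reg.mode = UFFDIO_REGISTER_MODE_MISSING;
    ioctl(uffd, UFFDIO_REGISTER, &reg);

    pthread_create(&thr, NULL, fault_handler, (void *)(long)uffd);
    printf("first byte: %d\n", area[0]);   /* triggers UFFD_EVENT_PAGEFAULT */
    pthread_join(thr, NULL);
    return 0;
}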
D | aio.c |
    1092  struct io_event *ev_page, *event;   in aio_complete() local
    1110  event = ev_page + pos % AIO_EVENTS_PER_PAGE;   in aio_complete()
    1112  *event = iocb->ki_res;   in aio_complete()
    1174  struct io_event __user *event, long nr)   in aio_read_events_ring() argument
    1227  copy_ret = copy_to_user(event + ret, ev + pos,   in aio_read_events_ring()
    1254  struct io_event __user *event, long *i)   in aio_read_events() argument
    1256  long ret = aio_read_events_ring(ctx, event + *i, nr - *i);   in aio_read_events()
    1271  struct io_event __user *event,   in read_events() argument
    1291  aio_read_events(ctx, min_nr, nr, event, &ret);   in read_events()
    1294  aio_read_events(ctx, min_nr, nr, event, &ret),   in read_events()
|
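aio_complete() writes a struct io_event into the completion ring that aio_read_events_ring() later copies out through io_getevents(2). A rough, hedged sketch of that consumer, using the raw syscalls rather than libaio and reading an arbitrary file (/etc/hostname):

/*
 * Hedged sketch: submit one asynchronous read and harvest its io_event
 * with the raw AIO syscalls; error handling kept minimal.
 */
#include <fcntl.h>
#include <linux/aio_abi.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
    aio_context_t ctx = 0;
    struct io_event ev;
    struct iocb cb;
    struct iocb *cbs[1] = { &cb };
    char buf[256];
    int fd;

    fd = open("/etc/hostname", O_RDONLY);
    if (fd < 0 || syscall(SYS_io_setup, 8, &ctx) < 0) {
        perror("setup");
        return 1;
    }

    memset(&cb, 0, sizeof(cb));
    cb.aio_lio_opcode = IOCB_CMD_PREAD;
    cb.aio_fildes = fd;
    cb.aio_buf = (uint64_t)(uintptr_t)buf;
    cb.aio_nbytes = sizeof(buf);

    if (syscall(SYS_io_submit, ctx, 1, cbs) != 1) {
        perror("io_submit");
        return 1;
    }

    /* The completion lands in the ring via aio_complete(); collect it here. */
    if (syscall(SYS_io_getevents, ctx, 1, 1, &ev, NULL) == 1)
        printf("read %lld bytes\n", (long long)ev.res);

    syscall(SYS_io_destroy, ctx);
    close(fd);
    return 0;
}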
D | mount.h | 17 u64 event; member
|
D | namespace.c |
    65    static u64 event;   variable
    787   ns->event = ++event;   in touch_mnt_namespace()
    797   if (ns && ns->event != event) {   in __touch_mnt_namespace()
    798   ns->event = event;   in __touch_mnt_namespace()
    1276  if (p->cached_event == p->ns->event) {   in m_start()
    1286  p->cached_event = p->ns->event;   in m_start()
    1604  event++;   in do_umount()
    1645  event++;   in __detach_mounts()
|
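proc_namespace.c, mount.h and namespace.c together implement the mount-table change counter: touch_mnt_namespace() bumps ns->event, and mounts_poll() reports an exceptional condition (POLLERR | POLLPRI) once per change against the value snapshotted in mounts_open_common(). A hedged userspace sketch of a watcher built on that behaviour; the re-read is discarded here for brevity.

/*
 * Hedged sketch: block until the mount table changes, then re-read
 * /proc/self/mounts.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    char buf[4096];
    struct pollfd pfd;

    pfd.fd = open("/proc/self/mounts", O_RDONLY);
    pfd.events = POLLPRI;   /* POLLERR is delivered even if not requested */
    if (pfd.fd < 0)
        return 1;

    for (;;) {
        if (poll(&pfd, 1, -1) > 0 && (pfd.revents & (POLLERR | POLLPRI))) {
            printf("mount table changed\n");
            lseek(pfd.fd, 0, SEEK_SET);
            while (read(pfd.fd, buf, sizeof(buf)) > 0)
                ;   /* a real watcher would diff the new contents */
        }
    }
}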
/fs/ocfs2/cluster/ |
D | heartbeat.c |
    756  struct o2hb_node_event *event;   in o2hb_run_event_list() local
    766  event = list_entry(o2hb_node_events.next,   in o2hb_run_event_list()
    769  list_del_init(&event->hn_item);   in o2hb_run_event_list()
    773  event->hn_event_type == O2HB_NODE_UP_CB ? "UP" : "DOWN",   in o2hb_run_event_list()
    774  event->hn_node_num);   in o2hb_run_event_list()
    776  hbcall = hbcall_from_type(event->hn_event_type);   in o2hb_run_event_list()
    783  o2hb_fire_callbacks(hbcall, event->hn_node, event->hn_node_num);   in o2hb_run_event_list()
    792  static void o2hb_queue_node_event(struct o2hb_node_event *event,   in o2hb_queue_node_event() argument
    801  event->hn_event_type = type;   in o2hb_queue_node_event()
    802  event->hn_node = node;   in o2hb_queue_node_event()
    [all …]
|
/fs/kernfs/ |
D | file.c |
    36   atomic_t event;   member
    165  of->event = atomic_read(&of->kn->attr.open->event);   in kernfs_seq_show()
    210  of->event = atomic_read(&of->kn->attr.open->event);   in kernfs_file_direct_read()
    576  atomic_set(&new_on->event, 1);   in kernfs_get_open_node()
    841  if (of->event != atomic_read(&on->event))   in kernfs_generic_poll()
    944  atomic_inc(&on->event);   in kernfs_notify()
|
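kernfs/file.c shows the counter behind pollable sysfs attributes: kernfs_notify() increments on->event and kernfs_generic_poll() compares it against the value snapshotted at the last read, waking pollers with an exceptional condition. The module sketch below, with an invented kobject and attribute name, is one way a driver might drive that counter via sysfs_notify(); userspace would poll the attribute for POLLERR | POLLPRI, then seek to 0 and re-read.

/*
 * Hedged kernel-side sketch (module, kobject and attribute names invented):
 * sysfs_notify() reaches kernfs_notify(), which bumps the open node's
 * event counter seen in kernfs_generic_poll() above.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/module.h>
#include <linux/sysfs.h>

static struct kobject *demo_kobj;
static int demo_value;

static ssize_t value_show(struct kobject *kobj, struct kobj_attribute *attr,
                          char *buf)
{
    return sprintf(buf, "%d\n", demo_value);
}

static ssize_t value_store(struct kobject *kobj, struct kobj_attribute *attr,
                           const char *buf, size_t count)
{
    if (kstrtoint(buf, 0, &demo_value))
        return -EINVAL;
    /* Bump the kernfs event counter; pollers of "value" see POLLPRI. */
    sysfs_notify(demo_kobj, NULL, "value");
    return count;
}

static struct kobj_attribute value_attr = __ATTR_RW(value);

static int __init demo_init(void)
{
    int ret;

    demo_kobj = kobject_create_and_add("kernfs_event_demo", kernel_kobj);
    if (!demo_kobj)
        return -ENOMEM;
    ret = sysfs_create_file(demo_kobj, &value_attr.attr);
    if (ret)
        kobject_put(demo_kobj);
    return ret;
}

static void __exit demo_exit(void)
{
    kobject_put(demo_kobj);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");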
/fs/cifs/ |
D | smbdirect.c |
    176  struct rdma_cm_id *id, struct rdma_cm_event *event)   in smbd_conn_upcall() argument
    181  event->event, event->status);   in smbd_conn_upcall()
    183  switch (event->event) {   in smbd_conn_upcall()
    201  log_rdma_event(INFO, "connected event=%d\n", event->event);   in smbd_conn_upcall()
    209  log_rdma_event(INFO, "connecting failed event=%d\n", event->event);   in smbd_conn_upcall()
    238  smbd_qp_async_error_upcall(struct ib_event *event, void *context)   in smbd_qp_async_error_upcall() argument
    243  ib_event_msg(event->event), event->device->name, info);   in smbd_qp_async_error_upcall()
    245  switch (event->event) {   in smbd_qp_async_error_upcall()
|
/fs/lockd/ |
D | svc.c |
    300  unsigned long event, void *ptr)   in lockd_inetaddr_event() argument
    305  if ((event != NETDEV_DOWN) ||   in lockd_inetaddr_event()
    330  unsigned long event, void *ptr)   in lockd_inet6addr_event() argument
    335  if ((event != NETDEV_DOWN) ||   in lockd_inet6addr_event()
|
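lockd_inetaddr_event() (and nfsd_inetaddr_event() further down) are ordinary inetaddr notifier callbacks that react to NETDEV_DOWN with a struct in_ifaddr as the payload. A hedged, stand-alone module sketch of the same registration pattern, with invented names:

/*
 * Hedged kernel-side sketch of an inetaddr notifier, mirroring the shape
 * of lockd_inetaddr_event(); names are invented for illustration.
 */
#include <linux/inetdevice.h>
#include <linux/module.h>
#include <linux/notifier.h>

static int demo_inetaddr_event(struct notifier_block *nb,
                               unsigned long event, void *ptr)
{
    struct in_ifaddr *ifa = ptr;

    if (event == NETDEV_DOWN)
        pr_info("address %pI4 on %s going away\n",
                &ifa->ifa_local, ifa->ifa_label);
    return NOTIFY_DONE;
}

static struct notifier_block demo_inetaddr_nb = {
    .notifier_call = demo_inetaddr_event,
};

static int __init demo_init(void)
{
    return register_inetaddr_notifier(&demo_inetaddr_nb);
}

static void __exit demo_exit(void)
{
    unregister_inetaddr_notifier(&demo_inetaddr_nb);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");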
/fs/nfs/blocklayout/ |
D | rpc_pipefs.c |
    165  static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,   in rpc_pipefs_event() argument
    182  switch (event) {   in rpc_pipefs_event()
|
/fs/ocfs2/dlmfs/ |
D | dlmfs.c |
    210  __poll_t event = 0;   in dlmfs_file_poll() local
    218  event = EPOLLIN | EPOLLRDNORM;   in dlmfs_file_poll()
    221  return event;   in dlmfs_file_poll()
|
/fs/nfsd/ |
D | nfssvc.c |
    428  static int nfsd_inetaddr_event(struct notifier_block *this, unsigned long event,   in nfsd_inetaddr_event() argument
    437  if ((event != NETDEV_DOWN) ||   in nfsd_inetaddr_event()
    460  unsigned long event, void *ptr)   in nfsd_inet6addr_event() argument
    468  if ((event != NETDEV_DOWN) ||   in nfsd_inet6addr_event()
|
/fs/nfs/ |
D | dns_resolve.c |
    425  static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,   in rpc_pipefs_event() argument
    440  switch (event) {   in rpc_pipefs_event()
|
/fs/jfs/ |
D | jfs_txnmgr.c |
    118  static inline void TXN_SLEEP_DROP_LOCK(wait_queue_head_t * event)   in TXN_SLEEP_DROP_LOCK() argument
    122  add_wait_queue(event, &wait);   in TXN_SLEEP_DROP_LOCK()
    126  remove_wait_queue(event, &wait);   in TXN_SLEEP_DROP_LOCK()
    129  #define TXN_SLEEP(event)\   argument
    131  TXN_SLEEP_DROP_LOCK(event);\
    135  #define TXN_WAKEUP(event) wake_up_all(event)   argument
|
/fs/proc/ |
D | proc_sysctl.c |
    56   atomic_inc(&poll->event);   in proc_sys_poll_notify()
    662  unsigned long event;   in proc_sys_poll() local
    674  event = (unsigned long)filp->private_data;   in proc_sys_poll()
    677  if (event != atomic_read(&table->poll->event)) {   in proc_sys_poll()
|