/net/ceph/crush/ |
D | mapper.c |
      75  struct crush_work_bucket *work,  in bucket_perm_choose() argument
      82  if (work->perm_x != (__u32)x || work->perm_n == 0) {  in bucket_perm_choose()
      84  work->perm_x = x;  in bucket_perm_choose()
      90  work->perm[0] = s;  in bucket_perm_choose()
      91  work->perm_n = 0xffff; /* magic value, see below */  in bucket_perm_choose()
      96  work->perm[i] = i;  in bucket_perm_choose()
      97  work->perm_n = 0;  in bucket_perm_choose()
      98  } else if (work->perm_n == 0xffff) {  in bucket_perm_choose()
     101  work->perm[i] = i;  in bucket_perm_choose()
     102  work->perm[work->perm[0]] = 0;  in bucket_perm_choose()
     [all …]
|
/net/rds/ |
D | threads.c |
     151  void rds_connect_worker(struct work_struct *work)  in rds_connect_worker() argument
     153  struct rds_conn_path *cp = container_of(work,  in rds_connect_worker()
     155  cp_conn_w.work);  in rds_connect_worker()
     180  void rds_send_worker(struct work_struct *work)  in rds_send_worker() argument
     182  struct rds_conn_path *cp = container_of(work,  in rds_send_worker()
     184  cp_send_w.work);  in rds_send_worker()
     206  void rds_recv_worker(struct work_struct *work)  in rds_recv_worker() argument
     208  struct rds_conn_path *cp = container_of(work,  in rds_recv_worker()
     210  cp_recv_w.work);  in rds_recv_worker()
     230  void rds_shutdown_worker(struct work_struct *work)  in rds_shutdown_worker() argument
     [all …]
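The rds handlers above all start with the same recovery step: the workqueue passes only a struct work_struct pointer, and the handler uses container_of() to get back to the object that embeds it. Below is a minimal sketch of that step, with hypothetical names (my_conn, send_w) rather than the rds ones; because the embedded field is a delayed_work, the lookup goes through its ".work" member, as in the cp_send_w.work hit above.

#include <linux/kernel.h>
#include <linux/workqueue.h>

struct my_conn {
        struct delayed_work send_w;     /* embedded, like cp_send_w in the rds path */
        int state;
};

static void my_send_worker(struct work_struct *work)
{
        /*
         * For a delayed_work the handler receives &dwork->work, so the
         * container_of() goes through the ".work" member to reach the
         * enclosing object.
         */
        struct my_conn *conn = container_of(work, struct my_conn, send_w.work);

        conn->state++;  /* stand-in for the real transmit path */
}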
|
/net/ipv6/netfilter/ |
D | nf_nat_masquerade_ipv6.c |
      89  struct work_struct work;  member
      95  static int inet_cmp(struct nf_conn *ct, void *work)  in inet_cmp() argument
      97  struct masq_dev_work *w = (struct masq_dev_work *)work;  in inet_cmp()
     108  static void iterate_cleanup_work(struct work_struct *work)  in iterate_cleanup_work() argument
     112  w = container_of(work, struct masq_dev_work, work);  in iterate_cleanup_work()
     160  INIT_WORK(&w->work, iterate_cleanup_work);  in masq_inet_event()
     164  schedule_work(&w->work);  in masq_inet_event()
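The masquerade hits above show the usual way to push cleanup out of a notifier that may run in atomic context: allocate a small container with an embedded work_struct, queue it, and let the handler free it. A hedged sketch of that shape follows, with made-up names (dev_work, cleanup_fn, on_event) rather than the netfilter ones.

#include <linux/workqueue.h>
#include <linux/slab.h>

struct dev_work {
        struct work_struct work;
        int ifindex;
};

static void cleanup_fn(struct work_struct *work)
{
        struct dev_work *w = container_of(work, struct dev_work, work);

        /* ... blocking cleanup keyed on w->ifindex would go here ... */
        kfree(w);
}

static void on_event(int ifindex)
{
        struct dev_work *w = kzalloc(sizeof(*w), GFP_ATOMIC);

        if (!w)
                return;
        w->ifindex = ifindex;
        INIT_WORK(&w->work, cleanup_fn);
        schedule_work(&w->work);        /* runs later on the system workqueue */
}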
|
/net/netfilter/ |
D | xt_IDLETIMER.c |
      63  struct work_struct work;  member
     105  schedule_work(&timer->work);  in check_for_delayed_trigger()
     213  static void idletimer_tg_work(struct work_struct *work)  in idletimer_tg_work() argument
     215  struct idletimer_tg *timer = container_of(work, struct idletimer_tg,  in idletimer_tg_work()
     216  work);  in idletimer_tg_work()
     232  schedule_work(&timer->work);  in idletimer_tg_expired()
     269  schedule_work(&timer->work);  in idletimer_resume()
     346  INIT_WORK(&info->timer->work, idletimer_tg_work);  in idletimer_tg_create()
     387  schedule_work(&timer->work);  in reset_timer()
     414  schedule_work(&info->timer->work);  in idletimer_tg_target()
     [all …]
|
/net/sched/ |
D | cls_cgroup.c |
      27  struct work_struct work;  member
      71  static void cls_cgroup_destroy_work(struct work_struct *work)  in cls_cgroup_destroy_work() argument
      73  struct cls_cgroup_head *head = container_of(work,  in cls_cgroup_destroy_work()
      75  work);  in cls_cgroup_destroy_work()
      87  INIT_WORK(&head->work, cls_cgroup_destroy_work);  in cls_cgroup_destroy_rcu()
      88  tcf_queue_work(&head->work);  in cls_cgroup_destroy_rcu()
|
D | cls_tcindex.c |
      31  struct work_struct work;  member
      41  struct work_struct work;  member
     148  static void tcindex_destroy_rexts_work(struct work_struct *work)  in tcindex_destroy_rexts_work() argument
     152  r = container_of(work, struct tcindex_filter_result, work);  in tcindex_destroy_rexts_work()
     163  INIT_WORK(&r->work, tcindex_destroy_rexts_work);  in tcindex_destroy_rexts()
     164  tcf_queue_work(&r->work);  in tcindex_destroy_rexts()
     174  static void tcindex_destroy_fexts_work(struct work_struct *work)  in tcindex_destroy_fexts_work() argument
     176  struct tcindex_filter *f = container_of(work, struct tcindex_filter,  in tcindex_destroy_fexts_work()
     177  work);  in tcindex_destroy_fexts_work()
     189  INIT_WORK(&f->work, tcindex_destroy_fexts_work);  in tcindex_destroy_fexts()
     [all …]
|
D | cls_matchall.c |
      25  struct work_struct work;  member
      54  static void mall_destroy_work(struct work_struct *work)  in mall_destroy_work() argument
      56  struct cls_mall_head *head = container_of(work, struct cls_mall_head,  in mall_destroy_work()
      57  work);  in mall_destroy_work()
      68  INIT_WORK(&head->work, mall_destroy_work);  in mall_destroy_rcu()
      69  tcf_queue_work(&head->work);  in mall_destroy_rcu()
|
D | cls_basic.c |
      38  struct work_struct work;  member
      96  static void basic_delete_filter_work(struct work_struct *work)  in basic_delete_filter_work() argument
      98  struct basic_filter *f = container_of(work, struct basic_filter, work);  in basic_delete_filter_work()
     109  INIT_WORK(&f->work, basic_delete_filter_work);  in basic_delete_filter()
     110  tcf_queue_work(&f->work);  in basic_delete_filter()
|
D | cls_fw.c |
      50  struct work_struct work;  member
     132  static void fw_delete_filter_work(struct work_struct *work)  in fw_delete_filter_work() argument
     134  struct fw_filter *f = container_of(work, struct fw_filter, work);  in fw_delete_filter_work()
     145  INIT_WORK(&f->work, fw_delete_filter_work);  in fw_delete_filter()
     146  tcf_queue_work(&f->work);  in fw_delete_filter()
|
D | cls_u32.c |
      72  struct work_struct work;  member
     427  static void u32_delete_key_work(struct work_struct *work)  in u32_delete_key_work() argument
     429  struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);  in u32_delete_key_work()
     440  INIT_WORK(&key->work, u32_delete_key_work);  in u32_delete_key_rcu()
     441  tcf_queue_work(&key->work);  in u32_delete_key_rcu()
     451  static void u32_delete_key_freepf_work(struct work_struct *work)  in u32_delete_key_freepf_work() argument
     453  struct tc_u_knode *key = container_of(work, struct tc_u_knode, work);  in u32_delete_key_freepf_work()
     464  INIT_WORK(&key->work, u32_delete_key_freepf_work);  in u32_delete_key_freepf_rcu()
     465  tcf_queue_work(&key->work);  in u32_delete_key_freepf_rcu()
|
D | cls_route.c |
      61  struct work_struct work;  member
     267  static void route4_delete_filter_work(struct work_struct *work)  in route4_delete_filter_work() argument
     269  struct route4_filter *f = container_of(work, struct route4_filter, work);  in route4_delete_filter_work()
     280  INIT_WORK(&f->work, route4_delete_filter_work);  in route4_delete_filter()
     281  tcf_queue_work(&f->work);  in route4_delete_filter()
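The cls_cgroup, cls_tcindex, cls_matchall, cls_basic, cls_fw, cls_u32 and cls_route hits above all share the same two-stage teardown: the RCU callback cannot sleep, so it only initializes the work and hands it to tcf_queue_work(), and the work handler then does the sleeping cleanup and frees the filter. Below is a condensed sketch of that pattern, modelled on the cls_basic hits and using the single-argument tcf_queue_work() form these snippets show; the struct name my_filter is a stand-in, and the real handlers also take additional locking (e.g. rtnl) before touching shared state.

#include <linux/workqueue.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <net/pkt_cls.h>

struct my_filter {
        struct tcf_exts exts;
        struct work_struct work;
        struct rcu_head rcu;
};

/* Stage 2: runs from the tc filter workqueue and may sleep. */
static void my_filter_destroy_work(struct work_struct *work)
{
        struct my_filter *f = container_of(work, struct my_filter, work);

        tcf_exts_destroy(&f->exts);
        kfree(f);
}

/* Stage 1: RCU callback, atomic context, so it only queues the work. */
static void my_filter_destroy_rcu(struct rcu_head *head)
{
        struct my_filter *f = container_of(head, struct my_filter, rcu);

        INIT_WORK(&f->work, my_filter_destroy_work);
        tcf_queue_work(&f->work);
}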
|
/net/strparser/ |
D | strparser.c |
     391  queue_work(strp_wq, &strp->work);  in strp_data_ready()
     404  queue_work(strp_wq, &strp->work);  in strp_data_ready()
     426  queue_work(strp_wq, &strp->work);  in do_strp_work()
     434  do_strp_work(container_of(w, struct strparser, work));  in strp_work()
     440  msg_timer_work.work);  in strp_msg_timeout()
     494  INIT_WORK(&strp->work, strp_work);  in strp_init()
     507  queue_work(strp_wq, &strp->work);  in strp_unpause()
     519  cancel_work_sync(&strp->work);  in strp_done()
     536  queue_work(strp_wq, &strp->work);  in strp_check_rcv()
|
/net/core/ |
D | sock_diag.c |
     108  struct work_struct work;  member
     118  static void sock_diag_broadcast_destroy_work(struct work_struct *work)  in sock_diag_broadcast_destroy_work() argument
     121  container_of(work, struct broadcast_sk, work);  in sock_diag_broadcast_destroy_work()
     158  INIT_WORK(&bsk->work, sock_diag_broadcast_destroy_work);  in sock_diag_broadcast_destroy()
     159  queue_work(broadcast_wq, &bsk->work);  in sock_diag_broadcast_destroy()
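sock_diag.c (broadcast_wq) and strparser.c (strp_wq) above queue onto a private workqueue rather than the system one, which lets the subsystem flush and destroy its own queue on teardown. A small sketch of that variant follows; the queue name, variables and init/exit hooks are illustrative, not the sock_diag ones.

#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_wq;
static struct work_struct my_work;

static void my_handler(struct work_struct *work)
{
        /* deferred processing runs here, in process context */
}

static int __init my_init(void)
{
        my_wq = alloc_workqueue("my_wq", 0, 0);
        if (!my_wq)
                return -ENOMEM;

        INIT_WORK(&my_work, my_handler);
        queue_work(my_wq, &my_work);    /* instead of schedule_work() */
        return 0;
}

static void __exit my_exit(void)
{
        cancel_work_sync(&my_work);
        destroy_workqueue(my_wq);
}

module_init(my_init);
module_exit(my_exit);
MODULE_LICENSE("GPL");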
|
D | netpoll.c |
      60  static void netpoll_async_cleanup(struct work_struct *work);
      98  static void queue_process(struct work_struct *work)  in queue_process() argument
     101  container_of(work, struct netpoll_info, tx_work.work);  in queue_process()
     151  int work = 0;  in poll_one_napi() local
     170  work = napi->poll(napi, 0);  in poll_one_napi()
     171  WARN_ONCE(work, "%pF exceeded budget in poll\n", napi->poll);  in poll_one_napi()
     172  trace_napi_poll(napi, work, 0);  in poll_one_napi()
     838  static void netpoll_async_cleanup(struct work_struct *work)  in netpoll_async_cleanup() argument
     840  struct netpoll *np = container_of(work, struct netpoll, cleanup_work);  in netpoll_async_cleanup()
|
/net/vmw_vsock/ |
D | virtio_transport.c |
      84  static void virtio_transport_loopback_work(struct work_struct *work)  in virtio_transport_loopback_work() argument
      87  container_of(work, struct virtio_vsock, loopback_work);  in virtio_transport_loopback_work()
     121  virtio_transport_send_pkt_work(struct work_struct *work)  in virtio_transport_send_pkt_work() argument
     124  container_of(work, struct virtio_vsock, send_pkt_work);  in virtio_transport_send_pkt_work()
     302  static void virtio_transport_tx_work(struct work_struct *work)  in virtio_transport_tx_work() argument
     305  container_of(work, struct virtio_vsock, tx_work);  in virtio_transport_tx_work()
     339  static void virtio_transport_rx_work(struct work_struct *work)  in virtio_transport_rx_work() argument
     342  container_of(work, struct virtio_vsock, rx_work);  in virtio_transport_rx_work()
     448  static void virtio_transport_event_work(struct work_struct *work)  in virtio_transport_event_work() argument
     451  container_of(work, struct virtio_vsock, event_work);  in virtio_transport_event_work()
|
/net/mac80211/ |
D | ht.c |
     302  cancel_work_sync(&sta->ampdu_mlme.work);  in ieee80211_sta_tear_down_BA_sessions()
     323  void ieee80211_ba_session_work(struct work_struct *work)  in ieee80211_ba_session_work() argument
     326  container_of(work, struct sta_info, ampdu_mlme.work);  in ieee80211_ba_session_work()
     513  void ieee80211_request_smps_mgd_work(struct work_struct *work)  in ieee80211_request_smps_mgd_work() argument
     516  container_of(work, struct ieee80211_sub_if_data,  in ieee80211_request_smps_mgd_work()
     524  void ieee80211_request_smps_ap_work(struct work_struct *work)  in ieee80211_request_smps_ap_work() argument
     527  container_of(work, struct ieee80211_sub_if_data,  in ieee80211_request_smps_ap_work()
|
/net/bluetooth/ |
D | hci_request.c |
    1223  static void adv_timeout_expire(struct work_struct *work)  in adv_timeout_expire() argument
    1225  struct hci_dev *hdev = container_of(work, struct hci_dev,  in adv_timeout_expire()
    1226  adv_instance_expire.work);  in adv_timeout_expire()
    1556  static void scan_update_work(struct work_struct *work)  in scan_update_work() argument
    1558  struct hci_dev *hdev = container_of(work, struct hci_dev, scan_update);  in scan_update_work()
    1590  static void connectable_update_work(struct work_struct *work)  in connectable_update_work() argument
    1592  struct hci_dev *hdev = container_of(work, struct hci_dev,  in connectable_update_work()
    1699  static void discoverable_update_work(struct work_struct *work)  in discoverable_update_work() argument
    1701  struct hci_dev *hdev = container_of(work, struct hci_dev,  in discoverable_update_work()
    1810  static void bg_scan_update(struct work_struct *work)  in bg_scan_update() argument
    [all …]
|
D | 6lowpan.c |
     629  static void do_notify_peers(struct work_struct *work)  in do_notify_peers() argument
     631  struct lowpan_btle_dev *dev = container_of(work, struct lowpan_btle_dev,  in do_notify_peers()
     632  notify_peers.work);  in do_notify_peers()
     783  static void delete_netdev(struct work_struct *work)  in delete_netdev() argument
     785  struct lowpan_btle_dev *entry = container_of(work,  in delete_netdev()
    1068  struct work_struct work;  member
    1072  static void do_enable_set(struct work_struct *work)  in do_enable_set() argument
    1074  struct set_enable *set_enable = container_of(work,  in do_enable_set()
    1075  struct set_enable, work);  in do_enable_set()
    1104  INIT_WORK(&set_enable->work, do_enable_set);  in lowpan_enable_set()
    [all …]
|
D | hci_conn.c |
     137  static void le_scan_cleanup(struct work_struct *work)  in le_scan_cleanup() argument
     139  struct hci_conn *conn = container_of(work, struct hci_conn,  in le_scan_cleanup()
     396  static void hci_conn_timeout(struct work_struct *work)  in hci_conn_timeout() argument
     398  struct hci_conn *conn = container_of(work, struct hci_conn,  in hci_conn_timeout()
     399  disc_work.work);  in hci_conn_timeout()
     427  static void hci_conn_idle(struct work_struct *work)  in hci_conn_idle() argument
     429  struct hci_conn *conn = container_of(work, struct hci_conn,  in hci_conn_idle()
     430  idle_work.work);  in hci_conn_idle()
     461  static void hci_conn_auto_accept(struct work_struct *work)  in hci_conn_auto_accept() argument
     463  struct hci_conn *conn = container_of(work, struct hci_conn,  in hci_conn_auto_accept()
     [all …]
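hci_conn_timeout(), hci_conn_idle() and hci_conn_auto_accept() above are timeouts implemented as delayed work: the handler digs the connection out through the ".work" member, and other code arms or cancels the timer-like work. Below is a sketch of that arm/cancel lifecycle under invented names (my_dev, expire_work, TIMEOUT_MS); the Bluetooth code naturally does more in each step.

#include <linux/workqueue.h>
#include <linux/jiffies.h>

#define TIMEOUT_MS 2000

struct my_dev {
        struct delayed_work expire_work;
};

static void my_expire(struct work_struct *work)
{
        struct my_dev *d = container_of(work, struct my_dev, expire_work.work);

        /* timeout action on d would go here */
        (void)d;
}

static void my_dev_init(struct my_dev *d)
{
        INIT_DELAYED_WORK(&d->expire_work, my_expire);
}

/* (Re)arm the timeout; mod_delayed_work() replaces any pending expiry. */
static void my_dev_touch(struct my_dev *d)
{
        mod_delayed_work(system_wq, &d->expire_work,
                         msecs_to_jiffies(TIMEOUT_MS));
}

/* Disarm before freeing; the _sync variant waits for a running handler. */
static void my_dev_stop(struct my_dev *d)
{
        cancel_delayed_work_sync(&d->expire_work);
}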
|
/net/batman-adv/ |
D | bat_v_elp.c |
     160  void batadv_v_elp_throughput_metric_update(struct work_struct *work)  in batadv_v_elp_throughput_metric_update() argument
     165  neigh_bat_v = container_of(work, struct batadv_hardif_neigh_node_bat_v,  in batadv_v_elp_throughput_metric_update()
     248  static void batadv_v_elp_periodic_work(struct work_struct *work)  in batadv_v_elp_periodic_work() argument
     259  bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);  in batadv_v_elp_periodic_work()
|
D | types.h |
     674  struct delayed_work work;  member
     706  struct delayed_work work;  member
     776  struct delayed_work work;  member
     830  struct delayed_work work;  member
     852  struct delayed_work work;  member
    1656  struct work_struct work;  member
|
/net/9p/ |
D | trans_xen.c |
      71  struct work_struct work;  member
     193  static void p9_xen_response(struct work_struct *work)  in p9_xen_response() argument
     202  ring = container_of(work, struct xen_9pfs_dataring, work);  in p9_xen_response()
     263  schedule_work(&ring->work);  in xen_9pfs_front_event_handler()
     334  INIT_WORK(&ring->work, p9_xen_response);  in xen_9pfs_front_alloc_dataring()
|
/net/wireless/ |
D | core.c |
     289  static void cfg80211_rfkill_sync_work(struct work_struct *work)  in cfg80211_rfkill_sync_work() argument
     293  rdev = container_of(work, struct cfg80211_registered_device, rfkill_sync);  in cfg80211_rfkill_sync_work()
     297  static void cfg80211_event_work(struct work_struct *work)  in cfg80211_event_work() argument
     301  rdev = container_of(work, struct cfg80211_registered_device,  in cfg80211_event_work()
     321  static void cfg80211_destroy_iface_wk(struct work_struct *work)  in cfg80211_destroy_iface_wk() argument
     325  rdev = container_of(work, struct cfg80211_registered_device,  in cfg80211_destroy_iface_wk()
     333  static void cfg80211_sched_scan_stop_wk(struct work_struct *work)  in cfg80211_sched_scan_stop_wk() argument
     338  rdev = container_of(work, struct cfg80211_registered_device,  in cfg80211_sched_scan_stop_wk()
     349  static void cfg80211_propagate_radar_detect_wk(struct work_struct *work)  in cfg80211_propagate_radar_detect_wk() argument
     353  rdev = container_of(work, struct cfg80211_registered_device,  in cfg80211_propagate_radar_detect_wk()
     [all …]
|
/net/tipc/ |
D | server.c |
      86  static void tipc_recv_work(struct work_struct *work);
      87  static void tipc_send_work(struct work_struct *work);
     541  static void tipc_recv_work(struct work_struct *work)  in tipc_recv_work() argument
     543  struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);  in tipc_recv_work()
     559  static void tipc_send_work(struct work_struct *work)  in tipc_send_work() argument
     561  struct tipc_conn *con = container_of(work, struct tipc_conn, swork);  in tipc_send_work()
|
/net/openvswitch/ |
D | dp_notify.c |
      47  void ovs_dp_notify_wq(struct work_struct *work)  in ovs_dp_notify_wq() argument
      49  struct ovs_net *ovs_net = container_of(work, struct ovs_net, dp_notify_work);  in ovs_dp_notify_wq()
|