/net/sched/ |
D | cls_cgroup.c
     22  struct rcu_work rwork;  member
     70  rwork);  in cls_cgroup_destroy_work()
    123  tcf_queue_work(&head->rwork, cls_cgroup_destroy_work);  in cls_cgroup_change()
    140  tcf_queue_work(&head->rwork, cls_cgroup_destroy_work);  in cls_cgroup_destroy()
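The cls_*.c entries below all follow the deferred-free pattern visible here in cls_cgroup.c: the filter (or head) object embeds a struct rcu_work, the delete/replace/destroy paths hand it to tcf_queue_work() together with a destroy callback, and the callback recovers the object with container_of(to_rcu_work(work), ...) once an RCU grace period has passed and the work runs. A minimal sketch of that shape, with a hypothetical foo_filter standing in for the per-classifier structure (the kfree() body is illustrative; the real callbacks also release extensions, take rtnl_lock where needed, etc.):

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/pkt_cls.h>

struct foo_filter {			/* hypothetical classifier state */
	u32 handle;
	struct rcu_work rwork;		/* embedded deferred-free handle */
};

/* Runs from the tc filter workqueue after an RCU grace period. */
static void foo_delete_filter_work(struct work_struct *work)
{
	struct foo_filter *f = container_of(to_rcu_work(work),
					    struct foo_filter, rwork);

	kfree(f);			/* no RCU reader can still see f */
}

/* Delete path: unpublish the filter first, then defer the free. */
static void foo_delete(struct foo_filter *f)
{
	/* ... list_del_rcu() / rcu_assign_pointer() to unlink f ... */
	tcf_queue_work(&f->rwork, foo_delete_filter_work);
}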
|
D | cls_basic.c
     36  struct rcu_work rwork;  member
    100  rwork);  in basic_delete_filter_work()
    117  tcf_queue_work(&f->rwork, basic_delete_filter_work);  in basic_destroy()
    135  tcf_queue_work(&f->rwork, basic_delete_filter_work);  in basic_delete()
    234  tcf_queue_work(&fold->rwork, basic_delete_filter_work);  in basic_change()
|
D | cls_fw.c
     40  struct rcu_work rwork;  member
    124  rwork);  in fw_delete_filter_work()
    146  tcf_queue_work(&f->rwork, fw_delete_filter_work);  in fw_destroy()
    175  tcf_queue_work(&f->rwork, fw_delete_filter_work);  in fw_delete()
    295  tcf_queue_work(&f->rwork, fw_delete_filter_work);  in fw_change()
|
D | cls_route.c
     56  struct rcu_work rwork;  member
    264  rwork);  in route4_delete_filter_work()
    272  tcf_queue_work(&f->rwork, route4_delete_filter_work);  in route4_queue_work()
    344  tcf_queue_work(&f->rwork, route4_delete_filter_work);  in route4_delete()
    559  tcf_queue_work(&fold->rwork, route4_delete_filter_work);  in route4_change()
|
D | cls_flow.c
     56  struct rcu_work rwork;  member
    381  rwork);  in flow_destroy_filter_work()
    554  tcf_queue_work(&fold->rwork, flow_destroy_filter_work);  in flow_change()
    574  tcf_queue_work(&f->rwork, flow_destroy_filter_work);  in flow_delete()
    600  tcf_queue_work(&f->rwork, flow_destroy_filter_work);  in flow_destroy()
|
D | cls_matchall.c
     23  struct rcu_work rwork;  member
     60  rwork);  in mall_destroy_work()
    142  tcf_queue_work(&head->rwork, mall_destroy_work);  in mall_destroy()
|
D | cls_bpf.c
     50  struct rcu_work rwork;  member
    278  rwork);  in cls_bpf_delete_prog_work()
    294  tcf_queue_work(&prog->rwork, cls_bpf_delete_prog_work);  in __cls_bpf_delete()
    513  tcf_queue_work(&oldprog->rwork, cls_bpf_delete_prog_work);  in cls_bpf_change()
|
D | cls_u32.c
     62  struct rcu_work rwork;  member
    425  rwork);  in u32_delete_key_work()
    442  rwork);  in u32_delete_key_freepf_work()
    466  tcf_queue_work(&key->rwork, u32_delete_key_freepf_work);  in u32_delete_key()
    588  tcf_queue_work(&n->rwork, u32_delete_key_freepf_work);  in u32_clear_hnode()
    922  tcf_queue_work(&n->rwork, u32_delete_key_work);  in u32_change()
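cls_u32 is the one classifier above that pairs two different callbacks with the same rwork member; judging by the names and call sites, u32_delete_key_freepf_work appears to be used on the paths that must also free the key's per-cpu counters, u32_delete_key_work on the path that must not. The mechanism is unchanged, only the work_func_t handed to tcf_queue_work() differs. A hypothetical flag-based dispatch to illustrate (the real file simply picks the callback per call site, and its struct tc_u_knode has many more fields):

#include <linux/workqueue.h>
#include <net/pkt_cls.h>

struct tc_u_knode {			/* sketch: real cls_u32 key, fields omitted */
	/* ... sel, handle, per-cpu counters ... */
	struct rcu_work rwork;
};

/* Real callbacks live in cls_u32.c; declarations here only so the sketch stands alone. */
void u32_delete_key_work(struct work_struct *work);
void u32_delete_key_freepf_work(struct work_struct *work);

static void u32_queue_delete(struct tc_u_knode *n, bool free_pf)
{
	tcf_queue_work(&n->rwork, free_pf ? u32_delete_key_freepf_work
					  : u32_delete_key_work);
}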
|
D | cls_flower.c
     90  struct rcu_work rwork;  member
    107  struct rcu_work rwork;  member
    123  struct rcu_work rwork;  member
    375  struct fl_flow_mask, rwork);  in fl_mask_free_work()
    383  struct fl_flow_mask, rwork);  in fl_uninit_mask_free_work()
    399  tcf_queue_work(&mask->rwork, fl_mask_free_work);  in fl_mask_put()
    424  struct cls_fl_filter, rwork);  in fl_destroy_filter_work()
    519  tcf_queue_work(&f->rwork, fl_destroy_filter_work);  in __fl_put()
    571  rwork);  in fl_destroy_sleepable()
    596  tcf_queue_work(&head->rwork, fl_destroy_sleepable);  in fl_destroy()
    [all …]
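cls_flower embeds an rwork in three separate objects, per the three "member" hits above: the mask, the filter and the head structures, each paired with its own callback (fl_mask_free_work()/fl_uninit_mask_free_work(), fl_destroy_filter_work(), and fl_destroy_sleepable() respectively). The per-object shape is the same as in the other classifiers; a sketch for the mask case, with struct bodies reduced to placeholders:

#include <linux/workqueue.h>
#include <linux/slab.h>
#include <net/pkt_cls.h>

/* Three independent objects, each with its own rwork and destroy callback. */
struct fl_flow_mask  { /* ... */ struct rcu_work rwork; };	/* fl_mask_free_work() */
struct cls_fl_filter { /* ... */ struct rcu_work rwork; };	/* fl_destroy_filter_work() */
struct cls_fl_head   { /* ... */ struct rcu_work rwork; };	/* fl_destroy_sleepable() */

static void fl_mask_free_work(struct work_struct *work)
{
	struct fl_flow_mask *mask = container_of(to_rcu_work(work),
						 struct fl_flow_mask, rwork);

	kfree(mask);	/* the real callback also tears down the mask's hash table */
}

/* Mask refcount hits zero: defer the free, as fl_mask_put() does above. */
static void fl_mask_put_sketch(struct fl_flow_mask *mask)
{
	tcf_queue_work(&mask->rwork, fl_mask_free_work);
}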
|
D | act_ct.c
     44  struct rcu_work rwork;  member
    330  rwork);  in tcf_ct_flow_table_cleanup_work()
    352  INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);  in tcf_ct_flow_table_put()
    353  queue_rcu_work(act_ct_wq, &ct_ft->rwork);  in tcf_ct_flow_table_put()
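act_ct is the only net/sched entry here that does not go through tcf_queue_work(): as the two hits in tcf_ct_flow_table_put() show, it arms and queues the rcu_work itself on its private act_ct_wq workqueue. Reconstructed from those snippets; the refcount handling and struct contents are assumptions, not taken from the file:

#include <linux/workqueue.h>
#include <linux/slab.h>

static struct workqueue_struct *act_ct_wq;	/* act_ct.c's module-level workqueue */

struct tcf_ct_flow_table {			/* sketch: real fields omitted */
	/* ... flow table, refcount ... */
	struct rcu_work rwork;
};

static void tcf_ct_flow_table_cleanup_work(struct work_struct *work)
{
	struct tcf_ct_flow_table *ct_ft = container_of(to_rcu_work(work),
						       struct tcf_ct_flow_table,
						       rwork);

	kfree(ct_ft);	/* safe: no RCU reader can still reach the flow table */
}

static void tcf_ct_flow_table_put_sketch(struct tcf_ct_flow_table *ct_ft)
{
	/* last user gone: defer teardown to act_ct_wq after a grace period */
	INIT_RCU_WORK(&ct_ft->rwork, tcf_ct_flow_table_cleanup_work);
	queue_rcu_work(act_ct_wq, &ct_ft->rwork);
}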
|
D | cls_api.c
    202  bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)  in tcf_queue_work()  argument
    204  INIT_RCU_WORK(rwork, func);  in tcf_queue_work()
    205  return queue_rcu_work(tc_filter_wq, rwork);  in tcf_queue_work()
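Flattened above are the three lines of the helper that every classifier in this listing calls. Cleaned up from those snippets, cls_api.c's tcf_queue_work() just binds the caller's callback to the rcu_work and queues it on the shared tc_filter_wq workqueue:

#include <linux/workqueue.h>
#include <net/pkt_cls.h>

static struct workqueue_struct *tc_filter_wq;	/* cls_api.c's workqueue, allocated at init */

bool tcf_queue_work(struct rcu_work *rwork, work_func_t func)
{
	INIT_RCU_WORK(rwork, func);
	return queue_rcu_work(tc_filter_wq, rwork);
}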
|
/net/tipc/ |
D | topsrv.c
    100  struct work_struct rwork;  member
    194  INIT_WORK(&con->rwork, tipc_conn_recv_work);  in tipc_conn_alloc()
    419  struct tipc_conn *con = container_of(work, struct tipc_conn, rwork);  in tipc_conn_recv_work()
    446  if (!queue_work(con->server->rcv_wq, &con->rwork))  in tipc_conn_data_ready()
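The TIPC hit is different in kind: topsrv.c's rwork is a plain struct work_struct, not an rcu_work. It is the per-connection receive work, initialized once when the connection is allocated and kicked from the socket's data-ready callback onto the server's rcv_wq. A sketch of that flow, using stand-in struct names with only the fields the snippets above mention:

#include <linux/workqueue.h>

struct tipc_topsrv_sketch {
	struct workqueue_struct *rcv_wq;	/* receive workqueue owned by the server */
};

struct tipc_conn_sketch {
	struct tipc_topsrv_sketch *server;
	struct work_struct rwork;		/* receive work, not RCU-deferred */
};

static void tipc_conn_recv_work(struct work_struct *work)
{
	struct tipc_conn_sketch *con = container_of(work, struct tipc_conn_sketch, rwork);

	/* ... drain and dispatch messages queued on the connection's socket ... */
	(void)con;
}

/* At connection setup, as tipc_conn_alloc() does above. */
static void tipc_conn_init_rwork(struct tipc_conn_sketch *con)
{
	INIT_WORK(&con->rwork, tipc_conn_recv_work);
}

/* Socket data-ready callback: schedule the receive work if not already queued. */
static void tipc_conn_kick(struct tipc_conn_sketch *con)
{
	if (!queue_work(con->server->rcv_wq, &con->rwork))
		;	/* already pending: the queued work will see the new data */
}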
|
/net/core/ |
D | skmsg.c
    814  struct sk_psock, rwork);  in sk_psock_destroy()
    846  INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);  in sk_psock_drop()
    847  queue_rcu_work(system_wq, &psock->rwork);  in sk_psock_drop()
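Outside the tc code, sk_psock uses the raw API as well: sk_psock_drop() arms the psock's rwork with sk_psock_destroy() and queues it on the generic system_wq, so the psock is torn down only after an RCU grace period. Sketched from the three hits above; the unlink step and the struct contents are placeholders:

#include <linux/workqueue.h>
#include <linux/slab.h>

struct sk_psock_sketch {		/* stand-in for struct sk_psock */
	/* ... socket back-pointer, message queues ... */
	struct rcu_work rwork;
};

static void sk_psock_destroy(struct work_struct *work)
{
	struct sk_psock_sketch *psock = container_of(to_rcu_work(work),
						     struct sk_psock_sketch, rwork);

	/* ... purge queues, release the socket reference ... */
	kfree(psock);
}

static void sk_psock_drop_sketch(struct sk_psock_sketch *psock)
{
	/* ... unlink psock from the socket under the proper locks ... */
	INIT_RCU_WORK(&psock->rwork, sk_psock_destroy);
	queue_rcu_work(system_wq, &psock->rwork);
}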
|