/net/sched/
D | cls_u32.c |
    1202 bool add, flow_setup_cb_t *cb, void *cb_priv, in u32_reoffload_hnode() argument
    1209 cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE; in u32_reoffload_hnode()
    1215 if (err && add && tc_skip_sw(ht->flags)) in u32_reoffload_hnode()
    1222 bool add, flow_setup_cb_t *cb, void *cb_priv, in u32_reoffload_knode() argument
    1231 cls_u32.command = add ? in u32_reoffload_knode()
    1235 if (add) { in u32_reoffload_knode()
    1251 err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32, in u32_reoffload_knode()
    1260 static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, in u32_reoffload() argument
    1279 if (add && !tc_skip_hw(ht->flags)) { in u32_reoffload()
    1280 err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv, in u32_reoffload()
    [all …]
|
D | cls_matchall.c |
    285 static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, in mall_reoffload() argument
    301 cls_mall.command = add ? in mall_reoffload()
    308 if (add && tc_skip_sw(head->flags)) { in mall_reoffload()
    315 err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL, in mall_reoffload()
|
D | cls_bpf.c |
    665 static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, in cls_bpf_reoffload() argument
    682 cls_bpf.prog = add ? prog->filter : NULL; in cls_bpf_reoffload()
    683 cls_bpf.oldprog = add ? NULL : prog->filter; in cls_bpf_reoffload()
    687 err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF, in cls_bpf_reoffload()
|
D | cls_api.c |
    1436 void *cb_priv, bool add, bool offload_in_use, in tcf_block_playback_offloads() argument
    1455 err = tp->ops->reoffload(tp, add, cb, cb_priv, in tcf_block_playback_offloads()
    1457 if (err && add) in tcf_block_playback_offloads()
    1459 } else if (add && offload_in_use) { in tcf_block_playback_offloads()
    3151 u32 *flags, u32 diff, bool add) in tc_cls_offload_cnt_update() argument
    3156 if (add) { in tc_cls_offload_cnt_update()
    3374 bool add, flow_setup_cb_t *cb, in tc_setup_cb_reoffload() argument
    3381 if (add && tc_skip_sw(*flags)) in tc_setup_cb_reoffload()
    3385 add); in tc_setup_cb_reoffload()
|
D | cls_flower.c |
    1796 fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add) in fl_get_next_hw_filter() argument
    1810 if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) { in fl_get_next_hw_filter()
    1820 static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb, in fl_reoffload() argument
    1834 while ((f = fl_get_next_hw_filter(tp, f, add))) { in fl_reoffload()
    1844 cls_flower.command = add ? in fl_reoffload()
    1865 err = tc_setup_cb_reoffload(block, tp, add, cb, in fl_reoffload()
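The five classifier entries above (cls_u32, cls_matchall, cls_bpf, cls_api, cls_flower) share one pattern: when a hardware-offload callback is (un)registered, every filter is replayed with add=true (install) or add=false (remove), the flag picks a NEW vs DESTROY command, and errors only abort the replay while installing. A minimal userspace sketch of that pattern; every name below is invented for illustration, none of it is kernel API:

#include <stdbool.h>
#include <stdio.h>

enum cmd { CMD_NEW, CMD_DESTROY };

struct filter { const char *name; };

static int replay_one(const struct filter *f, bool add)
{
	/* add selects the command, as in "cls_u32.command = add ? ..." */
	enum cmd command = add ? CMD_NEW : CMD_DESTROY;

	printf("%s: %s\n", f->name,
	       command == CMD_NEW ? "install" : "remove");
	return 0;
}

static int reoffload(struct filter *filters, int n, bool add)
{
	for (int i = 0; i < n; i++) {
		int err = replay_one(&filters[i], add);

		/* errors only abort when installing, mirroring the
		 * "if (err && add)" checks in the hits above */
		if (err && add)
			return err;
	}
	return 0;
}

int main(void)
{
	struct filter f[] = { { "u32" }, { "flower" } };

	return reoffload(f, 2, true);
}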
|
D | Kconfig | 732 Say Y here to add a simple action for demonstration purposes.
|
/net/caif/ |
D | cfpkt_skbuff.c |
    299 struct sk_buff *add = pkt_to_skb(addpkt); in cfpkt_append() local
    300 u16 addlen = skb_headlen(add); in cfpkt_append()
    326 skb_put_data(dst, add->data, skb_headlen(add)); in cfpkt_append()
|
/net/netfilter/ipvs/ |
D | ip_vs_est.c |
    60 bool add = false; in ip_vs_read_cpu_stats() local
    67 if (add) { in ip_vs_read_cpu_stats()
    82 add = true; in ip_vs_read_cpu_stats()
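Here `add` is a first-pass flag: the first per-CPU sample initializes the running totals, every later one is accumulated on top. A small standalone sketch of the pattern; the struct and field names are illustrative, not the IPVS stats layout:

#include <stdbool.h>
#include <stdio.h>

struct stats { unsigned long conns, inpkts; };

static void sum_cpu_stats(struct stats *sum, const struct stats *cpu, int ncpu)
{
	bool add = false;	/* true once the first CPU is consumed */

	for (int i = 0; i < ncpu; i++) {
		if (add) {		/* later CPUs: accumulate */
			sum->conns  += cpu[i].conns;
			sum->inpkts += cpu[i].inpkts;
		} else {		/* first CPU: initialize */
			*sum = cpu[i];
		}
		add = true;
	}
}

int main(void)
{
	struct stats cpu[2] = { { 1, 10 }, { 2, 20 } }, sum = { 0, 0 };

	sum_cpu_stats(&sum, cpu, 2);
	printf("conns=%lu inpkts=%lu\n", sum.conns, sum.inpkts);
	return 0;
}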
|
D | ip_vs_ctl.c |
    859 struct ip_vs_dest_user_kern *udest, int add) in __ip_vs_update_dest() argument
    867 BUG_ON(!add && udest->af != dest->af); in __ip_vs_update_dest()
    869 if (add && udest->af != svc->af) in __ip_vs_update_dest()
    873 if (add || udest->weight != 0) in __ip_vs_update_dest()
    931 if (add) { in __ip_vs_update_dest()
|
/net/ipv4/ |
D | ip_sockglue.c |
    976 int omode, add; in do_ip_setsockopt() local
    986 add = 1; in do_ip_setsockopt()
    989 add = 0; in do_ip_setsockopt()
    1000 add = 1; in do_ip_setsockopt()
    1003 add = 0; in do_ip_setsockopt()
    1005 err = ip_mc_source(add, omode, sk, &mreqs, 0); in do_ip_setsockopt()
    1041 int omode, add; in do_ip_setsockopt() local
    1062 add = 1; in do_ip_setsockopt()
    1065 add = 0; in do_ip_setsockopt()
    1078 add = 1; in do_ip_setsockopt()
    [all …]
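In both blocks, do_ip_setsockopt() folds the four source-filter socket options into an (add, omode) pair before calling ip_mc_source(); the IPv6 entry below (ipv6_sockglue.c) does the same for ip6_mc_source(). A userspace sketch of that mapping, assuming only the standard glibc constants from <netinet/in.h>:

#include <netinet/in.h>
#include <stdio.h>

/* Map a source-filter option to (add, omode) the way do_ip_setsockopt()
 * does before calling ip_mc_source(); illustrative, not kernel code. */
static int map_source_opt(int optname, int *add, int *omode)
{
	switch (optname) {
	case IP_BLOCK_SOURCE:		/* add src to the exclude filter */
		*omode = MCAST_EXCLUDE; *add = 1; break;
	case IP_UNBLOCK_SOURCE:		/* remove src from the exclude filter */
		*omode = MCAST_EXCLUDE; *add = 0; break;
	case IP_ADD_SOURCE_MEMBERSHIP:	/* add src to the include filter */
		*omode = MCAST_INCLUDE; *add = 1; break;
	case IP_DROP_SOURCE_MEMBERSHIP:	/* remove src from the include filter */
		*omode = MCAST_INCLUDE; *add = 0; break;
	default:
		return -1;
	}
	return 0;
}

int main(void)
{
	int add, omode;

	if (!map_source_opt(IP_ADD_SOURCE_MEMBERSHIP, &add, &omode))
		printf("add=%d omode=%d\n", add, omode);
	return 0;
}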
|
D | route.c |
    2415 goto add; in __mkroute_output()
    2424 goto add; in __mkroute_output()
    2433 add: in __mkroute_output()
|
D | igmp.c |
    2301 int ip_mc_source(int add, int omode, struct sock *sk, struct in ip_mc_source() argument
    2356 if (!add) { in ip_mc_source()
|
/net/netfilter/ |
D | xt_recent.c |
    568 bool add, succ; in recent_mt_proc_write() local
    587 add = false; in recent_mt_proc_write()
    590 add = true; in recent_mt_proc_write()
    613 if (add) in recent_mt_proc_write()
    616 if (add) in recent_mt_proc_write()
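recent_mt_proc_write() derives `add` from the first character written to the proc file: '+' inserts or updates an address, '-' removes it, and '/' flushes the whole table. A standalone sketch of that command parsing; the function and buffer names are invented:

#include <stdbool.h>
#include <stdio.h>

static int parse_cmd(const char *line, bool *add, char *addr, size_t len)
{
	switch (line[0]) {
	case '+': *add = true;  break;	/* "+192.0.2.1": insert/update */
	case '-': *add = false; break;	/* "-192.0.2.1": remove */
	default:  return -1;		/* '/' flushes the table instead */
	}
	snprintf(addr, len, "%s", line + 1);
	return 0;
}

int main(void)
{
	bool add;
	char addr[64];

	if (!parse_cmd("+192.0.2.1", &add, addr, sizeof(addr)))
		printf("%s %s\n", add ? "add" : "del", addr);
	return 0;
}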
|
D | Kconfig |
    758 Using this target and match, you can add/delete and match
    1099 Workaround: activate this option and add a rule to your firewall
    1269 It will also add a "tos" match, which allows you to match packets
|
/net/ipv6/ |
D | ipv6_sockglue.c |
    722 int omode, add; in do_ipv6_setsockopt() local
    737 add = 1; in do_ipv6_setsockopt()
    740 add = 0; in do_ipv6_setsockopt()
    752 add = 1; in do_ipv6_setsockopt()
    755 add = 0; in do_ipv6_setsockopt()
    757 retv = ip6_mc_source(add, omode, sk, &greqs); in do_ipv6_setsockopt()
|
D | ip6_fib.c |
    1023 int add = (!info->nlh || in fib6_add_rt2node() local
    1148 if (!add) in fib6_add_rt2node()
    1151 add: in fib6_add_rt2node()
    1195 if (add) in fib6_add_rt2node()
    1196 goto add; in fib6_add_rt2node()
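fib6_add_rt2node() uses `add` both as a local flag (line 1023) and as a goto label (line 1151), and route.c's __mkroute_output() above uses the same label name. This is legal because C keeps labels in a namespace separate from ordinary identifiers. A tiny sketch:

#include <stdio.h>

int main(void)
{
	int add = 1;	/* e.g. the request asked to create, not replace */

	if (add)
		goto add;	/* jumps to the label, not the variable */
	printf("replace existing route\n");
	return 0;
add:
	printf("insert new route\n");
	return 0;
}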
|
D | mcast.c |
    328 int ip6_mc_source(int add, int omode, struct sock *sk, in ip6_mc_source() argument
    384 if (!add) { in ip6_mc_source()
|
/net/psample/ |
D | Kconfig | 11 Say Y here to add support for packet-sampling netlink channel
|
/net/ife/ |
D | Kconfig | 11 Say Y here to add support of IFE encapsulation protocol
|
/net/rds/ |
D | rdma.c |
    809 rm->atomic.op_m_fadd.add = args->fadd.add; in rds_cmsg_atomic()
    814 rm->atomic.op_m_fadd.add = args->m_fadd.add; in rds_cmsg_atomic()
|
D | ib.c | 286 .add = rds_ib_add_one,
|
D | rds.h | 447 uint64_t add; member
|
D | ib_send.c | 782 send->s_atomic_wr.compare_add = op->op_m_fadd.add; in rds_ib_xmit_atomic()
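Across the rds hits, `add` is the operand of a remote fetch-and-add: rds_cmsg_atomic() copies it from the control message into op_m_fadd (the rds.h member above), and rds_ib_xmit_atomic() loads it into the InfiniBand atomic work request. A local sketch of the fetch-and-add semantics themselves, using the GCC/Clang __sync builtin:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t word = 40, add = 2;
	/* atomic fetch-and-add: returns the old value, stores old + add */
	uint64_t old = __sync_fetch_and_add(&word, add);

	printf("old=%llu new=%llu\n",
	       (unsigned long long)old, (unsigned long long)word);
	return 0;
}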
|
/net/l2tp/ |
D | Kconfig | 104 interface, or add the interface to a bridge.
|
/net/smc/ |
D | smc_ib.c | 577 .add = smc_ib_add_dev,
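Here (and in rds/ib.c above) `.add` is not a flag but a callback member of struct ib_client, invoked by the InfiniBand core for each new device. A generic sketch of that designated-initializer callback pattern; the struct below is illustrative, not the kernel's definition:

#include <stddef.h>
#include <stdio.h>

struct device;

struct client {
	const char *name;
	int  (*add)(struct device *dev);	/* new device appeared */
	void (*remove)(struct device *dev);	/* device going away */
};

static int my_add_one(struct device *dev)
{
	(void)dev;
	printf("device added\n");
	return 0;
}

static struct client my_client = {
	.name = "demo",
	.add  = my_add_one,
};

int main(void)
{
	return my_client.add ? my_client.add(NULL) : 0;
}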
|