
Searched refs:add (Results 1 – 25 of 28) sorted by relevance


/net/sched/
  cls_u32.c
    1197  bool add, flow_setup_cb_t *cb, void *cb_priv,    in u32_reoffload_hnode() argument
    1204  cls_u32.command = add ? TC_CLSU32_NEW_HNODE : TC_CLSU32_DELETE_HNODE;    in u32_reoffload_hnode()
    1210  if (err && add && tc_skip_sw(ht->flags))    in u32_reoffload_hnode()
    1217  bool add, flow_setup_cb_t *cb, void *cb_priv,    in u32_reoffload_knode() argument
    1225  cls_u32.command = add ?    in u32_reoffload_knode()
    1229  if (add) {    in u32_reoffload_knode()
    1245  return tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSU32,    in u32_reoffload_knode()
    1250  static int u32_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,    in u32_reoffload() argument
    1269  if (add && !tc_skip_hw(ht->flags)) {    in u32_reoffload()
    1270  err = u32_reoffload_hnode(tp, ht, add, cb, cb_priv,    in u32_reoffload()
    [all …]
  cls_matchall.c
    284   static int mall_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,    in mall_reoffload() argument
    300   cls_mall.command = add ?    in mall_reoffload()
    307   if (add && tc_skip_sw(head->flags)) {    in mall_reoffload()
    314   err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSMATCHALL,    in mall_reoffload()
  cls_bpf.c
    657   static int cls_bpf_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,    in cls_bpf_reoffload() argument
    674   cls_bpf.prog = add ? prog->filter : NULL;    in cls_bpf_reoffload()
    675   cls_bpf.oldprog = add ? NULL : prog->filter;    in cls_bpf_reoffload()
    679   err = tc_setup_cb_reoffload(block, tp, add, cb, TC_SETUP_CLSBPF,    in cls_bpf_reoffload()
  cls_api.c
    1401  void *cb_priv, bool add, bool offload_in_use,    in tcf_block_playback_offloads() argument
    1420  err = tp->ops->reoffload(tp, add, cb, cb_priv,    in tcf_block_playback_offloads()
    1422  if (err && add)    in tcf_block_playback_offloads()
    1424  } else if (add && offload_in_use) {    in tcf_block_playback_offloads()
    3210  u32 *flags, u32 diff, bool add)    in tc_cls_offload_cnt_update() argument
    3215  if (add) {    in tc_cls_offload_cnt_update()
    3433  bool add, flow_setup_cb_t *cb,    in tc_setup_cb_reoffload() argument
    3440  if (add && tc_skip_sw(*flags))    in tc_setup_cb_reoffload()
    3444  add);    in tc_setup_cb_reoffload()
  cls_flower.c
    2236  fl_get_next_hw_filter(struct tcf_proto *tp, struct cls_fl_filter *f, bool add)    in fl_get_next_hw_filter() argument
    2250  if (!(add && f->deleted) && refcount_inc_not_zero(&f->refcnt)) {    in fl_get_next_hw_filter()
    2260  static int fl_reoffload(struct tcf_proto *tp, bool add, flow_setup_cb_t *cb,    in fl_reoffload() argument
    2274  while ((f = fl_get_next_hw_filter(tp, f, add))) {    in fl_reoffload()
    2284  cls_flower.command = add ?    in fl_reoffload()
    2304  err = tc_setup_cb_reoffload(block, tp, add, cb,    in fl_reoffload()
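
The /net/sched/ hits above are all instances of the same reoffload pattern: when a hardware callback is added to or removed from a block, tcf_block_playback_offloads() replays every filter to that callback, and the add flag selects between the NEW/ADD and DELETE/DESTROY commands (and decides whether a failure is fatal when skip_sw forbids a software fallback). Below is a minimal, self-contained sketch of that dispatch; the demo_* types and callback are invented stand-ins for illustration, not the kernel's flow_offload API.

/* Simplified model of the cls_*_reoffload() pattern: replay one filter to a
 * hardware callback, picking the command from the 'add' flag.  All types and
 * the callback are illustrative stand-ins, not kernel definitions. */
#include <stdbool.h>
#include <stdio.h>

enum demo_command { DEMO_FILTER_ADD, DEMO_FILTER_DELETE };

struct demo_offload {
    enum demo_command command;
    unsigned long cookie;        /* identifies the filter to the driver */
};

/* Stand-in for a driver's flow_setup_cb_t callback. */
static int demo_driver_cb(struct demo_offload *off, void *cb_priv)
{
    printf("driver %s: %s filter %#lx\n", (const char *)cb_priv,
           off->command == DEMO_FILTER_ADD ? "install" : "remove",
           off->cookie);
    return 0;
}

/* Mirrors the shape of the u32/matchall/bpf/flower reoffload helpers: 'add'
 * decides whether the replay installs or removes the rule, and only an
 * installation failure matters when software fallback is disallowed. */
static int demo_reoffload(unsigned long cookie, bool add, bool skip_sw,
                          void *cb_priv)
{
    struct demo_offload off = {
        .command = add ? DEMO_FILTER_ADD : DEMO_FILTER_DELETE,
        .cookie  = cookie,
    };
    int err = demo_driver_cb(&off, cb_priv);

    if (err && add && skip_sw)
        return err;              /* no software fallback, abort the replay */
    return 0;
}

int main(void)
{
    demo_reoffload(0x1234, true,  true,  "eth0");   /* callback registered */
    demo_reoffload(0x1234, false, false, "eth0");   /* callback removed    */
    return 0;
}

Compiled standalone, the sketch only prints which action would be offloaded; in the kernel the callback is a driver's flow_setup_cb_t and the replay is driven through tc_setup_cb_reoffload().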
/net/caif/
  cfpkt_skbuff.c
    299   struct sk_buff *add = pkt_to_skb(addpkt);    in cfpkt_append() local
    300   u16 addlen = skb_headlen(add);    in cfpkt_append()
    326   skb_put_data(dst, add->data, skb_headlen(add));    in cfpkt_append()

/net/netfilter/ipvs/
  ip_vs_est.c
    60    bool add = false;    in ip_vs_read_cpu_stats() local
    67    if (add) {    in ip_vs_read_cpu_stats()
    82    add = true;    in ip_vs_read_cpu_stats()
  ip_vs_ctl.c
    871   struct ip_vs_dest_user_kern *udest, int add)    in __ip_vs_update_dest() argument
    879   BUG_ON(!add && udest->af != dest->af);    in __ip_vs_update_dest()
    881   if (add && udest->af != svc->af)    in __ip_vs_update_dest()
    885   if (add || udest->weight != 0)    in __ip_vs_update_dest()
    943   if (add) {    in __ip_vs_update_dest()

/net/ipv4/
  ip_sockglue.c
    733   int omode, add, err;    in do_mcast_group_source() local
    751   add = 1;    in do_mcast_group_source()
    754   add = 0;    in do_mcast_group_source()
    767   add = 1;    in do_mcast_group_source()
    770   add = 0;    in do_mcast_group_source()
    772   return ip_mc_source(add, omode, sk, &mreqs, greqs.gsr_interface);    in do_mcast_group_source()
    1272  int omode, add;    in do_ip_setsockopt() local
    1282  add = 1;    in do_ip_setsockopt()
    1285  add = 0;    in do_ip_setsockopt()
    1296  add = 1;    in do_ip_setsockopt()
    [all …]
  route.c
    2583  goto add;    in __mkroute_output()
    2592  goto add;    in __mkroute_output()
    2601  add:    in __mkroute_output()
  igmp.c
    2306  int ip_mc_source(int add, int omode, struct sock *sk, struct    in ip_mc_source() argument
    2361  if (!add) {    in ip_mc_source()
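
In the /net/ipv4/ hits, add is simply a record of which socket option the caller used: do_ip_setsockopt() and do_mcast_group_source() set add = 1 for the join/add options and add = 0 for the drop/leave options before handing off to ip_mc_source(). Seen from userspace, the same split looks like the sketch below; the group and source addresses are examples only.

/* Userspace view of the kernel's add split: IP_ADD_SOURCE_MEMBERSHIP reaches
 * ip_mc_source() with add = 1, IP_DROP_SOURCE_MEMBERSHIP with add = 0. */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
    int sk = socket(AF_INET, SOCK_DGRAM, 0);
    struct ip_mreq_source mreq;

    if (sk < 0) {
        perror("socket");
        return 1;
    }

    memset(&mreq, 0, sizeof(mreq));
    mreq.imr_multiaddr.s_addr  = inet_addr("232.1.1.1");   /* SSM group (example) */
    mreq.imr_sourceaddr.s_addr = inet_addr("192.0.2.10");  /* source    (example) */
    mreq.imr_interface.s_addr  = htonl(INADDR_ANY);

    /* join the (source, group) pair ... */
    if (setsockopt(sk, IPPROTO_IP, IP_ADD_SOURCE_MEMBERSHIP,
                   &mreq, sizeof(mreq)) < 0)
        perror("IP_ADD_SOURCE_MEMBERSHIP");

    /* ... and leave it again */
    if (setsockopt(sk, IPPROTO_IP, IP_DROP_SOURCE_MEMBERSHIP,
                   &mreq, sizeof(mreq)) < 0)
        perror("IP_DROP_SOURCE_MEMBERSHIP");

    close(sk);
    return 0;
}

The MCAST_JOIN_SOURCE_GROUP / MCAST_LEAVE_SOURCE_GROUP pair reaches ip_mc_source() the same way, via do_mcast_group_source().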
/net/netfilter/
  xt_recent.c
    568   bool add, succ;    in recent_mt_proc_write() local
    587   add = false;    in recent_mt_proc_write()
    590   add = true;    in recent_mt_proc_write()
    613   if (add)    in recent_mt_proc_write()
    616   if (add)    in recent_mt_proc_write()
  Kconfig
    785   Using this target and match, you can add/delete and match
    1125  Workaround: activate this option and add a rule to your firewall
    1295  It will also add a "tos" match, which allows you to match packets
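
The recent_mt_proc_write() hits above show where the add flag comes from for the xt_recent proc interface: the first character of the write selects the operation, '+' to add an address, '-' to delete it (a lone '/' flushes the list). A small sketch of driving that interface from C follows; it assumes a recent list named DEFAULT already exists, i.e. some iptables rule uses -m recent --name DEFAULT.

/* Write "+addr" or "-addr" to /proc/net/xt_recent/<list>, which is what
 * recent_mt_proc_write() parses into its add flag. */
#include <stdio.h>

static int recent_list_update(const char *list, const char *addr, int add)
{
    char path[128];
    FILE *f;

    snprintf(path, sizeof(path), "/proc/net/xt_recent/%s", list);
    f = fopen(path, "w");
    if (!f) {
        perror(path);
        return -1;
    }
    /* leading '+' adds the address, leading '-' removes it */
    fprintf(f, "%c%s\n", add ? '+' : '-', addr);
    fclose(f);
    return 0;
}

int main(void)
{
    recent_list_update("DEFAULT", "192.0.2.1", 1);   /* add    */
    recent_list_update("DEFAULT", "192.0.2.1", 0);   /* remove */
    return 0;
}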
/net/bridge/
  br_mrp_switchdev.c
    9     const struct switchdev_obj *obj, bool add)    in br_mrp_switchdev_port_obj() argument
    13    if (add)    in br_mrp_switchdev_port_obj()

/net/psample/
  Kconfig
    10    Say Y here to add support for packet-sampling netlink channel

/net/ipv6/
  ipv6_sockglue.c
    166   int omode, add;    in do_ipv6_mcast_group_source() local
    179   add = 1;    in do_ipv6_mcast_group_source()
    182   add = 0;    in do_ipv6_mcast_group_source()
    195   add = 1;    in do_ipv6_mcast_group_source()
    198   add = 0;    in do_ipv6_mcast_group_source()
    200   return ip6_mc_source(add, omode, sk, &greqs);    in do_ipv6_mcast_group_source()
  ip6_fib.c
    1077  int add = (!info->nlh ||    in fib6_add_rt2node() local
    1205  if (!add)    in fib6_add_rt2node()
    1208  add:    in fib6_add_rt2node()
    1263  if (add)    in fib6_add_rt2node()
    1264  goto add;    in fib6_add_rt2node()
  mcast.c
    361   int ip6_mc_source(int add, int omode, struct sock *sk,    in ip6_mc_source() argument
    411   if (!add) {    in ip6_mc_source()
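
do_ipv6_mcast_group_source() mirrors the IPv4 path: the MCAST_JOIN_SOURCE_GROUP family of options sets add before calling ip6_mc_source(). A userspace sketch of the corresponding setsockopt() calls is below; the group, source address and interface name are examples only.

/* Join and leave a source-specific IPv6 group; the join maps to add = 1 and
 * the leave to add = 0 in ip6_mc_source(). */
#define _GNU_SOURCE
#include <arpa/inet.h>
#include <net/if.h>
#include <netinet/in.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>

static void fill_sa6(struct sockaddr_storage *ss, const char *addr)
{
    struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)ss;

    memset(ss, 0, sizeof(*ss));
    sin6->sin6_family = AF_INET6;
    inet_pton(AF_INET6, addr, &sin6->sin6_addr);
}

int main(void)
{
    int sk = socket(AF_INET6, SOCK_DGRAM, 0);
    struct group_source_req gsr;

    if (sk < 0) {
        perror("socket");
        return 1;
    }

    memset(&gsr, 0, sizeof(gsr));
    gsr.gsr_interface = if_nametoindex("eth0");      /* example interface */
    fill_sa6(&gsr.gsr_group,  "ff3e::8000:1");       /* SSM group         */
    fill_sa6(&gsr.gsr_source, "2001:db8::10");       /* allowed source    */

    if (setsockopt(sk, IPPROTO_IPV6, MCAST_JOIN_SOURCE_GROUP,
                   &gsr, sizeof(gsr)) < 0)
        perror("MCAST_JOIN_SOURCE_GROUP");

    if (setsockopt(sk, IPPROTO_IPV6, MCAST_LEAVE_SOURCE_GROUP,
                   &gsr, sizeof(gsr)) < 0)
        perror("MCAST_LEAVE_SOURCE_GROUP");

    close(sk);
    return 0;
}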
/net/ife/
  Kconfig
    10    Say Y here to add support of IFE encapsulation protocol

/net/rds/
  rdma.c
    882   rm->atomic.op_m_fadd.add = args->fadd.add;    in rds_cmsg_atomic()
    887   rm->atomic.op_m_fadd.add = args->m_fadd.add;    in rds_cmsg_atomic()
  ib.c
    290   .add = rds_ib_add_one,
  rds.h
    438   uint64_t add;    member
  ib_send.c
    795   send->s_atomic_wr.compare_add = op->op_m_fadd.add;    in rds_ib_xmit_atomic()

/net/l2tp/
  Kconfig
    104   interface, or add the interface to a bridge.

/net/smc/
  smc_ib.c
    839   .add = smc_ib_add_dev,
