
Searched refs:rule (Results 1 – 25 of 29) sorted by relevance

/net/core/
fib_rules.c
37 bool fib_rule_matchall(const struct fib_rule *rule) in fib_rule_matchall() argument
39 if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id || in fib_rule_matchall()
40 rule->flags) in fib_rule_matchall()
42 if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1) in fib_rule_matchall()
44 if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) || in fib_rule_matchall()
45 !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end)) in fib_rule_matchall()
47 if (fib_rule_port_range_set(&rule->sport_range)) in fib_rule_matchall()
49 if (fib_rule_port_range_set(&rule->dport_range)) in fib_rule_matchall()
86 struct fib_rule *rule; in fib_default_rule_pref() local
91 rule = list_entry(pos->next, struct fib_rule, list); in fib_default_rule_pref()
[all …]
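
The hits above show only the conditions inside fib_rule_matchall(); a minimal reconstruction sketch follows, assuming each matched check simply returns false and that the function returns true once every selector is found unset (the return lines are not part of the search output).

#include <net/fib_rules.h>

/* Reconstruction sketch: a FIB rule is "match all" only when no selector
 * (interfaces, mark, tunnel id, flags, suppressors, uid range, port ranges)
 * has been configured on it. */
bool fib_rule_matchall(const struct fib_rule *rule)
{
	if (rule->iifindex || rule->oifindex || rule->mark || rule->tun_id ||
	    rule->flags)
		return false;

	if (rule->suppress_ifgroup != -1 || rule->suppress_prefixlen != -1)
		return false;

	if (!uid_eq(rule->uid_range.start, fib_kuid_range_unset.start) ||
	    !uid_eq(rule->uid_range.end, fib_kuid_range_unset.end))
		return false;

	if (fib_rule_port_range_set(&rule->sport_range))
		return false;

	if (fib_rule_port_range_set(&rule->dport_range))
		return false;

	return true;
}
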
flow_offload.c
11 struct flow_rule *rule; in flow_rule_alloc() local
14 rule = kzalloc(struct_size(rule, action.entries, num_actions), in flow_rule_alloc()
16 if (!rule) in flow_rule_alloc()
19 rule->action.num_entries = num_actions; in flow_rule_alloc()
24 rule->action.entries[i].hw_stats = FLOW_ACTION_HW_STATS_DONT_CARE; in flow_rule_alloc()
26 return rule; in flow_rule_alloc()
37 void flow_rule_match_meta(const struct flow_rule *rule, in flow_rule_match_meta() argument
40 FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_META, out); in flow_rule_match_meta()
44 void flow_rule_match_basic(const struct flow_rule *rule, in flow_rule_match_basic() argument
47 FLOW_DISSECTOR_MATCH(rule, FLOW_DISSECTOR_KEY_BASIC, out); in flow_rule_match_basic()
[all …]
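
flow_rule_alloc() and the flow_rule_match_*() helpers above are the producer and consumer halves of the flow offload API; a hedged sketch of the consumer side follows. The callback name example_setup_cls and the decision to handle only FLOW_DISSECTOR_KEY_BASIC are illustrative assumptions, not taken from the search hits.

#include <net/pkt_cls.h>
#include <net/flow_offload.h>

/* Sketch of a driver-side classifier offload callback consuming a
 * struct flow_rule built by the core (e.g. via flow_rule_alloc()). */
static int example_setup_cls(struct flow_cls_offload *cls)
{
	struct flow_rule *rule = flow_cls_offload_flow_rule(cls);
	struct flow_action_entry *act;
	struct flow_match_basic basic;
	int i;

	if (flow_rule_match_key(rule, FLOW_DISSECTOR_KEY_BASIC)) {
		flow_rule_match_basic(rule, &basic);
		/* basic.key / basic.mask now point into the rule's match data */
	}

	flow_action_for_each(i, act, &rule->action) {
		/* translate each action entry (act->id, act->hw_stats, ...)
		 * into hardware state here */
	}

	return 0;
}
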
/net/ipv4/
fib_rules.c
48 static bool fib4_rule_matchall(const struct fib_rule *rule) in fib4_rule_matchall() argument
50 struct fib4_rule *r = container_of(rule, struct fib4_rule, common); in fib4_rule_matchall()
54 return fib_rule_matchall(rule); in fib4_rule_matchall()
57 bool fib4_rule_default(const struct fib_rule *rule) in fib4_rule_default() argument
59 if (!fib4_rule_matchall(rule) || rule->action != FR_ACT_TO_TBL || in fib4_rule_default()
60 rule->l3mdev) in fib4_rule_default()
62 if (rule->table != RT_TABLE_LOCAL && rule->table != RT_TABLE_MAIN && in fib4_rule_default()
63 rule->table != RT_TABLE_DEFAULT) in fib4_rule_default()
94 if (arg.rule) in __fib_lookup()
95 res->tclassid = ((struct fib4_rule *)arg.rule)->tclassid; in __fib_lookup()
[all …]
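
As with fib_rule_matchall() above, only the conditions of fib4_rule_default() appear in the hits; a reconstruction sketch with the assumed early returns filled in looks roughly like this.

#include <net/ip_fib.h>

/* Reconstruction sketch: an IPv4 rule counts as a "default" rule when it
 * matches everything, jumps to a table, has no l3mdev binding, and targets
 * one of the local/main/default tables. */
bool fib4_rule_default(const struct fib_rule *rule)
{
	if (!fib4_rule_matchall(rule) || rule->action != FR_ACT_TO_TBL ||
	    rule->l3mdev)
		return false;

	if (rule->table != RT_TABLE_LOCAL && rule->table != RT_TABLE_MAIN &&
	    rule->table != RT_TABLE_DEFAULT)
		return false;

	return true;
}
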
ipmr.c
166 static int ipmr_rule_action(struct fib_rule *rule, struct flowi *flp, in ipmr_rule_action() argument
172 switch (rule->action) { in ipmr_rule_action()
184 arg->table = fib_rule_get_table(rule, arg); in ipmr_rule_action()
186 mrt = ipmr_get_table(rule->fr_net, arg->table); in ipmr_rule_action()
193 static int ipmr_rule_match(struct fib_rule *rule, struct flowi *fl, int flags) in ipmr_rule_match() argument
202 static int ipmr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, in ipmr_rule_configure() argument
209 static int ipmr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, in ipmr_rule_compare() argument
215 static int ipmr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, in ipmr_rule_fill() argument
296 bool ipmr_rule_default(const struct fib_rule *rule) in ipmr_rule_default() argument
298 return fib_rule_matchall(rule) && rule->table == RT_TABLE_DEFAULT; in ipmr_rule_default()
[all …]
/net/ipv6/
fib6_rules.c
31 static bool fib6_rule_matchall(const struct fib_rule *rule) in fib6_rule_matchall() argument
33 struct fib6_rule *r = container_of(rule, struct fib6_rule, common); in fib6_rule_matchall()
37 return fib_rule_matchall(rule); in fib6_rule_matchall()
40 bool fib6_rule_default(const struct fib_rule *rule) in fib6_rule_default() argument
42 if (!fib6_rule_matchall(rule) || rule->action != FR_ACT_TO_TBL || in fib6_rule_default()
43 rule->l3mdev) in fib6_rule_default()
45 if (rule->table != RT6_TABLE_LOCAL && rule->table != RT6_TABLE_MAIN) in fib6_rule_default()
132 static int fib6_rule_saddr(struct net *net, struct fib_rule *rule, int flags, in fib6_rule_saddr() argument
135 struct fib6_rule *r = (struct fib6_rule *)rule; in fib6_rule_saddr()
140 if ((rule->flags & FIB_RULE_FIND_SADDR) && in fib6_rule_saddr()
[all …]
ip6mr.c
153 static int ip6mr_rule_action(struct fib_rule *rule, struct flowi *flp, in ip6mr_rule_action() argument
159 switch (rule->action) { in ip6mr_rule_action()
171 arg->table = fib_rule_get_table(rule, arg); in ip6mr_rule_action()
173 mrt = ip6mr_get_table(rule->fr_net, arg->table); in ip6mr_rule_action()
180 static int ip6mr_rule_match(struct fib_rule *rule, struct flowi *flp, int flags) in ip6mr_rule_match() argument
189 static int ip6mr_rule_configure(struct fib_rule *rule, struct sk_buff *skb, in ip6mr_rule_configure() argument
196 static int ip6mr_rule_compare(struct fib_rule *rule, struct fib_rule_hdr *frh, in ip6mr_rule_compare() argument
202 static int ip6mr_rule_fill(struct fib_rule *rule, struct sk_buff *skb, in ip6mr_rule_fill() argument
283 bool ip6mr_rule_default(const struct fib_rule *rule) in ip6mr_rule_default() argument
285 return fib_rule_matchall(rule) && rule->action == FR_ACT_TO_TBL && in ip6mr_rule_default()
[all …]
/net/netfilter/
nf_tables_offload.c
18 flow->rule = flow_rule_alloc(num_actions); in nft_flow_rule_alloc()
19 if (!flow->rule) { in nft_flow_rule_alloc()
24 flow->rule->match.dissector = &flow->match.dissector; in nft_flow_rule_alloc()
25 flow->rule->match.mask = &flow->match.mask; in nft_flow_rule_alloc()
26 flow->rule->match.key = &flow->match.key; in nft_flow_rule_alloc()
88 const struct nft_rule *rule) in nft_flow_rule_create() argument
95 expr = nft_expr_first(rule); in nft_flow_rule_create()
96 while (nft_expr_more(rule, expr)) { in nft_flow_rule_create()
111 expr = nft_expr_first(rule); in nft_flow_rule_create()
121 while (nft_expr_more(rule, expr)) { in nft_flow_rule_create()
[all …]
nf_tables_core.c
41 const struct nft_rule *rule, in nft_trace_packet() argument
45 info->rule = rule; in nft_trace_packet()
105 const struct nft_rule *rule, in nft_trace_verdict() argument
109 info->rule = rule; in nft_trace_verdict()
207 const struct nft_rule *rule; in nft_do_chain() local
225 rule = *rules; in nft_do_chain()
228 rule = *rules; in nft_do_chain()
229 nft_rule_for_each_expr(expr, last, rule) { in nft_do_chain()
249 nft_trace_packet(&info, chain, rule, in nft_do_chain()
256 nft_trace_verdict(&info, chain, rule, &regs); in nft_do_chain()
nft_immediate.c
102 struct nft_rule *rule; in nft_immediate_activate() local
115 list_for_each_entry(rule, &chain->rules, list) in nft_immediate_activate()
116 nft_rule_expr_activate(&chain_ctx, rule); in nft_immediate_activate()
133 struct nft_rule *rule; in nft_immediate_chain_deactivate() local
138 list_for_each_entry(rule, &chain->rules, list) in nft_immediate_chain_deactivate()
139 nft_rule_expr_deactivate(&chain_ctx, rule, phase); in nft_immediate_chain_deactivate()
191 struct nft_rule *rule, *n; in nft_immediate_destroy() local
219 list_for_each_entry_safe(rule, n, &chain->rules, list) { in nft_immediate_destroy()
221 list_del(&rule->list); in nft_immediate_destroy()
222 nf_tables_rule_destroy(&chain_ctx, rule); in nft_immediate_destroy()
[all …]
nf_tables_api.c
456 void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule) in nft_rule_expr_activate() argument
460 expr = nft_expr_first(rule); in nft_rule_expr_activate()
461 while (nft_expr_more(rule, expr)) { in nft_rule_expr_activate()
469 void nft_rule_expr_deactivate(const struct nft_ctx *ctx, struct nft_rule *rule, in nft_rule_expr_deactivate() argument
474 expr = nft_expr_first(rule); in nft_rule_expr_deactivate()
475 while (nft_expr_more(rule, expr)) { in nft_rule_expr_deactivate()
484 nf_tables_delrule_deactivate(struct nft_ctx *ctx, struct nft_rule *rule) in nf_tables_delrule_deactivate() argument
487 if (nft_is_active_next(ctx->net, rule)) { in nf_tables_delrule_deactivate()
488 nft_deactivate_next(ctx->net, rule); in nf_tables_delrule_deactivate()
496 struct nft_rule *rule) in nft_trans_rule_add() argument
[all …]
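
The nft_rule_expr_activate()/nft_rule_expr_deactivate() hits above all follow the same expression walk; a hedged reconstruction of the activate side is sketched below, with the ops->activate call assumed from the nft expression API rather than taken from the matched lines.

#include <net/netfilter/nf_tables.h>

/* Reconstruction sketch: walk every expression embedded in the rule and
 * invoke its activate hook if one is provided. */
void nft_rule_expr_activate(const struct nft_ctx *ctx, struct nft_rule *rule)
{
	struct nft_expr *expr;

	expr = nft_expr_first(rule);
	while (nft_expr_more(rule, expr)) {
		if (expr->ops->activate)
			expr->ops->activate(ctx, expr);

		expr = nft_expr_next(expr);
	}
}
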
nf_flow_table_offload.c
207 int i = flow_rule->rule->action.num_entries++; in flow_action_entry_next()
209 return &flow_rule->rule->action.entries[i]; in flow_action_entry_next()
736 flow_rule->rule = flow_rule_alloc(NF_FLOW_RULE_ACTION_MAX); in nf_flow_offload_rule_alloc()
737 if (!flow_rule->rule) in nf_flow_offload_rule_alloc()
740 flow_rule->rule->match.dissector = &flow_rule->match.dissector; in nf_flow_offload_rule_alloc()
741 flow_rule->rule->match.mask = &flow_rule->match.mask; in nf_flow_offload_rule_alloc()
742 flow_rule->rule->match.key = &flow_rule->match.key; in nf_flow_offload_rule_alloc()
753 flow_rule->rule->action.num_entries = 0; in nf_flow_offload_rule_alloc()
760 kfree(flow_rule->rule); in nf_flow_offload_rule_alloc()
772 for (i = 0; i < flow_rule->rule->action.num_entries; i++) { in __nf_flow_offload_destroy()
[all …]
nf_tables_trace.c
145 if (!info->rule) in nf_trace_fill_rule_info()
158 cpu_to_be64(info->rule->handle), in nf_trace_fill_rule_info()
nf_dup_netdev.c
67 entry = &flow->rule->action.entries[ctx->num_actions++]; in nft_fwd_dup_netdev_offload()
nft_set_pipapo.c
697 static void pipapo_bucket_set(struct nft_pipapo_field *f, int rule, int group, in pipapo_bucket_set() argument
706 __set_bit(rule, pos); in pipapo_bucket_set()
905 int rule = f->rules, group, ret, bit_offset = 0; in pipapo_insert() local
926 pipapo_bucket_set(f, rule, group, v); in pipapo_insert()
930 pipapo_bucket_set(f, rule, group, i); in pipapo_insert()
937 pipapo_bucket_set(f, rule, group, i); in pipapo_insert()
Kconfig
467 rule-set. It also comes with the generic set infrastructure that
511 include packet and byte counters in a rule.
520 ratelimit rule matchings per connections.
532 ratelimit rule matchings.
918 resets the timer associated with label specified when the rule is
1091 will log every rule which match the packets as those traverse
1126 Workaround: activate this option and add a rule to your firewall
1333 with a single rule.
1405 limit matching allows you to control the rate at which a rule can be
1434 a series of source or destination ports: normally a rule can only
/net/sched/
cls_matchall.c
92 cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts)); in mall_replace_hw_filter()
93 if (!cls_mall.rule) in mall_replace_hw_filter()
100 err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts); in mall_replace_hw_filter()
102 kfree(cls_mall.rule); in mall_replace_hw_filter()
114 tc_cleanup_flow_action(&cls_mall.rule->action); in mall_replace_hw_filter()
115 kfree(cls_mall.rule); in mall_replace_hw_filter()
295 cls_mall.rule = flow_rule_alloc(tcf_exts_num_actions(&head->exts)); in mall_reoffload()
296 if (!cls_mall.rule) in mall_reoffload()
304 err = tc_setup_flow_action(&cls_mall.rule->action, &head->exts); in mall_reoffload()
306 kfree(cls_mall.rule); in mall_reoffload()
[all …]
cls_flower.c
453 cls_flower.rule = flow_rule_alloc(tcf_exts_num_actions(&f->exts)); in fl_hw_replace_filter()
454 if (!cls_flower.rule) in fl_hw_replace_filter()
460 cls_flower.rule->match.dissector = &f->mask->dissector; in fl_hw_replace_filter()
461 cls_flower.rule->match.mask = &f->mask->key; in fl_hw_replace_filter()
462 cls_flower.rule->match.key = &f->mkey; in fl_hw_replace_filter()
465 err = tc_setup_flow_action(&cls_flower.rule->action, &f->exts); in fl_hw_replace_filter()
467 kfree(cls_flower.rule); in fl_hw_replace_filter()
477 tc_cleanup_flow_action(&cls_flower.rule->action); in fl_hw_replace_filter()
478 kfree(cls_flower.rule); in fl_hw_replace_filter()
2275 cls_flower.rule = in fl_reoffload()
[all …]
/net/wireless/
reg.c
661 static bool valid_wmm(struct fwdb_wmm_rule *rule) in valid_wmm() argument
663 struct fwdb_wmm_ac *ac = (struct fwdb_wmm_ac *)rule; in valid_wmm()
683 struct fwdb_rule *rule = (void *)(data + (rule_ptr << 2)); in valid_rule() local
685 if ((u8 *)rule + sizeof(rule->len) > data + size) in valid_rule()
689 if (rule->len < offsetofend(struct fwdb_rule, max_bw)) in valid_rule()
691 if (rule->len >= offsetofend(struct fwdb_rule, wmm_ptr)) { in valid_rule()
692 u32 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; in valid_rule()
881 const struct fwdb_rule *rule, in set_wmm_rule() argument
888 wmm_ptr = be16_to_cpu(rule->wmm_ptr) << 2; in set_wmm_rule()
893 be32_to_cpu(rule->start), be32_to_cpu(rule->end), in set_wmm_rule()
[all …]
reg.h
63 const struct ieee80211_reg_rule *rule);
nl80211.c
1058 const struct ieee80211_reg_rule *rule) in nl80211_msg_put_wmm_rules() argument
1074 rule->wmm_rule.client[j].cw_min) || in nl80211_msg_put_wmm_rules()
1076 rule->wmm_rule.client[j].cw_max) || in nl80211_msg_put_wmm_rules()
1078 rule->wmm_rule.client[j].aifsn) || in nl80211_msg_put_wmm_rules()
1080 rule->wmm_rule.client[j].cot)) in nl80211_msg_put_wmm_rules()
1199 const struct ieee80211_reg_rule *rule = in nl80211_msg_put_channel() local
1202 if (!IS_ERR_OR_NULL(rule) && rule->has_wmm) { in nl80211_msg_put_channel()
1203 if (nl80211_msg_put_wmm_rules(msg, rule)) in nl80211_msg_put_channel()
1787 struct nl80211_coalesce_rule_support rule; in nl80211_send_coalesce() local
1792 rule.max_rules = rdev->wiphy.coalesce->n_rules; in nl80211_send_coalesce()
[all …]
/net/ceph/crush/
crush.c
139 void crush_destroy_rule(struct crush_rule *rule) in crush_destroy_rule() argument
141 kfree(rule); in crush_destroy_rule()
mapper.c
910 const struct crush_rule *rule; in crush_do_rule() local
936 rule = map->rules[ruleno]; in crush_do_rule()
939 for (step = 0; step < rule->len; step++) { in crush_do_rule()
941 const struct crush_rule_step *curstep = &rule->steps[step]; in crush_do_rule()
/net/bridge/netfilter/
Kconfig
131 the rate at which a rule can be matched. This match is the
223 This option adds the log watcher, that you can use in any rule
237 This option adds the nflog watcher, that you can use in any rule
/net/dsa/
slave.c
993 if (!flow_action_basic_hw_stats_check(&cls->rule->action, in dsa_slave_add_cls_matchall_mirred()
997 act = &cls->rule->action.entries[0]; in dsa_slave_add_cls_matchall_mirred()
1055 if (!flow_action_basic_hw_stats_check(&cls->rule->action, in dsa_slave_add_cls_matchall_police()
1067 act = &cls->rule->action.entries[0]; in dsa_slave_add_cls_matchall_police()
1097 flow_offload_has_one_action(&cls->rule->action) && in dsa_slave_add_cls_matchall()
1098 cls->rule->action.entries[0].id == FLOW_ACTION_MIRRED) in dsa_slave_add_cls_matchall()
1100 else if (flow_offload_has_one_action(&cls->rule->action) && in dsa_slave_add_cls_matchall()
1101 cls->rule->action.entries[0].id == FLOW_ACTION_POLICE) in dsa_slave_add_cls_matchall()
/net/ethtool/
ioctl.c
3033 flow->rule = flow_rule_alloc(1); in ethtool_rx_flow_rule_create()
3034 if (!flow->rule) { in ethtool_rx_flow_rule_create()
3040 flow->rule->match.dissector = &match->dissector; in ethtool_rx_flow_rule_create()
3041 flow->rule->match.mask = &match->mask; in ethtool_rx_flow_rule_create()
3042 flow->rule->match.key = &match->key; in ethtool_rx_flow_rule_create()
3243 act = &flow->rule->action.entries[0]; in ethtool_rx_flow_rule_create()
3267 kfree(flow->rule); in ethtool_rx_flow_rule_destroy()
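
ethtool_rx_flow_rule_create() above wraps an ethtool flow spec in the same struct flow_rule representation used by the other hits; a hedged usage sketch follows. The driver function name foo_add_ethtool_rule and the basic-key-only handling are illustrative assumptions.

#include <linux/err.h>
#include <linux/ethtool.h>
#include <net/flow_offload.h>

/* Sketch: translate an ethtool_rx_flow_spec into a flow_rule, inspect it,
 * then release the translation object once the hardware is programmed. */
static int foo_add_ethtool_rule(const struct ethtool_rx_flow_spec *fs)
{
	struct ethtool_rx_flow_spec_input input = { .fs = fs };
	struct ethtool_rx_flow_rule *flow;
	struct flow_match_basic basic;

	flow = ethtool_rx_flow_rule_create(&input);
	if (IS_ERR(flow))
		return PTR_ERR(flow);

	if (flow_rule_match_key(flow->rule, FLOW_DISSECTOR_KEY_BASIC))
		flow_rule_match_basic(flow->rule, &basic);

	/* ... program the hardware from flow->rule->match/action ... */

	ethtool_rx_flow_rule_destroy(flow);
	return 0;
}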
