
Searched refs:mask (Results 1 – 25 of 160) sorted by relevance


/net/sched/
cls_flower.c
97 struct fl_flow_key mask; member
112 struct fl_flow_mask *mask; member
140 static unsigned short int fl_mask_range(const struct fl_flow_mask *mask) in fl_mask_range() argument
142 return mask->range.end - mask->range.start; in fl_mask_range()
145 static void fl_mask_update_range(struct fl_flow_mask *mask) in fl_mask_update_range() argument
147 const u8 *bytes = (const u8 *) &mask->key; in fl_mask_update_range()
148 size_t size = sizeof(mask->key); in fl_mask_update_range()
164 mask->range.start = rounddown(first, sizeof(long)); in fl_mask_update_range()
165 mask->range.end = roundup(last + 1, sizeof(long)); in fl_mask_update_range()
169 const struct fl_flow_mask *mask) in fl_key_get_start() argument
[all …]
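The cls_flower hits above come from trimming a flow mask to the span of bytes that are actually non-zero, rounded out to long-word boundaries, so later hashing and comparison can skip the all-zero head and tail. A minimal userspace sketch of that idea, using a simplified stand-in struct rather than the kernel's fl_flow_mask:

#include <stddef.h>
#include <stdint.h>

struct flow_mask_sketch {
        uint8_t key[64];          /* hypothetical mask bytes */
        size_t range_start;       /* first byte worth comparing */
        size_t range_end;         /* one past the last byte worth comparing */
};

static size_t round_down(size_t x, size_t a) { return x - (x % a); }
static size_t round_up(size_t x, size_t a) { return ((x + a - 1) / a) * a; }

static void mask_update_range(struct flow_mask_sketch *mask)
{
        const uint8_t *bytes = mask->key;
        size_t size = sizeof(mask->key);
        size_t first = size, last = 0;

        for (size_t i = 0; i < size; i++) {
                if (bytes[i]) {
                        if (first == size)
                                first = i;
                        last = i;
                }
        }
        if (first == size) {                      /* mask is all zero */
                mask->range_start = mask->range_end = 0;
                return;
        }
        mask->range_start = round_down(first, sizeof(long));
        mask->range_end = round_up(last + 1, sizeof(long));
}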
act_nat.c
88 p->mask = parm->mask; in tcf_nat_init()
109 __be32 mask; in tcf_nat_act() local
121 mask = p->mask; in tcf_nat_act()
143 if (!((old_addr ^ addr) & mask)) { in tcf_nat_act()
147 new_addr &= mask; in tcf_nat_act()
148 new_addr |= addr & ~mask; in tcf_nat_act()
220 if ((old_addr ^ addr) & mask) in tcf_nat_act()
230 new_addr &= mask; in tcf_nat_act()
231 new_addr |= addr & ~mask; in tcf_nat_act()
272 opt.mask = p->mask; in tcf_nat_dump()
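The act_nat.c lines above implement a masked rewrite: bits covered by the mask come from the configured replacement address, everything else is kept from the packet's original address, and the "(old_addr ^ addr) & mask" test skips packets that do not match under the mask at all. A sketch with plain host-order values rather than the kernel's __be32:

#include <stdint.h>

static uint32_t nat_rewrite(uint32_t old_addr, uint32_t new_addr, uint32_t mask)
{
        uint32_t out = new_addr & mask;   /* masked bits from the NAT target */
        out |= old_addr & ~mask;          /* remaining bits from the packet  */
        return out;
}

/* Example: mask 0xFFFFFF00 (a /24) swaps the prefix and keeps the host byte:
 * nat_rewrite(0x0A000105, 0xC0A80100, 0xFFFFFF00) == 0xC0A80105,
 * i.e. 10.0.1.5 becomes 192.168.1.5. */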
cls_fw.c
28 u32 mask; member
59 id &= head->mask; in fw_classify()
205 u32 mask; in fw_set_parms() local
223 mask = nla_get_u32(tb[TCA_FW_MASK]); in fw_set_parms()
224 if (mask != head->mask) in fw_set_parms()
226 } else if (head->mask != 0xFFFFFFFF) in fw_set_parms()
305 u32 mask = 0xFFFFFFFF; in fw_change() local
307 mask = nla_get_u32(tb[TCA_FW_MASK]); in fw_change()
312 head->mask = mask; in fw_change()
401 if (head->mask != 0xFFFFFFFF && in fw_dump()
[all …]
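In the cls_fw hits above the firewall mark is ANDed with a per-classifier mask (0xFFFFFFFF unless TCA_FW_MASK says otherwise) before it is used as a hash key, so unrelated bits of skb->mark can be ignored. A sketch of that lookup step; the hash fold and table size here are hypothetical, not the kernel's fw_hash():

#include <stdint.h>

#define FW_HASH_BITS 8
#define FW_HASH_SIZE (1u << FW_HASH_BITS)

static unsigned int fw_hash_sketch(uint32_t id)
{
        /* fold the masked mark down to a table index */
        return (id ^ (id >> FW_HASH_BITS) ^ (id >> (2 * FW_HASH_BITS))) &
               (FW_HASH_SIZE - 1);
}

static unsigned int fw_classify_sketch(uint32_t skb_mark, uint32_t mask)
{
        uint32_t id = skb_mark & mask;    /* "id &= head->mask" above */
        return fw_hash_sketch(id);        /* bucket holding candidate filters */
}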
sch_qfq.c
759 unsigned long mask = mask_from(q->bitmaps[ER], grp->index); in qfq_calc_state() local
762 if (mask) { in qfq_calc_state()
763 next = qfq_ffs(q, mask); in qfq_calc_state()
778 static inline void qfq_move_groups(struct qfq_sched *q, unsigned long mask, in qfq_move_groups() argument
781 q->bitmaps[dst] |= q->bitmaps[src] & mask; in qfq_move_groups()
782 q->bitmaps[src] &= ~mask; in qfq_move_groups()
787 unsigned long mask = mask_from(q->bitmaps[ER], index + 1); in qfq_unblock_groups() local
790 if (mask) { in qfq_unblock_groups()
791 next = qfq_ffs(q, mask); in qfq_unblock_groups()
796 mask = (1UL << index) - 1; in qfq_unblock_groups()
[all …]
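qfq_move_groups() above moves every group selected by the mask from one state bitmap to another in two bitwise steps. A compilable sketch; the state names mirror QFQ's ER/EB/IR/IB bitmaps, but the values printed are just an example:

#include <stdio.h>

enum { ER, EB, IR, IB, QFQ_MAX_STATE };

static unsigned long bitmaps[QFQ_MAX_STATE];

static void move_groups(unsigned long mask, int src, int dst)
{
        bitmaps[dst] |= bitmaps[src] & mask;   /* copy the selected groups */
        bitmaps[src] &= ~mask;                 /* then clear them at the source */
}

int main(void)
{
        bitmaps[EB] = 0xF0;
        move_groups((1UL << 6) - 1, EB, ER);   /* groups with index below 6 */
        printf("ER=%lx EB=%lx\n", bitmaps[ER], bitmaps[EB]);   /* ER=30 EB=c0 */
        return 0;
}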
/net/netlabel/
netlabel_addrlist.c
52 if (iter->valid && (addr & iter->mask) == iter->addr) in netlbl_af4list_search()
71 __be32 mask, in netlbl_af4list_search_exact() argument
77 if (iter->valid && iter->addr == addr && iter->mask == mask) in netlbl_af4list_search_exact()
103 ipv6_masked_addr_cmp(&iter->addr, &iter->mask, addr) == 0) in netlbl_af6list_search()
122 const struct in6_addr *mask, in netlbl_af6list_search_exact() argument
130 ipv6_addr_equal(&iter->mask, mask)) in netlbl_af6list_search_exact()
154 iter->addr == entry->addr && iter->mask == entry->mask) in netlbl_af4list_add()
163 ntohl(entry->mask) > ntohl(iter->mask)) { in netlbl_af4list_add()
192 ipv6_addr_equal(&iter->mask, &entry->mask)) in netlbl_af6list_add()
201 ipv6_addr_cmp(&entry->mask, &iter->mask) > 0) { in netlbl_af6list_add()
[all …]
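The netlabel_addrlist.c matches above test a packet address against a list entry by masking it with the entry's netmask and comparing against the entry's pre-masked address; the insert paths keep the list ordered by descending mask so the most specific entry is found first. A sketch with host-order u32 values instead of the kernel's __be32:

#include <stdbool.h>
#include <stdint.h>

struct af4_entry_sketch {
        uint32_t addr;    /* network address, already ANDed with mask */
        uint32_t mask;    /* netmask */
        bool valid;
};

static bool af4_match(const struct af4_entry_sketch *e, uint32_t addr)
{
        return e->valid && (addr & e->mask) == e->addr;
}

/* Example: { addr = 0xC0A80100, mask = 0xFFFFFF00 } matches 192.168.1.77
 * (0xC0A8014D) but not 192.168.2.77. */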
netlabel_unlabeled.c
233 const struct in_addr *mask, in netlbl_unlhsh_add_addr4() argument
243 entry->list.addr = addr->s_addr & mask->s_addr; in netlbl_unlhsh_add_addr4()
244 entry->list.mask = mask->s_addr; in netlbl_unlhsh_add_addr4()
273 const struct in6_addr *mask, in netlbl_unlhsh_add_addr6() argument
284 entry->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; in netlbl_unlhsh_add_addr6()
285 entry->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; in netlbl_unlhsh_add_addr6()
286 entry->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; in netlbl_unlhsh_add_addr6()
287 entry->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; in netlbl_unlhsh_add_addr6()
288 entry->list.mask = *mask; in netlbl_unlhsh_add_addr6()
367 const void *mask, in netlbl_unlhsh_add() argument
[all …]
netlabel_kapi.c
59 const void *mask, in netlbl_cfg_map_del() argument
62 if (addr == NULL && mask == NULL) { in netlbl_cfg_map_del()
64 } else if (addr != NULL && mask != NULL) { in netlbl_cfg_map_del()
67 return netlbl_domhsh_remove_af4(domain, addr, mask, in netlbl_cfg_map_del()
71 return netlbl_domhsh_remove_af6(domain, addr, mask, in netlbl_cfg_map_del()
98 const void *mask, in netlbl_cfg_unlbl_map_add() argument
117 if (addr == NULL && mask == NULL) in netlbl_cfg_unlbl_map_add()
119 else if (addr != NULL && mask != NULL) { in netlbl_cfg_unlbl_map_add()
129 const struct in_addr *mask4 = mask; in netlbl_cfg_unlbl_map_add()
135 map4->list.mask = mask4->s_addr; in netlbl_cfg_unlbl_map_add()
[all …]
netlabel_addrlist.h
35 __be32 mask; member
50 struct in6_addr mask; member
100 struct netlbl_af4list *netlbl_af4list_remove(__be32 addr, __be32 mask,
106 __be32 mask,
112 __be32 addr, __be32 mask);
116 __be32 addr, __be32 mask) in netlbl_af4list_audit_addr() argument
168 const struct in6_addr *mask,
174 const struct in6_addr *mask,
182 const struct in6_addr *mask);
188 const struct in6_addr *mask) in netlbl_af6list_audit_addr() argument
netlabel_mgmt.c
150 struct in_addr *mask; in netlbl_mgmt_add_common() local
172 mask = nla_data(info->attrs[NLBL_MGMT_A_IPV4MASK]); in netlbl_mgmt_add_common()
180 map->list.addr = addr->s_addr & mask->s_addr; in netlbl_mgmt_add_common()
181 map->list.mask = mask->s_addr; in netlbl_mgmt_add_common()
197 struct in6_addr *mask; in netlbl_mgmt_add_common() local
219 mask = nla_data(info->attrs[NLBL_MGMT_A_IPV6MASK]); in netlbl_mgmt_add_common()
228 map->list.addr.s6_addr32[0] &= mask->s6_addr32[0]; in netlbl_mgmt_add_common()
229 map->list.addr.s6_addr32[1] &= mask->s6_addr32[1]; in netlbl_mgmt_add_common()
230 map->list.addr.s6_addr32[2] &= mask->s6_addr32[2]; in netlbl_mgmt_add_common()
231 map->list.addr.s6_addr32[3] &= mask->s6_addr32[3]; in netlbl_mgmt_add_common()
[all …]
/net/ethtool/
bitset.c
40 u32 mask; in ethnl_bitmap32_clear() local
46 mask = ethnl_upper_bits(start); in ethnl_bitmap32_clear()
48 mask &= ethnl_lower_bits(end); in ethnl_bitmap32_clear()
49 if (dst[start_word] & mask) { in ethnl_bitmap32_clear()
50 dst[start_word] &= ~mask; in ethnl_bitmap32_clear()
55 if (dst[start_word] & mask) { in ethnl_bitmap32_clear()
56 dst[start_word] &= ~mask; in ethnl_bitmap32_clear()
69 mask = ethnl_lower_bits(end); in ethnl_bitmap32_clear()
70 if (dst[end_word] & mask) { in ethnl_bitmap32_clear()
71 dst[end_word] &= ~mask; in ethnl_bitmap32_clear()
[all …]
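ethnl_bitmap32_clear() above clears a bit range [start, end) in an array of 32-bit words: a partial mask for the first and last word, full clears in between. A simplified sketch of the same walk; upper_bits()/lower_bits() stand in for the ethnl_upper_bits()/ethnl_lower_bits() helpers, and the change-tracking the kernel version performs is omitted:

#include <stdint.h>

/* bits from position n % 32 upwards */
static uint32_t upper_bits(unsigned int n) { return ~(uint32_t)0 << (n % 32); }
/* bits below position n % 32; only used here when n % 32 != 0 */
static uint32_t lower_bits(unsigned int n) { return ((uint32_t)1 << (n % 32)) - 1; }

static void bitmap32_clear(uint32_t *dst, unsigned int start, unsigned int end)
{
        unsigned int start_word = start / 32;
        unsigned int end_word = end / 32;

        if (start >= end)
                return;
        if (start_word == end_word) {             /* range inside one word */
                dst[start_word] &= ~(upper_bits(start) & lower_bits(end));
                return;
        }
        dst[start_word] &= ~upper_bits(start);    /* partial first word */
        for (unsigned int i = start_word + 1; i < end_word; i++)
                dst[i] = 0;                       /* full words in between */
        if (end % 32)
                dst[end_word] &= ~lower_bits(end);   /* partial last word */
}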
bitset.h
11 int ethnl_bitset_size(const unsigned long *val, const unsigned long *mask,
14 int ethnl_bitset32_size(const u32 *val, const u32 *mask, unsigned int nbits,
17 const unsigned long *val, const unsigned long *mask,
21 const u32 *mask, unsigned int nbits,
29 int ethnl_parse_bitset(unsigned long *val, unsigned long *mask,
/net/openvswitch/
flow_table.c
54 bool full, const struct sw_flow_mask *mask) in ovs_flow_mask_key() argument
56 int start = full ? 0 : mask->range.start; in ovs_flow_mask_key()
57 int len = full ? sizeof *dst : range_n_bytes(&mask->range); in ovs_flow_mask_key()
58 const long *m = (const long *)((const u8 *)&mask->key + start); in ovs_flow_mask_key()
301 struct sw_flow_mask *mask) in tbl_mask_array_del_mask() argument
308 if (mask == ovsl_dereference(ma->masks[i])) in tbl_mask_array_del_mask()
321 kfree_rcu(mask, rcu); in tbl_mask_array_del_mask()
333 static void flow_mask_remove(struct flow_table *tbl, struct sw_flow_mask *mask) in flow_mask_remove() argument
335 if (mask) { in flow_mask_remove()
340 BUG_ON(!mask->ref_count); in flow_mask_remove()
[all …]
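ovs_flow_mask_key() above ANDs a packet's flow key with a mask over just the byte range the mask covers, so keys that differ only in don't-care bits collapse to the same masked key before the hash-table lookup. A byte-wise sketch with simplified types; the kernel walks the range a long at a time for speed:

#include <stddef.h>
#include <stdint.h>

#define KEY_BYTES 128                     /* hypothetical flow key size */

struct flow_key_sketch { uint8_t data[KEY_BYTES]; };
struct flow_mask_range { struct flow_key_sketch key; size_t start, end; };

static void flow_mask_key(struct flow_key_sketch *dst,
                          const struct flow_key_sketch *src,
                          const struct flow_mask_range *mask)
{
        for (size_t i = mask->start; i < mask->end; i++)
                dst->data[i] = src->data[i] & mask->key.data[i];
}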
actions.c
197 const __be32 *mpls_lse, const __be32 *mask) in set_mpls() argument
207 lse = OVS_MASKED(stack->label_stack_entry, *mpls_lse, *mask); in set_mpls()
248 const u16 *mask = (const u16 *)mask_; in ether_addr_copy_masked() local
250 OVS_SET_MASKED(dst[0], src[0], mask[0]); in ether_addr_copy_masked()
251 OVS_SET_MASKED(dst[1], src[1], mask[1]); in ether_addr_copy_masked()
252 OVS_SET_MASKED(dst[2], src[2], mask[2]); in ether_addr_copy_masked()
257 const struct ovs_key_ethernet *mask) in set_eth_addr() argument
268 mask->eth_src); in set_eth_addr()
270 mask->eth_dst); in set_eth_addr()
408 const __be32 mask[4], __be32 masked[4]) in mask_ipv6_addr()
[all …]
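The actions.c hits above all use the OVS_MASKED/OVS_SET_MASKED pattern: a destination field keeps its old bits where the mask is zero and takes the new value's bits where the mask is one, with the Ethernet address handled as three 16-bit words. A standalone sketch; memcpy is used here to sidestep the alignment assumptions the kernel code can make:

#include <stdint.h>
#include <string.h>

#define MASKED(old, new, mask) (((new) & (mask)) | ((old) & ~(mask)))

static void ether_addr_copy_masked_sketch(uint8_t dst[6], const uint8_t src[6],
                                          const uint8_t mask[6])
{
        uint16_t d[3], s[3], m[3];

        memcpy(d, dst, 6);
        memcpy(s, src, 6);
        memcpy(m, mask, 6);
        d[0] = MASKED(d[0], s[0], m[0]);
        d[1] = MASKED(d[1], s[1], m[1]);
        d[2] = MASKED(d[2], s[2], m[2]);
        memcpy(dst, d, 6);
}

/* With mask ff:ff:ff:00:00:00 only the OUI of dst is replaced by the OUI of
 * src; the lower three bytes of dst stay untouched. */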
/net/netfilter/
nf_flow_table_offload.c
36 struct nf_flow_key *mask = &match->mask; in nf_flow_rule_lwt_match() local
46 mask->enc_key_id.keyid = 0xffffffff; in nf_flow_rule_lwt_match()
56 mask->enc_ipv4.src = 0xffffffff; in nf_flow_rule_lwt_match()
58 mask->enc_ipv4.dst = 0xffffffff; in nf_flow_rule_lwt_match()
68 memset(&mask->enc_ipv6.src, 0xff, in nf_flow_rule_lwt_match()
72 memset(&mask->enc_ipv6.dst, 0xff, in nf_flow_rule_lwt_match()
82 struct flow_dissector_key_vlan *mask, in nf_flow_rule_vlan_match() argument
86 mask->vlan_id = VLAN_VID_MASK; in nf_flow_rule_vlan_match()
88 mask->vlan_tpid = 0xffff; in nf_flow_rule_vlan_match()
95 struct nf_flow_key *mask = &match->mask; in nf_flow_rule_match() local
[all …]
xt_addrtype.c
34 const struct in6_addr *addr, u16 mask) in match_lookup_rt6() argument
46 if (dev && (mask & XT_ADDRTYPE_LOCAL)) { in match_lookup_rt6()
69 const struct in6_addr *addr, u16 mask) in match_type6() argument
73 if ((mask & XT_ADDRTYPE_MULTICAST) && in match_type6()
76 if ((mask & XT_ADDRTYPE_UNICAST) && !(addr_type & IPV6_ADDR_UNICAST)) in match_type6()
78 if ((mask & XT_ADDRTYPE_UNSPEC) && addr_type != IPV6_ADDR_ANY) in match_type6()
82 XT_ADDRTYPE_UNREACHABLE) & mask) in match_type6()
83 return !!(mask & match_lookup_rt6(net, dev, addr, mask)); in match_type6()
105 __be32 addr, u_int16_t mask) in match_type() argument
107 return !!(mask & (1 << inet_dev_addr_type(net, dev, addr))); in match_type()
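In match_type() above the rule's mask is not a netmask but a bitmap of acceptable address types: the resolved type of the address selects one bit to test. A small sketch; the type numbering here is illustrative rather than the kernel's RTN_*/XT_ADDRTYPE_* constants:

#include <stdbool.h>
#include <stdint.h>

enum addr_type_sketch {
        ADDR_UNICAST = 1, ADDR_LOCAL = 2, ADDR_BROADCAST = 3, ADDR_MULTICAST = 5,
};

static bool addrtype_match(uint16_t mask, enum addr_type_sketch type)
{
        return !!(mask & (1u << type));   /* one bit per address type */
}

/* A rule with mask = (1 << ADDR_LOCAL) | (1 << ADDR_BROADCAST) accepts
 * addresses resolved as LOCAL or BROADCAST and nothing else. */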
nft_bitwise.c
23 struct nft_data mask; member
34 dst[i] = (src[i] & priv->mask.data[i]) ^ priv->xor.data[i]; in nft_bitwise_eval_bool()
96 struct nft_data_desc mask = { in nft_bitwise_init_bool() local
98 .size = sizeof(priv->mask), in nft_bitwise_init_bool()
115 err = nft_data_init(NULL, &priv->mask, &mask, tb[NFTA_BITWISE_MASK]); in nft_bitwise_init_bool()
126 nft_data_release(&priv->mask, mask.type); in nft_bitwise_init_bool()
215 if (nft_data_dump(skb, NFTA_BITWISE_MASK, &priv->mask, in nft_bitwise_dump_bool()
278 memcpy(&reg->mask, &priv->mask, sizeof(priv->mask)); in nft_bitwise_offload()
336 err = nft_bitwise_extract_u32_data(tb[NFTA_BITWISE_MASK], &priv->mask); in nft_bitwise_fast_init()
362 data.data[0] = priv->mask; in nft_bitwise_fast_dump()
[all …]
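nft_bitwise_eval_bool() above applies one operation per 32-bit register word: AND with the mask, then XOR with the xor data. With the right constants this single form expresses AND (xor = 0), OR of x (mask = ~x, xor = x) and NOT (mask = ~0, xor = ~0). A sketch of that loop:

#include <stdint.h>

static void bitwise_eval_bool(uint32_t *dst, const uint32_t *src,
                              const uint32_t *mask, const uint32_t *xor_data,
                              unsigned int words)
{
        for (unsigned int i = 0; i < words; i++)
                dst[i] = (src[i] & mask[i]) ^ xor_data[i];
}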
nf_conntrack_broadcast.c
28 __be32 mask = 0; in nf_conntrack_broadcast_help() local
47 mask = ifa->ifa_mask; in nf_conntrack_broadcast_help()
53 if (mask == 0) in nf_conntrack_broadcast_help()
63 exp->mask.src.u3.ip = mask; in nf_conntrack_broadcast_help()
64 exp->mask.src.u.udp.port = htons(0xFFFF); in nf_conntrack_broadcast_help()
nf_tables_offload.c
25 flow->rule->match.mask = &flow->match.mask; in nft_flow_rule_alloc()
35 struct nft_flow_key *mask = &match->mask; in nft_flow_rule_set_addr_type() local
42 mask->control.addr_type = 0xffff; in nft_flow_rule_set_addr_type()
50 __be16 mask; member
59 .mask = match->mask.basic.n_proto, in nft_flow_rule_transfer_vlan()
66 match->mask.basic.n_proto = match->mask.cvlan.vlan_tpid; in nft_flow_rule_transfer_vlan()
68 match->mask.cvlan.vlan_tpid = match->mask.vlan.vlan_tpid; in nft_flow_rule_transfer_vlan()
70 match->mask.vlan.vlan_tpid = ethertype.mask; in nft_flow_rule_transfer_vlan()
78 match->mask.basic.n_proto = match->mask.vlan.vlan_tpid; in nft_flow_rule_transfer_vlan()
80 match->mask.vlan.vlan_tpid = ethertype.mask; in nft_flow_rule_transfer_vlan()
nf_conntrack_labels.c
16 static int replace_u32(u32 *address, u32 mask, u32 new) in replace_u32() argument
22 tmp = (old & mask) ^ new; in replace_u32()
32 const u32 *mask, unsigned int words32) in nf_connlabels_replace() argument
49 changed |= replace_u32(&dst[i], mask ? ~mask[i] : 0, data[i]); in nf_connlabels_replace()
nf_conntrack_expect.c
114 return nf_ct_tuple_mask_cmp(tuple, &i->tuple, &i->mask) && in nf_ct_exp_equal()
255 intersect_mask.src.u.all = a->mask.src.u.all & b->mask.src.u.all; in expect_clash()
259 a->mask.src.u3.all[count] & b->mask.src.u3.all[count]; in expect_clash()
271 nf_ct_tuple_mask_equal(&a->mask, &b->mask) && in expect_matches()
338 memset(&exp->mask.src.u3, 0xFF, len); in nf_ct_expect_init()
339 if (sizeof(exp->mask.src.u3) > len) in nf_ct_expect_init()
340 memset((void *)&exp->mask.src.u3 + len, 0x00, in nf_ct_expect_init()
341 sizeof(exp->mask.src.u3) - len); in nf_ct_expect_init()
344 memset(&exp->mask.src.u3, 0x00, sizeof(exp->mask.src.u3)); in nf_ct_expect_init()
349 exp->mask.src.u.all = htons(0xFFFF); in nf_ct_expect_init()
[all …]
/net/can/
af_can.c
364 static struct hlist_head *can_rcv_list_find(canid_t *can_id, canid_t *mask, in can_rcv_list_find() argument
370 if (*mask & CAN_ERR_FLAG) { in can_rcv_list_find()
372 *mask &= CAN_ERR_MASK; in can_rcv_list_find()
381 if ((*mask & CAN_EFF_FLAG) && !(*can_id & CAN_EFF_FLAG)) in can_rcv_list_find()
382 *mask &= (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS); in can_rcv_list_find()
385 *can_id &= *mask; in can_rcv_list_find()
392 if (!(*mask)) in can_rcv_list_find()
396 if (((*mask & CAN_EFF_RTR_FLAGS) == CAN_EFF_RTR_FLAGS) && in can_rcv_list_find()
399 if (*mask == (CAN_EFF_MASK | CAN_EFF_RTR_FLAGS)) in can_rcv_list_find()
402 if (*mask == (CAN_SFF_MASK | CAN_EFF_RTR_FLAGS)) in can_rcv_list_find()
[all …]
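The af_can.c hits above canonicalize a receive filter by ANDing its can_id with its own mask, after which a received frame is accepted when (frame_id & mask) == filter_id; flag bits such as CAN_EFF_FLAG take part in the comparison just like ID bits. A sketch of that convention:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t canid_t;

struct can_filter_sketch {
        canid_t can_id;
        canid_t mask;
};

static void can_filter_canonicalize(struct can_filter_sketch *f)
{
        f->can_id &= f->mask;             /* "*can_id &= *mask" above */
}

static bool can_frame_matches(const struct can_filter_sketch *f, canid_t frame_id)
{
        return (frame_id & f->mask) == f->can_id;
}

/* Example: can_id = 0x123 with mask = 0x7FF (CAN_SFF_MASK) accepts exactly
 * standard ID 0x123; a narrower mask widens the set of accepted IDs. */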
/net/core/
net-sysfs.c
811 cpumask_var_t mask; in show_rps_map() local
814 if (!zalloc_cpumask_var(&mask, GFP_KERNEL)) in show_rps_map()
821 cpumask_set_cpu(map->cpus[i], mask); in show_rps_map()
823 len = snprintf(buf, PAGE_SIZE, "%*pb\n", cpumask_pr_args(mask)); in show_rps_map()
825 free_cpumask_var(mask); in show_rps_map()
834 cpumask_var_t mask; in store_rps_map() local
841 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) in store_rps_map()
844 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits); in store_rps_map()
846 free_cpumask_var(mask); in store_rps_map()
850 if (!cpumask_empty(mask)) { in store_rps_map()
[all …]
sysctl_net_core.c
67 size = orig_size = orig_sock_table ? orig_sock_table->mask + 1 : 0; in rps_sock_flow_sysctl()
87 sock_table->mask = size - 1; in rps_sock_flow_sysctl()
125 cpumask_var_t mask; in flow_limit_cpu_sysctl() local
128 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) in flow_limit_cpu_sysctl()
132 ret = cpumask_parse(buffer, mask); in flow_limit_cpu_sysctl()
142 if (cur && !cpumask_test_cpu(i, mask)) { in flow_limit_cpu_sysctl()
146 } else if (!cur && cpumask_test_cpu(i, mask)) { in flow_limit_cpu_sysctl()
168 cpumask_clear(mask); in flow_limit_cpu_sysctl()
173 cpumask_set_cpu(i, mask); in flow_limit_cpu_sysctl()
178 len = scnprintf(kbuf, len, "%*pb", cpumask_pr_args(mask)); in flow_limit_cpu_sysctl()
[all …]
/net/bridge/netfilter/
ebt_mark_m.c
22 return !!(skb->mark & info->mask) ^ info->invert; in ebt_mark_mt()
23 return ((skb->mark & info->mask) == info->mark) ^ info->invert; in ebt_mark_mt()
42 compat_ulong_t mark, mask; member
52 kern->mask = user->mask; in mark_mt_compat_from_user()
63 put_user(kern->mask, &user->mask) || in mark_mt_compat_to_user()
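ebt_mark_mt() above has two modes: the default compares the masked packet mark against a configured value, the "or" variant only asks whether any masked bit is set, and invert flips the verdict either way. A sketch with a simplified info struct:

#include <stdbool.h>
#include <stdint.h>

struct mark_m_info_sketch {
        uint32_t mark, mask;
        bool or_mode;                     /* EBT_MARK_OR in the kernel */
        bool invert;
};

static bool mark_match(const struct mark_m_info_sketch *info, uint32_t skb_mark)
{
        bool hit;

        if (info->or_mode)
                hit = (skb_mark & info->mask) != 0;
        else
                hit = (skb_mark & info->mask) == info->mark;

        return hit ^ info->invert;
}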
/net/mac80211/
rate.c
422 static bool rate_idx_match_legacy_mask(s8 *rate_idx, int n_bitrates, u32 mask) in rate_idx_match_legacy_mask() argument
428 if (mask & (1 << j)) { in rate_idx_match_legacy_mask()
437 if (mask & (1 << j)) { in rate_idx_match_legacy_mask()
524 u32 mask, in rate_idx_match_mask() argument
550 mask)) in rate_idx_match_mask()
564 mask)) in rate_idx_match_mask()
569 mask)) in rate_idx_match_mask()
749 struct ieee80211_sta *sta, u32 *mask, in rate_control_cap_mask() argument
755 *mask = sdata->rc_rateidx_mask[sband->band]; in rate_control_cap_mask()
759 *mask &= ~BIT(i); in rate_control_cap_mask()
[all …]
