Lines matching +full:key +full:- (all hits below are from net/openvswitch/flow.c)
1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (c) 2007-2014 Nicira, Inc.
50 idle_ms = jiffies_to_msecs(jiffies - flow_jiffies); in ovs_flow_used_time()
54 return cur_ms - idle_ms; in ovs_flow_used_time()
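
Only two lines of ovs_flow_used_time() match the search, but they carry the whole idea: convert how long the flow has been idle (in jiffies) into milliseconds and subtract that from "now" in milliseconds, giving the timestamp of the flow's last use. A minimal userspace sketch with made-up tick and clock values (HZ, jiffies_to_msecs() and the numbers below are stand-ins, not the kernel's definitions):

#include <stdio.h>

#define HZ 250	/* assumed tick rate for the sketch */

/* simplified stand-in for the kernel's jiffies_to_msecs() */
static unsigned long jiffies_to_msecs(unsigned long j)
{
	return j * (1000UL / HZ);
}

int main(void)
{
	unsigned long jiffies      = 600000;	/* "now" in ticks (made up)   */
	unsigned long flow_jiffies = 599500;	/* flow last used, in ticks   */
	unsigned long long cur_ms  = 2400000;	/* "now" in ms (made up)      */

	unsigned long long idle_ms = jiffies_to_msecs(jiffies - flow_jiffies);

	/* idle for 2000 ms, so the flow was last used at 2398000 ms */
	printf("last used at %llu ms\n", cur_ms - idle_ms);
	return 0;
}
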
64 int len = skb->len + (skb_vlan_tag_present(skb) ? VLAN_HLEN : 0); in ovs_flow_stats_update()
66 stats = rcu_dereference(flow->stats[cpu]); in ovs_flow_stats_update()
68 /* Check if already have CPU-specific stats. */ in ovs_flow_stats_update()
70 spin_lock(&stats->lock); in ovs_flow_stats_update()
71 /* Mark if we write on the pre-allocated stats. */ in ovs_flow_stats_update()
72 if (cpu == 0 && unlikely(flow->stats_last_writer != cpu)) in ovs_flow_stats_update()
73 flow->stats_last_writer = cpu; in ovs_flow_stats_update()
75 stats = rcu_dereference(flow->stats[0]); /* Pre-allocated. */ in ovs_flow_stats_update()
76 spin_lock(&stats->lock); in ovs_flow_stats_update()
79 * pre-allocated stats keep using them. in ovs_flow_stats_update()
81 if (unlikely(flow->stats_last_writer != cpu)) { in ovs_flow_stats_update()
83 * stats, so we need to check again. If CPU-specific in ovs_flow_stats_update()
84 * stats were already allocated, we update the pre- in ovs_flow_stats_update()
87 if (likely(flow->stats_last_writer != -1) && in ovs_flow_stats_update()
88 likely(!rcu_access_pointer(flow->stats[cpu]))) { in ovs_flow_stats_update()
89 /* Try to allocate CPU-specific stats. */ in ovs_flow_stats_update()
100 new_stats->used = jiffies; in ovs_flow_stats_update()
101 new_stats->packet_count = 1; in ovs_flow_stats_update()
102 new_stats->byte_count = len; in ovs_flow_stats_update()
103 new_stats->tcp_flags = tcp_flags; in ovs_flow_stats_update()
104 spin_lock_init(&new_stats->lock); in ovs_flow_stats_update()
106 rcu_assign_pointer(flow->stats[cpu], in ovs_flow_stats_update()
108 cpumask_set_cpu(cpu, &flow->cpu_used_mask); in ovs_flow_stats_update()
112 flow->stats_last_writer = cpu; in ovs_flow_stats_update()
116 stats->used = jiffies; in ovs_flow_stats_update()
117 stats->packet_count++; in ovs_flow_stats_update()
118 stats->byte_count += len; in ovs_flow_stats_update()
119 stats->tcp_flags |= tcp_flags; in ovs_flow_stats_update()
121 spin_unlock(&stats->lock); in ovs_flow_stats_update()
136 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { in ovs_flow_stats_get()
137 struct sw_flow_stats *stats = rcu_dereference_ovsl(flow->stats[cpu]); in ovs_flow_stats_get()
140 /* Local CPU may write on non-local stats, so we must in ovs_flow_stats_get()
141 * block bottom-halves here. in ovs_flow_stats_get()
143 spin_lock_bh(&stats->lock); in ovs_flow_stats_get()
144 if (!*used || time_after(stats->used, *used)) in ovs_flow_stats_get()
145 *used = stats->used; in ovs_flow_stats_get()
146 *tcp_flags |= stats->tcp_flags; in ovs_flow_stats_get()
147 ovs_stats->n_packets += stats->packet_count; in ovs_flow_stats_get()
148 ovs_stats->n_bytes += stats->byte_count; in ovs_flow_stats_get()
149 spin_unlock_bh(&stats->lock); in ovs_flow_stats_get()
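
The fragments of ovs_flow_stats_update() and ovs_flow_stats_get() above show the per-flow statistics scheme: slot 0 of flow->stats[] is pre-allocated, every other CPU gets its own slot lazily the first time it updates the flow, and the reader sums all allocated slots while keeping the most recent "used" timestamp. Below is a much-simplified, single-threaded userspace sketch of that layout; the real code additionally uses RCU, a per-slot spinlock and the stats_last_writer heuristic to decide when a CPU deserves its own slot.

#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4

struct flow_stats {
	unsigned long used;		/* "time" of last update */
	unsigned long packet_count;
	unsigned long byte_count;
};

static struct flow_stats *stats[NR_CPUS];	/* one slot per CPU */

static void stats_update(int cpu, unsigned long now, unsigned long len)
{
	if (!stats[cpu]) {
		/* first packet handled on this CPU: allocate its own slot */
		stats[cpu] = calloc(1, sizeof(*stats[cpu]));
		if (!stats[cpu])
			cpu = 0;	/* allocation failed: fall back to slot 0 */
	}
	stats[cpu]->used = now;
	stats[cpu]->packet_count++;
	stats[cpu]->byte_count += len;
}

static void stats_get(unsigned long *used, unsigned long *packets, unsigned long *bytes)
{
	*used = *packets = *bytes = 0;
	for (int cpu = 0; cpu < NR_CPUS; cpu++) {
		if (!stats[cpu])
			continue;
		if (stats[cpu]->used > *used)	/* keep the most recent timestamp */
			*used = stats[cpu]->used;
		*packets += stats[cpu]->packet_count;
		*bytes   += stats[cpu]->byte_count;
	}
}

int main(void)
{
	unsigned long used, packets, bytes;

	stats[0] = calloc(1, sizeof(*stats[0]));	/* the pre-allocated slot */
	if (!stats[0])
		return 1;

	stats_update(0, 100, 64);	/* packet seen on CPU 0 */
	stats_update(2, 120, 1500);	/* packet seen on CPU 2 */

	stats_get(&used, &packets, &bytes);
	printf("used=%lu packets=%lu bytes=%lu\n", used, packets, bytes);
	return 0;
}
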
160 for (cpu = 0; cpu < nr_cpu_ids; cpu = cpumask_next(cpu, &flow->cpu_used_mask)) { in ovs_flow_stats_clear()
161 struct sw_flow_stats *stats = ovsl_dereference(flow->stats[cpu]); in ovs_flow_stats_clear()
164 spin_lock_bh(&stats->lock); in ovs_flow_stats_clear()
165 stats->used = 0; in ovs_flow_stats_clear()
166 stats->packet_count = 0; in ovs_flow_stats_clear()
167 stats->byte_count = 0; in ovs_flow_stats_clear()
168 stats->tcp_flags = 0; in ovs_flow_stats_clear()
169 spin_unlock_bh(&stats->lock); in ovs_flow_stats_clear()
176 if (unlikely(skb->len < len)) in check_header()
177 return -EINVAL; in check_header()
179 return -ENOMEM; in check_header()
201 skb->len < nh_ofs + ip_len)) in check_iphdr()
202 return -EINVAL; in check_iphdr()
218 skb->len < th_ofs + tcp_len)) in tcphdr_ok()
242 static int parse_ipv6hdr(struct sk_buff *skb, struct sw_flow_key *key) in parse_ipv6hdr() argument
257 key->ip.proto = NEXTHDR_NONE; in parse_ipv6hdr()
258 key->ip.tos = ipv6_get_dsfield(nh); in parse_ipv6hdr()
259 key->ip.ttl = nh->hop_limit; in parse_ipv6hdr()
260 key->ipv6.label = *(__be32 *)nh & htonl(IPV6_FLOWINFO_FLOWLABEL); in parse_ipv6hdr()
261 key->ipv6.addr.src = nh->saddr; in parse_ipv6hdr()
262 key->ipv6.addr.dst = nh->daddr; in parse_ipv6hdr()
264 nexthdr = ipv6_find_hdr(skb, &payload_ofs, -1, &frag_off, &flags); in parse_ipv6hdr()
267 key->ip.frag = OVS_FRAG_TYPE_LATER; in parse_ipv6hdr()
268 key->ip.proto = NEXTHDR_FRAGMENT; in parse_ipv6hdr()
271 key->ip.frag = OVS_FRAG_TYPE_FIRST; in parse_ipv6hdr()
273 key->ip.frag = OVS_FRAG_TYPE_NONE; in parse_ipv6hdr()
278 * used to set key->ip.frag above. in parse_ipv6hdr()
281 return -EPROTO; in parse_ipv6hdr()
283 nh_len = payload_ofs - nh_ofs; in parse_ipv6hdr()
285 key->ip.proto = nexthdr; in parse_ipv6hdr()
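
The flow-label line in parse_ipv6hdr() relies on the IPv6 header layout: the first 32 bits are version (4), traffic class (8) and flow label (20), so masking that word with 0x000FFFFF in network byte order leaves only the label. A small userspace sketch (the local FLOWINFO_FLOWLABEL constant mirrors the kernel's IPV6_FLOWINFO_FLOWLABEL):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

int main(void)
{
	const uint32_t FLOWINFO_FLOWLABEL = htonl(0x000FFFFFu);

	/* version 6, traffic class 0x2e (DSCP EF), flow label 0x12345 */
	uint32_t first_word = htonl((6u << 28) | (0x2eu << 20) | 0x12345u);

	/* same masking as key->ipv6.label = *(__be32 *)nh & htonl(...) */
	uint32_t label_be = first_word & FLOWINFO_FLOWLABEL;

	printf("flow label = 0x%05x\n", (unsigned)ntohl(label_be));
	return 0;
}
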
298 * Returns 0 if it encounters a non-vlan or incomplete packet.
304 struct vlan_head *vh = (struct vlan_head *)skb->data; in parse_vlan_tag()
306 if (likely(!eth_type_vlan(vh->tpid))) in parse_vlan_tag()
309 if (unlikely(skb->len < sizeof(struct vlan_head) + sizeof(__be16))) in parse_vlan_tag()
314 return -ENOMEM; in parse_vlan_tag()
316 vh = (struct vlan_head *)skb->data; in parse_vlan_tag()
317 key_vh->tci = vh->tci | htons(VLAN_CFI_MASK); in parse_vlan_tag()
318 key_vh->tpid = vh->tpid; in parse_vlan_tag()
321 int offset = skb->data - skb_mac_header(skb); in parse_vlan_tag()
330 __vlan_hwaccel_put_tag(skb, key_vh->tpid, tci); in parse_vlan_tag()
337 static void clear_vlan(struct sw_flow_key *key) in clear_vlan() argument
339 key->eth.vlan.tci = 0; in clear_vlan()
340 key->eth.vlan.tpid = 0; in clear_vlan()
341 key->eth.cvlan.tci = 0; in clear_vlan()
342 key->eth.cvlan.tpid = 0; in clear_vlan()
345 static int parse_vlan(struct sk_buff *skb, struct sw_flow_key *key) in parse_vlan() argument
350 key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK); in parse_vlan()
351 key->eth.vlan.tpid = skb->vlan_proto; in parse_vlan()
353 /* Parse outer vlan tag in the non-accelerated case. */ in parse_vlan()
354 res = parse_vlan_tag(skb, &key->eth.vlan, true); in parse_vlan()
360 res = parse_vlan_tag(skb, &key->eth.cvlan, false); in parse_vlan()
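
Both VLAN paths above, the hardware-accelerated one in parse_vlan() and the in-packet one in parse_vlan_tag(), store the TCI with the CFI/DEI bit forced on. That way a key TCI of zero unambiguously means "no VLAN tag", even for a tag whose VID and PCP happen to be zero. A hedged userspace sketch of the encoding for the accelerated case (VLAN_CFI_MASK is defined locally with the same 0x1000 value the kernel uses):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

#define VLAN_CFI_MASK 0x1000	/* DEI/CFI bit of the TCI */

/* mirror of key->eth.vlan.tci = htons(skb->vlan_tci) | htons(VLAN_CFI_MASK) */
static uint16_t key_tci(uint16_t host_tci)
{
	return htons(host_tci) | htons(VLAN_CFI_MASK);
}

int main(void)
{
	uint16_t tagged_vid0 = key_tci(0x0000);	/* tag present, VID 0 */
	uint16_t tagged_vid5 = key_tci(0x0005);	/* tag present, VID 5 */

	printf("no tag      -> key tci 0x0000\n");
	printf("tag, VID 0  -> key tci 0x%04x\n", (unsigned)ntohs(tagged_vid0));
	printf("tag, VID 5  -> key tci 0x%04x\n", (unsigned)ntohs(tagged_vid5));
	return 0;
}
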
379 proto = *(__be16 *) skb->data; in parse_ethertype()
385 if (skb->len < sizeof(struct llc_snap_hdr)) in parse_ethertype()
391 llc = (struct llc_snap_hdr *) skb->data; in parse_ethertype()
392 if (llc->dsap != LLC_SAP_SNAP || in parse_ethertype()
393 llc->ssap != LLC_SAP_SNAP || in parse_ethertype()
394 (llc->oui[0] | llc->oui[1] | llc->oui[2]) != 0) in parse_ethertype()
399 if (eth_proto_is_802_3(llc->ethertype)) in parse_ethertype()
400 return llc->ethertype; in parse_ethertype()
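
parse_ethertype() hinges on one rule: a value of 0x0600 or more in the two bytes after the MAC addresses is an EtherType (Ethernet II framing), anything smaller is an 802.3 length, in which case the real protocol must come from the LLC/SNAP header that follows; the kernel's eth_proto_is_802_3() helper performs essentially this comparison. A tiny sketch of the decision (values in host byte order for readability):

#include <stdio.h>
#include <stdint.h>

static int is_ethertype(uint16_t v)
{
	return v >= 0x0600;	/* ETH_P_802_3_MIN */
}

int main(void)
{
	printf("0x0800 (IPv4)   -> %s\n", is_ethertype(0x0800) ? "EtherType" : "802.3 length, look for LLC/SNAP");
	printf("0x0100 (256 B)  -> %s\n", is_ethertype(0x0100) ? "EtherType" : "802.3 length, look for LLC/SNAP");
	return 0;
}
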
405 static int parse_icmpv6(struct sk_buff *skb, struct sw_flow_key *key, in parse_icmpv6() argument
410 /* The ICMPv6 type and code fields use the 16-bit transport port in parse_icmpv6()
411 * fields, so we need to store them in 16-bit network byte order. in parse_icmpv6()
413 key->tp.src = htons(icmp->icmp6_type); in parse_icmpv6()
414 key->tp.dst = htons(icmp->icmp6_code); in parse_icmpv6()
415 memset(&key->ipv6.nd, 0, sizeof(key->ipv6.nd)); in parse_icmpv6()
417 if (icmp->icmp6_code == 0 && in parse_icmpv6()
418 (icmp->icmp6_type == NDISC_NEIGHBOUR_SOLICITATION || in parse_icmpv6()
419 icmp->icmp6_type == NDISC_NEIGHBOUR_ADVERTISEMENT)) { in parse_icmpv6()
420 int icmp_len = skb->len - skb_transport_offset(skb); in parse_icmpv6()
431 return -ENOMEM; in parse_icmpv6()
434 key->ipv6.nd.target = nd->target; in parse_icmpv6()
436 icmp_len -= sizeof(*nd); in parse_icmpv6()
440 (struct nd_opt_hdr *)(nd->opt + offset); in parse_icmpv6()
441 int opt_len = nd_opt->nd_opt_len * 8; in parse_icmpv6()
450 if (nd_opt->nd_opt_type == ND_OPT_SOURCE_LL_ADDR in parse_icmpv6()
452 if (unlikely(!is_zero_ether_addr(key->ipv6.nd.sll))) in parse_icmpv6()
454 ether_addr_copy(key->ipv6.nd.sll, in parse_icmpv6()
455 &nd->opt[offset+sizeof(*nd_opt)]); in parse_icmpv6()
456 } else if (nd_opt->nd_opt_type == ND_OPT_TARGET_LL_ADDR in parse_icmpv6()
458 if (unlikely(!is_zero_ether_addr(key->ipv6.nd.tll))) in parse_icmpv6()
460 ether_addr_copy(key->ipv6.nd.tll, in parse_icmpv6()
461 &nd->opt[offset+sizeof(*nd_opt)]); in parse_icmpv6()
464 icmp_len -= opt_len; in parse_icmpv6()
472 memset(&key->ipv6.nd.target, 0, sizeof(key->ipv6.nd.target)); in parse_icmpv6()
473 memset(key->ipv6.nd.sll, 0, sizeof(key->ipv6.nd.sll)); in parse_icmpv6()
474 memset(key->ipv6.nd.tll, 0, sizeof(key->ipv6.nd.tll)); in parse_icmpv6()
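
The comment in parse_icmpv6() (and the ICMPv4 branch of key_extract_l3l4() further down) describes a convention worth spelling out: ICMP has no ports, so the 8-bit type and code are widened and stored, in network byte order, in the key's 16-bit transport-port fields. A minimal sketch of that packing (the struct and the values are made up for the example):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

struct tp_key {
	uint16_t src;	/* holds htons(ICMP type) */
	uint16_t dst;	/* holds htons(ICMP code) */
};

int main(void)
{
	uint8_t icmp6_type = 135;	/* neighbour solicitation */
	uint8_t icmp6_code = 0;

	struct tp_key tp = {
		.src = htons(icmp6_type),
		.dst = htons(icmp6_code),
	};

	printf("tp.src=%u tp.dst=%u (host order)\n",
	       (unsigned)ntohs(tp.src), (unsigned)ntohs(tp.dst));
	return 0;
}
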
479 static int parse_nsh(struct sk_buff *skb, struct sw_flow_key *key) in parse_nsh() argument
495 return -EINVAL; in parse_nsh()
502 key->nsh.base.flags = nsh_get_flags(nh); in parse_nsh()
503 key->nsh.base.ttl = nsh_get_ttl(nh); in parse_nsh()
504 key->nsh.base.mdtype = nh->mdtype; in parse_nsh()
505 key->nsh.base.np = nh->np; in parse_nsh()
506 key->nsh.base.path_hdr = nh->path_hdr; in parse_nsh()
507 switch (key->nsh.base.mdtype) { in parse_nsh()
510 return -EINVAL; in parse_nsh()
511 memcpy(key->nsh.context, nh->md1.context, in parse_nsh()
512 sizeof(nh->md1)); in parse_nsh()
515 memset(key->nsh.context, 0, in parse_nsh()
516 sizeof(nh->md1)); in parse_nsh()
519 return -EINVAL; in parse_nsh()
526 * key_extract_l3l4 - extracts L3/L4 header information.
527 * @skb: sk_buff that contains the frame, with skb->data pointing to the
529 * @key: output flow key
532 static int key_extract_l3l4(struct sk_buff *skb, struct sw_flow_key *key) in key_extract_l3l4() argument
537 if (key->eth.type == htons(ETH_P_IP)) { in key_extract_l3l4()
543 memset(&key->ip, 0, sizeof(key->ip)); in key_extract_l3l4()
544 memset(&key->ipv4, 0, sizeof(key->ipv4)); in key_extract_l3l4()
545 if (error == -EINVAL) { in key_extract_l3l4()
546 skb->transport_header = skb->network_header; in key_extract_l3l4()
553 key->ipv4.addr.src = nh->saddr; in key_extract_l3l4()
554 key->ipv4.addr.dst = nh->daddr; in key_extract_l3l4()
556 key->ip.proto = nh->protocol; in key_extract_l3l4()
557 key->ip.tos = nh->tos; in key_extract_l3l4()
558 key->ip.ttl = nh->ttl; in key_extract_l3l4()
560 offset = nh->frag_off & htons(IP_OFFSET); in key_extract_l3l4()
562 key->ip.frag = OVS_FRAG_TYPE_LATER; in key_extract_l3l4()
563 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
566 if (nh->frag_off & htons(IP_MF) || in key_extract_l3l4()
567 skb_shinfo(skb)->gso_type & SKB_GSO_UDP) in key_extract_l3l4()
568 key->ip.frag = OVS_FRAG_TYPE_FIRST; in key_extract_l3l4()
570 key->ip.frag = OVS_FRAG_TYPE_NONE; in key_extract_l3l4()
573 if (key->ip.proto == IPPROTO_TCP) { in key_extract_l3l4()
576 key->tp.src = tcp->source; in key_extract_l3l4()
577 key->tp.dst = tcp->dest; in key_extract_l3l4()
578 key->tp.flags = TCP_FLAGS_BE16(tcp); in key_extract_l3l4()
580 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
583 } else if (key->ip.proto == IPPROTO_UDP) { in key_extract_l3l4()
586 key->tp.src = udp->source; in key_extract_l3l4()
587 key->tp.dst = udp->dest; in key_extract_l3l4()
589 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
591 } else if (key->ip.proto == IPPROTO_SCTP) { in key_extract_l3l4()
594 key->tp.src = sctp->source; in key_extract_l3l4()
595 key->tp.dst = sctp->dest; in key_extract_l3l4()
597 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
599 } else if (key->ip.proto == IPPROTO_ICMP) { in key_extract_l3l4()
602 /* The ICMP type and code fields use the 16-bit in key_extract_l3l4()
604 * them in 16-bit network byte order. */ in key_extract_l3l4()
605 key->tp.src = htons(icmp->type); in key_extract_l3l4()
606 key->tp.dst = htons(icmp->code); in key_extract_l3l4()
608 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
612 } else if (key->eth.type == htons(ETH_P_ARP) || in key_extract_l3l4()
613 key->eth.type == htons(ETH_P_RARP)) { in key_extract_l3l4()
620 arp->ar_hrd == htons(ARPHRD_ETHER) && in key_extract_l3l4()
621 arp->ar_pro == htons(ETH_P_IP) && in key_extract_l3l4()
622 arp->ar_hln == ETH_ALEN && in key_extract_l3l4()
623 arp->ar_pln == 4) { in key_extract_l3l4()
626 if (ntohs(arp->ar_op) <= 0xff) in key_extract_l3l4()
627 key->ip.proto = ntohs(arp->ar_op); in key_extract_l3l4()
629 key->ip.proto = 0; in key_extract_l3l4()
631 memcpy(&key->ipv4.addr.src, arp->ar_sip, sizeof(key->ipv4.addr.src)); in key_extract_l3l4()
632 memcpy(&key->ipv4.addr.dst, arp->ar_tip, sizeof(key->ipv4.addr.dst)); in key_extract_l3l4()
633 ether_addr_copy(key->ipv4.arp.sha, arp->ar_sha); in key_extract_l3l4()
634 ether_addr_copy(key->ipv4.arp.tha, arp->ar_tha); in key_extract_l3l4()
636 memset(&key->ip, 0, sizeof(key->ip)); in key_extract_l3l4()
637 memset(&key->ipv4, 0, sizeof(key->ipv4)); in key_extract_l3l4()
639 } else if (eth_p_mpls(key->eth.type)) { in key_extract_l3l4()
642 memset(&key->mpls, 0, sizeof(key->mpls)); in key_extract_l3l4()
643 skb_set_inner_network_header(skb, skb->mac_len); in key_extract_l3l4()
647 error = check_header(skb, skb->mac_len + in key_extract_l3l4()
655 memcpy(&key->mpls.lse[label_count - 1], &lse, in key_extract_l3l4()
658 skb_set_inner_network_header(skb, skb->mac_len + in key_extract_l3l4()
668 key->mpls.num_labels_mask = GENMASK(label_count - 1, 0); in key_extract_l3l4()
669 } else if (key->eth.type == htons(ETH_P_IPV6)) { in key_extract_l3l4()
672 nh_len = parse_ipv6hdr(skb, key); in key_extract_l3l4()
675 case -EINVAL: in key_extract_l3l4()
676 memset(&key->ip, 0, sizeof(key->ip)); in key_extract_l3l4()
677 memset(&key->ipv6.addr, 0, sizeof(key->ipv6.addr)); in key_extract_l3l4()
679 case -EPROTO: in key_extract_l3l4()
680 skb->transport_header = skb->network_header; in key_extract_l3l4()
689 if (key->ip.frag == OVS_FRAG_TYPE_LATER) { in key_extract_l3l4()
690 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
693 if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP) in key_extract_l3l4()
694 key->ip.frag = OVS_FRAG_TYPE_FIRST; in key_extract_l3l4()
697 if (key->ip.proto == NEXTHDR_TCP) { in key_extract_l3l4()
700 key->tp.src = tcp->source; in key_extract_l3l4()
701 key->tp.dst = tcp->dest; in key_extract_l3l4()
702 key->tp.flags = TCP_FLAGS_BE16(tcp); in key_extract_l3l4()
704 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
706 } else if (key->ip.proto == NEXTHDR_UDP) { in key_extract_l3l4()
709 key->tp.src = udp->source; in key_extract_l3l4()
710 key->tp.dst = udp->dest; in key_extract_l3l4()
712 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
714 } else if (key->ip.proto == NEXTHDR_SCTP) { in key_extract_l3l4()
717 key->tp.src = sctp->source; in key_extract_l3l4()
718 key->tp.dst = sctp->dest; in key_extract_l3l4()
720 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
722 } else if (key->ip.proto == NEXTHDR_ICMP) { in key_extract_l3l4()
724 error = parse_icmpv6(skb, key, nh_len); in key_extract_l3l4()
728 memset(&key->tp, 0, sizeof(key->tp)); in key_extract_l3l4()
731 } else if (key->eth.type == htons(ETH_P_NSH)) { in key_extract_l3l4()
732 error = parse_nsh(skb, key); in key_extract_l3l4()
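
One detail from the ARP/RARP branch of key_extract_l3l4() above: the flow key has no dedicated opcode field, so the 16-bit ARP opcode is stored in the 8-bit key->ip.proto only when it fits, and 0 otherwise. A hypothetical helper illustrating that rule:

#include <stdio.h>
#include <stdint.h>

/* mirrors: if (ntohs(arp->ar_op) <= 0xff) key->ip.proto = ntohs(arp->ar_op); else 0 */
static uint8_t arp_op_to_key(uint16_t ar_op_host)
{
	return ar_op_host <= 0xff ? (uint8_t)ar_op_host : 0;
}

int main(void)
{
	printf("ARPOP_REQUEST (1)   -> %u\n", (unsigned)arp_op_to_key(1));
	printf("ARPOP_REPLY   (2)   -> %u\n", (unsigned)arp_op_to_key(2));
	printf("bogus opcode 0x1234 -> %u\n", (unsigned)arp_op_to_key(0x1234));
	return 0;
}
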
740 * key_extract - extracts a flow key from an Ethernet frame.
741 * @skb: sk_buff that contains the frame, with skb->data pointing to the
743 * @key: output flow key
745 * The caller must ensure that skb->len >= ETH_HLEN.
751 * - skb->mac_header: the L2 header.
753 * - skb->network_header: just past the L2 header, or just past the
756 * - skb->transport_header: If key->eth.type is ETH_P_IP or ETH_P_IPV6
758 * of a correct length, otherwise the same as skb->network_header.
759 * For other key->eth.type values it is left untouched.
761 * - skb->protocol: the type of the data starting at skb->network_header.
762 * Equals to key->eth.type.
764 static int key_extract(struct sk_buff *skb, struct sw_flow_key *key) in key_extract() argument
769 key->tp.flags = 0; in key_extract()
774 clear_vlan(key); in key_extract()
775 if (ovs_key_mac_proto(key) == MAC_PROTO_NONE) { in key_extract()
776 if (unlikely(eth_type_vlan(skb->protocol))) in key_extract()
777 return -EINVAL; in key_extract()
780 key->eth.type = skb->protocol; in key_extract()
783 ether_addr_copy(key->eth.src, eth->h_source); in key_extract()
784 ether_addr_copy(key->eth.dst, eth->h_dest); in key_extract()
788 * update skb->csum here. in key_extract()
791 if (unlikely(parse_vlan(skb, key))) in key_extract()
792 return -ENOMEM; in key_extract()
794 key->eth.type = parse_ethertype(skb); in key_extract()
795 if (unlikely(key->eth.type == htons(0))) in key_extract()
796 return -ENOMEM; in key_extract()
800 * skb->protocol. in key_extract()
802 if (key->eth.cvlan.tci & htons(VLAN_CFI_MASK)) in key_extract()
803 skb->protocol = key->eth.cvlan.tpid; in key_extract()
805 skb->protocol = key->eth.type; in key_extract()
808 __skb_push(skb, skb->data - skb_mac_header(skb)); in key_extract()
813 /* Fill out L3/L4 key info, if any */ in key_extract()
814 return key_extract_l3l4(skb, key); in key_extract()
820 int ovs_flow_key_update_l3l4(struct sk_buff *skb, struct sw_flow_key *key) in ovs_flow_key_update_l3l4() argument
822 return key_extract_l3l4(skb, key); in ovs_flow_key_update_l3l4()
825 int ovs_flow_key_update(struct sk_buff *skb, struct sw_flow_key *key) in ovs_flow_key_update() argument
829 res = key_extract(skb, key); in ovs_flow_key_update()
831 key->mac_proto &= ~SW_FLOW_KEY_INVALID; in ovs_flow_key_update()
838 switch (skb->dev->type) { in key_extract_mac_proto()
842 if (skb->protocol == htons(ETH_P_TEB)) in key_extract_mac_proto()
847 return -EINVAL; in key_extract_mac_proto()
851 struct sk_buff *skb, struct sw_flow_key *key) in ovs_flow_key_extract() argument
860 key->tun_proto = ip_tunnel_info_af(tun_info); in ovs_flow_key_extract()
861 memcpy(&key->tun_key, &tun_info->key, sizeof(key->tun_key)); in ovs_flow_key_extract()
863 if (tun_info->options_len) { in ovs_flow_key_extract()
864 BUILD_BUG_ON((1 << (sizeof(tun_info->options_len) * in ovs_flow_key_extract()
865 8)) - 1 in ovs_flow_key_extract()
866 > sizeof(key->tun_opts)); in ovs_flow_key_extract()
868 ip_tunnel_info_opts_get(TUN_METADATA_OPTS(key, tun_info->options_len), in ovs_flow_key_extract()
870 key->tun_opts_len = tun_info->options_len; in ovs_flow_key_extract()
872 key->tun_opts_len = 0; in ovs_flow_key_extract()
875 key->tun_proto = 0; in ovs_flow_key_extract()
876 key->tun_opts_len = 0; in ovs_flow_key_extract()
877 memset(&key->tun_key, 0, sizeof(key->tun_key)); in ovs_flow_key_extract()
880 key->phy.priority = skb->priority; in ovs_flow_key_extract()
881 key->phy.in_port = OVS_CB(skb)->input_vport->port_no; in ovs_flow_key_extract()
882 key->phy.skb_mark = skb->mark; in ovs_flow_key_extract()
883 key->ovs_flow_hash = 0; in ovs_flow_key_extract()
887 key->mac_proto = res; in ovs_flow_key_extract()
892 key->recirc_id = tc_ext ? tc_ext->chain : 0; in ovs_flow_key_extract()
893 OVS_CB(skb)->mru = tc_ext ? tc_ext->mru : 0; in ovs_flow_key_extract()
895 key->recirc_id = 0; in ovs_flow_key_extract()
898 key->recirc_id = 0; in ovs_flow_key_extract()
901 err = key_extract(skb, key); in ovs_flow_key_extract()
903 ovs_ct_fill_key(skb, key); /* Must be after key_extract(). */ in ovs_flow_key_extract()
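
The BUILD_BUG_ON() a few lines up in ovs_flow_key_extract() is a compile-time guarantee: the largest value the tunnel metadata's options_len field can represent must still fit into the key's tun_opts buffer. In plain C11 the same kind of check can be written with _Static_assert; the structures and sizes below are stand-ins, not the kernel's:

#include <stdint.h>

/* made-up stand-ins for the real tunnel-info and flow-key structures */
struct tun_info_example { uint8_t options_len; };
struct flow_key_example { uint8_t tun_opts[255]; };

/* fail the build if the widest options_len could overflow tun_opts */
_Static_assert((1 << (sizeof(((struct tun_info_example *)0)->options_len) * 8)) - 1
	       <= sizeof(((struct flow_key_example *)0)->tun_opts),
	       "tun_opts must hold the largest possible options_len");

int main(void) { return 0; }
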
909 struct sw_flow_key *key, bool log) in ovs_flow_key_extract_userspace() argument
917 return -EINVAL; in ovs_flow_key_extract_userspace()
920 err = ovs_nla_get_flow_metadata(net, a, attrs, key, log); in ovs_flow_key_extract_userspace()
924 /* key_extract assumes that skb->protocol is set-up for in ovs_flow_key_extract_userspace()
929 * For L2 packet key eth type would be zero. skb protocol in ovs_flow_key_extract_userspace()
930 * would be set to correct value later during key-extract. in ovs_flow_key_extract_userspace()
933 skb->protocol = key->eth.type; in ovs_flow_key_extract_userspace()
934 err = key_extract(skb, key); in ovs_flow_key_extract_userspace()
939 * for packets for which it makes sense. Otherwise the key may be in ovs_flow_key_extract_userspace()
940 * corrupted due to overlapping key fields. in ovs_flow_key_extract_userspace()
943 key->eth.type != htons(ETH_P_IP)) in ovs_flow_key_extract_userspace()
944 return -EINVAL; in ovs_flow_key_extract_userspace()
946 (key->eth.type != htons(ETH_P_IPV6) || in ovs_flow_key_extract_userspace()
947 sw_flow_key_is_nd(key))) in ovs_flow_key_extract_userspace()
948 return -EINVAL; in ovs_flow_key_extract_userspace()