Lines Matching refs:skb

64 static inline __always_inline int do_forward6(struct __sk_buff* skb,
71 if (skb->protocol != htons(ETH_P_IPV6)) return TC_ACT_PIPE;
74 if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
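
These first two checks gate the whole fast path: only unicast IPv6 traffic addressed to this host is considered for forwarding, and everything else is handed back to the rest of the TC pipeline via TC_ACT_PIPE. A minimal standalone sketch of the same gating, using standard kernel constants (the helper name and scaffolding are mine, not the file's):

    #include <stdbool.h>
    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/if_packet.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    // Decline (TC_ACT_PIPE) anything that is not an IPv6 frame unicast
    // to us; only such packets are worth the cost of the fast path.
    static inline __always_inline int gate_ipv6_unicast(struct __sk_buff* skb,
                                                        bool is_ethernet) {
        // skb->protocol is the link-layer protocol in network byte order.
        if (skb->protocol != bpf_htons(ETH_P_IPV6)) return TC_ACT_PIPE;
        // On ethernet links, skip broadcast/multicast/other-host frames.
        if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
        return TC_ACT_OK;  // caller continues with header parsing
    }
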
80 // It has to be done early because it will invalidate any skb->data/data_end derived pointers.
81 if (bpf_skb_pull_data(skb, l2_header_size + IP6_HLEN)) return TC_ACT_PIPE;
83 void* data = (void*)(long)skb->data;
84 const void* data_end = (void*)(long)skb->data_end;
105 bpf_skb_pull_data(skb, l2_header_size + IP6_HLEN + TCP_HLEN);
107 data = (void*)(long)skb->data;
108 data_end = (void*)(long)skb->data_end;
139 .iif = skb->ifindex,
144 .iif = skb->ifindex,
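
Lines 139/144 populate the key of the per-direction forwarding maps, starting with the ingress ifindex taken from the skb. A hypothetical miniature of that key/lookup/redirect shape (map, struct, and field names below are placeholders, not the file's real definitions):

    #include <linux/bpf.h>
    #include <linux/in6.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    struct flow6_key {
        __u32 iif;              // ingress interface, from skb->ifindex
        struct in6_addr addr;   // address the rule matches on
    };

    struct flow6_value {
        __u32 oif;              // egress interface to redirect to
    };

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __type(key, struct flow6_key);
        __type(value, struct flow6_value);
        __uint(max_entries, 64);
    } flow6_map SEC(".maps");

    static inline __always_inline int lookup_and_redirect(struct __sk_buff* skb,
                                                          const struct in6_addr* addr) {
        struct flow6_key k = { .iif = skb->ifindex, .addr = *addr };
        struct flow6_value* v = bpf_map_lookup_elem(&flow6_map, &k);
        if (!v) return TC_ACT_PIPE;      // no rule: let the stack handle it
        return bpf_redirect(v->oif, 0);  // fast path out the other interface
    }
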
156 uint32_t stat_and_limit_k = stream.down ? skb->ifindex : v->oif;
179 uint64_t L3_bytes = skb->len - l2_header_size;
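
Lines 156/179 choose the accounting key (the receive interface for downstream traffic, the output interface otherwise) and compute the billable byte count at L3, i.e. skb->len minus the link-layer header. A hypothetical sketch of per-interface quota accounting in that style; the map name, value layout, and punt convention here are assumptions, not the file's actual definitions:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct {
        __uint(type, BPF_MAP_TYPE_HASH);
        __type(key, __u32);      // ifindex being accounted against
        __type(value, __u64);    // bytes forwarded so far
        __uint(max_entries, 16);
    } fwd_bytes_map SEC(".maps");

    // Returns 0 if the packet fits within the quota, -1 to punt it.
    static inline __always_inline int account_bytes(struct __sk_buff* skb, __u32 key,
                                                    __u32 l2_header_size, __u64 limit) {
        __u64 l3_bytes = skb->len - l2_header_size;
        __u64* sofar = bpf_map_lookup_elem(&fwd_bytes_map, &key);
        if (!sofar) return -1;                     // no quota entry: punt
        if (*sofar + l3_bytes > limit) return -1;  // would exceed limit: punt
        __sync_fetch_and_add(sofar, l3_bytes);     // atomic add
        return 0;
    }
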
200 if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
206 data = (void*)(long)skb->data;
207 data_end = (void*)(long)skb->data_end;
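
Lines 200-207 handle forwarding from a raw-IP interface out an ethernet one: bpf_skb_change_head() grows headroom by sizeof(struct ethhdr) so a link-layer header can be prepended, and, like pulling, it invalidates all packet pointers, so data/data_end are re-derived and re-checked immediately. A sketch of that sequence (the header template stands in for the MAC addresses the real code takes from its forwarding map entry):

    #include <linux/bpf.h>
    #include <linux/if_ether.h>
    #include <linux/pkt_cls.h>
    #include <bpf/bpf_helpers.h>

    static inline __always_inline int prepend_eth(struct __sk_buff* skb,
                                                  const struct ethhdr* hdr_template) {
        if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0))
            return TC_ACT_PIPE;                    // couldn't grow headroom

        // change_head moved the buffer: re-derive and re-validate.
        void* data = (void*)(long)skb->data;
        const void* data_end = (void*)(long)skb->data_end;
        struct ethhdr* eth = data;
        if ((void*)(eth + 1) > data_end) return TC_ACT_PIPE;

        *eth = *hdr_template;                      // dst/src MAC + h_proto
        return TC_ACT_OK;
    }
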
229 // bpf_csum_update() always succeeds if the skb is CHECKSUM_COMPLETE and returns an error (-ENOTSUPP) if it isn't.
231 bpf_csum_update(skb, 0xFFFF - ntohs(old_hl) + ntohs(new_hl));
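
Line 231 is checksum maintenance for the hop-limit decrement. IPv6 has no L3 header checksum and the hop limit is not part of the L4 pseudo-header, so the only thing that can go stale is the skb-level packet checksum on CHECKSUM_COMPLETE skbs. That sum is ones'-complement arithmetic over the packet bytes, so replacing a 16-bit word old with new shifts it by (new - old), and "minus old" is expressed as "plus (0xFFFF - old)", which is exactly the delta passed on line 231. As a standalone sketch (the helper name is mine):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>
    #include <bpf/bpf_endian.h>

    // Patch skb->csum after a 16-bit word in the packet changed from
    // old_word to new_word. bpf_csum_update() applies the delta only
    // when skb->ip_summed == CHECKSUM_COMPLETE and is otherwise an
    // error, which is precisely the only case that needs fixing.
    static inline __always_inline void fixup_csum_complete(struct __sk_buff* skb,
                                                           __be16 old_word,
                                                           __be16 new_word) {
        bpf_csum_update(skb, 0xFFFF - bpf_ntohs(old_word) + bpf_ntohs(new_word));
    }
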
251 (struct __sk_buff* skb) {
252 return do_forward6(skb, ETHER, DOWNSTREAM, KVER_NONE);
257 (struct __sk_buff* skb) {
258 return do_forward6(skb, ETHER, UPSTREAM, KVER_NONE);
277 (struct __sk_buff* skb) {
278 return do_forward6(skb, RAWIP, DOWNSTREAM, KVER_4_14);
283 (struct __sk_buff* skb) {
284 return do_forward6(skb, RAWIP, UPSTREAM, KVER_4_14);
290 (__unused struct __sk_buff* skb) {
296 (__unused struct __sk_buff* skb) {
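
Lines 251-296 are the IPv6 entry points: a 2x2 matrix of link type (ETHER/RAWIP) and direction (DOWNSTREAM/UPSTREAM), each with a minimum kernel version, plus stubs taking an __unused skb for kernels below that minimum. The file generates these with AOSP's DEFINE_BPF_PROG-style macros; the sketch below replaces that layer with generic libbpf SEC() conventions (an assumption, as are the section and stub names and the stub's return value):

    SEC("schedcls/tether_downstream6_ether")
    int sched_cls_tether_downstream6_ether(struct __sk_buff* skb) {
        // ETHER/DOWNSTREAM/KVER_NONE are compile-time constants, so the
        // always-inlined do_forward6() body is specialized per program.
        return do_forward6(skb, ETHER, DOWNSTREAM, KVER_NONE);
    }

    // Loaded instead of the real program on kernels that lack a needed
    // helper; it merely declines to handle the packet.
    SEC("schedcls/tether_downstream6_rawip_stub")
    int sched_cls_tether_downstream6_rawip_stub(__unused struct __sk_buff* skb) {
        return TC_ACT_PIPE;
    }
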
306 static inline __always_inline int do_forward4_bottom(struct __sk_buff* skb,
329 // additional updating of skb->csum (this could be fixed up manually with more effort).
331 // Note that the in-kernel implementation of 'int64_t bpf_csum_update(skb, u32 csum)' is:
332 // if (skb->ip_summed == CHECKSUM_COMPLETE)
333 // return (skb->csum = csum_add(skb->csum, csum));
338 // and leave all other packets unaffected (since it just at most adds zero to skb->csum).
343 // Additionally, since we're forwarding, in most cases the value of the skb->csum field
348 // (ie. something like veth) where the CHECKSUM_COMPLETE/skb->csum can get reused
352 // void skb_checksum_complete_unset(struct sk_buff *skb) {
353 // if (skb->ip_summed == CHECKSUM_COMPLETE) skb->ip_summed = CHECKSUM_NONE;
356 if (!udph->check && (bpf_csum_update(skb, 0) >= 0)) TC_PUNT(UDP_CSUM_ZERO);
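
Line 356 packs two facts into one branch: the IPv4 UDP checksum is zero (legal, meaning "not computed"), and bpf_csum_update(skb, 0) succeeded, which per the implementation quoted on lines 332-333 happens only for CHECKSUM_COMPLETE skbs. Adding zero is arithmetically a no-op, so the call serves purely as a CHECKSUM_COMPLETE probe; such packets are punted because the NAT rewrites below would have no L4 checksum to patch, leaving skb->csum inconsistent. Distilled into a hypothetical helper:

    #include <stdbool.h>
    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    // bpf_csum_update(skb, 0) adds zero to skb->csum (a no-op) but
    // returns a negative error unless skb->ip_summed is
    // CHECKSUM_COMPLETE, so it doubles as a cheap probe.
    static inline __always_inline bool skb_is_checksum_complete(struct __sk_buff* skb) {
        return bpf_csum_update(skb, 0) >= 0;
    }

    // Usage mirroring line 356:
    //   if (!udph->check && skb_is_checksum_complete(skb)) TC_PUNT(UDP_CSUM_ZERO);
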
360 .iif = skb->ifindex,
375 uint32_t stat_and_limit_k = stream.down ? skb->ifindex : v->oif;
398 uint64_t L3_bytes = skb->len - l2_header_size;
419 if (bpf_skb_change_head(skb, sizeof(struct ethhdr), /*flags*/ 0)) {
425 data = (void*)(long)skb->data;
426 data_end = (void*)(long)skb->data_end;
454 bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_ttl_proto, new_ttl_proto, sz2);
455 bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(ttl), &new_ttl_proto, sz2, 0);
466 bpf_l4_csum_replace(skb, l4_offs_csum, old_daddr, new_daddr, sz4 | BPF_F_PSEUDO_HDR | l4_flags);
467 bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_daddr, new_daddr, sz4);
468 bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(daddr), &new_daddr, sz4, 0);
470 bpf_l4_csum_replace(skb, l4_offs_csum, old_saddr, new_saddr, sz4 | BPF_F_PSEUDO_HDR | l4_flags);
471 bpf_l3_csum_replace(skb, ETH_IP4_OFFSET(check), old_saddr, new_saddr, sz4);
472 bpf_skb_store_bytes(skb, ETH_IP4_OFFSET(saddr), &new_saddr, sz4, 0);
476 bpf_l4_csum_replace(skb, l4_offs_csum, k.srcPort, v->srcPort, sz2 | l4_flags);
477 bpf_skb_store_bytes(skb, is_tcp ? ETH_IP4_TCP_OFFSET(source) : ETH_IP4_UDP_OFFSET(source),
480 bpf_l4_csum_replace(skb, l4_offs_csum, k.dstPort, v->dstPort, sz2 | l4_flags);
481 bpf_skb_store_bytes(skb, is_tcp ? ETH_IP4_TCP_OFFSET(dest) : ETH_IP4_UDP_OFFSET(dest),
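
Lines 454-481 are the core NAT rewrite, and the ordering inside each triple is deliberate: both checksums are patched from explicit (old, new) values first, and only then are the bytes stored, so nothing depends on packet pointers surviving the helper calls. Address changes must hit both the IPv4 header checksum and the L4 checksum (the latter via the pseudo-header, hence BPF_F_PSEUDO_HDR), while port changes touch only the L4 checksum; for UDP, l4_flags would also carry BPF_F_MARK_MANGLED_0, which leaves an existing zero checksum alone and stores 0xFFFF when an update would produce zero (my reading; the listing elides l4_flags' definition). One triple as a standalone function (parameters stand in for values the real code pulls from its flow-map entry):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    static inline __always_inline void rewrite_daddr(struct __sk_buff* skb,
                                                     __u32 l3_csum_offs,
                                                     __u32 l4_csum_offs,
                                                     __u32 daddr_offs,
                                                     __u64 l4_flags,
                                                     __be32 old_daddr,
                                                     __be32 new_daddr) {
        const int sz4 = sizeof(__be32);
        // 1. L4 checksum: the addresses are in the TCP/UDP pseudo-header.
        bpf_l4_csum_replace(skb, l4_csum_offs, old_daddr, new_daddr,
                            sz4 | BPF_F_PSEUDO_HDR | l4_flags);
        // 2. L3 checksum: the IPv4 header checksum covers them directly.
        bpf_l3_csum_replace(skb, l3_csum_offs, old_daddr, new_daddr, sz4);
        // 3. Only now overwrite the bytes themselves; the helpers above
        //    took the old value explicitly, so no packet pointer needed
        //    to stay valid across the calls.
        bpf_skb_store_bytes(skb, daddr_offs, &new_daddr, sz4, 0);
    }
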
500 static inline __always_inline int do_forward4(struct __sk_buff* skb,
508 if (is_ethernet && (skb->pkt_type != PACKET_HOST)) return TC_ACT_PIPE;
511 if (skb->protocol != htons(ETH_P_IP)) return TC_ACT_PIPE;
518 // It has to be done early because it will invalidate any skb->data/data_end derived pointers.
519 try_make_writable(skb, l2_header_size + IP4_HLEN + TCP_HLEN);
521 void* data = (void*)(long)skb->data;
522 const void* data_end = (void*)(long)skb->data_end;
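
Where do_forward6() checks bpf_skb_pull_data()'s return value and bails out (line 81), do_forward4() goes through a try_make_writable() wrapper (line 519) and ignores failure; the data/data_end bounds checks that follow will reject the packet if the pull did not happen. Its presumed shape (the real definition lives in AOSP's shared BPF helper headers; this is a reconstruction):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    // Best-effort pull: clamp the request to the packet length, then
    // pull only if the linear region is currently too short. Failure
    // is tolerated -- later bounds checks catch it.
    static inline __always_inline void try_make_writable(struct __sk_buff* skb, int len) {
        if (len > skb->len) len = skb->len;
        if (skb->data_end - skb->data < len) bpf_skb_pull_data(skb, len);
    }
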
593 return do_forward4_bottom(skb, l2_header_size, data, data_end, eth, ip,
596 return do_forward4_bottom(skb, l2_header_size, data, data_end, eth, ip,
605 (struct __sk_buff* skb) {
606 return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_5_10);
611 (struct __sk_buff* skb) {
612 return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_5_10);
617 (struct __sk_buff* skb) {
618 return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_5_10);
623 (struct __sk_buff* skb) {
624 return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_5_10);
634 (struct __sk_buff* skb) {
635 return do_forward4(skb, RAWIP, DOWNSTREAM, UPDATETIME, KVER_4_14);
642 (struct __sk_buff* skb) {
643 return do_forward4(skb, RAWIP, UPSTREAM, UPDATETIME, KVER_4_14);
650 (struct __sk_buff* skb) {
651 return do_forward4(skb, ETHER, DOWNSTREAM, UPDATETIME, KVER_4_14);
658 (struct __sk_buff* skb) {
659 return do_forward4(skb, ETHER, UPSTREAM, UPDATETIME, KVER_4_14);
678 (struct __sk_buff* skb) {
679 return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_5_4);
684 (struct __sk_buff* skb) {
685 return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_5_4);
695 (struct __sk_buff* skb) {
696 return do_forward4(skb, RAWIP, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
703 (struct __sk_buff* skb) {
704 return do_forward4(skb, RAWIP, UPSTREAM, NO_UPDATETIME, KVER_4_14);
711 (struct __sk_buff* skb) {
712 return do_forward4(skb, ETHER, DOWNSTREAM, NO_UPDATETIME, KVER_4_14);
717 (struct __sk_buff* skb) {
718 return do_forward4(skb, ETHER, UPSTREAM, NO_UPDATETIME, KVER_4_14);
727 (__unused struct __sk_buff* skb) {
733 (__unused struct __sk_buff* skb) {
741 (__unused struct __sk_buff* skb) {
747 (__unused struct __sk_buff* skb) {