Lines Matching refs:skb
179 static __always_inline inline void update_##the_stats_map(const struct __sk_buff* const skb, \ in DEFINE_BPF_MAP_RO_NETD()
192 uint64_t bytes = skb->len; \ in DEFINE_BPF_MAP_RO_NETD()
194 bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6)); \ in DEFINE_BPF_MAP_RO_NETD()
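Lines 179-194 are the body of the macro-generated stats updater: the packet length comes straight from skb->len, and the address family from skb->protocol, which cgroup_skb programs see as the L3 EtherType in network byte order. A minimal userspace sketch of that pattern (the stats_value fields are illustrative, not the module's actual StatsValue layout):

    #include <stdbool.h>
    #include <stdint.h>
    #include <arpa/inet.h>   /* htons() */

    #define ETH_P_IPV6 0x86DD

    struct sk_buff_view {            /* userspace stand-in for struct __sk_buff */
        uint32_t len;                /* total packet length in bytes */
        uint32_t protocol;           /* L3 EtherType, network byte order */
    };

    struct stats_value {             /* illustrative; not the real StatsValue */
        uint64_t packets;
        uint64_t bytes;
    };

    static void update_stats(const struct sk_buff_view *skb, struct stats_value *v) {
        uint64_t bytes = skb->len;                              /* cf. line 192 */
        bool is_ipv6 = (skb->protocol == htons(ETH_P_IPV6));    /* cf. line 194 */
        (void)is_ipv6;    /* the real updater branches per family on this */
        v->packets += 1;
        v->bytes += bytes;
    }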
218 static __always_inline inline int bpf_skb_load_bytes_net(const struct __sk_buff* const skb,
240 ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
241 : bpf_skb_load_bytes(skb, L3_off, to, len);
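Lines 218-241 hide the kernel-version split behind one wrapper: kernels that have bpf_skb_load_bytes_relative() can read at an offset from the network-header start (BPF_HDR_START_NET), so the same offsets work whether or not an L2 header precedes the IP header; older kernels fall back to the absolute bpf_skb_load_bytes(). A libbpf-style sketch of the same dispatch, where the boolean parameter stands in for the source's kver comparison:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    /* Sketch of lines 218-241: prefer the relative load when the kernel
     * supports it, fall back to the absolute one otherwise. Both helper
     * signatures are the real ones from bpf_helper_defs.h. */
    static __always_inline int load_net_bytes(const struct __sk_buff *skb,
                                              __u32 L3_off, void *to, __u32 len,
                                              int have_relative_load) {
        return have_relative_load
                /* offset counted from the start of the network (L3) header */
                ? bpf_skb_load_bytes_relative(skb, L3_off, to, len, BPF_HDR_START_NET)
                /* older kernels: offset counted from the start of skb data */
                : bpf_skb_load_bytes(skb, L3_off, to, len);
    }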
259 static __always_inline inline bool should_block_local_network_packets(struct __sk_buff *skb, in should_block_local_network_packets() argument
271 if (skb->protocol == htons(ETH_P_IP)) { in should_block_local_network_packets()
276 (void)bpf_skb_load_bytes_net(skb, remote_ip_ofs, &remote_ip6.s6_addr32[3], 4, kver); in should_block_local_network_packets()
277 (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &ip_proto, sizeof(ip_proto), kver); in should_block_local_network_packets()
279 (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &ihl, sizeof(ihl), kver); in should_block_local_network_packets()
281 } else if (skb->protocol == htons(ETH_P_IPV6)) { in should_block_local_network_packets()
283 (void)bpf_skb_load_bytes_net(skb, remote_ip_ofs, &remote_ip6, sizeof(remote_ip6), kver); in should_block_local_network_packets()
284 (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &ip_proto, sizeof(ip_proto), kver); in should_block_local_network_packets()
297 (void)bpf_skb_load_bytes_net(skb, L4_off + (egress.egress ? 2 : 0), &remote_port, sizeof(remote_port), kver); in should_block_local_network_packets()
301 return !is_local_net_access_allowed(skb->ifindex, &remote_ip6, ip_proto, remote_port); in should_block_local_network_packets()
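Lines 271-284 normalize both families into a single in6_addr: IPv4 stores its 4-byte remote address in s6_addr32[3] and reads protocol and IHL from the fixed IPv4 offsets, while IPv6 copies all 16 bytes and reads nexthdr, so the is_local_net_access_allowed() lookup at line 301 only ever deals with one key type. A userspace sketch of the normalization, with the BPF loads replaced by memcpy from a raw L3 header (which remote field is read per direction is inferred from the call sites):

    #include <netinet/in.h>   /* struct in6_addr */
    #include <stdint.h>
    #include <string.h>

    /* Normalize a packet's remote address into one in6_addr, mirroring
     * lines 271-284: IPv4 occupies the last 4 bytes, IPv6 is copied whole.
     * `hdr` points at the start of the L3 header. */
    static void remote_to_in6(const uint8_t *hdr, int is_ipv6, int egress,
                              struct in6_addr *out, uint8_t *ip_proto) {
        memset(out, 0, sizeof(*out));
        if (!is_ipv6) {
            /* IPv4 (RFC 791): protocol at byte 9, saddr at 12, daddr at 16 */
            int remote_ip_ofs = egress ? 16 : 12;
            memcpy(&out->s6_addr32[3], hdr + remote_ip_ofs, 4);
            *ip_proto = hdr[9];
        } else {
            /* IPv6 (RFC 8200): nexthdr at byte 6, saddr at 8, daddr at 24 */
            int remote_ip_ofs = egress ? 24 : 8;
            memcpy(out, hdr + remote_ip_ofs, 16);
            *ip_proto = hdr[6];
        }
    }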
305 const struct __sk_buff* const skb, const struct egress_bool egress, const uint32_t uid, in do_packet_tracing() argument
324 if (skb->protocol == htons(ETH_P_IP)) { in do_packet_tracing()
325 (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver); in do_packet_tracing()
326 (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &L4_off, sizeof(L4_off), kver); in do_packet_tracing()
329 } else if (skb->protocol == htons(ETH_P_IPV6)) { in do_packet_tracing()
330 (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver); in do_packet_tracing()
338 if (!bpf_skb_load_bytes_net(skb, L4_off, &ext_hdr, sizeof(ext_hdr), kver)) { in do_packet_tracing()
350 (void)bpf_skb_load_bytes_net(skb, L4_off + TCP_FLAG8_OFF, &flags, sizeof(flags), kver); in do_packet_tracing()
357 (void)bpf_skb_load_bytes_net(skb, L4_off + 0, &sport, sizeof(sport), kver); in do_packet_tracing()
358 (void)bpf_skb_load_bytes_net(skb, L4_off + 2, &dport, sizeof(dport), kver); in do_packet_tracing()
364 (void)bpf_skb_load_bytes_net(skb, L4_off + 0, (char *)&sport + 1, 1, kver); //type in do_packet_tracing()
365 (void)bpf_skb_load_bytes_net(skb, L4_off + 1, (char *)&dport + 1, 1, kver); //code in do_packet_tracing()
371 pkt->ifindex = skb->ifindex; in do_packet_tracing()
372 pkt->length = skb->len; in do_packet_tracing()
380 pkt->wakeup = !egress.egress && (skb->mark & 0x80000000); // Fwmark.ingress_cpu_wakeup in do_packet_tracing()
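Lines 324-380 build the trace record: the L4 offset comes from IHL (IPv4) or from walking an extension header (IPv6, line 338), and the same two 16-bit slots carry source/destination ports for TCP/UDP but type/code for ICMP, where lines 364-365 pack each single byte into the second byte of the slot via (char *)&sport + 1. A sketch of that L4 summary over a raw transport header:

    #include <netinet/in.h>   /* IPPROTO_* */
    #include <stdint.h>
    #include <string.h>

    /* Summarize the first bytes of the L4 header as lines 350-365 do.
     * `l4` points at the transport header; ports stay in network order. */
    static void summarize_l4(const uint8_t *l4, uint8_t proto,
                             uint16_t *sport, uint16_t *dport, uint8_t *tcp_flags) {
        *sport = *dport = 0;
        *tcp_flags = 0;
        if (proto == IPPROTO_TCP || proto == IPPROTO_UDP) {
            memcpy(sport, l4 + 0, 2);               /* source port */
            memcpy(dport, l4 + 2, 2);               /* destination port */
            if (proto == IPPROTO_TCP) *tcp_flags = l4[13];  /* TCP flags byte */
        } else if (proto == IPPROTO_ICMP || proto == IPPROTO_ICMPV6) {
            ((uint8_t *)sport)[1] = l4[0];  /* type; mirrors (char *)&sport + 1 */
            ((uint8_t *)dport)[1] = l4[1];  /* code; mirrors (char *)&dport + 1 */
        }
    }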
388 static __always_inline inline bool skip_owner_match(struct __sk_buff* skb, in skip_owner_match() argument
392 if (skb->protocol == htons(ETH_P_IP)) { in skip_owner_match()
395 (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(protocol), &proto, sizeof(proto), kver); in skip_owner_match()
404 (void)bpf_skb_load_bytes_net(skb, IPPROTO_IHL_OFF, &ihl, sizeof(ihl), kver); in skip_owner_match()
406 (void)bpf_skb_load_bytes_net(skb, (ihl & 0xF) * 4 + TCP_FLAG32_OFF, in skip_owner_match()
408 } else if (skb->protocol == htons(ETH_P_IPV6)) { in skip_owner_match()
411 (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(nexthdr), &proto, sizeof(proto), kver); in skip_owner_match()
415 (void)bpf_skb_load_bytes_net(skb, sizeof(struct ipv6hdr) + TCP_FLAG32_OFF, in skip_owner_match()
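Lines 392-415 show why the flag read needs no extra parsing on the IPv6 side: the IPv6 fixed header is always 40 bytes, while IPv4's header length is the IHL nibble counted in 32-bit words, masked and scaled at line 406. A sketch of that offset computation:

    #include <stdint.h>

    #define IPV6_HDR_LEN 40   /* fixed header size, sizeof(struct ipv6hdr) */

    /* Compute the transport-header offset the way lines 404-415 do:
     * IPv4's IHL is the low nibble of byte 0 in 32-bit words, so L4
     * starts at (ihl & 0xF) * 4; the IPv6 fixed header never varies
     * (extension headers are not handled here). */
    static uint32_t l4_offset(const uint8_t *l3, int is_ipv6) {
        if (is_ipv6) return IPV6_HDR_LEN;
        return (uint32_t)(l3[0] & 0xF) * 4;   /* 20..60 bytes for IPv4 */
    }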
434 static __always_inline inline bool ingress_should_discard(struct __sk_buff* skb, in ingress_should_discard() argument
442 if (skb->protocol == htons(ETH_P_IP)) { in ingress_should_discard()
444 (void)bpf_skb_load_bytes_net(skb, IP4_OFFSET(daddr), &k.daddr.s6_addr32[3], 4, kver); in ingress_should_discard()
445 } else if (skb->protocol == htons(ETH_P_IPV6)) { in ingress_should_discard()
446 (void)bpf_skb_load_bytes_net(skb, IP6_OFFSET(daddr), &k.daddr, sizeof(k.daddr), kver); in ingress_should_discard()
458 if (skb->ifindex == v->iif[0]) return false; // allowed interface in ingress_should_discard()
459 if (skb->ifindex == v->iif[1]) return false; // allowed interface in ingress_should_discard()
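Lines 442-459 key a map lookup on the destination address (again widened to in6_addr) and, on a hit, let the packet through only if it arrived on one of up to two permitted interfaces. A sketch of the verdict half, with a hypothetical value type (the real map value layout may differ):

    #include <stdbool.h>
    #include <stdint.h>

    /* Hypothetical shape of the map value used at lines 458-459: up to
     * two interfaces on which traffic to this destination is legitimate. */
    struct allowed_iifs {
        uint32_t iif[2];
    };

    /* Discard ingress traffic to a tracked destination unless it arrived
     * on one of the allowed interfaces. */
    static bool should_discard(uint32_t ifindex, const struct allowed_iifs *v) {
        if (!v) return false;                     /* untracked destination: allow */
        if (ifindex == v->iif[0]) return false;   /* allowed interface */
        if (ifindex == v->iif[1]) return false;   /* allowed interface */
        return true;                              /* tracked, wrong interface */
    }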
463 static __always_inline inline int bpf_owner_match(struct __sk_buff* skb, uint32_t uid, in bpf_owner_match() argument
469 if (skip_owner_match(skb, egress, kver)) return PASS; in bpf_owner_match()
474 if (skb->ifindex == 1) enabledRules &= ~BACKGROUND_MATCH; in bpf_owner_match()
482 if (!egress.egress && skb->ifindex != 1) { in bpf_owner_match()
483 if (ingress_should_discard(skb, kver)) return DROP; in bpf_owner_match()
485 if (allowed_iif && skb->ifindex != allowed_iif) { in bpf_owner_match()
497 if (SDK_LEVEL_IS_AT_LEAST(lvl, 25Q2) && skb->ifindex == 1) { in bpf_owner_match()
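Lines 469-497 are the policy core: loopback (ifindex 1) is exempted from the background chain, and on non-loopback ingress a uid pinned to a single interface (allowed_iif) drops everything arriving elsewhere. A sketch of those two checks; the BACKGROUND_MATCH bit position is an assumption and the remaining rule-chain walk is elided:

    #include <stdbool.h>
    #include <stdint.h>

    #define LOOPBACK_IFINDEX 1u          /* lo is always ifindex 1 */
    #define BACKGROUND_MATCH (1u << 6)   /* bit position is illustrative */

    enum verdict { PASS, DROP };

    static enum verdict owner_match(uint32_t ifindex, bool egress,
                                    uint32_t enabledRules, uint32_t allowed_iif) {
        /* loopback traffic never hits the background restriction (line 474) */
        if (ifindex == LOOPBACK_IFINDEX) enabledRules &= ~BACKGROUND_MATCH;
        if (!egress && ifindex != LOOPBACK_IFINDEX) {
            /* uid is pinned to one interface: any other ingress iif drops */
            if (allowed_iif && ifindex != allowed_iif) return DROP;
        }
        (void)enabledRules;   /* the real code walks the remaining rule chains */
        return PASS;
    }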
505 const struct __sk_buff* const skb, in update_stats_with_config() argument
510 update_stats_map_A(skb, key, egress, kver); in update_stats_with_config()
512 update_stats_map_B(skb, key, egress, kver); in update_stats_with_config()
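Lines 505-512 select between two identical stats maps; userspace flips the selector and then drains the map the datapath is no longer writing, so a read-and-reset never races live updates. A single-process sketch of the same double-buffering idea (the real design synchronizes through the BPF configuration map, not a volatile flag):

    #include <stdint.h>
    #include <string.h>

    enum { SELECT_MAP_A = 0, SELECT_MAP_B = 1 };

    struct stats { uint64_t packets, bytes; };

    static struct stats bufA, bufB;
    static volatile uint32_t selected = SELECT_MAP_A;

    /* Hot path: write only to the currently selected buffer. */
    static void account(uint64_t bytes) {
        struct stats *s = (selected == SELECT_MAP_A) ? &bufA : &bufB;
        s->packets += 1;
        s->bytes += bytes;
    }

    /* Reader: flip first, then read the buffer writers just left. */
    static struct stats drain(void) {
        struct stats *old = (selected == SELECT_MAP_A) ? &bufA : &bufB;
        selected ^= 1;               /* writers move to the other buffer */
        struct stats out = *old;     /* old buffer is now quiescent */
        memset(old, 0, sizeof(*old));
        return out;
    }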
516 static __always_inline inline int bpf_traffic_account(struct __sk_buff* skb, in bpf_traffic_account() argument
521 uint32_t sock_uid = bpf_get_socket_uid(skb); in bpf_traffic_account()
531 uint64_t cookie = bpf_get_socket_cookie(skb); // 0 iff !skb->sk in bpf_traffic_account()
548 int match = bpf_owner_match(skb, sock_uid, egress, kver, lvl); in bpf_traffic_account()
562 if (should_block_local_network_packets(skb, uid, egress, kver)) match = DROP; in bpf_traffic_account()
568 StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0, .ifaceIndex = skb->ifindex}; in bpf_traffic_account()
578 do_packet_tracing(skb, egress, uid, tag, kver); in bpf_traffic_account()
579 update_stats_with_config(*selectedMap, skb, &key, egress, kver); in bpf_traffic_account()
580 update_app_uid_stats_map(skb, &uid, egress, kver); in bpf_traffic_account()
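Lines 516-580 tie it together: resolve the socket owner (bpf_get_socket_cookie() returning 0 means the skb has no socket, line 531), take the owner-match and local-network verdicts, and account the packet under a {uid, tag, counterSet, ifindex} key into the selected maps. A condensed sketch of that control flow; verdict ordering and the drop-versus-account interaction are simplified relative to the source:

    #include <stdint.h>

    enum verdict { PASS, DROP };

    typedef struct {
        uint32_t uid, tag, counterSet, ifaceIndex;   /* widths are assumptions */
    } StatsKey;

    static enum verdict traffic_account(uint32_t sock_uid, uint64_t cookie,
                                        uint32_t tag, uint32_t ifindex,
                                        enum verdict owner_verdict,
                                        int blocked_by_local_net_policy) {
        uint32_t uid = sock_uid;   /* cookie == 0 iff no socket (line 531) */
        (void)cookie;
        if (owner_verdict == DROP) return DROP;
        if (blocked_by_local_net_policy) return DROP;       /* cf. line 562 */
        StatsKey key = {.uid = uid, .tag = tag, .counterSet = 0,
                        .ifaceIndex = ifindex};             /* cf. line 568 */
        (void)key;   /* the real code updates stats maps A/B and per-uid maps */
        return PASS;
    }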
611 (struct __sk_buff* skb) {
612 return bpf_traffic_account(skb, INGRESS, KVER_5_10, SDK_LEVEL_25Q2);
619 (struct __sk_buff* skb) {
620 return bpf_traffic_account(skb, INGRESS, KVER_5_4, SDK_LEVEL_25Q2);
627 (struct __sk_buff* skb) {
628 return bpf_traffic_account(skb, INGRESS, KVER_5_10, SDK_LEVEL_U);
634 (struct __sk_buff* skb) {
635 return bpf_traffic_account(skb, INGRESS, KVER_4_19, SDK_LEVEL_T);
641 (struct __sk_buff* skb) {
642 return bpf_traffic_account(skb, INGRESS, KVER_NONE, SDK_LEVEL_T);
651 (struct __sk_buff* skb) {
652 return bpf_traffic_account(skb, EGRESS, KVER_5_10, SDK_LEVEL_25Q2);
659 (struct __sk_buff* skb) {
660 return bpf_traffic_account(skb, EGRESS, KVER_5_4, SDK_LEVEL_25Q2);
667 (struct __sk_buff* skb) {
668 return bpf_traffic_account(skb, EGRESS, KVER_5_10, SDK_LEVEL_U);
674 (struct __sk_buff* skb) {
675 return bpf_traffic_account(skb, EGRESS, KVER_4_19, SDK_LEVEL_T);
681 (struct __sk_buff* skb) {
682 return bpf_traffic_account(skb, EGRESS, KVER_NONE, SDK_LEVEL_T);
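Lines 611-682 repeat one two-line body ten times, each entry point freezing direction, minimum kernel version, and SDK level at compile time, so every loaded variant contains only code the target kernel's verifier will accept. A libbpf-flavored sketch of the fan-out; the SEC() names and constants here are illustrative, not the Android bpf loader's macros:

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    enum dir { INGRESS, EGRESS };

    /* Shared body: dead branches for a given (kver, sdk) pair fold away
     * because every caller passes compile-time constants. */
    static __always_inline int traffic_account(struct __sk_buff *skb, enum dir d,
                                               unsigned kver, unsigned sdk) {
        (void)skb; (void)d; (void)kver; (void)sdk;
        return 1;   /* 1 = allow for cgroup_skb programs */
    }

    SEC("cgroup_skb/ingress")
    int ingress_5_10(struct __sk_buff *skb) { return traffic_account(skb, INGRESS, 510, 202502); }

    SEC("cgroup_skb/egress")
    int egress_5_10(struct __sk_buff *skb) { return traffic_account(skb, EGRESS, 510, 202502); }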
689 (struct __sk_buff* skb) {
694 uint32_t sock_uid = bpf_get_socket_uid(skb);
696 uint64_t cookie = bpf_get_socket_cookie(skb);
701 uint32_t key = skb->ifindex;
702 update_iface_stats_map(skb, &key, EGRESS, KVER_NONE);
708 (struct __sk_buff* skb) {
714 uint32_t key = skb->ifindex;
715 update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
721 (struct __sk_buff* skb) {
722 if (is_received_skb(skb)) {
724 uint32_t key = skb->ifindex;
725 update_iface_stats_map(skb, &key, INGRESS, KVER_NONE);
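Lines 689-725 are the plain per-interface counters: the map key is just skb->ifindex, with the line-722 variant additionally gating on is_received_skb() before counting ingress. A sketch of the counter update (the rx/tx field names follow the usual split and are assumptions):

    #include <stdint.h>

    struct iface_stats { uint64_t rxPackets, rxBytes, txPackets, txBytes; };

    /* Per-interface accounting as around lines 701-725: one value per
     * ifindex, bumped on whichever direction the program handles. */
    static void update_iface_stats(struct iface_stats *v, int egress, uint64_t len) {
        if (egress) { v->txPackets += 1; v->txBytes += len; }
        else        { v->rxPackets += 1; v->rxBytes += len; }
    }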
732 (struct __sk_buff* skb) {
733 uint32_t sock_uid = bpf_get_socket_uid(skb);
751 (struct __sk_buff* skb) {
752 uint32_t sock_uid = bpf_get_socket_uid(skb);
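Both trailing programs start from bpf_get_socket_uid(); when the skb has no associated full socket, the helper returns the kernel's overflow UID (65534 by default), so UID-keyed policy needs to treat that value specially. A sketch of that guard:

    #include <stdbool.h>
    #include <stdint.h>

    #define OVERFLOW_UID 65534u   /* kernel default /proc/sys/fs/overflowuid */

    /* UID gate in the spirit of lines 732-752: packets without a
     * resolvable socket owner come back as the overflow uid and are
     * not attributable to an app. */
    static bool uid_is_attributable(uint32_t sock_uid) {
        return sock_uid != OVERFLOW_UID;
    }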