/net/netfilter/
xt_connbytes.c
    28  u_int64_t pkts = 0;  in connbytes_mt() (local)
    74  pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets);  in connbytes_mt()
    78  pkts = atomic64_read(&counters[IP_CT_DIR_REPLY].packets);  in connbytes_mt()
    83  pkts = atomic64_read(&counters[IP_CT_DIR_ORIGINAL].packets) +  in connbytes_mt()
    87  if (pkts != 0)  in connbytes_mt()
    88  what = div64_u64(bytes, pkts);  in connbytes_mt()

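The xt_connbytes hits above show the average-packet-size mode: both direction counters are read, and div64_u64() divides bytes by packets only after checking that the packet count is non-zero. Below is a minimal userspace sketch of the same calculation; plain 64-bit division stands in for div64_u64(), and the struct and function names are illustrative, not taken from xt_connbytes.c.

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the per-direction conntrack counters. */
    struct dir_counter {
        uint64_t packets;
        uint64_t bytes;
    };

    /* Average payload size over both directions, or 0 if no packets yet. */
    static uint64_t avg_pkt_size(const struct dir_counter *orig,
                                 const struct dir_counter *reply)
    {
        uint64_t pkts  = orig->packets + reply->packets;
        uint64_t bytes = orig->bytes + reply->bytes;

        return pkts ? bytes / pkts : 0;   /* zero guard mirrors "if (pkts != 0)" */
    }

    int main(void)
    {
        struct dir_counter orig  = { .packets = 10, .bytes = 14800 };
        struct dir_counter reply = { .packets = 8,  .bytes = 1200 };

        printf("avg packet size: %llu bytes\n",
               (unsigned long long)avg_pkt_size(&orig, &reply));
        return 0;
    }
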
nfnetlink_acct.c
    29  atomic64_t pkts;  (member)
    78  atomic64_set(&matching->pkts, 0);  in nfnl_acct_new()
   122  atomic64_set(&nfacct->pkts,  in nfnl_acct_new()
   137  u64 pkts, bytes;  in nfnl_acct_fill_info() (local)
   155  pkts = atomic64_xchg(&acct->pkts, 0);  in nfnl_acct_fill_info()
   161  pkts = atomic64_read(&acct->pkts);  in nfnl_acct_fill_info()
   164  if (nla_put_be64(skb, NFACCT_PKTS, cpu_to_be64(pkts),  in nfnl_acct_fill_info()
   440  atomic64_inc(&nfacct->pkts);  in nfnl_acct_update()
   476  atomic64_read(&nfacct->pkts) : atomic64_read(&nfacct->bytes);  in nfnl_acct_overquota()

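nfnl_acct_fill_info() above illustrates a reset-on-dump counter: atomic64_xchg(&acct->pkts, 0) returns the current value and zeroes it in one atomic step, while the non-resetting dump path uses a plain atomic64_read(). A userspace sketch of the same idiom with C11 atomics follows; the struct and function names are made up for illustration.

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct acct {
        atomic_uint_fast64_t pkts;
        atomic_uint_fast64_t bytes;
    };

    /* Per-packet update path: one increment, one add. */
    static void acct_update(struct acct *a, uint64_t len)
    {
        atomic_fetch_add(&a->pkts, 1);
        atomic_fetch_add(&a->bytes, len);
    }

    /* Dump path: optionally reset the counters as they are read, so no
     * update that lands between read and reset can be lost. */
    static void acct_dump(struct acct *a, bool reset,
                          uint64_t *pkts, uint64_t *bytes)
    {
        if (reset) {
            *pkts  = atomic_exchange(&a->pkts, 0);
            *bytes = atomic_exchange(&a->bytes, 0);
        } else {
            *pkts  = atomic_load(&a->pkts);
            *bytes = atomic_load(&a->bytes);
        }
    }

    int main(void)
    {
        struct acct a = { 0 };
        uint64_t pkts, bytes;

        acct_update(&a, 1500);
        acct_update(&a, 40);

        acct_dump(&a, true, &pkts, &bytes);   /* read and zero */
        printf("dump: %llu pkts, %llu bytes\n",
               (unsigned long long)pkts, (unsigned long long)bytes);

        acct_dump(&a, false, &pkts, &bytes);  /* counters are now 0 */
        printf("after reset: %llu pkts\n", (unsigned long long)pkts);
        return 0;
    }
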
nft_limit.c
    55  const struct nlattr * const tb[], bool pkts)  in nft_limit_init() (argument)
    72  if (pkts && limit->burst == 0)  in nft_limit_init()
    78  if (pkts) {  in nft_limit_init()

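nft_limit_init() takes a bool pkts so the same initializer can set up packet-based and byte-based limits, and the listing shows a default burst being considered only in the packet case. The sketch below is not the nft_limit algorithm, only a stripped-down token-bucket illustration of how one init/charge pair can serve both flavors; the default-burst constant is invented and refill over time is omitted.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define DEFAULT_PKT_BURST 5   /* illustrative, not the kernel's default */

    struct limit {
        uint64_t rate;    /* units (packets or bytes) per second */
        uint64_t burst;   /* extra units allowed on top of the rate */
        uint64_t tokens;  /* current bucket fill */
        bool     per_pkt; /* true: count packets, false: count bytes */
    };

    static void limit_init(struct limit *l, uint64_t rate, uint64_t burst,
                           bool per_pkt)
    {
        /* A zero burst only gets a default for packet limits, mirroring
         * the "if (pkts && limit->burst == 0)" check in the listing. */
        if (per_pkt && burst == 0)
            burst = DEFAULT_PKT_BURST;

        l->rate = rate;
        l->burst = burst;
        l->tokens = rate + burst;   /* start with a full bucket */
        l->per_pkt = per_pkt;
    }

    /* Charge one packet of 'len' bytes; true means "within the limit". */
    static bool limit_charge(struct limit *l, uint64_t len)
    {
        uint64_t cost = l->per_pkt ? 1 : len;

        if (l->tokens < cost)
            return false;
        l->tokens -= cost;
        return true;
    }

    int main(void)
    {
        struct limit l;

        limit_init(&l, 3, 0, true);   /* 3 packets/s, default burst */
        for (int i = 0; i < 10; i++)
            printf("pkt %d: %s\n", i, limit_charge(&l, 1500) ? "pass" : "drop");
        return 0;
    }
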
nf_tables_core.c
   110  stats->pkts++;  in nft_update_chain_stats()

nf_conntrack_netlink.c
   242  u64 pkts, bytes;  in dump_counters() (local)
   245  pkts = atomic64_xchg(&counter[dir].packets, 0);  in dump_counters()
   248  pkts = atomic64_read(&counter[dir].packets);  in dump_counters()
   256  if (nla_put_be64(skb, CTA_COUNTERS_PACKETS, cpu_to_be64(pkts),  in dump_counters()

nf_tables_api.c
  1223  u64 pkts, bytes;  in nft_dump_stats() (local)
  1234  pkts = cpu_stats->pkts;  in nft_dump_stats()
  1237  total.pkts += pkts;  in nft_dump_stats()
  1244  if (nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.pkts),  in nft_dump_stats()
  1480  stats->pkts = be64_to_cpu(nla_get_be64(tb[NFTA_COUNTER_PACKETS]));  in nft_stats_alloc()

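nft_dump_stats() above walks the chain's per-CPU stats and folds them into a single total before emitting NFTA_COUNTER_PACKETS and NFTA_COUNTER_BYTES. Here is a userspace sketch of that aggregation step with a plain array standing in for per-CPU data; the kernel also reads each CPU's pkts/bytes pair under a u64_stats seqcount, which is left out here.

    #include <stdint.h>
    #include <stdio.h>

    #define NR_CPUS 4   /* illustrative */

    struct chain_stats {
        uint64_t pkts;
        uint64_t bytes;
    };

    /* Sum the per-CPU counters into one total, as nft_dump_stats() does
     * before putting the counters on the netlink message. */
    static struct chain_stats sum_stats(const struct chain_stats percpu[NR_CPUS])
    {
        struct chain_stats total = { 0, 0 };

        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
            total.pkts  += percpu[cpu].pkts;
            total.bytes += percpu[cpu].bytes;
        }
        return total;
    }

    int main(void)
    {
        struct chain_stats percpu[NR_CPUS] = {
            { 10, 15000 }, { 3, 180 }, { 0, 0 }, { 7, 9800 },
        };
        struct chain_stats total = sum_stats(percpu);

        printf("total: %llu pkts, %llu bytes\n",
               (unsigned long long)total.pkts,
               (unsigned long long)total.bytes);
        return 0;
    }
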
/net/tipc/
bcast.c
   252  static int tipc_bcast_xmit(struct net *net, struct sk_buff_head *pkts,  in tipc_bcast_xmit() (argument)
   262  rc = tipc_link_xmit(l, pkts, &xmitq);  in tipc_bcast_xmit()
   265  __skb_queue_purge(pkts);  in tipc_bcast_xmit()
   281  static int tipc_rcast_xmit(struct net *net, struct sk_buff_head *pkts,  in tipc_rcast_xmit() (argument)
   288  selector = msg_link_selector(buf_msg(skb_peek(pkts)));  in tipc_rcast_xmit()
   293  if (!tipc_msg_pskb_copy(dnode, pkts, &_pkts))  in tipc_rcast_xmit()
   371  int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,  in tipc_mcast_xmit() (argument)
   385  if (dests->local && !tipc_msg_reassemble(pkts, &localq)) {  in tipc_mcast_xmit()
   393  skb = skb_peek(pkts);  in tipc_mcast_xmit()
   410  rc = tipc_rcast_xmit(net, pkts, dests, cong_link_cnt);  in tipc_mcast_xmit()
   [all …]

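In tipc_rcast_xmit() above, the packet queue is copied once per destination node (tipc_msg_pskb_copy()) and the copy is what gets transmitted, so the original queue stays usable for the remaining destinations. The toy sketch below shows that replicast shape with an array-backed queue and a printing "transmit"; none of the names are TIPC's.

    #include <stdio.h>
    #include <string.h>

    #define MAX_PKTS 8

    /* A toy stand-in for a sk_buff_head: a fixed array of packet sizes. */
    struct pkt_queue {
        int len[MAX_PKTS];
        int count;
    };

    /* "Transmit" a queue to one node; here we just print it. */
    static void node_xmit(const struct pkt_queue *q, int dnode)
    {
        for (int i = 0; i < q->count; i++)
            printf("  -> node %d: pkt %d (%d bytes)\n", dnode, i, q->len[i]);
    }

    /* Replicast: clone the original queue once per destination, so the
     * source queue can be reused for the next destination. */
    static void rcast_xmit(const struct pkt_queue *pkts,
                           const int *dests, int ndests)
    {
        for (int d = 0; d < ndests; d++) {
            struct pkt_queue copy;

            memcpy(&copy, pkts, sizeof(copy));  /* per-destination copy */
            node_xmit(&copy, dests[d]);
        }
    }

    int main(void)
    {
        struct pkt_queue pkts = { .len = { 1400, 1400, 300 }, .count = 3 };
        int dests[] = { 101, 102, 103 };

        rcast_xmit(&pkts, dests, 3);
        return 0;
    }
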
bearer.h
   235  void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts);
   240  struct sk_buff_head *pkts)  in tipc_loopback_trace() (argument)
   243  tipc_clone_to_loopback(net, pkts);  in tipc_loopback_trace()

socket.c
   782  struct sk_buff_head pkts;  in tipc_sendmcast() (local)
   812  __skb_queue_head_init(&pkts);  in tipc_sendmcast()
   813  rc = tipc_msg_build(hdr, msg, 0, dlen, mtu, &pkts);  in tipc_sendmcast()
   817  trace_tipc_sk_sendmcast(sk, skb_peek(&pkts),  in tipc_sendmcast()
   819  rc = tipc_mcast_xmit(net, &pkts, method, &dsts,  in tipc_sendmcast()
   845  struct sk_buff_head pkts;  in tipc_send_group_msg() (local)
   856  __skb_queue_head_init(&pkts);  in tipc_send_group_msg()
   858  rc = tipc_msg_build(hdr, m, 0, dlen, mtu, &pkts);  in tipc_send_group_msg()
   863  rc = tipc_node_xmit(net, &pkts, dnode, tsk->portid);  in tipc_send_group_msg()
  1030  struct sk_buff_head pkts;  in tipc_send_group_bcast() (local)
   [all …]

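The socket.c hits show the send-side pattern: a struct sk_buff_head pkts on the stack is initialized with __skb_queue_head_init(), filled by tipc_msg_build() (which chops the user message to the link MTU), and then handed to tipc_mcast_xmit() or tipc_node_xmit(). A minimal sketch of the build step, with the MTU value and queue type invented for illustration:

    #include <stdio.h>

    #define MTU       1000   /* illustrative link MTU */
    #define MAX_FRAGS 16

    /* Toy stand-in for the stack-allocated queue that tipc_msg_build()
     * fills with MTU-sized fragments. */
    struct frag_queue {
        int len[MAX_FRAGS];
        int count;
    };

    /* Split 'dlen' bytes of user data into MTU-sized fragments; returns
     * the fragment count, or -1 if the queue would overflow. */
    static int msg_build(int dlen, struct frag_queue *q)
    {
        q->count = 0;
        while (dlen > 0) {
            if (q->count == MAX_FRAGS)
                return -1;
            q->len[q->count++] = dlen > MTU ? MTU : dlen;
            dlen -= q->len[q->count - 1];
        }
        return q->count;
    }

    int main(void)
    {
        struct frag_queue pkts;   /* local queue, like "struct sk_buff_head pkts" */

        if (msg_build(2500, &pkts) > 0)
            for (int i = 0; i < pkts.count; i++)
                printf("fragment %d: %d bytes\n", i, pkts.len[i]);
        return 0;
    }
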
bcast.h
    89  int tipc_mcast_xmit(struct net *net, struct sk_buff_head *pkts,

bearer.c
   682  void tipc_clone_to_loopback(struct net *net, struct sk_buff_head *pkts)  in tipc_clone_to_loopback() (argument)
   688  skb_queue_walk(pkts, _skb) {  in tipc_clone_to_loopback()

/net/netfilter/ipvs/
ip_vs_sync.c
   464  struct ip_vs_conn *cp, int pkts)  in ip_vs_sync_conn_needed() (argument)
   527  pkts % sync_period != sysctl_sync_threshold(ipvs))  in ip_vs_sync_conn_needed()
   530  pkts != sysctl_sync_threshold(ipvs))  in ip_vs_sync_conn_needed()
   544  int pkts)  in ip_vs_sync_conn_v0() (argument)
   559  if (!ip_vs_sync_conn_needed(ipvs, cp, pkts))  in ip_vs_sync_conn_v0()
   621  pkts = atomic_add_return(1, &cp->in_pkts);  in ip_vs_sync_conn_v0()
   623  pkts = sysctl_sync_threshold(ipvs);  in ip_vs_sync_conn_v0()
   624  ip_vs_sync_conn(ipvs, cp, pkts);  in ip_vs_sync_conn_v0()
   633  void ip_vs_sync_conn(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts)  in ip_vs_sync_conn() (argument)
   645  ip_vs_sync_conn_v0(ipvs, cp, pkts);  in ip_vs_sync_conn()
   [all …]

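The ip_vs_sync_conn_needed() checks above gate sync messages on the connection's packet count against the sync_threshold/sync_period sysctls. As I read the two comparisons, a connection is synced on its sync_threshold-th packet and, when a period is configured, again every sync_period packets after that. A simplified decision function follows; connection state, flags, and the time-based refresh period that the real code also consults are ignored.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified: sync on the sync_threshold-th packet, then every
     * sync_period packets when a period is configured. */
    static bool sync_conn_needed(int pkts, int sync_threshold, int sync_period)
    {
        if (sync_period > 1)
            return pkts % sync_period == sync_threshold;
        return pkts == sync_threshold;
    }

    int main(void)
    {
        for (int pkts = 1; pkts <= 130; pkts++)
            if (sync_conn_needed(pkts, 3, 50))
                printf("sync at pkt %d\n", pkts);   /* 3, 53, 103 */
        return 0;
    }
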
ip_vs_core.c
  1984  int ret, pkts;  in ip_vs_in() (local)
  2143  pkts = sysctl_sync_threshold(ipvs);  in ip_vs_in()
  2145  pkts = atomic_add_return(1, &cp->in_pkts);  in ip_vs_in()
  2148  ip_vs_sync_conn(ipvs, cp, pkts);  in ip_vs_in()

/net/vmw_vsock/
virtio_transport.c
    93  LIST_HEAD(pkts);  in virtio_transport_loopback_work()
    96  list_splice_init(&vsock->loopback_list, &pkts);  in virtio_transport_loopback_work()
   104  while (!list_empty(&pkts)) {  in virtio_transport_loopback_work()
   107  pkt = list_first_entry(&pkts, struct virtio_vsock_pkt, list);  in virtio_transport_loopback_work()

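virtio_transport_loopback_work() drains the shared loopback_list in one step: list_splice_init() moves every queued packet onto a local list while the lock is held, and the packets are then delivered with the lock released. A userspace sketch of that drain-then-process idiom with a pthread mutex and a singly linked list (all names invented):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct pkt {
        int id;
        struct pkt *next;
    };

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static struct pkt *shared_list;   /* producers prepend here under 'lock' */

    static void produce(int id)
    {
        struct pkt *p = malloc(sizeof(*p));

        if (!p)
            return;
        p->id = id;
        pthread_mutex_lock(&lock);
        p->next = shared_list;        /* LIFO for brevity */
        shared_list = p;
        pthread_mutex_unlock(&lock);
    }

    /* Worker: detach the whole list in one step under the lock (the
     * list_splice_init() idea), then process it with the lock dropped. */
    static void loopback_work(void)
    {
        struct pkt *pkts;

        pthread_mutex_lock(&lock);
        pkts = shared_list;
        shared_list = NULL;
        pthread_mutex_unlock(&lock);

        while (pkts) {
            struct pkt *p = pkts;

            pkts = p->next;
            printf("delivering pkt %d\n", p->id);
            free(p);
        }
    }

    int main(void)
    {
        for (int i = 0; i < 3; i++)
            produce(i);
        loopback_work();
        return 0;
    }
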
/net/sched/
cls_matchall.c
   340  cls_mall.stats.pkts, cls_mall.stats.lastused);  in mall_stats_hw_filter()

cls_flower.c
   492  cls_flower.stats.pkts,  in fl_hw_update_stats()

/net/wireless/
rdev-ops.h
   798  struct net_device *dev, u32 rate, u32 pkts, u32 intvl)  in rdev_set_cqm_txe_config() (argument)
   801  trace_rdev_set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts, intvl);  in rdev_set_cqm_txe_config()
   802  ret = rdev->ops->set_cqm_txe_config(&rdev->wiphy, dev, rate, pkts,  in rdev_set_cqm_txe_config()

trace.h
  1384  u32 pkts, u32 intvl),
  1385  TP_ARGS(wiphy, netdev, rate, pkts, intvl),
  1390  __field(u32, pkts)
  1397  __entry->pkts = pkts;
  1401  WIPHY_PR_ARG, NETDEV_PR_ARG, __entry->rate, __entry->pkts,

nl80211.c
 10788  u32 rate, u32 pkts, u32 intvl)  in nl80211_set_cqm_txe() (argument)
 10804  return rdev_set_cqm_txe_config(rdev, dev, rate, pkts, intvl);  in nl80211_set_cqm_txe()
 10972  u32 pkts = nla_get_u32(attrs[NL80211_ATTR_CQM_TXE_PKTS]);  in nl80211_set_cqm() (local)
 10975  return nl80211_set_cqm_txe(info, rate, pkts, intvl);  in nl80211_set_cqm()

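nl80211_set_cqm() pulls three u32 attributes and forwards them through nl80211_set_cqm_txe() to the driver's set_cqm_txe_config op. My understanding of the nl80211 CQM TXE attributes is that rate is a failed-TX percentage, pkts a minimum packet count before the check applies, and intvl the measurement window in seconds. The sketch below only illustrates how such a threshold could be evaluated; it is not driver or mac80211 code, and the names are invented.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    struct cqm_txe_config {
        uint32_t rate;   /* failed-TX percentage that should trigger an event */
        uint32_t pkts;   /* minimum packets in the interval before checking */
        uint32_t intvl;  /* measurement interval in seconds (unused here) */
    };

    /* Decide whether the failure rate over one interval crosses the
     * configured threshold. */
    static bool txe_exceeded(const struct cqm_txe_config *cfg,
                             uint32_t sent, uint32_t failed)
    {
        if (sent < cfg->pkts)
            return false;   /* too few packets to judge */
        return (uint64_t)failed * 100 >= (uint64_t)sent * cfg->rate;
    }

    int main(void)
    {
        struct cqm_txe_config cfg = { .rate = 20, .pkts = 50, .intvl = 5 };

        printf("%d\n", txe_exceeded(&cfg, 100, 25));  /* 1: 25 of 100 failed */
        printf("%d\n", txe_exceeded(&cfg, 10, 9));    /* 0: below the pkts floor */
        return 0;
    }
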
/net/ipv4/
tcp_output.c
   181  static inline void tcp_event_ack_sent(struct sock *sk, unsigned int pkts,  in tcp_event_ack_sent() (argument)
   196  tcp_dec_quickack_mode(sk, pkts);  in tcp_event_ack_sent()

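tcp_event_ack_sent() passes the number of segments just acknowledged to tcp_dec_quickack_mode(), which, as I understand it, spends that many units of the socket's quick-ACK budget so the stack falls back to delayed ACKs once the budget runs out. A tiny sketch of that bounded decrement; the struct is a stand-in, not the real inet_connection_sock state.

    #include <stdio.h>

    struct ack_state {
        unsigned int quick;   /* remaining quick-ACK budget, in segments */
    };

    /* Spend 'pkts' segments of the quick-ACK budget, never going below
     * zero, in the spirit of tcp_dec_quickack_mode(sk, pkts). */
    static void dec_quickack(struct ack_state *ack, unsigned int pkts)
    {
        if (ack->quick > pkts)
            ack->quick -= pkts;
        else
            ack->quick = 0;   /* budget exhausted: back to delayed ACKs */
    }

    int main(void)
    {
        struct ack_state ack = { .quick = 8 };

        dec_quickack(&ack, 3);
        dec_quickack(&ack, 10);
        printf("quick-ACK budget left: %u\n", ack.quick);   /* 0 */
        return 0;
    }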