/net/ipv6/netfilter/ |
D | nf_conntrack_reasm.c |
    130  static int nf_ct_frag6_reasm(struct frag_queue *fq, struct sk_buff *skb,
    141  struct frag_queue *fq;  in nf_ct_frag6_expire() local
    143  fq = container_of(frag, struct frag_queue, q);  in nf_ct_frag6_expire()
    145  ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);  in nf_ct_frag6_expire()
    169  static int nf_ct_frag6_queue(struct frag_queue *fq, struct sk_buff *skb,  in nf_ct_frag6_queue() argument
    178  if (fq->q.flags & INET_FRAG_COMPLETE) {  in nf_ct_frag6_queue()
    208  if (end < fq->q.len ||  in nf_ct_frag6_queue()
    209  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len)) {  in nf_ct_frag6_queue()
    213  fq->q.flags |= INET_FRAG_LAST_IN;  in nf_ct_frag6_queue()
    214  fq->q.len = end;  in nf_ct_frag6_queue()
    [all …]
|
/net/ieee802154/6lowpan/ |
D | reassembly.c |
    33   static int lowpan_frag_reasm(struct lowpan_frag_queue *fq, struct sk_buff *skb,
    47   struct frag_queue *fq;  in lowpan_frag_expire() local
    49   fq = container_of(frag, struct frag_queue, q);  in lowpan_frag_expire()
    51   spin_lock(&fq->q.lock);  in lowpan_frag_expire()
    53   if (fq->q.flags & INET_FRAG_COMPLETE)  in lowpan_frag_expire()
    56   inet_frag_kill(&fq->q);  in lowpan_frag_expire()
    58   spin_unlock(&fq->q.lock);  in lowpan_frag_expire()
    59   inet_frag_put(&fq->q);  in lowpan_frag_expire()
    84   static int lowpan_frag_queue(struct lowpan_frag_queue *fq,  in lowpan_frag_queue() argument
    97   if (fq->q.flags & INET_FRAG_COMPLETE)  in lowpan_frag_queue()
    [all …]
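The lowpan_frag_expire() lines above show the expiry sequence that the IPv6 and netfilter reassembly files repeat: recover the protocol queue from its embedded inet_frag_queue with container_of(), mark it dead under its lock, then drop the reference the timer held. A minimal sketch of that pattern, assuming the from_timer()-style callback these files use (example_frag_expire is a hypothetical name; the headers are the ones these definitions normally come from):

#include <linux/spinlock.h>
#include <linux/timer.h>
#include <net/inet_frag.h>
#include <net/ipv6_frag.h>	/* struct frag_queue (assumed header) */

static void example_frag_expire(struct timer_list *t)
{
	/* The timer is embedded in inet_frag_queue, which is embedded in frag_queue. */
	struct inet_frag_queue *frag = from_timer(frag, t, timer);
	struct frag_queue *fq = container_of(frag, struct frag_queue, q);

	spin_lock(&fq->q.lock);

	if (fq->q.flags & INET_FRAG_COMPLETE)
		goto out;		/* reassembly already finished, nothing to kill */

	inet_frag_kill(&fq->q);		/* unhash the queue so no new fragments attach */

out:
	spin_unlock(&fq->q.lock);
	inet_frag_put(&fq->q);		/* drop the reference the timer was holding */
}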
|
/net/ipv6/ |
D | reassembly.c |
    68   static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *skb,
    74   struct frag_queue *fq;  in ip6_frag_expire() local
    76   fq = container_of(frag, struct frag_queue, q);  in ip6_frag_expire()
    78   ip6frag_expire_frag_queue(fq->q.fqdir->net, fq);  in ip6_frag_expire()
    104  static int ip6_frag_queue(struct frag_queue *fq, struct sk_buff *skb,  in ip6_frag_queue() argument
    115  if (fq->q.flags & INET_FRAG_COMPLETE)  in ip6_frag_queue()
    145  if (end < fq->q.len ||  in ip6_frag_queue()
    146  ((fq->q.flags & INET_FRAG_LAST_IN) && end != fq->q.len))  in ip6_frag_queue()
    148  fq->q.flags |= INET_FRAG_LAST_IN;  in ip6_frag_queue()
    149  fq->q.len = end;  in ip6_frag_queue()
    [all …]
|
/net/ipv4/ |
D | inet_fragment.c |
    130  struct inet_frag_queue *fq = ptr;  in inet_frags_free_cb() local
    133  count = del_timer_sync(&fq->timer) ? 1 : 0;  in inet_frags_free_cb()
    135  spin_lock_bh(&fq->lock);  in inet_frags_free_cb()
    136  if (!(fq->flags & INET_FRAG_COMPLETE)) {  in inet_frags_free_cb()
    137  fq->flags |= INET_FRAG_COMPLETE;  in inet_frags_free_cb()
    139  } else if (fq->flags & INET_FRAG_HASH_DEAD) {  in inet_frags_free_cb()
    142  spin_unlock_bh(&fq->lock);  in inet_frags_free_cb()
    144  if (refcount_sub_and_test(count, &fq->refcnt))  in inet_frags_free_cb()
    145  inet_frag_destroy(fq);  in inet_frags_free_cb()
    194  void inet_frag_kill(struct inet_frag_queue *fq)  in inet_frag_kill() argument
    [all …]
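The inet_frags_free_cb() lines above show how teardown accounts references before freeing a queue: the pending timer holds one, and the hash table's reference is dropped either when the queue is marked complete here or when a previous inet_frag_kill() deferred it (INET_FRAG_HASH_DEAD). A sketch of that accounting, reconstructed from the excerpt (example_frags_free_cb is a hypothetical name; the surrounding rhashtable walk is omitted):

#include <linux/refcount.h>
#include <linux/spinlock.h>
#include <linux/timer.h>
#include <net/inet_frag.h>

static void example_frags_free_cb(void *ptr, void *arg)
{
	struct inet_frag_queue *fq = ptr;
	int count;

	/* Cancelling a still-pending timer lets us drop the reference it held. */
	count = del_timer_sync(&fq->timer) ? 1 : 0;

	spin_lock_bh(&fq->lock);
	if (!(fq->flags & INET_FRAG_COMPLETE)) {
		fq->flags |= INET_FRAG_COMPLETE;
		count++;		/* also drop the hash table's reference */
	} else if (fq->flags & INET_FRAG_HASH_DEAD) {
		count++;		/* kill path deferred dropping the hash reference */
	}
	spin_unlock_bh(&fq->lock);

	/* Drop everything accounted above; free the queue when the last ref goes. */
	if (refcount_sub_and_test(count, &fq->refcnt))
		inet_frag_destroy(fq);
}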
|
D | ip_fragment.c |
    715  const struct inet_frag_queue *fq = data;  in ip4_obj_hashfn() local
    717  return jhash2((const u32 *)&fq->key.v4,  in ip4_obj_hashfn()
    724  const struct inet_frag_queue *fq = ptr;  in ip4_obj_cmpfn() local
    726  return !!memcmp(&fq->key, key, sizeof(*key));  in ip4_obj_cmpfn()
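These two ip_fragment.c helpers are per-object rhashtable callbacks for IPv4 reassembly: one hashes the lookup key embedded in struct inet_frag_queue, the other compares it on a hash collision. A sketch with the surrounding context filled in, assuming the frag_v4_compare_key layout from include/net/inet_frag.h (example_* names are hypothetical):

#include <linux/jhash.h>
#include <linux/rhashtable.h>
#include <net/inet_frag.h>

static u32 example_obj_hashfn(const void *data, u32 len, u32 seed)
{
	const struct inet_frag_queue *fq = data;

	/* Hash the IPv4 compare key (saddr, daddr, id, protocol, ...) as u32 words. */
	return jhash2((const u32 *)&fq->key.v4,
		      sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
}

static int example_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
{
	const struct frag_v4_compare_key *key = arg->key;
	const struct inet_frag_queue *fq = ptr;

	/* rhashtable expects 0 on match, non-zero otherwise. */
	return !!memcmp(&fq->key, key, sizeof(*key));
}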
|
D | Kconfig | 655 signal. It requires the fq ("Fair Queue") pacing packet scheduler.
|
/net/mac80211/ |
D | debugfs.c |
    79   struct fq *fq = &local->fq;  in aqm_read() local
    83   spin_lock_bh(&local->fq.lock);  in aqm_read()
    97   fq->flows_cnt,  in aqm_read()
    98   fq->backlog,  in aqm_read()
    99   fq->overmemory,  in aqm_read()
    100  fq->overlimit,  in aqm_read()
    101  fq->collisions,  in aqm_read()
    102  fq->memory_usage,  in aqm_read()
    103  fq->memory_limit,  in aqm_read()
    104  fq->limit,  in aqm_read()
    [all …]
|
D | tx.c |
    1304  struct fq *fq;  in codel_dequeue_func() local
    1309  fq = &local->fq;  in codel_dequeue_func()
    1314  flow = &fq->flows[cvars - local->cvars];  in codel_dequeue_func()
    1316  return fq_flow_dequeue(fq, flow);  in codel_dequeue_func()
    1333  static struct sk_buff *fq_tin_dequeue_func(struct fq *fq,  in fq_tin_dequeue_func() argument
    1343  local = container_of(fq, struct ieee80211_local, fq);  in fq_tin_dequeue_func()
    1358  cvars = &local->cvars[flow - fq->flows];  in fq_tin_dequeue_func()
    1371  static void fq_skb_free_func(struct fq *fq,  in fq_skb_free_func() argument
    1378  local = container_of(fq, struct ieee80211_local, fq);  in fq_skb_free_func()
    1382  static struct fq_flow *fq_flow_get_default_func(struct fq *fq,  in fq_flow_get_default_func() argument
    [all …]
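The tx.c hits show how mac80211 moves between its shared struct fq, the owning ieee80211_local, and the per-flow CoDel state: container_of() maps the fq back to ieee80211_local (the struct fq member appears in the ieee80211_i.h hit further down), and because local->cvars[] parallels fq->flows[], a flow and its codel_vars convert to each other purely by array index. A sketch of the two conversions implied by those lines (example_* names are hypothetical):

#include <net/codel.h>
#include <net/fq.h>
#include "ieee80211_i.h"	/* struct ieee80211_local with its embedded struct fq */

static struct codel_vars *example_flow_to_cvars(struct ieee80211_local *local,
						struct fq_flow *flow)
{
	struct fq *fq = &local->fq;

	/* flow is an element of fq->flows[]; its CoDel state shares the index. */
	return &local->cvars[flow - fq->flows];
}

static struct fq_flow *example_cvars_to_flow(struct ieee80211_local *local,
					     struct codel_vars *cvars)
{
	struct fq *fq = &local->fq;

	/* Inverse mapping: from the codel_vars entry back to the flow it shadows. */
	return &fq->flows[cvars - local->cvars];
}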
|
D | agg-tx.c |
    194  struct fq *fq;  in ieee80211_agg_stop_txq() local
    202  fq = &sdata->local->fq;  in ieee80211_agg_stop_txq()
    205  spin_lock_bh(&fq->lock);  in ieee80211_agg_stop_txq()
    207  spin_unlock_bh(&fq->lock);  in ieee80211_agg_stop_txq()
|
D | util.c |
    244  struct fq *fq = &local->fq;  in __ieee80211_wake_txqs() local
    251  spin_lock(&fq->lock);  in __ieee80211_wake_txqs()
    277  spin_unlock(&fq->lock);  in __ieee80211_wake_txqs()
    279  spin_lock(&fq->lock);  in __ieee80211_wake_txqs()
    292  spin_unlock(&fq->lock);  in __ieee80211_wake_txqs()
    298  spin_unlock(&fq->lock);  in __ieee80211_wake_txqs()
    491  spin_lock(&local->fq.lock);  in __ieee80211_stop_queue()
    493  spin_unlock(&local->fq.lock);  in __ieee80211_stop_queue()
|
D | cfg.c |
    3881  spin_lock_bh(&local->fq.lock);  in ieee80211_get_txq_stats()
    3899  txqstats->backlog_packets = local->fq.backlog;  in ieee80211_get_txq_stats()
    3900  txqstats->backlog_bytes = local->fq.memory_usage;  in ieee80211_get_txq_stats()
    3901  txqstats->overlimit = local->fq.overlimit;  in ieee80211_get_txq_stats()
    3902  txqstats->overmemory = local->fq.overmemory;  in ieee80211_get_txq_stats()
    3903  txqstats->collisions = local->fq.collisions;  in ieee80211_get_txq_stats()
    3904  txqstats->max_flows = local->fq.flows_cnt;  in ieee80211_get_txq_stats()
    3909  spin_unlock_bh(&local->fq.lock);  in ieee80211_get_txq_stats()
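The cfg.c hits are the global half of ieee80211_get_txq_stats(): the shared fq counters are sampled under local->fq.lock so they stay consistent with each other, then copied into cfg80211_txq_stats. A sketch of just that portion (example_fill_global_txq_stats is a hypothetical name; the per-TXQ walk and the txqstats->filled bookkeeping are omitted):

#include <net/cfg80211.h>
#include "ieee80211_i.h"

static void example_fill_global_txq_stats(struct ieee80211_local *local,
					  struct cfg80211_txq_stats *txqstats)
{
	spin_lock_bh(&local->fq.lock);

	/* Counters maintained by the shared fair-queue instance in ieee80211_local. */
	txqstats->backlog_packets = local->fq.backlog;
	txqstats->backlog_bytes   = local->fq.memory_usage;
	txqstats->overlimit       = local->fq.overlimit;
	txqstats->overmemory      = local->fq.overmemory;
	txqstats->collisions      = local->fq.collisions;
	txqstats->max_flows       = local->fq.flows_cnt;

	spin_unlock_bh(&local->fq.lock);
}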
|
D | debugfs_netdev.c |
    498  spin_lock_bh(&local->fq.lock);  in ieee80211_if_fmt_aqm()
    517  spin_unlock_bh(&local->fq.lock);  in ieee80211_if_fmt_aqm()
|
D | debugfs_sta.c |
    150  spin_lock_bh(&local->fq.lock);  in sta_aqm_read()
    187  spin_unlock_bh(&local->fq.lock);  in sta_aqm_read()
|
D | ieee80211_i.h | 1135 struct fq fq; member
|
D | sta_info.c |
    2147  spin_lock_bh(&local->fq.lock);  in sta_set_tidstats()
    2155  spin_unlock_bh(&local->fq.lock);  in sta_set_tidstats()
|
/net/xdp/ |
D | xsk.c |
    37   READ_ONCE(xs->umem->fq);  in xsk_is_setup_for_bpf_map()
    42   return xskq_has_addrs(umem->fq, cnt);  in xsk_umem_has_addrs()
    48   return xskq_peek_addr(umem->fq, addr, umem);  in xsk_umem_peek_addr()
    54   xskq_discard_addr(umem->fq);  in xsk_umem_discard_addr()
    63   umem->fq->ring->flags |= XDP_RING_NEED_WAKEUP;  in xsk_set_rx_need_wakeup()
    90   umem->fq->ring->flags &= ~XDP_RING_NEED_WAKEUP;  in xsk_clear_rx_need_wakeup()
    149  if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||  in __xsk_rcv()
    170  xskq_discard_addr(xs->umem->fq);  in __xsk_rcv()
    237  if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem) ||  in xsk_generic_rcv()
    252  xskq_discard_addr(xs->umem->fq);  in xsk_generic_rcv()
    [all …]
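The xsk.c hits trace how the RX path consumes the UMEM fill ring (umem->fq): a chunk address is peeked from the ring, the received frame is copied into that chunk, and only then is the ring entry discarded. A sketch of that ordering under the same assumptions as the excerpts (xskq_peek_addr()/xskq_discard_addr() are internal helpers from net/xdp/xsk_queue.h, xdp_umem_get_data() is assumed from the same-era <net/xdp_sock.h>, example_rcv is a hypothetical name, and size/offset checks are trimmed):

#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/xdp_sock.h>
#include "xsk_queue.h"		/* xskq_peek_addr(), xskq_discard_addr() */

static int example_rcv(struct xdp_sock *xs, void *data, u32 len)
{
	u64 addr;
	void *buffer;

	/* No free chunk address on the fill ring: the frame has to be dropped. */
	if (!xskq_peek_addr(xs->umem->fq, &addr, xs->umem))
		return -ENOSPC;

	/* Copy the frame into the UMEM chunk the peeked address points at. */
	buffer = xdp_umem_get_data(xs->umem, addr);
	memcpy(buffer, data, len);

	/* Only after the copy is the fill-ring entry irrevocably consumed. */
	xskq_discard_addr(xs->umem->fq);
	return 0;
}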
|
D | xdp_umem.c |
    237  if (umem->fq) {  in xdp_umem_release()
    238  xskq_destroy(umem->fq);  in xdp_umem_release()
    239  umem->fq = NULL;  in xdp_umem_release()
    459  return umem->fq && umem->cq;  in xdp_umem_validate_queues()
|
D | xsk_diag.c |
    70  if (!err && umem->fq)  in xsk_diag_put_umem()
    71  err = xsk_diag_put_ring(umem->fq, XDP_DIAG_UMEM_FILL_RING, nlskb);  in xsk_diag_put_umem()
|
/net/sched/ |
D | Kconfig | 451 default "fq" if DEFAULT_FQ
|