/net/netfilter/
D | nft_limit.c |
  in nft_limit_eval():
    28  static inline bool nft_limit_eval(struct nft_limit *limit, u64 cost)   [argument]
    33      spin_lock_bh(&limit->lock);
    35      tokens = limit->tokens + now - limit->last;
    36      if (tokens > limit->tokens_max)
    37              tokens = limit->tokens_max;
    39      limit->last = now;
    42      limit->tokens = delta;
    43      spin_unlock_bh(&limit->lock);
    44      return limit->invert;
    46      limit->tokens = tokens;
  [all …]
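For orientation: these hits trace the classic token-bucket shape: refill credit from elapsed time, clamp it to a burst ceiling, then charge a per-packet cost. A minimal user-space sketch of that pattern, assuming a nanosecond monotonic clock and illustrative names (not the kernel's implementation):

    #include <stdbool.h>
    #include <stdint.h>
    #include <time.h>

    struct tbucket {
        uint64_t tokens;      /* current credit, in nanoseconds of budget */
        uint64_t tokens_max;  /* burst ceiling the refill is clamped to */
        uint64_t last;        /* timestamp of the previous refill */
    };

    static uint64_t now_ns(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_MONOTONIC, &ts);
        return (uint64_t)ts.tv_sec * 1000000000ull + ts.tv_nsec;
    }

    /* Returns true when the packet exceeds the rate; the kernel's version
     * also folds in an 'invert' flag (line 44) to flip the match. */
    static bool tbucket_over_limit(struct tbucket *b, uint64_t cost)
    {
        uint64_t now = now_ns();
        uint64_t tokens = b->tokens + (now - b->last);

        if (tokens > b->tokens_max)
            tokens = b->tokens_max;      /* cap refill at the burst size */
        b->last = now;
        if (tokens >= cost) {
            b->tokens = tokens - cost;   /* enough credit: charge and pass */
            return false;
        }
        b->tokens = tokens;              /* keep what we have; over limit */
        return true;
    }

The spin_lock_bh()/spin_unlock_bh() pair in the hits serializes this read-modify-write against concurrent packets, which the single-threaded sketch above can omit.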
D | nf_conntrack_sip.c |
  in string_len():
    67              const char *limit, int *shift)   [argument]
    71      while (dptr < limit && isalpha(*dptr)) {
  in digits_len():
    79              const char *limit, int *shift)   [argument]
    82      while (dptr < limit && isdigit(*dptr)) {
  in word_len():
    100  static int word_len(const char *dptr, const char *limit)   [argument]
    103      while (dptr < limit && iswordc(*dptr)) {
  in callid_len():
    111             const char *limit, int *shift)   [argument]
    115     len = word_len(dptr, limit);
    117     if (!len || dptr == limit || *dptr != '@')
    122     domain_len = word_len(dptr, limit);
  [all …]
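Each helper scans forward only while strictly below an end pointer, which keeps the SIP parser from reading past untrusted input. A hedged sketch of the same bounded-scan idiom (digits only; the kernel's iswordc() accepts a wider SIP token class):

    #include <ctype.h>
    #include <stddef.h>
    #include <stdio.h>

    /* Count leading digits without dereferencing at or past 'limit'. */
    static size_t digits_len(const char *dptr, const char *limit)
    {
        const char *p = dptr;

        while (p < limit && isdigit((unsigned char)*p))
            p++;
        return (size_t)(p - dptr);
    }

    int main(void)
    {
        char buf[4] = { '4', '2', 'x', '7' };   /* deliberately not NUL-terminated */

        printf("%zu\n", digits_len(buf, buf + sizeof(buf)));   /* prints 2 */
        return 0;
    }

callid_len() composes the same primitive: a word, then a literal '@' (checked only after confirming dptr is still below limit), then a domain word.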
D | nft_connlimit.c |
    18   u32 limit;   [member]
  in nft_connlimit_do_eval():
    53      if ((count > priv->limit) ^ priv->invert) {
  in nft_connlimit_do_init():
    64      u32 flags, limit;   [local]
    69      limit = ntohl(nla_get_be32(tb[NFTA_CONNLIMIT_COUNT]));
    80      priv->limit = limit;
  in nft_connlimit_do_dump():
    96      if (nla_put_be32(skb, NFTA_CONNLIMIT_COUNT, htonl(priv->limit)))
  in nft_connlimit_clone():
    204     priv_dst->limit = priv_src->limit;
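The test at line 53 folds an "invert the match" flag into a single branch with XOR. A small hedged illustration (plain C, hypothetical names):

    #include <stdbool.h>
    #include <stdio.h>

    /* invert == false: match when count exceeds the limit;
     * invert == true:  match when count stays within it. */
    static bool connlimit_matches(unsigned int count, unsigned int limit,
                                  bool invert)
    {
        return (count > limit) ^ invert;
    }

    int main(void)
    {
        printf("%d\n", connlimit_matches(10, 5, false));   /* 1: over limit */
        printf("%d\n", connlimit_matches(10, 5, true));    /* 0: inverted */
        return 0;
    }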
/net/sched/
D | sch_fifo.c |
  in bfifo_enqueue():
    21      if (likely(sch->qstats.backlog + qdisc_pkt_len(skb) <= sch->limit))
  in pfifo_enqueue():
    30      if (likely(sch->q.qlen < sch->limit))
  in pfifo_tail_enqueue():
    41      if (likely(sch->q.qlen < sch->limit))
  in fifo_init():
    61      u32 limit = qdisc_dev(sch)->tx_queue_len;   [local]
    64              limit *= psched_mtu(qdisc_dev(sch));
    66      sch->limit = limit;
    73      sch->limit = ctl->limit;
    77      bypass = sch->limit >= psched_mtu(qdisc_dev(sch));
    79      bypass = sch->limit >= 1;
  in fifo_dump():
    90      struct tc_fifo_qopt opt = { .limit = sch->limit };
  [all …]
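The first three hits show the two flavors of FIFO limit: bfifo accounts bytes (current backlog plus the incoming packet against a byte limit), while pfifo and pfifo_tail count packets. A hedged side-by-side sketch with illustrative types:

    #include <stdbool.h>

    struct fifo {
        unsigned int backlog_bytes;   /* bytes currently queued (bfifo) */
        unsigned int qlen;            /* packets currently queued (pfifo) */
        unsigned int limit;           /* bytes for bfifo, packets for pfifo */
    };

    /* Byte FIFO: admit only if the new packet still fits under the limit. */
    static bool bfifo_admit(const struct fifo *q, unsigned int pkt_len)
    {
        return q->backlog_bytes + pkt_len <= q->limit;
    }

    /* Packet FIFO: admit only while the packet count is below the limit. */
    static bool pfifo_admit(const struct fifo *q)
    {
        return q->qlen < q->limit;
    }

This split is also why fifo_init() at line 64 multiplies a packet-denominated tx_queue_len by the MTU when the default limit must be expressed in bytes.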
D | sch_plug.c |
    66   u32 limit;   [member]
  in plug_enqueue():
    92      if (likely(sch->qstats.backlog + skb->len <= q->limit)) {
  in plug_init():
    133     q->limit = qdisc_dev(sch)->tx_queue_len
    141     q->limit = ctl->limit;
  in plug_change():
    199     q->limit = msg->limit;
D | sch_sfb.c |
    62   u32 limit; /* HARD maximal queue length */   [member]
  in sfb_enqueue():
    295     if (unlikely(sch->q.qlen >= q->limit)) {
    302     unsigned long limit = q->rehash_time + q->rehash_interval;   [local]
    304     if (unlikely(time_after(jiffies, limit))) {
    308                  time_after(jiffies, limit - q->warmup_time))) {
    482  .limit = 0,
  in sfb_change():
    498     u32 limit;   [local]
    513     limit = ctl->limit;
    514     if (limit == 0)
    515             limit = qdisc_dev(sch)->tx_queue_len;
  [all …]
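The rehash deadline at lines 302-308 leans on time_after(), which stays correct when the jiffies counter wraps. A hedged one-function sketch of the underlying signed-difference trick:

    #include <stdbool.h>
    #include <stdint.h>

    /* Wraparound-safe "a is later than b", the idea behind the kernel's
     * time_after(): take the unsigned difference, reinterpret it as
     * signed, and test the sign. */
    static bool ticks_after(uint32_t a, uint32_t b)
    {
        return (int32_t)(b - a) < 0;
    }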
D | sch_pie.c |
    32   u32 limit; /* number of packets that can be enqueued */   [member]
  in pie_params_init():
    76      params->limit = 1000; /* default of 1000 packets */
  in pie_qdisc_enqueue():
    157     if (unlikely(qdisc_qlen(sch) >= sch->limit)) {
  in pie_change():
    232     u32 limit = nla_get_u32(tb[TCA_PIE_LIMIT]);   [local]
    234     q->params.limit = limit;
    235     sch->limit = limit;
    252     while (sch->q.qlen > sch->limit) {
  in pie_init():
    465     sch->limit = q->params.limit;
  in pie_dump():
    494         nla_put_u32(skb, TCA_PIE_LIMIT, sch->limit) ||
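Lines 232-252 show the usual shape of a runtime limit change: store the new value, then immediately drain the queue down to it rather than waiting for normal dequeues; the same while-loop recurs in sch_codel.c, sch_fq_codel.c and sch_hhf.c below. A hedged toy-queue sketch:

    #include <stddef.h>

    struct pkt { struct pkt *next; };

    struct queue {
        struct pkt *head;
        size_t qlen;
        size_t limit;
    };

    static struct pkt *dequeue(struct queue *q)
    {
        struct pkt *p = q->head;

        if (p) {
            q->head = p->next;
            q->qlen--;
        }
        return p;
    }

    /* Apply a new, possibly smaller, limit and shed the overflow now;
     * the kernel additionally frees each skb and bumps drop counters. */
    static void set_limit(struct queue *q, size_t limit)
    {
        q->limit = limit;
        while (q->qlen > q->limit)
            dequeue(q);
    }

    int main(void)
    {
        struct pkt p3 = { 0 }, p2 = { &p3 }, p1 = { &p2 };
        struct queue q = { &p1, 3, 3 };

        set_limit(&q, 1);         /* sheds two packets immediately */
        return (int)q.qlen - 1;   /* exits 0: exactly one packet left */
    }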
D | sch_gred.c |
    34   u32 limit; /* HARD maximal queue length */   [member]
  in gred_enqueue():
    182                         sch->limit))
    246     if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
  in gred_offload():
    337             opt.set.tab[i].limit = q->limit;
  in gred_change_vq():
    498     if (ctl->limit > sch->limit)
    499             q->limit = sch->limit;
    501             q->limit = ctl->limit;
  in gred_change():
    656     sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
  in gred_init():
    749     sch->limit = nla_get_u32(tb[TCA_GRED_LIMIT]);
    751     sch->limit = qdisc_dev(sch)->tx_queue_len
  [all …]
D | sch_red.c |
    37   u32 limit; /* HARD maximal queue length */   [member]
  in red_offload():
    167     opt.set.limit = q->limit;
  in red_change():
    224     if (ctl->limit > 0) {
    225             child = fifo_create_dflt(sch, &bfifo_qdisc_ops, ctl->limit,
    236     q->limit = ctl->limit;
  in red_dump():
    309     .limit = q->limit,
D | sch_codel.c |
  in codel_qdisc_enqueue():
    116     if (likely(qdisc_qlen(sch) < sch->limit)) {
  in codel_change():
    170     sch->limit = nla_get_u32(tb[TCA_CODEL_LIMIT]);
    176     while (sch->q.qlen > sch->limit) {
  in codel_init():
    194     sch->limit = DEFAULT_CODEL_LIMIT;
    208     if (sch->limit >= 1)
  in codel_dump():
    228         sch->limit) ||
D | sch_skbprio.c |
  in skbprio_enqueue():
    82      if (sch->q.qlen < sch->limit) {
  in skbprio_change():
    175     sch->limit = ctl->limit;
  in skbprio_init():
    192     sch->limit = 64;
  in skbprio_dump():
    203     opt.limit = sch->limit;
D | sch_tbf.c |
    98   u32 limit; /* Maximal length of backlog: bytes */   [member]
  in tbf_change():
    374     err = fifo_set_limit(q->qdisc, qopt->limit);
    377     } else if (qopt->limit > 0) {
    378             child = fifo_create_dflt(sch, &bfifo_qdisc_ops, qopt->limit,
    395     q->limit = qopt->limit;
  in tbf_dump():
    453     opt.limit = q->limit;
D | sch_choke.c |
    50   u32 limit;   [member]
  in choke_enqueue():
    274     if (sch->q.qlen < q->limit) {
  in choke_change():
    375     if (ctl->limit > CHOKE_MAX_QUEUE)
    378     mask = roundup_pow_of_two(ctl->limit + 1) - 1;
    418     q->limit = ctl->limit;
  in choke_dump():
    445     .limit = q->limit,
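Line 378 sizes choke's ring buffer: rounding limit + 1 up to a power of two and subtracting one yields a mask, so ring indices can use "idx & mask" in place of a modulo. A hedged sketch of the bit-smearing way to compute the same mask (the kernel uses its roundup_pow_of_two() helper):

    #include <stdint.h>

    /* Smallest 2^k - 1 with 2^k >= n (assumes n > 0). Smearing the high
     * bit of n - 1 rightward sets every bit below it. */
    static uint32_t pow2_mask(uint32_t n)
    {
        n--;
        n |= n >> 1;
        n |= n >> 2;
        n |= n >> 4;
        n |= n >> 8;
        n |= n >> 16;
        return n;   /* pow2_mask(200 + 1) == 255, matching the hit above */
    }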
D | sch_sfq.c |
    115  int limit; /* limit of total number of packets in this qdisc */   [member]
  in sfq_enqueue():
    461     if (++sch->q.qlen <= q->limit)
  in sfq_change():
    684     if (ctl->limit) {
    685             q->limit = min_t(u32, ctl->limit, q->maxdepth * q->maxflows);
    686             q->maxflows = min_t(u32, q->maxflows, q->limit);
    690     while (sch->q.qlen > q->limit) {
  in sfq_init():
    750     q->limit = SFQ_MAX_DEPTH;
    781     if (q->limit >= 1)
  in sfq_dump():
    798     opt.v0.limit = q->limit;
D | sch_fq_codel.c |
  in fq_codel_enqueue():
    218     if (++sch->q.qlen <= sch->limit && !memory_limited)
  in fq_codel_change():
    419     sch->limit = nla_get_u32(tb[TCA_FQ_CODEL_LIMIT]);
    433     while (sch->q.qlen > sch->limit ||
  in fq_codel_init():
    465     sch->limit = 10*1024;
    507     if (sch->limit >= 1)
  in fq_codel_dump():
    533         sch->limit) ||
D | sch_hhf.c |
  in hhf_enqueue():
    402     if (++sch->q.qlen <= sch->limit)
  in hhf_change():
    540     sch->limit = nla_get_u32(tb[TCA_HHF_BACKLOG_LIMIT]);
    565     while (sch->q.qlen > sch->limit) {
  in hhf_init():
    583     sch->limit = 1000;
  in hhf_dump():
    663     if (nla_put_u32(skb, TCA_HHF_BACKLOG_LIMIT, sch->limit) ||
D | sch_netem.c |
    90   u32 limit;   [member]
  in netem_enqueue():
    520     if (unlikely(sch->q.qlen >= sch->limit)) {
  in netem_change():
    998     sch->limit = qopt->limit;
    1002    q->limit = qopt->limit;
  in netem_dump():
    1148    qopt.limit = q->limit;
/net/wireless/
D | of.c |
  in wiphy_freq_limits_valid_chan():
    30      struct ieee80211_freq_range *limit = &freq_limits[i];   [local]
    32      if (cfg80211_does_bw_fit_range(limit,
  in wiphy_read_of_freq_limits():
    109     struct ieee80211_freq_range *limit = &freq_limits[i];   [local]
    111     p = of_prop_next_u32(prop, p, &limit->start_freq_khz);
    117     p = of_prop_next_u32(prop, p, &limit->end_freq_khz);
    123     if (!limit->start_freq_khz ||
    124         !limit->end_freq_khz ||
    125         limit->start_freq_khz >= limit->end_freq_khz) {
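Lines 111-125 read start/end pairs out of a device-tree property and reject any pair that is incomplete or empty. A hedged sketch of just the validity test (illustrative struct, not cfg80211's):

    #include <stdbool.h>
    #include <stdint.h>

    struct freq_range {
        uint32_t start_khz;
        uint32_t end_khz;
    };

    /* A parsed range is usable only if both ends were present (non-zero)
     * and the interval is non-empty. */
    static bool freq_range_valid(const struct freq_range *r)
    {
        return r->start_khz && r->end_khz && r->start_khz < r->end_khz;
    }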
/net/bridge/netfilter/
D | ebt_among.c |
  in ebt_mac_wormhash_contains():
    28      int start, limit, i;   [local]
    34      limit = wh->table[key + 1];
    36      for (i = start; i < limit; i++) {
    43      for (i = start; i < limit; i++) {
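The wormhash keeps one flat entry array plus an offset table: wh->table[key] marks where a bucket's entries begin and wh->table[key + 1] where the next bucket starts, so a lookup walks the half-open range [start, limit). A hedged sketch of that lookup shape (illustrative names):

    #include <stdbool.h>

    /* 'table' holds bucket offsets into 'entries'; table[key + 1] is the
     * exclusive end of bucket 'key'. */
    static bool bucket_contains(const int *table, const int *entries,
                                int key, int value)
    {
        int start = table[key];
        int limit = table[key + 1];

        for (int i = start; i < limit; i++)
            if (entries[i] == value)
                return true;
        return false;
    }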
/net/batman-adv/
D | fragmentation.c |
  in batadv_frag_size_limit():
    87      int limit = BATADV_FRAG_MAX_FRAG_SIZE;   [local]
    89      limit -= sizeof(struct batadv_frag_packet);
    90      limit *= BATADV_FRAG_MAX_FRAGMENTS;
    92      return limit;
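batadv_frag_size_limit() computes the largest packet that fragmentation can carry: per-fragment payload (maximum fragment size minus the fragment header) times the maximum number of fragments. A hedged worked example with illustrative constants (the kernel's BATADV_* values and header size may differ):

    #include <stdio.h>

    #define FRAG_MAX_FRAG_SIZE 1400   /* illustrative, not the kernel's */
    #define FRAG_HDR_SIZE        20   /* stand-in for sizeof(struct batadv_frag_packet) */
    #define FRAG_MAX_FRAGMENTS   16

    int main(void)
    {
        int per_frag = FRAG_MAX_FRAG_SIZE - FRAG_HDR_SIZE;

        /* (1400 - 20) * 16 = 22080 bytes with these stand-in numbers */
        printf("size limit = %d bytes\n", per_frag * FRAG_MAX_FRAGMENTS);
        return 0;
    }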
/net/tipc/
D | bcast.c |
  in tipc_bc_link_set_queue_limits():
    571  static int tipc_bc_link_set_queue_limits(struct net *net, u32 limit)   [argument]
    577     if (limit < BCLINK_WIN_MIN)
    578             limit = BCLINK_WIN_MIN;
    579     if (limit > TIPC_MAX_LINK_WIN)
    582     tipc_link_set_queue_limits(l, limit);
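Line 577 floors an undersized window request at BCLINK_WIN_MIN; the branch at line 579 handles oversized requests, though its body is truncated out of the excerpt. A hedged sketch that simply clamps at both ends (illustrative bounds, not TIPC's):

    #include <stdint.h>

    #define WIN_MIN   32
    #define WIN_MAX 8191

    static uint32_t clamp_window(uint32_t limit)
    {
        if (limit < WIN_MIN)
            limit = WIN_MIN;    /* raise undersized requests to the floor */
        if (limit > WIN_MAX)
            limit = WIN_MAX;    /* the kernel may reject rather than clamp */
        return limit;
    }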
/net/9p/
D | trans_virtio.c |
  in pack_sg_list():
    168              int limit, char *data, int count)   [argument]
    177     BUG_ON(index >= limit);
  in pack_sg_list_p():
    213  pack_sg_list_p(struct scatterlist *sg, int start, int limit,   [argument]
    220     BUG_ON(nr_pages > (limit - start));
    229     BUG_ON(index >= limit);
/net/ipv4/
D | tcp_output.c |
  in tcp_fragment():
    1312    long limit;   [local]
    1328    limit = sk->sk_sndbuf + 2 * SKB_TRUESIZE(GSO_MAX_SIZE);
    1329    if (unlikely((sk->sk_wmem_queued >> 1) > limit &&
  in tcp_tso_should_defer():
    1955    u32 send_win, cong_win, limit, in_flight;   [local]
    1983    limit = min(send_win, cong_win);
    1986    if (limit >= max_segs * tp->mss_cache)
    1990    if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len))
    2001    if (limit >= chunk)
    2009    if (limit > tcp_max_tso_deferred_mss(tp) * tp->mss_cache)
  in tcp_small_queue_check():
    2288    unsigned long limit;   [local]
  [all …]
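In tcp_tso_should_defer(), line 1983 computes how much could be sent right now: the tighter of the receiver-advertised window and the congestion window. If a full-sized burst already fits (line 1986), deferring buys nothing. A hedged sketch of that decision with illustrative field names:

    #include <stdbool.h>
    #include <stdint.h>

    struct tcp_peek {
        uint32_t send_win;   /* receiver-advertised window, bytes */
        uint32_t cong_win;   /* congestion window, bytes */
        uint32_t mss;        /* maximum segment size, bytes */
    };

    static uint32_t sendable_now(const struct tcp_peek *tp)
    {
        return tp->send_win < tp->cong_win ? tp->send_win : tp->cong_win;
    }

    /* Send immediately when a maximal GSO burst already fits the limit. */
    static bool should_send_now(const struct tcp_peek *tp, uint32_t max_segs)
    {
        return sendable_now(tp) >= max_segs * tp->mss;
    }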
/net/openvswitch/
D | conntrack.c |
    87   u32 limit;   [member]
  in ct_limit_get():
    1143    return ct_limit->limit;
  in ovs_ct_limit_set_zone_limit():
    1955    info->default_limit = zone_limit->limit;
    1968    ct_limit->limit = zone_limit->limit;
  in ovs_ct_limit_get_default_limit():
    2025    .limit = info->default_limit,
  in __ovs_ct_limit_get_zone_limit():
    2033            u16 zone_id, u32 limit,   [argument]
    2041    zone_limit.limit = limit;
  in ovs_ct_limit_get_zone_limit():
    2056    u32 limit;   [local]
    2073    limit = ct_limit_get(info, zone);
    2077            net, info->data, zone, limit, reply);
  [all …]
/net/rxrpc/
D | conn_client.c |
  in rxrpc_may_reuse_conn():
    238     int id_cursor, id, distance, limit;   [local]
    257     limit = max(rxrpc_max_client_connections * 4, 1024U);
    258     if (distance > limit)
  in rxrpc_cull_active_client_conns():
    1019    unsigned int nr_active, limit;   [local]
    1028    limit = rxrpc_reap_client_connections;
    1033    while (nr_active > limit) {
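Line 257 derives a connection-reuse distance limit from a tunable but never lets it drop below 1024, and lines 1028-1033 cull active connections down to a configured ceiling. A hedged sketch of the floored-limit computation (hypothetical tunable name):

    #include <stdint.h>

    /* Scale the tunable by 4 but keep a hard floor, as at line 257. */
    static uint32_t reuse_distance_limit(uint32_t max_client_conns)
    {
        uint32_t limit = max_client_conns * 4;

        return limit > 1024u ? limit : 1024u;
    }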