/net/ipv4/ |
D | tcp_cdg.c |
    66   s32 max;  member
    156  u32 base_owd = max(ca->delay_min / 2U, 125U);  in tcp_cdg_hystart_update()
    175  s32 thresh = max(ca->delay_min + ca->delay_min / 8U,  in tcp_cdg_hystart_update()
    193  s32 gmax = ca->rtt.max - ca->rtt_prev.max;  in tcp_cdg_grad()
    198  ca->gsum.max += gmax - ca->gradients[ca->tail].max;  in tcp_cdg_grad()
    200  ca->gradients[ca->tail].max = gmax;  in tcp_cdg_grad()
    203  gmax = ca->gsum.max;  in tcp_cdg_grad()
    255  ca->shadow_wnd = max(ca->shadow_wnd, tp->snd_cwnd);  in tcp_cdg_backoff()
    296  ca->shadow_wnd = max(ca->shadow_wnd, ca->shadow_wnd + incr);  in tcp_cdg_cong_avoid()
    325  ca->rtt.max = max(ca->rtt.max, sample->rtt_us);  in tcp_cdg_acked()
    [all …]
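The tcp_cdg_grad() hits show CDG keeping a running sum of recent RTT gradients in a small ring: when a new per-RTT gradient gmax arrives, the oldest slot's contribution is subtracted from gsum before the slot is overwritten, so the sum always covers exactly one window. A standalone sketch of that bookkeeping (window size and types are illustrative, not CDG's module parameters):

/* Sliding-window sum of the last WINDOW gradients, in the spirit of the
 * tcp_cdg_grad() hits above: drop the oldest sample, add the new one,
 * advance the tail. */
#include <stdint.h>
#include <stdio.h>

#define WINDOW 8U   /* illustrative window size */

struct grad_win {
    int32_t slots[WINDOW];
    int32_t sum;
    unsigned int tail;
};

static int32_t grad_win_push(struct grad_win *w, int32_t gmax)
{
    w->sum += gmax - w->slots[w->tail];   /* drop oldest, add newest */
    w->slots[w->tail] = gmax;
    w->tail = (w->tail + 1) % WINDOW;
    return w->sum;                        /* gradient summed over the window */
}

int main(void)
{
    struct grad_win w = { { 0 }, 0, 0 };
    const int32_t samples[] = { 5, -3, 2, 7, -1 };

    for (unsigned int i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
        printf("windowed gradient sum = %d\n", (int)grad_win_push(&w, samples[i]));
    return 0;
}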
|
D | tcp_yeah.c |
    166  tp->snd_cwnd = max(tp->snd_cwnd,  in tcp_yeah_cong_avoid()
    173  yeah->reno_count = max(tp->snd_cwnd>>1, 2U);  in tcp_yeah_cong_avoid()
    215  reduction = min(reduction, max(tp->snd_cwnd>>1, 2U));  in tcp_yeah_ssthresh()
    217  reduction = max(reduction, tp->snd_cwnd >> TCP_YEAH_DELTA);  in tcp_yeah_ssthresh()
    219  reduction = max(tp->snd_cwnd>>1, 2U);  in tcp_yeah_ssthresh()
    222  yeah->reno_count = max(yeah->reno_count>>1, 2U);  in tcp_yeah_ssthresh()
|
D | tcp_dctcp.c |
    108  return max(tp->snd_cwnd - ((tp->snd_cwnd * ca->dctcp_alpha) >> 11U), 2U);  in dctcp_ssthresh()
    131  delivered_ce /= max(1U, delivered);  in dctcp_update_alpha()
    150  tp->snd_ssthresh = max(tp->snd_cwnd >> 1U, 2U);  in dctcp_react_to_loss()
    213  return max(tcp_sk(sk)->snd_cwnd, ca->loss_cwnd);  in dctcp_cwnd_undo()
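The dctcp_ssthresh() hit is DCTCP's window reduction in fixed point: dctcp_alpha is scaled by 2^10 (1024 corresponds to full marking), so (cwnd * alpha) >> 11 is cwnd * (alpha / 1024) / 2, and the result is never allowed below 2 segments. A standalone sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

#define ALPHA_SHIFT 10U   /* alpha is scaled by 2^10, i.e. 0..1024 */

/* Reduce cwnd by (alpha / 2) of its value, never going below 2 segments,
 * mirroring the expression in the dctcp_ssthresh() hit above. */
static uint32_t dctcp_like_ssthresh(uint32_t cwnd, uint32_t alpha)
{
    uint32_t reduction = (cwnd * alpha) >> (ALPHA_SHIFT + 1);

    return cwnd - reduction > 2U ? cwnd - reduction : 2U;
}

int main(void)
{
    /* alpha == 1024 (full marking) halves cwnd; alpha == 0 leaves it alone. */
    printf("%u %u %u\n",
           dctcp_like_ssthresh(100, 1024),   /* 50 */
           dctcp_like_ssthresh(100, 256),    /* 88 */
           dctcp_like_ssthresh(100, 0));     /* 100 */
    return 0;
}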
|
D | tcp_nv.c |
    204  cnt = max(4U, tp->snd_cwnd >> ca->cwnd_growth_factor);  in tcpnv_cong_avoid()
    213  return max((tp->snd_cwnd * nv_loss_dec_factor) >> 10, 2U);  in tcpnv_recalc_ssthresh()
    334  ca->nv_min_cwnd = max(ca->nv_min_cwnd / 2, NV_MIN_CWND);  in tcpnv_acked()
    406  dec = max(2U, ((tp->snd_cwnd - max_win) *  in tcpnv_acked()
|
D | tcp_bbr.c |
    235  return max(bbr->extra_acked[0], bbr->extra_acked[1]);  in bbr_extra_acked()
    272  rtt_us = max(tp->srtt_us >> 3, 1U);  in bbr_init_pacing_rate_from_rtt()
    326  bbr->prior_cwnd = max(bbr->prior_cwnd, tp->snd_cwnd);  in bbr_save_cwnd()
    442  edt_ns = max(tp->tcp_wstamp_ns, now_ns);  in bbr_packets_in_net_at_edt()
    500  cwnd = max(cwnd, bbr->prior_cwnd);  in bbr_set_cwnd_to_recover_or_restore()
    506  *new_cwnd = max(cwnd, tcp_packets_in_flight(tp) + acked);  in bbr_set_cwnd_to_recover_or_restore()
    542  cwnd = max(cwnd, bbr_cwnd_min_target);  in bbr_set_cwnd()
    915  tp->snd_cwnd = max(tp->snd_cwnd, bbr->prior_cwnd);  in bbr_check_probe_rtt_done()
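Most of the tcp_bbr.c hits are one idiom: before an episode that cuts cwnd (loss recovery, PROBE_RTT), bbr_save_cwnd() remembers the best recent value, on the way out cwnd is restored to at least that, and bbr_set_cwnd() additionally keeps it above a small fixed floor. A distilled sketch of that clamping, not BBR itself (names and the floor value are illustrative):

#include <stdint.h>

#define CWND_MIN_TARGET 4U   /* illustrative floor, cf. bbr_cwnd_min_target */

struct cc_state {
    uint32_t cwnd;
    uint32_t prior_cwnd;      /* best cwnd remembered before the episode */
};

static uint32_t u32_max(uint32_t a, uint32_t b) { return a > b ? a : b; }

/* On entering loss recovery or PROBE_RTT: never let the saved value shrink. */
static void save_cwnd(struct cc_state *cc)
{
    cc->prior_cwnd = u32_max(cc->prior_cwnd, cc->cwnd);
}

/* When the episode ends: come back with at least the saved cwnd, and never
 * drop below the minimum target. */
static void restore_cwnd(struct cc_state *cc)
{
    cc->cwnd = u32_max(cc->cwnd, cc->prior_cwnd);
    cc->cwnd = u32_max(cc->cwnd, CWND_MIN_TARGET);
}

int main(void)
{
    struct cc_state cc = { .cwnd = 40, .prior_cwnd = 0 };

    save_cwnd(&cc);      /* prior_cwnd = 40 */
    cc.cwnd = 4;         /* drastic cut during recovery */
    restore_cwnd(&cc);   /* cwnd back to 40 */
    return cc.cwnd == 40 ? 0 : 1;
}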
|
/net/phonet/ |
D | sysctl.c |
    38   void phonet_get_local_port_range(int *min, int *max)  in phonet_get_local_port_range() argument
    46   if (max)  in phonet_get_local_port_range()
    47   *max = local_port_range[1];  in phonet_get_local_port_range()
|
/net/ipv4/netfilter/ |
D | ipt_ah.c |
    20   spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)  in spi_match() argument
    24   invert ? '!' : ' ', min, spi, max);  in spi_match()
    25   r = (spi >= min && spi <= max) ^ invert;  in spi_match()
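spi_match() (duplicated nearly verbatim in xt_esp.c, xt_ipcomp.c, and the IPv6 matches further down) is the standard inclusive-range test with the user's invert flag folded in by XOR, so "--ahspi 100:200" and its negated form share one code path. A standalone sketch of the pattern shown in the hit:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True when spi lies in [min, max]; invert flips the result, so the
 * negated rule needs no second code path. */
static bool spi_match(uint32_t min, uint32_t max, uint32_t spi, bool invert)
{
    return (spi >= min && spi <= max) ^ invert;
}

int main(void)
{
    printf("%d %d %d\n",
           spi_match(100, 200, 150, false),   /* 1: in range */
           spi_match(100, 200, 150, true),    /* 0: in range, inverted */
           spi_match(100, 200, 50, true));    /* 1: out of range, inverted */
    return 0;
}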
|
/net/netfilter/ |
D | xt_esp.c |
    26   spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)  in spi_match() argument
    30   invert ? '!' : ' ', min, spi, max);  in spi_match()
    31   r = (spi >= min && spi <= max) ^ invert;  in spi_match()
|
D | xt_ipcomp.c |
    30   spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)  in spi_match() argument
    34   invert ? '!' : ' ', min, spi, max);  in spi_match()
    35   r = (spi >= min && spi <= max) ^ invert;  in spi_match()
|
D | nf_nat_core.c |
    235  const union nf_conntrack_man_proto *max)  in l4proto_in_range() argument
    243  ntohs(tuple->src.u.icmp.id) <= ntohs(max->icmp.id);  in l4proto_in_range()
    256  ntohs(port) <= ntohs(max->all);  in l4proto_in_range()
    335  unsigned int i, max;  in find_best_ips_proto() local
    356  max = sizeof(var_ipp->ip) / sizeof(u32) - 1;  in find_best_ips_proto()
    358  max = sizeof(var_ipp->ip6) / sizeof(u32) - 1;  in find_best_ips_proto()
    369  0 : (__force u32)tuple->dst.u3.all[max] ^ zone->id);  in find_best_ips_proto()
    372  for (i = 0; i <= max; i++) {  in find_best_ips_proto()
    405  unsigned int range_size, min, max, i, attempts;  in nf_nat_l4proto_unique_tuple() local
    481  max = ntohs(range->max_proto.all);  in nf_nat_l4proto_unique_tuple()
    [all …]
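The l4proto_in_range() hits only compare ports and ICMP ids after ntohs(): the conntrack tuple stores them in network byte order, and comparing big-endian values as plain integers would order them wrongly on little-endian hosts. A standalone sketch of that check, using the libc byte-order helpers:

#include <arpa/inet.h>   /* ntohs(), htons() */
#include <stdbool.h>
#include <stdint.h>

/* port, lo and hi arrive in network byte order, as they sit in the tuple;
 * convert to host order before the numeric range comparison. */
static bool port_in_range(uint16_t port_be, uint16_t lo_be, uint16_t hi_be)
{
    uint16_t port = ntohs(port_be);

    return port >= ntohs(lo_be) && port <= ntohs(hi_be);
}

int main(void)
{
    /* 8080 lies inside 1024..65535 */
    return port_in_range(htons(8080), htons(1024), htons(65535)) ? 0 : 1;
}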
|
D | xt_hashlimit.c |
    148  to->max = cfg->max;  in cfg_copy()
    160  to->max = cfg->max;  in cfg_copy()
    241  if (ht->cfg.max && ht->count >= ht->cfg.max) {  in dsthash_alloc_init()
    243  net_err_ratelimited("max count of %u reached\n", ht->cfg.max);  in dsthash_alloc_init()
    311  if (hinfo->cfg.max == 0)  in htable_create()
    312  hinfo->cfg.max = 8 * hinfo->cfg.size;  in htable_create()
    313  else if (hinfo->cfg.max < hinfo->cfg.size)  in htable_create()
    314  hinfo->cfg.max = hinfo->cfg.size;  in htable_create()
    852  if (cfg->max > HASHLIMIT_MAX_SIZE) {  in hashlimit_mt_check_common()
    853  cfg->max = HASHLIMIT_MAX_SIZE;  in hashlimit_mt_check_common()
    [all …]
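Taken together, the htable_create() and hashlimit_mt_check_common() hits show how the entry limit is sanitized: a limit above HASHLIMIT_MAX_SIZE is clamped down, 0 means "pick a default of 8 entries per bucket", and a limit smaller than the bucket count is raised to it; dsthash_alloc_init() then refuses new entries once the count reaches the limit. A standalone sketch of that policy (the clamp constant here is illustrative):

#include <stdint.h>
#include <stdio.h>

#define MAX_ENTRIES_CLAMP (1U << 20)   /* illustrative, cf. HASHLIMIT_MAX_SIZE */

struct htable_cfg {
    uint32_t size;   /* number of hash buckets */
    uint32_t max;    /* entry limit; 0 = choose a default */
};

static void sanitize_max(struct htable_cfg *cfg)
{
    if (cfg->max > MAX_ENTRIES_CLAMP)
        cfg->max = MAX_ENTRIES_CLAMP;     /* refuse absurd limits */
    if (cfg->max == 0)
        cfg->max = 8 * cfg->size;         /* default: 8 entries per bucket */
    else if (cfg->max < cfg->size)
        cfg->max = cfg->size;             /* never fewer entries than buckets */
}

/* Insertion-time check, as in the dsthash_alloc_init() hit. */
static int can_insert(const struct htable_cfg *cfg, uint32_t count)
{
    return !(cfg->max && count >= cfg->max);
}

int main(void)
{
    struct htable_cfg cfg = { .size = 64, .max = 0 };

    sanitize_max(&cfg);
    printf("max = %u, room at 511 entries: %d\n", cfg.max, can_insert(&cfg, 511));
    return 0;
}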
|
D | xt_length.c |
    26   return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;  in length_mt()
    36   return (pktlen >= info->min && pktlen <= info->max) ^ info->invert;  in length_mt6()
|
/net/rxrpc/ |
D | call_accept.c |
    44   int max, tmp;  in rxrpc_service_prealloc_one() local
    48   max = rx->sk.sk_max_ack_backlog;  in rxrpc_service_prealloc_one()
    50   if (tmp >= max) {  in rxrpc_service_prealloc_one()
    51   _leave(" = -ENOBUFS [full %u]", max);  in rxrpc_service_prealloc_one()
    54   max -= tmp;  in rxrpc_service_prealloc_one()
    63   if (tmp >= max) {  in rxrpc_service_prealloc_one()
    67   max = tmp + 1;  in rxrpc_service_prealloc_one()
    71   if (CIRC_CNT(head, tail, size) < max) {  in rxrpc_service_prealloc_one()
    82   if (CIRC_CNT(head, tail, size) < max) {  in rxrpc_service_prealloc_one()
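rxrpc_service_prealloc_one() derives its limit from the socket's backlog and then adds an entry to each preallocation ring only while CIRC_CNT(head, tail, size) — the number of occupied slots in a power-of-two ring, from include/linux/circ_buf.h — is still below that limit. A standalone sketch of the "is there room?" test:

#include <stdbool.h>

/* Occupied entries in a power-of-two ring (include/linux/circ_buf.h). */
#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

#define RING_SIZE 16U   /* must be a power of two */

struct prealloc_ring {
    unsigned int head;   /* producer index */
    unsigned int tail;   /* consumer index */
};

/* True when another entry may be preallocated without exceeding max,
 * where max comes from the listen backlog in the rxrpc code. */
static bool ring_has_room(const struct prealloc_ring *r, unsigned int max)
{
    return CIRC_CNT(r->head, r->tail, RING_SIZE) < max;
}

int main(void)
{
    struct prealloc_ring r = { .head = 5, .tail = 2 };   /* 3 entries in use */

    return ring_has_room(&r, 4) ? 0 : 1;                 /* room for one more */
}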
|
D | rtt.c |
    95   peer->rttvar_us = max(peer->mdev_us, rxrpc_rto_min_us(peer));  in rxrpc_rtt_estimator()
    99   peer->srtt_us = max(1U, srtt);  in rxrpc_rtt_estimator()
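The two rtt.c hits are the tail of a Jacobson-style RTT estimator: the variance term is never allowed below the minimum RTO, and the smoothed RTT is floored at 1 so it can never read as zero. A standalone sketch in the same spirit (RFC 6298 gains; the two max() floors are the point, the rest is simplified relative to the kernel code):

#include <stdint.h>

#define RTO_MIN_US 200000U   /* illustrative 200 ms floor, cf. rxrpc_rto_min_us() */

struct rtt_est {
    uint32_t srtt_us;     /* smoothed RTT */
    uint32_t rttvar_us;   /* RTT variance */
};

static uint32_t u32_max(uint32_t a, uint32_t b) { return a > b ? a : b; }

/* One sample update with the usual 1/8 and 1/4 gains, keeping the floors
 * the hits above show: rttvar never drops below the minimum RTO, and srtt
 * never becomes zero. */
static void rtt_sample(struct rtt_est *e, uint32_t rtt_us)
{
    if (e->srtt_us == 0) {
        e->srtt_us = rtt_us;
        e->rttvar_us = rtt_us / 2;
    } else {
        uint32_t err = rtt_us > e->srtt_us ? rtt_us - e->srtt_us
                                           : e->srtt_us - rtt_us;

        e->rttvar_us -= e->rttvar_us / 4;
        e->rttvar_us += err / 4;
        e->srtt_us -= e->srtt_us / 8;
        e->srtt_us += rtt_us / 8;
    }
    e->rttvar_us = u32_max(e->rttvar_us, RTO_MIN_US);
    e->srtt_us = u32_max(1U, e->srtt_us);
}

int main(void)
{
    struct rtt_est e = { 0, 0 };

    rtt_sample(&e, 100000);   /* 100 ms sample */
    rtt_sample(&e, 120000);
    return e.srtt_us > 0 && e.rttvar_us >= RTO_MIN_US ? 0 : 1;
}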
|
/net/ipv6/netfilter/ |
D | ip6t_ah.c |
    25   spi_match(u_int32_t min, u_int32_t max, u_int32_t spi, bool invert)  in spi_match() argument
    30   invert ? '!' : ' ', min, spi, max);  in spi_match()
    31   r = (spi >= min && spi <= max) ^ invert;  in spi_match()
|
D | ip6t_frag.c |
    24   id_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)  in id_match() argument
    28   min, id, max);  in id_match()
    29   r = (id >= min && id <= max) ^ invert;  in id_match()
|
D | ip6t_mh.c |
    26   type_match(u_int8_t min, u_int8_t max, u_int8_t type, bool invert)  in type_match() argument
    28   return (type >= min && type <= max) ^ invert;  in type_match()
|
D | ip6t_rt.c |
    26   segsleft_match(u_int32_t min, u_int32_t max, u_int32_t id, bool invert)  in segsleft_match() argument
    30   invert ? '!' : ' ', min, id, max);  in segsleft_match()
    31   r = (id >= min && id <= max) ^ invert;  in segsleft_match()
|
/net/dccp/ccids/ |
D | ccid3.c |
    126  min_rate = max(min_rate, 2 * hc->tx_x_recv);  in ccid3_hc_tx_update_x()
    132  hc->tx_x = max(hc->tx_x, (((__u64)hc->tx_s) << 6) / TFRC_T_MBI);  in ccid3_hc_tx_update_x()
    137  hc->tx_x = max(hc->tx_x,  in ccid3_hc_tx_update_x()
    216  hc->tx_x = max(hc->tx_x / 2,  in ccid3_hc_tx_no_feedback_timer()
    232  max(hc->tx_x_recv / 2,  in ccid3_hc_tx_no_feedback_timer()
    250  t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);  in ccid3_hc_tx_no_feedback_timer()
    442  t_nfb = max(hc->tx_t_rto, 2 * hc->tx_t_ipi);  in ccid3_hc_tx_packet_recv()
|
D | ccid2.c |
    200  win_used = max(hc->tx_cwnd_used, init_win);  in ccid2_cwnd_application_limited()
    203  hc->tx_ssthresh = max(hc->tx_ssthresh,  in ccid2_cwnd_application_limited()
    221  hc->tx_ssthresh = max(hc->tx_ssthresh, (cwnd >> 1) + (cwnd >> 2));  in ccid2_cwnd_restart()
    228  hc->tx_cwnd = max(cwnd, restart_cwnd);  in ccid2_cwnd_restart()
    369  hc->tx_mdev_max = max(hc->tx_mdev, tcp_rto_min(sk));  in ccid2_rtt_estimator()
    487  hc->tx_ssthresh = max(hc->tx_cwnd, 2U);  in ccid2_congestion_event()
|
/net/sched/ |
D | sch_sfb.c |
    58   u32 max;  member
    348  if (unlikely(minqlen >= q->max)) {  in sfb_enqueue()
    483  .max = 25,
    535  q->max = ctl->max;  in sfb_change()
    576  .max = q->max,  in sfb_dump()
|
/net/wireless/ |
D | wext-priv.c |
    68   int max = args & IW_PRIV_SIZE_MASK;  in adjust_priv_size() local
    72   if (max < num)  in adjust_priv_size()
    73   num = max;  in adjust_priv_size()
|
/net/ceph/ |
D | osdmap.c |
    998   static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)  in osdmap_set_max_osd() argument
    1006  dout("%s old %u new %u\n", __func__, map->max_osd, max);  in osdmap_set_max_osd()
    1007  if (max == map->max_osd)  in osdmap_set_max_osd()
    1010  state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);  in osdmap_set_max_osd()
    1011  weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);  in osdmap_set_max_osd()
    1012  addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);  in osdmap_set_max_osd()
    1020  to_copy = min(map->max_osd, max);  in osdmap_set_max_osd()
    1033  for (i = map->max_osd; i < max; i++) {  in osdmap_set_max_osd()
    1042  affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)),  in osdmap_set_max_osd()
    1052  for (i = map->max_osd; i < max; i++)  in osdmap_set_max_osd()
    [all …]
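osdmap_set_max_osd() resizes several per-OSD arrays to the new max: allocate arrays of the new size, copy min(old, new) existing entries across, give the newly added slots their defaults, then swap the pointers in. A standalone userspace sketch of that pattern for a single array:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Resize a per-OSD state array from *cur_max to new_max entries, preserving
 * existing entries, in the spirit of osdmap_set_max_osd().  Returns 0 on
 * success, -1 on allocation failure. */
static int resize_osd_state(uint32_t **statep, uint32_t *cur_max, uint32_t new_max)
{
    uint32_t to_copy = *cur_max < new_max ? *cur_max : new_max;
    uint32_t *state;

    if (new_max == *cur_max)
        return 0;

    state = calloc(new_max, sizeof(*state));
    if (!state)
        return -1;

    if (to_copy)
        memcpy(state, *statep, to_copy * sizeof(*state));
    /* slots [*cur_max, new_max) start out zeroed here; the kernel code
     * initializes them to explicit per-field defaults instead */

    free(*statep);
    *statep = state;
    *cur_max = new_max;
    return 0;
}

int main(void)
{
    uint32_t *state = NULL;
    uint32_t max_osd = 0;

    if (resize_osd_state(&state, &max_osd, 8))   /* grow 0 -> 8 */
        return 1;
    if (resize_osd_state(&state, &max_osd, 4))   /* shrink 8 -> 4 */
        return 1;
    free(state);
    return 0;
}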
|
/net/tipc/ |
D | core.h |
    201  static inline int in_range(u16 val, u16 min, u16 max)  in in_range() argument
    203  return !less(val, min) && !more(val, max);  in in_range()
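in_range() is built on TIPC's less()/more() helpers rather than plain comparisons, which suggests the u16 values being tested are sequence numbers that can wrap around. A standalone sketch of the same idea using serial-number (mod 2^16) ordering — the general technique, not a copy of the TIPC helpers:

#include <stdbool.h>
#include <stdint.h>

/* "a is before b" in mod-2^16 serial-number order: the forward distance
 * from a to b is less than half the sequence space. */
static bool seq_less(uint16_t a, uint16_t b)
{
    return a != b && (uint16_t)(b - a) < 0x8000;
}

static bool seq_less_eq(uint16_t a, uint16_t b)
{
    return a == b || seq_less(a, b);
}

/* val lies in [min, max] even when the window wraps past 0xffff. */
static bool seq_in_range(uint16_t val, uint16_t min, uint16_t max)
{
    return seq_less_eq(min, val) && seq_less_eq(val, max);
}

int main(void)
{
    /* window 0xfff0..0x0010 wraps past 0xffff; 0x0001 is inside it */
    return seq_in_range(0x0001, 0xfff0, 0x0010) ? 0 : 1;
}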
|
/net/sctp/ |
D | sysctl.c |
    377  unsigned int max = *(unsigned int *) ctl->extra2;  in proc_sctp_do_rto_min() local
    391  if (new_value > max || new_value < min)  in proc_sctp_do_rto_min()
    406  unsigned int max = *(unsigned int *) ctl->extra2;  in proc_sctp_do_rto_max() local
    420  if (new_value > max || new_value < min)  in proc_sctp_do_rto_max()
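proc_sctp_do_rto_min() and proc_sctp_do_rto_max() both read the candidate value into a temporary and reject it unless it falls within the bounds the ctl_table entry carries in extra1/extra2; only a value inside the bounds is accepted. A standalone sketch of that validate-then-commit shape (not the proc_dointvec plumbing itself):

#include <errno.h>

/* Commit new_value into *slot only if it lies within [min, max]; the kernel
 * handlers take min and max from ctl->extra1 and ctl->extra2. */
static int set_bounded_value(unsigned int *slot, unsigned int new_value,
                             unsigned int min, unsigned int max)
{
    if (new_value > max || new_value < min)
        return -EINVAL;

    *slot = new_value;
    return 0;
}

int main(void)
{
    unsigned int rto_min_ms = 1000;

    /* reject 0, then accept 20, within an illustrative [1, 5000] range */
    if (set_bounded_value(&rto_min_ms, 0, 1, 5000) != -EINVAL)
        return 1;
    return set_bounded_value(&rto_min_ms, 20, 1, 5000);
}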
|