Lines matching refs: skb — identifier cross-reference over the TCP output path (net/ipv4/tcp_output.c). Each hit shows the source line number, the matching line, and the enclosing function; "argument" and "local" note how skb is bound in that function.
72 static void tcp_event_new_data_sent(struct sock *sk, const struct sk_buff *skb) in tcp_event_new_data_sent() argument
78 tcp_advance_send_head(sk, skb); in tcp_event_new_data_sent()
79 tp->snd_nxt = TCP_SKB_CB(skb)->end_seq; in tcp_event_new_data_sent()
81 tp->packets_out += tcp_skb_pcount(skb); in tcp_event_new_data_sent()
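
The three hits above are the whole bookkeeping for freshly sent data: advance the send head past skb, move snd_nxt to the segment's end_seq, and add its segment count to packets_out, the in-flight estimate. A minimal userspace model of that arithmetic (all values hypothetical):

    #include <stdio.h>

    /* Model of tcp_event_new_data_sent() bookkeeping: snd_nxt jumps to
     * the sent skb's end_seq, packets_out grows by its GSO pcount.
     * The numbers below are made up for illustration. */
    int main(void)
    {
        unsigned int snd_nxt = 1000, packets_out = 4;
        unsigned int skb_end_seq = 1000 + 2896;  /* two 1448-byte segments */
        unsigned int skb_pcount = 2;             /* tcp_skb_pcount() */

        snd_nxt = skb_end_seq;                   /* line 79 */
        packets_out += skb_pcount;               /* line 81 */
        printf("snd_nxt=%u packets_out=%u\n", snd_nxt, packets_out);
        return 0;
    }
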
299 static inline void TCP_ECN_send_synack(const struct tcp_sock *tp, struct sk_buff *skb) in TCP_ECN_send_synack() argument
301 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_CWR; in TCP_ECN_send_synack()
303 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_ECE; in TCP_ECN_send_synack()
307 static inline void TCP_ECN_send_syn(struct sock *sk, struct sk_buff *skb) in TCP_ECN_send_syn() argument
313 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ECE | TCPHDR_CWR; in TCP_ECN_send_syn()
328 static inline void TCP_ECN_send(struct sock *sk, struct sk_buff *skb, in TCP_ECN_send() argument
335 if (skb->len != tcp_header_len && in TCP_ECN_send()
336 !before(TCP_SKB_CB(skb)->seq, tp->snd_nxt)) { in TCP_ECN_send()
340 tcp_hdr(skb)->cwr = 1; in TCP_ECN_send()
341 skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN; in TCP_ECN_send()
348 tcp_hdr(skb)->ece = 1; in TCP_ECN_send()
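
The ECN helpers all poke the same flags byte: the active SYN offers ECN with ECE|CWR (line 313); the SYN-ACK echoes ECE only, so CWR is cleared (line 301), and ECE too when ECN was not negotiated (line 303); on established-state data, CWR goes into the header after a window reduction (line 340) and the skb is tagged SKB_GSO_TCP_ECN (line 341) so segmentation can treat the CWR bit specially. A sketch of the flag arithmetic, using the TCPHDR_* values from include/net/tcp.h:

    #include <stdio.h>

    /* TCPHDR_* bit values as defined in include/net/tcp.h. */
    #define TCPHDR_SYN 0x02
    #define TCPHDR_ECE 0x40
    #define TCPHDR_CWR 0x80

    int main(void)
    {
        unsigned char flags = TCPHDR_SYN;

        flags |= TCPHDR_ECE | TCPHDR_CWR;   /* active open: offer ECN (313) */
        printf("SYN flags     0x%02x\n", flags);
        flags &= ~TCPHDR_CWR;               /* SYN-ACK: echo ECE only (301) */
        printf("SYN-ACK flags 0x%02x\n", flags);
        return 0;
    }
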
355 static void tcp_init_nondata_skb(struct sk_buff *skb, u32 seq, u8 flags) in tcp_init_nondata_skb() argument
357 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_init_nondata_skb()
358 skb->csum = 0; in tcp_init_nondata_skb()
360 TCP_SKB_CB(skb)->tcp_flags = flags; in tcp_init_nondata_skb()
361 TCP_SKB_CB(skb)->sacked = 0; in tcp_init_nondata_skb()
363 skb_shinfo(skb)->gso_segs = 1; in tcp_init_nondata_skb()
364 skb_shinfo(skb)->gso_size = 0; in tcp_init_nondata_skb()
365 skb_shinfo(skb)->gso_type = 0; in tcp_init_nondata_skb()
367 TCP_SKB_CB(skb)->seq = seq; in tcp_init_nondata_skb()
370 TCP_SKB_CB(skb)->end_seq = seq; in tcp_init_nondata_skb()
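
tcp_init_nondata_skb() stamps a freshly allocated control skb (ACK, RST, FIN, window probe): partial-checksum state, the requested flags, single-segment GSO fields, and the sequence range. The unlisted lines between 367 and 370 bump seq when flags contain SYN or FIN, since each of those consumes one sequence number; that is the only way end_seq can differ from seq here. A worked model:

    #include <stdio.h>

    #define TCPHDR_FIN 0x01
    #define TCPHDR_SYN 0x02
    #define TCPHDR_ACK 0x10

    /* Model of the seq/end_seq stamping: SYN and FIN occupy one sequence
     * number each; a pure ACK occupies none. */
    static unsigned int end_seq(unsigned int seq, unsigned char flags)
    {
        if (flags & (TCPHDR_SYN | TCPHDR_FIN))
            seq++;
        return seq;
    }

    int main(void)
    {
        printf("pure ACK: %u\n", end_seq(5000, TCPHDR_ACK));              /* 5000 */
        printf("FIN|ACK:  %u\n", end_seq(5000, TCPHDR_FIN | TCPHDR_ACK)); /* 5001 */
        return 0;
    }
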
497 static unsigned int tcp_syn_options(struct sock *sk, struct sk_buff *skb, in tcp_syn_options() argument
529 opts->tsval = TCP_SKB_CB(skb)->when + tp->tsoffset; in tcp_syn_options()
561 unsigned int mss, struct sk_buff *skb, in tcp_synack_options() argument
597 opts->tsval = TCP_SKB_CB(skb)->when; in tcp_synack_options()
622 static unsigned int tcp_established_options(struct sock *sk, struct sk_buff *skb, in tcp_established_options() argument
626 struct tcp_skb_cb *tcb = skb ? TCP_SKB_CB(skb) : NULL; in tcp_established_options()
790 void tcp_wfree(struct sk_buff *skb) in tcp_wfree() argument
792 struct sock *sk = skb->sk; in tcp_wfree()
803 atomic_sub(skb->truesize - 1, &sk->sk_wmem_alloc); in tcp_wfree()
812 sock_wfree(skb); in tcp_wfree()
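
tcp_wfree() is the TSQ (TCP Small Queues) skb destructor (compare the destructor choice keyed on sysctl_tcp_limit_output_bytes at line 889 below). The interesting hit is line 803: when a throttled flow's skb is freed, only truesize - 1 is returned to sk_wmem_alloc, so one unit stays charged as a reference that keeps the socket alive until the TSQ tasklet runs; otherwise the function falls through to plain sock_wfree() (line 812). A toy model of that reference-keeping subtraction:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Model of the truesize - 1 trick in tcp_wfree(): one unit of
     * sk_wmem_alloc survives as a reference on the socket. Values are
     * illustrative. */
    int main(void)
    {
        atomic_uint sk_wmem_alloc = 4096;   /* hypothetical charge */
        unsigned int truesize = 4096;

        atomic_fetch_sub(&sk_wmem_alloc, truesize - 1);
        printf("units left as socket ref: %u\n",
               atomic_load(&sk_wmem_alloc));  /* 1 */
        return 0;
    }
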
827 static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, in tcp_transmit_skb() argument
840 BUG_ON(!skb || !tcp_skb_pcount(skb)); in tcp_transmit_skb()
846 __net_timestamp(skb); in tcp_transmit_skb()
849 const struct sk_buff *fclone = skb + 1; in tcp_transmit_skb()
851 if (unlikely(skb->fclone == SKB_FCLONE_ORIG && in tcp_transmit_skb()
856 if (unlikely(skb_cloned(skb))) in tcp_transmit_skb()
857 skb = pskb_copy(skb, gfp_mask); in tcp_transmit_skb()
859 skb = skb_clone(skb, gfp_mask); in tcp_transmit_skb()
860 if (unlikely(!skb)) in tcp_transmit_skb()
866 tcb = TCP_SKB_CB(skb); in tcp_transmit_skb()
870 tcp_options_size = tcp_syn_options(sk, skb, &opts, &md5); in tcp_transmit_skb()
872 tcp_options_size = tcp_established_options(sk, skb, &opts, in tcp_transmit_skb()
882 skb->ooo_okay = sk_wmem_alloc_get(sk) == 0; in tcp_transmit_skb()
884 skb_push(skb, tcp_header_size); in tcp_transmit_skb()
885 skb_reset_transport_header(skb); in tcp_transmit_skb()
887 skb_orphan(skb); in tcp_transmit_skb()
888 skb->sk = sk; in tcp_transmit_skb()
889 skb->destructor = (sysctl_tcp_limit_output_bytes > 0) ? in tcp_transmit_skb()
891 atomic_add(skb->truesize, &sk->sk_wmem_alloc); in tcp_transmit_skb()
894 th = tcp_hdr(skb); in tcp_transmit_skb()
926 TCP_ECN_send(sk, skb, tcp_header_size); in tcp_transmit_skb()
933 md5, sk, NULL, skb); in tcp_transmit_skb()
937 icsk->icsk_af_ops->send_check(sk, skb); in tcp_transmit_skb()
940 tcp_event_ack_sent(sk, tcp_skb_pcount(skb)); in tcp_transmit_skb()
942 if (skb->len != tcp_header_size) in tcp_transmit_skb()
947 tcp_skb_pcount(skb)); in tcp_transmit_skb()
949 err = icsk->icsk_af_ops->queue_xmit(skb, &inet->cork.fl); in tcp_transmit_skb()
963 static void tcp_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_queue_skb() argument
968 tp->write_seq = TCP_SKB_CB(skb)->end_seq; in tcp_queue_skb()
969 skb_header_release(skb); in tcp_queue_skb()
970 tcp_add_write_queue_tail(sk, skb); in tcp_queue_skb()
971 sk->sk_wmem_queued += skb->truesize; in tcp_queue_skb()
972 sk_mem_charge(sk, skb->truesize); in tcp_queue_skb()
976 static void tcp_set_skb_tso_segs(const struct sock *sk, struct sk_buff *skb, in tcp_set_skb_tso_segs() argument
979 if (skb->len <= mss_now || !sk_can_gso(sk) || in tcp_set_skb_tso_segs()
980 skb->ip_summed == CHECKSUM_NONE) { in tcp_set_skb_tso_segs()
984 skb_shinfo(skb)->gso_segs = 1; in tcp_set_skb_tso_segs()
985 skb_shinfo(skb)->gso_size = 0; in tcp_set_skb_tso_segs()
986 skb_shinfo(skb)->gso_type = 0; in tcp_set_skb_tso_segs()
988 skb_shinfo(skb)->gso_segs = DIV_ROUND_UP(skb->len, mss_now); in tcp_set_skb_tso_segs()
989 skb_shinfo(skb)->gso_size = mss_now; in tcp_set_skb_tso_segs()
990 skb_shinfo(skb)->gso_type = sk->sk_gso_type; in tcp_set_skb_tso_segs()
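
Lines 979-990 cover both cases: an skb no longer than the current MSS (or one the socket cannot GSO) is described as a single segment with gso_size 0; anything larger is ceil(len / mss_now) segments of gso_size mss_now. For example, a 4000-byte skb at MSS 1448 becomes three segments:

    #include <stdio.h>

    /* Same rounding the kernel's DIV_ROUND_UP macro performs (line 988). */
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned int len = 4000, mss_now = 1448;  /* hypothetical sizes */

        if (len <= mss_now)
            printf("gso_segs=1 gso_size=0 (not TSO)\n");
        else
            printf("gso_segs=%u gso_size=%u\n",
                   DIV_ROUND_UP(len, mss_now), mss_now);  /* 3, 1448 */
        return 0;
    }
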
997 static void tcp_adjust_fackets_out(struct sock *sk, const struct sk_buff *skb, in tcp_adjust_fackets_out() argument
1005 if (after(tcp_highest_sack_seq(tp), TCP_SKB_CB(skb)->seq)) in tcp_adjust_fackets_out()
1012 static void tcp_adjust_pcount(struct sock *sk, const struct sk_buff *skb, int decr) in tcp_adjust_pcount() argument
1018 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_adjust_pcount()
1020 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) in tcp_adjust_pcount()
1022 if (TCP_SKB_CB(skb)->sacked & TCPCB_LOST) in tcp_adjust_pcount()
1029 tcp_adjust_fackets_out(sk, skb, decr); in tcp_adjust_pcount()
1032 before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(tp->lost_skb_hint)->seq) && in tcp_adjust_pcount()
1033 (tcp_is_fack(tp) || (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED))) in tcp_adjust_pcount()
1044 int tcp_fragment(struct sock *sk, struct sk_buff *skb, u32 len, in tcp_fragment() argument
1053 if (WARN_ON(len > skb->len)) in tcp_fragment()
1056 nsize = skb_headlen(skb) - len; in tcp_fragment()
1060 if (skb_cloned(skb) && in tcp_fragment()
1061 skb_is_nonlinear(skb) && in tcp_fragment()
1062 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) in tcp_fragment()
1072 nlen = skb->len - len - nsize; in tcp_fragment()
1074 skb->truesize -= nlen; in tcp_fragment()
1077 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tcp_fragment()
1078 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_fragment()
1079 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tcp_fragment()
1082 flags = TCP_SKB_CB(skb)->tcp_flags; in tcp_fragment()
1083 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tcp_fragment()
1085 TCP_SKB_CB(buff)->sacked = TCP_SKB_CB(skb)->sacked; in tcp_fragment()
1087 if (!skb_shinfo(skb)->nr_frags && skb->ip_summed != CHECKSUM_PARTIAL) { in tcp_fragment()
1089 buff->csum = csum_partial_copy_nocheck(skb->data + len, in tcp_fragment()
1093 skb_trim(skb, len); in tcp_fragment()
1095 skb->csum = csum_block_sub(skb->csum, buff->csum, len); in tcp_fragment()
1097 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_fragment()
1098 skb_split(skb, buff, len); in tcp_fragment()
1101 buff->ip_summed = skb->ip_summed; in tcp_fragment()
1106 TCP_SKB_CB(buff)->when = TCP_SKB_CB(skb)->when; in tcp_fragment()
1107 buff->tstamp = skb->tstamp; in tcp_fragment()
1109 old_factor = tcp_skb_pcount(skb); in tcp_fragment()
1112 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_fragment()
1119 int diff = old_factor - tcp_skb_pcount(skb) - in tcp_fragment()
1123 tcp_adjust_pcount(sk, skb, diff); in tcp_fragment()
1128 tcp_insert_write_queue_after(skb, buff, sk); in tcp_fragment()
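
Lines 1077-1079 are the heart of the split: buff takes over [seq + len, end_seq) while skb shrinks to [seq, seq + len). Line 1083 strips FIN and PSH from the head half; the unlisted neighbouring line gives buff the original flags, so only the tail half can carry them. The sequence bookkeeping, with hypothetical numbers:

    #include <stdio.h>

    /* Sequence split performed by tcp_fragment() at offset len
     * (compare lines 1077-1079). Values are made up. */
    int main(void)
    {
        unsigned int seq = 10000, end_seq = 12896, len = 1448;
        unsigned int buff_seq, buff_end, skb_end;

        buff_seq = seq + len;   /* tail starts at the cut          */
        buff_end = end_seq;     /* and inherits the old right edge */
        skb_end  = buff_seq;    /* head now ends at the cut        */

        printf("skb  [%u,%u)\nbuff [%u,%u)\n",
               seq, skb_end, buff_seq, buff_end);
        return 0;
    }
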
1137 static void __pskb_trim_head(struct sk_buff *skb, int len) in __pskb_trim_head() argument
1141 eat = min_t(int, len, skb_headlen(skb)); in __pskb_trim_head()
1143 __skb_pull(skb, eat); in __pskb_trim_head()
1150 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) { in __pskb_trim_head()
1151 int size = skb_frag_size(&skb_shinfo(skb)->frags[i]); in __pskb_trim_head()
1154 skb_frag_unref(skb, i); in __pskb_trim_head()
1157 skb_shinfo(skb)->frags[k] = skb_shinfo(skb)->frags[i]; in __pskb_trim_head()
1159 skb_shinfo(skb)->frags[k].page_offset += eat; in __pskb_trim_head()
1160 skb_frag_size_sub(&skb_shinfo(skb)->frags[k], eat); in __pskb_trim_head()
1166 skb_shinfo(skb)->nr_frags = k; in __pskb_trim_head()
1168 skb_reset_tail_pointer(skb); in __pskb_trim_head()
1169 skb->data_len -= len; in __pskb_trim_head()
1170 skb->len = skb->data_len; in __pskb_trim_head()
1174 int tcp_trim_head(struct sock *sk, struct sk_buff *skb, u32 len) in tcp_trim_head() argument
1176 if (skb_unclone(skb, GFP_ATOMIC)) in tcp_trim_head()
1179 __pskb_trim_head(skb, len); in tcp_trim_head()
1181 TCP_SKB_CB(skb)->seq += len; in tcp_trim_head()
1182 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_trim_head()
1184 skb->truesize -= len; in tcp_trim_head()
1190 if (tcp_skb_pcount(skb) > 1) in tcp_trim_head()
1191 tcp_set_skb_tso_segs(sk, skb, tcp_skb_mss(skb)); in tcp_trim_head()
1383 static unsigned int tcp_mss_split_point(const struct sock *sk, const struct sk_buff *skb, in tcp_mss_split_point() argument
1389 window = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_mss_split_point()
1392 if (likely(max_len <= window && skb != tcp_write_queue_tail(sk))) in tcp_mss_split_point()
1395 needed = min(skb->len, window); in tcp_mss_split_point()
1407 const struct sk_buff *skb) in tcp_cwnd_test() argument
1412 if ((TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && in tcp_cwnd_test()
1413 tcp_skb_pcount(skb) == 1) in tcp_cwnd_test()
1428 static int tcp_init_tso_segs(const struct sock *sk, struct sk_buff *skb, in tcp_init_tso_segs() argument
1431 int tso_segs = tcp_skb_pcount(skb); in tcp_init_tso_segs()
1433 if (!tso_segs || (tso_segs > 1 && tcp_skb_mss(skb) != mss_now)) { in tcp_init_tso_segs()
1434 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_init_tso_segs()
1435 tso_segs = tcp_skb_pcount(skb); in tcp_init_tso_segs()
1455 const struct sk_buff *skb, in tcp_nagle_check() argument
1458 return skb->len < mss_now && in tcp_nagle_check()
1466 static inline bool tcp_nagle_test(const struct tcp_sock *tp, const struct sk_buff *skb, in tcp_nagle_test() argument
1479 if (tcp_urg_mode(tp) || (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)) in tcp_nagle_test()
1482 if (!tcp_nagle_check(tp, skb, cur_mss, nonagle)) in tcp_nagle_test()
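
tcp_nagle_test() sends immediately for urgent data or a FIN (line 1479); otherwise tcp_nagle_check() (line 1458) applies the Nagle/Minshall rule: hold back a sub-MSS segment while earlier data is still unacknowledged. A simplified boolean model that ignores the cork and TCP_NODELAY knobs present in the full condition:

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified Nagle/Minshall decision: defer a short segment while
     * anything is in flight. Cork/nonagle handling is omitted. */
    static bool nagle_defers(unsigned int len, unsigned int mss,
                             unsigned int packets_out)
    {
        return len < mss && packets_out > 0;
    }

    int main(void)
    {
        printf("%d\n", nagle_defers(100, 1448, 2));   /* 1: hold back  */
        printf("%d\n", nagle_defers(100, 1448, 0));   /* 0: idle, send */
        printf("%d\n", nagle_defers(1448, 1448, 2));  /* 0: full-sized */
        return 0;
    }
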
1490 const struct sk_buff *skb, in tcp_snd_wnd_test() argument
1493 u32 end_seq = TCP_SKB_CB(skb)->end_seq; in tcp_snd_wnd_test()
1495 if (skb->len > cur_mss) in tcp_snd_wnd_test()
1496 end_seq = TCP_SKB_CB(skb)->seq + cur_mss; in tcp_snd_wnd_test()
1505 static unsigned int tcp_snd_test(const struct sock *sk, struct sk_buff *skb, in tcp_snd_test() argument
1511 tcp_init_tso_segs(sk, skb, cur_mss); in tcp_snd_test()
1513 if (!tcp_nagle_test(tp, skb, cur_mss, nonagle)) in tcp_snd_test()
1516 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_snd_test()
1517 if (cwnd_quota && !tcp_snd_wnd_test(tp, skb, cur_mss)) in tcp_snd_test()
1527 struct sk_buff *skb = tcp_send_head(sk); in tcp_may_send_now() local
1529 return skb && in tcp_may_send_now()
1530 tcp_snd_test(sk, skb, tcp_current_mss(sk), in tcp_may_send_now()
1531 (tcp_skb_is_last(sk, skb) ? in tcp_may_send_now()
1542 static int tso_fragment(struct sock *sk, struct sk_buff *skb, unsigned int len, in tso_fragment() argument
1546 int nlen = skb->len - len; in tso_fragment()
1550 if (skb->len != skb->data_len) in tso_fragment()
1551 return tcp_fragment(sk, skb, len, mss_now); in tso_fragment()
1560 skb->truesize -= nlen; in tso_fragment()
1563 TCP_SKB_CB(buff)->seq = TCP_SKB_CB(skb)->seq + len; in tso_fragment()
1564 TCP_SKB_CB(buff)->end_seq = TCP_SKB_CB(skb)->end_seq; in tso_fragment()
1565 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(buff)->seq; in tso_fragment()
1568 flags = TCP_SKB_CB(skb)->tcp_flags; in tso_fragment()
1569 TCP_SKB_CB(skb)->tcp_flags = flags & ~(TCPHDR_FIN | TCPHDR_PSH); in tso_fragment()
1575 buff->ip_summed = skb->ip_summed = CHECKSUM_PARTIAL; in tso_fragment()
1576 skb_split(skb, buff, len); in tso_fragment()
1579 tcp_set_skb_tso_segs(sk, skb, mss_now); in tso_fragment()
1584 tcp_insert_write_queue_after(skb, buff, sk); in tso_fragment()
1594 static bool tcp_tso_should_defer(struct sock *sk, struct sk_buff *skb) in tcp_tso_should_defer() argument
1601 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) in tcp_tso_should_defer()
1614 BUG_ON(tcp_skb_pcount(skb) <= 1 || (tp->snd_cwnd <= in_flight)); in tcp_tso_should_defer()
1616 send_win = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_tso_should_defer()
1629 if ((skb != tcp_write_queue_tail(sk)) && (limit >= skb->len)) in tcp_tso_should_defer()
1678 struct sk_buff *skb, *nskb, *next; in tcp_mtu_probe() local
1728 skb = tcp_send_head(sk); in tcp_mtu_probe()
1730 TCP_SKB_CB(nskb)->seq = TCP_SKB_CB(skb)->seq; in tcp_mtu_probe()
1731 TCP_SKB_CB(nskb)->end_seq = TCP_SKB_CB(skb)->seq + probe_size; in tcp_mtu_probe()
1735 nskb->ip_summed = skb->ip_summed; in tcp_mtu_probe()
1737 tcp_insert_write_queue_before(nskb, skb, sk); in tcp_mtu_probe()
1740 tcp_for_write_queue_from_safe(skb, next, sk) { in tcp_mtu_probe()
1741 copy = min_t(int, skb->len, probe_size - len); in tcp_mtu_probe()
1743 skb_copy_bits(skb, 0, skb_put(nskb, copy), copy); in tcp_mtu_probe()
1745 nskb->csum = skb_copy_and_csum_bits(skb, 0, in tcp_mtu_probe()
1749 if (skb->len <= copy) { in tcp_mtu_probe()
1752 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags; in tcp_mtu_probe()
1753 tcp_unlink_write_queue(skb, sk); in tcp_mtu_probe()
1754 sk_wmem_free_skb(sk, skb); in tcp_mtu_probe()
1756 TCP_SKB_CB(nskb)->tcp_flags |= TCP_SKB_CB(skb)->tcp_flags & in tcp_mtu_probe()
1758 if (!skb_shinfo(skb)->nr_frags) { in tcp_mtu_probe()
1759 skb_pull(skb, copy); in tcp_mtu_probe()
1760 if (skb->ip_summed != CHECKSUM_PARTIAL) in tcp_mtu_probe()
1761 skb->csum = csum_partial(skb->data, in tcp_mtu_probe()
1762 skb->len, 0); in tcp_mtu_probe()
1764 __pskb_trim_head(skb, copy); in tcp_mtu_probe()
1765 tcp_set_skb_tso_segs(sk, skb, mss_now); in tcp_mtu_probe()
1767 TCP_SKB_CB(skb)->seq += copy; in tcp_mtu_probe()
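
The probe is assembled by draining the write queue: line 1741 copies min(skb->len, probe_size - len) out of each queued skb into nskb; a donor that is fully consumed is unlinked and freed (lines 1749-1754), while a partially consumed one is trimmed at the head and has its seq advanced by the amount taken (lines 1758-1767). A model of that consumption loop over a made-up queue:

    #include <stdio.h>

    /* Model of the copy loop in tcp_mtu_probe(): drain queued skbs into
     * one probe of probe_size bytes. Lengths are hypothetical. */
    int main(void)
    {
        unsigned int queue[] = { 600, 1448, 1448 };
        unsigned int probe_size = 2048, len = 0;

        for (unsigned int i = 0; i < 3 && len < probe_size; i++) {
            unsigned int room = probe_size - len;
            unsigned int copy = queue[i] < room ? queue[i] : room;

            len += copy;
            if (queue[i] <= copy)
                printf("skb %u fully consumed, unlinked\n", i);
            else
                printf("skb %u trimmed, seq += %u\n", i, copy);
        }
        printf("probe carries %u bytes\n", len);
        return 0;
    }
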
1814 struct sk_buff *skb; in tcp_write_xmit() local
1831 while ((skb = tcp_send_head(sk))) { in tcp_write_xmit()
1835 tso_segs = tcp_init_tso_segs(sk, skb, mss_now); in tcp_write_xmit()
1841 cwnd_quota = tcp_cwnd_test(tp, skb); in tcp_write_xmit()
1850 if (unlikely(!tcp_snd_wnd_test(tp, skb, mss_now))) in tcp_write_xmit()
1854 if (unlikely(!tcp_nagle_test(tp, skb, mss_now, in tcp_write_xmit()
1855 (tcp_skb_is_last(sk, skb) ? in tcp_write_xmit()
1859 if (!push_one && tcp_tso_should_defer(sk, skb)) in tcp_write_xmit()
1872 limit = tcp_mss_split_point(sk, skb, mss_now, in tcp_write_xmit()
1877 if (skb->len > limit && in tcp_write_xmit()
1878 unlikely(tso_fragment(sk, skb, limit, mss_now, gfp))) in tcp_write_xmit()
1881 TCP_SKB_CB(skb)->when = tcp_time_stamp; in tcp_write_xmit()
1883 if (unlikely(tcp_transmit_skb(sk, skb, 1, gfp))) in tcp_write_xmit()
1890 tcp_event_new_data_sent(sk, skb); in tcp_write_xmit()
1892 tcp_minshall_update(tp, mss_now, skb); in tcp_write_xmit()
1893 sent_pkts += tcp_skb_pcount(skb); in tcp_write_xmit()
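
These hits outline the main transmit loop: for each skb at the send head, refresh its TSO state (line 1835), then gate on the congestion window (1841), the receiver window (1850), Nagle (1854), and TSO deferral (1859); split at the computed limit if the skb is too long (1872-1878); timestamp and transmit (1881-1883); finally advance snd_nxt and the counters (1890-1893). A sketch of the gate order, with every predicate stubbed out:

    #include <stdbool.h>
    #include <stdio.h>

    /* Gate order of tcp_write_xmit(); each test is a stub standing in
     * for the real predicate at the quoted line. */
    static bool cwnd_ok(void)   { return true;  }  /* 1841 */
    static bool rwnd_ok(void)   { return true;  }  /* 1850 */
    static bool nagle_ok(void)  { return true;  }  /* 1854 */
    static bool defer_tso(void) { return false; }  /* 1859 */

    int main(void)
    {
        int sent = 0;

        for (int queued = 3; queued > 0; queued--) {
            if (!cwnd_ok() || !rwnd_ok() || !nagle_ok() || defer_tso())
                break;  /* stop; an ACK or a timer resumes the loop */
            sent++;     /* fragment if needed, stamp when, transmit */
        }
        printf("sent %d segments\n", sent);
        return 0;
    }
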
1976 struct sk_buff *skb; in tcp_send_loss_probe() local
1991 skb = tcp_write_queue_tail(sk); in tcp_send_loss_probe()
1992 if (WARN_ON(!skb)) in tcp_send_loss_probe()
1995 pcount = tcp_skb_pcount(skb); in tcp_send_loss_probe()
1999 if ((pcount > 1) && (skb->len > (pcount - 1) * mss)) { in tcp_send_loss_probe()
2000 if (unlikely(tcp_fragment(sk, skb, (pcount - 1) * mss, mss))) in tcp_send_loss_probe()
2002 skb = tcp_write_queue_tail(sk); in tcp_send_loss_probe()
2005 if (WARN_ON(!skb || !tcp_skb_pcount(skb))) in tcp_send_loss_probe()
2009 if (skb->len > 0) in tcp_send_loss_probe()
2010 err = __tcp_retransmit_skb(sk, skb); in tcp_send_loss_probe()
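
Lines 1999-2002 keep the loss probe to at most one MSS: if the tail skb spans pcount segments and more than (pcount - 1) * mss bytes, it is fragmented at that boundary and the new, at-most-one-MSS tail is what gets (re)transmitted. Worked arithmetic:

    #include <stdio.h>

    /* TLP sizing check from tcp_send_loss_probe() line 1999, with
     * hypothetical sizes. */
    int main(void)
    {
        unsigned int mss = 1448, len = 3200;
        unsigned int pcount = (len + mss - 1) / mss;  /* 3 segments */

        if (pcount > 1 && len > (pcount - 1) * mss)
            printf("split at %u; probe carries %u bytes\n",
                   (pcount - 1) * mss,
                   len - (pcount - 1) * mss);  /* 2896, 304 */
        return 0;
    }
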
2051 struct sk_buff *skb = tcp_send_head(sk); in tcp_push_one() local
2053 BUG_ON(!skb || skb->len < mss_now); in tcp_push_one()
2176 static void tcp_collapse_retrans(struct sock *sk, struct sk_buff *skb) in tcp_collapse_retrans() argument
2179 struct sk_buff *next_skb = tcp_write_queue_next(sk, skb); in tcp_collapse_retrans()
2182 skb_size = skb->len; in tcp_collapse_retrans()
2185 BUG_ON(tcp_skb_pcount(skb) != 1 || tcp_skb_pcount(next_skb) != 1); in tcp_collapse_retrans()
2187 tcp_highest_sack_combine(sk, next_skb, skb); in tcp_collapse_retrans()
2191 skb_copy_from_linear_data(next_skb, skb_put(skb, next_skb_size), in tcp_collapse_retrans()
2195 skb->ip_summed = CHECKSUM_PARTIAL; in tcp_collapse_retrans()
2197 if (skb->ip_summed != CHECKSUM_PARTIAL) in tcp_collapse_retrans()
2198 skb->csum = csum_block_add(skb->csum, next_skb->csum, skb_size); in tcp_collapse_retrans()
2201 TCP_SKB_CB(skb)->end_seq = TCP_SKB_CB(next_skb)->end_seq; in tcp_collapse_retrans()
2204 TCP_SKB_CB(skb)->tcp_flags |= TCP_SKB_CB(next_skb)->tcp_flags; in tcp_collapse_retrans()
2209 TCP_SKB_CB(skb)->sacked |= TCP_SKB_CB(next_skb)->sacked & TCPCB_EVER_RETRANS; in tcp_collapse_retrans()
2214 tp->retransmit_skb_hint = skb; in tcp_collapse_retrans()
2222 static bool tcp_can_collapse(const struct sock *sk, const struct sk_buff *skb) in tcp_can_collapse() argument
2224 if (tcp_skb_pcount(skb) > 1) in tcp_can_collapse()
2227 if (skb_shinfo(skb)->nr_frags != 0) in tcp_can_collapse()
2229 if (skb_cloned(skb)) in tcp_can_collapse()
2231 if (skb == tcp_send_head(sk)) in tcp_can_collapse()
2234 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_ACKED) in tcp_can_collapse()
2247 struct sk_buff *skb = to, *tmp; in tcp_retrans_try_collapse() local
2252 if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN) in tcp_retrans_try_collapse()
2255 tcp_for_write_queue_from_safe(skb, tmp, sk) { in tcp_retrans_try_collapse()
2256 if (!tcp_can_collapse(sk, skb)) in tcp_retrans_try_collapse()
2259 space -= skb->len; in tcp_retrans_try_collapse()
2271 if (skb->len > skb_availroom(to)) in tcp_retrans_try_collapse()
2274 if (after(TCP_SKB_CB(skb)->end_seq, tcp_wnd_end(tp))) in tcp_retrans_try_collapse()
2285 int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in __tcp_retransmit_skb() argument
2303 if (before(TCP_SKB_CB(skb)->seq, tp->snd_una)) { in __tcp_retransmit_skb()
2304 if (before(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) in __tcp_retransmit_skb()
2306 if (tcp_trim_head(sk, skb, tp->snd_una - TCP_SKB_CB(skb)->seq)) in __tcp_retransmit_skb()
2320 if (!before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp)) && in __tcp_retransmit_skb()
2321 TCP_SKB_CB(skb)->seq != tp->snd_una) in __tcp_retransmit_skb()
2324 if (skb->len > cur_mss) { in __tcp_retransmit_skb()
2325 if (tcp_fragment(sk, skb, cur_mss, cur_mss)) in __tcp_retransmit_skb()
2328 int oldpcount = tcp_skb_pcount(skb); in __tcp_retransmit_skb()
2331 tcp_init_tso_segs(sk, skb, cur_mss); in __tcp_retransmit_skb()
2332 tcp_adjust_pcount(sk, skb, oldpcount - tcp_skb_pcount(skb)); in __tcp_retransmit_skb()
2336 tcp_retrans_try_collapse(sk, skb, cur_mss); in __tcp_retransmit_skb()
2342 if (skb->len > 0 && in __tcp_retransmit_skb()
2343 (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN) && in __tcp_retransmit_skb()
2344 tp->snd_una == (TCP_SKB_CB(skb)->end_seq - 1)) { in __tcp_retransmit_skb()
2345 if (!pskb_trim(skb, 0)) { in __tcp_retransmit_skb()
2347 tcp_init_nondata_skb(skb, TCP_SKB_CB(skb)->end_seq - 1, in __tcp_retransmit_skb()
2348 TCP_SKB_CB(skb)->tcp_flags); in __tcp_retransmit_skb()
2349 skb->ip_summed = CHECKSUM_NONE; in __tcp_retransmit_skb()
2356 TCP_SKB_CB(skb)->when = tcp_time_stamp; in __tcp_retransmit_skb()
2362 if (unlikely((NET_IP_ALIGN && ((unsigned long)skb->data & 3)) || in __tcp_retransmit_skb()
2363 skb_headroom(skb) >= 0xFFFF)) { in __tcp_retransmit_skb()
2364 struct sk_buff *nskb = __pskb_copy(skb, MAX_TCP_HEADER, in __tcp_retransmit_skb()
2369 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in __tcp_retransmit_skb()
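
Lines 2303-2306 handle a partially acknowledged skb: when snd_una has passed seq but not end_seq, tcp_trim_head() (line 1174 above) drops the acked prefix and advances seq so only unacknowledged bytes go back out; lines 2324-2325 then re-fragment if the path MSS shrank since the original send. A model of the trim, using plain comparisons where the kernel uses the wrap-safe before():

    #include <stdio.h>

    /* Partial-ACK trim from __tcp_retransmit_skb() lines 2303-2306.
     * Plain compares stand in for before(); values are hypothetical. */
    int main(void)
    {
        unsigned int seq = 7000, end_seq = 9000, snd_una = 7500;

        if (snd_una > seq && snd_una < end_seq) {  /* partially acked */
            seq += snd_una - seq;                  /* tcp_trim_head() */
            printf("retransmit [%u,%u), %u bytes\n",
                   seq, end_seq, end_seq - seq);   /* [7500,9000), 1500 */
        }
        return 0;
    }
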
2373 int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb) in tcp_retransmit_skb() argument
2376 int err = __tcp_retransmit_skb(sk, skb); in tcp_retransmit_skb()
2385 if (TCP_SKB_CB(skb)->sacked & TCPCB_SACKED_RETRANS) { in tcp_retransmit_skb()
2391 TCP_SKB_CB(skb)->sacked |= TCPCB_RETRANS; in tcp_retransmit_skb()
2392 tp->retrans_out += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2396 tp->retrans_stamp = TCP_SKB_CB(skb)->when; in tcp_retransmit_skb()
2398 tp->undo_retrans += tcp_skb_pcount(skb); in tcp_retransmit_skb()
2403 TCP_SKB_CB(skb)->ack_seq = tp->snd_nxt; in tcp_retransmit_skb()
2450 struct sk_buff *skb; in tcp_xmit_retransmit_queue() local
2463 skb = tp->retransmit_skb_hint; in tcp_xmit_retransmit_queue()
2464 last_lost = TCP_SKB_CB(skb)->end_seq; in tcp_xmit_retransmit_queue()
2468 skb = tcp_write_queue_head(sk); in tcp_xmit_retransmit_queue()
2472 tcp_for_write_queue_from(skb, sk) { in tcp_xmit_retransmit_queue()
2473 __u8 sacked = TCP_SKB_CB(skb)->sacked; in tcp_xmit_retransmit_queue()
2475 if (skb == tcp_send_head(sk)) in tcp_xmit_retransmit_queue()
2479 tp->retransmit_skb_hint = skb; in tcp_xmit_retransmit_queue()
2493 if (!before(TCP_SKB_CB(skb)->seq, tcp_highest_sack_seq(tp))) in tcp_xmit_retransmit_queue()
2497 } else if (!before(TCP_SKB_CB(skb)->seq, tp->retransmit_high)) { in tcp_xmit_retransmit_queue()
2503 skb = hole; in tcp_xmit_retransmit_queue()
2511 hole = skb; in tcp_xmit_retransmit_queue()
2515 last_lost = TCP_SKB_CB(skb)->end_seq; in tcp_xmit_retransmit_queue()
2525 if (tcp_retransmit_skb(sk, skb)) { in tcp_xmit_retransmit_queue()
2532 tp->prr_out += tcp_skb_pcount(skb); in tcp_xmit_retransmit_queue()
2534 if (skb == tcp_write_queue_head(sk)) in tcp_xmit_retransmit_queue()
2547 struct sk_buff *skb = tcp_write_queue_tail(sk); in tcp_send_fin() local
2557 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_FIN; in tcp_send_fin()
2558 TCP_SKB_CB(skb)->end_seq++; in tcp_send_fin()
2563 skb = alloc_skb_fclone(MAX_TCP_HEADER, in tcp_send_fin()
2565 if (skb) in tcp_send_fin()
2571 skb_reserve(skb, MAX_TCP_HEADER); in tcp_send_fin()
2573 tcp_init_nondata_skb(skb, tp->write_seq, in tcp_send_fin()
2575 tcp_queue_skb(sk, skb); in tcp_send_fin()
2587 struct sk_buff *skb; in tcp_send_active_reset() local
2590 skb = alloc_skb(MAX_TCP_HEADER, priority); in tcp_send_active_reset()
2591 if (!skb) { in tcp_send_active_reset()
2597 skb_reserve(skb, MAX_TCP_HEADER); in tcp_send_active_reset()
2598 tcp_init_nondata_skb(skb, tcp_acceptable_seq(sk), in tcp_send_active_reset()
2601 TCP_SKB_CB(skb)->when = tcp_time_stamp; in tcp_send_active_reset()
2602 if (tcp_transmit_skb(sk, skb, 0, priority)) in tcp_send_active_reset()
2616 struct sk_buff *skb; in tcp_send_synack() local
2618 skb = tcp_write_queue_head(sk); in tcp_send_synack()
2619 if (skb == NULL || !(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_SYN)) { in tcp_send_synack()
2623 if (!(TCP_SKB_CB(skb)->tcp_flags & TCPHDR_ACK)) { in tcp_send_synack()
2624 if (skb_cloned(skb)) { in tcp_send_synack()
2625 struct sk_buff *nskb = skb_copy(skb, GFP_ATOMIC); in tcp_send_synack()
2628 tcp_unlink_write_queue(skb, sk); in tcp_send_synack()
2631 sk_wmem_free_skb(sk, skb); in tcp_send_synack()
2634 skb = nskb; in tcp_send_synack()
2637 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_ACK; in tcp_send_synack()
2638 TCP_ECN_send_synack(tcp_sk(sk), skb); in tcp_send_synack()
2640 TCP_SKB_CB(skb)->when = tcp_time_stamp; in tcp_send_synack()
2641 return tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_send_synack()
2661 struct sk_buff *skb; in tcp_make_synack() local
2666 skb = sock_wmalloc(sk, MAX_TCP_HEADER + 15, 1, GFP_ATOMIC); in tcp_make_synack()
2667 if (unlikely(!skb)) { in tcp_make_synack()
2672 skb_reserve(skb, MAX_TCP_HEADER); in tcp_make_synack()
2674 skb_dst_set(skb, dst); in tcp_make_synack()
2675 security_skb_owned_by(skb, sk); in tcp_make_synack()
2705 TCP_SKB_CB(skb)->when = cookie_init_timestamp(req); in tcp_make_synack()
2708 TCP_SKB_CB(skb)->when = tcp_time_stamp; in tcp_make_synack()
2709 tcp_header_size = tcp_synack_options(sk, req, mss, skb, &opts, &md5, in tcp_make_synack()
2712 skb_push(skb, tcp_header_size); in tcp_make_synack()
2713 skb_reset_transport_header(skb); in tcp_make_synack()
2715 th = tcp_hdr(skb); in tcp_make_synack()
2725 tcp_init_nondata_skb(skb, tcp_rsk(req)->snt_isn, in tcp_make_synack()
2728 th->seq = htonl(TCP_SKB_CB(skb)->seq); in tcp_make_synack()
2736 TCP_ADD_STATS(sock_net(sk), TCP_MIB_OUTSEGS, tcp_skb_pcount(skb)); in tcp_make_synack()
2742 md5, NULL, req, skb); in tcp_make_synack()
2746 return skb; in tcp_make_synack()
2818 static void tcp_connect_queue_skb(struct sock *sk, struct sk_buff *skb) in tcp_connect_queue_skb() argument
2821 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb); in tcp_connect_queue_skb()
2823 tcb->end_seq += skb->len; in tcp_connect_queue_skb()
2824 skb_header_release(skb); in tcp_connect_queue_skb()
2825 __tcp_add_write_queue_tail(sk, skb); in tcp_connect_queue_skb()
2826 sk->sk_wmem_queued += skb->truesize; in tcp_connect_queue_skb()
2827 sk_mem_charge(sk, skb->truesize); in tcp_connect_queue_skb()
2829 tp->packets_out += tcp_skb_pcount(skb); in tcp_connect_queue_skb()
3068 struct sk_buff *skb; in tcp_xmit_probe_skb() local
3071 skb = alloc_skb(MAX_TCP_HEADER, sk_gfp_atomic(sk, GFP_ATOMIC)); in tcp_xmit_probe_skb()
3072 if (skb == NULL) in tcp_xmit_probe_skb()
3076 skb_reserve(skb, MAX_TCP_HEADER); in tcp_xmit_probe_skb()
3081 tcp_init_nondata_skb(skb, tp->snd_una - !urgent, TCPHDR_ACK); in tcp_xmit_probe_skb()
3082 TCP_SKB_CB(skb)->when = tcp_time_stamp; in tcp_xmit_probe_skb()
3083 return tcp_transmit_skb(sk, skb, 0, GFP_ATOMIC); in tcp_xmit_probe_skb()
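
Line 3081 is the classic window-probe trick: with urgent mode off, the probe is a zero-length segment whose sequence number, snd_una - 1, is already acknowledged. The peer discards it but must reply with an ACK re-advertising its current window, which is all the prober wants. The seq computation:

    #include <stdio.h>

    /* Probe sequence choice from tcp_xmit_probe_skb() line 3081:
     * snd_una - !urgent. Values are illustrative. */
    int main(void)
    {
        unsigned int snd_una = 42000;
        int urgent = 0;

        printf("probe seq = %u (snd_una = %u)\n",
               snd_una - !urgent, snd_una);  /* 41999 */
        return 0;
    }
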
3099 struct sk_buff *skb; in tcp_write_wakeup() local
3104 if ((skb = tcp_send_head(sk)) != NULL && in tcp_write_wakeup()
3105 before(TCP_SKB_CB(skb)->seq, tcp_wnd_end(tp))) { in tcp_write_wakeup()
3108 unsigned int seg_size = tcp_wnd_end(tp) - TCP_SKB_CB(skb)->seq; in tcp_write_wakeup()
3110 if (before(tp->pushed_seq, TCP_SKB_CB(skb)->end_seq)) in tcp_write_wakeup()
3111 tp->pushed_seq = TCP_SKB_CB(skb)->end_seq; in tcp_write_wakeup()
3117 if (seg_size < TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq || in tcp_write_wakeup()
3118 skb->len > mss) { in tcp_write_wakeup()
3120 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
3121 if (tcp_fragment(sk, skb, seg_size, mss)) in tcp_write_wakeup()
3123 } else if (!tcp_skb_pcount(skb)) in tcp_write_wakeup()
3124 tcp_set_skb_tso_segs(sk, skb, mss); in tcp_write_wakeup()
3126 TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH; in tcp_write_wakeup()
3127 TCP_SKB_CB(skb)->when = tcp_time_stamp; in tcp_write_wakeup()
3128 err = tcp_transmit_skb(sk, skb, 1, GFP_ATOMIC); in tcp_write_wakeup()
3130 tcp_event_new_data_sent(sk, skb); in tcp_write_wakeup()