Searched refs:sk_wmem_queued (Results 1 – 23 of 23) sorted by relevance
211 WARN_ON(sk->sk_wmem_queued); in sk_stream_kill_queues()
1919 newsk->sk_wmem_queued = 0; in sk_clone_lock()
2632 if (sk->sk_wmem_queued < wmem0) in __sk_mem_raise_allocated()
2646 sk_mem_pages(sk->sk_wmem_queued + in __sk_mem_raise_allocated()
2660 if (sk->sk_wmem_queued + size >= sk->sk_sndbuf) in __sk_mem_raise_allocated()
3316 mem[SK_MEMINFO_WMEM_QUEUED] = READ_ONCE(sk->sk_wmem_queued); in sk_get_meminfo()
438 int sk_wmem_queued; member
976 return READ_ONCE(sk->sk_wmem_queued) >> 1; in sk_stream_min_wspace()
981 return READ_ONCE(sk->sk_sndbuf) - READ_ONCE(sk->sk_wmem_queued); in sk_stream_wspace()
986 WRITE_ONCE(sk->sk_wmem_queued, sk->sk_wmem_queued + val); in sk_wmem_queued_add()
1304 if (READ_ONCE(sk->sk_wmem_queued) >= READ_ONCE(sk->sk_sndbuf)) in __sk_stream_memory_free()
2354 val = min(sk->sk_sndbuf, sk->sk_wmem_queued >> 1); in sk_stream_moderate_sndbuf()
289 if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF && in tcp_out_of_memory()
709 sk->sk_wmem_queued -= total_size; in chtls_push_frames()
827 sk->sk_wmem_queued += skb->truesize; in skb_entail()
901 sk->sk_wmem_queued += copy; in chtls_skb_copy_to_page_nocache()
907 return (cdev->max_host_sndbuf - sk->sk_wmem_queued > 0); in csk_mem_free()
1217 sk->sk_wmem_queued -= skb->truesize; in chtls_sendmsg()
1297 sk->sk_wmem_queued += copy; in chtls_sendpage()
185 sk->sk_wmem_queued -= skb->truesize; in chtls_purge_write_queue()
121 __entry->wmem_queued = READ_ONCE(sk->sk_wmem_queued);
287 sk->sk_wmem_queued, in sctp_assocs_seq_show()
173 mem[SK_MEMINFO_WMEM_QUEUED] = sk->sk_wmem_queued; in inet_sctp_diag_fill()
141 sk->sk_wmem_queued += chunk->skb->truesize + sizeof(struct sctp_chunk); in sctp_set_owner_w()
8879 sk->sk_wmem_queued -= skb->truesize + sizeof(struct sctp_chunk); in sctp_wfree()
9036 return sk->sk_sndbuf > sk->sk_wmem_queued; in sctp_writeable()
372 sk->sk_wmem_queued += skb->truesize; in tcp_nip_connect_queue_skb()
616 sk->sk_wmem_queued += skb->truesize; in tcp_nip_queue_skb()
477 WARN_ON(sk->sk_wmem_queued); in sk_nip_stream_kill_queues()
1421 sk->sk_wmem_queued += skb->truesize; in skb_nip_entail()
449 dst->value = READ_ONCE(sk->sk_wmem_queued); in META_COLLECTOR()
707 sk->sk_wmem_queued -= sent; in kcm_write_msgs()
848 sk->sk_wmem_queued += size; in kcm_sendpage()
1554 if (unlikely((sk->sk_wmem_queued >> 1) > limit && in tcp_fragment()
3155 min_t(u32, sk->sk_wmem_queued + (sk->sk_wmem_queued >> 2), in __tcp_retransmit_skb()
274 .idiag_wmem = READ_ONCE(sk->sk_wmem_queued), in inet_sk_diag_fill()
156 WARN_ON(sk->sk_wmem_queued); in inet_sock_destruct()
636 int queued = sk->sk_wmem_queued; in make_resync_request()
1508 if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5) in drbd_update_congested()
312 WARN_ON(sk->sk_wmem_queued); in iucv_sock_destruct()
1140 ratio = div_u64((u64)READ_ONCE(ssk->sk_wmem_queued) << 32, in mptcp_subflow_get_send()
59776 + if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
364399 + if (unlikely((sk->sk_wmem_queued >> 1) > sk->sk_sndbuf)) {
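
Note: taken together, the hits above follow one accounting pattern: sk_wmem_queued is increased by skb->truesize (or the copied byte count) when data is queued for transmit, decreased by the same amount when the skb is freed, and compared against sk_sndbuf to decide whether a writer may queue more. The following standalone C sketch illustrates that pattern only; it is a simplified assumption for readability, not kernel code, and the struct and helper names below are hypothetical.

/* Sketch of the charge/uncharge pattern visible in the hits above. */
#include <stdio.h>

struct sock_sketch {
	int sk_wmem_queued;   /* bytes of skbs sitting in the write queue */
	int sk_sndbuf;        /* send-buffer limit */
};

/* Mirrors the role of sk_wmem_queued_add(): one signed helper covers both
 * the "+= skb->truesize" (queue) and "-= skb->truesize" (free) call sites. */
static void wmem_queued_add(struct sock_sketch *sk, int val)
{
	sk->sk_wmem_queued += val;
}

/* Mirrors the role of sk_stream_wspace(): remaining room in the write queue. */
static int stream_wspace(const struct sock_sketch *sk)
{
	return sk->sk_sndbuf - sk->sk_wmem_queued;
}

int main(void)
{
	struct sock_sketch sk = { .sk_wmem_queued = 0, .sk_sndbuf = 4096 };
	int truesize = 1500;              /* hypothetical skb->truesize */

	wmem_queued_add(&sk, truesize);   /* skb queued for transmit */
	printf("queued=%d wspace=%d\n", sk.sk_wmem_queued, stream_wspace(&sk));

	wmem_queued_add(&sk, -truesize);  /* skb freed after transmit */
	printf("queued=%d wspace=%d\n", sk.sk_wmem_queued, stream_wspace(&sk));
	return 0;
}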