
Searched refs:sk_write_queue (Results 1 – 25 of 53) sorted by relevance
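
All of the hits below touch the same object: sk_write_queue, the per-socket send queue embedded in struct sock. It is a struct sk_buff_head, i.e. the head of a doubly linked list of sk_buffs with its own spinlock and length counter, manipulated through the generic skb_queue_*() helpers from include/linux/skbuff.h (locked variants) or their __skb_queue_*() counterparts (unlocked, caller provides serialization). A trimmed view of the relevant declarations, simplified for orientation:

        /* Simplified from include/linux/skbuff.h and include/net/sock.h (5.10);
         * unrelated fields omitted. */
        struct sk_buff_head {
                struct sk_buff  *next;
                struct sk_buff  *prev;
                __u32           qlen;   /* read via skb_queue_len() or .qlen in the hits */
                spinlock_t      lock;   /* taken by skb_queue_tail(), skb_dequeue(), ... */
        };

        struct sock {
                /* ... */
                struct sk_buff_head     sk_write_queue; /* data queued for transmission */
                /* ... */
        };
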


/kernel/linux/linux-5.10/net/dccp/
qpolicy.c 17 skb_queue_tail(&sk->sk_write_queue, skb); in qpolicy_simple_push()
23 sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen; in qpolicy_simple_full()
28 return skb_peek(&sk->sk_write_queue); in qpolicy_simple_top()
40 skb_queue_walk(&sk->sk_write_queue, skb) in qpolicy_prio_best_skb()
50 skb_queue_walk(&sk->sk_write_queue, skb) in qpolicy_prio_worst_skb()
106 skb_unlink(skb, &sk->sk_write_queue); in dccp_qpolicy_drop()
123 skb_unlink(skb, &sk->sk_write_queue); in dccp_qpolicy_pop()
output.c 310 while (*time_budget > 0 && (skb = skb_peek(&sk->sk_write_queue))) { in dccp_flush_write_queue()
336 skb_dequeue(&sk->sk_write_queue); in dccp_flush_write_queue()
proto.c 215 __skb_queue_purge(&sk->sk_write_queue); in dccp_destroy_sock()
285 __skb_queue_purge(&sk->sk_write_queue); in dccp_disconnect()
1037 __skb_queue_purge(&sk->sk_write_queue); in dccp_close()
input.c 181 if (sk->sk_write_queue.qlen > 0 || !(sk->sk_shutdown & SEND_SHUTDOWN)) in dccp_deliver_input_to_ccids()
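
The qpolicy.c hits show DCCP's simple FIFO TX policy built directly on the queue helpers: push appends under the queue lock, "full" compares the queue length against the per-socket limit dccps_tx_qlen, and "top" peeks without dequeuing. A minimal sketch reconstructed from the hits above, not verbatim kernel code; dccp_sk() and dccps_tx_qlen are the DCCP socket accessors named in the hits:

        static void qpolicy_simple_push(struct sock *sk, struct sk_buff *skb)
        {
                skb_queue_tail(&sk->sk_write_queue, skb);       /* append, queue lock taken internally */
        }

        static bool qpolicy_simple_full(struct sock *sk)
        {
                return sk->sk_write_queue.qlen >= dccp_sk(sk)->dccps_tx_qlen;
        }

        static struct sk_buff *qpolicy_simple_top(struct sock *sk)
        {
                return skb_peek(&sk->sk_write_queue);           /* oldest queued skb, NULL if empty */
        }
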
/kernel/linux/linux-5.10/net/x25/
x25_out.c 100 skb_queue_tail(&sk->sk_write_queue, skbn); in x25_output()
106 skb_queue_tail(&sk->sk_write_queue, skb); in x25_output()
159 if (!skb_peek(&sk->sk_write_queue)) in x25_kick()
177 skb = skb_dequeue(&sk->sk_write_queue); in x25_kick()
181 skb_queue_head(&sk->sk_write_queue, skb); in x25_kick()
200 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); in x25_kick()
x25_subr.c 38 skb_queue_purge(&sk->sk_write_queue); in x25_clear_queues()
79 skb_queue_head(&sk->sk_write_queue, skb); in x25_requeue_frames()
81 skb_append(skb_prev, skb, &sk->sk_write_queue); in x25_requeue_frames()
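
x25_out.c and x25_subr.c show the dequeue/requeue shape shared by the connection-oriented protocols in this list (the ROSE and NetRom hits below are near copies): a "kick" routine drains sk_write_queue while the transmit window allows, and frames that cannot go out yet are put back at the head so ordering is preserved. A simplified sketch of that loop; frame_fits_window() is a hypothetical stand-in for the protocol's window check and transmit step:

        static void kick_write_queue(struct sock *sk)
        {
                struct sk_buff *skb;

                if (!skb_peek(&sk->sk_write_queue))             /* nothing pending */
                        return;

                while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) {
                        if (!frame_fits_window(sk, skb)) {
                                /* window closed: put the frame back in front and stop */
                                skb_queue_head(&sk->sk_write_queue, skb);
                                break;
                        }
                        /* the real code clones the skb, transmits the clone and
                         * keeps the original for possible retransmission */
                }
        }
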
/kernel/linux/linux-5.10/net/rose/
rose_out.c 57 if (!skb_peek(&sk->sk_write_queue)) in rose_kick()
73 skb = skb_dequeue(&sk->sk_write_queue); in rose_kick()
77 skb_queue_head(&sk->sk_write_queue, skb); in rose_kick()
96 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); in rose_kick()
rose_subr.c 34 skb_queue_purge(&sk->sk_write_queue); in rose_clear_queues()
71 skb_queue_head(&sk->sk_write_queue, skb); in rose_requeue_frames()
73 skb_append(skb_prev, skb, &sk->sk_write_queue); in rose_requeue_frames()
af_rose.c 1203 skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ in rose_sendmsg()
1209 skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ in rose_sendmsg()
1212 skb_queue_tail(&sk->sk_write_queue, skb); /* Shove it onto the queue */ in rose_sendmsg()
/kernel/linux/linux-5.10/net/nfc/
rawsock.c 40 spin_lock_bh(&sk->sk_write_queue.lock); in rawsock_write_queue_purge()
41 __skb_queue_purge(&sk->sk_write_queue); in rawsock_write_queue_purge()
43 spin_unlock_bh(&sk->sk_write_queue.lock); in rawsock_write_queue_purge()
158 spin_lock_bh(&sk->sk_write_queue.lock); in rawsock_data_exchange_complete()
159 if (!skb_queue_empty(&sk->sk_write_queue)) in rawsock_data_exchange_complete()
163 spin_unlock_bh(&sk->sk_write_queue.lock); in rawsock_data_exchange_complete()
191 skb = skb_dequeue(&sk->sk_write_queue); in rawsock_tx_work()
227 spin_lock_bh(&sk->sk_write_queue.lock); in rawsock_sendmsg()
228 __skb_queue_tail(&sk->sk_write_queue, skb); in rawsock_sendmsg()
233 spin_unlock_bh(&sk->sk_write_queue.lock); in rawsock_sendmsg()
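
rawsock.c shows the other locking style: instead of the locked helpers it takes the queue's own spinlock explicitly with spin_lock_bh() and uses the unlocked __skb_queue_*() variants inside, so the queue update and a related piece of state change within one critical section. A sketch of that pattern; the tx_pending flag is a hypothetical stand-in for the work-scheduling state the real code protects:

        static void queue_tx_skb(struct sock *sk, struct sk_buff *skb, bool *tx_pending)
        {
                spin_lock_bh(&sk->sk_write_queue.lock);
                __skb_queue_tail(&sk->sk_write_queue, skb);     /* unlocked variant: we hold the lock */
                if (!*tx_pending)
                        *tx_pending = true;                     /* real code schedules the TX work here */
                spin_unlock_bh(&sk->sk_write_queue.lock);
        }
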
/kernel/linux/linux-5.10/net/netrom/
nr_out.c 64 skb_queue_tail(&sk->sk_write_queue, skbn); /* Throw it on the queue */ in nr_output()
69 skb_queue_tail(&sk->sk_write_queue, skb); /* Throw it on the queue */ in nr_output()
134 if (!skb_peek(&sk->sk_write_queue)) in nr_kick()
153 skb = skb_dequeue(&sk->sk_write_queue); in nr_kick()
157 skb_queue_head(&sk->sk_write_queue, skb); in nr_kick()
176 (skb = skb_dequeue(&sk->sk_write_queue)) != NULL); in nr_kick()
nr_subr.c 35 skb_queue_purge(&sk->sk_write_queue); in nr_clear_queues()
74 skb_queue_head(&sk->sk_write_queue, skb); in nr_requeue_frames()
76 skb_append(skb_prev, skb, &sk->sk_write_queue); in nr_requeue_frames()
/kernel/linux/common_modules/newip/third_party/linux-5.10/include/net/
tcp_nip.h 130 __skb_queue_tail(&sk->sk_write_queue, skb); in tcp_nip_add_write_queue_tail()
140 while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) { in tcp_nip_write_queue_purge()
153 return skb_queue_empty(&sk->sk_write_queue); in tcp_nip_write_queue_empty()
/kernel/linux/linux-5.10/net/bluetooth/bnep/
netdev.c 104 skb_queue_tail(&sk->sk_write_queue, skb); in bnep_net_set_mc_list()
192 skb_queue_tail(&sk->sk_write_queue, skb); in bnep_net_xmit()
195 if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN) { in bnep_net_xmit()
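
netdev.c shows sk_write_queue used for backpressure between a virtual network device and the socket that carries its traffic: packets are queued for the session thread, and once the queue reaches BNEP_TX_QUEUE_LEN the netdev TX queue is stopped until the backlog drains. A simplified sketch of that check (error handling and the wake-up path omitted):

        static netdev_tx_t xmit_via_session_sock(struct sk_buff *skb,
                                                 struct net_device *dev,
                                                 struct sock *sk)
        {
                skb_queue_tail(&sk->sk_write_queue, skb);       /* hand off to the session thread */

                if (skb_queue_len(&sk->sk_write_queue) >= BNEP_TX_QUEUE_LEN)
                        netif_stop_queue(dev);                  /* apply backpressure to the stack */

                return NETDEV_TX_OK;
        }
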
/kernel/linux/linux-5.10/net/llc/
llc_conn.c 180 skb_queue_tail(&sk->sk_write_queue, skb); in llc_conn_send_pdu()
231 skb_queue_tail(&sk->sk_write_queue, skb); in llc_conn_resend_i_pdu_as_cmd()
272 skb_queue_tail(&sk->sk_write_queue, skb); in llc_conn_resend_i_pdu_as_rsp()
331 while ((skb = skb_dequeue(&sk->sk_write_queue)) != NULL) { in llc_conn_send_pdus()
969 skb_queue_len(&sk->sk_write_queue)); in llc_sk_free()
972 skb_queue_purge(&sk->sk_write_queue); in llc_sk_free()
1001 skb_queue_purge(&sk->sk_write_queue); in llc_sk_reset()
/kernel/linux/linux-5.10/net/kcm/
kcmsock.c 596 if (skb_queue_empty(&sk->sk_write_queue)) in kcm_write_msgs()
599 kcm_tx_msg(skb_peek(&sk->sk_write_queue))->sent = 0; in kcm_write_msgs()
601 } else if (skb_queue_empty(&sk->sk_write_queue)) { in kcm_write_msgs()
605 head = skb_peek(&sk->sk_write_queue); in kcm_write_msgs()
705 skb_dequeue(&sk->sk_write_queue); in kcm_write_msgs()
710 } while ((head = skb_peek(&sk->sk_write_queue))); in kcm_write_msgs()
714 WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); in kcm_write_msgs()
858 bool not_busy = skb_queue_empty(&sk->sk_write_queue); in kcm_sendpage()
861 __skb_queue_tail(&sk->sk_write_queue, head); in kcm_sendpage()
896 if (unlikely(skb_queue_len(&sk->sk_write_queue) == 0 && err == -EAGAIN)) in kcm_sendpage()
kcmproc.c 121 kcm->sk.sk_write_queue.qlen, in kcm_format_sock()
151 psock->sk->sk_write_queue.qlen, in kcm_format_psock()
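
kcmsock.c shows the peek/transmit/dequeue loop used when a message may go out in several pieces: the head skb stays on sk_write_queue until it has been transmitted in full, so a partial write can resume where it stopped; only then is it dequeued and freed. A sketch of that loop; transmit_head() is a hypothetical stand-in returning true once the head message is fully sent:

        static void write_queued_msgs(struct sock *sk)
        {
                struct sk_buff *head;

                while ((head = skb_peek(&sk->sk_write_queue))) {
                        if (!transmit_head(sk, head))
                                break;                          /* partial send: leave head queued, retry later */
                        skb_dequeue(&sk->sk_write_queue);       /* fully sent: now it may be removed */
                        kfree_skb(head);
                }
        }
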
/kernel/linux/linux-5.10/include/net/
udplite.h 104 skb_queue_walk(&sk->sk_write_queue, skb) { in udplite_csum_outgoing()
tcp.h 1790 return skb_peek(&sk->sk_write_queue); in tcp_write_queue_head()
1795 return skb_peek_tail(&sk->sk_write_queue); in tcp_write_queue_tail()
1799 skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1803 return skb_peek(&sk->sk_write_queue); in tcp_send_head()
1809 return skb_queue_is_last(&sk->sk_write_queue, skb); in tcp_skb_is_last()
1838 __skb_queue_tail(&sk->sk_write_queue, skb); in tcp_add_write_queue_tail()
1841 if (sk->sk_write_queue.next == skb) in tcp_add_write_queue_tail()
1850 __skb_queue_before(&sk->sk_write_queue, skb, new); in tcp_insert_write_queue_before()
1856 __skb_unlink(skb, &sk->sk_write_queue); in tcp_unlink_write_queue()
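
The tcp.h hits are the canonical wrappers other TCP code goes through rather than touching sk_write_queue directly; they use the unlocked __skb_*() variants because callers already hold the socket lock. Reconstructed from the hits above and slightly simplified (the real tcp_add_write_queue_tail() also updates send-head bookkeeping):

        static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
        {
                return skb_peek(&sk->sk_write_queue);
        }

        static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
        {
                return skb_peek_tail(&sk->sk_write_queue);
        }

        static inline bool tcp_skb_is_last(const struct sock *sk, const struct sk_buff *skb)
        {
                return skb_queue_is_last(&sk->sk_write_queue, skb);
        }

        static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
        {
                __skb_unlink(skb, &sk->sk_write_queue);
        }
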
/kernel/linux/linux-5.10/net/ipv4/
ip_output.c 1327 if (skb_queue_empty(&sk->sk_write_queue)) { in ip_append_data()
1335 return __ip_append_data(sk, fl4, &sk->sk_write_queue, &inet->cork.base, in ip_append_data()
1360 if (skb_queue_empty(&sk->sk_write_queue)) in ip_append_page()
1384 skb = skb_peek_tail(&sk->sk_write_queue); in ip_append_page()
1437 __skb_queue_tail(&sk->sk_write_queue, skb); in ip_append_page()
1620 __ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base); in ip_flush_pending_frames()
1733 nskb = skb_peek(&sk->sk_write_queue); in ip_send_unicast_reply()
icmp.c 382 } else if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) { in icmp_push_reply()
390 skb_queue_walk(&sk->sk_write_queue, skb1) { in icmp_push_reply()
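
ip_output.c uses the state of sk_write_queue itself as the cork indicator: an empty queue means this is the first chunk of a corked datagram, so the cork (route, flow, options) is set up before data is appended, while later calls extend the tail skb found with skb_peek_tail(). A sketch of that decision; setup_cork() and append_to() are hypothetical stand-ins for the real cork setup and __ip_append_data() path:

        static int append_data(struct sock *sk, const void *from, size_t len)
        {
                int err;

                if (skb_queue_empty(&sk->sk_write_queue)) {
                        err = setup_cork(sk);           /* first chunk: cache route/flow/options */
                        if (err)
                                return err;
                }
                /* later chunks grow the last queued skb or add new ones behind it */
                return append_to(sk, skb_peek_tail(&sk->sk_write_queue), from, len);
        }
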
/kernel/linux/linux-5.10/net/core/
stream.c 206 WARN_ON(!skb_queue_empty(&sk->sk_write_queue)); in sk_stream_kill_queues()
/kernel/linux/linux-5.10/net/ipv6/
icmp.c 281 skb = skb_peek(&sk->sk_write_queue); in icmpv6_push_pending_frames()
289 if (skb_queue_len(&sk->sk_write_queue) == 1) { in icmpv6_push_pending_frames()
299 skb_queue_walk(&sk->sk_write_queue, skb) { in icmpv6_push_pending_frames()
raw.c 554 skb = skb_peek(&sk->sk_write_queue); in rawv6_push_pending_frames()
570 if (skb_queue_len(&sk->sk_write_queue) == 1) { in rawv6_push_pending_frames()
579 skb_queue_walk(&sk->sk_write_queue, skb) { in rawv6_push_pending_frames()
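
The ICMPv6 and raw-socket hits show how a checksum is finalized over a corked queue before it is pushed: with a single queued fragment its skb->csum is used directly, otherwise skb_queue_walk() accumulates the per-fragment checksums with csum_add(). A sketch reconstructed from the hits; the real code then folds the result into the ICMPv6/raw header before pushing the pending frames:

        static __wsum sum_pending_frames(struct sock *sk)
        {
                struct sk_buff *skb = skb_peek(&sk->sk_write_queue);
                __wsum csum = 0;

                if (!skb)
                        return 0;                               /* nothing queued */

                if (skb_queue_len(&sk->sk_write_queue) == 1)
                        return skb->csum;                       /* single fragment: use its checksum as-is */

                skb_queue_walk(&sk->sk_write_queue, skb)
                        csum = csum_add(csum, skb->csum);       /* accumulate over every fragment */

                return csum;
        }
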
/kernel/linux/linux-5.10/net/tipc/
socket.c 553 __skb_queue_purge(&sk->sk_write_queue); in __tipc_shutdown()
1266 struct sk_buff_head *txq = &tsk->sk.sk_write_queue; in tipc_sk_push_backlog()
1500 if (unlikely(syn && !tipc_msg_skb_clone(&pkts, &sk->sk_write_queue))) { in __tipc_sendmsg()
1552 struct sk_buff_head *txq = &sk->sk_write_queue; in __tipc_sendstream()
1672 __skb_queue_purge(&sk->sk_write_queue); in tipc_sk_finish_conn()
2233 if (skb_queue_empty(&sk->sk_write_queue)) in tipc_sk_filter_connect()
2252 if (!skb_queue_empty(&sk->sk_write_queue)) in tipc_sk_filter_connect()
2855 tipc_msg_skb_clone(&sk->sk_write_queue, list); in tipc_sk_retry_connect()
3659 skb_queue_len(&sk->sk_write_queue)) || in tipc_sk_fill_sock_diag()
3988 i += tipc_list_dump(&sk->sk_write_queue, false, buf + i); in tipc_sk_dump()
