Searched refs:queue (Results 1 – 25 of 55) sorted by relevance


/net/sunrpc/
sched.c
84 __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task) in __rpc_disable_timer() argument
91 if (list_empty(&queue->timer_list.list)) in __rpc_disable_timer()
92 cancel_delayed_work(&queue->timer_list.dwork); in __rpc_disable_timer()
96 rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires) in rpc_set_queue_timer() argument
99 queue->timer_list.expires = expires; in rpc_set_queue_timer()
104 mod_delayed_work(rpciod_workqueue, &queue->timer_list.dwork, expires); in rpc_set_queue_timer()
111 __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task, in __rpc_add_timer() argument
118 if (list_empty(&queue->timer_list.list) || time_before(timeout, queue->timer_list.expires)) in __rpc_add_timer()
119 rpc_set_queue_timer(queue, timeout); in __rpc_add_timer()
120 list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list); in __rpc_add_timer()
[all …]
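
The sched.c hits above show the rpc_wait_queue timeout pattern: one delayed_work serves every queued task, is armed for the earliest deadline, and is cancelled once the waiter list empties. A minimal sketch of that pattern, not from the tree, with hypothetical demo_* names and the system workqueue standing in for rpciod_workqueue:

    #include <linux/workqueue.h>
    #include <linux/list.h>
    #include <linux/jiffies.h>

    /* Hypothetical wait queue with one shared timeout timer;
     * dwork is assumed to be set up elsewhere with INIT_DELAYED_WORK(). */
    struct demo_wait_queue {
        struct list_head timer_list;   /* waiters with a pending timeout */
        unsigned long expires;         /* earliest deadline, in jiffies */
        struct delayed_work dwork;     /* fires when 'expires' is reached */
    };

    /* (Re)arm the shared timer for the earliest absolute deadline. */
    static void demo_set_queue_timer(struct demo_wait_queue *q, unsigned long expires)
    {
        unsigned long now = jiffies;

        q->expires = expires;
        /* convert the absolute deadline to a delay; fire at once if already past */
        mod_delayed_work(system_wq, &q->dwork,
                         time_after(expires, now) ? expires - now : 0);
    }

    /* Add a waiter; only ever pull the timer earlier, never later. */
    static void demo_add_timer(struct demo_wait_queue *q, struct list_head *waiter,
                               unsigned long timeout)
    {
        if (list_empty(&q->timer_list) || time_before(timeout, q->expires))
            demo_set_queue_timer(q, timeout);
        list_add(waiter, &q->timer_list);
    }

    /* Remove a waiter; stop the timer once nobody is left waiting. */
    static void demo_disable_timer(struct demo_wait_queue *q, struct list_head *waiter)
    {
        list_del(waiter);
        if (list_empty(&q->timer_list))
            cancel_delayed_work(&q->dwork);
    }
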
/net/netfilter/
nfnetlink_queue.c
161 static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
191 __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) in __enqueue_entry() argument
193 list_add_tail(&entry->list, &queue->queue_list); in __enqueue_entry()
194 queue->queue_total++; in __enqueue_entry()
198 __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry) in __dequeue_entry() argument
201 queue->queue_total--; in __dequeue_entry()
205 find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id) in find_dequeue_entry() argument
209 spin_lock_bh(&queue->lock); in find_dequeue_entry()
211 list_for_each_entry(i, &queue->queue_list, list) { in find_dequeue_entry()
219 __dequeue_entry(queue, entry); in find_dequeue_entry()
[all …]
xt_NFQUEUE.c
40 u32 queue = info->queuenum; in nfqueue_tg_v1() local
43 queue = nfqueue_hash(skb, queue, info->queues_total, in nfqueue_tg_v1()
46 return NF_QUEUE_NR(queue); in nfqueue_tg_v1()
89 u32 queue = info->queuenum; in nfqueue_tg_v3() local
96 queue = info->queuenum + cpu % info->queues_total; in nfqueue_tg_v3()
98 queue = nfqueue_hash(skb, queue, info->queues_total, in nfqueue_tg_v3()
103 ret = NF_QUEUE_NR(queue); in nfqueue_tg_v3()
nft_queue.c
33 u32 queue = priv->queuenum; in nft_queue_eval() local
40 queue = priv->queuenum + cpu % priv->queues_total; in nft_queue_eval()
42 queue = nfqueue_hash(pkt->skb, queue, in nft_queue_eval()
48 ret = NF_QUEUE_NR(queue); in nft_queue_eval()
60 u32 queue, ret; in nft_queue_sreg_eval() local
62 queue = regs->data[priv->sreg_qnum]; in nft_queue_sreg_eval()
64 ret = NF_QUEUE_NR(queue); in nft_queue_sreg_eval()
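
Both the xt_NFQUEUE.c and nft_queue.c hits build the same kind of verdict: pick a queue inside a contiguous block (spread by CPU, or by skb hash in the nfqueue_hash() paths) and encode it with NF_QUEUE_NR(). A minimal sketch of that fanout, with a hypothetical helper name:

    #include <linux/netfilter.h>
    #include <linux/smp.h>

    /* Hypothetical helper: spread packets over 'queues_total' NFQUEUE queues
     * starting at 'first_queue' and encode the choice in the verdict. */
    static unsigned int demo_queue_verdict(u32 first_queue, u16 queues_total)
    {
        u32 queue = first_queue;

        if (queues_total > 1)
            /* CPU-based spreading; the real code can hash the skb instead */
            queue = first_queue + raw_smp_processor_id() % queues_total;

        /* NF_QUEUE verdict with the queue number in the upper 16 bits */
        return NF_QUEUE_NR(queue);
    }
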
/net/core/
net-sysfs.c
709 struct netdev_rx_queue *queue = to_rx_queue(kobj); in rx_queue_attr_show() local
714 return attribute->show(queue, buf); in rx_queue_attr_show()
721 struct netdev_rx_queue *queue = to_rx_queue(kobj); in rx_queue_attr_store() local
726 return attribute->store(queue, buf, count); in rx_queue_attr_store()
735 static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf) in show_rps_map() argument
745 map = rcu_dereference(queue->rps_map); in show_rps_map()
757 static ssize_t store_rps_map(struct netdev_rx_queue *queue, in store_rps_map() argument
797 old_map = rcu_dereference_protected(queue->rps_map, in store_rps_map()
799 rcu_assign_pointer(queue->rps_map, map); in store_rps_map()
815 static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue, in show_rps_dev_flow_table_cnt() argument
[all …]
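
show_rps_map()/store_rps_map() above follow the standard RCU publish pattern: readers use rcu_dereference() under rcu_read_lock(), while the writer swaps the pointer with rcu_assign_pointer() under a lock and frees the old map after a grace period. A minimal sketch of that pattern with hypothetical demo_* names (not the rps_map code itself):

    #include <linux/rcupdate.h>
    #include <linux/spinlock.h>
    #include <linux/slab.h>

    /* Hypothetical RCU-protected per-queue map, mirroring rps_map above. */
    struct demo_map {
        struct rcu_head rcu;
        unsigned int len;
        u16 cpus[];
    };

    struct demo_queue {
        struct demo_map __rcu *map;
    };

    static DEFINE_SPINLOCK(demo_map_lock);  /* serializes writers */

    /* Reader: rcu_read_lock() is all that is needed. */
    static unsigned int demo_map_len(struct demo_queue *q)
    {
        struct demo_map *map;
        unsigned int len = 0;

        rcu_read_lock();
        map = rcu_dereference(q->map);
        if (map)
            len = map->len;
        rcu_read_unlock();
        return len;
    }

    /* Writer: publish the new map, free the old one after a grace period. */
    static void demo_map_replace(struct demo_queue *q, struct demo_map *new_map)
    {
        struct demo_map *old_map;

        spin_lock(&demo_map_lock);
        old_map = rcu_dereference_protected(q->map,
                                            lockdep_is_held(&demo_map_lock));
        rcu_assign_pointer(q->map, new_map);
        spin_unlock(&demo_map_lock);

        if (old_map)
            kfree_rcu(old_map, rcu);
    }
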
request_sock.c
34 void reqsk_queue_alloc(struct request_sock_queue *queue) in reqsk_queue_alloc() argument
36 spin_lock_init(&queue->rskq_lock); in reqsk_queue_alloc()
38 spin_lock_init(&queue->fastopenq.lock); in reqsk_queue_alloc()
39 queue->fastopenq.rskq_rst_head = NULL; in reqsk_queue_alloc()
40 queue->fastopenq.rskq_rst_tail = NULL; in reqsk_queue_alloc()
41 queue->fastopenq.qlen = 0; in reqsk_queue_alloc()
43 queue->rskq_accept_head = NULL; in reqsk_queue_alloc()
datagram.c
167 struct sk_buff_head *queue, in __skb_try_recv_from_queue() argument
183 *last = queue->prev; in __skb_try_recv_from_queue()
184 skb_queue_walk(queue, skb) { in __skb_try_recv_from_queue()
200 __skb_unlink(skb, queue); in __skb_try_recv_from_queue()
251 struct sk_buff_head *queue = &sk->sk_receive_queue; in __skb_try_recv_datagram() local
269 spin_lock_irqsave(&queue->lock, cpu_flags); in __skb_try_recv_datagram()
270 skb = __skb_try_recv_from_queue(sk, queue, flags, destructor, in __skb_try_recv_datagram()
272 spin_unlock_irqrestore(&queue->lock, cpu_flags); in __skb_try_recv_datagram()
/net/sctp/
inqueue.c
32 void sctp_inq_init(struct sctp_inq *queue) in sctp_inq_init() argument
34 INIT_LIST_HEAD(&queue->in_chunk_list); in sctp_inq_init()
35 queue->in_progress = NULL; in sctp_inq_init()
38 INIT_WORK(&queue->immediate, NULL); in sctp_inq_init()
42 void sctp_inq_free(struct sctp_inq *queue) in sctp_inq_free() argument
47 list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) { in sctp_inq_free()
55 if (queue->in_progress) { in sctp_inq_free()
56 sctp_chunk_free(queue->in_progress); in sctp_inq_free()
57 queue->in_progress = NULL; in sctp_inq_free()
84 struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue) in sctp_inq_peek() argument
[all …]
ulpqueue.c
187 struct sk_buff_head *queue; in sctp_ulpq_tail_event() local
216 queue = &sk->sk_receive_queue; in sctp_ulpq_tail_event()
227 queue = &sp->pd_lobby; in sctp_ulpq_tail_event()
230 queue = &sk->sk_receive_queue; in sctp_ulpq_tail_event()
239 queue = &sk->sk_receive_queue; in sctp_ulpq_tail_event()
241 queue = &sp->pd_lobby; in sctp_ulpq_tail_event()
245 skb_queue_splice_tail_init(skb_list, queue); in sctp_ulpq_tail_event()
254 if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) { in sctp_ulpq_tail_event()
319 struct sk_buff_head *queue, in sctp_make_reassembled_event() argument
363 __skb_unlink(f_frag, queue); in sctp_make_reassembled_event()
[all …]
/net/qrtr/
tun.c
15 struct sk_buff_head queue; member
23 skb_queue_tail(&tun->queue, skb); in qrtr_tun_send()
40 skb_queue_head_init(&tun->queue); in qrtr_tun_open()
66 while (!(skb = skb_dequeue(&tun->queue))) { in qrtr_tun_read_iter()
72 !skb_queue_empty(&tun->queue))) in qrtr_tun_read_iter()
121 if (!skb_queue_empty(&tun->queue)) in qrtr_tun_poll()
135 while (!skb_queue_empty(&tun->queue)) { in qrtr_tun_release()
136 skb = skb_dequeue(&tun->queue); in qrtr_tun_release()
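
The qrtr tun.c hits are a compact example of the locked sk_buff_head API: skb_queue_tail() and skb_dequeue() take the queue's own spinlock, so a simple producer/consumer needs no extra locking. A minimal sketch with hypothetical demo_* names:

    #include <linux/skbuff.h>

    struct demo_chan {
        struct sk_buff_head queue;      /* FIFO of received packets */
    };

    static void demo_chan_init(struct demo_chan *ch)
    {
        skb_queue_head_init(&ch->queue);
    }

    /* Producer: append a packet (takes the queue lock internally). */
    static void demo_chan_rx(struct demo_chan *ch, struct sk_buff *skb)
    {
        skb_queue_tail(&ch->queue, skb);
    }

    /* Consumer: pop the oldest packet, or NULL if the queue is empty. */
    static struct sk_buff *demo_chan_read(struct demo_chan *ch)
    {
        return skb_dequeue(&ch->queue);
    }

    /* Teardown: free everything still queued, as the release path does above. */
    static void demo_chan_release(struct demo_chan *ch)
    {
        skb_queue_purge(&ch->queue);
    }
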
/net/ipv4/
inet_connection_sock.c
454 struct request_sock_queue *queue = &icsk->icsk_accept_queue; in inet_csk_accept() local
469 if (reqsk_queue_empty(queue)) { in inet_csk_accept()
481 req = reqsk_queue_remove(queue, sk); in inet_csk_accept()
486 spin_lock_bh(&queue->fastopenq.lock); in inet_csk_accept()
497 spin_unlock_bh(&queue->fastopenq.lock); in inet_csk_accept()
742 struct request_sock_queue *queue = &icsk->icsk_accept_queue; in reqsk_timer_handler() local
769 qlen = reqsk_queue_len(queue); in reqsk_timer_handler()
771 int young = reqsk_queue_len_young(queue) << 1; in reqsk_timer_handler()
780 defer_accept = READ_ONCE(queue->rskq_defer_accept); in reqsk_timer_handler()
793 atomic_dec(&queue->young); in reqsk_timer_handler()
[all …]
tcp_yeah.c
133 u32 rtt, queue; in tcp_yeah_cong_avoid() local
155 queue = bw; in tcp_yeah_cong_avoid()
157 if (queue > TCP_YEAH_ALPHA || in tcp_yeah_cong_avoid()
159 if (queue > TCP_YEAH_ALPHA && in tcp_yeah_cong_avoid()
161 u32 reduction = min(queue / TCP_YEAH_GAMMA , in tcp_yeah_cong_avoid()
190 yeah->lastQ = queue; in tcp_yeah_cong_avoid()
ip_output.c
964 struct sk_buff_head *queue, in __ip_append_data() argument
990 skb = skb_peek_tail(queue); in __ip_append_data()
1176 __skb_queue_tail(queue, skb); in __ip_append_data()
1480 struct sk_buff_head *queue, in __ip_make_skb() argument
1493 skb = __skb_dequeue(queue); in __ip_make_skb()
1501 while ((tmp_skb = __skb_dequeue(queue)) != NULL) { in __ip_make_skb()
1612 struct sk_buff_head *queue, in __ip_flush_pending_frames() argument
1617 while ((skb = __skb_dequeue_tail(queue)) != NULL) in __ip_flush_pending_frames()
1636 struct sk_buff_head queue; in ip_make_skb() local
1642 __skb_queue_head_init(&queue); in ip_make_skb()
[all …]
tcp_fastopen.c
260 struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue; in tcp_fastopen_create_child() local
269 spin_lock(&queue->fastopenq.lock); in tcp_fastopen_create_child()
270 queue->fastopenq.qlen++; in tcp_fastopen_create_child()
271 spin_unlock(&queue->fastopenq.lock); in tcp_fastopen_create_child()
/net/mac80211/
util.c
348 void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue) in ieee80211_propagate_queue_wake() argument
372 if (ac_queue == queue || in ieee80211_propagate_queue_wake()
373 (sdata->vif.cab_queue == queue && in ieee80211_propagate_queue_wake()
381 static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue, in __ieee80211_wake_queue() argument
388 trace_wake_queue(local, queue, reason); in __ieee80211_wake_queue()
390 if (WARN_ON(queue >= hw->queues)) in __ieee80211_wake_queue()
393 if (!test_bit(reason, &local->queue_stop_reasons[queue])) in __ieee80211_wake_queue()
397 local->q_stop_reasons[queue][reason] = 0; in __ieee80211_wake_queue()
399 local->q_stop_reasons[queue][reason]--; in __ieee80211_wake_queue()
400 if (WARN_ON(local->q_stop_reasons[queue][reason] < 0)) in __ieee80211_wake_queue()
[all …]
agg-tx.c
164 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; in __acquires() local
168 if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1) in __acquires()
170 &sdata->local->hw, queue, in __acquires()
179 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; in __releases() local
181 if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0) in __releases()
183 &sdata->local->hw, queue, in __releases()
245 int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)]; in __acquires() local
259 &local->pending[queue]); in __acquires()
/net/x25/
x25_link.c
105 while ((skbn = skb_dequeue(&nb->queue)) != NULL) in x25_link_control()
196 skb_queue_tail(&nb->queue, skb); in x25_transmit_link()
202 skb_queue_tail(&nb->queue, skb); in x25_transmit_link()
249 skb_queue_head_init(&nb->queue); in x25_link_device_up()
280 skb_queue_purge(&nb->queue); in __x25_remove_neigh()
/net/sched/
sch_etf.c
32 int queue; member
310 etf.queue = q->queue; in etf_disable_offload()
316 etf.queue); in etf_disable_offload()
334 etf.queue = q->queue; in etf_enable_offload()
382 q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); in etf_init()
sch_cbs.c
71 int queue; member
266 cbs.queue = q->queue; in cbs_disable_offload()
272 cbs.queue); in cbs_disable_offload()
288 cbs.queue = q->queue; in cbs_enable_offload()
422 q->queue = sch->dev_queue - netdev_get_tx_queue(dev, 0); in cbs_init()
/net/nfc/
digital_core.c
28 struct list_head queue; member
120 queue); in digital_wq_cmd_complete()
126 list_del(&cmd->queue); in digital_wq_cmd_complete()
164 queue); in digital_wq_cmd()
217 list_del(&cmd->queue); in digital_wq_cmd()
244 INIT_LIST_HEAD(&cmd->queue); in digital_send_cmd()
247 list_add_tail(&cmd->queue, &ddev->cmd_queue); in digital_send_cmd()
842 list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) { in nfc_digital_unregister_device()
843 list_del(&cmd->queue); in nfc_digital_unregister_device()
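
digital_core.c above queues commands on a plain list_head: INIT_LIST_HEAD() in the element, list_add_tail() to enqueue, list_del() after processing, and list_for_each_entry_safe() to drain on unregister. A minimal sketch of that list-based command queue with hypothetical demo_* names (locking omitted for brevity):

    #include <linux/list.h>
    #include <linux/slab.h>

    /* Hypothetical command element and device, mirroring the cmd_queue above. */
    struct demo_cmd {
        struct list_head queue;         /* link into demo_dev.cmd_queue */
        int opcode;
    };

    struct demo_dev {
        struct list_head cmd_queue;
    };

    static void demo_send_cmd(struct demo_dev *dev, struct demo_cmd *cmd)
    {
        INIT_LIST_HEAD(&cmd->queue);
        list_add_tail(&cmd->queue, &dev->cmd_queue);    /* enqueue at the tail */
    }

    /* Peek at the oldest pending command, if any. */
    static struct demo_cmd *demo_next_cmd(struct demo_dev *dev)
    {
        if (list_empty(&dev->cmd_queue))
            return NULL;
        return list_first_entry(&dev->cmd_queue, struct demo_cmd, queue);
    }

    /* Drain everything, e.g. on device unregister. */
    static void demo_flush_cmds(struct demo_dev *dev)
    {
        struct demo_cmd *cmd, *n;

        list_for_each_entry_safe(cmd, n, &dev->cmd_queue, queue) {
            list_del(&cmd->queue);
            kfree(cmd);
        }
    }
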
/net/xfrm/
xfrm_input.c
28 struct sk_buff_head queue; member
761 struct sk_buff_head queue; in xfrm_trans_reinject() local
764 __skb_queue_head_init(&queue); in xfrm_trans_reinject()
765 skb_queue_splice_init(&trans->queue, &queue); in xfrm_trans_reinject()
767 while ((skb = __skb_dequeue(&queue))) in xfrm_trans_reinject()
779 if (skb_queue_len(&trans->queue) >= netdev_max_backlog) in xfrm_trans_queue()
783 __skb_queue_tail(&trans->queue, skb); in xfrm_trans_queue()
803 __skb_queue_head_init(&trans->queue); in xfrm_input_init()
/net/dsa/
tag_brcm.c
66 u16 queue = skb_get_queue_mapping(skb); in brcm_tag_xmit_ll() local
95 ((queue & BRCM_IG_TC_MASK) << BRCM_IG_TC_SHIFT); in brcm_tag_xmit_ll()
105 skb_set_queue_mapping(skb, BRCM_TAG_SET_PORT_QUEUE(dp->index, queue)); in brcm_tag_xmit_ll()
/net/802/
garp.c
263 skb_queue_tail(&app->queue, app->pdu); in garp_pdu_queue()
271 while ((skb = skb_dequeue(&app->queue))) in garp_queue_xmit()
304 goto queue; in garp_pdu_append_attr()
306 goto queue; in garp_pdu_append_attr()
311 goto queue; in garp_pdu_append_attr()
318 queue: in garp_pdu_append_attr()
595 skb_queue_head_init(&app->queue); in garp_init_applicant()
mrp.c
355 skb_queue_tail(&app->queue, app->pdu); in mrp_pdu_queue()
363 while ((skb = skb_dequeue(&app->queue))) in mrp_queue_xmit()
425 goto queue; in mrp_pdu_append_vecattr_event()
435 goto queue; in mrp_pdu_append_vecattr_event()
446 goto queue; in mrp_pdu_append_vecattr_event()
475 queue: in mrp_pdu_append_vecattr_event()
881 skb_queue_head_init(&app->queue); in mrp_init_applicant()
/net/ipv6/
ip6_output.c
1392 struct sk_buff_head *queue, in __ip6_append_data() argument
1418 skb = skb_peek_tail(queue); in __ip6_append_data()
1669 __skb_queue_tail(queue, skb); in __ip6_append_data()
1799 struct sk_buff_head *queue, in __ip6_make_skb() argument
1814 skb = __skb_dequeue(queue); in __ip6_make_skb()
1822 while ((tmp_skb = __skb_dequeue(queue)) != NULL) { in __ip6_make_skb()
1910 struct sk_buff_head *queue, in __ip6_flush_pending_frames() argument
1916 while ((skb = __skb_dequeue_tail(queue)) != NULL) { in __ip6_flush_pending_frames()
1942 struct sk_buff_head queue; in ip6_make_skb() local
1949 __skb_queue_head_init(&queue); in ip6_make_skb()
[all …]
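
ip_make_skb()/ip6_make_skb() above use the unlocked __skb_* variants on a queue that lives on the caller's stack: fragments are appended with __skb_queue_tail() and later coalesced by dequeueing them one by one, so the queue's spinlock is never taken. A minimal sketch of that private-queue pattern (hypothetical demo_* names; the length and ownership bookkeeping done by the real code is omitted):

    #include <linux/skbuff.h>
    #include <linux/gfp.h>

    /* Dequeue the head skb and chain the remaining fragments, in order,
     * onto its frag_list, roughly as __ip(6)_make_skb() does. */
    static struct sk_buff *demo_coalesce(struct sk_buff_head *queue)
    {
        struct sk_buff *head, *skb, **tail;

        head = __skb_dequeue(queue);
        if (!head)
            return NULL;

        tail = &skb_shinfo(head)->frag_list;
        while ((skb = __skb_dequeue(queue)) != NULL) {
            *tail = skb;            /* append, preserving order */
            tail = &skb->next;
        }
        return head;
    }

    static struct sk_buff *demo_build(void)
    {
        struct sk_buff_head queue;
        struct sk_buff *skb;
        int i;

        __skb_queue_head_init(&queue);  /* on-stack queue, never locked */

        for (i = 0; i < 3; i++) {
            skb = alloc_skb(128, GFP_KERNEL);
            if (!skb)
                break;
            __skb_queue_tail(&queue, skb);
        }
        return demo_coalesce(&queue);
    }
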
