/net/sunrpc/ |
D | sched.c |
    66   __rpc_disable_timer(struct rpc_wait_queue *queue, struct rpc_task *task)   in __rpc_disable_timer() argument
    73   if (list_empty(&queue->timer_list.list))   in __rpc_disable_timer()
    74   del_timer(&queue->timer_list.timer);   in __rpc_disable_timer()
    78   rpc_set_queue_timer(struct rpc_wait_queue *queue, unsigned long expires)   in rpc_set_queue_timer() argument
    80   queue->timer_list.expires = expires;   in rpc_set_queue_timer()
    81   mod_timer(&queue->timer_list.timer, expires);   in rpc_set_queue_timer()
    88   __rpc_add_timer(struct rpc_wait_queue *queue, struct rpc_task *task)   in __rpc_add_timer() argument
    97   …if (list_empty(&queue->timer_list.list) || time_before(task->u.tk_wait.expires, queue->timer_list.…   in __rpc_add_timer()
    98   rpc_set_queue_timer(queue, task->u.tk_wait.expires);   in __rpc_add_timer()
    99   list_add(&task->u.tk_wait.timer_list, &queue->timer_list.list);   in __rpc_add_timer()
    [all …]
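The sched.c matches show the RPC wait queue keeping one timer armed for the earliest expiry among its queued tasks: __rpc_add_timer() re-arms the timer when a newly queued task expires sooner, and __rpc_disable_timer() cancels it once the timed list drains. Below is a minimal user-space sketch of that "one timer for the soonest deadline" bookkeeping; the timer_arm()/timer_cancel() stubs, struct names, and list layout are illustrative assumptions, not the kernel's mod_timer()/del_timer() API.

#include <stdbool.h>
#include <stddef.h>

struct timed_task {
        unsigned long expires;
        struct timed_task *next;
};

struct wait_queue {
        struct timed_task *timed;      /* tasks that asked for a timeout */
        unsigned long timer_expires;   /* deadline the single timer is armed for */
        bool timer_armed;
};

/* stand-ins for arming/cancelling the one underlying timer */
static void timer_arm(unsigned long expires) { (void)expires; }
static void timer_cancel(void) { }

static void queue_add_timer(struct wait_queue *q, struct timed_task *t)
{
        /* re-arm only if the list was empty or this deadline is sooner
         * (wrap-safe comparison in the spirit of time_before()) */
        if (!q->timed || (long)(t->expires - q->timer_expires) < 0) {
                q->timer_expires = t->expires;
                q->timer_armed = true;
                timer_arm(t->expires);
        }
        t->next = q->timed;
        q->timed = t;
}

static void queue_disable_timer(struct wait_queue *q)
{
        /* mirrors __rpc_disable_timer(): cancel once no timed tasks remain */
        if (!q->timed && q->timer_armed) {
                q->timer_armed = false;
                timer_cancel();
        }
}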
|
/net/netfilter/ |
D | nfnetlink_queue.c |
    162   static void nfqnl_flush(struct nfqnl_instance *queue, nfqnl_cmpfn cmpfn,
    192   __enqueue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)   in __enqueue_entry() argument
    194   list_add_tail(&entry->list, &queue->queue_list);   in __enqueue_entry()
    195   queue->queue_total++;   in __enqueue_entry()
    199   __dequeue_entry(struct nfqnl_instance *queue, struct nf_queue_entry *entry)   in __dequeue_entry() argument
    202   queue->queue_total--;   in __dequeue_entry()
    206   find_dequeue_entry(struct nfqnl_instance *queue, unsigned int id)   in find_dequeue_entry() argument
    210   spin_lock_bh(&queue->lock);   in find_dequeue_entry()
    212   list_for_each_entry(i, &queue->queue_list, list) {   in find_dequeue_entry()
    220   __dequeue_entry(queue, entry);   in find_dequeue_entry()
    [all …]
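The pattern visible in __enqueue_entry()/find_dequeue_entry() is a per-instance list protected by a lock, with a running total and a lookup-by-id that unlinks the match before returning it. A hedged user-space sketch of the same shape follows; the struct names, the singly linked list, and the pthread mutex standing in for spin_lock_bh() are assumptions for illustration, not the kernel list API.

#include <pthread.h>
#include <stddef.h>

struct entry {
        unsigned int id;
        struct entry *next;
};

struct instance {
        pthread_mutex_t lock;       /* plays the role of queue->lock */
        struct entry *head, *tail;  /* plays the role of queue->queue_list */
        unsigned int total;         /* plays the role of queue->queue_total */
};

/* append at the tail and bump the counter; caller already holds the lock,
 * matching the double-underscore naming convention in the snippet above */
static void enqueue_entry(struct instance *q, struct entry *e)
{
        e->next = NULL;
        if (q->tail)
                q->tail->next = e;
        else
                q->head = e;
        q->tail = e;
        q->total++;
}

/* find an entry by id, unlink it, and return it; NULL if absent */
static struct entry *find_dequeue_entry(struct instance *q, unsigned int id)
{
        struct entry *e, *prev = NULL;

        pthread_mutex_lock(&q->lock);
        for (e = q->head; e; prev = e, e = e->next) {
                if (e->id != id)
                        continue;
                if (prev)
                        prev->next = e->next;
                else
                        q->head = e->next;
                if (q->tail == e)
                        q->tail = prev;
                q->total--;
                break;
        }
        pthread_mutex_unlock(&q->lock);
        return e;
}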
|
D | xt_NFQUEUE.c |
    42    u32 queue = info->queuenum;   in nfqueue_tg_v1() local
    45    queue = nfqueue_hash(skb, queue, info->queues_total,   in nfqueue_tg_v1()
    48    return NF_QUEUE_NR(queue);   in nfqueue_tg_v1()
    91    u32 queue = info->queuenum;   in nfqueue_tg_v3() local
    98    queue = info->queuenum + cpu % info->queues_total;   in nfqueue_tg_v3()
    100   queue = nfqueue_hash(skb, queue, info->queues_total,   in nfqueue_tg_v3()
    105   ret = NF_QUEUE_NR(queue);   in nfqueue_tg_v3()
|
D | nft_queue.c |
    36   u32 queue = priv->queuenum;   in nft_queue_eval() local
    43   queue = priv->queuenum + cpu % priv->queues_total;   in nft_queue_eval()
    45   queue = nfqueue_hash(pkt->skb, queue,   in nft_queue_eval()
    51   ret = NF_QUEUE_NR(queue);   in nft_queue_eval()
    63   u32 queue, ret;   in nft_queue_sreg_eval() local
    65   queue = regs->data[priv->sreg_qnum];   in nft_queue_sreg_eval()
    67   ret = NF_QUEUE_NR(queue);   in nft_queue_sreg_eval()
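Both the xt_NFQUEUE and nft_queue snippets above spread packets over a contiguous block of NFQUEUE numbers, either round-robin by CPU or by a flow hash, and then fold the chosen number into a queue verdict. The sketch below shows that selection logic in isolation; the hash input, verdict encoding, and constant are simplified stand-ins, not the kernel's nfqueue_hash() or NF_QUEUE_NR().

#include <stdint.h>

#define QUEUE_VERDICT_BASE 0x20u   /* stand-in for the NF_QUEUE verdict bits */

static uint32_t pick_queue(uint32_t flow_hash, unsigned int cpu,
                           uint32_t base, uint32_t total, int per_cpu)
{
        uint32_t queue = base;

        if (total > 1) {
                if (per_cpu)
                        queue = base + cpu % total;        /* one queue per CPU, wrapped */
                else
                        queue = base + flow_hash % total;  /* keep a flow on one queue */
        }
        return queue;
}

/* fold the chosen queue number into a verdict-style value */
static uint32_t queue_verdict(uint32_t queue)
{
        return QUEUE_VERDICT_BASE | (queue << 16);
}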
|
/net/core/ |
D | net-sysfs.c |
    662   struct netdev_rx_queue *queue = to_rx_queue(kobj);   in rx_queue_attr_show() local
    667   return attribute->show(queue, buf);   in rx_queue_attr_show()
    674   struct netdev_rx_queue *queue = to_rx_queue(kobj);   in rx_queue_attr_store() local
    679   return attribute->store(queue, buf, count);   in rx_queue_attr_store()
    688   static ssize_t show_rps_map(struct netdev_rx_queue *queue, char *buf)   in show_rps_map() argument
    698   map = rcu_dereference(queue->rps_map);   in show_rps_map()
    710   static ssize_t store_rps_map(struct netdev_rx_queue *queue,   in store_rps_map() argument
    750   old_map = rcu_dereference_protected(queue->rps_map,   in store_rps_map()
    752   rcu_assign_pointer(queue->rps_map, map);   in store_rps_map()
    768   static ssize_t show_rps_dev_flow_table_cnt(struct netdev_rx_queue *queue,   in show_rps_dev_flow_table_cnt() argument
    [all …]
|
D | request_sock.c |
    38   void reqsk_queue_alloc(struct request_sock_queue *queue)   in reqsk_queue_alloc() argument
    40   spin_lock_init(&queue->rskq_lock);   in reqsk_queue_alloc()
    42   spin_lock_init(&queue->fastopenq.lock);   in reqsk_queue_alloc()
    43   queue->fastopenq.rskq_rst_head = NULL;   in reqsk_queue_alloc()
    44   queue->fastopenq.rskq_rst_tail = NULL;   in reqsk_queue_alloc()
    45   queue->fastopenq.qlen = 0;   in reqsk_queue_alloc()
    47   queue->rskq_accept_head = NULL;   in reqsk_queue_alloc()
|
D | datagram.c |
    166   struct sk_buff_head *queue,   in __skb_try_recv_from_queue() argument
    182   *last = queue->prev;   in __skb_try_recv_from_queue()
    183   skb_queue_walk(queue, skb) {   in __skb_try_recv_from_queue()
    200   __skb_unlink(skb, queue);   in __skb_try_recv_from_queue()
    252   struct sk_buff_head *queue = &sk->sk_receive_queue;   in __skb_try_recv_datagram() local
    271   spin_lock_irqsave(&queue->lock, cpu_flags);   in __skb_try_recv_datagram()
    272   skb = __skb_try_recv_from_queue(sk, queue, flags, destructor,   in __skb_try_recv_datagram()
    274   spin_unlock_irqrestore(&queue->lock, cpu_flags);   in __skb_try_recv_datagram()
|
/net/sctp/ |
D | inqueue.c |
    47   void sctp_inq_init(struct sctp_inq *queue)   in sctp_inq_init() argument
    49   INIT_LIST_HEAD(&queue->in_chunk_list);   in sctp_inq_init()
    50   queue->in_progress = NULL;   in sctp_inq_init()
    53   INIT_WORK(&queue->immediate, NULL);   in sctp_inq_init()
    57   void sctp_inq_free(struct sctp_inq *queue)   in sctp_inq_free() argument
    62   list_for_each_entry_safe(chunk, tmp, &queue->in_chunk_list, list) {   in sctp_inq_free()
    70   if (queue->in_progress) {   in sctp_inq_free()
    71   sctp_chunk_free(queue->in_progress);   in sctp_inq_free()
    72   queue->in_progress = NULL;   in sctp_inq_free()
    99   struct sctp_chunkhdr *sctp_inq_peek(struct sctp_inq *queue)   in sctp_inq_peek() argument
    [all …]
|
D | ulpqueue.c |
    195   struct sk_buff_head *queue, *skb_list;   in sctp_ulpq_tail_event() local
    223   queue = &sk->sk_receive_queue;   in sctp_ulpq_tail_event()
    234   queue = &sp->pd_lobby;   in sctp_ulpq_tail_event()
    237   queue = &sk->sk_receive_queue;   in sctp_ulpq_tail_event()
    246   queue = &sk->sk_receive_queue;   in sctp_ulpq_tail_event()
    248   queue = &sp->pd_lobby;   in sctp_ulpq_tail_event()
    256   skb_queue_splice_tail_init(skb_list, queue);   in sctp_ulpq_tail_event()
    258   __skb_queue_tail(queue, skb);   in sctp_ulpq_tail_event()
    267   if (queue == &sk->sk_receive_queue && !sp->data_ready_signalled) {   in sctp_ulpq_tail_event()
    332   struct sk_buff_head *queue, struct sk_buff *f_frag,   in sctp_make_reassembled_event() argument
    [all …]
|
/net/ipv4/ |
D | inet_connection_sock.c |
    433   struct request_sock_queue *queue = &icsk->icsk_accept_queue;   in inet_csk_accept() local
    448   if (reqsk_queue_empty(queue)) {   in inet_csk_accept()
    460   req = reqsk_queue_remove(queue, sk);   in inet_csk_accept()
    465   spin_lock_bh(&queue->fastopenq.lock);   in inet_csk_accept()
    476   spin_unlock_bh(&queue->fastopenq.lock);   in inet_csk_accept()
    667   static bool reqsk_queue_unlink(struct request_sock_queue *queue,   in reqsk_queue_unlink() argument
    707   struct request_sock_queue *queue = &icsk->icsk_accept_queue;   in reqsk_timer_handler() local
    734   qlen = reqsk_queue_len(queue);   in reqsk_timer_handler()
    736   int young = reqsk_queue_len_young(queue) << 1;   in reqsk_timer_handler()
    745   defer_accept = READ_ONCE(queue->rskq_defer_accept);   in reqsk_timer_handler()
    [all …]
|
D | tcp_yeah.c |
    132   u32 rtt, queue;   in tcp_yeah_cong_avoid() local
    154   queue = bw;   in tcp_yeah_cong_avoid()
    156   if (queue > TCP_YEAH_ALPHA ||   in tcp_yeah_cong_avoid()
    158   if (queue > TCP_YEAH_ALPHA &&   in tcp_yeah_cong_avoid()
    160   u32 reduction = min(queue / TCP_YEAH_GAMMA ,   in tcp_yeah_cong_avoid()
    189   yeah->lastQ = queue;   in tcp_yeah_cong_avoid()
|
D | ip_output.c |
    861    struct sk_buff_head *queue,   in __ip_append_data() argument
    884    skb = skb_peek_tail(queue);   in __ip_append_data()
    1039   __skb_queue_tail(queue, skb);   in __ip_append_data()
    1328   struct sk_buff_head *queue,   in __ip_make_skb() argument
    1341   skb = __skb_dequeue(queue);   in __ip_make_skb()
    1349   while ((tmp_skb = __skb_dequeue(queue)) != NULL) {   in __ip_make_skb()
    1449   struct sk_buff_head *queue,   in __ip_flush_pending_frames() argument
    1454   while ((skb = __skb_dequeue_tail(queue)) != NULL)   in __ip_flush_pending_frames()
    1474   struct sk_buff_head queue;   in ip_make_skb() local
    1480   __skb_queue_head_init(&queue);   in ip_make_skb()
    [all …]
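The ip_output.c matches outline corking: __ip_append_data() parks each piece of an outgoing datagram as a separate buffer on a pending queue, and __ip_make_skb() later dequeues the first buffer and folds every remaining one into it before transmission. A hedged structural sketch of that coalescing loop is below; the buf/frag fields and length accounting are illustrative stand-ins, not struct sk_buff or its frag_list handling.

#include <stddef.h>

struct buf {
        struct buf *next;       /* link while sitting on the pending queue */
        struct buf *frag_list;  /* chain of follow-on fragments once coalesced */
        size_t len;             /* length of this piece */
        size_t total_len;       /* this piece plus all its fragments */
};

struct buf_queue {
        struct buf *head;
};

static struct buf *dequeue(struct buf_queue *q)
{
        struct buf *b = q->head;

        if (b)
                q->head = b->next;
        return b;
}

/* collapse the whole pending queue into the first buffer */
static struct buf *make_packet(struct buf_queue *q)
{
        struct buf *head = dequeue(q);
        struct buf *tail = NULL, *b;

        if (!head)
                return NULL;

        head->total_len = head->len;
        while ((b = dequeue(q)) != NULL) {
                b->next = NULL;
                if (tail)
                        tail->next = b;        /* extend the fragment chain */
                else
                        head->frag_list = b;   /* first fragment hangs off the head */
                tail = b;
                head->total_len += b->len;
        }
        return head;
}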
|
D | tcp_fastopen.c |
    178   struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;   in tcp_fastopen_create_child() local
    191   spin_lock(&queue->fastopenq.lock);   in tcp_fastopen_create_child()
    192   queue->fastopenq.qlen++;   in tcp_fastopen_create_child()
    193   spin_unlock(&queue->fastopenq.lock);   in tcp_fastopen_create_child()
|
/net/mac80211/ |
D | util.c |
    242   void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)   in ieee80211_propagate_queue_wake() argument
    266   if (ac_queue == queue ||   in ieee80211_propagate_queue_wake()
    267   (sdata->vif.cab_queue == queue &&   in ieee80211_propagate_queue_wake()
    275   static void __ieee80211_wake_queue(struct ieee80211_hw *hw, int queue,   in __ieee80211_wake_queue() argument
    281   trace_wake_queue(local, queue, reason);   in __ieee80211_wake_queue()
    283   if (WARN_ON(queue >= hw->queues))   in __ieee80211_wake_queue()
    286   if (!test_bit(reason, &local->queue_stop_reasons[queue]))   in __ieee80211_wake_queue()
    290   local->q_stop_reasons[queue][reason] = 0;   in __ieee80211_wake_queue()
    292   local->q_stop_reasons[queue][reason]--;   in __ieee80211_wake_queue()
    293   if (WARN_ON(local->q_stop_reasons[queue][reason] < 0))   in __ieee80211_wake_queue()
    [all …]
|
D | agg-tx.c |
    167   int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];   in __acquires() local
    171   if (atomic_inc_return(&sdata->local->agg_queue_stop[queue]) == 1)   in __acquires()
    173   &sdata->local->hw, queue,   in __acquires()
    182   int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];   in __releases() local
    184   if (atomic_dec_return(&sdata->local->agg_queue_stop[queue]) == 0)   in __releases()
    186   &sdata->local->hw, queue,   in __releases()
    246   int queue = sdata->vif.hw_queue[ieee80211_ac_from_tid(tid)];   in __acquires() local
    260   &local->pending[queue]);   in __acquires()
|
/net/xfrm/ |
D | xfrm_input.c |
    25    struct sk_buff_head queue;   member
    519   struct sk_buff_head queue;   in xfrm_trans_reinject() local
    522   __skb_queue_head_init(&queue);   in xfrm_trans_reinject()
    523   skb_queue_splice_init(&trans->queue, &queue);   in xfrm_trans_reinject()
    525   while ((skb = __skb_dequeue(&queue)))   in xfrm_trans_reinject()
    537   if (skb_queue_len(&trans->queue) >= netdev_max_backlog)   in xfrm_trans_queue()
    541   __skb_queue_tail(&trans->queue, skb);   in xfrm_trans_queue()
    566   __skb_queue_head_init(&trans->queue);   in xfrm_input_init()
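xfrm_trans_reinject() is a clean example of the splice-and-drain deferral pattern: producers append to a shared backlog (refusing once it reaches the netdev_max_backlog cap), and the consumer moves the whole backlog onto a private local list in one step, then processes it without further contention. A user-space sketch of that pattern follows; the pthread mutex, the singly linked list, and the names are assumptions standing in for sk_buff_head, skb_queue_splice_init(), and friends.

#include <pthread.h>
#include <stddef.h>

struct pkt {
        struct pkt *next;
};

struct backlog {
        pthread_mutex_t lock;
        struct pkt *head, *tail;
        unsigned int len, max;
};

/* producer side: refuse once the backlog is full, otherwise append at the tail */
static int backlog_queue(struct backlog *b, struct pkt *p)
{
        int ret = -1;

        pthread_mutex_lock(&b->lock);
        if (b->len < b->max) {
                p->next = NULL;
                if (b->tail)
                        b->tail->next = p;
                else
                        b->head = p;
                b->tail = p;
                b->len++;
                ret = 0;
        }
        pthread_mutex_unlock(&b->lock);
        return ret;
}

/* consumer side: splice everything onto a private list, then process unlocked */
static void backlog_reinject(struct backlog *b, void (*process)(struct pkt *))
{
        struct pkt *p, *local;

        pthread_mutex_lock(&b->lock);
        local = b->head;
        b->head = b->tail = NULL;
        b->len = 0;
        pthread_mutex_unlock(&b->lock);

        while ((p = local) != NULL) {
                local = p->next;
                process(p);
        }
}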
|
/net/rxrpc/ |
D | call_event.c |
    32   bool queue = false;   in __rxrpc_set_timer() local
    38   queue = true;   in __rxrpc_set_timer()
    45   queue = true;   in __rxrpc_set_timer()
    53   queue = true;   in __rxrpc_set_timer()
    61   queue = true;   in __rxrpc_set_timer()
    83   if (queue)   in __rxrpc_set_timer()
|
/net/x25/ |
D | x25_link.c |
    110   while ((skbn = skb_dequeue(&nb->queue)) != NULL)   in x25_link_control()
    201   skb_queue_tail(&nb->queue, skb);   in x25_transmit_link()
    207   skb_queue_tail(&nb->queue, skb);   in x25_transmit_link()
    254   skb_queue_head_init(&nb->queue);   in x25_link_device_up()
    285   skb_queue_purge(&nb->queue);   in __x25_remove_neigh()
|
/net/nfc/ |
D | digital_core.c |
    37    struct list_head queue;   member
    129   queue);   in digital_wq_cmd_complete()
    135   list_del(&cmd->queue);   in digital_wq_cmd_complete()
    173   queue);   in digital_wq_cmd()
    226   list_del(&cmd->queue);   in digital_wq_cmd()
    253   INIT_LIST_HEAD(&cmd->queue);   in digital_send_cmd()
    256   list_add_tail(&cmd->queue, &ddev->cmd_queue);   in digital_send_cmd()
    845   list_for_each_entry_safe(cmd, n, &ddev->cmd_queue, queue) {   in nfc_digital_unregister_device()
    846   list_del(&cmd->queue);   in nfc_digital_unregister_device()
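The digital_core.c matches show pending NFC commands linked onto ddev->cmd_queue at submit time and, on unregister, the whole list drained with list_for_each_entry_safe() so each node can be unlinked and freed while iterating. A minimal sketch of that "iterate safely while deleting" idea on a plain singly linked list follows; the types are illustrative, not the kernel list API.

#include <stdlib.h>
#include <stddef.h>

struct cmd {
        struct cmd *next;
};

/* free every queued command; saving the successor before freeing is what makes
 * deletion during traversal safe, which is the job of the _safe iterator above */
static void flush_cmd_queue(struct cmd **head)
{
        struct cmd *c = *head, *n;

        while (c) {
                n = c->next;    /* remember the successor before freeing */
                free(c);
                c = n;
        }
        *head = NULL;
}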
|
/net/802/ |
D | garp.c |
    253   skb_queue_tail(&app->queue, app->pdu);   in garp_pdu_queue()
    261   while ((skb = skb_dequeue(&app->queue)))   in garp_queue_xmit()
    294   goto queue;   in garp_pdu_append_attr()
    296   goto queue;   in garp_pdu_append_attr()
    301   goto queue;   in garp_pdu_append_attr()
    308   queue:   in garp_pdu_append_attr()
    585   skb_queue_head_init(&app->queue);   in garp_init_applicant()
|
D | mrp.c |
    345   skb_queue_tail(&app->queue, app->pdu);   in mrp_pdu_queue()
    353   while ((skb = skb_dequeue(&app->queue)))   in mrp_queue_xmit()
    415   goto queue;   in mrp_pdu_append_vecattr_event()
    425   goto queue;   in mrp_pdu_append_vecattr_event()
    436   goto queue;   in mrp_pdu_append_vecattr_event()
    465   queue:   in mrp_pdu_append_vecattr_event()
    866   skb_queue_head_init(&app->queue);   in mrp_init_applicant()
|
/net/dsa/ |
D | tag_brcm.c |
    65   u16 queue = skb_get_queue_mapping(skb);   in brcm_tag_xmit() local
    82   ((queue & BRCM_IG_TC_MASK) << BRCM_IG_TC_SHIFT);   in brcm_tag_xmit()
|
/net/ipv6/ |
D | ip6_output.c |
    1243   struct sk_buff_head *queue,   in __ip6_append_data() argument
    1268   skb = skb_peek_tail(queue);   in __ip6_append_data()
    1487   __skb_queue_tail(queue, skb);   in __ip6_append_data()
    1609   struct sk_buff_head *queue,   in __ip6_make_skb() argument
    1624   skb = __skb_dequeue(queue);   in __ip6_make_skb()
    1632   while ((tmp_skb = __skb_dequeue(queue)) != NULL) {   in __ip6_make_skb()
    1713   struct sk_buff_head *queue,   in __ip6_flush_pending_frames() argument
    1719   while ((skb = __skb_dequeue_tail(queue)) != NULL) {   in __ip6_flush_pending_frames()
    1746   struct sk_buff_head queue;   in ip6_make_skb() local
    1753   __skb_queue_head_init(&queue);   in ip6_make_skb()
    [all …]
|
/net/rose/ |
D | rose_link.c |
    170   while ((skbn = skb_dequeue(&neigh->queue)) != NULL)   in rose_link_rx_restart()
    281   skb_queue_tail(&neigh->queue, skb);   in rose_transmit_link()
|
/net/decnet/ |
D | af_decnet.c |
    1686   struct sk_buff_head *queue = &sk->sk_receive_queue;   in dn_recvmsg() local
    1717   queue = &scp->other_receive_queue;   in dn_recvmsg()
    1750   if (dn_data_ready(sk, queue, flags, target))   in dn_recvmsg()
    1760   sk_wait_event(sk, &timeo, dn_data_ready(sk, queue, flags, target), &wait);   in dn_recvmsg()
    1765   skb_queue_walk_safe(queue, skb, n) {   in dn_recvmsg()
    1784   skb_unlink(skb, queue);   in dn_recvmsg()
    1832   static inline int dn_queue_too_long(struct dn_scp *scp, struct sk_buff_head *queue, int flags)   in dn_queue_too_long() argument
    1835   if (skb_queue_len(queue) >= scp->snd_window)   in dn_queue_too_long()
    1922   struct sk_buff_head *queue = &scp->data_xmit_queue;   in dn_sendmsg() local
    1976   queue = &scp->other_xmit_queue;   in dn_sendmsg()
    [all …]
|