
Searched refs:tq (Results 1 – 25 of 45) sorted by relevance

/kernel/linux/linux-5.10/drivers/net/vmxnet3/
vmxnet3_drv.c 103 vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stopped() argument
105 return tq->stopped; in vmxnet3_tq_stopped()
110 vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_start() argument
112 tq->stopped = false; in vmxnet3_tq_start()
113 netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue); in vmxnet3_tq_start()
118 vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_wake() argument
120 tq->stopped = false; in vmxnet3_tq_wake()
121 netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue)); in vmxnet3_tq_wake()
126 vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter) in vmxnet3_tq_stop() argument
128 tq->stopped = true; in vmxnet3_tq_stop()
[all …]
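
Note: in the start/wake helpers above, the subqueue number passed to netif_start_subqueue()/netif_wake_subqueue() comes from pointer arithmetic: tq - adapter->tx_queue is the element index of this queue within the adapter's queue array. A minimal standalone sketch of that idiom (toy struct and names, not the driver's own):

#include <stdio.h>
#include <stddef.h>

struct txq { int stopped; };

int main(void)
{
        struct txq tx_queue[4];            /* per-adapter array, like adapter->tx_queue */
        struct txq *tq = &tx_queue[2];     /* pointer to one queue in that array */

        /* Subtracting the array base from an element pointer yields the element
         * index; this is the subqueue number handed to the netdev subqueue API. */
        ptrdiff_t index = tq - tx_queue;
        printf("subqueue index = %td\n", index);   /* prints 2 */
        return 0;
}
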
vmxnet3_ethtool.c 478 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i]; in vmxnet3_get_regs() local
483 buf[j++] = VMXNET3_GET_ADDR_LO(tq->tx_ring.basePA); in vmxnet3_get_regs()
484 buf[j++] = VMXNET3_GET_ADDR_HI(tq->tx_ring.basePA); in vmxnet3_get_regs()
485 buf[j++] = tq->tx_ring.size; in vmxnet3_get_regs()
486 buf[j++] = tq->tx_ring.next2fill; in vmxnet3_get_regs()
487 buf[j++] = tq->tx_ring.next2comp; in vmxnet3_get_regs()
488 buf[j++] = tq->tx_ring.gen; in vmxnet3_get_regs()
490 buf[j++] = VMXNET3_GET_ADDR_LO(tq->data_ring.basePA); in vmxnet3_get_regs()
491 buf[j++] = VMXNET3_GET_ADDR_HI(tq->data_ring.basePA); in vmxnet3_get_regs()
492 buf[j++] = tq->data_ring.size; in vmxnet3_get_regs()
[all …]
/kernel/linux/linux-5.10/drivers/net/wireless/ath/ath5k/
qcu.c 286 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; in ath5k_hw_set_tx_retry_limits() local
292 (tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S) in ath5k_hw_set_tx_retry_limits()
326 struct ath5k_txq_info *tq = &ah->ah_txq[queue]; in ath5k_hw_reset_tx_queue() local
333 (tq->tqi_type == AR5K_TX_QUEUE_INACTIVE)) in ath5k_hw_reset_tx_queue()
341 AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) | in ath5k_hw_reset_tx_queue()
342 AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) | in ath5k_hw_reset_tx_queue()
343 AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS), in ath5k_hw_reset_tx_queue()
366 if (tq->tqi_cbr_period) { in ath5k_hw_reset_tx_queue()
367 ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period, in ath5k_hw_reset_tx_queue()
369 AR5K_REG_SM(tq->tqi_cbr_overflow_limit, in ath5k_hw_reset_tx_queue()
[all …]
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4vf/
sge.c 233 static inline unsigned int txq_avail(const struct sge_txq *tq) in txq_avail() argument
235 return tq->size - 1 - tq->in_use; in txq_avail()
307 const struct ulptx_sgl *sgl, const struct sge_txq *tq) in unmap_sgl() argument
326 if (likely((u8 *)(p + 1) <= (u8 *)tq->stat)) { in unmap_sgl()
333 } else if ((u8 *)p == (u8 *)tq->stat) { in unmap_sgl()
334 p = (const struct ulptx_sge_pair *)tq->desc; in unmap_sgl()
336 } else if ((u8 *)p + 8 == (u8 *)tq->stat) { in unmap_sgl()
337 const __be64 *addr = (const __be64 *)tq->desc; in unmap_sgl()
345 const __be64 *addr = (const __be64 *)tq->desc; in unmap_sgl()
357 if ((u8 *)p == (u8 *)tq->stat) in unmap_sgl()
[all …]
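
Note: txq_avail() above returns size - 1 - in_use because the TX descriptor ring always keeps one slot unused: with producer and consumer indices alone, a completely full ring would otherwise be indistinguishable from an empty one. A small standalone illustration of that ring-buffer convention (generic names, not the cxgb4vf structures):

#include <stdio.h>

/* Classic ring-buffer occupancy with one slot reserved:
 * when producer == consumer the ring is empty, so "full" must stop one short. */
static unsigned int ring_avail(unsigned int size, unsigned int in_use)
{
        return size - 1 - in_use;
}

int main(void)
{
        printf("avail in an empty 256-entry ring: %u\n", ring_avail(256, 0));   /* 255 */
        printf("avail in a full 256-entry ring:  %u\n", ring_avail(256, 255));  /* 0 */
        return 0;
}
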
/kernel/linux/linux-5.10/drivers/media/v4l2-core/
v4l2-jpeg.c 211 int c, h_v, tq; in jpeg_parse_frame_header() local
232 tq = jpeg_get_byte(stream); in jpeg_parse_frame_header()
233 if (tq < 0) in jpeg_parse_frame_header()
234 return tq; in jpeg_parse_frame_header()
241 component->quantization_table_selector = tq; in jpeg_parse_frame_header()
317 u8 pq, tq, *qk; in jpeg_parse_quantization_tables() local
335 tq = pq_tq & 0xf; in jpeg_parse_quantization_tables()
336 if (tq > 3) in jpeg_parse_quantization_tables()
346 tables[tq].start = qk; in jpeg_parse_quantization_tables()
347 tables[tq].length = pq ? 128 : 64; in jpeg_parse_quantization_tables()
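
Note: the quantization-table parser above follows the JPEG (ITU-T T.81) DQT segment layout: each table starts with one byte whose high nibble Pq gives the element precision (0 for 8-bit, 1 for 16-bit entries) and whose low nibble Tq selects one of four table destinations, so the table body is 64 or 128 bytes. A standalone sketch of decoding that byte (example value only):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint8_t pq_tq = 0x12;           /* example DQT byte: Pq = 1, Tq = 2 */

        unsigned int pq = pq_tq >> 4;   /* precision: 0 = 8-bit, 1 = 16-bit entries */
        unsigned int tq = pq_tq & 0xf;  /* table destination id, must be 0..3 */

        if (tq > 3)
                return 1;               /* invalid destination, as the parser rejects */

        /* 64 quantization entries of 1 or 2 bytes each */
        unsigned int length = pq ? 128 : 64;

        printf("Pq=%u Tq=%u table length=%u bytes\n", pq, tq, length);
        return 0;
}
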
/kernel/linux/linux-5.10/net/tipc/
trace.h 284 TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
286 TP_ARGS(r, f, t, tq),
301 __entry->len = skb_queue_len(tq);
303 msg_seqno(buf_msg(skb_peek(tq))) : 0;
305 msg_seqno(buf_msg(skb_peek_tail(tq))) : 0;
314 TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
315 TP_ARGS(r, f, t, tq),
320 TP_PROTO(struct tipc_link *r, u16 f, u16 t, struct sk_buff_head *tq),
321 TP_ARGS(r, f, t, tq),
/kernel/linux/linux-5.10/drivers/net/
ifb.c 51 struct sk_buff_head tq; member
69 skb = skb_peek(&txp->tq); in ifb_ri_tasklet()
73 skb_queue_splice_tail_init(&txp->rq, &txp->tq); in ifb_ri_tasklet()
77 while ((skb = __skb_dequeue(&txp->tq)) != NULL) { in ifb_ri_tasklet()
94 if (skb_queue_len(&txp->tq) != 0) in ifb_ri_tasklet()
172 __skb_queue_head_init(&txp->tq); in ifb_dev_init()
206 __skb_queue_purge(&txp->tq); in ifb_dev_free()
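
Note: ifb keeps two queues per tx path: incoming skbs are queued on rq, and the tasklet splices the whole of rq onto its private tq in one operation before draining tq packet by packet, so the producer-facing queue is held only briefly. A rough single-threaded sketch of that splice-then-drain shape with a toy linked-list queue (toy types, not the skbuff API; the real splice runs under the queue lock):

#include <stdio.h>
#include <stdlib.h>

struct node { int val; struct node *next; };
struct queue { struct node *head, *tail; };

static void enqueue(struct queue *q, int val)
{
        struct node *n = malloc(sizeof(*n));
        n->val = val;
        n->next = NULL;
        if (q->tail)
                q->tail->next = n;
        else
                q->head = n;
        q->tail = n;
}

/* Move everything from src to the tail of dst in O(1),
 * analogous to skb_queue_splice_tail_init(). */
static void splice_tail_init(struct queue *src, struct queue *dst)
{
        if (!src->head)
                return;
        if (dst->tail)
                dst->tail->next = src->head;
        else
                dst->head = src->head;
        dst->tail = src->tail;
        src->head = src->tail = NULL;
}

int main(void)
{
        struct queue rq = { 0 }, tq = { 0 };
        enqueue(&rq, 1);
        enqueue(&rq, 2);
        enqueue(&rq, 3);

        /* grab the whole pending backlog in one step, then drain privately */
        splice_tail_init(&rq, &tq);

        for (struct node *n = tq.head; n; ) {
                struct node *next = n->next;
                printf("forwarding packet %d\n", n->val);
                free(n);
                n = next;
        }
        return 0;
}
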
/kernel/linux/linux-5.10/drivers/input/serio/
hp_sdc.c 190 curr = hp_sdc.tq[hp_sdc.rcurr]; in hp_sdc_take()
316 curr = hp_sdc.tq[hp_sdc.rcurr]; in hp_sdc_tasklet()
378 if (hp_sdc.tq[curridx] != NULL) in hp_sdc_put()
392 if (hp_sdc.tq[curridx] != NULL) in hp_sdc_put()
412 curr = hp_sdc.tq[curridx]; in hp_sdc_put()
416 hp_sdc.tq[curridx] = NULL; in hp_sdc_put()
430 hp_sdc.tq[curridx] = NULL; in hp_sdc_put()
573 hp_sdc.tq[curridx] = NULL; in hp_sdc_put()
609 if (hp_sdc.tq[i] == this) in __hp_sdc_enqueue_transaction()
617 if (hp_sdc.tq[i] == NULL) { in __hp_sdc_enqueue_transaction()
[all …]
/kernel/linux/linux-5.10/arch/arm/boot/dts/
imx7s-mba7.dts 6 * Author: Markus Niebel <Markus.Niebel@tq-group.com>
17 compatible = "tq,imx7s-mba7", "fsl,imx7s";
imx53-mba53.dts 12 compatible = "tq,mba53", "tq,tqma53", "fsl,imx53";
65 compatible = "tq,imx53-mba53-sgtl5000",
imx7d-mba7.dts 6 * Author: Markus Niebel <Markus.Niebel@tq-group.com>
17 compatible = "tq,imx7d-mba7", "fsl,imx7d";
imx6q-tqma6a.dtsi 4 * Copyright 2013-2017 Markus Niebel <Markus.Niebel@tq-group.com>
imx6dl-tqma6b.dtsi 4 * Copyright 2013-2017 Markus Niebel <Markus.Niebel@tq-group.com>
imx6dl-tqma6a.dtsi 4 * Copyright 2013-2017 Markus Niebel <Markus.Niebel@tq-group.com>
imx7s-tqma7.dtsi 6 * Author: Markus Niebel <Markus.Niebel@tq-group.com>
imx6qdl-tqma6b.dtsi 4 * Copyright 2013-2017 Markus Niebel <Markus.Niebel@tq-group.com>
imx6qdl-tqma6a.dtsi 4 * Copyright 2013-2017 Markus Niebel <Markus.Niebel@tq-group.com>
imx7d-tqma7.dtsi 6 * Author: Markus Niebel <Markus.Niebel@tq-group.com>
/kernel/linux/linux-5.10/drivers/input/keyboard/
sunkbd.c 63 struct work_struct tq; member
103 schedule_work(&sunkbd->tq); in sunkbd_interrupt()
230 struct sunkbd *sunkbd = container_of(work, struct sunkbd, tq); in sunkbd_reinit()
253 cancel_work_sync(&sunkbd->tq); in sunkbd_enable()
277 INIT_WORK(&sunkbd->tq, sunkbd_reinit); in sunkbd_connect()
lkkbd.c 270 struct work_struct tq; member
457 schedule_work(&lk->tq); in lkkbd_interrupt()
568 struct lkkbd *lk = container_of(work, struct lkkbd, tq); in lkkbd_reinit()
623 INIT_WORK(&lk->tq, lkkbd_reinit); in lkkbd_connect()
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/
i915_gem_context.c 656 struct i915_request *tq[5] = {}; in igt_ctx_exec() local
714 err = throttle(ce, tq, ARRAY_SIZE(tq)); in igt_ctx_exec()
749 throttle_release(tq, ARRAY_SIZE(tq)); in igt_ctx_exec()
766 struct i915_request *tq[5] = {}; in igt_shared_ctx_exec() local
852 err = throttle(ce, tq, ARRAY_SIZE(tq)); in igt_shared_ctx_exec()
888 throttle_release(tq, ARRAY_SIZE(tq)); in igt_shared_ctx_exec()
1355 struct i915_request *tq[5] = {}; in igt_ctx_readonly() local
1430 err = throttle(ce, tq, ARRAY_SIZE(tq)); in igt_ctx_readonly()
1466 throttle_release(tq, ARRAY_SIZE(tq)); in igt_ctx_readonly()
/kernel/linux/linux-5.10/net/batman-adv/
bat_iv_ogm.c 220 batadv_ogm_packet->tq = BATADV_TQ_MAX_VALUE; in batadv_iv_ogm_iface_enable()
296 static u8 batadv_hop_penalty(u8 tq, const struct batadv_priv *bat_priv) in batadv_hop_penalty() argument
301 new_tq = tq * (BATADV_TQ_MAX_VALUE - hop_penalty); in batadv_hop_penalty()
375 batadv_ogm_packet->tq, batadv_ogm_packet->ttl, in batadv_iv_ogm_send_to_if()
719 batadv_ogm_packet->tq = batadv_hop_penalty(batadv_ogm_packet->tq, in batadv_iv_ogm_forward()
724 batadv_ogm_packet->tq, batadv_ogm_packet->ttl); in batadv_iv_ogm_forward()
1002 batadv_ogm_packet->tq); in batadv_iv_ogm_orig_update()
1174 combined_tq = batadv_ogm_packet->tq * in batadv_iv_ogm_calc_tq()
1181 batadv_ogm_packet->tq = combined_tq; in batadv_iv_ogm_calc_tq()
1187 tq_iface_hop_penalty, batadv_ogm_packet->tq, in batadv_iv_ogm_calc_tq()
[all …]
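
Note: in batman-adv, tq is the OGM's transmission-quality metric, ranging from 0 to BATADV_TQ_MAX_VALUE (255). batadv_hop_penalty() shrinks it at every forwarding hop as new_tq = tq * (255 - hop_penalty) / 255, and batadv_iv_ogm_calc_tq() further multiplies in the receiving link's own quality. A worked standalone example of the per-hop scaling (the penalty value below is only illustrative):

#include <stdio.h>
#include <stdint.h>

#define TQ_MAX_VALUE 255u   /* BATADV_TQ_MAX_VALUE */

/* Scale a TQ value down by a per-hop penalty:
 * new_tq = tq * (TQ_MAX - penalty) / TQ_MAX, rounded down. */
static uint8_t hop_penalty(uint8_t tq, unsigned int penalty)
{
        return (unsigned int)tq * (TQ_MAX_VALUE - penalty) / TQ_MAX_VALUE;
}

int main(void)
{
        uint8_t tq = TQ_MAX_VALUE;      /* a freshly generated OGM starts at 255 */
        unsigned int penalty = 30;      /* illustrative hop penalty */

        for (int hop = 1; hop <= 4; hop++) {
                tq = hop_penalty(tq, penalty);
                printf("after hop %d: tq = %u\n", hop, tq);
        }
        return 0;
}
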
/kernel/linux/patches/linux-5.10/prebuilts/usr/include/linux/can/
netlink.h 25 __u32 tq; member
/kernel/linux/linux-5.10/include/uapi/linux/can/
netlink.h 34 __u32 tq; /* Time quanta (TQ) in nanoseconds */ member
/kernel/linux/patches/linux-4.19/prebuilts/usr/include/linux/can/
netlink.h 12 __u32 tq; member
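
Note: in the CAN netlink ABI, tq is the bit-timing time quantum in nanoseconds, i.e. the prescaled controller clock period. One bit time is the sync segment (always 1 tq) plus prop_seg, phase_seg1 and phase_seg2, all expressed in tq. A small arithmetic sketch with made-up example values (not taken from any particular controller):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        /* Example configuration, not from any specific controller. */
        uint32_t clock_hz   = 40000000;  /* CAN core clock: 40 MHz            */
        uint32_t brp        = 5;         /* bit-rate prescaler                */
        uint32_t prop_seg   = 6;         /* segments measured in time quanta  */
        uint32_t phase_seg1 = 7;
        uint32_t phase_seg2 = 2;

        /* tq in nanoseconds: prescaled clock period */
        uint32_t tq_ns = (uint64_t)brp * 1000000000ull / clock_hz;    /* 125 ns */

        /* one bit = sync segment (always 1 tq) + the three programmable segments */
        uint32_t tq_per_bit = 1 + prop_seg + phase_seg1 + phase_seg2; /* 16 tq   */

        uint32_t bitrate = 1000000000u / (tq_ns * tq_per_bit);        /* 500000  */

        printf("tq = %u ns, %u tq/bit, bitrate = %u bit/s\n",
               tq_ns, tq_per_bit, bitrate);
        return 0;
}
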
