/drivers/net/wireless/b43legacy/ |
D | pio.c |
    34  static void tx_start(struct b43legacy_pioqueue *queue)  in tx_start() argument
    36      b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_start()
    40  static void tx_octet(struct b43legacy_pioqueue *queue,  in tx_octet() argument
    43      if (queue->need_workarounds) {  in tx_octet()
    44          b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
    45          b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
    48          b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,  in tx_octet()
    50          b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);  in tx_octet()
    75  static void tx_data(struct b43legacy_pioqueue *queue,  in tx_data() argument
    83      if (queue->need_workarounds) {  in tx_data()
    [all …]
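
The matches in tx_octet() show a register-ordering workaround: with need_workarounds set, the data register is written before the control kick; otherwise control goes first, then data. A minimal sketch of that branch, with stubbed register I/O; the control flag value is truncated in the listing, so CTL_WRITELO below is purely a placeholder:

    #include <stdint.h>

    /* Illustrative register offsets and flag; the real values are elided above. */
    enum { PIO_TXCTL = 0x0, PIO_TXDATA = 0x2, CTL_WRITELO = 0x1 };

    struct pioq { int need_workarounds; };

    /* Stand-in for b43legacy_pio_write(): a 16-bit MMIO register write. */
    static void write16(struct pioq *q, int reg, uint16_t v)
    {
        (void)q; (void)reg; (void)v;
    }

    static void tx_octet(struct pioq *q, uint8_t octet)
    {
        if (q->need_workarounds) {
            write16(q, PIO_TXDATA, octet);      /* data before the control kick */
            write16(q, PIO_TXCTL, CTL_WRITELO);
        } else {
            write16(q, PIO_TXCTL, CTL_WRITELO); /* normal order: control first */
            write16(q, PIO_TXDATA, octet);
        }
    }
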
|
D | pio.h |
    42   struct b43legacy_pioqueue *queue;  member
    48   (packet)->queue->tx_packets_cache))
    87   u16 b43legacy_pio_read(struct b43legacy_pioqueue *queue,  in b43legacy_pio_read() argument
    90       return b43legacy_read16(queue->dev, queue->mmio_base + offset);  in b43legacy_pio_read()
    94   void b43legacy_pio_write(struct b43legacy_pioqueue *queue,  in b43legacy_pio_write() argument
    97       b43legacy_write16(queue->dev, queue->mmio_base + offset, value);  in b43legacy_pio_write()
    111  void b43legacy_pio_rx(struct b43legacy_pioqueue *queue);
    114  void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue);
    115  void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue);
    148  void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)  in b43legacy_pio_rx() argument
    [all …]
|
/drivers/media/video/uvc/ |
D | uvc_queue.c |
    81   void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)  in uvc_queue_init() argument
    83       mutex_init(&queue->mutex);  in uvc_queue_init()
    84       spin_lock_init(&queue->irqlock);  in uvc_queue_init()
    85       INIT_LIST_HEAD(&queue->mainqueue);  in uvc_queue_init()
    86       INIT_LIST_HEAD(&queue->irqqueue);  in uvc_queue_init()
    87       queue->type = type;  in uvc_queue_init()
    98   int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,  in uvc_alloc_buffers() argument
    109      mutex_lock(&queue->mutex);  in uvc_alloc_buffers()
    111      if ((ret = uvc_free_buffers(queue)) < 0)  in uvc_alloc_buffers()
    131      memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);  in uvc_alloc_buffers()
    [all …]
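
uvc_queue_init sets up two lists under two different locks: mainqueue keeps buffers in the order the application queued them (ioctl paths, serialized by the mutex), while irqqueue holds the buffers the URB completion handler may fill (protected by the spinlock, since that path cannot sleep). A userspace sketch of the same shape, with pthread primitives standing in for the kernel ones:

    #include <pthread.h>

    struct list_head { struct list_head *next, *prev; };

    static void init_list_head(struct list_head *h) { h->next = h->prev = h; }

    struct video_queue {
        pthread_mutex_t    mutex;      /* serializes ioctl-side users */
        pthread_spinlock_t irqlock;    /* guards irqqueue against the IRQ path */
        struct list_head   mainqueue;  /* buffers in application order */
        struct list_head   irqqueue;   /* buffers available to the completion handler */
        int                type;
    };

    static void queue_init(struct video_queue *q, int type)
    {
        pthread_mutex_init(&q->mutex, NULL);
        pthread_spin_init(&q->irqlock, PTHREAD_PROCESS_PRIVATE);
        init_list_head(&q->mainqueue);
        init_list_head(&q->irqqueue);
        q->type = type;
    }
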
|
D | uvc_video.c |
    417  struct uvc_video_queue *queue = &video->queue;  in uvc_video_decode_data() local
    426  mem = queue->mem + buf->buf.m.offset + buf->buf.bytesused;  in uvc_video_decode_data()
    475  struct uvc_video_queue *queue = &video->queue;  in uvc_video_encode_data() local
    480  mem = queue->mem + buf->buf.m.offset + queue->buf_used;  in uvc_video_encode_data()
    481  nbytes = min((unsigned int)len, buf->buf.bytesused - queue->buf_used);  in uvc_video_encode_data()
    486  queue->buf_used += nbytes;  in uvc_video_encode_data()
    517  buf = uvc_queue_next_buffer(&video->queue, buf);  in uvc_video_decode_isoc()
    533  buf = uvc_queue_next_buffer(&video->queue, buf);  in uvc_video_decode_isoc()
    554  buf = uvc_queue_next_buffer(&video->queue, buf);  in uvc_video_decode_bulk()
    588  buf = uvc_queue_next_buffer(&video->queue, buf);  in uvc_video_decode_bulk()
    [all …]
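
Both paths compute the working address as base + buffer offset + bytes already consumed, and the encode path clamps the copy with min() so it never runs past the buffer before advancing buf_used. The clamping idiom in isolation, as a hedged sketch (names follow the snippet; the caller is assumed to keep *used <= capacity):

    #include <string.h>

    /* Copy at most the room remaining in the destination; return how many
     * bytes were consumed so the caller can advance its source pointer. */
    static unsigned int copy_clamped(unsigned char *mem, unsigned int *used,
                                     unsigned int capacity,
                                     const unsigned char *src, unsigned int len)
    {
        unsigned int room   = capacity - *used;
        unsigned int nbytes = len < room ? len : room;

        memcpy(mem + *used, src, nbytes);
        *used += nbytes;
        return nbytes;
    }
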
|
/drivers/infiniband/hw/ehca/ |
D | ipz_pt_fn.c |
    49  void *ipz_qpageit_get_inc(struct ipz_queue *queue)  in ipz_qpageit_get_inc() argument
    51      void *ret = ipz_qeit_get(queue);  in ipz_qpageit_get_inc()
    52      queue->current_q_offset += queue->pagesize;  in ipz_qpageit_get_inc()
    53      if (queue->current_q_offset > queue->queue_length) {  in ipz_qpageit_get_inc()
    54          queue->current_q_offset -= queue->pagesize;  in ipz_qpageit_get_inc()
    57      if (((u64)ret) % queue->pagesize) {  in ipz_qpageit_get_inc()
    64  void *ipz_qeit_eq_get_inc(struct ipz_queue *queue)  in ipz_qeit_eq_get_inc() argument
    66      void *ret = ipz_qeit_get(queue);  in ipz_qeit_eq_get_inc()
    67      u64 last_entry_in_q = queue->queue_length - queue->qe_size;  in ipz_qeit_eq_get_inc()
    69      queue->current_q_offset += queue->qe_size;  in ipz_qeit_eq_get_inc()
    [all …]
|
D | ipz_pt_fn.h |
    93   static inline void *ipz_qeit_calc(struct ipz_queue *queue, u64 q_offset)  in ipz_qeit_calc() argument
    96       if (q_offset >= queue->queue_length)  in ipz_qeit_calc()
    98       current_page = (queue->queue_pages)[q_offset >> EHCA_PAGESHIFT];  in ipz_qeit_calc()
    106  static inline void *ipz_qeit_get(struct ipz_queue *queue)  in ipz_qeit_get() argument
    108      return ipz_qeit_calc(queue, queue->current_q_offset);  in ipz_qeit_get()
    118  void *ipz_qpageit_get_inc(struct ipz_queue *queue);
    126  static inline void *ipz_qeit_get_inc(struct ipz_queue *queue)  in ipz_qeit_get_inc() argument
    128      void *ret = ipz_qeit_get(queue);  in ipz_qeit_get_inc()
    129      queue->current_q_offset += queue->qe_size;  in ipz_qeit_get_inc()
    130      if (queue->current_q_offset >= queue->queue_length) {  in ipz_qeit_get_inc()
    [all …]
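
The ipz helpers implement a queue stored in discontiguous pages: ipz_qeit_calc turns a byte offset into an address by indexing the page table with offset >> EHCA_PAGESHIFT, and ipz_qeit_get_inc advances the offset by one entry, wrapping to zero at queue_length. A minimal userspace model of that addressing (field and function names are illustrative, not the driver's):

    #include <stdint.h>

    #define PAGESHIFT 12
    #define PAGESIZE  (1UL << PAGESHIFT)

    struct paged_queue {
        void    **pages;           /* one pointer per backing page */
        uint64_t  current_offset;  /* byte offset of the next entry */
        uint64_t  queue_length;    /* total queue size in bytes */
        uint64_t  qe_size;         /* size of one queue entry */
    };

    /* Map a byte offset to an address: pick the page, then the offset within it. */
    static void *qeit_calc(struct paged_queue *q, uint64_t off)
    {
        if (off >= q->queue_length)
            return NULL;
        uint8_t *page = q->pages[off >> PAGESHIFT];
        return page + (off & (PAGESIZE - 1));
    }

    /* Return the current entry and advance, wrapping to offset 0 at the end. */
    static void *qeit_get_inc(struct paged_queue *q)
    {
        void *ret = qeit_calc(q, q->current_offset);

        q->current_offset += q->qe_size;
        if (q->current_offset >= q->queue_length)
            q->current_offset = 0;
        return ret;
    }

The ehea variant further down (hw_qeit_calc in ehea_qmr.h) uses the same page-table lookup but wraps the offset by subtracting queue_length instead of rejecting it.
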
|
/drivers/net/wireless/rt2x00/ |
D | rt2x00queue.c |
    46   frame_size = entry->queue->data_size + entry->queue->desc_size;  in rt2x00queue_alloc_rxskb()
    154  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00queue_create_tx_descriptor()
    171  txdesc->queue = entry->queue->qid;  in rt2x00queue_create_tx_descriptor()
    172  txdesc->cw_min = entry->queue->cw_min;  in rt2x00queue_create_tx_descriptor()
    173  txdesc->cw_max = entry->queue->cw_max;  in rt2x00queue_create_tx_descriptor()
    174  txdesc->aifs = entry->queue->aifs;  in rt2x00queue_create_tx_descriptor()
    325  struct data_queue *queue = entry->queue;  in rt2x00queue_write_tx_descriptor() local
    326  struct rt2x00_dev *rt2x00dev = queue->rt2x00dev;  in rt2x00queue_write_tx_descriptor()
    346  if (entry->queue->qid == QID_BEACON)  in rt2x00queue_write_tx_descriptor()
    349  if (rt2x00queue_threshold(queue) ||  in rt2x00queue_write_tx_descriptor()
    [all …]
|
D | rt2x00usb.c |
    185  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00usb_interrupt_txdone()
    213  struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00usb_write_tx_data()
    222  skb_push(entry->skb, entry->queue->desc_size);  in rt2x00usb_write_tx_data()
    223  memset(entry->skb->data, 0, entry->queue->desc_size);  in rt2x00usb_write_tx_data()
    230  skbdesc->desc_len = entry->queue->desc_size;  in rt2x00usb_write_tx_data()
    240  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),  in rt2x00usb_write_tx_data()
    248  skb_pull(entry->skb, entry->queue->desc_size);  in rt2x00usb_write_tx_data()
    265  struct data_queue *queue = rt2x00queue_get_queue(rt2x00dev, qid);  in rt2x00usb_kick_tx_queue() local
    277  spin_lock_irqsave(&queue->lock, irqflags);  in rt2x00usb_kick_tx_queue()
    278  index = queue->index[Q_INDEX];  in rt2x00usb_kick_tx_queue()
    [all …]
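
rt2x00usb_write_tx_data prepends the TX descriptor in the skb headroom: skb_push grows the buffer downward to make room before the payload, the descriptor is zeroed and filled there, and once the URB has been prepared skb_pull restores the original data pointer. A byte-buffer model of that push/fill/pull dance (a sketch; struct and function names are stand-ins for the skb API):

    #include <string.h>

    struct buf_model {
        unsigned char *data;   /* current start of valid data */
        unsigned int   len;    /* valid bytes from data onward */
    };

    /* Grow the buffer downward into the headroom (the skb_push idea). */
    static unsigned char *push(struct buf_model *b, unsigned int n)
    {
        b->data -= n;
        b->len  += n;
        return b->data;
    }

    /* Shrink from the front (the skb_pull idea). */
    static unsigned char *pull(struct buf_model *b, unsigned int n)
    {
        b->data += n;
        b->len  -= n;
        return b->data;
    }

    /* Prepend a zeroed descriptor, hand [data, len) to the device, restore. */
    static void write_tx_data(struct buf_model *skb, unsigned int desc_size)
    {
        push(skb, desc_size);
        memset(skb->data, 0, desc_size);  /* driver fills the real fields here */
        /* ... submit skb->data / skb->len as the bulk-out URB ... */
        pull(skb, desc_size);
    }
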
|
D | rt2x00pci.c |
    64   struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;  in rt2x00pci_write_tx_data()
    77   entry->queue->qid, DRV_PROJECT);  in rt2x00pci_write_tx_data()
    86   skbdesc->desc_len = entry->queue->desc_size;  in rt2x00pci_write_tx_data()
    97   struct data_queue *queue = rt2x00dev->rx;  in rt2x00pci_rxdone() local
    103  entry = rt2x00queue_get_entry(queue, Q_INDEX);  in rt2x00pci_rxdone()
    114  skbdesc->desc_len = entry->queue->desc_size;  in rt2x00pci_rxdone()
    128  struct data_queue *queue)  in rt2x00pci_alloc_queue_dma() argument
    139  queue->limit * queue->desc_size,  in rt2x00pci_alloc_queue_dma()
    144  memset(addr, 0, queue->limit * queue->desc_size);  in rt2x00pci_alloc_queue_dma()
    149  for (i = 0; i < queue->limit; i++) {  in rt2x00pci_alloc_queue_dma()
    [all …]
|
D | rt2x00mac.c |
    33   struct data_queue *queue,  in rt2x00mac_tx_rts_cts() argument
    94   retval = rt2x00queue_write_tx_frame(queue, skb);  in rt2x00mac_tx_rts_cts()
    109  struct data_queue *queue;  in rt2x00mac_tx() local
    126  queue = rt2x00queue_get_queue(rt2x00dev, QID_ATIM);  in rt2x00mac_tx()
    128  queue = rt2x00queue_get_queue(rt2x00dev, qid);  in rt2x00mac_tx()
    129  if (unlikely(!queue)) {  in rt2x00mac_tx()
    149  if (rt2x00queue_available(queue) <= 1)  in rt2x00mac_tx()
    152  if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb))  in rt2x00mac_tx()
    156  if (rt2x00queue_write_tx_frame(queue, skb))  in rt2x00mac_tx()
    159  if (rt2x00queue_threshold(queue))  in rt2x00mac_tx()
    [all …]
|
D | rt2x00queue.h |
    278  enum data_queue_qid queue;  member
    332  struct data_queue *queue;  member
    513  static inline int rt2x00queue_empty(struct data_queue *queue)  in rt2x00queue_empty() argument
    515      return queue->length == 0;  in rt2x00queue_empty()
    522  static inline int rt2x00queue_full(struct data_queue *queue)  in rt2x00queue_full() argument
    524      return queue->length == queue->limit;  in rt2x00queue_full()
    531  static inline int rt2x00queue_available(struct data_queue *queue)  in rt2x00queue_available() argument
    533      return queue->limit - queue->length;  in rt2x00queue_available()
    540  static inline int rt2x00queue_threshold(struct data_queue *queue)  in rt2x00queue_threshold() argument
    542      return rt2x00queue_available(queue) < queue->threshold;  in rt2x00queue_threshold()
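
The rt2x00 helpers reduce queue state to a length/limit pair: empty when length is zero, full when length reaches limit, plus a threshold test that fires when free space drops below a watermark. The same predicates on a generic counter-based queue (field names follow the snippet; the struct itself is a stand-in):

    struct data_queue_model {
        unsigned int length;     /* entries currently queued */
        unsigned int limit;      /* capacity of the queue */
        unsigned int threshold;  /* low watermark of free entries */
    };

    static inline int queue_empty(const struct data_queue_model *q)
    {
        return q->length == 0;
    }

    static inline int queue_full(const struct data_queue_model *q)
    {
        return q->length == q->limit;
    }

    static inline int queue_available(const struct data_queue_model *q)
    {
        return q->limit - q->length;
    }

    /* True once free space drops below the watermark, so the TX path can
     * stop the upper-layer queue before it can overflow. */
    static inline int queue_threshold(const struct data_queue_model *q)
    {
        return queue_available(q) < q->threshold;
    }

rt2x00mac.c above pairs these: it bails out of the RTS/CTS path when rt2x00queue_available(queue) <= 1, because protecting a frame consumes a second queue entry, and stops the queue once rt2x00queue_threshold(queue) trips.
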
|
/drivers/scsi/arm/ |
D | queue.c |
    58  int queue_initialise (Queue_t *queue)  in queue_initialise() argument
    63      spin_lock_init(&queue->queue_lock);  in queue_initialise()
    64      INIT_LIST_HEAD(&queue->head);  in queue_initialise()
    65      INIT_LIST_HEAD(&queue->free);  in queue_initialise()
    73      queue->alloc = q = kmalloc(sizeof(QE_t) * nqueues, GFP_KERNEL);  in queue_initialise()
    78      list_add(&q->list, &queue->free);  in queue_initialise()
    82      return queue->alloc != NULL;  in queue_initialise()
    90  void queue_free (Queue_t *queue)  in queue_free() argument
    92      if (!list_empty(&queue->head))  in queue_free()
    93          printk(KERN_WARNING "freeing non-empty queue %p\n", queue);  in queue_free()
    [all …]
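
queue_initialise carves a single kmalloc'ed array of QE_t entries into a free list, so later queue_add calls only move entries between the free and head lists instead of allocating under a spinlock. A userspace sketch of the preallocation step, using a singly linked list in place of the kernel's doubly linked list_head:

    #include <stdlib.h>

    struct qe {
        struct qe *next;
        void      *data;      /* would hold the SCSI command pointer */
    };

    struct queue_model {
        struct qe *head;      /* queued entries */
        struct qe *free;      /* preallocated spares */
        struct qe *alloc;     /* the single backing allocation */
    };

    /* Preallocate nqueues entries and thread them onto the free list;
     * returns nonzero on success, mirroring queue_initialise(). */
    static int queue_initialise(struct queue_model *q, unsigned int nqueues)
    {
        q->head = q->free = NULL;
        q->alloc = calloc(nqueues, sizeof(*q->alloc));
        if (!q->alloc)
            return 0;
        for (unsigned int i = 0; i < nqueues; i++) {
            q->alloc[i].next = q->free;   /* push onto the free list */
            q->free = &q->alloc[i];
        }
        return 1;
    }
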
|
D | queue.h |
    25  extern int queue_initialise (Queue_t *queue);
    32  extern void queue_free (Queue_t *queue);
    40  extern struct scsi_cmnd *queue_remove (Queue_t *queue);
    49  extern struct scsi_cmnd *queue_remove_exclude(Queue_t *queue,
    52  #define queue_add_cmd_ordered(queue,SCpnt) \  argument
    53      __queue_add(queue,SCpnt,(SCpnt)->cmnd[0] == REQUEST_SENSE)
    54  #define queue_add_cmd_tail(queue,SCpnt) \  argument
    55      __queue_add(queue,SCpnt,0)
    64  extern int __queue_add(Queue_t *queue, struct scsi_cmnd *SCpnt, int head);
    75  extern struct scsi_cmnd *queue_remove_tgtluntag(Queue_t *queue, int target,
    [all …]
|
/drivers/net/ehea/ |
D | ehea_qmr.h |
    197  static inline void *hw_qeit_calc(struct hw_queue *queue, u64 q_offset)  in hw_qeit_calc() argument
    201      if (q_offset >= queue->queue_length)  in hw_qeit_calc()
    202          q_offset -= queue->queue_length;  in hw_qeit_calc()
    203      current_page = (queue->queue_pages)[q_offset >> EHEA_PAGESHIFT];  in hw_qeit_calc()
    207  static inline void *hw_qeit_get(struct hw_queue *queue)  in hw_qeit_get() argument
    209      return hw_qeit_calc(queue, queue->current_q_offset);  in hw_qeit_get()
    212  static inline void hw_qeit_inc(struct hw_queue *queue)  in hw_qeit_inc() argument
    214      queue->current_q_offset += queue->qe_size;  in hw_qeit_inc()
    215      if (queue->current_q_offset >= queue->queue_length) {  in hw_qeit_inc()
    216          queue->current_q_offset = 0;  in hw_qeit_inc()
    [all …]
|
/drivers/net/wireless/ath5k/ |
D | qcu.c |
    31  int ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,  in ath5k_hw_get_tx_queueprops() argument
    35      memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));  in ath5k_hw_get_tx_queueprops()
    42  int ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,  in ath5k_hw_set_tx_queueprops() argument
    46      AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);  in ath5k_hw_set_tx_queueprops()
    48      if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)  in ath5k_hw_set_tx_queueprops()
    51      memcpy(&ah->ah_txq[queue], queue_info, sizeof(struct ath5k_txq_info));  in ath5k_hw_set_tx_queueprops()
    58      ah->ah_txq[queue].tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;  in ath5k_hw_set_tx_queueprops()
    69  unsigned int queue;  in ath5k_hw_setup_tx_queue() local
    81      queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;  in ath5k_hw_setup_tx_queue()
    85      queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;  in ath5k_hw_setup_tx_queue()
    [all …]
|
D | dma.c |
    126  int ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)  in ath5k_hw_start_tx_dma() argument
    131      AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);  in ath5k_hw_start_tx_dma()
    134      if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)  in ath5k_hw_start_tx_dma()
    143      switch (ah->ah_txq[queue].tqi_type) {  in ath5k_hw_start_tx_dma()
    165      if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))  in ath5k_hw_start_tx_dma()
    169      AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);  in ath5k_hw_start_tx_dma()
    186  int ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)  in ath5k_hw_stop_tx_dma() argument
    192      AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);  in ath5k_hw_stop_tx_dma()
    195      if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)  in ath5k_hw_stop_tx_dma()
    204      switch (ah->ah_txq[queue].tqi_type) {  in ath5k_hw_stop_tx_dma()
    [all …]
|
/drivers/scsi/ibmvscsi/ |
D | rpa_vscsi.c |
    69   static void rpavscsi_release_crq_queue(struct crq_queue *queue,  in rpavscsi_release_crq_queue() argument
    81       queue->msg_token,  in rpavscsi_release_crq_queue()
    82       queue->size * sizeof(*queue->msgs), DMA_BIDIRECTIONAL);  in rpavscsi_release_crq_queue()
    83       free_page((unsigned long)queue->msgs);  in rpavscsi_release_crq_queue()
    93   static struct viosrp_crq *crq_queue_next_crq(struct crq_queue *queue)  in crq_queue_next_crq() argument
    98       spin_lock_irqsave(&queue->lock, flags);  in crq_queue_next_crq()
    99       crq = &queue->msgs[queue->cur];  in crq_queue_next_crq()
    101      if (++queue->cur == queue->size)  in crq_queue_next_crq()
    102          queue->cur = 0;  in crq_queue_next_crq()
    105      spin_unlock_irqrestore(&queue->lock, flags);  in crq_queue_next_crq()
    [all …]
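
crq_queue_next_crq is a classic single-consumer ring walk: take the lock, inspect the slot at cur, and if it holds a valid entry advance cur modulo size. A userspace model with a pthread mutex standing in for spin_lock_irqsave (the valid-flag convention is assumed from context; in the driver the first byte of a CRQ marks it valid):

    #include <pthread.h>
    #include <stddef.h>

    struct crq { unsigned char valid; unsigned char payload[15]; };

    struct crq_ring {
        struct crq     *msgs;
        unsigned int    size, cur;
        pthread_mutex_t lock;
    };

    /* Return the next valid CRQ and advance the cursor, or NULL if none. */
    static struct crq *crq_next(struct crq_ring *q)
    {
        struct crq *c;

        pthread_mutex_lock(&q->lock);
        c = &q->msgs[q->cur];
        if (c->valid) {
            if (++q->cur == q->size)   /* wrap at the end of the ring */
                q->cur = 0;
        } else {
            c = NULL;
        }
        pthread_mutex_unlock(&q->lock);
        return c;
    }
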
|
D | ibmvstgt.c |
    553  static int crq_queue_create(struct crq_queue *queue, struct srp_target *target)  in crq_queue_create() argument
    558      queue->msgs = (struct viosrp_crq *) get_zeroed_page(GFP_KERNEL);  in crq_queue_create()
    559      if (!queue->msgs)  in crq_queue_create()
    561      queue->size = PAGE_SIZE / sizeof(*queue->msgs);  in crq_queue_create()
    563      queue->msg_token = dma_map_single(target->dev, queue->msgs,  in crq_queue_create()
    564          queue->size * sizeof(*queue->msgs),  in crq_queue_create()
    567      if (dma_mapping_error(target->dev, queue->msg_token))  in crq_queue_create()
    570      err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,  in crq_queue_create()
    581      err = h_reg_crq(vport->dma_dev->unit_address, queue->msg_token,  in crq_queue_create()
    599      queue->cur = 0;  in crq_queue_create()
    [all …]
|
/drivers/net/ |
D | eql.c |
    135  static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave);
    142  spin_lock_bh(&eql->queue.lock);  in eql_timer()
    143  head = &eql->queue.all_slaves;  in eql_timer()
    152  eql_kill_one_slave(&eql->queue, slave);  in eql_timer()
    156  spin_unlock_bh(&eql->queue.lock);  in eql_timer()
    181  spin_lock_init(&eql->queue.lock);  in eql_setup()
    182  INIT_LIST_HEAD(&eql->queue.all_slaves);  in eql_setup()
    183  eql->queue.master_dev = dev;  in eql_setup()
    207  BUG_ON(!list_empty(&eql->queue.all_slaves));  in eql_open()
    217  static void eql_kill_one_slave(slave_queue_t *queue, slave_t *slave)  in eql_kill_one_slave() argument
    [all …]
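
eql_timer walks the all_slaves list under queue.lock and kills stale slaves as it goes; removing entries mid-iteration requires the safe-iteration pattern, where the link to the next node survives freeing the current one. A condensed sketch on a singly linked list (the driver uses kernel list_head and list_for_each_safe; this model keeps only the removal logic):

    #include <stdlib.h>

    struct slave { struct slave *next; int dead; };

    /* Remove and free dead slaves; iterating through the pointer-to-pointer
     * means unlinking a node never invalidates the cursor. */
    static void reap_dead(struct slave **head)
    {
        struct slave **pp = head;

        while (*pp) {
            struct slave *s = *pp;

            if (s->dead) {
                *pp = s->next;   /* unlink before freeing */
                free(s);
            } else {
                pp = &s->next;
            }
        }
    }
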
|
/drivers/isdn/i4l/ |
D | isdn_net.h |
    85   lp = nd->queue; /* get lp on top of queue */  in isdn_net_get_locked_lp()
    86   spin_lock(&nd->queue->xmit_lock);  in isdn_net_get_locked_lp()
    87   while (isdn_net_lp_busy(nd->queue)) {  in isdn_net_get_locked_lp()
    88       spin_unlock(&nd->queue->xmit_lock);  in isdn_net_get_locked_lp()
    89       nd->queue = nd->queue->next;  in isdn_net_get_locked_lp()
    90       if (nd->queue == lp) { /* not found -- should never happen */  in isdn_net_get_locked_lp()
    94       spin_lock(&nd->queue->xmit_lock);  in isdn_net_get_locked_lp()
    96   lp = nd->queue;  in isdn_net_get_locked_lp()
    97   nd->queue = nd->queue->next;  in isdn_net_get_locked_lp()
    114  lp = nd->queue;  in isdn_net_add_to_bundle()
    [all …]
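
isdn_net_get_locked_lp walks a circular list of channels starting at nd->queue, skipping busy ones until it either locks a free channel or comes back to where it started; on success it rotates queue to the next entry so successive calls spread transmissions round-robin. A compact model with pthread mutexes in place of spinlocks (types and the busy test are stand-ins):

    #include <pthread.h>
    #include <stddef.h>

    struct link {
        struct link    *next;       /* circular singly linked ring */
        int             busy;
        pthread_mutex_t xmit_lock;
    };

    struct bundle { struct link *queue; };   /* rotating head pointer */

    /* Lock and return a non-busy link (caller unlocks), rotating the head
     * for fairness; NULL if every link in the ring is busy. */
    static struct link *get_locked_link(struct bundle *nd)
    {
        struct link *start = nd->queue;

        pthread_mutex_lock(&nd->queue->xmit_lock);
        while (nd->queue->busy) {
            pthread_mutex_unlock(&nd->queue->xmit_lock);
            nd->queue = nd->queue->next;
            if (nd->queue == start)     /* full circle: all busy */
                return NULL;
            pthread_mutex_lock(&nd->queue->xmit_lock);
        }
        struct link *lp = nd->queue;
        nd->queue = nd->queue->next;    /* rotate for round-robin */
        return lp;
    }
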
|
/drivers/mmc/card/ |
D | queue.c |
    47   struct request_queue *q = mq->queue;  in mmc_queue_thread()
    126  mq->queue = blk_init_queue(mmc_request, lock);  in mmc_init_queue()
    127  if (!mq->queue)  in mmc_init_queue()
    130  mq->queue->queuedata = mq;  in mmc_init_queue()
    133  blk_queue_prep_rq(mq->queue, mmc_prep_request);  in mmc_init_queue()
    134  blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);  in mmc_init_queue()
    135  queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);  in mmc_init_queue()
    160  blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);  in mmc_init_queue()
    161  blk_queue_max_sectors(mq->queue, bouncesz / 512);  in mmc_init_queue()
    162  blk_queue_max_phys_segments(mq->queue, bouncesz / 512);  in mmc_init_queue()
    [all …]
|
/drivers/net/skfp/ |
D | hwmtm.c |
    85   static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue);
    86   static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue);
    367  struct s_smt_tx_queue *queue ;  in init_txd_ring() local
    375  queue = smc->hw.fp.tx[QUEUE_A0] ;  in init_txd_ring()
    381  queue->tx_curr_put = queue->tx_curr_get = ds ;  in init_txd_ring()
    383  queue->tx_free = HWM_ASYNC_TXD_COUNT ;  in init_txd_ring()
    384  queue->tx_used = 0 ;  in init_txd_ring()
    389  queue = smc->hw.fp.tx[QUEUE_S] ;  in init_txd_ring()
    395  queue->tx_curr_put = queue->tx_curr_get = ds ;  in init_txd_ring()
    396  queue->tx_free = HWM_SYNC_TXD_COUNT ;  in init_txd_ring()
    [all …]
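
init_txd_ring initializes each transmit descriptor ring (async QUEUE_A0 and sync QUEUE_S) to the same resting state: both cursors at the base descriptor, all descriptors free, none owned by the hardware. The producer fills at curr_put, the completion path reclaims at curr_get, and free/used account for the split. A sketch of that bookkeeping (struct names are illustrative; the descriptor itself stays opaque):

    struct txd;   /* hardware TX descriptor, opaque here */

    struct tx_ring {
        struct txd *curr_put;   /* next descriptor the driver fills */
        struct txd *curr_get;   /* next descriptor to reclaim after TX */
        int         free;       /* descriptors available to the driver */
        int         used;       /* descriptors handed to the hardware */
    };

    /* Freshly initialized ring: both cursors at the base, everything free. */
    static void ring_init(struct tx_ring *r, struct txd *base, int count)
    {
        r->curr_put = r->curr_get = base;
        r->free = count;
        r->used = 0;
    }
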
|
/drivers/scsi/aacraid/ |
D | comminit.c |
    229  comm->queue[HostNormCmdQueue].base = queues;  in aac_comm_init()
    230  aac_queue_init(dev, &comm->queue[HostNormCmdQueue], headers, HOST_NORM_CMD_ENTRIES);  in aac_comm_init()
    235  comm->queue[HostHighCmdQueue].base = queues;  in aac_comm_init()
    236  aac_queue_init(dev, &comm->queue[HostHighCmdQueue], headers, HOST_HIGH_CMD_ENTRIES);  in aac_comm_init()
    242  comm->queue[AdapNormCmdQueue].base = queues;  in aac_comm_init()
    243  aac_queue_init(dev, &comm->queue[AdapNormCmdQueue], headers, ADAP_NORM_CMD_ENTRIES);  in aac_comm_init()
    249  comm->queue[AdapHighCmdQueue].base = queues;  in aac_comm_init()
    250  aac_queue_init(dev, &comm->queue[AdapHighCmdQueue], headers, ADAP_HIGH_CMD_ENTRIES);  in aac_comm_init()
    256  comm->queue[HostNormRespQueue].base = queues;  in aac_comm_init()
    257  aac_queue_init(dev, &comm->queue[HostNormRespQueue], headers, HOST_NORM_RESP_ENTRIES);  in aac_comm_init()
    [all …]
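
aac_comm_init carves one contiguous communication area into per-queue regions, setting each queue's base and then (in the lines elided between matches, implied by the advancing reuse of the queues pointer) stepping past that queue's entries before placing the next. The carving pattern, condensed into a loop as a hedged sketch with illustrative types:

    #include <stdint.h>

    struct aac_queue_model {
        uint32_t    *base;      /* this queue's slice of the shared area */
        unsigned int entries;   /* slots in the slice */
    };

    /* Slice one flat allocation into consecutive per-queue regions. */
    static void carve_queues(uint32_t *mem, struct aac_queue_model *q,
                             const unsigned int *entries, int nqueues)
    {
        for (int i = 0; i < nqueues; i++) {
            q[i].base = mem;
            q[i].entries = entries[i];
            mem += entries[i];    /* advance past this queue's slots */
        }
    }
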
|
/drivers/block/ |
D | ps3disk.c |
    42   struct request_queue *queue;  member
    286  ps3disk_do_request(dev, priv->queue);  in ps3disk_interrupt()
    424  struct request_queue *queue;  in ps3disk_probe() local
    468  queue = blk_init_queue(ps3disk_request, &priv->lock);  in ps3disk_probe()
    469  if (!queue) {  in ps3disk_probe()
    476  priv->queue = queue;  in ps3disk_probe()
    477  queue->queuedata = dev;  in ps3disk_probe()
    479  blk_queue_bounce_limit(queue, BLK_BOUNCE_HIGH);  in ps3disk_probe()
    481  blk_queue_max_sectors(queue, dev->bounce_size >> 9);  in ps3disk_probe()
    482  blk_queue_segment_boundary(queue, -1UL);  in ps3disk_probe()
    [all …]
|
/drivers/watchdog/ |
D | mtx-1_wdt.c |
    65   int queue;  member
    86   if (mtx1_wdt_device.queue && ticks)  in mtx1_wdt_trigger()
    104  if (!mtx1_wdt_device.queue) {  in mtx1_wdt_start()
    105      mtx1_wdt_device.queue = 1;  in mtx1_wdt_start()
    118  if (mtx1_wdt_device.queue) {  in mtx1_wdt_stop()
    119      mtx1_wdt_device.queue = 0;  in mtx1_wdt_stop()
    217  mtx1_wdt_device.queue = 0;  in mtx1_wdt_probe()
    235  if (mtx1_wdt_device.queue) {  in mtx1_wdt_remove()
    236      mtx1_wdt_device.queue = 0;  in mtx1_wdt_remove()
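
In mtx-1_wdt the queue field is simply a run flag: start sets it and arms the trigger, the trigger re-arms itself only while the flag (and the tick budget) stays set, and stop clears it so the chain dies out on its next run. A userspace model of that self-rearming pattern; the timer plumbing is reduced to a stub, and the atomics are an assumption this sketch makes to keep the flag race-free:

    #include <stdatomic.h>

    static atomic_int wdt_queue;        /* run flag: 1 while pings continue */
    static int        wdt_ticks = 100;  /* illustrative ping budget */

    /* Stand-in for the kernel timer: would schedule wdt_trigger() later. */
    static void arm_trigger(void) { }

    /* Timer callback: ping the hardware, then re-arm only while active. */
    static void wdt_trigger(void)
    {
        if (atomic_load(&wdt_queue) && wdt_ticks) {
            wdt_ticks--;                /* ...hit the watchdog register here... */
            arm_trigger();
        }
    }

    static void wdt_start(void)
    {
        if (!atomic_exchange(&wdt_queue, 1))   /* start only if not running */
            arm_trigger();
    }

    static void wdt_stop(void)
    {
        atomic_store(&wdt_queue, 0);    /* the trigger chain stops next run */
    }
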
|