Lines matching refs:self
92 static int w83977af_close(struct w83977af_ir *self);
94 static int w83977af_dma_receive(struct w83977af_ir *self);
95 static int w83977af_dma_receive_complete(struct w83977af_ir *self);
98 static void w83977af_dma_write(struct w83977af_ir *self, int iobase);
99 static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed);
100 static int w83977af_is_receiving(struct w83977af_ir *self);
153 struct w83977af_ir *self; in w83977af_open() local
180 self = netdev_priv(dev); in w83977af_open()
181 spin_lock_init(&self->lock); in w83977af_open()
185 self->io.fir_base = iobase; in w83977af_open()
186 self->io.irq = irq; in w83977af_open()
187 self->io.fir_ext = CHIP_IO_EXTENT; in w83977af_open()
188 self->io.dma = dma; in w83977af_open()
189 self->io.fifo_size = 32; in w83977af_open()
192 irda_init_max_qos_capabilies(&self->qos); in w83977af_open()
197 	self->qos.baud_rate.bits = IR_9600 | IR_19200 | IR_38400 | IR_57600 | in w83977af_open()
201 self->qos.min_turn_time.bits = qos_mtt_bits; in w83977af_open()
202 irda_qos_bits_to_value(&self->qos); in w83977af_open()
205 self->rx_buff.truesize = 14384; in w83977af_open()
206 self->tx_buff.truesize = 4000; in w83977af_open()
209 self->rx_buff.head = in w83977af_open()
210 dma_alloc_coherent(NULL, self->rx_buff.truesize, in w83977af_open()
211 &self->rx_buff_dma, GFP_KERNEL); in w83977af_open()
212 if (self->rx_buff.head == NULL) { in w83977af_open()
217 memset(self->rx_buff.head, 0, self->rx_buff.truesize); in w83977af_open()
219 self->tx_buff.head = in w83977af_open()
220 dma_alloc_coherent(NULL, self->tx_buff.truesize, in w83977af_open()
221 &self->tx_buff_dma, GFP_KERNEL); in w83977af_open()
222 if (self->tx_buff.head == NULL) { in w83977af_open()
226 memset(self->tx_buff.head, 0, self->tx_buff.truesize); in w83977af_open()
228 self->rx_buff.in_frame = FALSE; in w83977af_open()
229 self->rx_buff.state = OUTSIDE_FRAME; in w83977af_open()
230 self->tx_buff.data = self->tx_buff.head; in w83977af_open()
231 self->rx_buff.data = self->rx_buff.head; in w83977af_open()
232 self->netdev = dev; in w83977af_open()
248 dev_self[i] = self; in w83977af_open()
252 dma_free_coherent(NULL, self->tx_buff.truesize, in w83977af_open()
253 self->tx_buff.head, self->tx_buff_dma); in w83977af_open()
255 dma_free_coherent(NULL, self->rx_buff.truesize, in w83977af_open()
256 self->rx_buff.head, self->rx_buff_dma); in w83977af_open()
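
The open path above allocates the rx and tx frame buffers with dma_alloc_coherent() and, when a later step fails, frees them again in reverse order. A minimal sketch of that allocate-then-unwind pattern, assuming the generic DMA API (the excerpt passes NULL as the struct device, which newer kernels reject, so a real device pointer is used here; sizes mirror the excerpt):

	#include <linux/dma-mapping.h>
	#include <linux/gfp.h>

	/* Sketch only: 14384-byte rx and 4000-byte tx buffers as above. */
	static int alloc_irda_dma_buffers(struct device *dev,
					  void **rx, dma_addr_t *rx_dma,
					  void **tx, dma_addr_t *tx_dma)
	{
		*rx = dma_alloc_coherent(dev, 14384, rx_dma, GFP_KERNEL);
		if (!*rx)
			return -ENOMEM;

		*tx = dma_alloc_coherent(dev, 4000, tx_dma, GFP_KERNEL);
		if (!*tx) {
			/* unwind in reverse order, as the error path above does */
			dma_free_coherent(dev, 14384, *rx, *rx_dma);
			*rx = NULL;
			return -ENOMEM;
		}
		return 0;
	}
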
270 static int w83977af_close(struct w83977af_ir *self) in w83977af_close() argument
276 iobase = self->io.fir_base; in w83977af_close()
291 unregister_netdev(self->netdev); in w83977af_close()
295 	__func__, self->io.fir_base); in w83977af_close()
296 release_region(self->io.fir_base, self->io.fir_ext); in w83977af_close()
298 if (self->tx_buff.head) in w83977af_close()
299 dma_free_coherent(NULL, self->tx_buff.truesize, in w83977af_close()
300 self->tx_buff.head, self->tx_buff_dma); in w83977af_close()
302 if (self->rx_buff.head) in w83977af_close()
303 dma_free_coherent(NULL, self->rx_buff.truesize, in w83977af_close()
304 self->rx_buff.head, self->rx_buff_dma); in w83977af_close()
306 free_netdev(self->netdev); in w83977af_close()
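
Teardown in w83977af_close() runs in the reverse order of open: unregister the net_device first so the stack stops submitting work, release the I/O region, free whichever coherent buffers exist, and call free_netdev() last, since the private struct w83977af_ir lives inside the net_device allocation. A condensed sketch of that ordering (field names as in the excerpts, device pointer assumed as above):

	static void close_sketch(struct w83977af_ir *self, struct device *dev)
	{
		unregister_netdev(self->netdev);	/* no new I/O after this */
		release_region(self->io.fir_base, self->io.fir_ext);

		if (self->tx_buff.head)
			dma_free_coherent(dev, self->tx_buff.truesize,
					  self->tx_buff.head, self->tx_buff_dma);
		if (self->rx_buff.head)
			dma_free_coherent(dev, self->rx_buff.truesize,
					  self->rx_buff.head, self->rx_buff_dma);

		free_netdev(self->netdev);	/* frees `self` along with it */
	}
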
410 static void w83977af_change_speed(struct w83977af_ir *self, __u32 speed) in w83977af_change_speed() argument
416 iobase = self->io.fir_base; in w83977af_change_speed()
419 self->io.speed = speed; in w83977af_change_speed()
470 netif_wake_queue(self->netdev); in w83977af_change_speed()
476 w83977af_dma_receive(self); in w83977af_change_speed()
492 struct w83977af_ir *self; in w83977af_hard_xmit() local
498 self = netdev_priv(dev); in w83977af_hard_xmit()
500 iobase = self->io.fir_base; in w83977af_hard_xmit()
510 if ((speed != self->io.speed) && (speed != -1)) { in w83977af_hard_xmit()
513 w83977af_change_speed(self, speed); in w83977af_hard_xmit()
518 self->new_speed = speed; in w83977af_hard_xmit()
525 if (self->io.speed > PIO_MAX_SPEED) { in w83977af_hard_xmit()
526 self->tx_buff.data = self->tx_buff.head; in w83977af_hard_xmit()
527 skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len); in w83977af_hard_xmit()
528 self->tx_buff.len = skb->len; in w83977af_hard_xmit()
543 self->io.direction = IO_XMIT; in w83977af_hard_xmit()
557 w83977af_dma_write(self, iobase); in w83977af_hard_xmit()
562 self->tx_buff.data = self->tx_buff.head; in w83977af_hard_xmit()
563 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, in w83977af_hard_xmit()
564 self->tx_buff.truesize); in w83977af_hard_xmit()
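
The transmit path shows the driver's two-mode design: above PIO_MAX_SPEED the raw frame is copied into the coherent tx buffer for DMA, while at SIR rates it is byte-stuffed with async_wrap_skb() for PIO. It also shows the deferred speed change: a pending speed is applied immediately only for zero-length (speed-only) frames, otherwise stashed in self->new_speed until TX completes. A sketch of that control flow, omitting the register writes and queue stop/start of the real function (the IrDA helpers come from the old <net/irda/...> headers):

	#include <linux/netdevice.h>

	static netdev_tx_t xmit_sketch(struct sk_buff *skb, struct net_device *dev)
	{
		struct w83977af_ir *self = netdev_priv(dev);
		__u32 speed = irda_get_next_speed(skb);

		if (speed != self->io.speed && speed != -1) {
			if (!skb->len) {		/* speed-change request only */
				w83977af_change_speed(self, speed);
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
			self->new_speed = speed;	/* apply after this frame */
		}

		if (self->io.speed > PIO_MAX_SPEED) {	/* FIR: DMA the raw frame */
			self->tx_buff.data = self->tx_buff.head;
			skb_copy_from_linear_data(skb, self->tx_buff.data, skb->len);
			self->tx_buff.len = skb->len;
		} else {				/* SIR: byte-stuff for PIO */
			self->tx_buff.data = self->tx_buff.head;
			self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
							   self->tx_buff.truesize);
		}

		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
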
585 static void w83977af_dma_write(struct w83977af_ir *self, int iobase) in w83977af_dma_write() argument
592 	IRDA_DEBUG(4, "%s(), len=%d\n", __func__, self->tx_buff.len); in w83977af_dma_write()
605 spin_lock_irqsave(&self->lock, flags); in w83977af_dma_write()
607 disable_dma(self->io.dma); in w83977af_dma_write()
608 clear_dma_ff(self->io.dma); in w83977af_dma_write()
609 set_dma_mode(self->io.dma, DMA_MODE_READ); in w83977af_dma_write()
610 set_dma_addr(self->io.dma, self->tx_buff_dma); in w83977af_dma_write()
611 set_dma_count(self->io.dma, self->tx_buff.len); in w83977af_dma_write()
613 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, in w83977af_dma_write()
616 self->io.direction = IO_XMIT; in w83977af_dma_write()
623 enable_dma(self->io.dma); in w83977af_dma_write()
624 spin_unlock_irqrestore(&self->lock, flags); in w83977af_dma_write()
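
Lines 607-611 and line 613 are compile-time alternatives in the original file: one programs the ISA DMA controller directly through <asm/dma.h>, the other defers to the irda_setup_dma() helper. Either way, the channel is reprogrammed under self->lock with interrupts off, because the interrupt handler re-arms the same channel. A sketch of the raw variant for the transmit direction; note the excerpt shows DMA_MODE_READ on the TX path because it comes from a platform-specific branch of the original source, while the generic memory-to-device mode in <asm/dma.h> is DMA_MODE_WRITE, used here. The receive path at lines 762-788 is the mirror image with DMA_MODE_READ and the rx buffer.

	#include <asm/dma.h>
	#include <linux/spinlock.h>

	static void dma_write_sketch(struct w83977af_ir *self)
	{
		unsigned long flags;

		spin_lock_irqsave(&self->lock, flags);	/* the ISR touches this channel too */
		disable_dma(self->io.dma);		/* quiesce before reprogramming */
		clear_dma_ff(self->io.dma);		/* reset the 8237 addr/count flip-flop */
		set_dma_mode(self->io.dma, DMA_MODE_WRITE);	/* memory -> device */
		set_dma_addr(self->io.dma, self->tx_buff_dma);
		set_dma_count(self->io.dma, self->tx_buff.len);
		self->io.direction = IO_XMIT;
		enable_dma(self->io.dma);
		spin_unlock_irqrestore(&self->lock, flags);
	}
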
681 static void w83977af_dma_xmit_complete(struct w83977af_ir *self) in w83977af_dma_xmit_complete() argument
688 IRDA_ASSERT(self != NULL, return;); in w83977af_dma_xmit_complete()
690 iobase = self->io.fir_base; in w83977af_dma_xmit_complete()
703 self->netdev->stats.tx_errors++; in w83977af_dma_xmit_complete()
704 self->netdev->stats.tx_fifo_errors++; in w83977af_dma_xmit_complete()
709 self->netdev->stats.tx_packets++; in w83977af_dma_xmit_complete()
712 if (self->new_speed) { in w83977af_dma_xmit_complete()
713 w83977af_change_speed(self, self->new_speed); in w83977af_dma_xmit_complete()
714 self->new_speed = 0; in w83977af_dma_xmit_complete()
719 netif_wake_queue(self->netdev); in w83977af_dma_xmit_complete()
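
TX completion above is pure bookkeeping: classify the result into netdev stats, apply any speed change the xmit path deferred, and wake the queue so the stack can hand over the next frame. Condensed (the underrun test stands in for the chip-status read the excerpt elides):

	static void dma_xmit_complete_sketch(struct w83977af_ir *self, bool fifo_underrun)
	{
		if (fifo_underrun) {
			self->netdev->stats.tx_errors++;
			self->netdev->stats.tx_fifo_errors++;
		} else {
			self->netdev->stats.tx_packets++;
		}

		if (self->new_speed) {			/* deferred from hard_xmit */
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}

		netif_wake_queue(self->netdev);		/* accept the next frame */
	}
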
732 static int w83977af_dma_receive(struct w83977af_ir *self) in w83977af_dma_receive() argument
740 IRDA_ASSERT(self != NULL, return -1;); in w83977af_dma_receive()
744 	iobase = self->io.fir_base; in w83977af_dma_receive()
758 self->io.direction = IO_RECV; in w83977af_dma_receive()
759 self->rx_buff.data = self->rx_buff.head; in w83977af_dma_receive()
762 spin_lock_irqsave(&self->lock, flags); in w83977af_dma_receive()
764 disable_dma(self->io.dma); in w83977af_dma_receive()
765 clear_dma_ff(self->io.dma); in w83977af_dma_receive()
766 set_dma_mode(self->io.dma, DMA_MODE_READ); in w83977af_dma_receive()
767 set_dma_addr(self->io.dma, self->rx_buff_dma); in w83977af_dma_receive()
768 set_dma_count(self->io.dma, self->rx_buff.truesize); in w83977af_dma_receive()
770 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, in w83977af_dma_receive()
780 self->st_fifo.len = self->st_fifo.tail = self->st_fifo.head = 0; in w83977af_dma_receive()
787 enable_dma(self->io.dma); in w83977af_dma_receive()
788 spin_unlock_irqrestore(&self->lock, flags); in w83977af_dma_receive()
804 static int w83977af_dma_receive_complete(struct w83977af_ir *self) in w83977af_dma_receive_complete() argument
815 st_fifo = &self->st_fifo; in w83977af_dma_receive_complete()
817 iobase = self->io.fir_base; in w83977af_dma_receive_complete()
822 iobase = self->io.fir_base; in w83977af_dma_receive_complete()
847 self->netdev->stats.rx_errors += len; in w83977af_dma_receive_complete()
850 self->netdev->stats.rx_errors++; in w83977af_dma_receive_complete()
852 self->rx_buff.data += len; in w83977af_dma_receive_complete()
855 self->netdev->stats.rx_length_errors++; in w83977af_dma_receive_complete()
858 self->netdev->stats.rx_frame_errors++; in w83977af_dma_receive_complete()
861 self->netdev->stats.rx_crc_errors++; in w83977af_dma_receive_complete()
865 self->netdev->stats.rx_fifo_errors++; in w83977af_dma_receive_complete()
868 self->netdev->stats.rx_fifo_errors++; in w83977af_dma_receive_complete()
904 if (self->io.speed < 4000000) { in w83977af_dma_receive_complete()
907 self->rx_buff.data, in w83977af_dma_receive_complete()
912 self->rx_buff.data, in w83977af_dma_receive_complete()
917 self->rx_buff.data += len; in w83977af_dma_receive_complete()
918 self->netdev->stats.rx_packets++; in w83977af_dma_receive_complete()
920 skb->dev = self->netdev; in w83977af_dma_receive_complete()
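
The receive-complete path classifies hardware errors into the per-kind rx stats, then hands each good frame to the stack by copying it out of the DMA buffer into a fresh skb. A sketch of that hand-off; FCS trimming (the speed test on line 904 selects a 2- or 4-byte IrDA checksum) is folded into `len` for brevity:

	#include <linux/skbuff.h>
	#include <linux/if_ether.h>

	static void deliver_frame_sketch(struct w83977af_ir *self, int len)
	{
		struct sk_buff *skb = dev_alloc_skb(len + 1);

		if (!skb) {
			self->netdev->stats.rx_dropped++;
			return;
		}
		skb_reserve(skb, 1);		/* align the IrLAP header */
		skb_put(skb, len);
		skb_copy_to_linear_data(skb, self->rx_buff.data, len);

		self->rx_buff.data += len;	/* consume from the DMA buffer */
		self->netdev->stats.rx_packets++;

		skb->dev = self->netdev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);
	}
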
938 static void w83977af_pio_receive(struct w83977af_ir *self) in w83977af_pio_receive() argument
945 IRDA_ASSERT(self != NULL, return;); in w83977af_pio_receive()
947 iobase = self->io.fir_base; in w83977af_pio_receive()
952 async_unwrap_char(self->netdev, &self->netdev->stats, &self->rx_buff, in w83977af_pio_receive()
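
SIR reception is polled a byte at a time: each byte read from the chip is pushed through the IrDA async unwrapper, which undoes the byte-stuffing, reassembles frames in self->rx_buff, and delivers them itself. A sketch, borrowing the standard 16550 register names from <linux/serial_reg.h> in place of the chip's own definitions:

	#include <linux/serial_reg.h>
	#include <asm/io.h>

	static void pio_receive_sketch(struct w83977af_ir *self)
	{
		int iobase = self->io.fir_base;

		/* drain the RX FIFO while data-ready is set */
		while (inb(iobase + UART_LSR) & UART_LSR_DR)
			async_unwrap_char(self->netdev, &self->netdev->stats,
					  &self->rx_buff, inb(iobase + UART_RX));
	}
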
963 static __u8 w83977af_sir_interrupt(struct w83977af_ir *self, int isr) in w83977af_sir_interrupt() argument
972 iobase = self->io.fir_base; in w83977af_sir_interrupt()
976 actual = w83977af_pio_write(self->io.fir_base, in w83977af_sir_interrupt()
977 self->tx_buff.data, in w83977af_sir_interrupt()
978 self->tx_buff.len, in w83977af_sir_interrupt()
979 self->io.fifo_size); in w83977af_sir_interrupt()
981 self->tx_buff.data += actual; in w83977af_sir_interrupt()
982 self->tx_buff.len -= actual; in w83977af_sir_interrupt()
984 self->io.direction = IO_XMIT; in w83977af_sir_interrupt()
987 if (self->tx_buff.len > 0) { in w83977af_sir_interrupt()
995 self->netdev->stats.tx_packets++; in w83977af_sir_interrupt()
998 netif_wake_queue(self->netdev); in w83977af_sir_interrupt()
1005 if (self->new_speed) { in w83977af_sir_interrupt()
1008 w83977af_change_speed(self, self->new_speed); in w83977af_sir_interrupt()
1009 self->new_speed = 0; in w83977af_sir_interrupt()
1013 self->io.direction = IO_RECV; in w83977af_sir_interrupt()
1019 w83977af_pio_receive(self); in w83977af_sir_interrupt()
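
The SIR TX half above works in FIFO-sized chunks: w83977af_pio_write() returns how many bytes the chip accepted, the cursor advances by that amount, and the TX interrupt stays armed until self->tx_buff.len reaches zero, at which point the stats are bumped, any deferred speed change is applied, and the link turns around to receive. In outline:

	static void sir_tx_step_sketch(struct w83977af_ir *self)
	{
		int actual = w83977af_pio_write(self->io.fir_base,
						self->tx_buff.data,
						self->tx_buff.len,
						self->io.fifo_size);

		self->tx_buff.data += actual;	/* consume what the FIFO took */
		self->tx_buff.len  -= actual;

		if (self->tx_buff.len > 0)
			return;			/* keep the TX interrupt armed */

		self->netdev->stats.tx_packets++;
		netif_wake_queue(self->netdev);

		if (self->new_speed) {
			w83977af_change_speed(self, self->new_speed);
			self->new_speed = 0;
		}
		self->io.direction = IO_RECV;	/* turn the link around */
	}
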
1033 static __u8 w83977af_fir_interrupt(struct w83977af_ir *self, int isr) in w83977af_fir_interrupt() argument
1039 iobase = self->io.fir_base; in w83977af_fir_interrupt()
1044 if (w83977af_dma_receive_complete(self)) { in w83977af_fir_interrupt()
1073 if (self->io.direction == IO_XMIT) { in w83977af_fir_interrupt()
1074 w83977af_dma_write(self, iobase); in w83977af_fir_interrupt()
1079 w83977af_dma_receive_complete(self); in w83977af_fir_interrupt()
1086 w83977af_dma_xmit_complete(self); in w83977af_fir_interrupt()
1095 w83977af_dma_receive(self); in w83977af_fir_interrupt()
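
The FIR half dispatches on three conditions visible above: frame-received (try to complete the DMA receive), a timeout whose handling depends on whether the driver was transmitting or receiving (lines 1073-1079), and DMA-done (finish the transmit, then re-arm reception). A sketch with hypothetical event bits standing in for the chip's interrupt-status decoding:

	/* Hypothetical event bits; the real driver decodes its ISR register. */
	#define EV_RX_DONE	0x01
	#define EV_TIMEOUT	0x02
	#define EV_TX_DONE	0x04

	static void fir_dispatch_sketch(struct w83977af_ir *self, int events)
	{
		if (events & EV_RX_DONE)
			w83977af_dma_receive_complete(self);

		if (events & EV_TIMEOUT) {
			if (self->io.direction == IO_XMIT)
				w83977af_dma_write(self, self->io.fir_base);	/* restart TX */
			else
				w83977af_dma_receive_complete(self);		/* drain RX */
		}

		if (events & EV_TX_DONE) {
			w83977af_dma_xmit_complete(self);
			w83977af_dma_receive(self);	/* back to listening */
		}
	}
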
1115 struct w83977af_ir *self; in w83977af_interrupt() local
1119 self = netdev_priv(dev); in w83977af_interrupt()
1121 iobase = self->io.fir_base; in w83977af_interrupt()
1134 	if (self->io.speed > PIO_MAX_SPEED) in w83977af_interrupt()
1135 icr = w83977af_fir_interrupt(self, isr); in w83977af_interrupt()
1137 icr = w83977af_sir_interrupt(self, isr); in w83977af_interrupt()
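
The top-level handler recovers the driver state from the net_device it registered as dev_id and forks on speed, since SIR and FIR use different interrupt sources; the value returned by the per-mode handler is written back to the interrupt-control register to select which events stay enabled. Sketch, where read_isr() and write_icr() are hypothetical register helpers:

	#include <linux/interrupt.h>

	static irqreturn_t interrupt_sketch(int irq, void *dev_id)
	{
		struct net_device *dev = dev_id;
		struct w83977af_ir *self = netdev_priv(dev);
		__u8 isr, icr;

		isr = read_isr(self->io.fir_base);	/* hypothetical helper */

		if (self->io.speed > PIO_MAX_SPEED)
			icr = w83977af_fir_interrupt(self, isr);
		else
			icr = w83977af_sir_interrupt(self, isr);

		write_icr(self->io.fir_base, icr);	/* hypothetical helper */
		return IRQ_HANDLED;
	}
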
1151 static int w83977af_is_receiving(struct w83977af_ir *self) in w83977af_is_receiving() argument
1157 IRDA_ASSERT(self != NULL, return FALSE;); in w83977af_is_receiving()
1159 if (self->io.speed > 115200) { in w83977af_is_receiving()
1160 iobase = self->io.fir_base; in w83977af_is_receiving()
1171 status = (self->rx_buff.state != OUTSIDE_FRAME); in w83977af_is_receiving()
1184 struct w83977af_ir *self; in w83977af_net_open() local
1192 self = netdev_priv(dev); in w83977af_net_open()
1194 IRDA_ASSERT(self != NULL, return 0;); in w83977af_net_open()
1196 iobase = self->io.fir_base; in w83977af_net_open()
1198 if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name, in w83977af_net_open()
1206 if (request_dma(self->io.dma, dev->name)) { in w83977af_net_open()
1207 	free_irq(self->io.irq, dev); in w83977af_net_open()
1216 if (self->io.speed > 115200) { in w83977af_net_open()
1218 w83977af_dma_receive(self); in w83977af_net_open()
1229 sprintf(hwname, "w83977af @ 0x%03x", self->io.fir_base); in w83977af_net_open()
1235 self->irlap = irlap_open(dev, &self->qos, hwname); in w83977af_net_open()
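
Opening the interface acquires the two exclusive resources in order, IRQ first and then the ISA DMA channel, unwinding the IRQ if the channel is busy; free_irq() must receive the same dev_id cookie that request_irq() registered, here the net_device. With both in hand, FIR mode pre-arms DMA reception, and the device attaches to the IrLAP layer under a name derived from its I/O base. Outline (error codes illustrative):

	static int net_open_sketch(struct net_device *dev)
	{
		struct w83977af_ir *self = netdev_priv(dev);
		char hwname[32];

		if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name, dev))
			return -EAGAIN;

		if (request_dma(self->io.dma, dev->name)) {
			free_irq(self->io.irq, dev);	/* same cookie as request_irq() */
			return -EAGAIN;
		}

		if (self->io.speed > 115200)
			w83977af_dma_receive(self);	/* FIR: listen immediately */

		snprintf(hwname, sizeof(hwname), "w83977af @ 0x%03x",
			 self->io.fir_base);
		self->irlap = irlap_open(dev, &self->qos, hwname);

		netif_start_queue(dev);
		return 0;
	}
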
1248 struct w83977af_ir *self; in w83977af_net_close() local
1256 self = netdev_priv(dev); in w83977af_net_close()
1258 IRDA_ASSERT(self != NULL, return 0;); in w83977af_net_close()
1260 iobase = self->io.fir_base; in w83977af_net_close()
1266 if (self->irlap) in w83977af_net_close()
1267 irlap_close(self->irlap); in w83977af_net_close()
1268 self->irlap = NULL; in w83977af_net_close()
1270 disable_dma(self->io.dma); in w83977af_net_close()
1279 free_irq(self->io.irq, dev); in w83977af_net_close()
1280 free_dma(self->io.dma); in w83977af_net_close()
1297 struct w83977af_ir *self; in w83977af_net_ioctl() local
1303 self = netdev_priv(dev); in w83977af_net_ioctl()
1305 IRDA_ASSERT(self != NULL, return -1;); in w83977af_net_ioctl()
1309 spin_lock_irqsave(&self->lock, flags); in w83977af_net_ioctl()
1317 w83977af_change_speed(self, irq->ifr_baudrate); in w83977af_net_ioctl()
1324 irda_device_set_media_busy(self->netdev, TRUE); in w83977af_net_ioctl()
1327 irq->ifr_receiving = w83977af_is_receiving(self); in w83977af_net_ioctl()
1333 spin_unlock_irqrestore(&self->lock, flags); in w83977af_net_ioctl()
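
The private ioctl follows the stock IrDA-driver pattern: SIOCSBANDWIDTH and SIOCSMEDIABUSY are privileged operations performed under self->lock, while SIOCGRECEIVING merely polls the receive state. A reconstructed outline, with the lock spanning the whole dispatch as in the excerpt (lines 1309 and 1333):

	#include <linux/capability.h>

	static int net_ioctl_sketch(struct net_device *dev, struct ifreq *rq, int cmd)
	{
		struct if_irda_req *irq = (struct if_irda_req *)rq;
		struct w83977af_ir *self = netdev_priv(dev);
		unsigned long flags;
		int ret = 0;

		spin_lock_irqsave(&self->lock, flags);

		switch (cmd) {
		case SIOCSBANDWIDTH:		/* set link speed (root only) */
			if (!capable(CAP_NET_ADMIN))
				ret = -EPERM;
			else
				w83977af_change_speed(self, irq->ifr_baudrate);
			break;
		case SIOCSMEDIABUSY:		/* force a media-busy indication */
			if (!capable(CAP_NET_ADMIN))
				ret = -EPERM;
			else
				irda_device_set_media_busy(self->netdev, TRUE);
			break;
		case SIOCGRECEIVING:		/* is a frame currently arriving? */
			irq->ifr_receiving = w83977af_is_receiving(self);
			break;
		default:
			ret = -EOPNOTSUPP;
		}

		spin_unlock_irqrestore(&self->lock, flags);
		return ret;
	}
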