Lines Matching refs:self
81 static int via_ircc_dma_receive(struct via_ircc_cb *self);
82 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self,
88 static void via_hw_init(struct via_ircc_cb *self);
89 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 baud);
91 static int via_ircc_is_receiving(struct via_ircc_cb *self);
100 static int RxTimerHandler(struct via_ircc_cb *self, int iobase);
101 static void hwreset(struct via_ircc_cb *self);
102 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase);
103 static int upload_rxdata(struct via_ircc_cb *self, int iobase);
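
Taken together, these declarations all operate on one per-device state object. A reduced sketch of struct via_ircc_cb, reconstructed only from the field accesses in this listing (the real definition lives in via-ircc.h and has more members; the queue depth and field types here are assumptions):

    /* Reduced sketch of the per-device state; reconstructed from the
     * references below, not copied from via-ircc.h. */
    struct via_ircc_cb {
        struct net_device *netdev;      /* obtained via netdev_priv(dev) */
        struct irlap_cb *irlap;         /* IrLAP layer instance */
        struct qos_info qos;            /* negotiated IrDA QoS */
        chipio_t io;                    /* cfg_base, fir_base, irq, dma, dma2, speed, ... */
        iobuff_t tx_buff, rx_buff;      /* coherent DMA bounce buffers */
        dma_addr_t tx_buff_dma, rx_buff_dma;
        struct {                        /* software TX frame FIFO */
            struct { u8 *start; u16 len; } queue[7];  /* depth assumed */
            u16 len, ptr, free;
            u8 *tail;
        } tx_fifo;
        struct {                        /* RX status FIFO */
            int len, pending_bytes, head, tail;
        } st_fifo;
        spinlock_t lock;                /* serializes ISR vs. xmit/ioctl */
        __u32 new_speed;                /* speed change deferred to TX completion */
        int RxDataReady, RetryCount, RxLastCount;
        u16 chip_id;                    /* 0x3076 or 0x3096 variant */
        struct { int TimeOut, EOMessage, Unknown; } EventFlag;
    };
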
289 struct via_ircc_cb *self; in via_ircc_open() local
299 self = netdev_priv(dev); in via_ircc_open()
300 self->netdev = dev; in via_ircc_open()
301 spin_lock_init(&self->lock); in via_ircc_open()
303 pci_set_drvdata(pdev, self); in via_ircc_open()
306 self->io.cfg_base = info->cfg_base; in via_ircc_open()
307 self->io.fir_base = info->fir_base; in via_ircc_open()
308 self->io.irq = info->irq; in via_ircc_open()
309 self->io.fir_ext = CHIP_IO_EXTENT; in via_ircc_open()
310 self->io.dma = info->dma; in via_ircc_open()
311 self->io.dma2 = info->dma2; in via_ircc_open()
312 self->io.fifo_size = 32; in via_ircc_open()
313 self->chip_id = id; in via_ircc_open()
314 self->st_fifo.len = 0; in via_ircc_open()
315 self->RxDataReady = 0; in via_ircc_open()
318 if (!request_region(self->io.fir_base, self->io.fir_ext, driver_name)) { in via_ircc_open()
320 __func__, self->io.fir_base); in via_ircc_open()
326 irda_init_max_qos_capabilies(&self->qos); in via_ircc_open()
330 dongle_id = via_ircc_read_dongle_id(self->io.fir_base); in via_ircc_open()
331 self->io.dongle_id = dongle_id; in via_ircc_open()
335 switch (self->io.dongle_id) { in via_ircc_open()
337 self->qos.baud_rate.bits = in via_ircc_open()
342 self->qos.baud_rate.bits = in via_ircc_open()
354 self->qos.min_turn_time.bits = qos_mtt_bits; in via_ircc_open()
355 irda_qos_bits_to_value(&self->qos); in via_ircc_open()
358 self->rx_buff.truesize = 14384 + 2048; in via_ircc_open()
359 self->tx_buff.truesize = 14384 + 2048; in via_ircc_open()
362 self->rx_buff.head = in via_ircc_open()
363 dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize, in via_ircc_open()
364 &self->rx_buff_dma, GFP_KERNEL); in via_ircc_open()
365 if (self->rx_buff.head == NULL) { in via_ircc_open()
370 self->tx_buff.head = in via_ircc_open()
371 dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize, in via_ircc_open()
372 &self->tx_buff_dma, GFP_KERNEL); in via_ircc_open()
373 if (self->tx_buff.head == NULL) { in via_ircc_open()
378 self->rx_buff.in_frame = FALSE; in via_ircc_open()
379 self->rx_buff.state = OUTSIDE_FRAME; in via_ircc_open()
380 self->tx_buff.data = self->tx_buff.head; in via_ircc_open()
381 self->rx_buff.data = self->rx_buff.head; in via_ircc_open()
384 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in via_ircc_open()
385 self->tx_fifo.tail = self->tx_buff.head; in via_ircc_open()
398 self->io.speed = 9600; in via_ircc_open()
399 via_hw_init(self); in via_ircc_open()
402 dma_free_coherent(&pdev->dev, self->tx_buff.truesize, in via_ircc_open()
403 self->tx_buff.head, self->tx_buff_dma); in via_ircc_open()
405 dma_free_coherent(&pdev->dev, self->rx_buff.truesize, in via_ircc_open()
406 self->rx_buff.head, self->rx_buff_dma); in via_ircc_open()
408 release_region(self->io.fir_base, self->io.fir_ext); in via_ircc_open()
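
The open path above follows the classic allocate-then-unwind shape: grab the I/O region, allocate both coherent bounce buffers, and on failure free everything acquired so far in reverse order. A sketch of that shape (the goto label names are illustrative; the listing does not show the source's):

    /* Allocate-then-unwind sketch of via_ircc_open()'s buffer setup */
    self->rx_buff.head = dma_zalloc_coherent(&pdev->dev, self->rx_buff.truesize,
                                             &self->rx_buff_dma, GFP_KERNEL);
    if (!self->rx_buff.head)
        goto err_region;                /* only the I/O region to release */

    self->tx_buff.head = dma_zalloc_coherent(&pdev->dev, self->tx_buff.truesize,
                                             &self->tx_buff_dma, GFP_KERNEL);
    if (!self->tx_buff.head)
        goto err_rx;                    /* must also free the RX buffer */
    /* ... register the netdev, init hardware ... */
    return 0;

    err_rx:
        dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
    err_region:
        release_region(self->io.fir_base, self->io.fir_ext);
        return -ENOMEM;

dma_zalloc_coherent() hands back both a CPU pointer (head) and a bus address (rx_buff_dma/tx_buff_dma); the driver keeps the pair together because every later DMA setup needs the bus address while the copy loops need the pointer.
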
422 struct via_ircc_cb *self = pci_get_drvdata(pdev); in via_remove_one() local
427 iobase = self->io.fir_base; in via_remove_one()
431 unregister_netdev(self->netdev); in via_remove_one()
435 __func__, self->io.fir_base); in via_remove_one()
436 release_region(self->io.fir_base, self->io.fir_ext); in via_remove_one()
437 if (self->tx_buff.head) in via_remove_one()
438 dma_free_coherent(&pdev->dev, self->tx_buff.truesize, in via_remove_one()
439 self->tx_buff.head, self->tx_buff_dma); in via_remove_one()
440 if (self->rx_buff.head) in via_remove_one()
441 dma_free_coherent(&pdev->dev, self->rx_buff.truesize, in via_remove_one()
442 self->rx_buff.head, self->rx_buff_dma); in via_remove_one()
444 free_netdev(self->netdev); in via_remove_one()
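
Teardown in via_remove_one() mirrors the open path in reverse, with NULL guards because probe can fail after only one buffer was allocated:

    /* Teardown ordering from via_remove_one(), with comments added */
    unregister_netdev(self->netdev);             /* stop new traffic first */
    release_region(self->io.fir_base, self->io.fir_ext);
    if (self->tx_buff.head)                      /* guard: probe may have failed early */
        dma_free_coherent(&pdev->dev, self->tx_buff.truesize,
                          self->tx_buff.head, self->tx_buff_dma);
    if (self->rx_buff.head)
        dma_free_coherent(&pdev->dev, self->rx_buff.truesize,
                          self->rx_buff.head, self->rx_buff_dma);
    free_netdev(self->netdev);                   /* self lives inside the netdev: last */
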
456 static void via_hw_init(struct via_ircc_cb *self) in via_hw_init() argument
458 int iobase = self->io.fir_base; in via_hw_init()
498 self->io.speed = 9600; in via_hw_init()
499 self->st_fifo.len = 0; in via_hw_init()
501 via_ircc_change_dongle_speed(iobase, self->io.speed, in via_hw_init()
502 self->io.dongle_id); in via_hw_init()
666 static void via_ircc_change_speed(struct via_ircc_cb *self, __u32 speed) in via_ircc_change_speed() argument
668 struct net_device *dev = self->netdev; in via_ircc_change_speed()
672 iobase = self->io.fir_base; in via_ircc_change_speed()
674 self->io.speed = speed; in via_ircc_change_speed()
727 via_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id); in via_ircc_change_speed()
751 via_ircc_dma_receive(self); in via_ircc_change_speed()
768 struct via_ircc_cb *self; in via_ircc_hard_xmit_sir() local
773 self = netdev_priv(dev); in via_ircc_hard_xmit_sir()
774 IRDA_ASSERT(self != NULL, return NETDEV_TX_OK;); in via_ircc_hard_xmit_sir()
775 iobase = self->io.fir_base; in via_ircc_hard_xmit_sir()
780 if ((speed != self->io.speed) && (speed != -1)) { in via_ircc_hard_xmit_sir()
783 via_ircc_change_speed(self, speed); in via_ircc_hard_xmit_sir()
788 self->new_speed = speed; in via_ircc_hard_xmit_sir()
798 spin_lock_irqsave(&self->lock, flags); in via_ircc_hard_xmit_sir()
799 self->tx_buff.data = self->tx_buff.head; in via_ircc_hard_xmit_sir()
800 self->tx_buff.len = in via_ircc_hard_xmit_sir()
801 async_wrap_skb(skb, self->tx_buff.data, in via_ircc_hard_xmit_sir()
802 self->tx_buff.truesize); in via_ircc_hard_xmit_sir()
804 dev->stats.tx_bytes += self->tx_buff.len; in via_ircc_hard_xmit_sir()
806 SetBaudRate(iobase, self->io.speed); in via_ircc_hard_xmit_sir()
824 irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len, in via_ircc_hard_xmit_sir()
827 SetSendByte(iobase, self->tx_buff.len); in via_ircc_hard_xmit_sir()
832 spin_unlock_irqrestore(&self->lock, flags); in via_ircc_hard_xmit_sir()
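
The SIR transmit path never DMAs the skb directly: async_wrap_skb() byte-stuffs the frame into the pre-allocated coherent tx_buff, and the controller then streams that bounce buffer. Condensed from the lines above (checks and the skb free omitted):

    /* Condensed SIR transmit sequence from via_ircc_hard_xmit_sir() */
    spin_lock_irqsave(&self->lock, flags);
    self->tx_buff.data = self->tx_buff.head;     /* rewind the bounce buffer */
    self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
                                       self->tx_buff.truesize);  /* SIR framing */
    dev->stats.tx_bytes += self->tx_buff.len;
    SetBaudRate(iobase, self->io.speed);
    irda_setup_dma(self->io.dma, self->tx_buff_dma, self->tx_buff.len,
                   DMA_TX_MODE);                 /* legacy ISA-style DMA helper */
    SetSendByte(iobase, self->tx_buff.len);      /* tell the chip the length */
    spin_unlock_irqrestore(&self->lock, flags);
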
840 struct via_ircc_cb *self; in via_ircc_hard_xmit_fir() local
845 self = netdev_priv(dev); in via_ircc_hard_xmit_fir()
846 iobase = self->io.fir_base; in via_ircc_hard_xmit_fir()
848 if (self->st_fifo.len) in via_ircc_hard_xmit_fir()
850 if (self->chip_id == 0x3076) in via_ircc_hard_xmit_fir()
856 if ((speed != self->io.speed) && (speed != -1)) { in via_ircc_hard_xmit_fir()
858 via_ircc_change_speed(self, speed); in via_ircc_hard_xmit_fir()
863 self->new_speed = speed; in via_ircc_hard_xmit_fir()
865 spin_lock_irqsave(&self->lock, flags); in via_ircc_hard_xmit_fir()
866 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail; in via_ircc_hard_xmit_fir()
867 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; in via_ircc_hard_xmit_fir()
869 self->tx_fifo.tail += skb->len; in via_ircc_hard_xmit_fir()
872 self->tx_fifo.queue[self->tx_fifo.free].start, skb->len); in via_ircc_hard_xmit_fir()
873 self->tx_fifo.len++; in via_ircc_hard_xmit_fir()
874 self->tx_fifo.free++; in via_ircc_hard_xmit_fir()
876 via_ircc_dma_xmit(self, iobase); in via_ircc_hard_xmit_fir()
881 spin_unlock_irqrestore(&self->lock, flags); in via_ircc_hard_xmit_fir()
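
The FIR path instead queues raw frames into a software FIFO carved out of tx_buff: each slot records a start pointer and length, tail advances past the frame, and len/free track occupancy. The copy call itself is split across source lines, so only its argument line (872) matches "self" above; it is evidently the tail of a copy into the recorded slot, reassembled here as skb_copy_from_linear_data():

    /* FIR frame queueing from via_ircc_hard_xmit_fir(), reassembled */
    spin_lock_irqsave(&self->lock, flags);
    self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail;
    self->tx_fifo.queue[self->tx_fifo.free].len = skb->len;
    self->tx_fifo.tail += skb->len;              /* next frame lands after this one */
    skb_copy_from_linear_data(skb,
                              self->tx_fifo.queue[self->tx_fifo.free].start,
                              skb->len);         /* copy into the recorded slot */
    self->tx_fifo.len++;
    self->tx_fifo.free++;
    via_ircc_dma_xmit(self, iobase);             /* kick DMA for the queued frame */
    spin_unlock_irqrestore(&self->lock, flags);
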
886 static int via_ircc_dma_xmit(struct via_ircc_cb *self, u16 iobase) in via_ircc_dma_xmit() argument
889 self->io.direction = IO_XMIT; in via_ircc_dma_xmit()
901 irda_setup_dma(self->io.dma, in via_ircc_dma_xmit()
902 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start - in via_ircc_dma_xmit()
903 self->tx_buff.head) + self->tx_buff_dma, in via_ircc_dma_xmit()
904 self->tx_fifo.queue[self->tx_fifo.ptr].len, DMA_TX_MODE); in via_ircc_dma_xmit()
906 __func__, self->tx_fifo.ptr, in via_ircc_dma_xmit()
907 self->tx_fifo.queue[self->tx_fifo.ptr].len, in via_ircc_dma_xmit()
908 self->tx_fifo.len); in via_ircc_dma_xmit()
910 SetSendByte(iobase, self->tx_fifo.queue[self->tx_fifo.ptr].len); in via_ircc_dma_xmit()
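
The subtle part of via_ircc_dma_xmit() is the bus-address computation: the queued frame lives somewhere inside the coherent tx_buff, so its bus address is the CPU-pointer offset from tx_buff.head added to tx_buff_dma. A stand-alone illustration of that arithmetic with invented values:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint8_t buf[4096];               /* stands in for tx_buff.head */
        uint64_t buf_dma = 0x1f000000;   /* stands in for tx_buff_dma (invented) */
        uint8_t *frame = buf + 300;      /* tx_fifo.queue[ptr].start */

        /* same arithmetic as via_ircc_dma_xmit(): CPU offset + bus base */
        uint64_t frame_dma = (uint64_t)(frame - buf) + buf_dma;
        printf("frame bus address: 0x%llx\n",
               (unsigned long long)frame_dma);   /* prints 0x1f00012c */
        return 0;
    }

This only works because the buffer came from dma_zalloc_coherent(), which guarantees the CPU and bus views cover the same contiguous region.
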
924 static int via_ircc_dma_xmit_complete(struct via_ircc_cb *self) in via_ircc_dma_xmit_complete() argument
931 iobase = self->io.fir_base; in via_ircc_dma_xmit_complete()
938 self->netdev->stats.tx_errors++; in via_ircc_dma_xmit_complete()
939 self->netdev->stats.tx_fifo_errors++; in via_ircc_dma_xmit_complete()
940 hwreset(self); in via_ircc_dma_xmit_complete()
943 self->netdev->stats.tx_packets++; in via_ircc_dma_xmit_complete()
948 if (self->new_speed) { in via_ircc_dma_xmit_complete()
949 via_ircc_change_speed(self, self->new_speed); in via_ircc_dma_xmit_complete()
950 self->new_speed = 0; in via_ircc_dma_xmit_complete()
955 if (self->tx_fifo.len) { in via_ircc_dma_xmit_complete()
956 self->tx_fifo.len--; in via_ircc_dma_xmit_complete()
957 self->tx_fifo.ptr++; in via_ircc_dma_xmit_complete()
963 self->tx_fifo.len, self->tx_fifo.ptr, self->tx_fifo.free); in via_ircc_dma_xmit_complete()
973 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in via_ircc_dma_xmit_complete()
974 self->tx_fifo.tail = self->tx_buff.head; in via_ircc_dma_xmit_complete()
981 netif_wake_queue(self->netdev); in via_ircc_dma_xmit_complete()
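
Completion handling shows two common NIC idioms: a speed change requested mid-transmit is parked in self->new_speed and applied only after the frame has left the wire, and the netdev queue is woken once the hardware can take more work. Condensed, with 'tx_error' standing in for the hardware status check the listing does not show:

    /* Condensed completion logic from via_ircc_dma_xmit_complete() */
    if (tx_error) {
        self->netdev->stats.tx_errors++;
        self->netdev->stats.tx_fifo_errors++;
        hwreset(self);                   /* recover the controller */
    } else {
        self->netdev->stats.tx_packets++;
    }
    if (self->new_speed) {               /* deferred from hard_xmit */
        via_ircc_change_speed(self, self->new_speed);
        self->new_speed = 0;
    }
    if (self->tx_fifo.len) {             /* consume the finished slot */
        self->tx_fifo.len--;
        self->tx_fifo.ptr++;
    }
    if (!self->tx_fifo.len) {            /* drained: rewind the FIFO */
        self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
        self->tx_fifo.tail = self->tx_buff.head;
    }
    netif_wake_queue(self->netdev);      /* accept more frames from the stack */
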
992 static int via_ircc_dma_receive(struct via_ircc_cb *self) in via_ircc_dma_receive() argument
996 iobase = self->io.fir_base; in via_ircc_dma_receive()
1000 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in via_ircc_dma_receive()
1001 self->tx_fifo.tail = self->tx_buff.head; in via_ircc_dma_receive()
1002 self->RxDataReady = 0; in via_ircc_dma_receive()
1003 self->io.direction = IO_RECV; in via_ircc_dma_receive()
1004 self->rx_buff.data = self->rx_buff.head; in via_ircc_dma_receive()
1005 self->st_fifo.len = self->st_fifo.pending_bytes = 0; in via_ircc_dma_receive()
1006 self->st_fifo.tail = self->st_fifo.head = 0; in via_ircc_dma_receive()
1021 irda_setup_dma(self->io.dma2, self->rx_buff_dma, in via_ircc_dma_receive()
1022 self->rx_buff.truesize, DMA_RX_MODE); in via_ircc_dma_receive()
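
Arming reception resets all of the software bookkeeping (both FIFOs, the buffer cursor, the direction flag the ISR dispatches on) and then points the second DMA channel at the entire coherent rx_buff:

    /* RX arming from via_ircc_dma_receive(), with comments added */
    self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0;
    self->tx_fifo.tail = self->tx_buff.head;
    self->RxDataReady = 0;
    self->io.direction = IO_RECV;        /* the ISR branches on this */
    self->rx_buff.data = self->rx_buff.head;
    self->st_fifo.len = self->st_fifo.pending_bytes = 0;
    self->st_fifo.tail = self->st_fifo.head = 0;
    irda_setup_dma(self->io.dma2, self->rx_buff_dma,
                   self->rx_buff.truesize, DMA_RX_MODE);  /* whole bounce buffer */
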
1036 static int via_ircc_dma_receive_complete(struct via_ircc_cb *self, in via_ircc_dma_receive_complete() argument
1044 iobase = self->io.fir_base; in via_ircc_dma_receive_complete()
1045 st_fifo = &self->st_fifo; in via_ircc_dma_receive_complete()
1047 if (self->io.speed < 4000000) { // speed below FIR in via_ircc_dma_receive_complete()
1048 len = GetRecvByte(iobase, self); in via_ircc_dma_receive_complete()
1055 if (self->chip_id == 0x3076) { in via_ircc_dma_receive_complete()
1057 skb->data[i] = self->rx_buff.data[i * 2]; in via_ircc_dma_receive_complete()
1059 if (self->chip_id == 0x3096) { in via_ircc_dma_receive_complete()
1062 self->rx_buff.data[i]; in via_ircc_dma_receive_complete()
1066 self->rx_buff.data += len; in via_ircc_dma_receive_complete()
1067 self->netdev->stats.rx_bytes += len; in via_ircc_dma_receive_complete()
1068 self->netdev->stats.rx_packets++; in via_ircc_dma_receive_complete()
1069 skb->dev = self->netdev; in via_ircc_dma_receive_complete()
1077 len = GetRecvByte(iobase, self); in via_ircc_dma_receive_complete()
1082 __func__, len, RxCurCount(iobase, self), in via_ircc_dma_receive_complete()
1083 self->RxLastCount); in via_ircc_dma_receive_complete()
1084 hwreset(self); in via_ircc_dma_receive_complete()
1089 st_fifo->len, len - 4, RxCurCount(iobase, self)); in via_ircc_dma_receive_complete()
1098 self->RxDataReady = 0; in via_ircc_dma_receive_complete()
1127 (self->rx_buff.data == NULL) || (len < 6)) { in via_ircc_dma_receive_complete()
1128 self->netdev->stats.rx_dropped++; in via_ircc_dma_receive_complete()
1135 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); in via_ircc_dma_receive_complete()
1137 len - 4, self->rx_buff.data); in via_ircc_dma_receive_complete()
1140 self->rx_buff.data += len; in via_ircc_dma_receive_complete()
1141 self->netdev->stats.rx_bytes += len; in via_ircc_dma_receive_complete()
1142 self->netdev->stats.rx_packets++; in via_ircc_dma_receive_complete()
1143 skb->dev = self->netdev; in via_ircc_dma_receive_complete()
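
Below FIR speed the two chip variants deliver payload differently: the 0x3076 leaves a gap byte between payload bytes, so the copy-out reads every second position, while the 0x3096 packs them contiguously; at FIR the frame is copied out minus its 4 trailing CRC bytes (the len - 4 above). A sketch of the sub-FIR copy-out, with skb allocation and the length checks omitted:

    /* Sub-FIR copy-out sketch from via_ircc_dma_receive_complete() */
    if (self->chip_id == 0x3076) {
        for (i = 0; i < len; i++)        /* interleaved: every 2nd byte */
            skb->data[i] = self->rx_buff.data[i * 2];
    } else if (self->chip_id == 0x3096) {
        for (i = 0; i < len; i++)        /* packed: straight copy */
            skb->data[i] = self->rx_buff.data[i];
    }
    self->rx_buff.data += len;           /* advance the cursor past this frame */
    self->netdev->stats.rx_bytes += len;
    self->netdev->stats.rx_packets++;
    skb->dev = self->netdev;             /* frame goes up the stack next */
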
1157 static int upload_rxdata(struct via_ircc_cb *self, int iobase) in upload_rxdata() argument
1162 st_fifo = &self->st_fifo; in upload_rxdata()
1164 len = GetRecvByte(iobase, self); in upload_rxdata()
1169 self->netdev->stats.rx_dropped++; in upload_rxdata()
1175 self->netdev->stats.rx_dropped++; in upload_rxdata()
1180 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4 + 1); in upload_rxdata()
1186 self->rx_buff.data += len; in upload_rxdata()
1187 self->netdev->stats.rx_bytes += len; in upload_rxdata()
1188 self->netdev->stats.rx_packets++; in upload_rxdata()
1189 skb->dev = self->netdev; in upload_rxdata()
1207 static int RxTimerHandler(struct via_ircc_cb *self, int iobase) in RxTimerHandler() argument
1214 st_fifo = &self->st_fifo; in RxTimerHandler()
1216 if (CkRxRecv(iobase, self)) { in RxTimerHandler()
1218 self->RetryCount = 0; in RxTimerHandler()
1220 self->RxDataReady++; in RxTimerHandler()
1223 self->RetryCount++; in RxTimerHandler()
1225 if ((self->RetryCount >= 1) || in RxTimerHandler()
1226 ((st_fifo->pending_bytes + 2048) > self->rx_buff.truesize) || in RxTimerHandler()
1243 (self->rx_buff.data == NULL) || (len < 6)) { in RxTimerHandler()
1244 self->netdev->stats.rx_dropped++; in RxTimerHandler()
1249 skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4); in RxTimerHandler()
1255 self->rx_buff.data += len; in RxTimerHandler()
1256 self->netdev->stats.rx_bytes += len; in RxTimerHandler()
1257 self->netdev->stats.rx_packets++; in RxTimerHandler()
1258 skb->dev = self->netdev; in RxTimerHandler()
1263 self->RetryCount = 0; in RxTimerHandler()
1275 (RxCurCount(iobase, self) != self->RxLastCount)) { in RxTimerHandler()
1276 upload_rxdata(self, iobase); in RxTimerHandler()
1277 if (irda_device_txqueue_empty(self->netdev)) in RxTimerHandler()
1278 via_ircc_dma_receive(self); in RxTimerHandler()
1298 struct via_ircc_cb *self = netdev_priv(dev); in via_ircc_interrupt() local
1302 iobase = self->io.fir_base; in via_ircc_interrupt()
1303 spin_lock(&self->lock); in via_ircc_interrupt()
1314 self->EventFlag.TimeOut++; in via_ircc_interrupt()
1316 if (self->io.direction == IO_XMIT) { in via_ircc_interrupt()
1317 via_ircc_dma_xmit(self, iobase); in via_ircc_interrupt()
1319 if (self->io.direction == IO_RECV) { in via_ircc_interrupt()
1323 if (self->RxDataReady > 30) { in via_ircc_interrupt()
1324 hwreset(self); in via_ircc_interrupt()
1325 if (irda_device_txqueue_empty(self->netdev)) { in via_ircc_interrupt()
1326 via_ircc_dma_receive(self); in via_ircc_interrupt()
1329 RxTimerHandler(self, iobase); in via_ircc_interrupt()
1344 self->EventFlag.EOMessage++; // reading the flag auto-clears it in via_ircc_interrupt()
1345 if (via_ircc_dma_xmit_complete(self)) { in via_ircc_interrupt()
1347 (self->netdev)) { in via_ircc_interrupt()
1348 via_ircc_dma_receive(self); in via_ircc_interrupt()
1351 self->EventFlag.Unknown++; in via_ircc_interrupt()
1373 if (via_ircc_dma_receive_complete(self, iobase)) { in via_ircc_interrupt()
1375 via_ircc_dma_receive(self); in via_ircc_interrupt()
1381 RxCurCount(iobase, self), in via_ircc_interrupt()
1382 self->RxLastCount); in via_ircc_interrupt()
1390 hwreset(self); //F01 in via_ircc_interrupt()
1392 via_ircc_dma_receive(self); in via_ircc_interrupt()
1396 spin_unlock(&self->lock); in via_ircc_interrupt()
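
The interrupt handler takes the plain spin_lock (it already runs in hardirq context, so saving the IRQ state would be redundant) and then dispatches on self->io.direction: a timeout during transmit re-kicks the DMA, a timeout during receive runs the polling handler, and completion events chain back into arming the next receive. A skeleton, with a placeholder for the status-register decode the listing does not show:

    /* ISR skeleton of via_ircc_interrupt(); status decode elided */
    static irqreturn_t example_interrupt(int irq, void *dev_id)
    {
        struct net_device *dev = dev_id;
        struct via_ircc_cb *self = netdev_priv(dev);
        int iobase = self->io.fir_base;
        bool timeout_event = false;      /* placeholder: the real code reads
                                          * the chip's interrupt status here */

        spin_lock(&self->lock);          /* hardirq context: no irqsave */
        if (timeout_event) {
            self->EventFlag.TimeOut++;
            if (self->io.direction == IO_XMIT)
                via_ircc_dma_xmit(self, iobase);   /* retry the transmit */
            else if (self->io.direction == IO_RECV)
                RxTimerHandler(self, iobase);      /* poll RX progress */
        }
        spin_unlock(&self->lock);
        return IRQ_HANDLED;
    }
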
1400 static void hwreset(struct via_ircc_cb *self) in hwreset() argument
1403 iobase = self->io.fir_base; in hwreset()
1428 via_ircc_change_speed(self, self->io.speed); in hwreset()
1430 self->st_fifo.len = 0; in hwreset()
1439 static int via_ircc_is_receiving(struct via_ircc_cb *self) in via_ircc_is_receiving() argument
1444 IRDA_ASSERT(self != NULL, return FALSE;); in via_ircc_is_receiving()
1446 iobase = self->io.fir_base; in via_ircc_is_receiving()
1447 if (CkRxRecv(iobase, self)) in via_ircc_is_receiving()
1464 struct via_ircc_cb *self; in via_ircc_net_open() local
1471 self = netdev_priv(dev); in via_ircc_net_open()
1473 IRDA_ASSERT(self != NULL, return 0;); in via_ircc_net_open()
1474 iobase = self->io.fir_base; in via_ircc_net_open()
1475 if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) { in via_ircc_net_open()
1477 self->io.irq); in via_ircc_net_open()
1484 if (request_dma(self->io.dma, dev->name)) { in via_ircc_net_open()
1486 self->io.dma); in via_ircc_net_open()
1487 free_irq(self->io.irq, dev); in via_ircc_net_open()
1490 if (self->io.dma2 != self->io.dma) { in via_ircc_net_open()
1491 if (request_dma(self->io.dma2, dev->name)) { in via_ircc_net_open()
1493 driver_name, self->io.dma2); in via_ircc_net_open()
1494 free_irq(self->io.irq, dev); in via_ircc_net_open()
1495 free_dma(self->io.dma); in via_ircc_net_open()
1507 via_ircc_dma_receive(self); in via_ircc_net_open()
1517 self->irlap = irlap_open(dev, &self->qos, hwname); in via_ircc_net_open()
1519 self->RxLastCount = 0; in via_ircc_net_open()
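
Opening the interface acquires three resources in order (the IRQ, the primary DMA channel, and a second DMA channel when it differs from the first), releasing the earlier ones on each failure; only then is reception armed and the IrLAP instance created. The acquisition ladder (errno choice assumed; the listing does not show the return values):

    /* Resource-acquisition ladder from via_ircc_net_open() */
    if (request_irq(self->io.irq, via_ircc_interrupt, 0, dev->name, dev))
        return -EAGAIN;

    if (request_dma(self->io.dma, dev->name)) {
        free_irq(self->io.irq, dev);             /* undo the IRQ */
        return -EAGAIN;
    }
    if (self->io.dma2 != self->io.dma &&
        request_dma(self->io.dma2, dev->name)) {
        free_irq(self->io.irq, dev);             /* undo both earlier grabs */
        free_dma(self->io.dma);
        return -EAGAIN;
    }
    via_ircc_dma_receive(self);                  /* arm RX before going up */
    self->irlap = irlap_open(dev, &self->qos, hwname);
    self->RxLastCount = 0;
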
1532 struct via_ircc_cb *self; in via_ircc_net_close() local
1538 self = netdev_priv(dev); in via_ircc_net_close()
1539 IRDA_ASSERT(self != NULL, return 0;); in via_ircc_net_close()
1544 if (self->irlap) in via_ircc_net_close()
1545 irlap_close(self->irlap); in via_ircc_net_close()
1546 self->irlap = NULL; in via_ircc_net_close()
1547 iobase = self->io.fir_base; in via_ircc_net_close()
1550 DisableDmaChannel(self->io.dma); in via_ircc_net_close()
1554 free_irq(self->io.irq, dev); in via_ircc_net_close()
1555 free_dma(self->io.dma); in via_ircc_net_close()
1556 if (self->io.dma2 != self->io.dma) in via_ircc_net_close()
1557 free_dma(self->io.dma2); in via_ircc_net_close()
1572 struct via_ircc_cb *self; in via_ircc_net_ioctl() local
1577 self = netdev_priv(dev); in via_ircc_net_ioctl()
1578 IRDA_ASSERT(self != NULL, return -1;); in via_ircc_net_ioctl()
1582 spin_lock_irqsave(&self->lock, flags); in via_ircc_net_ioctl()
1589 via_ircc_change_speed(self, irq->ifr_baudrate); in via_ircc_net_ioctl()
1596 irda_device_set_media_busy(self->netdev, TRUE); in via_ircc_net_ioctl()
1599 irq->ifr_receiving = via_ircc_is_receiving(self); in via_ircc_net_ioctl()
1605 spin_unlock_irqrestore(&self->lock, flags); in via_ircc_net_ioctl()
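
Finally, the ioctl handler serializes against the ISR with the same lock and services the three standard IrDA device requests: SIOCSBANDWIDTH changes speed, SIOCSMEDIABUSY flags the medium busy for IrLAP, and SIOCGRECEIVING reports receive activity. A condensed dispatch (the permission checks the real handler performs are omitted):

    /* Dispatch sketch of via_ircc_net_ioctl() */
    spin_lock_irqsave(&self->lock, flags);
    switch (cmd) {
    case SIOCSBANDWIDTH:                 /* change the link speed */
        via_ircc_change_speed(self, irq->ifr_baudrate);
        break;
    case SIOCSMEDIABUSY:                 /* tell IrLAP the medium is busy */
        irda_device_set_media_busy(self->netdev, TRUE);
        break;
    case SIOCGRECEIVING:                 /* are we currently receiving? */
        irq->ifr_receiving = via_ircc_is_receiving(self);
        break;
    default:
        ret = -EOPNOTSUPP;
    }
    spin_unlock_irqrestore(&self->lock, flags);
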