Lines Matching refs:self (identifier cross-reference: the leading number on each entry is the line number in the driver source, and the trailing "in ..." annotation names the enclosing function)

171 static int  nsc_ircc_close(struct nsc_ircc_cb *self);
173 static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self);
174 static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self);
175 static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase);
179 static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase);
180 static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 baud);
181 static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self);
343 struct nsc_ircc_cb *self; in nsc_ircc_open() local
375 self = netdev_priv(dev); in nsc_ircc_open()
376 self->netdev = dev; in nsc_ircc_open()
377 spin_lock_init(&self->lock); in nsc_ircc_open()
380 dev_self[chip_index] = self; in nsc_ircc_open()
381 self->index = chip_index; in nsc_ircc_open()
384 self->io.cfg_base = info->cfg_base; in nsc_ircc_open()
385 self->io.fir_base = info->fir_base; in nsc_ircc_open()
386 self->io.irq = info->irq; in nsc_ircc_open()
387 self->io.fir_ext = CHIP_IO_EXTENT; in nsc_ircc_open()
388 self->io.dma = info->dma; in nsc_ircc_open()
389 self->io.fifo_size = 32; in nsc_ircc_open()
392 ret = request_region(self->io.fir_base, self->io.fir_ext, driver_name); in nsc_ircc_open()
395 __func__, self->io.fir_base); in nsc_ircc_open()
401 irda_init_max_qos_capabilies(&self->qos); in nsc_ircc_open()
404 self->qos.baud_rate.bits = IR_9600|IR_19200|IR_38400|IR_57600| in nsc_ircc_open()
407 self->qos.min_turn_time.bits = qos_mtt_bits; in nsc_ircc_open()
408 irda_qos_bits_to_value(&self->qos); in nsc_ircc_open()
411 self->rx_buff.truesize = 14384; in nsc_ircc_open()
412 self->tx_buff.truesize = 14384; in nsc_ircc_open()
415 self->rx_buff.head = in nsc_ircc_open()
416 dma_alloc_coherent(NULL, self->rx_buff.truesize, in nsc_ircc_open()
417 &self->rx_buff_dma, GFP_KERNEL); in nsc_ircc_open()
418 if (self->rx_buff.head == NULL) { in nsc_ircc_open()
423 memset(self->rx_buff.head, 0, self->rx_buff.truesize); in nsc_ircc_open()
425 self->tx_buff.head = in nsc_ircc_open()
426 dma_alloc_coherent(NULL, self->tx_buff.truesize, in nsc_ircc_open()
427 &self->tx_buff_dma, GFP_KERNEL); in nsc_ircc_open()
428 if (self->tx_buff.head == NULL) { in nsc_ircc_open()
432 memset(self->tx_buff.head, 0, self->tx_buff.truesize); in nsc_ircc_open()
434 self->rx_buff.in_frame = FALSE; in nsc_ircc_open()
435 self->rx_buff.state = OUTSIDE_FRAME; in nsc_ircc_open()
436 self->tx_buff.data = self->tx_buff.head; in nsc_ircc_open()
437 self->rx_buff.data = self->rx_buff.head; in nsc_ircc_open()
440 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in nsc_ircc_open()
441 self->tx_fifo.tail = self->tx_buff.head; in nsc_ircc_open()
459 dongle_id = nsc_ircc_read_dongle_id(self->io.fir_base); in nsc_ircc_open()
468 self->io.dongle_id = dongle_id; in nsc_ircc_open()
469 nsc_ircc_init_dongle_interface(self->io.fir_base, dongle_id); in nsc_ircc_open()
471 self->pldev = platform_device_register_simple(NSC_IRCC_DRIVER_NAME, in nsc_ircc_open()
472 self->index, NULL, 0); in nsc_ircc_open()
473 if (IS_ERR(self->pldev)) { in nsc_ircc_open()
474 err = PTR_ERR(self->pldev); in nsc_ircc_open()
477 platform_set_drvdata(self->pldev, self); in nsc_ircc_open()
484 dma_free_coherent(NULL, self->tx_buff.truesize, in nsc_ircc_open()
485 self->tx_buff.head, self->tx_buff_dma); in nsc_ircc_open()
487 dma_free_coherent(NULL, self->rx_buff.truesize, in nsc_ircc_open()
488 self->rx_buff.head, self->rx_buff_dma); in nsc_ircc_open()
490 release_region(self->io.fir_base, self->io.fir_ext); in nsc_ircc_open()
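
Taken together, the open() fragments above follow the usual probe-time pattern: claim the FIR I/O region, allocate DMA-coherent RX and TX staging buffers, and unwind in reverse order on any failure. A condensed sketch of that idiom, assuming the driver's nsc_ircc_cb/chipio_t types and the legacy dma_alloc_coherent(NULL, ...) convention visible above; the function name is illustrative:

static int example_open_resources(struct nsc_ircc_cb *self, chipio_t *info)
{
	if (!request_region(info->fir_base, CHIP_IO_EXTENT, driver_name))
		return -ENODEV;

	self->rx_buff.head = dma_alloc_coherent(NULL, self->rx_buff.truesize,
						&self->rx_buff_dma, GFP_KERNEL);
	if (!self->rx_buff.head)
		goto out_region;

	self->tx_buff.head = dma_alloc_coherent(NULL, self->tx_buff.truesize,
						&self->tx_buff_dma, GFP_KERNEL);
	if (!self->tx_buff.head)
		goto out_rx;

	return 0;

out_rx:		/* release in reverse order of acquisition */
	dma_free_coherent(NULL, self->rx_buff.truesize,
			  self->rx_buff.head, self->rx_buff_dma);
out_region:
	release_region(info->fir_base, CHIP_IO_EXTENT);
	return -ENOMEM;
}

nsc_ircc_close() at lines 503-532 below releases the same resources in the same order once the netdev is unregistered.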
503 static int __exit nsc_ircc_close(struct nsc_ircc_cb *self) in nsc_ircc_close() argument
509 IRDA_ASSERT(self != NULL, return -1;); in nsc_ircc_close()
511 iobase = self->io.fir_base; in nsc_ircc_close()
513 platform_device_unregister(self->pldev); in nsc_ircc_close()
516 unregister_netdev(self->netdev); in nsc_ircc_close()
520 __func__, self->io.fir_base); in nsc_ircc_close()
521 release_region(self->io.fir_base, self->io.fir_ext); in nsc_ircc_close()
523 if (self->tx_buff.head) in nsc_ircc_close()
524 dma_free_coherent(NULL, self->tx_buff.truesize, in nsc_ircc_close()
525 self->tx_buff.head, self->tx_buff_dma); in nsc_ircc_close()
527 if (self->rx_buff.head) in nsc_ircc_close()
528 dma_free_coherent(NULL, self->rx_buff.truesize, in nsc_ircc_close()
529 self->rx_buff.head, self->rx_buff_dma); in nsc_ircc_close()
531 dev_self[self->index] = NULL; in nsc_ircc_close()
532 free_netdev(self->netdev); in nsc_ircc_close()
1239 static __u8 nsc_ircc_change_speed(struct nsc_ircc_cb *self, __u32 speed) in nsc_ircc_change_speed() argument
1241 struct net_device *dev = self->netdev; in nsc_ircc_change_speed()
1249 IRDA_ASSERT(self != NULL, return 0;); in nsc_ircc_change_speed()
1251 iobase = self->io.fir_base; in nsc_ircc_change_speed()
1254 self->io.speed = speed; in nsc_ircc_change_speed()
1302 nsc_ircc_change_dongle_speed(iobase, speed, self->io.dongle_id); in nsc_ircc_change_speed()
1325 nsc_ircc_dma_receive(self); in nsc_ircc_change_speed()
1349 struct nsc_ircc_cb *self; in nsc_ircc_hard_xmit_sir() local
1355 self = netdev_priv(dev); in nsc_ircc_hard_xmit_sir()
1357 IRDA_ASSERT(self != NULL, return 0;); in nsc_ircc_hard_xmit_sir()
1359 iobase = self->io.fir_base; in nsc_ircc_hard_xmit_sir()
1364 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_hard_xmit_sir()
1368 if ((speed != self->io.speed) && (speed != -1)) { in nsc_ircc_hard_xmit_sir()
1375 if (self->io.direction == IO_RECV) { in nsc_ircc_hard_xmit_sir()
1376 nsc_ircc_change_speed(self, speed); in nsc_ircc_hard_xmit_sir()
1381 self->new_speed = speed; in nsc_ircc_hard_xmit_sir()
1387 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_hard_xmit_sir()
1391 self->new_speed = speed; in nsc_ircc_hard_xmit_sir()
1397 self->tx_buff.data = self->tx_buff.head; in nsc_ircc_hard_xmit_sir()
1399 self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data, in nsc_ircc_hard_xmit_sir()
1400 self->tx_buff.truesize); in nsc_ircc_hard_xmit_sir()
1402 dev->stats.tx_bytes += self->tx_buff.len; in nsc_ircc_hard_xmit_sir()
1412 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_hard_xmit_sir()
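
The SIR transmit path above stages the frame rather than sending it directly: under self->lock, the skb is byte-stuffed into tx_buff with net/irda's async_wrap_skb() helper, and the TX interrupt later drains that buffer by PIO. A minimal sketch, assuming the surrounding driver types; the function name is hypothetical:

static void example_stage_sir_frame(struct nsc_ircc_cb *self,
				    struct net_device *dev,
				    struct sk_buff *skb)
{
	unsigned long flags;

	spin_lock_irqsave(&self->lock, flags);

	/* Rewind the staging buffer, then wrap the frame into it
	 * (async_wrap_skb adds BOF/EOF flags and escapes control bytes). */
	self->tx_buff.data = self->tx_buff.head;
	self->tx_buff.len = async_wrap_skb(skb, self->tx_buff.data,
					   self->tx_buff.truesize);
	dev->stats.tx_bytes += self->tx_buff.len;

	/* The TX-FIFO-level-low interrupt (IER_TXLDL_IE) then drains
	 * tx_buff via PIO in nsc_ircc_sir_interrupt(). */
	spin_unlock_irqrestore(&self->lock, flags);
}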
1421 struct nsc_ircc_cb *self; in nsc_ircc_hard_xmit_fir() local
1428 self = netdev_priv(dev); in nsc_ircc_hard_xmit_fir()
1429 iobase = self->io.fir_base; in nsc_ircc_hard_xmit_fir()
1434 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_hard_xmit_fir()
1438 if ((speed != self->io.speed) && (speed != -1)) { in nsc_ircc_hard_xmit_fir()
1443 if(self->tx_fifo.len == 0) { in nsc_ircc_hard_xmit_fir()
1444 nsc_ircc_change_speed(self, speed); in nsc_ircc_hard_xmit_fir()
1447 self->new_speed = speed; in nsc_ircc_hard_xmit_fir()
1457 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_hard_xmit_fir()
1462 self->new_speed = speed; in nsc_ircc_hard_xmit_fir()
1470 self->tx_fifo.queue[self->tx_fifo.free].start = self->tx_fifo.tail; in nsc_ircc_hard_xmit_fir()
1471 self->tx_fifo.queue[self->tx_fifo.free].len = skb->len; in nsc_ircc_hard_xmit_fir()
1472 self->tx_fifo.tail += skb->len; in nsc_ircc_hard_xmit_fir()
1476 skb_copy_from_linear_data(skb, self->tx_fifo.queue[self->tx_fifo.free].start, in nsc_ircc_hard_xmit_fir()
1478 self->tx_fifo.len++; in nsc_ircc_hard_xmit_fir()
1479 self->tx_fifo.free++; in nsc_ircc_hard_xmit_fir()
1482 if (self->tx_fifo.len == 1) { in nsc_ircc_hard_xmit_fir()
1487 do_gettimeofday(&self->now); in nsc_ircc_hard_xmit_fir()
1488 diff = self->now.tv_usec - self->stamp.tv_usec; in nsc_ircc_hard_xmit_fir()
1514 self->io.direction = IO_XMIT; in nsc_ircc_hard_xmit_fir()
1531 nsc_ircc_dma_xmit(self, iobase); in nsc_ircc_hard_xmit_fir()
1536 if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) in nsc_ircc_hard_xmit_fir()
1537 netif_wake_queue(self->netdev); in nsc_ircc_hard_xmit_fir()
1543 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_hard_xmit_fir()
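
In the FIR path, frames are packed back-to-back into the coherent tx_buff and tracked by a small software window, tx_fifo, so several frames can be chained per DMA run (bounded by MAX_TX_WINDOW, see lines 1536 and 1680). A data-structure sketch implied by the fragments; the type names here are hypothetical, the field names match the listing:

/* Sketch of the software TX window implied by the tx_fifo fragments. */
struct tx_slot {
	void *start;	/* where this frame begins inside tx_buff.head */
	int   len;	/* frame length in bytes */
};

struct tx_window {
	struct tx_slot queue[MAX_TX_WINDOW];
	int   ptr;	/* slot currently being DMA'd */
	int   len;	/* frames queued and not yet completed */
	int   free;	/* next free slot */
	void *tail;	/* next free byte inside tx_buff.head */
};

When tx_fifo.len reaches 1 (line 1482), the hardware is idle, so the driver checks the minimum-turnaround-time stamps and kicks the DMA immediately; otherwise the frame just waits in the window for nsc_ircc_dma_xmit_complete() to chain it.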
1555 static void nsc_ircc_dma_xmit(struct nsc_ircc_cb *self, int iobase) in nsc_ircc_dma_xmit() argument
1566 self->io.direction = IO_XMIT; in nsc_ircc_dma_xmit()
1572 irda_setup_dma(self->io.dma, in nsc_ircc_dma_xmit()
1573 ((u8 *)self->tx_fifo.queue[self->tx_fifo.ptr].start - in nsc_ircc_dma_xmit()
1574 self->tx_buff.head) + self->tx_buff_dma, in nsc_ircc_dma_xmit()
1575 self->tx_fifo.queue[self->tx_fifo.ptr].len, in nsc_ircc_dma_xmit()
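
Lines 1572-1575 compute the bus address for the DMA controller: the slot's byte offset inside the virtual buffer tx_buff.head is applied to the coherent mapping's handle tx_buff_dma. Restated as a sketch (irda_setup_dma() and DMA_TX_MODE are the net/irda helpers the driver uses; the function name is illustrative):

static void example_kick_tx_dma(struct nsc_ircc_cb *self)
{
	void *start = self->tx_fifo.queue[self->tx_fifo.ptr].start;

	/* Same offset into the bus-side view as into the CPU-side view. */
	dma_addr_t bus = self->tx_buff_dma +
			 ((u8 *)start - self->tx_buff.head);

	irda_setup_dma(self->io.dma, bus,
		       self->tx_fifo.queue[self->tx_fifo.ptr].len,
		       DMA_TX_MODE);
}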
1634 static int nsc_ircc_dma_xmit_complete(struct nsc_ircc_cb *self) in nsc_ircc_dma_xmit_complete() argument
1642 iobase = self->io.fir_base; in nsc_ircc_dma_xmit_complete()
1653 self->netdev->stats.tx_errors++; in nsc_ircc_dma_xmit_complete()
1654 self->netdev->stats.tx_fifo_errors++; in nsc_ircc_dma_xmit_complete()
1659 self->netdev->stats.tx_packets++; in nsc_ircc_dma_xmit_complete()
1663 self->tx_fifo.ptr++; in nsc_ircc_dma_xmit_complete()
1664 self->tx_fifo.len--; in nsc_ircc_dma_xmit_complete()
1667 if (self->tx_fifo.len) { in nsc_ircc_dma_xmit_complete()
1668 nsc_ircc_dma_xmit(self, iobase); in nsc_ircc_dma_xmit_complete()
1674 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in nsc_ircc_dma_xmit_complete()
1675 self->tx_fifo.tail = self->tx_buff.head; in nsc_ircc_dma_xmit_complete()
1680 if ((self->tx_fifo.free < MAX_TX_WINDOW) && (self->new_speed == 0)) { in nsc_ircc_dma_xmit_complete()
1683 netif_wake_queue(self->netdev); in nsc_ircc_dma_xmit_complete()
1699 static int nsc_ircc_dma_receive(struct nsc_ircc_cb *self) in nsc_ircc_dma_receive() argument
1704 iobase = self->io.fir_base; in nsc_ircc_dma_receive()
1707 self->tx_fifo.len = self->tx_fifo.ptr = self->tx_fifo.free = 0; in nsc_ircc_dma_receive()
1708 self->tx_fifo.tail = self->tx_buff.head; in nsc_ircc_dma_receive()
1721 self->io.direction = IO_RECV; in nsc_ircc_dma_receive()
1722 self->rx_buff.data = self->rx_buff.head; in nsc_ircc_dma_receive()
1728 self->st_fifo.len = self->st_fifo.pending_bytes = 0; in nsc_ircc_dma_receive()
1729 self->st_fifo.tail = self->st_fifo.head = 0; in nsc_ircc_dma_receive()
1731 irda_setup_dma(self->io.dma, self->rx_buff_dma, self->rx_buff.truesize, in nsc_ircc_dma_receive()
1751 static int nsc_ircc_dma_receive_complete(struct nsc_ircc_cb *self, int iobase) in nsc_ircc_dma_receive_complete() argument
1759 st_fifo = &self->st_fifo; in nsc_ircc_dma_receive_complete()
1794 self->netdev->stats.rx_errors += len; in nsc_ircc_dma_receive_complete()
1797 self->netdev->stats.rx_errors++; in nsc_ircc_dma_receive_complete()
1799 self->rx_buff.data += len; in nsc_ircc_dma_receive_complete()
1802 self->netdev->stats.rx_length_errors++; in nsc_ircc_dma_receive_complete()
1805 self->netdev->stats.rx_frame_errors++; in nsc_ircc_dma_receive_complete()
1808 self->netdev->stats.rx_crc_errors++; in nsc_ircc_dma_receive_complete()
1812 self->netdev->stats.rx_fifo_errors++; in nsc_ircc_dma_receive_complete()
1815 self->netdev->stats.rx_fifo_errors++; in nsc_ircc_dma_receive_complete()
1824 if (st_fifo->pending_bytes < self->io.fifo_size) { in nsc_ircc_dma_receive_complete()
1857 do_gettimeofday(&self->stamp); in nsc_ircc_dma_receive_complete()
1864 self->netdev->stats.rx_dropped++; in nsc_ircc_dma_receive_complete()
1876 if (self->io.speed < 4000000) { in nsc_ircc_dma_receive_complete()
1879 self->rx_buff.data, in nsc_ircc_dma_receive_complete()
1884 self->rx_buff.data, in nsc_ircc_dma_receive_complete()
1889 self->rx_buff.data += len; in nsc_ircc_dma_receive_complete()
1890 self->netdev->stats.rx_bytes += len; in nsc_ircc_dma_receive_complete()
1891 self->netdev->stats.rx_packets++; in nsc_ircc_dma_receive_complete()
1893 skb->dev = self->netdev; in nsc_ircc_dma_receive_complete()
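
At the end of the receive-complete path, each good frame is copied out of the DMA buffer minus its trailing CRC, 16-bit below 4 Mb/s and 32-bit at 4 Mb/s, and handed to the stack as an ETH_P_IRDA packet. A sketch of that hand-off, assuming skb and len come from the surrounding loop and eliding allocation, headroom, and error handling:

static void example_deliver_frame(struct nsc_ircc_cb *self,
				  struct sk_buff *skb, int len)
{
	/* Copy the payload only; the CRC stays behind in rx_buff. */
	if (self->io.speed < 4000000) {
		skb_put(skb, len - 2);
		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 2);
	} else {
		skb_put(skb, len - 4);
		skb_copy_to_linear_data(skb, self->rx_buff.data, len - 4);
	}

	self->rx_buff.data += len;	/* advance past frame + CRC */
	self->netdev->stats.rx_bytes += len;
	self->netdev->stats.rx_packets++;

	skb->dev = self->netdev;
	skb_reset_mac_header(skb);
	skb->protocol = htons(ETH_P_IRDA);
	netif_rx(skb);			/* hand the frame up to IrLAP */
}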
1911 static void nsc_ircc_pio_receive(struct nsc_ircc_cb *self) in nsc_ircc_pio_receive() argument
1916 iobase = self->io.fir_base; in nsc_ircc_pio_receive()
1921 async_unwrap_char(self->netdev, &self->netdev->stats, in nsc_ircc_pio_receive()
1922 &self->rx_buff, byte); in nsc_ircc_pio_receive()
1932 static void nsc_ircc_sir_interrupt(struct nsc_ircc_cb *self, int eir) in nsc_ircc_sir_interrupt() argument
1939 actual = nsc_ircc_pio_write(self->io.fir_base, in nsc_ircc_sir_interrupt()
1940 self->tx_buff.data, in nsc_ircc_sir_interrupt()
1941 self->tx_buff.len, in nsc_ircc_sir_interrupt()
1942 self->io.fifo_size); in nsc_ircc_sir_interrupt()
1943 self->tx_buff.data += actual; in nsc_ircc_sir_interrupt()
1944 self->tx_buff.len -= actual; in nsc_ircc_sir_interrupt()
1946 self->io.direction = IO_XMIT; in nsc_ircc_sir_interrupt()
1949 if (self->tx_buff.len > 0) in nsc_ircc_sir_interrupt()
1950 self->ier = IER_TXLDL_IE; in nsc_ircc_sir_interrupt()
1953 self->netdev->stats.tx_packets++; in nsc_ircc_sir_interrupt()
1954 netif_wake_queue(self->netdev); in nsc_ircc_sir_interrupt()
1955 self->ier = IER_TXEMP_IE; in nsc_ircc_sir_interrupt()
1962 self->io.direction = IO_RECV; in nsc_ircc_sir_interrupt()
1963 self->ier = IER_RXHDL_IE; in nsc_ircc_sir_interrupt()
1967 if (self->new_speed) { in nsc_ircc_sir_interrupt()
1969 self->ier = nsc_ircc_change_speed(self, in nsc_ircc_sir_interrupt()
1970 self->new_speed); in nsc_ircc_sir_interrupt()
1971 self->new_speed = 0; in nsc_ircc_sir_interrupt()
1972 netif_wake_queue(self->netdev); in nsc_ircc_sir_interrupt()
1975 if (self->io.speed > 115200) { in nsc_ircc_sir_interrupt()
1984 nsc_ircc_pio_receive(self); in nsc_ircc_sir_interrupt()
1987 self->ier = IER_RXHDL_IE; in nsc_ircc_sir_interrupt()
1997 static void nsc_ircc_fir_interrupt(struct nsc_ircc_cb *self, int iobase, in nsc_ircc_fir_interrupt() argument
2007 if (nsc_ircc_dma_receive_complete(self, iobase)) { in nsc_ircc_fir_interrupt()
2009 self->ier = IER_SFIF_IE; in nsc_ircc_fir_interrupt()
2011 self->ier = IER_SFIF_IE | IER_TMR_IE; in nsc_ircc_fir_interrupt()
2023 if (self->io.direction == IO_XMIT) { in nsc_ircc_fir_interrupt()
2024 nsc_ircc_dma_xmit(self, iobase); in nsc_ircc_fir_interrupt()
2027 self->ier = IER_DMA_IE; in nsc_ircc_fir_interrupt()
2030 if (nsc_ircc_dma_receive_complete(self, iobase)) { in nsc_ircc_fir_interrupt()
2031 self->ier = IER_SFIF_IE; in nsc_ircc_fir_interrupt()
2033 self->ier = IER_SFIF_IE | IER_TMR_IE; in nsc_ircc_fir_interrupt()
2038 if (nsc_ircc_dma_xmit_complete(self)) { in nsc_ircc_fir_interrupt()
2039 if(self->new_speed != 0) { in nsc_ircc_fir_interrupt()
2043 self->ier = IER_TXEMP_IE; in nsc_ircc_fir_interrupt()
2047 if (irda_device_txqueue_empty(self->netdev)) { in nsc_ircc_fir_interrupt()
2049 nsc_ircc_dma_receive(self); in nsc_ircc_fir_interrupt()
2050 self->ier = IER_SFIF_IE; in nsc_ircc_fir_interrupt()
2058 self->ier = IER_DMA_IE; in nsc_ircc_fir_interrupt()
2063 self->ier = nsc_ircc_change_speed(self, self->new_speed); in nsc_ircc_fir_interrupt()
2064 self->new_speed = 0; in nsc_ircc_fir_interrupt()
2065 netif_wake_queue(self->netdev); in nsc_ircc_fir_interrupt()
2081 struct nsc_ircc_cb *self; in nsc_ircc_interrupt() local
2085 self = netdev_priv(dev); in nsc_ircc_interrupt()
2087 spin_lock(&self->lock); in nsc_ircc_interrupt()
2089 iobase = self->io.fir_base; in nsc_ircc_interrupt()
2094 self->ier = inb(iobase+IER); in nsc_ircc_interrupt()
2095 eir = inb(iobase+EIR) & self->ier; /* Mask out the interesting ones */ in nsc_ircc_interrupt()
2101 if (self->io.speed > 115200) in nsc_ircc_interrupt()
2102 nsc_ircc_fir_interrupt(self, iobase, eir); in nsc_ircc_interrupt()
2104 nsc_ircc_sir_interrupt(self, eir); in nsc_ircc_interrupt()
2107 outb(self->ier, iobase+IER); /* Restore interrupts */ in nsc_ircc_interrupt()
2110 spin_unlock(&self->lock); in nsc_ircc_interrupt()
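
The top-level ISR fragments show the driver's masking discipline: take self->lock, snapshot the enabled sources (IER), AND them with the pending sources (EIR), disable everything while servicing, then write back self->ier, which the SIR/FIR handlers may have changed to advance their state machines. A reconstruction consistent with the fragments; BSR, BANK0, and switch_bank() are assumed from the driver's header for this bank-switched NSC chip:

static irqreturn_t example_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct nsc_ircc_cb *self = netdev_priv(dev);
	int iobase;
	__u8 bsr, eir;

	spin_lock(&self->lock);

	iobase = self->io.fir_base;
	bsr = inb(iobase + BSR);	/* save current register bank */

	switch_bank(iobase, BANK0);	/* driver's bank-select helper */
	self->ier = inb(iobase + IER);	/* currently enabled sources */
	eir = inb(iobase + EIR) & self->ier; /* pending AND enabled only */

	outb(0, iobase + IER);		/* mask everything while servicing */

	if (eir) {
		/* Dispatch on the current link speed: above 115.2 kb/s
		 * the chip runs in MIR/FIR mode and uses DMA. */
		if (self->io.speed > 115200)
			nsc_ircc_fir_interrupt(self, iobase, eir);
		else
			nsc_ircc_sir_interrupt(self, eir);
	}

	outb(self->ier, iobase + IER);	/* restore; handlers may update it */
	outb(bsr, iobase + BSR);	/* restore bank */

	spin_unlock(&self->lock);
	return IRQ_HANDLED;
}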
2120 static int nsc_ircc_is_receiving(struct nsc_ircc_cb *self) in nsc_ircc_is_receiving() argument
2127 IRDA_ASSERT(self != NULL, return FALSE;); in nsc_ircc_is_receiving()
2129 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_is_receiving()
2131 if (self->io.speed > 115200) { in nsc_ircc_is_receiving()
2132 iobase = self->io.fir_base; in nsc_ircc_is_receiving()
2143 status = (self->rx_buff.state != OUTSIDE_FRAME); in nsc_ircc_is_receiving()
2145 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_is_receiving()
2158 struct nsc_ircc_cb *self; in nsc_ircc_net_open() local
2166 self = netdev_priv(dev); in nsc_ircc_net_open()
2168 IRDA_ASSERT(self != NULL, return 0;); in nsc_ircc_net_open()
2170 iobase = self->io.fir_base; in nsc_ircc_net_open()
2172 if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) { in nsc_ircc_net_open()
2174 driver_name, self->io.irq); in nsc_ircc_net_open()
2181 if (request_dma(self->io.dma, dev->name)) { in nsc_ircc_net_open()
2183 driver_name, self->io.dma); in nsc_ircc_net_open()
2184 free_irq(self->io.irq, dev); in nsc_ircc_net_open()
2202 sprintf(hwname, "NSC-FIR @ 0x%03x", self->io.fir_base); in nsc_ircc_net_open()
2208 self->irlap = irlap_open(dev, &self->qos, hwname); in nsc_ircc_net_open()
2221 struct nsc_ircc_cb *self; in nsc_ircc_net_close() local
2229 self = netdev_priv(dev); in nsc_ircc_net_close()
2230 IRDA_ASSERT(self != NULL, return 0;); in nsc_ircc_net_close()
2236 if (self->irlap) in nsc_ircc_net_close()
2237 irlap_close(self->irlap); in nsc_ircc_net_close()
2238 self->irlap = NULL; in nsc_ircc_net_close()
2240 iobase = self->io.fir_base; in nsc_ircc_net_close()
2242 disable_dma(self->io.dma); in nsc_ircc_net_close()
2251 free_irq(self->io.irq, dev); in nsc_ircc_net_close()
2252 free_dma(self->io.dma); in nsc_ircc_net_close()
2269 struct nsc_ircc_cb *self; in nsc_ircc_net_ioctl() local
2275 self = netdev_priv(dev); in nsc_ircc_net_ioctl()
2277 IRDA_ASSERT(self != NULL, return -1;); in nsc_ircc_net_ioctl()
2287 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_net_ioctl()
2288 nsc_ircc_change_speed(self, irq->ifr_baudrate); in nsc_ircc_net_ioctl()
2289 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_net_ioctl()
2296 irda_device_set_media_busy(self->netdev, TRUE); in nsc_ircc_net_ioctl()
2300 irq->ifr_receiving = nsc_ircc_is_receiving(self); in nsc_ircc_net_ioctl()
2310 struct nsc_ircc_cb *self = platform_get_drvdata(dev); in nsc_ircc_suspend() local
2313 int iobase = self->io.fir_base; in nsc_ircc_suspend()
2315 if (self->io.suspended) in nsc_ircc_suspend()
2321 if (netif_running(self->netdev)) { in nsc_ircc_suspend()
2322 netif_device_detach(self->netdev); in nsc_ircc_suspend()
2323 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_suspend()
2334 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_suspend()
2335 free_irq(self->io.irq, self->netdev); in nsc_ircc_suspend()
2336 disable_dma(self->io.dma); in nsc_ircc_suspend()
2338 self->io.suspended = 1; in nsc_ircc_suspend()
2346 struct nsc_ircc_cb *self = platform_get_drvdata(dev); in nsc_ircc_resume() local
2349 if (!self->io.suspended) in nsc_ircc_resume()
2355 nsc_ircc_setup(&self->io); in nsc_ircc_resume()
2356 nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id); in nsc_ircc_resume()
2358 if (netif_running(self->netdev)) { in nsc_ircc_resume()
2359 if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, in nsc_ircc_resume()
2360 self->netdev->name, self->netdev)) { in nsc_ircc_resume()
2362 driver_name, self->io.irq); in nsc_ircc_resume()
2368 unregister_netdevice(self->netdev); in nsc_ircc_resume()
2370 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_resume()
2371 nsc_ircc_change_speed(self, self->io.speed); in nsc_ircc_resume()
2372 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_resume()
2373 netif_device_attach(self->netdev); in nsc_ircc_resume()
2377 spin_lock_irqsave(&self->lock, flags); in nsc_ircc_resume()
2378 nsc_ircc_change_speed(self, 9600); in nsc_ircc_resume()
2379 spin_unlock_irqrestore(&self->lock, flags); in nsc_ircc_resume()
2381 self->io.suspended = 0; in nsc_ircc_resume()
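
The resume fragments mirror suspend: reprogram the chip from the saved self->io state, re-attach the dongle, re-request the IRQ, and restore the last speed (or fall back to 9600 baud SIR if the interface was down). A sketch along the lines of 2346-2381, assuming the old platform_device suspend/resume signatures this driver uses:

static int example_resume(struct platform_device *dev)
{
	struct nsc_ircc_cb *self = platform_get_drvdata(dev);
	unsigned long flags;

	if (!self->io.suspended)
		return 0;

	/* Reprogram the chip and the dongle from the saved state. */
	nsc_ircc_setup(&self->io);
	nsc_ircc_init_dongle_interface(self->io.fir_base, self->io.dongle_id);

	if (netif_running(self->netdev)) {
		if (request_irq(self->io.irq, nsc_ircc_interrupt, 0,
				self->netdev->name, self->netdev)) {
			/* Per line 2368: don't fail resume, just drop
			 * this network interface. */
			unregister_netdevice(self->netdev);
		} else {
			spin_lock_irqsave(&self->lock, flags);
			nsc_ircc_change_speed(self, self->io.speed);
			spin_unlock_irqrestore(&self->lock, flags);
			netif_device_attach(self->netdev);
		}
	} else {
		spin_lock_irqsave(&self->lock, flags);
		nsc_ircc_change_speed(self, 9600); /* default SIR rate */
		spin_unlock_irqrestore(&self->lock, flags);
	}
	self->io.suspended = 0;
	return 0;
}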