/*
 * Linux device driver for PCI based Prism54
 *
 * Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
 * Copyright (c) 2008, Christian Lamparter <chunkeey@web.de>
 *
 * Based on the islsm (softmac prism54) driver, which is:
 * Copyright 2004-2006 Jean-Baptiste Note <jean-baptiste.note@m4x.org>, et al.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/delay.h>
#include <linux/completion.h>
#include <linux/module.h>
#include <net/mac80211.h>

#include "p54.h"
#include "lmac.h"
#include "p54pci.h"

MODULE_AUTHOR("Michael Wu <flamingice@sourmilk.net>");
MODULE_DESCRIPTION("Prism54 PCI wireless driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("prism54pci");
MODULE_FIRMWARE("isl3886pci");

static const struct pci_device_id p54p_table[] = {
	/* Intersil PRISM Duette/Prism GT Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3890) },
	/* 3COM 3CRWE154G72 Wireless LAN adapter */
	{ PCI_DEVICE(0x10b7, 0x6001) },
	/* Intersil PRISM Indigo Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3877) },
	/* Intersil PRISM Javelin/Xbow Wireless LAN adapter */
	{ PCI_DEVICE(0x1260, 0x3886) },
	/* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */
	{ PCI_DEVICE(0x1260, 0xffff) },
	{ },
};

MODULE_DEVICE_TABLE(pci, p54p_table);

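/*
 * Reset the MAC, stream the LM86 firmware image into device memory
 * through the direct memory window, then restart the device so it
 * boots from RAM.
 */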
static int p54p_upload_firmware(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	__le32 reg;
	int err;
	__le32 *data;
	u32 remains, left, device_addr;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();

	/* wait for the firmware to reset properly */
	mdelay(10);

	err = p54_parse_firmware(dev, priv->firmware);
	if (err)
		return err;

	if (priv->common.fw_interface != FW_LM86) {
		dev_err(&priv->pdev->dev, "wrong firmware, "
			"please get a LM86(PCI) firmware and try again.\n");
		return -EINVAL;
	}

	data = (__le32 *) priv->firmware->data;
	remains = priv->firmware->size;
	device_addr = ISL38XX_DEV_FIRMWARE_ADDR;
	while (remains) {
		u32 i = 0;
		left = min((u32)0x1000, remains);
		P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr));
		P54P_READ(int_enable);

		device_addr += 0x1000;
		while (i < left) {
			P54P_WRITE(direct_mem_win[i], *data++);
			i += sizeof(u32);
		}

		remains -= left;
		P54P_READ(int_enable);
	}

	reg = P54P_READ(ctrl_stat);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN);
	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT);
	P54P_WRITE(ctrl_stat, reg);
	P54P_READ(ctrl_stat);
	udelay(10);

	reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET);
	P54P_WRITE(ctrl_stat, reg);
	wmb();
	udelay(10);

	/* wait for the firmware to boot properly */
	mdelay(100);

	return 0;
}

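/*
 * Replenish an RX ring: allocate and DMA-map a fresh skb for every
 * descriptor that has no host buffer attached, then publish the new
 * host index to the device.
 */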
static void p54p_refill_rx_ring(struct ieee80211_hw *dev,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf, u32 index)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	u32 limit, idx, i;

	idx = le32_to_cpu(ring_control->host_idx[ring_index]);
	limit = idx;
	limit -= index;
	limit = ring_limit - limit;

	i = idx % ring_limit;
	while (limit-- > 1) {
		struct p54p_desc *desc = &ring[i];

		if (!desc->host_addr) {
			struct sk_buff *skb;
			dma_addr_t mapping;
			skb = dev_alloc_skb(priv->common.rx_mtu + 32);
			if (!skb)
				break;

			mapping = pci_map_single(priv->pdev,
						 skb_tail_pointer(skb),
						 priv->common.rx_mtu + 32,
						 PCI_DMA_FROMDEVICE);

			if (pci_dma_mapping_error(priv->pdev, mapping)) {
				dev_kfree_skb_any(skb);
				dev_err(&priv->pdev->dev,
					"RX DMA Mapping error\n");
				break;
			}

			desc->host_addr = cpu_to_le32(mapping);
			desc->device_addr = 0;	// FIXME: necessary?
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
			desc->flags = 0;
			rx_buf[i] = skb;
		}

		i++;
		idx++;
		i %= ring_limit;
	}

	wmb();
	ring_control->host_idx[ring_index] = cpu_to_le32(idx);
}

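/*
 * Hand every frame the device has completed on this RX ring over to
 * the p54 common layer. Buffers the stack keeps are unmapped and
 * detached, the rest are reset for reuse; finally the ring is
 * replenished.
 */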
static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **rx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;
	while (i != idx) {
		u16 len;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		desc = &ring[i];
		len = le16_to_cpu(desc->len);
		skb = rx_buf[i];

		if (!skb) {
			i++;
			i %= ring_limit;
			continue;
		}

		if (unlikely(len > priv->common.rx_mtu)) {
			if (net_ratelimit())
				dev_err(&priv->pdev->dev, "rx'd frame size "
					"exceeds length threshold.\n");

			len = priv->common.rx_mtu;
		}
		dma_addr = le32_to_cpu(desc->host_addr);
		pci_dma_sync_single_for_cpu(priv->pdev, dma_addr,
			priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
		skb_put(skb, len);

		if (p54_rx(dev, skb)) {
			pci_unmap_single(priv->pdev, dma_addr,
				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
			rx_buf[i] = NULL;
			desc->host_addr = cpu_to_le32(0);
		} else {
			skb_trim(skb, 0);
			pci_dma_sync_single_for_device(priv->pdev, dma_addr,
				priv->common.rx_mtu + 32, PCI_DMA_FROMDEVICE);
			desc->len = cpu_to_le16(priv->common.rx_mtu + 32);
		}

		i++;
		i %= ring_limit;
	}

	p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index);
}

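/*
 * Reclaim TX descriptors the device has finished with: unmap the DMA
 * buffers, clear the descriptors and free the skbs that the common
 * layer no longer needs.
 */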
static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index,
	int ring_index, struct p54p_desc *ring, u32 ring_limit,
	struct sk_buff **tx_buf)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	struct sk_buff *skb;
	u32 idx, i;

	i = (*index) % ring_limit;
	(*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]);
	idx %= ring_limit;

	while (i != idx) {
		desc = &ring[i];

		skb = tx_buf[i];
		tx_buf[i] = NULL;

		pci_unmap_single(priv->pdev, le32_to_cpu(desc->host_addr),
			le16_to_cpu(desc->len), PCI_DMA_TODEVICE);

		desc->host_addr = 0;
		desc->device_addr = 0;
		desc->len = 0;
		desc->flags = 0;

		if (skb && FREE_AFTER_TX(skb))
			p54_free_skb(dev, skb);

		i++;
		i %= ring_limit;
	}
}

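/*
 * Bottom half: service all four rings (management and data, TX and RX)
 * and then tell the device that the host indices have been updated.
 */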
static void p54p_tasklet(unsigned long dev_id)
{
	struct ieee80211_hw *dev = (struct ieee80211_hw *)dev_id;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;

	p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt,
			   ARRAY_SIZE(ring_control->tx_mgmt),
			   priv->tx_buf_mgmt);

	p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data,
			   ARRAY_SIZE(ring_control->tx_data),
			   priv->tx_buf_data);

	p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt,
		ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt);

	p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data,
		ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data);

	wmb();
	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
}

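/*
 * Interrupt handler: acknowledge the interrupt, defer ring processing
 * to the tasklet and signal firmware boot completion. An all-ones read
 * of int_ident (device not responding) is ignored.
 */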
static irqreturn_t p54p_interrupt(int irq, void *dev_id)
{
	struct ieee80211_hw *dev = dev_id;
	struct p54p_priv *priv = dev->priv;
	__le32 reg;

	reg = P54P_READ(int_ident);
	if (unlikely(reg == cpu_to_le32(0xFFFFFFFF)))
		goto out;

	P54P_WRITE(int_ack, reg);

	reg &= P54P_READ(int_enable);

	if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE))
		tasklet_schedule(&priv->tasklet);
	else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))
		complete(&priv->boot_comp);

out:
	return reg ? IRQ_HANDLED : IRQ_NONE;
}

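/*
 * Queue one frame on the data TX ring: map it for DMA, fill the next
 * free descriptor, advance the host index and kick the device.
 */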
static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb)
{
	unsigned long flags;
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	struct p54p_desc *desc;
	dma_addr_t mapping;
	u32 idx, i;
	__le32 device_addr;

	spin_lock_irqsave(&priv->lock, flags);
	idx = le32_to_cpu(ring_control->host_idx[1]);
	i = idx % ARRAY_SIZE(ring_control->tx_data);
	device_addr = ((struct p54_hdr *)skb->data)->req_id;

	mapping = pci_map_single(priv->pdev, skb->data, skb->len,
				 PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(priv->pdev, mapping)) {
		spin_unlock_irqrestore(&priv->lock, flags);
		p54_free_skb(dev, skb);
		dev_err(&priv->pdev->dev, "TX DMA mapping error\n");
		return;
	}
	priv->tx_buf_data[i] = skb;

	desc = &ring_control->tx_data[i];
	desc->host_addr = cpu_to_le32(mapping);
	desc->device_addr = device_addr;
	desc->len = cpu_to_le16(skb->len);
	desc->flags = 0;

	wmb();
	ring_control->host_idx[1] = cpu_to_le32(idx + 1);
	spin_unlock_irqrestore(&priv->lock, flags);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
}

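/*
 * Shut the device down: mask interrupts, stop the tasklet and release
 * every DMA mapping and skb still sitting on the four rings.
 */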
static void p54p_stop(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	struct p54p_ring_control *ring_control = priv->ring_control;
	unsigned int i;
	struct p54p_desc *desc;

	P54P_WRITE(int_enable, cpu_to_le32(0));
	P54P_READ(int_enable);
	udelay(10);

	free_irq(priv->pdev->irq, dev);

	tasklet_kill(&priv->tasklet);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) {
		desc = &ring_control->rx_data[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
		kfree_skb(priv->rx_buf_data[i]);
		priv->rx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) {
		desc = &ring_control->rx_mgmt[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 priv->common.rx_mtu + 32,
					 PCI_DMA_FROMDEVICE);
		kfree_skb(priv->rx_buf_mgmt[i]);
		priv->rx_buf_mgmt[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) {
		desc = &ring_control->tx_data[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 PCI_DMA_TODEVICE);

		p54_free_skb(dev, priv->tx_buf_data[i]);
		priv->tx_buf_data[i] = NULL;
	}

	for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) {
		desc = &ring_control->tx_mgmt[i];
		if (desc->host_addr)
			pci_unmap_single(priv->pdev,
					 le32_to_cpu(desc->host_addr),
					 le16_to_cpu(desc->len),
					 PCI_DMA_TODEVICE);

		p54_free_skb(dev, priv->tx_buf_mgmt[i]);
		priv->tx_buf_mgmt[i] = NULL;
	}

	memset(ring_control, 0, sizeof(*ring_control));
}

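/*
 * Bring the device up: request the interrupt line, upload the firmware,
 * prime both RX rings, hand the ring control block to the device and
 * wait for the firmware's INIT interrupt before enabling normal
 * operation.
 */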
static int p54p_open(struct ieee80211_hw *dev)
{
	struct p54p_priv *priv = dev->priv;
	int err;
	long timeout;

	init_completion(&priv->boot_comp);
	err = request_irq(priv->pdev->irq, p54p_interrupt,
			  IRQF_SHARED, "p54pci", dev);
	if (err) {
		dev_err(&priv->pdev->dev, "failed to register IRQ handler\n");
		return err;
	}

	memset(priv->ring_control, 0, sizeof(*priv->ring_control));
	err = p54p_upload_firmware(dev);
	if (err) {
		free_irq(priv->pdev->irq, dev);
		return err;
	}
	priv->rx_idx_data = priv->tx_idx_data = 0;
	priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0;

	p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data,
		ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0);

	p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt,
		ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0);

	P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma));
	P54P_READ(ring_control_base);
	wmb();
	udelay(10);

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET));
	P54P_READ(dev_int);

	timeout = wait_for_completion_interruptible_timeout(
			&priv->boot_comp, HZ);
	if (timeout <= 0) {
		wiphy_err(dev->wiphy, "Cannot boot firmware!\n");
		p54p_stop(dev);
		return timeout ? -ERESTARTSYS : -ETIMEDOUT;
	}

	P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE));
	P54P_READ(int_enable);
	wmb();
	udelay(10);

	P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE));
	P54P_READ(dev_int);
	wmb();
	udelay(10);

	return 0;
}

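/*
 * Completion callback for request_firmware_nowait(): once the firmware
 * image is available, boot the device once to read the EEPROM, then
 * register with mac80211. On failure the driver unbinds itself from
 * the PCI device.
 */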
static void p54p_firmware_step2(const struct firmware *fw,
				void *context)
{
	struct p54p_priv *priv = context;
	struct ieee80211_hw *dev = priv->common.hw;
	struct pci_dev *pdev = priv->pdev;
	int err;

	if (!fw) {
		dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n");
		err = -ENOENT;
		goto out;
	}

	priv->firmware = fw;

	err = p54p_open(dev);
	if (err)
		goto out;
	err = p54_read_eeprom(dev);
	p54p_stop(dev);
	if (err)
		goto out;

	err = p54_register_common(dev, &pdev->dev);
	if (err)
		goto out;

out:
	complete(&priv->fw_loaded);

	if (err) {
		struct device *parent = pdev->dev.parent;

		if (parent)
			device_lock(parent);

		/*
		 * This will indirectly result in a call to p54p_remove.
		 * Hence, we don't need to bother with freeing any
		 * allocated resources at all.
		 */
		device_release_driver(&pdev->dev);

		if (parent)
			device_unlock(parent);
	}

	pci_dev_put(pdev);
}

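/*
 * PCI probe: enable and map the device, allocate the shared ring
 * control block and the mac80211 hardware structure, then kick off an
 * asynchronous firmware request; the rest of the setup happens in
 * p54p_firmware_step2().
 */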
static int p54p_probe(struct pci_dev *pdev,
		      const struct pci_device_id *id)
{
	struct p54p_priv *priv;
	struct ieee80211_hw *dev;
	unsigned long mem_addr, mem_len;
	int err;

	pci_dev_get(pdev);
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "Cannot enable new PCI device\n");
		goto err_put;
	}

	mem_addr = pci_resource_start(pdev, 0);
	mem_len = pci_resource_len(pdev, 0);
	if (mem_len < sizeof(struct p54p_csr)) {
		dev_err(&pdev->dev, "Too short PCI resources\n");
		err = -ENODEV;
		goto err_disable_dev;
	}

	err = pci_request_regions(pdev, "p54pci");
	if (err) {
		dev_err(&pdev->dev, "Cannot obtain PCI resources\n");
		goto err_disable_dev;
	}

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(&pdev->dev, "No suitable DMA available\n");
		goto err_free_reg;
	}

	pci_set_master(pdev);
	pci_try_set_mwi(pdev);

	pci_write_config_byte(pdev, 0x40, 0);
	pci_write_config_byte(pdev, 0x41, 0);

	dev = p54_init_common(sizeof(*priv));
	if (!dev) {
		dev_err(&pdev->dev, "ieee80211 alloc failed\n");
		err = -ENOMEM;
		goto err_free_reg;
	}

	priv = dev->priv;
	priv->pdev = pdev;

	init_completion(&priv->fw_loaded);
	SET_IEEE80211_DEV(dev, &pdev->dev);
	pci_set_drvdata(pdev, dev);

	priv->map = ioremap(mem_addr, mem_len);
	if (!priv->map) {
		dev_err(&pdev->dev, "Cannot map device memory\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	priv->ring_control = pci_alloc_consistent(pdev, sizeof(*priv->ring_control),
						  &priv->ring_control_dma);
	if (!priv->ring_control) {
		dev_err(&pdev->dev, "Cannot allocate rings\n");
		err = -ENOMEM;
		goto err_iounmap;
	}
	priv->common.open = p54p_open;
	priv->common.stop = p54p_stop;
	priv->common.tx = p54p_tx;

	spin_lock_init(&priv->lock);
	tasklet_init(&priv->tasklet, p54p_tasklet, (unsigned long)dev);

	err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci",
				      &priv->pdev->dev, GFP_KERNEL,
				      priv, p54p_firmware_step2);
	if (!err)
		return 0;

	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);

 err_iounmap:
	iounmap(priv->map);

 err_free_dev:
	p54_free_common(dev);

 err_free_reg:
	pci_release_regions(pdev);
 err_disable_dev:
	pci_disable_device(pdev);
 err_put:
	pci_dev_put(pdev);
	return err;
}

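/*
 * PCI remove: wait until the asynchronous firmware setup has finished,
 * then tear everything down in reverse order of the probe.
 */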
static void p54p_remove(struct pci_dev *pdev)
{
	struct ieee80211_hw *dev = pci_get_drvdata(pdev);
	struct p54p_priv *priv;

	if (!dev)
		return;

	priv = dev->priv;
	wait_for_completion(&priv->fw_loaded);
	p54_unregister_common(dev);
	release_firmware(priv->firmware);
	pci_free_consistent(pdev, sizeof(*priv->ring_control),
			    priv->ring_control, priv->ring_control_dma);
	iounmap(priv->map);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	p54_free_common(dev);
}

#ifdef CONFIG_PM_SLEEP
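/*
 * System sleep hooks: suspend saves the PCI state and puts the device
 * into D3hot; resume re-enables the device and returns it to D0.
 */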
static int p54p_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);

	pci_save_state(pdev);
	pci_set_power_state(pdev, PCI_D3hot);
	pci_disable_device(pdev);
	return 0;
}

static int p54p_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	int err;

	err = pci_reenable_device(pdev);
	if (err)
		return err;
	return pci_set_power_state(pdev, PCI_D0);
}

static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume);

#define P54P_PM_OPS (&p54pci_pm_ops)
#else
#define P54P_PM_OPS (NULL)
#endif /* CONFIG_PM_SLEEP */

static struct pci_driver p54p_driver = {
	.name = "p54pci",
	.id_table = p54p_table,
	.probe = p54p_probe,
	.remove = p54p_remove,
	.driver.pm = P54P_PM_OPS,
};

module_pci_driver(p54p_driver);