1 /*
2 * Copyright (c) 2005-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4 *
5 * Permission to use, copy, modify, and/or distribute this software for any
6 * purpose with or without fee is hereby granted, provided that the above
7 * copyright notice and this permission notice appear in all copies.
8 *
9 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16 */
17
18 #include <linux/pci.h>
19 #include <linux/module.h>
20 #include <linux/interrupt.h>
21 #include <linux/spinlock.h>
22 #include <linux/bitops.h>
23
24 #include "core.h"
25 #include "debug.h"
26
27 #include "targaddrs.h"
28 #include "bmi.h"
29
30 #include "hif.h"
31 #include "htc.h"
32
33 #include "ce.h"
34 #include "pci.h"
35
36 enum ath10k_pci_irq_mode {
37 ATH10K_PCI_IRQ_AUTO = 0,
38 ATH10K_PCI_IRQ_LEGACY = 1,
39 ATH10K_PCI_IRQ_MSI = 2,
40 };
41
42 enum ath10k_pci_reset_mode {
43 ATH10K_PCI_RESET_AUTO = 0,
44 ATH10K_PCI_RESET_WARM_ONLY = 1,
45 };
46
47 static unsigned int ath10k_pci_irq_mode = ATH10K_PCI_IRQ_AUTO;
48 static unsigned int ath10k_pci_reset_mode = ATH10K_PCI_RESET_AUTO;
49
50 module_param_named(irq_mode, ath10k_pci_irq_mode, uint, 0644);
51 MODULE_PARM_DESC(irq_mode, "0: auto, 1: legacy, 2: msi (default: 0)");
52
53 module_param_named(reset_mode, ath10k_pci_reset_mode, uint, 0644);
54 MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
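/* e.g. loading the module with "modprobe ath10k_pci irq_mode=1 reset_mode=1"
 * forces legacy interrupts and restricts recovery to warm resets only.
 */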
55
56 /* how long to wait for the target to initialise, in ms */
57 #define ATH10K_PCI_TARGET_WAIT 3000
58 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
59
60 #define QCA988X_2_0_DEVICE_ID (0x003c)
61
62 static const struct pci_device_id ath10k_pci_id_table[] = {
63 { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
64 {0}
65 };
66
67 static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
68 static int ath10k_pci_cold_reset(struct ath10k *ar);
69 static int ath10k_pci_warm_reset(struct ath10k *ar);
70 static int ath10k_pci_wait_for_target_init(struct ath10k *ar);
71 static int ath10k_pci_init_irq(struct ath10k *ar);
72 static int ath10k_pci_deinit_irq(struct ath10k *ar);
73 static int ath10k_pci_request_irq(struct ath10k *ar);
74 static void ath10k_pci_free_irq(struct ath10k *ar);
75 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
76 struct ath10k_ce_pipe *rx_pipe,
77 struct bmi_xfer *xfer);
78
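/* Host-side Copy Engine configuration. For each CE, src_* describes the
 * host->target (send) ring and dest_* the target->host (receive) ring;
 * a zero entry count means that direction is unused on that CE.
 */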
79 static const struct ce_attr host_ce_config_wlan[] = {
80 /* CE0: host->target HTC control and raw streams */
81 {
82 .flags = CE_ATTR_FLAGS,
83 .src_nentries = 16,
84 .src_sz_max = 256,
85 .dest_nentries = 0,
86 },
87
88 /* CE1: target->host HTT + HTC control */
89 {
90 .flags = CE_ATTR_FLAGS,
91 .src_nentries = 0,
92 .src_sz_max = 512,
93 .dest_nentries = 512,
94 },
95
96 /* CE2: target->host WMI */
97 {
98 .flags = CE_ATTR_FLAGS,
99 .src_nentries = 0,
100 .src_sz_max = 2048,
101 .dest_nentries = 32,
102 },
103
104 /* CE3: host->target WMI */
105 {
106 .flags = CE_ATTR_FLAGS,
107 .src_nentries = 32,
108 .src_sz_max = 2048,
109 .dest_nentries = 0,
110 },
111
112 /* CE4: host->target HTT */
113 {
114 .flags = CE_ATTR_FLAGS | CE_ATTR_DIS_INTR,
115 .src_nentries = CE_HTT_H2T_MSG_SRC_NENTRIES,
116 .src_sz_max = 256,
117 .dest_nentries = 0,
118 },
119
120 /* CE5: unused */
121 {
122 .flags = CE_ATTR_FLAGS,
123 .src_nentries = 0,
124 .src_sz_max = 0,
125 .dest_nentries = 0,
126 },
127
128 /* CE6: target autonomous hif_memcpy */
129 {
130 .flags = CE_ATTR_FLAGS,
131 .src_nentries = 0,
132 .src_sz_max = 0,
133 .dest_nentries = 0,
134 },
135
136 /* CE7: ce_diag, the Diagnostic Window */
137 {
138 .flags = CE_ATTR_FLAGS,
139 .src_nentries = 2,
140 .src_sz_max = DIAG_TRANSFER_LIMIT,
141 .dest_nentries = 2,
142 },
143 };
144
145 /* Target firmware's Copy Engine configuration. */
146 static const struct ce_pipe_config target_ce_config_wlan[] = {
147 /* CE0: host->target HTC control and raw streams */
148 {
149 .pipenum = __cpu_to_le32(0),
150 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
151 .nentries = __cpu_to_le32(32),
152 .nbytes_max = __cpu_to_le32(256),
153 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
154 .reserved = __cpu_to_le32(0),
155 },
156
157 /* CE1: target->host HTT + HTC control */
158 {
159 .pipenum = __cpu_to_le32(1),
160 .pipedir = __cpu_to_le32(PIPEDIR_IN),
161 .nentries = __cpu_to_le32(32),
162 .nbytes_max = __cpu_to_le32(512),
163 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
164 .reserved = __cpu_to_le32(0),
165 },
166
167 /* CE2: target->host WMI */
168 {
169 .pipenum = __cpu_to_le32(2),
170 .pipedir = __cpu_to_le32(PIPEDIR_IN),
171 .nentries = __cpu_to_le32(32),
172 .nbytes_max = __cpu_to_le32(2048),
173 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
174 .reserved = __cpu_to_le32(0),
175 },
176
177 /* CE3: host->target WMI */
178 {
179 .pipenum = __cpu_to_le32(3),
180 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
181 .nentries = __cpu_to_le32(32),
182 .nbytes_max = __cpu_to_le32(2048),
183 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
184 .reserved = __cpu_to_le32(0),
185 },
186
187 /* CE4: host->target HTT */
188 {
189 .pipenum = __cpu_to_le32(4),
190 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
191 .nentries = __cpu_to_le32(256),
192 .nbytes_max = __cpu_to_le32(256),
193 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
194 .reserved = __cpu_to_le32(0),
195 },
196
197 /* NB: 50% of src nentries, since tx has 2 frags */
198
199 /* CE5: unused */
200 {
201 .pipenum = __cpu_to_le32(5),
202 .pipedir = __cpu_to_le32(PIPEDIR_OUT),
203 .nentries = __cpu_to_le32(32),
204 .nbytes_max = __cpu_to_le32(2048),
205 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
206 .reserved = __cpu_to_le32(0),
207 },
208
209 /* CE6: Reserved for target autonomous hif_memcpy */
210 {
211 .pipenum = __cpu_to_le32(6),
212 .pipedir = __cpu_to_le32(PIPEDIR_INOUT),
213 .nentries = __cpu_to_le32(32),
214 .nbytes_max = __cpu_to_le32(4096),
215 .flags = __cpu_to_le32(CE_ATTR_FLAGS),
216 .reserved = __cpu_to_le32(0),
217 },
218
219 /* CE7 used only by Host */
220 };
221
222 /*
223 * Map from service/endpoint to Copy Engine.
224 * This table is derived from the CE_PCI TABLE, above.
225 * It is passed to the Target at startup for use by firmware.
226 */
227 static const struct service_to_pipe target_service_to_ce_map_wlan[] = {
228 {
229 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
230 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
231 __cpu_to_le32(3),
232 },
233 {
234 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VO),
235 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
236 __cpu_to_le32(2),
237 },
238 {
239 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
240 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
241 __cpu_to_le32(3),
242 },
243 {
244 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BK),
245 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
246 __cpu_to_le32(2),
247 },
248 {
249 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
250 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
251 __cpu_to_le32(3),
252 },
253 {
254 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_BE),
255 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
256 __cpu_to_le32(2),
257 },
258 {
259 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
260 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
261 __cpu_to_le32(3),
262 },
263 {
264 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_DATA_VI),
265 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
266 __cpu_to_le32(2),
267 },
268 {
269 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
270 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
271 __cpu_to_le32(3),
272 },
273 {
274 __cpu_to_le32(ATH10K_HTC_SVC_ID_WMI_CONTROL),
275 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
276 __cpu_to_le32(2),
277 },
278 {
279 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
280 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
281 __cpu_to_le32(0),
282 },
283 {
284 __cpu_to_le32(ATH10K_HTC_SVC_ID_RSVD_CTRL),
285 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
286 __cpu_to_le32(1),
287 },
288 { /* not used */
289 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
290 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
291 __cpu_to_le32(0),
292 },
293 { /* not used */
294 __cpu_to_le32(ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS),
295 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
296 __cpu_to_le32(1),
297 },
298 {
299 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
300 __cpu_to_le32(PIPEDIR_OUT), /* out = UL = host -> target */
301 __cpu_to_le32(4),
302 },
303 {
304 __cpu_to_le32(ATH10K_HTC_SVC_ID_HTT_DATA_MSG),
305 __cpu_to_le32(PIPEDIR_IN), /* in = DL = target -> host */
306 __cpu_to_le32(1),
307 },
308
309 /* (Additions here) */
310
311 { /* must be last */
312 __cpu_to_le32(0),
313 __cpu_to_le32(0),
314 __cpu_to_le32(0),
315 },
316 };
317
318 static bool ath10k_pci_irq_pending(struct ath10k *ar)
319 {
320 u32 cause;
321
322 /* Check if the shared legacy irq is for us */
323 cause = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
324 PCIE_INTR_CAUSE_ADDRESS);
325 if (cause & (PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL))
326 return true;
327
328 return false;
329 }
330
331 static void ath10k_pci_disable_and_clear_legacy_irq(struct ath10k *ar)
332 {
333 /* IMPORTANT: INTR_CLR register has to be set after
334 * INTR_ENABLE is set to 0, otherwise the interrupt cannot be
335 * cleared properly. */
336 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
337 0);
338 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_CLR_ADDRESS,
339 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
340
341 /* IMPORTANT: this extra read transaction is required to
342 * flush the posted write buffer. */
343 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
344 PCIE_INTR_ENABLE_ADDRESS);
345 }
346
347 static void ath10k_pci_enable_legacy_irq(struct ath10k *ar)
348 {
349 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
350 PCIE_INTR_ENABLE_ADDRESS,
351 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
352
353 /* IMPORTANT: this extra read transaction is required to
354 * flush the posted write buffer. */
355 (void)ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
356 PCIE_INTR_ENABLE_ADDRESS);
357 }
358
359 static inline const char *ath10k_pci_get_irq_method(struct ath10k *ar)
360 {
361 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
362
363 if (ar_pci->num_msi_intrs > 1)
364 return "msi-x";
365
366 if (ar_pci->num_msi_intrs == 1)
367 return "msi";
368
369 return "legacy";
370 }
371
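/* Allocate an skb, DMA-map it and hand it to the CE destination ring so the
 * target can fill it with received data. Caller must hold ce_lock.
 */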
372 static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
373 {
374 struct ath10k *ar = pipe->hif_ce_state;
375 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
376 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
377 struct sk_buff *skb;
378 dma_addr_t paddr;
379 int ret;
380
381 lockdep_assert_held(&ar_pci->ce_lock);
382
383 skb = dev_alloc_skb(pipe->buf_sz);
384 if (!skb)
385 return -ENOMEM;
386
387 WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
388
389 paddr = dma_map_single(ar->dev, skb->data,
390 skb->len + skb_tailroom(skb),
391 DMA_FROM_DEVICE);
392 if (unlikely(dma_mapping_error(ar->dev, paddr))) {
393 ath10k_warn(ar, "failed to dma map pci rx buf\n");
394 dev_kfree_skb_any(skb);
395 return -EIO;
396 }
397
398 ATH10K_SKB_CB(skb)->paddr = paddr;
399
400 ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
401 if (ret) {
402 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
403 dma_unmap_single(ar->dev, paddr, skb->len + skb_tailroom(skb),
404 DMA_FROM_DEVICE);
405 dev_kfree_skb_any(skb);
406 return ret;
407 }
408
409 return 0;
410 }
411
412 static void __ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
413 {
414 struct ath10k *ar = pipe->hif_ce_state;
415 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
416 struct ath10k_ce_pipe *ce_pipe = pipe->ce_hdl;
417 int ret, num;
418
419 lockdep_assert_held(&ar_pci->ce_lock);
420
421 if (pipe->buf_sz == 0)
422 return;
423
424 if (!ce_pipe->dest_ring)
425 return;
426
427 num = __ath10k_ce_rx_num_free_bufs(ce_pipe);
428 while (num--) {
429 ret = __ath10k_pci_rx_post_buf(pipe);
430 if (ret) {
431 ath10k_warn(ar, "failed to post pci rx buf: %d\n", ret);
432 mod_timer(&ar_pci->rx_post_retry, jiffies +
433 ATH10K_PCI_RX_POST_RETRY_MS);
434 break;
435 }
436 }
437 }
438
439 static void ath10k_pci_rx_post_pipe(struct ath10k_pci_pipe *pipe)
440 {
441 struct ath10k *ar = pipe->hif_ce_state;
442 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
443
444 spin_lock_bh(&ar_pci->ce_lock);
445 __ath10k_pci_rx_post_pipe(pipe);
446 spin_unlock_bh(&ar_pci->ce_lock);
447 }
448
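/* Replenish RX buffers on all copy engine pipes. */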
449 static void ath10k_pci_rx_post(struct ath10k *ar)
450 {
451 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
452 int i;
453
454 spin_lock_bh(&ar_pci->ce_lock);
455 for (i = 0; i < CE_COUNT; i++)
456 __ath10k_pci_rx_post_pipe(&ar_pci->pipe_info[i]);
457 spin_unlock_bh(&ar_pci->ce_lock);
458 }
459
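/* Timer callback armed by __ath10k_pci_rx_post_pipe() when posting an RX
 * buffer fails; it simply retries the post on all pipes.
 */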
460 static void ath10k_pci_rx_replenish_retry(unsigned long ptr)
461 {
462 struct ath10k *ar = (void *)ptr;
463
464 ath10k_pci_rx_post(ar);
465 }
466
467 /*
468 * Diagnostic read/write access is provided for startup/config/debug usage.
469 * Caller must guarantee proper alignment, when applicable, and single user
470 * at any moment.
471 */
472 static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
473 int nbytes)
474 {
475 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
476 int ret = 0;
477 u32 buf;
478 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
479 unsigned int id;
480 unsigned int flags;
481 struct ath10k_ce_pipe *ce_diag;
482 /* Host buffer address in CE space */
483 u32 ce_data;
484 dma_addr_t ce_data_base = 0;
485 void *data_buf = NULL;
486 int i;
487
488 ce_diag = ar_pci->ce_diag;
489
490 /*
491 * Allocate a temporary bounce buffer to hold caller's data
492 * to be DMA'ed from Target. This guarantees
493 * 1) 4-byte alignment
494 * 2) Buffer in DMA-able space
495 */
496 orig_nbytes = nbytes;
497 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
498 orig_nbytes,
499 &ce_data_base,
500 GFP_ATOMIC);
501
502 if (!data_buf) {
503 ret = -ENOMEM;
504 goto done;
505 }
506 memset(data_buf, 0, orig_nbytes);
507
508 remaining_bytes = orig_nbytes;
509 ce_data = ce_data_base;
510 while (remaining_bytes) {
511 nbytes = min_t(unsigned int, remaining_bytes,
512 DIAG_TRANSFER_LIMIT);
513
514 ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
515 if (ret != 0)
516 goto done;
517
518 /* Request CE to send from Target(!) address to Host buffer */
519 /*
520 * The address supplied by the caller is in the
521 * Target CPU virtual address space.
522 *
523 * In order to use this address with the diagnostic CE,
524 * convert it from Target CPU virtual address space
525 * to CE address space
526 */
527 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
528 address);
529
530 ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
531 0);
532 if (ret)
533 goto done;
534
535 i = 0;
536 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
537 &completed_nbytes,
538 &id) != 0) {
539 mdelay(1);
540 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
541 ret = -EBUSY;
542 goto done;
543 }
544 }
545
546 if (nbytes != completed_nbytes) {
547 ret = -EIO;
548 goto done;
549 }
550
551 if (buf != (u32)address) {
552 ret = -EIO;
553 goto done;
554 }
555
556 i = 0;
557 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
558 &completed_nbytes,
559 &id, &flags) != 0) {
560 mdelay(1);
561
562 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
563 ret = -EBUSY;
564 goto done;
565 }
566 }
567
568 if (nbytes != completed_nbytes) {
569 ret = -EIO;
570 goto done;
571 }
572
573 if (buf != ce_data) {
574 ret = -EIO;
575 goto done;
576 }
577
578 remaining_bytes -= nbytes;
579 address += nbytes;
580 ce_data += nbytes;
581 }
582
583 done:
584 if (ret == 0)
585 memcpy(data, data_buf, orig_nbytes);
586 else
587 ath10k_warn(ar, "failed to read diag value at 0x%x: %d\n",
588 address, ret);
589
590 if (data_buf)
591 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
592 ce_data_base);
593
594 return ret;
595 }
596
597 static int ath10k_pci_diag_read32(struct ath10k *ar, u32 address, u32 *value)
598 {
599 __le32 val = 0;
600 int ret;
601
602 ret = ath10k_pci_diag_read_mem(ar, address, &val, sizeof(val));
603 *value = __le32_to_cpu(val);
604
605 return ret;
606 }
607
608 static int __ath10k_pci_diag_read_hi(struct ath10k *ar, void *dest,
609 u32 src, u32 len)
610 {
611 u32 host_addr, addr;
612 int ret;
613
614 host_addr = host_interest_item_address(src);
615
616 ret = ath10k_pci_diag_read32(ar, host_addr, &addr);
617 if (ret != 0) {
618 ath10k_warn(ar, "failed to get memcpy hi address for firmware address %d: %d\n",
619 src, ret);
620 return ret;
621 }
622
623 ret = ath10k_pci_diag_read_mem(ar, addr, dest, len);
624 if (ret != 0) {
625 ath10k_warn(ar, "failed to memcpy firmware memory from %d (%d B): %d\n",
626 addr, len, ret);
627 return ret;
628 }
629
630 return 0;
631 }
632
633 #define ath10k_pci_diag_read_hi(ar, dest, src, len) \
634 __ath10k_pci_diag_read_hi(ar, dest, HI_ITEM(src), len)
635
636 static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
637 const void *data, int nbytes)
638 {
639 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
640 int ret = 0;
641 u32 buf;
642 unsigned int completed_nbytes, orig_nbytes, remaining_bytes;
643 unsigned int id;
644 unsigned int flags;
645 struct ath10k_ce_pipe *ce_diag;
646 void *data_buf = NULL;
647 u32 ce_data; /* Host buffer address in CE space */
648 dma_addr_t ce_data_base = 0;
649 int i;
650
651 ce_diag = ar_pci->ce_diag;
652
653 /*
654 * Allocate a temporary bounce buffer to hold caller's data
655 * to be DMA'ed to Target. This guarantees
656 * 1) 4-byte alignment
657 * 2) Buffer in DMA-able space
658 */
659 orig_nbytes = nbytes;
660 data_buf = (unsigned char *)dma_alloc_coherent(ar->dev,
661 orig_nbytes,
662 &ce_data_base,
663 GFP_ATOMIC);
664 if (!data_buf) {
665 ret = -ENOMEM;
666 goto done;
667 }
668
669 /* Copy caller's data to allocated DMA buf */
670 memcpy(data_buf, data, orig_nbytes);
671
672 /*
673 * The address supplied by the caller is in the
674 * Target CPU virtual address space.
675 *
676 * In order to use this address with the diagnostic CE,
677 * convert it from
678 * Target CPU virtual address space
679 * to
680 * CE address space
681 */
682 address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem, address);
683
684 remaining_bytes = orig_nbytes;
685 ce_data = ce_data_base;
686 while (remaining_bytes) {
687 /* FIXME: check cast */
688 nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
689
690 /* Set up to receive directly into Target(!) address */
691 ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
692 if (ret != 0)
693 goto done;
694
695 /*
696 * Request CE to send caller-supplied data that
697 * was copied to bounce buffer to Target(!) address.
698 */
699 ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
700 nbytes, 0, 0);
701 if (ret != 0)
702 goto done;
703
704 i = 0;
705 while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
706 &completed_nbytes,
707 &id) != 0) {
708 mdelay(1);
709
710 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
711 ret = -EBUSY;
712 goto done;
713 }
714 }
715
716 if (nbytes != completed_nbytes) {
717 ret = -EIO;
718 goto done;
719 }
720
721 if (buf != ce_data) {
722 ret = -EIO;
723 goto done;
724 }
725
726 i = 0;
727 while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
728 &completed_nbytes,
729 &id, &flags) != 0) {
730 mdelay(1);
731
732 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
733 ret = -EBUSY;
734 goto done;
735 }
736 }
737
738 if (nbytes != completed_nbytes) {
739 ret = -EIO;
740 goto done;
741 }
742
743 if (buf != address) {
744 ret = -EIO;
745 goto done;
746 }
747
748 remaining_bytes -= nbytes;
749 address += nbytes;
750 ce_data += nbytes;
751 }
752
753 done:
754 if (data_buf) {
755 dma_free_coherent(ar->dev, orig_nbytes, data_buf,
756 ce_data_base);
757 }
758
759 if (ret != 0)
760 ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
761 address, ret);
762
763 return ret;
764 }
765
766 static int ath10k_pci_diag_write32(struct ath10k *ar, u32 address, u32 value)
767 {
768 __le32 val = __cpu_to_le32(value);
769
770 return ath10k_pci_diag_write_mem(ar, address, &val, sizeof(val));
771 }
772
773 static bool ath10k_pci_is_awake(struct ath10k *ar)
774 {
775 u32 val = ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS);
776
777 return RTC_STATE_V_GET(val) == RTC_STATE_V_ON;
778 }
779
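/* Poll the RTC state until the target reports it is fully awake, backing
 * off from 5us to 50us per iteration, for at most PCIE_WAKE_TIMEOUT.
 */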
780 static int ath10k_pci_wake_wait(struct ath10k *ar)
781 {
782 int tot_delay = 0;
783 int curr_delay = 5;
784
785 while (tot_delay < PCIE_WAKE_TIMEOUT) {
786 if (ath10k_pci_is_awake(ar))
787 return 0;
788
789 udelay(curr_delay);
790 tot_delay += curr_delay;
791
792 if (curr_delay < 50)
793 curr_delay += 5;
794 }
795
796 return -ETIMEDOUT;
797 }
798
799 static int ath10k_pci_wake(struct ath10k *ar)
800 {
801 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
802 PCIE_SOC_WAKE_V_MASK);
803 return ath10k_pci_wake_wait(ar);
804 }
805
806 static void ath10k_pci_sleep(struct ath10k *ar)
807 {
808 ath10k_pci_reg_write32(ar, PCIE_SOC_WAKE_ADDRESS,
809 PCIE_SOC_WAKE_RESET);
810 }
811
812 /* Called by lower (CE) layer when a send to Target completes. */
813 static void ath10k_pci_ce_send_done(struct ath10k_ce_pipe *ce_state)
814 {
815 struct ath10k *ar = ce_state->ar;
816 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
817 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
818 void *transfer_context;
819 u32 ce_data;
820 unsigned int nbytes;
821 unsigned int transfer_id;
822
823 while (ath10k_ce_completed_send_next(ce_state, &transfer_context,
824 &ce_data, &nbytes,
825 &transfer_id) == 0) {
826 /* no need to call tx completion for NULL pointers */
827 if (transfer_context == NULL)
828 continue;
829
830 cb->tx_completion(ar, transfer_context, transfer_id);
831 }
832 }
833
834 /* Called by lower (CE) layer when data is received from the Target. */
835 static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
836 {
837 struct ath10k *ar = ce_state->ar;
838 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
839 struct ath10k_pci_pipe *pipe_info = &ar_pci->pipe_info[ce_state->id];
840 struct ath10k_hif_cb *cb = &ar_pci->msg_callbacks_current;
841 struct sk_buff *skb;
842 void *transfer_context;
843 u32 ce_data;
844 unsigned int nbytes, max_nbytes;
845 unsigned int transfer_id;
846 unsigned int flags;
847
848 while (ath10k_ce_completed_recv_next(ce_state, &transfer_context,
849 &ce_data, &nbytes, &transfer_id,
850 &flags) == 0) {
851 skb = transfer_context;
852 max_nbytes = skb->len + skb_tailroom(skb);
853 dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr,
854 max_nbytes, DMA_FROM_DEVICE);
855
856 if (unlikely(max_nbytes < nbytes)) {
857 ath10k_warn(ar, "rxed more than expected (nbytes %d, max %d)",
858 nbytes, max_nbytes);
859 dev_kfree_skb_any(skb);
860 continue;
861 }
862
863 skb_put(skb, nbytes);
864 cb->rx_completion(ar, skb, pipe_info->pipe_num);
865 }
866
867 ath10k_pci_rx_post_pipe(pipe_info);
868 }
869
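/* Send a scatter-gather list on the given pipe. All but the last item are
 * enqueued with CE_SEND_FLAG_GATHER; the final item is sent without the
 * flag, which marks the end of the transfer.
 */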
870 static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
871 struct ath10k_hif_sg_item *items, int n_items)
872 {
873 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
874 struct ath10k_pci_pipe *pci_pipe = &ar_pci->pipe_info[pipe_id];
875 struct ath10k_ce_pipe *ce_pipe = pci_pipe->ce_hdl;
876 struct ath10k_ce_ring *src_ring = ce_pipe->src_ring;
877 unsigned int nentries_mask;
878 unsigned int sw_index;
879 unsigned int write_index;
880 int err, i = 0;
881
882 spin_lock_bh(&ar_pci->ce_lock);
883
884 nentries_mask = src_ring->nentries_mask;
885 sw_index = src_ring->sw_index;
886 write_index = src_ring->write_index;
887
888 if (unlikely(CE_RING_DELTA(nentries_mask,
889 write_index, sw_index - 1) < n_items)) {
890 err = -ENOBUFS;
891 goto err;
892 }
893
894 for (i = 0; i < n_items - 1; i++) {
895 ath10k_dbg(ar, ATH10K_DBG_PCI,
896 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
897 i, items[i].paddr, items[i].len, n_items);
898 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
899 items[i].vaddr, items[i].len);
900
901 err = ath10k_ce_send_nolock(ce_pipe,
902 items[i].transfer_context,
903 items[i].paddr,
904 items[i].len,
905 items[i].transfer_id,
906 CE_SEND_FLAG_GATHER);
907 if (err)
908 goto err;
909 }
910
911 /* `i` is equal to `n_items - 1` after the for() loop */
912
913 ath10k_dbg(ar, ATH10K_DBG_PCI,
914 "pci tx item %d paddr 0x%08x len %d n_items %d\n",
915 i, items[i].paddr, items[i].len, n_items);
916 ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci tx data: ",
917 items[i].vaddr, items[i].len);
918
919 err = ath10k_ce_send_nolock(ce_pipe,
920 items[i].transfer_context,
921 items[i].paddr,
922 items[i].len,
923 items[i].transfer_id,
924 0);
925 if (err)
926 goto err;
927
928 spin_unlock_bh(&ar_pci->ce_lock);
929 return 0;
930
931 err:
932 for (; i > 0; i--)
933 __ath10k_ce_send_revert(ce_pipe);
934
935 spin_unlock_bh(&ar_pci->ce_lock);
936 return err;
937 }
938
939 static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
940 {
941 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
942
943 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get free queue number\n");
944
945 return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
946 }
947
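/* Fetch the firmware register dump (hi_failure_state) through the
 * diagnostic window, log it and, if available, copy it into crash_data.
 */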
948 static void ath10k_pci_dump_registers(struct ath10k *ar,
949 struct ath10k_fw_crash_data *crash_data)
950 {
951 __le32 reg_dump_values[REG_DUMP_COUNT_QCA988X] = {};
952 int i, ret;
953
954 lockdep_assert_held(&ar->data_lock);
955
956 ret = ath10k_pci_diag_read_hi(ar, &reg_dump_values[0],
957 hi_failure_state,
958 REG_DUMP_COUNT_QCA988X * sizeof(__le32));
959 if (ret) {
960 ath10k_err(ar, "failed to read firmware dump area: %d\n", ret);
961 return;
962 }
963
964 BUILD_BUG_ON(REG_DUMP_COUNT_QCA988X % 4);
965
966 ath10k_err(ar, "firmware register dump:\n");
967 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i += 4)
968 ath10k_err(ar, "[%02d]: 0x%08X 0x%08X 0x%08X 0x%08X\n",
969 i,
970 __le32_to_cpu(reg_dump_values[i]),
971 __le32_to_cpu(reg_dump_values[i + 1]),
972 __le32_to_cpu(reg_dump_values[i + 2]),
973 __le32_to_cpu(reg_dump_values[i + 3]));
974
975 if (!crash_data)
976 return;
977
978 for (i = 0; i < REG_DUMP_COUNT_QCA988X; i++)
979 crash_data->registers[i] = reg_dump_values[i];
980 }
981
982 static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
983 {
984 struct ath10k_fw_crash_data *crash_data;
985 char uuid[50];
986
987 spin_lock_bh(&ar->data_lock);
988
989 crash_data = ath10k_debug_get_new_fw_crash_data(ar);
990
991 if (crash_data)
992 scnprintf(uuid, sizeof(uuid), "%pUl", &crash_data->uuid);
993 else
994 scnprintf(uuid, sizeof(uuid), "n/a");
995
996 ath10k_err(ar, "firmware crashed! (uuid %s)\n", uuid);
997 ath10k_print_driver_info(ar);
998 ath10k_pci_dump_registers(ar, crash_data);
999
1000 spin_unlock_bh(&ar->data_lock);
1001
1002 queue_work(ar->workqueue, &ar->restart_work);
1003 }
1004
1005 static void ath10k_pci_hif_send_complete_check(struct ath10k *ar, u8 pipe,
1006 int force)
1007 {
1008 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif send complete check\n");
1009
1010 if (!force) {
1011 int resources;
1012 /*
1013 * Decide whether to actually poll for completions, or just
1014 * wait for a later chance.
1015 * If there seem to be plenty of resources left, then just wait
1016 * since checking involves reading a CE register, which is a
1017 * relatively expensive operation.
1018 */
1019 resources = ath10k_pci_hif_get_free_queue_number(ar, pipe);
1020
1021 /*
1022 * If at least 50% of the total resources are still available,
1023 * don't bother checking again yet.
1024 */
1025 if (resources > (host_ce_config_wlan[pipe].src_nentries >> 1))
1026 return;
1027 }
1028 ath10k_ce_per_engine_service(ar, pipe);
1029 }
1030
1031 static void ath10k_pci_hif_set_callbacks(struct ath10k *ar,
1032 struct ath10k_hif_cb *callbacks)
1033 {
1034 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1035
1036 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif set callbacks\n");
1037
1038 memcpy(&ar_pci->msg_callbacks_current, callbacks,
1039 sizeof(ar_pci->msg_callbacks_current));
1040 }
1041
1042 static void ath10k_pci_kill_tasklet(struct ath10k *ar)
1043 {
1044 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1045 int i;
1046
1047 tasklet_kill(&ar_pci->intr_tq);
1048 tasklet_kill(&ar_pci->msi_fw_err);
1049
1050 for (i = 0; i < CE_COUNT; i++)
1051 tasklet_kill(&ar_pci->pipe_info[i].intr);
1052
1053 del_timer_sync(&ar_pci->rx_post_retry);
1054 }
1055
1056 static int ath10k_pci_hif_map_service_to_pipe(struct ath10k *ar,
1057 u16 service_id, u8 *ul_pipe,
1058 u8 *dl_pipe, int *ul_is_polled,
1059 int *dl_is_polled)
1060 {
1061 const struct service_to_pipe *entry;
1062 bool ul_set = false, dl_set = false;
1063 int i;
1064
1065 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif map service\n");
1066
1067 /* polling for received messages not supported */
1068 *dl_is_polled = 0;
1069
1070 for (i = 0; i < ARRAY_SIZE(target_service_to_ce_map_wlan); i++) {
1071 entry = &target_service_to_ce_map_wlan[i];
1072
1073 if (__le32_to_cpu(entry->service_id) != service_id)
1074 continue;
1075
1076 switch (__le32_to_cpu(entry->pipedir)) {
1077 case PIPEDIR_NONE:
1078 break;
1079 case PIPEDIR_IN:
1080 WARN_ON(dl_set);
1081 *dl_pipe = __le32_to_cpu(entry->pipenum);
1082 dl_set = true;
1083 break;
1084 case PIPEDIR_OUT:
1085 WARN_ON(ul_set);
1086 *ul_pipe = __le32_to_cpu(entry->pipenum);
1087 ul_set = true;
1088 break;
1089 case PIPEDIR_INOUT:
1090 WARN_ON(dl_set);
1091 WARN_ON(ul_set);
1092 *dl_pipe = __le32_to_cpu(entry->pipenum);
1093 *ul_pipe = __le32_to_cpu(entry->pipenum);
1094 dl_set = true;
1095 ul_set = true;
1096 break;
1097 }
1098 }
1099
1100 if (WARN_ON(!ul_set || !dl_set))
1101 return -ENOENT;
1102
1103 *ul_is_polled =
1104 (host_ce_config_wlan[*ul_pipe].flags & CE_ATTR_DIS_INTR) != 0;
1105
1106 return 0;
1107 }
1108
1109 static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
1110 u8 *ul_pipe, u8 *dl_pipe)
1111 {
1112 int ul_is_polled, dl_is_polled;
1113
1114 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci hif get default pipe\n");
1115
1116 (void)ath10k_pci_hif_map_service_to_pipe(ar,
1117 ATH10K_HTC_SVC_ID_RSVD_CTRL,
1118 ul_pipe,
1119 dl_pipe,
1120 &ul_is_polled,
1121 &dl_is_polled);
1122 }
1123
1124 static void ath10k_pci_irq_disable(struct ath10k *ar)
1125 {
1126 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1127 int i;
1128
1129 ath10k_ce_disable_interrupts(ar);
1130 ath10k_pci_disable_and_clear_legacy_irq(ar);
1131 /* FIXME: How to mask all MSI interrupts? */
1132
1133 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
1134 synchronize_irq(ar_pci->pdev->irq + i);
1135 }
1136
1137 static void ath10k_pci_irq_enable(struct ath10k *ar)
1138 {
1139 ath10k_ce_enable_interrupts(ar);
1140 ath10k_pci_enable_legacy_irq(ar);
1141 /* FIXME: How to unmask all MSI interrupts? */
1142 }
1143
1144 static int ath10k_pci_hif_start(struct ath10k *ar)
1145 {
1146 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif start\n");
1147
1148 ath10k_pci_irq_enable(ar);
1149 ath10k_pci_rx_post(ar);
1150
1151 return 0;
1152 }
1153
1154 static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1155 {
1156 struct ath10k *ar;
1157 struct ath10k_pci *ar_pci;
1158 struct ath10k_ce_pipe *ce_hdl;
1159 u32 buf_sz;
1160 struct sk_buff *netbuf;
1161 u32 ce_data;
1162
1163 buf_sz = pipe_info->buf_sz;
1164
1165 /* Unused Copy Engine */
1166 if (buf_sz == 0)
1167 return;
1168
1169 ar = pipe_info->hif_ce_state;
1170 ar_pci = ath10k_pci_priv(ar);
1171 ce_hdl = pipe_info->ce_hdl;
1172
1173 while (ath10k_ce_revoke_recv_next(ce_hdl, (void **)&netbuf,
1174 &ce_data) == 0) {
1175 dma_unmap_single(ar->dev, ATH10K_SKB_CB(netbuf)->paddr,
1176 netbuf->len + skb_tailroom(netbuf),
1177 DMA_FROM_DEVICE);
1178 dev_kfree_skb_any(netbuf);
1179 }
1180 }
1181
1182 static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pipe_info)
1183 {
1184 struct ath10k *ar;
1185 struct ath10k_pci *ar_pci;
1186 struct ath10k_ce_pipe *ce_hdl;
1187 struct sk_buff *netbuf;
1188 u32 ce_data;
1189 unsigned int nbytes;
1190 unsigned int id;
1191 u32 buf_sz;
1192
1193 buf_sz = pipe_info->buf_sz;
1194
1195 /* Unused Copy Engine */
1196 if (buf_sz == 0)
1197 return;
1198
1199 ar = pipe_info->hif_ce_state;
1200 ar_pci = ath10k_pci_priv(ar);
1201 ce_hdl = pipe_info->ce_hdl;
1202
1203 while (ath10k_ce_cancel_send_next(ce_hdl, (void **)&netbuf,
1204 &ce_data, &nbytes, &id) == 0) {
1205 /* no need to call tx completion for NULL pointers */
1206 if (!netbuf)
1207 continue;
1208
1209 ar_pci->msg_callbacks_current.tx_completion(ar,
1210 netbuf,
1211 id);
1212 }
1213 }
1214
1215 /*
1216 * Cleanup residual buffers for device shutdown:
1217 * buffers that were enqueued for receive
1218 * buffers that were to be sent
1219 * Note: Buffers that had completed but which were
1220 * not yet processed are on a completion queue. They
1221 * are handled when the completion thread shuts down.
1222 */
1223 static void ath10k_pci_buffer_cleanup(struct ath10k *ar)
1224 {
1225 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1226 int pipe_num;
1227
1228 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1229 struct ath10k_pci_pipe *pipe_info;
1230
1231 pipe_info = &ar_pci->pipe_info[pipe_num];
1232 ath10k_pci_rx_pipe_cleanup(pipe_info);
1233 ath10k_pci_tx_pipe_cleanup(pipe_info);
1234 }
1235 }
1236
1237 static void ath10k_pci_ce_deinit(struct ath10k *ar)
1238 {
1239 int i;
1240
1241 for (i = 0; i < CE_COUNT; i++)
1242 ath10k_ce_deinit_pipe(ar, i);
1243 }
1244
1245 static void ath10k_pci_flush(struct ath10k *ar)
1246 {
1247 ath10k_pci_kill_tasklet(ar);
1248 ath10k_pci_buffer_cleanup(ar);
1249 }
1250
1251 static void ath10k_pci_hif_stop(struct ath10k *ar)
1252 {
1253 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif stop\n");
1254
1255 /* Most likely the device has HTT Rx ring configured. The only way to
1256 * prevent the device from accessing (and possibly corrupting) host
1257 * memory is to reset the chip now.
1258 *
1259 * There's also no known way of masking MSI interrupts on the device.
1260 * For ranged MSI the CE-related interrupts can be masked. However
1261 * regardless how many MSI interrupts are assigned the first one
1262 * is always used for firmware indications (crashes) and cannot be
1263 * masked. To prevent the device from asserting the interrupt reset it
1264 * before proceeding with cleanup.
1265 */
1266 ath10k_pci_warm_reset(ar);
1267
1268 ath10k_pci_irq_disable(ar);
1269 ath10k_pci_flush(ar);
1270 }
1271
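/* Synchronous BMI request/response exchange: the request (and optional
 * response) are bounced through DMA-mapped copies, pushed over the BMI
 * copy engine pipes and then polled for completion in ath10k_pci_bmi_wait().
 */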
1272 static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
1273 void *req, u32 req_len,
1274 void *resp, u32 *resp_len)
1275 {
1276 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1277 struct ath10k_pci_pipe *pci_tx = &ar_pci->pipe_info[BMI_CE_NUM_TO_TARG];
1278 struct ath10k_pci_pipe *pci_rx = &ar_pci->pipe_info[BMI_CE_NUM_TO_HOST];
1279 struct ath10k_ce_pipe *ce_tx = pci_tx->ce_hdl;
1280 struct ath10k_ce_pipe *ce_rx = pci_rx->ce_hdl;
1281 dma_addr_t req_paddr = 0;
1282 dma_addr_t resp_paddr = 0;
1283 struct bmi_xfer xfer = {};
1284 void *treq, *tresp = NULL;
1285 int ret = 0;
1286
1287 might_sleep();
1288
1289 if (resp && !resp_len)
1290 return -EINVAL;
1291
1292 if (resp && resp_len && *resp_len == 0)
1293 return -EINVAL;
1294
1295 treq = kmemdup(req, req_len, GFP_KERNEL);
1296 if (!treq)
1297 return -ENOMEM;
1298
1299 req_paddr = dma_map_single(ar->dev, treq, req_len, DMA_TO_DEVICE);
1300 ret = dma_mapping_error(ar->dev, req_paddr);
1301 if (ret) {
1302 ret = -EIO;
1303 goto err_dma;
1304 }
1305
1306 if (resp && resp_len) {
1307 tresp = kzalloc(*resp_len, GFP_KERNEL);
1308 if (!tresp) {
1309 ret = -ENOMEM;
1310 goto err_req;
1311 }
1312
1313 resp_paddr = dma_map_single(ar->dev, tresp, *resp_len,
1314 DMA_FROM_DEVICE);
1315 ret = dma_mapping_error(ar->dev, resp_paddr);
1316 if (ret) {
1317 ret = -EIO;
1318 goto err_req;
1319 }
1320
1321 xfer.wait_for_resp = true;
1322 xfer.resp_len = 0;
1323
1324 ath10k_ce_rx_post_buf(ce_rx, &xfer, resp_paddr);
1325 }
1326
1327 ret = ath10k_ce_send(ce_tx, &xfer, req_paddr, req_len, -1, 0);
1328 if (ret)
1329 goto err_resp;
1330
1331 ret = ath10k_pci_bmi_wait(ce_tx, ce_rx, &xfer);
1332 if (ret) {
1333 u32 unused_buffer;
1334 unsigned int unused_nbytes;
1335 unsigned int unused_id;
1336
1337 ath10k_ce_cancel_send_next(ce_tx, NULL, &unused_buffer,
1338 &unused_nbytes, &unused_id);
1339 } else {
1340 /* wait completed without timing out */
1341 ret = 0;
1342 }
1343
1344 err_resp:
1345 if (resp) {
1346 u32 unused_buffer;
1347
1348 ath10k_ce_revoke_recv_next(ce_rx, NULL, &unused_buffer);
1349 dma_unmap_single(ar->dev, resp_paddr,
1350 *resp_len, DMA_FROM_DEVICE);
1351 }
1352 err_req:
1353 dma_unmap_single(ar->dev, req_paddr, req_len, DMA_TO_DEVICE);
1354
1355 if (ret == 0 && resp_len) {
1356 *resp_len = min(*resp_len, xfer.resp_len);
1357 memcpy(resp, tresp, xfer.resp_len);
1358 }
1359 err_dma:
1360 kfree(treq);
1361 kfree(tresp);
1362
1363 return ret;
1364 }
1365
1366 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
1367 {
1368 struct bmi_xfer *xfer;
1369 u32 ce_data;
1370 unsigned int nbytes;
1371 unsigned int transfer_id;
1372
1373 if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
1374 &nbytes, &transfer_id))
1375 return;
1376
1377 xfer->tx_done = true;
1378 }
1379
1380 static void ath10k_pci_bmi_recv_data(struct ath10k_ce_pipe *ce_state)
1381 {
1382 struct ath10k *ar = ce_state->ar;
1383 struct bmi_xfer *xfer;
1384 u32 ce_data;
1385 unsigned int nbytes;
1386 unsigned int transfer_id;
1387 unsigned int flags;
1388
1389 if (ath10k_ce_completed_recv_next(ce_state, (void **)&xfer, &ce_data,
1390 &nbytes, &transfer_id, &flags))
1391 return;
1392
1393 if (!xfer->wait_for_resp) {
1394 ath10k_warn(ar, "unexpected: BMI data received; ignoring\n");
1395 return;
1396 }
1397
1398 xfer->resp_len = nbytes;
1399 xfer->rx_done = true;
1400 }
1401
1402 static int ath10k_pci_bmi_wait(struct ath10k_ce_pipe *tx_pipe,
1403 struct ath10k_ce_pipe *rx_pipe,
1404 struct bmi_xfer *xfer)
1405 {
1406 unsigned long timeout = jiffies + BMI_COMMUNICATION_TIMEOUT_HZ;
1407
1408 while (time_before_eq(jiffies, timeout)) {
1409 ath10k_pci_bmi_send_done(tx_pipe);
1410 ath10k_pci_bmi_recv_data(rx_pipe);
1411
1412 if (xfer->tx_done && (xfer->rx_done == xfer->wait_for_resp))
1413 return 0;
1414
1415 schedule();
1416 }
1417
1418 return -ETIMEDOUT;
1419 }
1420
1421 /*
1422 * Send an interrupt to the device to wake up the Target CPU
1423 * so it has an opportunity to notice any changed state.
1424 */
1425 static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
1426 {
1427 u32 addr, val;
1428
1429 addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
1430 val = ath10k_pci_read32(ar, addr);
1431 val |= CORE_CTRL_CPU_INTR_MASK;
1432 ath10k_pci_write32(ar, addr, val);
1433
1434 return 0;
1435 }
1436
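/* Push the target-side CE pipe configuration and the service-to-pipe map
 * through the diagnostic window, disable PCIe L1, set up early IRAM
 * allocation and finally tell the target it may proceed with initialisation.
 */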
1437 static int ath10k_pci_init_config(struct ath10k *ar)
1438 {
1439 u32 interconnect_targ_addr;
1440 u32 pcie_state_targ_addr = 0;
1441 u32 pipe_cfg_targ_addr = 0;
1442 u32 svc_to_pipe_map = 0;
1443 u32 pcie_config_flags = 0;
1444 u32 ealloc_value;
1445 u32 ealloc_targ_addr;
1446 u32 flag2_value;
1447 u32 flag2_targ_addr;
1448 int ret = 0;
1449
1450 /* Download to Target the CE Config and the service-to-CE map */
1451 interconnect_targ_addr =
1452 host_interest_item_address(HI_ITEM(hi_interconnect_state));
1453
1454 /* Supply Target-side CE configuration */
1455 ret = ath10k_pci_diag_read32(ar, interconnect_targ_addr,
1456 &pcie_state_targ_addr);
1457 if (ret != 0) {
1458 ath10k_err(ar, "Failed to get pcie state addr: %d\n", ret);
1459 return ret;
1460 }
1461
1462 if (pcie_state_targ_addr == 0) {
1463 ret = -EIO;
1464 ath10k_err(ar, "Invalid pcie state addr\n");
1465 return ret;
1466 }
1467
1468 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1469 offsetof(struct pcie_state,
1470 pipe_cfg_addr)),
1471 &pipe_cfg_targ_addr);
1472 if (ret != 0) {
1473 ath10k_err(ar, "Failed to get pipe cfg addr: %d\n", ret);
1474 return ret;
1475 }
1476
1477 if (pipe_cfg_targ_addr == 0) {
1478 ret = -EIO;
1479 ath10k_err(ar, "Invalid pipe cfg addr\n");
1480 return ret;
1481 }
1482
1483 ret = ath10k_pci_diag_write_mem(ar, pipe_cfg_targ_addr,
1484 target_ce_config_wlan,
1485 sizeof(target_ce_config_wlan));
1486
1487 if (ret != 0) {
1488 ath10k_err(ar, "Failed to write pipe cfg: %d\n", ret);
1489 return ret;
1490 }
1491
1492 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1493 offsetof(struct pcie_state,
1494 svc_to_pipe_map)),
1495 &svc_to_pipe_map);
1496 if (ret != 0) {
1497 ath10k_err(ar, "Failed to get svc/pipe map: %d\n", ret);
1498 return ret;
1499 }
1500
1501 if (svc_to_pipe_map == 0) {
1502 ret = -EIO;
1503 ath10k_err(ar, "Invalid svc_to_pipe map\n");
1504 return ret;
1505 }
1506
1507 ret = ath10k_pci_diag_write_mem(ar, svc_to_pipe_map,
1508 target_service_to_ce_map_wlan,
1509 sizeof(target_service_to_ce_map_wlan));
1510 if (ret != 0) {
1511 ath10k_err(ar, "Failed to write svc/pipe map: %d\n", ret);
1512 return ret;
1513 }
1514
1515 ret = ath10k_pci_diag_read32(ar, (pcie_state_targ_addr +
1516 offsetof(struct pcie_state,
1517 config_flags)),
1518 &pcie_config_flags);
1519 if (ret != 0) {
1520 ath10k_err(ar, "Failed to get pcie config_flags: %d\n", ret);
1521 return ret;
1522 }
1523
1524 pcie_config_flags &= ~PCIE_CONFIG_FLAG_ENABLE_L1;
1525
1526 ret = ath10k_pci_diag_write32(ar, (pcie_state_targ_addr +
1527 offsetof(struct pcie_state,
1528 config_flags)),
1529 pcie_config_flags);
1530 if (ret != 0) {
1531 ath10k_err(ar, "Failed to write pcie config_flags: %d\n", ret);
1532 return ret;
1533 }
1534
1535 /* configure early allocation */
1536 ealloc_targ_addr = host_interest_item_address(HI_ITEM(hi_early_alloc));
1537
1538 ret = ath10k_pci_diag_read32(ar, ealloc_targ_addr, &ealloc_value);
1539 if (ret != 0) {
1540 ath10k_err(ar, "Failed to get early alloc val: %d\n", ret);
1541 return ret;
1542 }
1543
1544 /* first bank is switched to IRAM */
1545 ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
1546 HI_EARLY_ALLOC_MAGIC_MASK);
1547 ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
1548 HI_EARLY_ALLOC_IRAM_BANKS_MASK);
1549
1550 ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
1551 if (ret != 0) {
1552 ath10k_err(ar, "Failed to set early alloc val: %d\n", ret);
1553 return ret;
1554 }
1555
1556 /* Tell Target to proceed with initialization */
1557 flag2_targ_addr = host_interest_item_address(HI_ITEM(hi_option_flag2));
1558
1559 ret = ath10k_pci_diag_read32(ar, flag2_targ_addr, &flag2_value);
1560 if (ret != 0) {
1561 ath10k_err(ar, "Failed to get option val: %d\n", ret);
1562 return ret;
1563 }
1564
1565 flag2_value |= HI_OPTION_EARLY_CFG_DONE;
1566
1567 ret = ath10k_pci_diag_write32(ar, flag2_targ_addr, flag2_value);
1568 if (ret != 0) {
1569 ath10k_err(ar, "Failed to set option val: %d\n", ret);
1570 return ret;
1571 }
1572
1573 return 0;
1574 }
1575
1576 static int ath10k_pci_alloc_ce(struct ath10k *ar)
1577 {
1578 int i, ret;
1579
1580 for (i = 0; i < CE_COUNT; i++) {
1581 ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
1582 if (ret) {
1583 ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
1584 i, ret);
1585 return ret;
1586 }
1587 }
1588
1589 return 0;
1590 }
1591
1592 static void ath10k_pci_free_ce(struct ath10k *ar)
1593 {
1594 int i;
1595
1596 for (i = 0; i < CE_COUNT; i++)
1597 ath10k_ce_free_pipe(ar, i);
1598 }
1599
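/* Initialise every CE pipe and register the send/recv completion handlers;
 * the last CE is reserved for the diagnostic window instead of HIF traffic.
 */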
1600 static int ath10k_pci_ce_init(struct ath10k *ar)
1601 {
1602 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1603 struct ath10k_pci_pipe *pipe_info;
1604 const struct ce_attr *attr;
1605 int pipe_num, ret;
1606
1607 for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
1608 pipe_info = &ar_pci->pipe_info[pipe_num];
1609 pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
1610 pipe_info->pipe_num = pipe_num;
1611 pipe_info->hif_ce_state = ar;
1612 attr = &host_ce_config_wlan[pipe_num];
1613
1614 ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
1615 ath10k_pci_ce_send_done,
1616 ath10k_pci_ce_recv_data);
1617 if (ret) {
1618 ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
1619 pipe_num, ret);
1620 return ret;
1621 }
1622
1623 if (pipe_num == CE_COUNT - 1) {
1624 /*
1625 * Reserve the ultimate CE for
1626 * diagnostic Window support
1627 */
1628 ar_pci->ce_diag = pipe_info->ce_hdl;
1629 continue;
1630 }
1631
1632 pipe_info->buf_sz = (size_t)(attr->src_sz_max);
1633 }
1634
1635 return 0;
1636 }
1637
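/* FW_IND_EVENT_PENDING in the firmware indicator register signals a
 * firmware crash reported by the target.
 */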
1638 static bool ath10k_pci_has_fw_crashed(struct ath10k *ar)
1639 {
1640 return ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS) &
1641 FW_IND_EVENT_PENDING;
1642 }
1643
1644 static void ath10k_pci_fw_crashed_clear(struct ath10k *ar)
1645 {
1646 u32 val;
1647
1648 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
1649 val &= ~FW_IND_EVENT_PENDING;
1650 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, val);
1651 }
1652
1653 /* this function effectively clears target memory controller assert line */
1654 static void ath10k_pci_warm_reset_si0(struct ath10k *ar)
1655 {
1656 u32 val;
1657
1658 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1659 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1660 val | SOC_RESET_CONTROL_SI0_RST_MASK);
1661 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1662
1663 msleep(10);
1664
1665 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1666 ath10k_pci_soc_write32(ar, SOC_RESET_CONTROL_ADDRESS,
1667 val & ~SOC_RESET_CONTROL_SI0_RST_MASK);
1668 val = ath10k_pci_soc_read32(ar, SOC_RESET_CONTROL_ADDRESS);
1669
1670 msleep(10);
1671 }
1672
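/* Warm reset sequence: mask and clear pending PCIe interrupts, clear the
 * firmware indicator, disable the LF timer, pulse the CE and SI0 reset
 * lines and finally assert the target CPU warm reset.
 */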
1673 static int ath10k_pci_warm_reset(struct ath10k *ar)
1674 {
1675 u32 val;
1676
1677 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
1678
1679 /* debug */
1680 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1681 PCIE_INTR_CAUSE_ADDRESS);
1682 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
1683 val);
1684
1685 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1686 CPU_INTR_ADDRESS);
1687 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1688 val);
1689
1690 /* disable pending irqs */
1691 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1692 PCIE_INTR_ENABLE_ADDRESS, 0);
1693
1694 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
1695 PCIE_INTR_CLR_ADDRESS, ~0);
1696
1697 msleep(100);
1698
1699 /* clear fw indicator */
1700 ath10k_pci_write32(ar, FW_INDICATOR_ADDRESS, 0);
1701
1702 /* clear target LF timer interrupts */
1703 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1704 SOC_LF_TIMER_CONTROL0_ADDRESS);
1705 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS +
1706 SOC_LF_TIMER_CONTROL0_ADDRESS,
1707 val & ~SOC_LF_TIMER_CONTROL0_ENABLE_MASK);
1708
1709 /* reset CE */
1710 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1711 SOC_RESET_CONTROL_ADDRESS);
1712 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1713 val | SOC_RESET_CONTROL_CE_RST_MASK);
1714 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1715 SOC_RESET_CONTROL_ADDRESS);
1716 msleep(10);
1717
1718 /* unreset CE */
1719 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1720 val & ~SOC_RESET_CONTROL_CE_RST_MASK);
1721 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1722 SOC_RESET_CONTROL_ADDRESS);
1723 msleep(10);
1724
1725 ath10k_pci_warm_reset_si0(ar);
1726
1727 /* debug */
1728 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1729 PCIE_INTR_CAUSE_ADDRESS);
1730 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot host cpu intr cause: 0x%08x\n",
1731 val);
1732
1733 val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
1734 CPU_INTR_ADDRESS);
1735 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target cpu intr cause: 0x%08x\n",
1736 val);
1737
1738 /* CPU warm reset */
1739 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1740 SOC_RESET_CONTROL_ADDRESS);
1741 ath10k_pci_write32(ar, RTC_SOC_BASE_ADDRESS + SOC_RESET_CONTROL_ADDRESS,
1742 val | SOC_RESET_CONTROL_CPU_WARM_RST_MASK);
1743
1744 val = ath10k_pci_read32(ar, RTC_SOC_BASE_ADDRESS +
1745 SOC_RESET_CONTROL_ADDRESS);
1746 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target reset state: 0x%08x\n",
1747 val);
1748
1749 msleep(100);
1750
1751 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset complete\n");
1752
1753 return 0;
1754 }
1755
1756 static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
1757 {
1758 int ret;
1759
1760 /*
1761 * Bring the target up cleanly.
1762 *
1763 * The target may be in an undefined state with an AUX-powered Target
1764 * and a Host in WoW mode. If the Host crashes, loses power, or is
1765 * restarted (without unloading the driver) then the Target is left
1766 * (aux) powered and running. On a subsequent driver load, the Target
1767 * is in an unexpected state. We try to catch that here in order to
1768 * reset the Target and retry the probe.
1769 */
1770 if (cold_reset)
1771 ret = ath10k_pci_cold_reset(ar);
1772 else
1773 ret = ath10k_pci_warm_reset(ar);
1774
1775 if (ret) {
1776 ath10k_err(ar, "failed to reset target: %d\n", ret);
1777 goto err;
1778 }
1779
1780 ret = ath10k_pci_ce_init(ar);
1781 if (ret) {
1782 ath10k_err(ar, "failed to initialize CE: %d\n", ret);
1783 goto err;
1784 }
1785
1786 ret = ath10k_pci_wait_for_target_init(ar);
1787 if (ret) {
1788 ath10k_err(ar, "failed to wait for target to init: %d\n", ret);
1789 goto err_ce;
1790 }
1791
1792 ret = ath10k_pci_init_config(ar);
1793 if (ret) {
1794 ath10k_err(ar, "failed to setup init config: %d\n", ret);
1795 goto err_ce;
1796 }
1797
1798 ret = ath10k_pci_wake_target_cpu(ar);
1799 if (ret) {
1800 ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
1801 goto err_ce;
1802 }
1803
1804 return 0;
1805
1806 err_ce:
1807 ath10k_pci_ce_deinit(ar);
1808 ath10k_pci_warm_reset(ar);
1809 err:
1810 return ret;
1811 }
1812
1813 static int ath10k_pci_hif_power_up_warm(struct ath10k *ar)
1814 {
1815 int i, ret;
1816
1817 /*
1818 * Sometimes warm reset succeeds after retries.
1819 *
1820 * FIXME: It might be possible to tune ath10k_pci_warm_reset() to work
1821 * at first try.
1822 */
1823 for (i = 0; i < ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS; i++) {
1824 ret = __ath10k_pci_hif_power_up(ar, false);
1825 if (ret == 0)
1826 break;
1827
1828 ath10k_warn(ar, "failed to warm reset (attempt %d out of %d): %d\n",
1829 i + 1, ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS, ret);
1830 }
1831
1832 return ret;
1833 }
1834
1835 static int ath10k_pci_hif_power_up(struct ath10k *ar)
1836 {
1837 int ret;
1838
1839 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power up\n");
1840
1841 /*
1842 * Hardware CUS232 version 2 has some issues with cold reset and the
1843 * preferred (and safer) way to perform a device reset is through a
1844 * warm reset.
1845 *
1846 * Warm reset doesn't always work though, so falling back to cold reset
1847 * may be necessary.
1848 */
1849 ret = ath10k_pci_hif_power_up_warm(ar);
1850 if (ret) {
1851 ath10k_warn(ar, "failed to power up target using warm reset: %d\n",
1852 ret);
1853
1854 if (ath10k_pci_reset_mode == ATH10K_PCI_RESET_WARM_ONLY)
1855 return ret;
1856
1857 ath10k_warn(ar, "trying cold reset\n");
1858
1859 ret = __ath10k_pci_hif_power_up(ar, true);
1860 if (ret) {
1861 ath10k_err(ar, "failed to power up target using cold reset too (%d)\n",
1862 ret);
1863 return ret;
1864 }
1865 }
1866
1867 return 0;
1868 }
1869
1870 static void ath10k_pci_hif_power_down(struct ath10k *ar)
1871 {
1872 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot hif power down\n");
1873
1874 ath10k_pci_warm_reset(ar);
1875 }
1876
1877 #ifdef CONFIG_PM
1878
1879 #define ATH10K_PCI_PM_CONTROL 0x44
1880
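/* Put the device into PCI power state D3hot by writing the power state
 * field of the PM control register directly; resume below restores D0 and
 * the saved config space.
 */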
1881 static int ath10k_pci_hif_suspend(struct ath10k *ar)
1882 {
1883 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1884 struct pci_dev *pdev = ar_pci->pdev;
1885 u32 val;
1886
1887 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1888
1889 if ((val & 0x000000ff) != 0x3) {
1890 pci_save_state(pdev);
1891 pci_disable_device(pdev);
1892 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1893 (val & 0xffffff00) | 0x03);
1894 }
1895
1896 return 0;
1897 }
1898
1899 static int ath10k_pci_hif_resume(struct ath10k *ar)
1900 {
1901 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1902 struct pci_dev *pdev = ar_pci->pdev;
1903 u32 val;
1904
1905 pci_read_config_dword(pdev, ATH10K_PCI_PM_CONTROL, &val);
1906
1907 if ((val & 0x000000ff) != 0) {
1908 pci_restore_state(pdev);
1909 pci_write_config_dword(pdev, ATH10K_PCI_PM_CONTROL,
1910 val & 0xffffff00);
1911 /*
1912 * Suspend/Resume resets the PCI configuration space,
1913 * so we have to re-disable the RETRY_TIMEOUT register (0x41)
1914 * to keep PCI Tx retries from interfering with C3 CPU state
1915 */
1916 pci_read_config_dword(pdev, 0x40, &val);
1917
1918 if ((val & 0x0000ff00) != 0)
1919 pci_write_config_dword(pdev, 0x40, val & 0xffff00ff);
1920 }
1921
1922 return 0;
1923 }
1924 #endif
1925
1926 static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
1927 .tx_sg = ath10k_pci_hif_tx_sg,
1928 .exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
1929 .start = ath10k_pci_hif_start,
1930 .stop = ath10k_pci_hif_stop,
1931 .map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
1932 .get_default_pipe = ath10k_pci_hif_get_default_pipe,
1933 .send_complete_check = ath10k_pci_hif_send_complete_check,
1934 .set_callbacks = ath10k_pci_hif_set_callbacks,
1935 .get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
1936 .power_up = ath10k_pci_hif_power_up,
1937 .power_down = ath10k_pci_hif_power_down,
1938 #ifdef CONFIG_PM
1939 .suspend = ath10k_pci_hif_suspend,
1940 .resume = ath10k_pci_hif_resume,
1941 #endif
1942 };
1943
1944 static void ath10k_pci_ce_tasklet(unsigned long ptr)
1945 {
1946 struct ath10k_pci_pipe *pipe = (struct ath10k_pci_pipe *)ptr;
1947 struct ath10k_pci *ar_pci = pipe->ar_pci;
1948
1949 ath10k_ce_per_engine_service(ar_pci->ar, pipe->pipe_num);
1950 }
1951
1952 static void ath10k_msi_err_tasklet(unsigned long data)
1953 {
1954 struct ath10k *ar = (struct ath10k *)data;
1955
1956 if (!ath10k_pci_has_fw_crashed(ar)) {
1957 ath10k_warn(ar, "received unsolicited fw crash interrupt\n");
1958 return;
1959 }
1960
1961 ath10k_pci_fw_crashed_clear(ar);
1962 ath10k_pci_fw_crashed_dump(ar);
1963 }
1964
1965 /*
1966 * Handler for a per-engine interrupt on a PARTICULAR CE.
1967 * This is used in cases where each CE has a private MSI interrupt.
1968 */
1969 static irqreturn_t ath10k_pci_per_engine_handler(int irq, void *arg)
1970 {
1971 struct ath10k *ar = arg;
1972 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1973 int ce_id = irq - ar_pci->pdev->irq - MSI_ASSIGN_CE_INITIAL;
1974
1975 if (ce_id < 0 || ce_id >= ARRAY_SIZE(ar_pci->pipe_info)) {
1976 ath10k_warn(ar, "unexpected/invalid irq %d ce_id %d\n", irq,
1977 ce_id);
1978 return IRQ_HANDLED;
1979 }
1980
1981 /*
1982 * NOTE: We are able to derive ce_id from irq because we
1983 * use a one-to-one mapping for CE's 0..5.
1984 * CE's 6 & 7 do not use interrupts at all.
1985 *
1986 * This mapping must be kept in sync with the mapping
1987 * used by firmware.
1988 */
1989 tasklet_schedule(&ar_pci->pipe_info[ce_id].intr);
1990 return IRQ_HANDLED;
1991 }
1992
1993 static irqreturn_t ath10k_pci_msi_fw_handler(int irq, void *arg)
1994 {
1995 struct ath10k *ar = arg;
1996 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
1997
1998 tasklet_schedule(&ar_pci->msi_fw_err);
1999 return IRQ_HANDLED;
2000 }
2001
2002 /*
2003 * Top-level interrupt handler for all PCI interrupts from a Target.
2004 * When a block of MSI interrupts is allocated, this top-level handler
2005 * is not used; instead, we directly call the correct sub-handler.
2006 */
2007 static irqreturn_t ath10k_pci_interrupt_handler(int irq, void *arg)
2008 {
2009 struct ath10k *ar = arg;
2010 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2011
2012 if (ar_pci->num_msi_intrs == 0) {
2013 if (!ath10k_pci_irq_pending(ar))
2014 return IRQ_NONE;
2015
2016 ath10k_pci_disable_and_clear_legacy_irq(ar);
2017 }
2018
2019 tasklet_schedule(&ar_pci->intr_tq);
2020
2021 return IRQ_HANDLED;
2022 }
2023
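/* Bottom half for the shared (legacy/MSI) interrupt: dump and clear any
 * pending firmware crash, otherwise service all copy engines, then
 * re-enable the legacy interrupt that the hard irq handler masked.
 */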
2024 static void ath10k_pci_tasklet(unsigned long data)
2025 {
2026 struct ath10k *ar = (struct ath10k *)data;
2027 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2028
2029 if (ath10k_pci_has_fw_crashed(ar)) {
2030 ath10k_pci_fw_crashed_clear(ar);
2031 ath10k_pci_fw_crashed_dump(ar);
2032 return;
2033 }
2034
2035 ath10k_ce_per_engine_service_any(ar);
2036
2037 /* Re-enable legacy irq that was disabled in the irq handler */
2038 if (ar_pci->num_msi_intrs == 0)
2039 ath10k_pci_enable_legacy_irq(ar);
2040 }
2041
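/* MSI-X style setup: vector MSI_ASSIGN_FW handles firmware error interrupts
 * and vectors MSI_ASSIGN_CE_INITIAL..MSI_ASSIGN_CE_MAX map one-to-one onto
 * copy engines; on failure every vector requested so far is freed again.
 */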
2042 static int ath10k_pci_request_irq_msix(struct ath10k *ar)
2043 {
2044 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2045 int ret, i;
2046
2047 ret = request_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW,
2048 ath10k_pci_msi_fw_handler,
2049 IRQF_SHARED, "ath10k_pci", ar);
2050 if (ret) {
2051 ath10k_warn(ar, "failed to request MSI-X fw irq %d: %d\n",
2052 ar_pci->pdev->irq + MSI_ASSIGN_FW, ret);
2053 return ret;
2054 }
2055
2056 for (i = MSI_ASSIGN_CE_INITIAL; i <= MSI_ASSIGN_CE_MAX; i++) {
2057 ret = request_irq(ar_pci->pdev->irq + i,
2058 ath10k_pci_per_engine_handler,
2059 IRQF_SHARED, "ath10k_pci", ar);
2060 if (ret) {
2061 ath10k_warn(ar, "failed to request MSI-X ce irq %d: %d\n",
2062 ar_pci->pdev->irq + i, ret);
2063
2064 for (i--; i >= MSI_ASSIGN_CE_INITIAL; i--)
2065 free_irq(ar_pci->pdev->irq + i, ar);
2066
2067 free_irq(ar_pci->pdev->irq + MSI_ASSIGN_FW, ar);
2068 return ret;
2069 }
2070 }
2071
2072 return 0;
2073 }
2074
2075 static int ath10k_pci_request_irq_msi(struct ath10k *ar)
2076 {
2077 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2078 int ret;
2079
2080 ret = request_irq(ar_pci->pdev->irq,
2081 ath10k_pci_interrupt_handler,
2082 IRQF_SHARED, "ath10k_pci", ar);
2083 if (ret) {
2084 ath10k_warn(ar, "failed to request MSI irq %d: %d\n",
2085 ar_pci->pdev->irq, ret);
2086 return ret;
2087 }
2088
2089 return 0;
2090 }
2091
2092 static int ath10k_pci_request_irq_legacy(struct ath10k *ar)
2093 {
2094 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2095 int ret;
2096
2097 ret = request_irq(ar_pci->pdev->irq,
2098 ath10k_pci_interrupt_handler,
2099 IRQF_SHARED, "ath10k_pci", ar);
2100 if (ret) {
2101 ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
2102 ar_pci->pdev->irq, ret);
2103 return ret;
2104 }
2105
2106 return 0;
2107 }
2108
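/* Pick the request routine based on how many MSI vectors were granted:
 * 0 = legacy INTx, 1 = single shared MSI, MSI_NUM_REQUEST = per-CE vectors.
 */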
2109 static int ath10k_pci_request_irq(struct ath10k *ar)
2110 {
2111 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2112
2113 switch (ar_pci->num_msi_intrs) {
2114 case 0:
2115 return ath10k_pci_request_irq_legacy(ar);
2116 case 1:
2117 return ath10k_pci_request_irq_msi(ar);
2118 case MSI_NUM_REQUEST:
2119 return ath10k_pci_request_irq_msix(ar);
2120 }
2121
2122 ath10k_warn(ar, "unknown irq configuration upon request\n");
2123 return -EINVAL;
2124 }
2125
2126 static void ath10k_pci_free_irq(struct ath10k *ar)
2127 {
2128 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2129 int i;
2130
2131 /* There's at least one interrupt regardless of whether it's a legacy INTR
2132 * or MSI or MSI-X */
2133 for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
2134 free_irq(ar_pci->pdev->irq + i, ar);
2135 }
2136
2137 static void ath10k_pci_init_irq_tasklets(struct ath10k *ar)
2138 {
2139 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2140 int i;
2141
2142 tasklet_init(&ar_pci->intr_tq, ath10k_pci_tasklet, (unsigned long)ar);
2143 tasklet_init(&ar_pci->msi_fw_err, ath10k_msi_err_tasklet,
2144 (unsigned long)ar);
2145
2146 for (i = 0; i < CE_COUNT; i++) {
2147 ar_pci->pipe_info[i].ar_pci = ar_pci;
2148 tasklet_init(&ar_pci->pipe_info[i].intr, ath10k_pci_ce_tasklet,
2149 (unsigned long)&ar_pci->pipe_info[i]);
2150 }
2151 }
2152
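/* Interrupt mode selection: try a full MSI vector block first, then a
 * single MSI, and finally fall back to legacy interrupts, honouring the
 * irq_mode module parameter. Legacy mode also unmasks the interrupt
 * sources in the target's SOC_CORE registers.
 */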
2153 static int ath10k_pci_init_irq(struct ath10k *ar)
2154 {
2155 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2156 int ret;
2157
2158 ath10k_pci_init_irq_tasklets(ar);
2159
2160 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_AUTO)
2161 ath10k_info(ar, "limiting irq mode to: %d\n",
2162 ath10k_pci_irq_mode);
2163
2164 /* Try MSI-X */
2165 if (ath10k_pci_irq_mode == ATH10K_PCI_IRQ_AUTO) {
2166 ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
2167 ret = pci_enable_msi_range(ar_pci->pdev, ar_pci->num_msi_intrs,
2168 ar_pci->num_msi_intrs);
2169 if (ret > 0)
2170 return 0;
2171
2172 /* fall-through */
2173 }
2174
2175 /* Try MSI */
2176 if (ath10k_pci_irq_mode != ATH10K_PCI_IRQ_LEGACY) {
2177 ar_pci->num_msi_intrs = 1;
2178 ret = pci_enable_msi(ar_pci->pdev);
2179 if (ret == 0)
2180 return 0;
2181
2182 /* fall-through */
2183 }
2184
2185 /* Try legacy irq
2186 *
2187 * A potential race occurs here: The CORE_BASE write
2188 * depends on target correctly decoding AXI address but
2189 * host won't know when target writes BAR to CORE_CTRL.
2190 * This write might get lost if target has NOT written BAR.
2191 * For now, fix the race by repeating the write in the
2192 * synchronization check below. */
2193 ar_pci->num_msi_intrs = 0;
2194
2195 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2196 PCIE_INTR_FIRMWARE_MASK | PCIE_INTR_CE_MASK_ALL);
2197
2198 return 0;
2199 }
2200
2201 static void ath10k_pci_deinit_irq_legacy(struct ath10k *ar)
2202 {
2203 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + PCIE_INTR_ENABLE_ADDRESS,
2204 0);
2205 }
2206
2207 static int ath10k_pci_deinit_irq(struct ath10k *ar)
2208 {
2209 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2210
2211 switch (ar_pci->num_msi_intrs) {
2212 case 0:
2213 ath10k_pci_deinit_irq_legacy(ar);
2214 return 0;
2215 case 1:
2216 /* fall-through */
2217 case MSI_NUM_REQUEST:
2218 pci_disable_msi(ar_pci->pdev);
2219 return 0;
2220 default:
2221 pci_disable_msi(ar_pci->pdev);
2222 }
2223
2224 ath10k_warn(ar, "unknown irq configuration upon deinit\n");
2225 return -EINVAL;
2226 }
2227
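/* Poll FW_INDICATOR_ADDRESS for up to ATH10K_PCI_TARGET_WAIT ms until the
 * target reports FW_IND_INITIALIZED, bailing out early on a firmware crash
 * or an all-ones read (device gone). The legacy interrupt enable write is
 * repeated each iteration as a workaround for the race described above.
 */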
2228 static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
2229 {
2230 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2231 unsigned long timeout;
2232 u32 val;
2233
2234 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot waiting for target to initialise\n");
2235
2236 timeout = jiffies + msecs_to_jiffies(ATH10K_PCI_TARGET_WAIT);
2237
2238 do {
2239 val = ath10k_pci_read32(ar, FW_INDICATOR_ADDRESS);
2240
2241 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target indicator %x\n",
2242 val);
2243
2244 /* target should never return this */
2245 if (val == 0xffffffff)
2246 continue;
2247
2248 /* the device has crashed so don't bother trying anymore */
2249 if (val & FW_IND_EVENT_PENDING)
2250 break;
2251
2252 if (val & FW_IND_INITIALIZED)
2253 break;
2254
2255 if (ar_pci->num_msi_intrs == 0)
2256 /* Fix potential race by repeating CORE_BASE writes */
2257 ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
2258 PCIE_INTR_ENABLE_ADDRESS,
2259 PCIE_INTR_FIRMWARE_MASK |
2260 PCIE_INTR_CE_MASK_ALL);
2261
2262 mdelay(10);
2263 } while (time_before(jiffies, timeout));
2264
2265 if (val == 0xffffffff) {
2266 ath10k_err(ar, "failed to read device register, device is gone\n");
2267 return -EIO;
2268 }
2269
2270 if (val & FW_IND_EVENT_PENDING) {
2271 ath10k_warn(ar, "device has crashed during init\n");
2272 ath10k_pci_fw_crashed_clear(ar);
2273 ath10k_pci_fw_crashed_dump(ar);
2274 return -ECOMM;
2275 }
2276
2277 if (!(val & FW_IND_INITIALIZED)) {
2278 ath10k_err(ar, "failed to receive initialized event from target: %08x\n",
2279 val);
2280 return -ETIMEDOUT;
2281 }
2282
2283 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot target initialised\n");
2284 return 0;
2285 }
2286
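/* Cold reset sequence: assert bit 0 of SOC_GLOBAL_RESET, wait for RTC_STATE
 * to report the cold reset state, then deassert the bit and wait for that
 * indication to clear again.
 */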
2287 static int ath10k_pci_cold_reset(struct ath10k *ar)
2288 {
2289 int i;
2290 u32 val;
2291
2292 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
2293
2294 /* Put Target, including PCIe, into RESET. */
2295 val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
2296 val |= 1;
2297 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2298
2299 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2300 if (ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2301 RTC_STATE_COLD_RESET_MASK)
2302 break;
2303 msleep(1);
2304 }
2305
2306 /* Pull Target, including PCIe, out of RESET. */
2307 val &= ~1;
2308 ath10k_pci_reg_write32(ar, SOC_GLOBAL_RESET_ADDRESS, val);
2309
2310 for (i = 0; i < ATH_PCI_RESET_WAIT_MAX; i++) {
2311 if (!(ath10k_pci_reg_read32(ar, RTC_STATE_ADDRESS) &
2312 RTC_STATE_COLD_RESET_MASK))
2313 break;
2314 msleep(1);
2315 }
2316
2317 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset complete\n");
2318
2319 return 0;
2320 }
2321
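/* Claim PCI resources: enable the device, request BAR_NUM, force 32-bit DMA
 * masks, enable bus mastering, apply the ASPM disable workaround and iomap
 * the register BAR into ar_pci->mem.
 */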
2322 static int ath10k_pci_claim(struct ath10k *ar)
2323 {
2324 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2325 struct pci_dev *pdev = ar_pci->pdev;
2326 u32 lcr_val;
2327 int ret;
2328
2329 pci_set_drvdata(pdev, ar);
2330
2331 ret = pci_enable_device(pdev);
2332 if (ret) {
2333 ath10k_err(ar, "failed to enable pci device: %d\n", ret);
2334 return ret;
2335 }
2336
2337 ret = pci_request_region(pdev, BAR_NUM, "ath");
2338 if (ret) {
2339 ath10k_err(ar, "failed to request region BAR%d: %d\n", BAR_NUM,
2340 ret);
2341 goto err_device;
2342 }
2343
2344 /* Target expects 32 bit DMA. Enforce it. */
2345 ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
2346 if (ret) {
2347 ath10k_err(ar, "failed to set dma mask to 32-bit: %d\n", ret);
2348 goto err_region;
2349 }
2350
2351 ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
2352 if (ret) {
2353 ath10k_err(ar, "failed to set consistent dma mask to 32-bit: %d\n",
2354 ret);
2355 goto err_region;
2356 }
2357
2358 pci_set_master(pdev);
2359
2360 /* Workaround: Disable ASPM */
2361 pci_read_config_dword(pdev, 0x80, &lcr_val);
2362 pci_write_config_dword(pdev, 0x80, (lcr_val & 0xffffff00));
2363
2364 /* Arrange for access to Target SoC registers. */
2365 ar_pci->mem = pci_iomap(pdev, BAR_NUM, 0);
2366 if (!ar_pci->mem) {
2367 ath10k_err(ar, "failed to iomap BAR%d\n", BAR_NUM);
2368 ret = -EIO;
2369 goto err_master;
2370 }
2371
2372 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot pci_mem 0x%p\n", ar_pci->mem);
2373 return 0;
2374
2375 err_master:
2376 pci_clear_master(pdev);
2377
2378 err_region:
2379 pci_release_region(pdev, BAR_NUM);
2380
2381 err_device:
2382 pci_disable_device(pdev);
2383
2384 return ret;
2385 }
2386
2387 static void ath10k_pci_release(struct ath10k *ar)
2388 {
2389 struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
2390 struct pci_dev *pdev = ar_pci->pdev;
2391
2392 pci_iounmap(pdev, ar_pci->mem);
2393 pci_release_region(pdev, BAR_NUM);
2394 pci_clear_master(pdev);
2395 pci_disable_device(pdev);
2396 }
2397
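/* Probe flow: allocate the core, claim PCI resources, wake the target, read
 * the chip id, allocate copy engine pipes, quiesce the device with a warm
 * reset, set up and request interrupts, and finally register with the core.
 * Error paths unwind in the reverse order via the goto labels.
 */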
2398 static int ath10k_pci_probe(struct pci_dev *pdev,
2399 const struct pci_device_id *pci_dev)
2400 {
2401 int ret = 0;
2402 struct ath10k *ar;
2403 struct ath10k_pci *ar_pci;
2404 u32 chip_id;
2405
2406 ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
2407 &ath10k_pci_hif_ops);
2408 if (!ar) {
2409 dev_err(&pdev->dev, "failed to allocate core\n");
2410 return -ENOMEM;
2411 }
2412
2413 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci probe\n");
2414
2415 ar_pci = ath10k_pci_priv(ar);
2416 ar_pci->pdev = pdev;
2417 ar_pci->dev = &pdev->dev;
2418 ar_pci->ar = ar;
2419
2420 spin_lock_init(&ar_pci->ce_lock);
2421 setup_timer(&ar_pci->rx_post_retry, ath10k_pci_rx_replenish_retry,
2422 (unsigned long)ar);
2423
2424 ret = ath10k_pci_claim(ar);
2425 if (ret) {
2426 ath10k_err(ar, "failed to claim device: %d\n", ret);
2427 goto err_core_destroy;
2428 }
2429
2430 ret = ath10k_pci_wake(ar);
2431 if (ret) {
2432 ath10k_err(ar, "failed to wake up: %d\n", ret);
2433 goto err_release;
2434 }
2435
2436 chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
2437 if (chip_id == 0xffffffff) {
2438 ath10k_err(ar, "failed to get chip id\n");
     ret = -ENODEV; /* don't return success when the device can't be read */
2439 goto err_sleep;
2440 }
2441
2442 ret = ath10k_pci_alloc_ce(ar);
2443 if (ret) {
2444 ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
2445 ret);
2446 goto err_sleep;
2447 }
2448
2449 ath10k_pci_ce_deinit(ar);
2450
2451 ret = ath10k_ce_disable_interrupts(ar);
2452 if (ret) {
2453 ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
2454 ret);
2455 goto err_free_ce;
2456 }
2457
2458 /* Workaround: There's no known way to mask all possible interrupts via
2459 * device CSR. The only way to make sure device doesn't assert
2460 * interrupts is to reset it. Interrupts are then disabled on host
2461 * after handlers are registered.
2462 */
2463 ath10k_pci_warm_reset(ar);
2464
2465 ret = ath10k_pci_init_irq(ar);
2466 if (ret) {
2467 ath10k_err(ar, "failed to init irqs: %d\n", ret);
2468 goto err_free_ce;
2469 }
2470
2471 ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
2472 ath10k_pci_get_irq_method(ar), ar_pci->num_msi_intrs,
2473 ath10k_pci_irq_mode, ath10k_pci_reset_mode);
2474
2475 ret = ath10k_pci_request_irq(ar);
2476 if (ret) {
2477 ath10k_warn(ar, "failed to request irqs: %d\n", ret);
2478 goto err_deinit_irq;
2479 }
2480
2481 /* This shouldn't race as the device has been reset above. */
2482 ath10k_pci_irq_disable(ar);
2483
2484 ret = ath10k_core_register(ar, chip_id);
2485 if (ret) {
2486 ath10k_err(ar, "failed to register driver core: %d\n", ret);
2487 goto err_free_irq;
2488 }
2489
2490 return 0;
2491
2492 err_free_irq:
2493 ath10k_pci_free_irq(ar);
2494 ath10k_pci_kill_tasklet(ar);
2495
2496 err_deinit_irq:
2497 ath10k_pci_deinit_irq(ar);
2498
2499 err_free_ce:
2500 ath10k_pci_free_ce(ar);
2501
2502 err_sleep:
2503 ath10k_pci_sleep(ar);
2504
2505 err_release:
2506 ath10k_pci_release(ar);
2507
2508 err_core_destroy:
2509 ath10k_core_destroy(ar);
2510
2511 return ret;
2512 }
2513
2514 static void ath10k_pci_remove(struct pci_dev *pdev)
2515 {
2516 struct ath10k *ar = pci_get_drvdata(pdev);
2517 struct ath10k_pci *ar_pci;
2518
2519 ath10k_dbg(ar, ATH10K_DBG_PCI, "pci remove\n");
2520
2521 if (!ar)
2522 return;
2523
2524 ar_pci = ath10k_pci_priv(ar);
2525
2526 if (!ar_pci)
2527 return;
2528
2529 ath10k_core_unregister(ar);
2530 ath10k_pci_free_irq(ar);
2531 ath10k_pci_kill_tasklet(ar);
2532 ath10k_pci_deinit_irq(ar);
2533 ath10k_pci_ce_deinit(ar);
2534 ath10k_pci_free_ce(ar);
2535 ath10k_pci_sleep(ar);
2536 ath10k_pci_release(ar);
2537 ath10k_core_destroy(ar);
2538 }
2539
2540 MODULE_DEVICE_TABLE(pci, ath10k_pci_id_table);
2541
2542 static struct pci_driver ath10k_pci_driver = {
2543 .name = "ath10k_pci",
2544 .id_table = ath10k_pci_id_table,
2545 .probe = ath10k_pci_probe,
2546 .remove = ath10k_pci_remove,
2547 };
2548
2549 static int __init ath10k_pci_init(void)
2550 {
2551 int ret;
2552
2553 ret = pci_register_driver(&ath10k_pci_driver);
2554 if (ret)
2555 printk(KERN_ERR "failed to register ath10k pci driver: %d\n",
2556 ret);
2557
2558 return ret;
2559 }
2560 module_init(ath10k_pci_init);
2561
2562 static void __exit ath10k_pci_exit(void)
2563 {
2564 pci_unregister_driver(&ath10k_pci_driver);
2565 }
2566
2567 module_exit(ath10k_pci_exit);
2568
2569 MODULE_AUTHOR("Qualcomm Atheros");
2570 MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
2571 MODULE_LICENSE("Dual BSD/GPL");
2572 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
2573 MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
2574