// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>

#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"

#define ATH11K_PCI_BAR_NUM		0
#define ATH11K_PCI_DMA_MASK		32

#define ATH11K_PCI_IRQ_CE0_OFFSET	3

#define WINDOW_ENABLE_BIT		0x40000000
#define WINDOW_REG_ADDRESS		0x310c
#define WINDOW_VALUE_MASK		GENMASK(24, 19)
#define WINDOW_START			0x80000
#define WINDOW_RANGE_MASK		GENMASK(18, 0)

#define TCSR_SOC_HW_VERSION		0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(16, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 0)

/* BAR0 + 4K is always accessible, so there is no
 * need to force a wakeup for accesses below it.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF		0xFE0

#define QCA6390_DEVICE_ID		0x1101

static const struct pci_device_id ath11k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);

static const struct ath11k_bus_params ath11k_pci_bus_params = {
	.mhi_support = true,
	.m3_fw_support = true,
	.fixed_bdf_addr = false,
	.fixed_mem_region = false,
};

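/* The 32 MSI vectors are split statically between the users below:
 * MHI uses vectors 0-2, CE uses 3-12, WAKE uses 13 and DP uses 14-31.
 */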
static const struct ath11k_msi_config msi_config = {
	.total_vectors = 32,
	.total_users = 4,
	.users = (struct ath11k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
		{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
	},
};

static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2host-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

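/* Register offsets below WINDOW_START are mapped directly into BAR0.
 * Higher offsets are reached through a 512 KB sliding window: bits 24:19
 * of the offset select the window via WINDOW_REG_ADDRESS and bits 18:0
 * index into it. The currently selected window is cached in
 * register_window to avoid redundant writes; callers must hold
 * window_lock.
 */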
static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
	struct ath11k_base *ab = ab_pci->ab;

	u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);

	lockdep_assert_held(&ab_pci->window_lock);

	if (window != ab_pci->register_window) {
		iowrite32(WINDOW_ENABLE_BIT | window,
			  ab->mem + WINDOW_REG_ADDRESS);
		ab_pci->register_window = window;
	}
}

void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* For offsets beyond BAR0 + 4K - 32, MHI may need to be
	 * woken up before the access.
	 */
	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);

	if (offset < WINDOW_START) {
		iowrite32(value, ab->mem + offset);
	} else {
		spin_lock_bh(&ab_pci->window_lock);
		ath11k_pci_select_window(ab_pci, offset);
		iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&ab_pci->window_lock);
	}

	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}

u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	u32 val;

	/* For offsets beyond BAR0 + 4K - 32, MHI may need to be
	 * woken up before the access.
	 */
	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);

	if (offset < WINDOW_START) {
		val = ioread32(ab->mem + offset);
	} else {
		spin_lock_bh(&ab_pci->window_lock);
		ath11k_pci_select_window(ab_pci, offset);
		val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&ab_pci->window_lock);
	}

	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);

	return val;
}

static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{
	u32 val, delay;

	val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle the V bit back, otherwise the SoC stays stuck in reset */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath11k_warn(ab, "link down error during global reset\n");
}

static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* Write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
	 * continuing the warm boot path and entering a dead loop.
	 */
	ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* A read-clear register; clear it to prevent Q6 from
	 * entering a wrong code path.
	 */
	val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
}

static void ath11k_pci_force_wake(struct ath11k_base *ab)
{
	ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}

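/* Full SoC software reset: reset the SoC, clear the MHI vector, reset
 * again, assert the MHICTRL reset and finally clear the warm-boot debug
 * registers so Q6 does not take the warm path on the next boot.
 */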
static void ath11k_pci_sw_reset(struct ath11k_base *ab)
{
	ath11k_pci_soc_global_reset(ab);
	ath11k_mhi_clear_vector(ab);
	ath11k_pci_soc_global_reset(ab);
	ath11k_mhi_set_mhictrl_reset(ab);
	ath11k_pci_clear_dbg_registers(ab);
}

int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return pci_irq_vector(pci_dev, vector);
}

static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
				       u32 *msi_addr_hi)
{
	struct pci_dev *pci_dev = to_pci_dev(ab->dev);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_lo);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
			      msi_addr_hi);
}

int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
				       int *num_vectors, u32 *user_base_data,
				       u32 *base_vector)
{
	struct ath11k_base *ab = ab_pci->ab;
	int idx;

	for (idx = 0; idx < msi_config.total_users; idx++) {
		if (strcmp(user_name, msi_config.users[idx].name) == 0) {
			*num_vectors = msi_config.users[idx].num_vectors;
			*user_base_data = msi_config.users[idx].base_vector
				+ ab_pci->msi_ep_base_data;
			*base_vector = msi_config.users[idx].base_vector;

			ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
				   user_name, *num_vectors, *user_base_data,
				   *base_vector);

			return 0;
		}
	}

	ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}

static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
					  int *num_vectors, u32 *user_base_data,
					  u32 *base_vector)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
						  num_vectors, user_base_data,
						  base_vector);
}

static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
	}
}

static void ath11k_pci_free_irq(struct ath11k_base *ab)
{
	int i, irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath11k_pci_free_ext_irq(ab);
}

static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}

static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}

static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_pci_ce_irq_disable(ab, i);
	}
}

static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath11k_pci_ce_tasklet(unsigned long data)
{
	struct ath11k_ce_pipe *ce_pipe = (struct ath11k_ce_pipe *)data;

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}

static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;

	ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];

		ath11k_pci_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath11k_pci_ext_grp_enable(irq_grp);
	}
}

static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
{
	int i, j, irq_idx;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pci_ext_irq_disable(ab);
	ath11k_pci_sync_ext_irqs(ab);
}

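/* NAPI poll for one external (DP) interrupt group: service the SRNGs and
 * re-enable the group's IRQs once less than the full budget was consumed.
 */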
static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath11k_ext_irq_grp,
							  napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath11k_pci_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;

	ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);

	ath11k_pci_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

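/* Set up the external (DP) interrupt groups. Each group with any ring
 * mask bit set gets an MSI vector from the "DP" range (wrapping when
 * there are more groups than vectors); the IRQs are requested here but
 * left disabled until ath11k_pci_ext_irq_enable().
 */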
static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
{
	int i, j, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0;

	ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
						 &num_vectors,
						 &user_base_data,
						 &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);

		if (ab->hw_params.ring_mask->tx[i] ||
		    ab->hw_params.ring_mask->rx[i] ||
		    ab->hw_params.ring_mask->rx_err[i] ||
		    ab->hw_params.ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params.ring_mask->reo_status[i] ||
		    ab->hw_params.ring_mask->rxdma2host[i] ||
		    ab->hw_params.ring_mask->host2rxdma[i] ||
		    ab->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = base_vector + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			int vector = (i % num_vectors) + base_vector;
			int irq = ath11k_pci_get_msi_irq(ab->dev, vector);

			ab->irq_num[irq_idx] = irq;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);
			ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
					  IRQF_SHARED,
					  "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				return ret;
			}

			disable_irq_nosync(ab->irq_num[irq_idx]);
		}
	}

	return 0;
}

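/* Map each copy engine to an MSI vector from the "CE" range (wrapping if
 * there are more CEs than vectors), set up its tasklet and request the
 * IRQ, leaving it disabled; then configure the external DP interrupt
 * groups.
 */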
static int ath11k_pci_config_irq(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;

	ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
						 "CE", &msi_data_count,
						 &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		msi_data = (i % msi_data_count) + msi_irq_start;
		irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
		ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_init(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet,
			     (unsigned long)ce_pipe);

		ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
				  IRQF_SHARED, irq_name[irq_idx],
				  ce_pipe);
		if (ret) {
			ath11k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		ath11k_pci_ce_irq_disable(ab, i);
	}

	ret = ath11k_pci_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}

static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
{
	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce = ab->hw_params.target_ce_config;
	cfg->tgt_ce_len = ab->hw_params.target_ce_count;

	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390;

	ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
				    &cfg->shadow_reg_v2_len);
}

static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_pci_ce_irq_enable(ab, i);
	}
}

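/* Allocate all MSI vectors in one go; anything less than total_vectors is
 * treated as a failure. The MSI data of the first vector is stored as
 * msi_ep_base_data and later added to each user's base vector in
 * ath11k_pci_get_user_msi_assignment().
 */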
static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
					    msi_config.total_vectors,
					    msi_config.total_vectors,
					    PCI_IRQ_MSI);
	if (num_vectors != msi_config.total_vectors) {
		ath11k_err(ab, "failed to get %d MSI vectors, only %d available",
			   msi_config.total_vectors, num_vectors);

		if (num_vectors >= 0)
			return -EINVAL;
		else
			return num_vectors;
	}

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath11k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;

	ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

	return ret;
}

static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}

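/* Claim the PCI device: verify the device ID, enable the device, request
 * BAR0, set 32-bit DMA masks and iomap the BAR for register access.
 */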
static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath11k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id) {
		ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
	if (ret) {
		ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath11k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
	if (ret) {
		ath11k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
	if (ret) {
		ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH11K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
	if (ret) {
		ath11k_err(ab, "failed to set pci consistent dma mask to %d: %d\n",
			   ATH11K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
		ret = -EIO;
		goto clear_master;
	}

	ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
	return 0;

clear_master:
	pci_clear_master(pdev);
release_region:
	pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}

static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	struct pci_dev *pci_dev = ab_pci->pdev;

	pci_iounmap(pci_dev, ab->mem);
	ab->mem = NULL;
	pci_clear_master(pci_dev);
	pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}

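/* Bring the device up: reset the register window cache, perform a SoC
 * software reset and then start MHI.
 */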
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	int ret;

	ab_pci->register_window = 0;
	clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath11k_pci_sw_reset(ab_pci->ab);

	ret = ath11k_mhi_start(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath11k_pci_power_down(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	ath11k_mhi_stop(ab_pci);
	clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath11k_pci_force_wake(ab_pci->ab);
	ath11k_pci_sw_reset(ab_pci->ab);
}

static void ath11k_pci_kill_tasklets(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

static void ath11k_pci_stop(struct ath11k_base *ab)
{
	ath11k_pci_ce_irqs_disable(ab);
	ath11k_pci_sync_ce_irqs(ab);
	ath11k_pci_kill_tasklets(ab);
	ath11k_ce_cleanup_pipes(ab);
}

static int ath11k_pci_start(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);

	ath11k_pci_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}

static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
	.start = ath11k_pci_start,
	.stop = ath11k_pci_stop,
	.read32 = ath11k_pci_read32,
	.write32 = ath11k_pci_write32,
	.power_down = ath11k_pci_power_down,
	.power_up = ath11k_pci_power_up,
	.irq_enable = ath11k_pci_ext_irq_enable,
	.irq_disable = ath11k_pci_ext_irq_disable,
	.get_msi_address = ath11k_pci_get_msi_address,
	.get_user_msi_vector = ath11k_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pci_map_service_to_pipe,
};

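/* Probe: allocate the ath11k core, map BAR0, detect the SoC hardware
 * version, enable MSI, register MHI and set up copy engines and IRQs
 * before handing over to ath11k_core_init().
 */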
static int ath11k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	struct ath11k_base *ab;
	struct ath11k_pci *ab_pci;
	u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
	int ret;

	dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");

	ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
			       &ath11k_pci_bus_params);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
		return -ENOMEM;
	}

	ab->dev = &pdev->dev;
	pci_set_drvdata(pdev, ab);
	ab_pci = ath11k_pci_priv(ab);
	ab_pci->dev_id = pci_dev->device;
	ab_pci->ab = ab;
	ab_pci->pdev = pdev;
	ab->hif.ops = &ath11k_pci_hif_ops;
	spin_lock_init(&ab_pci->window_lock);

	ret = ath11k_pci_claim(ab_pci, pdev);
	if (ret) {
		ath11k_err(ab, "failed to claim device: %d\n", ret);
		goto err_free_core;
	}

	switch (pci_dev->device) {
	case QCA6390_DEVICE_ID:
		soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
		soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
						 soc_hw_version);
		soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
						 soc_hw_version);

		ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
			   soc_hw_version_major, soc_hw_version_minor);

		switch (soc_hw_version_major) {
		case 2:
			ab->hw_rev = ATH11K_HW_QCA6390_HW20;
			break;
		default:
			dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
				soc_hw_version_major, soc_hw_version_minor);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;
	default:
		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
			pci_dev->device);
		ret = -EOPNOTSUPP;
		goto err_pci_free_region;
	}

	ret = ath11k_pci_enable_msi(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to enable msi: %d\n", ret);
		goto err_pci_free_region;
	}

	ret = ath11k_core_pre_init(ab);
	if (ret)
		goto err_pci_disable_msi;

	ret = ath11k_mhi_register(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to register mhi: %d\n", ret);
		goto err_pci_disable_msi;
	}

	ret = ath11k_hal_srng_init(ab);
	if (ret)
		goto err_mhi_unregister;

	ret = ath11k_ce_alloc_pipes(ab);
	if (ret) {
		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath11k_pci_init_qmi_ce_config(ab);

	ret = ath11k_pci_config_irq(ab);
	if (ret) {
		ath11k_err(ab, "failed to config irq: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_core_init(ab);
	if (ret) {
		ath11k_err(ab, "failed to init core: %d\n", ret);
		goto err_free_irq;
	}
	return 0;

err_free_irq:
	ath11k_pci_free_irq(ab);

err_ce_free:
	ath11k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath11k_hal_srng_deinit(ab);

err_mhi_unregister:
	ath11k_mhi_unregister(ab_pci);

err_pci_disable_msi:
	ath11k_pci_disable_msi(ab_pci);

err_pci_free_region:
	ath11k_pci_free_region(ab_pci);

err_free_core:
	ath11k_core_free(ab);

	return ret;
}

static void ath11k_pci_remove(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);

	ath11k_core_deinit(ab);

	ath11k_mhi_unregister(ab_pci);

	ath11k_pci_free_irq(ab);
	ath11k_pci_disable_msi(ab_pci);
	ath11k_pci_free_region(ab_pci);

	ath11k_hal_srng_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_core_free(ab);
}

static void ath11k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);

	ath11k_pci_power_down(ab);
}

static struct pci_driver ath11k_pci_driver = {
	.name = "ath11k_pci",
	.id_table = ath11k_pci_id_table,
	.probe = ath11k_pci_probe,
	.remove = ath11k_pci_remove,
	.shutdown = ath11k_pci_shutdown,
};

static int ath11k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath11k_pci_driver);
	if (ret)
		pr_err("failed to register ath11k pci driver: %d\n",
		       ret);

	return ret;
}
module_init(ath11k_pci_init);

static void ath11k_pci_exit(void)
{
	pci_unregister_driver(&ath11k_pci_driver);
}

module_exit(ath11k_pci_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");