// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "mei/iwl-mei.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

int iwl_trans_pcie_sw_reset(struct iwl_trans *trans, bool retake_ownership)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_SW_RESET);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET,
			    CSR_RESET_REG_FLAG_SW_RESET);
		usleep_range(5000, 6000);
	}

	if (retake_ownership)
		return iwl_pcie_prepare_card_hw(trans);

	return 0;
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

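/*
 * Allocate a contiguous coherent DMA block for the firmware monitor,
 * halving the (power-of-two) size until the allocation succeeds or the
 * minimum of 2 KiB is reached. If a block already exists, just clear it.
 */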
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size) {
		memset(fw_mon->block, 0, fw_mon->size);
		return;
	}

	/* need at least 2 KiB, so stop at 11 */
	for (power = max_power; power >= 11; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power);
}

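/*
 * Indirect access to shared (SHR) registers: the target address is
 * written to the HEEP control word together with what appears to be an
 * opcode in the top nibble (2 for read, 3 for write, judging by the two
 * helpers below); data is transferred through the HEEP data register.
 */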
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE: This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI workaround) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse - this workaround is needed for
		 * 7260 / 3160 only, so we key it off
		 * host_interrupt_operation_mode even though it is not
		 * actually related to that mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit.
		 * This consumes slightly more power (100uA) - but allows us
		 * to be sure that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks. This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	ret = iwl_trans_pcie_sw_reset(trans, true);

	if (!ret)
		ret = iwl_finish_nic_init(trans);

	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
			    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	ret = iwl_trans_pcie_sw_reset(trans, true);
	if (ret)
		IWL_ERR(trans,
			"iwl_pcie_apm_lp_xtal_enable: failed to retake NIC ownership\n");

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   CSR_GP_CNTRL_REG_FLAG_BUS_MASTER_DISABLE_STATUS,
				   100);
		usleep_range(10000, 20000);
	} else {
		iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

		ret = iwl_poll_bit(trans, CSR_RESET,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED,
				   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	}

	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans, false);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

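/*
 * Bring the NIC to an operational state: APM init under the irq lock,
 * power source selection, op-mode NIC configuration, and RX/TX queue
 * allocation (or reset if already allocated).
 */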
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0) {
		trans->csme_own = false;
		return 0;
	}

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		int t = 0;

		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0) {
				trans->csme_own = false;
				return 0;
			}

			if (iwl_mei_is_connected()) {
				IWL_DEBUG_INFO(trans,
					       "Couldn't prepare the card but SAP is connected\n");
				trans->csme_own = true;
				if (trans->trans_cfg->device_family !=
				    IWL_DEVICE_FAMILY_9000)
					IWL_ERR(trans,
						"SAP not supported for this NIC family\n");

				return -EBUSY;
			}

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
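/*
 * Program the FH service channel to DMA one firmware chunk from host
 * memory (phy_addr) into device SRAM at dst_addr: pause the channel,
 * set the source and destination addresses and byte count, mark the
 * TFD buffer valid, and re-enable the channel to kick the transfer.
 */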
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (const u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

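/*
 * Load all sections belonging to one CPU on 8000-series devices and
 * report progress to the firmware through FH_UCODE_LOAD_STATUS: each
 * loaded section shifts another '1' into a bitmask (low 16 bits for
 * CPU1, high 16 bits for CPU2), and writing 0xFFFF / 0xFFFFFFFF
 * signals that the CPU's image is complete.
 */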
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
		 * sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->gen2) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separates the CPU1
		 * sections from the CPU2 sections.
		 * PAGING_SEPARATOR_SECTION delimiter - separates the CPU2
		 * non-paged sections from the CPU2 paging sections.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set default value. On resume, reading the values that were
	 * zeroed can provide debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report, false);

	return hw_rfkill;
}

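/*
 * Each entry maps one interrupt cause to its slot in the MSI-X IVAR
 * table: mask_reg/bit identify the cause bit to unmask, and addr is the
 * cause's index into the IVAR table.
 */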
struct iwl_causes_list {
	u16 mask_reg;
	u8 bit;
	u8 addr;
};

#define IWL_CAUSE(reg, mask)						\
	{								\
		.mask_reg = reg,					\
		.bit = ilog2(mask),					\
		.addr = ilog2(mask) +					\
			((reg) == CSR_MSIX_FH_INT_MASK_AD ? -16 :	\
			 (reg) == CSR_MSIX_HW_INT_MASK_AD ? 16 :	\
			 0xffff), /* causes overflow warning */		\
	}

static const struct iwl_causes_list causes_list_common[] = {
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH0_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_D2S_CH1_NUM),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_S2D),
	IWL_CAUSE(CSR_MSIX_FH_INT_MASK_AD, MSIX_FH_INT_CAUSES_FH_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_ALIVE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_WAKEUP),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RESET_DONE),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_CT_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_RF_KILL),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_PERIODIC),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SCD),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_FH_TX),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HW_ERR),
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_HAP),
};

static const struct iwl_causes_list causes_list_pre_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR),
};

static const struct iwl_causes_list causes_list_bz[] = {
	IWL_CAUSE(CSR_MSIX_HW_INT_MASK_AD, MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ),
};

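/* Route each listed cause to the given MSI-X vector and unmask it. */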
static void iwl_pcie_map_list(struct iwl_trans *trans,
			      const struct iwl_causes_list *causes,
			      int arr_size, int val)
{
	int i;

	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      BIT(causes[i].bit));
	}
}

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	iwl_pcie_map_list(trans, causes_list_common,
			  ARRAY_SIZE(causes_list_common), val);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_pcie_map_list(trans, causes_list_bz,
				  ARRAY_SIZE(causes_list_bz), val);
	else
		iwl_pcie_map_list(trans, causes_list_pre_bz,
				  ARRAY_SIZE(causes_list_pre_bz), val);
}

static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool from_irq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		if (!from_irq)
			iwl_pcie_synchronize_irqs(trans);
		iwl_pcie_rx_napi_sync(trans);
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_trans_pcie_sw_reset(trans, true);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending HW RF kill interrupt all
	 * the time, unless the interrupt is ACKed even if the interrupt
	 * should be masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
			    const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupts besides
	 * the FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill, false);
}

void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans, false);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state, bool from_irq)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state) &&
	    !WARN_ON(trans->trans_cfg->gen2))
		_iwl_trans_pcie_stop_device(trans, from_irq);
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_INIT);
	} else {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
	}

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

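/*
 * Ask the firmware to enter or leave D3 (via the suspend/resume
 * doorbell on AX210, or the IPC sleep-control CSR on BZ and later)
 * and wait for the sx completion notification from the firmware.
 * Older families need no handshake and return immediately.
 */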
static int iwl_pcie_d3_handshake(struct iwl_trans *trans, bool suspend)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210)
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    suspend ? UREG_DOORBELL_TO_ISR6_SUSPEND :
					      UREG_DOORBELL_TO_ISR6_RESUME);
	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_write32(trans, CSR_IPC_SLEEP_CONTROL,
			    suspend ? CSR_IPC_SLEEP_CONTROL_SUSPEND :
				      CSR_IPC_SLEEP_CONTROL_RESUME);
	else
		return 0;

	ret = wait_event_timeout(trans_pcie->sx_waitq,
				 trans_pcie->sx_complete, 2 * HZ);

	/* Invalidate it toward next suspend or resume */
	trans_pcie->sx_complete = false;

	if (!ret) {
		IWL_ERR(trans, "Timeout %s D3\n",
			suspend ? "entering" : "exiting");
		return -ETIMEDOUT;
	}

	return 0;
}

int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test, bool reset)
{
	int ret;

	if (!reset)
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	ret = iwl_pcie_d3_handshake(trans, true);
	if (ret)
		return ret;

	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}

int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
			     enum iwl_d3_status *status,
			     bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		ret = 0;
		goto out;
	}

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
	else
		iwl_set_bit(trans, CSR_GP_CNTRL,
			    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans);
	if (ret)
		return ret;

	/*
	 * Reconfigure IVAR table in case of MSIX or reset ict table in
	 * MSI mode since HW reset erased it.
	 * Also enables interrupts - none will happen as
	 * the device doesn't know we're waking it up, only when
	 * the opmode actually tells it after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE)
		ret = iwl_pcie_d3_handshake(trans, false);
	else
		trans->state = IWL_TRANS_NO_FW;

	return ret;
}

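/*
 * Try to enable MSI-X with up to num_online_cpus() + 2 vectors (capped
 * by the number of HW RX queues for this family); fall back to MSI if
 * multi-queue RX is not supported or vector allocation fails.
 */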
static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled with rx queues %d, vec mask 0x%x\n",
		       trans_pcie->trans->num_rx_queues,
		       trans_pcie->shared_vec_mask);

	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}

iwl_pcie_irq_set_affinity(struct iwl_trans * trans)1728 static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
1729 {
1730 #if defined(CONFIG_SMP)
1731 int iter_rx_q, i, ret, cpu, offset;
1732 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1733
1734 i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
1735 iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
1736 offset = 1 + i;
1737 for (; i < iter_rx_q ; i++) {
1738 /*
1739 * Get the cpu prior to the place to search
1740 * (i.e. return will be > i - 1).
1741 */
1742 cpu = cpumask_next(i - offset, cpu_online_mask);
1743 cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
1744 ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
1745 &trans_pcie->affinity_mask[i]);
1746 if (ret)
1747 IWL_ERR(trans_pcie->trans,
1748 "Failed to set affinity mask for IRQ %d\n",
1749 trans_pcie->msix_entries[i].vector);
1750 }
1751 #endif
1752 }
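/*
 * Worked example (illustrative): with a dedicated vector per RSS queue
 * (IWL_SHARED_IRQ_FIRST_RSS not set), i starts at 1 and offset is 2,
 * so vector 1 is pinned via cpumask_next(-1) to the first online CPU,
 * vector 2 to the next one, and so on.  When the first RSS queue
 * shares vector 0, the same walk starts at i = 0 with offset 1.
 */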
1753
1754 static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
1755 struct iwl_trans_pcie *trans_pcie)
1756 {
1757 int i;
1758
1759 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1760 int ret;
1761 struct msix_entry *msix_entry;
1762 const char *qname = queue_name(&pdev->dev, trans_pcie, i);
1763
1764 if (!qname)
1765 return -ENOMEM;
1766
1767 msix_entry = &trans_pcie->msix_entries[i];
1768 ret = devm_request_threaded_irq(&pdev->dev,
1769 msix_entry->vector,
1770 iwl_pcie_msix_isr,
1771 (i == trans_pcie->def_irq) ?
1772 iwl_pcie_irq_msix_handler :
1773 iwl_pcie_irq_rx_msix_handler,
1774 IRQF_SHARED,
1775 qname,
1776 msix_entry);
1777 if (ret) {
1778 IWL_ERR(trans_pcie->trans,
1779 "Error allocating IRQ %d\n", i);
1780
1781 return ret;
1782 }
1783 }
1784 iwl_pcie_irq_set_affinity(trans_pcie->trans);
1785
1786 return 0;
1787 }
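/*
 * Example layout (illustrative): with a full grant of alloc_vecs = 4,
 * def_irq is 3, so vectors 0-2 are serviced by
 * iwl_pcie_irq_rx_msix_handler() and vector 3 by the general
 * iwl_pcie_irq_msix_handler().
 */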
1788
1789 static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
1790 {
1791 u32 hpm, wprot;
1792
1793 switch (trans->trans_cfg->device_family) {
1794 case IWL_DEVICE_FAMILY_9000:
1795 wprot = PREG_PRPH_WPROT_9000;
1796 break;
1797 case IWL_DEVICE_FAMILY_22000:
1798 wprot = PREG_PRPH_WPROT_22000;
1799 break;
1800 default:
1801 return 0;
1802 }
1803
1804 hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
1805 if (!iwl_trans_is_hw_error_value(hpm) && (hpm & PERSISTENCE_BIT)) {
1806 u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
1807
1808 if (wprot_val & PREG_WFPM_ACCESS) {
1809 IWL_ERR(trans,
1810 "Error, can not clear persistence bit\n");
1811 return -EPERM;
1812 }
1813 iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
1814 hpm & ~PERSISTENCE_BIT);
1815 }
1816
1817 return 0;
1818 }
1819
1820 static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
1821 {
1822 int ret;
1823
1824 ret = iwl_finish_nic_init(trans);
1825 if (ret < 0)
1826 return ret;
1827
1828 iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1829 HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1830 udelay(20);
1831 iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1832 HPM_HIPM_GEN_CFG_CR_PG_EN |
1833 HPM_HIPM_GEN_CFG_CR_SLP_EN);
1834 udelay(20);
1835 iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
1836 HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1837
1838 return iwl_trans_pcie_sw_reset(trans, true);
1839 }
1840
1841 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1842 {
1843 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1844 int err;
1845
1846 lockdep_assert_held(&trans_pcie->mutex);
1847
1848 err = iwl_pcie_prepare_card_hw(trans);
1849 if (err) {
1850 IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1851 return err;
1852 }
1853
1854 err = iwl_trans_pcie_clear_persistence_bit(trans);
1855 if (err)
1856 return err;
1857
1858 err = iwl_trans_pcie_sw_reset(trans, true);
1859 if (err)
1860 return err;
1861
1862 if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
1863 trans->trans_cfg->integrated) {
1864 err = iwl_pcie_gen2_force_power_gating(trans);
1865 if (err)
1866 return err;
1867 }
1868
1869 err = iwl_pcie_apm_init(trans);
1870 if (err)
1871 return err;
1872
1873 iwl_pcie_init_msix(trans_pcie);
1874
1875 /* From now on, the op_mode will be kept updated about RF kill state */
1876 iwl_enable_rfkill_int(trans);
1877
1878 trans_pcie->opmode_down = false;
1879
1880 /* Set is_down to false here so that...*/
1881 trans_pcie->is_down = false;
1882
1883 /* ...rfkill can call stop_device and set it false if needed */
1884 iwl_pcie_check_hw_rf_kill(trans);
1885
1886 return 0;
1887 }
1888
1889 int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1890 {
1891 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1892 int ret;
1893
1894 mutex_lock(&trans_pcie->mutex);
1895 ret = _iwl_trans_pcie_start_hw(trans);
1896 mutex_unlock(&trans_pcie->mutex);
1897
1898 return ret;
1899 }
1900
1901 void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1902 {
1903 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1904
1905 mutex_lock(&trans_pcie->mutex);
1906
1907 /* disable interrupts - don't enable HW RF kill interrupt */
1908 iwl_disable_interrupts(trans);
1909
1910 iwl_pcie_apm_stop(trans, true);
1911
1912 iwl_disable_interrupts(trans);
1913
1914 iwl_pcie_disable_ict(trans);
1915
1916 mutex_unlock(&trans_pcie->mutex);
1917
1918 iwl_pcie_synchronize_irqs(trans);
1919 }
1920
1921 void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1922 {
1923 writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1924 }
1925
1926 void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1927 {
1928 writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1929 }
1930
1931 u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1932 {
1933 return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1934 }
1935
1936 static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
1937 {
1938 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1939 return 0x00FFFFFF;
1940 else
1941 return 0x000FFFFF;
1942 }
1943
1944 u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1945 {
1946 u32 mask = iwl_trans_pcie_prph_msk(trans);
1947
1948 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1949 ((reg & mask) | (3 << 24)));
1950 return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1951 }
1952
1953 void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr, u32 val)
1954 {
1955 u32 mask = iwl_trans_pcie_prph_msk(trans);
1956
1957 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1958 ((addr & mask) | (3 << 24)));
1959 iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1960 }
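/*
 * Usage sketch (illustrative only; SOME_PRPH_REG is a placeholder):
 * periphery registers are reached indirectly through the HBUS
 * address/data window programmed above.  Callers normally go through
 * iwl_read_prph()/iwl_write_prph(), which wrap these low-level ops in
 * NIC access.
 */
#if 0	/* example only */
	u32 val = iwl_trans_pcie_read_prph(trans, SOME_PRPH_REG);

	iwl_trans_pcie_write_prph(trans, SOME_PRPH_REG, val | BIT(0));
#endif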
1961
1962 void iwl_trans_pcie_configure(struct iwl_trans *trans,
1963 const struct iwl_trans_config *trans_cfg)
1964 {
1965 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1966
1967 /* free all first - we might be reconfigured for a different size */
1968 iwl_pcie_free_rbs_pool(trans);
1969
1970 trans_pcie->txqs.cmd.q_id = trans_cfg->cmd_queue;
1971 trans_pcie->txqs.cmd.fifo = trans_cfg->cmd_fifo;
1972 trans_pcie->txqs.page_offs = trans_cfg->cb_data_offs;
1973 trans_pcie->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
1974 trans_pcie->txqs.queue_alloc_cmd_ver = trans_cfg->queue_alloc_cmd_ver;
1975
1976 if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1977 trans_pcie->n_no_reclaim_cmds = 0;
1978 else
1979 trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1980 if (trans_pcie->n_no_reclaim_cmds)
1981 memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1982 trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1983
1984 trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
1985 trans_pcie->rx_page_order =
1986 iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
1987 trans_pcie->rx_buf_bytes =
1988 iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
1989 trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
1990 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1991 trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
1992
1993 trans_pcie->txqs.bc_table_dword = trans_cfg->bc_table_dword;
1994 trans_pcie->scd_set_active = trans_cfg->scd_set_active;
1995
1996 trans->command_groups = trans_cfg->command_groups;
1997 trans->command_groups_size = trans_cfg->command_groups_size;
1998
1999
2000 trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
2001 }
2002
2003 void iwl_trans_pcie_free_pnvm_dram_regions(struct iwl_dram_regions *dram_regions,
2004 struct device *dev)
2005 {
2006 u8 i;
2007 struct iwl_dram_data *desc_dram = &dram_regions->prph_scratch_mem_desc;
2008
2009 /* free DRAM payloads */
2010 for (i = 0; i < dram_regions->n_regions; i++) {
2011 dma_free_coherent(dev, dram_regions->drams[i].size,
2012 dram_regions->drams[i].block,
2013 dram_regions->drams[i].physical);
2014 }
2015 dram_regions->n_regions = 0;
2016
2017 /* free DRAM addresses array */
2018 if (desc_dram->block) {
2019 dma_free_coherent(dev, desc_dram->size,
2020 desc_dram->block,
2021 desc_dram->physical);
2022 }
2023 memset(desc_dram, 0, sizeof(*desc_dram));
2024 }
2025
2026 static void iwl_pcie_free_invalid_tx_cmd(struct iwl_trans *trans)
2027 {
2028 iwl_pcie_free_dma_ptr(trans, &trans->invalid_tx_cmd);
2029 }
2030
2031 static int iwl_pcie_alloc_invalid_tx_cmd(struct iwl_trans *trans)
2032 {
2033 struct iwl_cmd_header_wide bad_cmd = {
2034 .cmd = INVALID_WR_PTR_CMD,
2035 .group_id = DEBUG_GROUP,
2036 .sequence = cpu_to_le16(0xffff),
2037 .length = cpu_to_le16(0),
2038 .version = 0,
2039 };
2040 int ret;
2041
2042 ret = iwl_pcie_alloc_dma_ptr(trans, &trans->invalid_tx_cmd,
2043 sizeof(bad_cmd));
2044 if (ret)
2045 return ret;
2046 memcpy(trans->invalid_tx_cmd.addr, &bad_cmd, sizeof(bad_cmd));
2047 return 0;
2048 }
2049
2050 void iwl_trans_pcie_free(struct iwl_trans *trans)
2051 {
2052 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2053 int i;
2054
2055 iwl_pcie_synchronize_irqs(trans);
2056
2057 if (trans->trans_cfg->gen2)
2058 iwl_txq_gen2_tx_free(trans);
2059 else
2060 iwl_pcie_tx_free(trans);
2061 iwl_pcie_rx_free(trans);
2062
2063 if (trans_pcie->rba.alloc_wq) {
2064 destroy_workqueue(trans_pcie->rba.alloc_wq);
2065 trans_pcie->rba.alloc_wq = NULL;
2066 }
2067
2068 if (trans_pcie->msix_enabled) {
2069 for (i = 0; i < trans_pcie->alloc_vecs; i++) {
2070 irq_set_affinity_hint(
2071 trans_pcie->msix_entries[i].vector,
2072 NULL);
2073 }
2074
2075 trans_pcie->msix_enabled = false;
2076 } else {
2077 iwl_pcie_free_ict(trans);
2078 }
2079
2080 free_netdev(trans_pcie->napi_dev);
2081
2082 iwl_pcie_free_invalid_tx_cmd(trans);
2083
2084 iwl_pcie_free_fw_monitor(trans);
2085
2086 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->pnvm_data,
2087 trans->dev);
2088 iwl_trans_pcie_free_pnvm_dram_regions(&trans_pcie->reduced_tables_data,
2089 trans->dev);
2090
2091 mutex_destroy(&trans_pcie->mutex);
2092
2093 if (trans_pcie->txqs.tso_hdr_page) {
2094 for_each_possible_cpu(i) {
2095 struct iwl_tso_hdr_page *p =
2096 per_cpu_ptr(trans_pcie->txqs.tso_hdr_page, i);
2097
2098 if (p && p->page)
2099 __free_page(p->page);
2100 }
2101
2102 free_percpu(trans_pcie->txqs.tso_hdr_page);
2103 }
2104
2105 iwl_trans_free(trans);
2106 }
2107
2108 struct iwl_trans_pcie_removal {
2109 struct pci_dev *pdev;
2110 struct work_struct work;
2111 bool rescan;
2112 };
2113
2114 static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
2115 {
2116 struct iwl_trans_pcie_removal *removal =
2117 container_of(wk, struct iwl_trans_pcie_removal, work);
2118 struct pci_dev *pdev = removal->pdev;
2119 static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
2120 struct pci_bus *bus;
2121
2122 pci_lock_rescan_remove();
2123
2124 bus = pdev->bus;
2125 /* in this case, something else already removed the device */
2126 if (!bus)
2127 goto out;
2128
2129 dev_err(&pdev->dev, "Device gone - attempting removal\n");
2130
2131 kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
2132
2133 pci_stop_and_remove_bus_device(pdev);
2134 pci_dev_put(pdev);
2135
2136 if (removal->rescan) {
2137 if (bus->parent)
2138 bus = bus->parent;
2139 pci_rescan_bus(bus);
2140 }
2141
2142 out:
2143 pci_unlock_rescan_remove();
2144
2145 kfree(removal);
2146 module_put(THIS_MODULE);
2147 }
2148
2149 void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan)
2150 {
2151 struct iwl_trans_pcie_removal *removal;
2152
2153 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2154 return;
2155
2156 IWL_ERR(trans, "Device gone - scheduling removal!\n");
2157 iwl_pcie_dump_csr(trans);
2158
2159 /*
2160 * get a module reference to avoid doing this
2161 * while unloading anyway and to avoid
2162 * scheduling a work with code that's being
2163 * removed.
2164 */
2165 if (!try_module_get(THIS_MODULE)) {
2166 IWL_ERR(trans,
2167 "Module is being unloaded - abort\n");
2168 return;
2169 }
2170
2171 removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2172 if (!removal) {
2173 module_put(THIS_MODULE);
2174 return;
2175 }
2176 /*
2177 * we don't need to clear this flag, because
2178 * the trans will be freed and reallocated.
2179 */
2180 set_bit(STATUS_TRANS_DEAD, &trans->status);
2181
2182 removal->pdev = to_pci_dev(trans->dev);
2183 removal->rescan = rescan;
2184 INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2185 pci_dev_get(removal->pdev);
2186 schedule_work(&removal->work);
2187 }
2188 EXPORT_SYMBOL(iwl_trans_pcie_remove);
2189
2190 /*
2191 * This version doesn't disable BHs but rather assumes they're
2192 * already disabled.
2193 */
2194 bool __iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
2195 {
2196 int ret;
2197 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2198 u32 write = CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ;
2199 u32 mask = CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2200 CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP;
2201 u32 poll = CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN;
2202
2203 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2204 return false;
2205
2206 spin_lock(&trans_pcie->reg_lock);
2207
2208 if (trans_pcie->cmd_hold_nic_awake)
2209 goto out;
2210
2211 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
2212 write = CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ;
2213 mask = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
2214 poll = CSR_GP_CNTRL_REG_FLAG_MAC_STATUS;
2215 }
2216
2217 /* this bit wakes up the NIC */
2218 __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL, write);
2219 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
2220 udelay(2);
2221
2222 /*
2223 * These bits say the device is running, and should keep running for
2224 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
2225 * but they do not indicate that embedded SRAM is restored yet;
2226 * HW with volatile SRAM must save/restore contents to/from
2227 * host DRAM when sleeping/waking for power-saving.
2228 * Each direction takes approximately 1/4 millisecond; with this
2229 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
2230 * series of register accesses are expected (e.g. reading Event Log),
2231 * to keep device from sleeping.
2232 *
2233 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
2234 * SRAM is okay/restored. We don't check that here because this call
2235 * is just for hardware register access; but GP1 MAC_SLEEP
2236 * check is a good idea before accessing the SRAM of HW with
2237 * volatile SRAM (e.g. reading Event Log).
2238 *
2239 * 5000 series and later (including 1000 series) have non-volatile SRAM,
2240 * and do not save/restore SRAM when power cycling.
2241 */
2242 ret = iwl_poll_bit(trans, CSR_GP_CNTRL, poll, mask, 15000);
2243 if (unlikely(ret < 0)) {
2244 u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
2245
2246 if (silent) {
2247 spin_unlock(&trans_pcie->reg_lock);
2248 return false;
2249 }
2250
2251 WARN_ONCE(1,
2252 "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
2253 cntrl);
2254
2255 iwl_trans_pcie_dump_regs(trans);
2256
2257 if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U)
2258 iwl_trans_pcie_remove(trans, false);
2259 else
2260 iwl_write32(trans, CSR_RESET,
2261 CSR_RESET_REG_FLAG_FORCE_NMI);
2262
2263 spin_unlock(&trans_pcie->reg_lock);
2264 return false;
2265 }
2266
2267 out:
2268 /*
2269 * Fool sparse by faking that we release the lock - sparse will
2270 * track nic_access anyway.
2271 */
2272 __release(&trans_pcie->reg_lock);
2273 return true;
2274 }
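/*
 * Typical caller pattern (sketch; mirrors iwl_trans_pcie_read_mem()
 * below): hold MAC_ACCESS_REQ across a burst of register accesses so
 * the device cannot go back to sleep between them.
 */
#if 0	/* example only */
	if (iwl_trans_grab_nic_access(trans)) {
		u32 v = iwl_read32(trans, CSR_GP_CNTRL);

		/* ... more register accesses while the MAC is awake ... */
		iwl_trans_release_nic_access(trans);
	}
#endif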
2275
2276 bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
2277 {
2278 bool ret;
2279
2280 local_bh_disable();
2281 ret = __iwl_trans_pcie_grab_nic_access(trans, false);
2282 if (ret) {
2283 /* keep BHs disabled until iwl_trans_pcie_release_nic_access */
2284 return ret;
2285 }
2286 local_bh_enable();
2287 return false;
2288 }
2289
2290 void __releases(nic_access_nobh)
2291 iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
2292 {
2293 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2294
2295 lockdep_assert_held(&trans_pcie->reg_lock);
2296
2297 /*
2298 * Fool sparse by faking that we acquire the lock - sparse will
2299 * track nic_access anyway.
2300 */
2301 __acquire(&trans_pcie->reg_lock);
2302
2303 if (trans_pcie->cmd_hold_nic_awake)
2304 goto out;
2305 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2306 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2307 CSR_GP_CNTRL_REG_FLAG_BZ_MAC_ACCESS_REQ);
2308 else
2309 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2310 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2311 /*
2312 * Above we read the CSR_GP_CNTRL register, which will flush
2313 * any previous writes, but we need the write that clears the
2314 * MAC_ACCESS_REQ bit to be performed before any other writes
2315 * scheduled on different CPUs (after we drop reg_lock).
2316 */
2317 out:
2318 __release(nic_access_nobh);
2319 spin_unlock_bh(&trans_pcie->reg_lock);
2320 }
2321
2322 int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2323 void *buf, int dwords)
2324 {
2325 #define IWL_MAX_HW_ERRS 5
2326 unsigned int num_consec_hw_errors = 0;
2327 int offs = 0;
2328 u32 *vals = buf;
2329
2330 while (offs < dwords) {
2331 /* limit the time we spin here under lock to 1/2s */
2332 unsigned long end = jiffies + HZ / 2;
2333 bool resched = false;
2334
2335 if (iwl_trans_grab_nic_access(trans)) {
2336 iwl_write32(trans, HBUS_TARG_MEM_RADDR,
2337 addr + 4 * offs);
2338
2339 while (offs < dwords) {
2340 vals[offs] = iwl_read32(trans,
2341 HBUS_TARG_MEM_RDAT);
2342
2343 if (iwl_trans_is_hw_error_value(vals[offs]))
2344 num_consec_hw_errors++;
2345 else
2346 num_consec_hw_errors = 0;
2347
2348 if (num_consec_hw_errors >= IWL_MAX_HW_ERRS) {
2349 iwl_trans_release_nic_access(trans);
2350 return -EIO;
2351 }
2352
2353 offs++;
2354
2355 if (time_after(jiffies, end)) {
2356 resched = true;
2357 break;
2358 }
2359 }
2360 iwl_trans_release_nic_access(trans);
2361
2362 if (resched)
2363 cond_resched();
2364 } else {
2365 return -EBUSY;
2366 }
2367 }
2368
2369 return 0;
2370 }
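/*
 * Usage sketch (illustrative; 'base' is a placeholder SRAM address):
 * the return value is -EBUSY if the NIC could not be woken, -EIO after
 * IWL_MAX_HW_ERRS consecutive all-ones reads, and 0 on success.
 */
#if 0	/* example only */
	u32 words[4];
	int err = iwl_trans_pcie_read_mem(trans, base, words,
					  ARRAY_SIZE(words));
#endif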
2371
2372 int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2373 const void *buf, int dwords)
2374 {
2375 int offs, ret = 0;
2376 const u32 *vals = buf;
2377
2378 if (iwl_trans_grab_nic_access(trans)) {
2379 iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2380 for (offs = 0; offs < dwords; offs++)
2381 iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2382 vals ? vals[offs] : 0);
2383 iwl_trans_release_nic_access(trans);
2384 } else {
2385 ret = -EBUSY;
2386 }
2387 return ret;
2388 }
2389
2390 int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
2391 u32 *val)
2392 {
2393 return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
2394 ofs, val);
2395 }
2396
2397 #define IWL_FLUSH_WAIT_MS 2000
2398
2399 int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2400 struct iwl_trans_rxq_dma_data *data)
2401 {
2402 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2403
2404 if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2405 return -EINVAL;
2406
2407 data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2408 data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2409 data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2410 data->fr_bd_wid = 0;
2411
2412 return 0;
2413 }
2414
2415 int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2416 {
2417 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2418 struct iwl_txq *txq;
2419 unsigned long now = jiffies;
2420 bool overflow_tx;
2421 u8 wr_ptr;
2422
2423 /* Make sure the NIC is still alive in the bus */
2424 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2425 return -ENODEV;
2426
2427 if (!test_bit(txq_idx, trans_pcie->txqs.queue_used))
2428 return -EINVAL;
2429
2430 IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
2431 txq = trans_pcie->txqs.txq[txq_idx];
2432
2433 spin_lock_bh(&txq->lock);
2434 overflow_tx = txq->overflow_tx ||
2435 !skb_queue_empty(&txq->overflow_q);
2436 spin_unlock_bh(&txq->lock);
2437
2438 wr_ptr = READ_ONCE(txq->write_ptr);
2439
2440 while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2441 overflow_tx) &&
2442 !time_after(jiffies,
2443 now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
2444 u8 write_ptr = READ_ONCE(txq->write_ptr);
2445
2446 /*
2447 * If write pointer moved during the wait, warn only
2448 * if the TX came from op mode. In case TX came from
2449 * trans layer (overflow TX) don't warn.
2450 */
2451 if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
2452 "WR pointer moved while flushing %d -> %d\n",
2453 wr_ptr, write_ptr))
2454 return -ETIMEDOUT;
2455 wr_ptr = write_ptr;
2456
2457 usleep_range(1000, 2000);
2458
2459 spin_lock_bh(&txq->lock);
2460 overflow_tx = txq->overflow_tx ||
2461 !skb_queue_empty(&txq->overflow_q);
2462 spin_unlock_bh(&txq->lock);
2463 }
2464
2465 if (txq->read_ptr != txq->write_ptr) {
2466 IWL_ERR(trans,
2467 "fail to flush all tx fifo queues Q %d\n", txq_idx);
2468 iwl_txq_log_scd_error(trans, txq);
2469 return -ETIMEDOUT;
2470 }
2471
2472 IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
2473
2474 return 0;
2475 }
2476
2477 int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2478 {
2479 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2480 int cnt;
2481 int ret = 0;
2482
2483 /* waiting for all the TX frames to complete might take a while */
2484 for (cnt = 0;
2485 cnt < trans->trans_cfg->base_params->num_of_queues;
2486 cnt++) {
2487
2488 if (cnt == trans_pcie->txqs.cmd.q_id)
2489 continue;
2490 if (!test_bit(cnt, trans_pcie->txqs.queue_used))
2491 continue;
2492 if (!(BIT(cnt) & txq_bm))
2493 continue;
2494
2495 ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2496 if (ret)
2497 break;
2498 }
2499
2500 return ret;
2501 }
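/*
 * Example (illustrative): a caller passing txq_bm = BIT(3) | BIT(5)
 * waits only on queues 3 and 5; the command queue and queues that are
 * not in use are always skipped.
 */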
2502
2503 void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2504 u32 mask, u32 value)
2505 {
2506 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2507
2508 spin_lock_bh(&trans_pcie->reg_lock);
2509 __iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
2510 spin_unlock_bh(&trans_pcie->reg_lock);
2511 }
2512
2513 static const char *get_csr_string(int cmd)
2514 {
2515 #define IWL_CMD(x) case x: return #x
2516 switch (cmd) {
2517 IWL_CMD(CSR_HW_IF_CONFIG_REG);
2518 IWL_CMD(CSR_INT_COALESCING);
2519 IWL_CMD(CSR_INT);
2520 IWL_CMD(CSR_INT_MASK);
2521 IWL_CMD(CSR_FH_INT_STATUS);
2522 IWL_CMD(CSR_GPIO_IN);
2523 IWL_CMD(CSR_RESET);
2524 IWL_CMD(CSR_GP_CNTRL);
2525 IWL_CMD(CSR_HW_REV);
2526 IWL_CMD(CSR_EEPROM_REG);
2527 IWL_CMD(CSR_EEPROM_GP);
2528 IWL_CMD(CSR_OTP_GP_REG);
2529 IWL_CMD(CSR_GIO_REG);
2530 IWL_CMD(CSR_GP_UCODE_REG);
2531 IWL_CMD(CSR_GP_DRIVER_REG);
2532 IWL_CMD(CSR_UCODE_DRV_GP1);
2533 IWL_CMD(CSR_UCODE_DRV_GP2);
2534 IWL_CMD(CSR_LED_REG);
2535 IWL_CMD(CSR_DRAM_INT_TBL_REG);
2536 IWL_CMD(CSR_GIO_CHICKEN_BITS);
2537 IWL_CMD(CSR_ANA_PLL_CFG);
2538 IWL_CMD(CSR_HW_REV_WA_REG);
2539 IWL_CMD(CSR_MONITOR_STATUS_REG);
2540 IWL_CMD(CSR_DBG_HPET_MEM_REG);
2541 default:
2542 return "UNKNOWN";
2543 }
2544 #undef IWL_CMD
2545 }
2546
2547 void iwl_pcie_dump_csr(struct iwl_trans *trans)
2548 {
2549 int i;
2550 static const u32 csr_tbl[] = {
2551 CSR_HW_IF_CONFIG_REG,
2552 CSR_INT_COALESCING,
2553 CSR_INT,
2554 CSR_INT_MASK,
2555 CSR_FH_INT_STATUS,
2556 CSR_GPIO_IN,
2557 CSR_RESET,
2558 CSR_GP_CNTRL,
2559 CSR_HW_REV,
2560 CSR_EEPROM_REG,
2561 CSR_EEPROM_GP,
2562 CSR_OTP_GP_REG,
2563 CSR_GIO_REG,
2564 CSR_GP_UCODE_REG,
2565 CSR_GP_DRIVER_REG,
2566 CSR_UCODE_DRV_GP1,
2567 CSR_UCODE_DRV_GP2,
2568 CSR_LED_REG,
2569 CSR_DRAM_INT_TBL_REG,
2570 CSR_GIO_CHICKEN_BITS,
2571 CSR_ANA_PLL_CFG,
2572 CSR_MONITOR_STATUS_REG,
2573 CSR_HW_REV_WA_REG,
2574 CSR_DBG_HPET_MEM_REG
2575 };
2576 IWL_ERR(trans, "CSR values:\n");
2577 IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2578 "CSR_INT_PERIODIC_REG)\n");
2579 for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2580 IWL_ERR(trans, " %25s: 0X%08x\n",
2581 get_csr_string(csr_tbl[i]),
2582 iwl_read32(trans, csr_tbl[i]));
2583 }
2584 }
2585
2586 #ifdef CONFIG_IWLWIFI_DEBUGFS
2587 /* creation and removal of files */
2588 #define DEBUGFS_ADD_FILE(name, parent, mode) do { \
2589 debugfs_create_file(#name, mode, parent, trans, \
2590 &iwl_dbgfs_##name##_ops); \
2591 } while (0)
2592
2593 /* file operation */
2594 #define DEBUGFS_READ_FILE_OPS(name) \
2595 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2596 .read = iwl_dbgfs_##name##_read, \
2597 .open = simple_open, \
2598 .llseek = generic_file_llseek, \
2599 };
2600
2601 #define DEBUGFS_WRITE_FILE_OPS(name) \
2602 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2603 .write = iwl_dbgfs_##name##_write, \
2604 .open = simple_open, \
2605 .llseek = generic_file_llseek, \
2606 };
2607
2608 #define DEBUGFS_READ_WRITE_FILE_OPS(name) \
2609 static const struct file_operations iwl_dbgfs_##name##_ops = { \
2610 .write = iwl_dbgfs_##name##_write, \
2611 .read = iwl_dbgfs_##name##_read, \
2612 .open = simple_open, \
2613 .llseek = generic_file_llseek, \
2614 };
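/*
 * For example, DEBUGFS_READ_WRITE_FILE_OPS(interrupt) below expands to
 * a 'struct file_operations iwl_dbgfs_interrupt_ops' wired to
 * iwl_dbgfs_interrupt_read()/iwl_dbgfs_interrupt_write(), which
 * DEBUGFS_ADD_FILE(interrupt, dir, 0600) then registers as the
 * "interrupt" debugfs file.
 */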
2615
2616 struct iwl_dbgfs_tx_queue_priv {
2617 struct iwl_trans *trans;
2618 };
2619
2620 struct iwl_dbgfs_tx_queue_state {
2621 loff_t pos;
2622 };
2623
2624 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
2625 {
2626 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2627 struct iwl_dbgfs_tx_queue_state *state;
2628
2629 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2630 return NULL;
2631
2632 state = kmalloc(sizeof(*state), GFP_KERNEL);
2633 if (!state)
2634 return NULL;
2635 state->pos = *pos;
2636 return state;
2637 }
2638
2639 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
2640 void *v, loff_t *pos)
2641 {
2642 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2643 struct iwl_dbgfs_tx_queue_state *state = v;
2644
2645 *pos = ++state->pos;
2646
2647 if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2648 return NULL;
2649
2650 return state;
2651 }
2652
2653 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
2654 {
2655 kfree(v);
2656 }
2657
2658 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
2659 {
2660 struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2661 struct iwl_dbgfs_tx_queue_state *state = v;
2662 struct iwl_trans *trans = priv->trans;
2663 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2664 struct iwl_txq *txq = trans_pcie->txqs.txq[state->pos];
2665
2666 seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
2667 (unsigned int)state->pos,
2668 !!test_bit(state->pos, trans_pcie->txqs.queue_used),
2669 !!test_bit(state->pos, trans_pcie->txqs.queue_stopped));
2670 if (txq)
2671 seq_printf(seq,
2672 "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
2673 txq->read_ptr, txq->write_ptr,
2674 txq->need_update, txq->frozen,
2675 txq->n_window, txq->ampdu);
2676 else
2677 seq_puts(seq, "(unallocated)");
2678
2679 if (state->pos == trans_pcie->txqs.cmd.q_id)
2680 seq_puts(seq, " (HCMD)");
2681 seq_puts(seq, "\n");
2682
2683 return 0;
2684 }
2685
2686 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
2687 .start = iwl_dbgfs_tx_queue_seq_start,
2688 .next = iwl_dbgfs_tx_queue_seq_next,
2689 .stop = iwl_dbgfs_tx_queue_seq_stop,
2690 .show = iwl_dbgfs_tx_queue_seq_show,
2691 };
2692
2693 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
2694 {
2695 struct iwl_dbgfs_tx_queue_priv *priv;
2696
2697 priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
2698 sizeof(*priv));
2699
2700 if (!priv)
2701 return -ENOMEM;
2702
2703 priv->trans = inode->i_private;
2704 return 0;
2705 }
2706
2707 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2708 char __user *user_buf,
2709 size_t count, loff_t *ppos)
2710 {
2711 struct iwl_trans *trans = file->private_data;
2712 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2713 char *buf;
2714 int pos = 0, i, ret;
2715 size_t bufsz;
2716
2717 bufsz = sizeof(char) * 121 * trans->num_rx_queues;
2718
2719 if (!trans_pcie->rxq)
2720 return -EAGAIN;
2721
2722 buf = kzalloc(bufsz, GFP_KERNEL);
2723 if (!buf)
2724 return -ENOMEM;
2725
2726 for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2727 struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2728
2729 spin_lock_bh(&rxq->lock);
2730
2731 pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2732 i);
2733 pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2734 rxq->read);
2735 pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2736 rxq->write);
2737 pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2738 rxq->write_actual);
2739 pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2740 rxq->need_update);
2741 pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2742 rxq->free_count);
2743 if (rxq->rb_stts) {
2744 u32 r = iwl_get_closed_rb_stts(trans, rxq);
2745 pos += scnprintf(buf + pos, bufsz - pos,
2746 "\tclosed_rb_num: %u\n", r);
2747 } else {
2748 pos += scnprintf(buf + pos, bufsz - pos,
2749 "\tclosed_rb_num: Not Allocated\n");
2750 }
2751 spin_unlock_bh(&rxq->lock);
2752 }
2753 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2754 kfree(buf);
2755
2756 return ret;
2757 }
2758
2759 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2760 char __user *user_buf,
2761 size_t count, loff_t *ppos)
2762 {
2763 struct iwl_trans *trans = file->private_data;
2764 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2765 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2766
2767 int pos = 0;
2768 char *buf;
2769 int bufsz = 24 * 64; /* 24 items * 64 char per item */
2770 ssize_t ret;
2771
2772 buf = kzalloc(bufsz, GFP_KERNEL);
2773 if (!buf)
2774 return -ENOMEM;
2775
2776 pos += scnprintf(buf + pos, bufsz - pos,
2777 "Interrupt Statistics Report:\n");
2778
2779 pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2780 isr_stats->hw);
2781 pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2782 isr_stats->sw);
2783 if (isr_stats->sw || isr_stats->hw) {
2784 pos += scnprintf(buf + pos, bufsz - pos,
2785 "\tLast Restarting Code: 0x%X\n",
2786 isr_stats->err_code);
2787 }
2788 #ifdef CONFIG_IWLWIFI_DEBUG
2789 pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2790 isr_stats->sch);
2791 pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2792 isr_stats->alive);
2793 #endif
2794 pos += scnprintf(buf + pos, bufsz - pos,
2795 "HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2796
2797 pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2798 isr_stats->ctkill);
2799
2800 pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2801 isr_stats->wakeup);
2802
2803 pos += scnprintf(buf + pos, bufsz - pos,
2804 "Rx command responses:\t\t %u\n", isr_stats->rx);
2805
2806 pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2807 isr_stats->tx);
2808
2809 pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2810 isr_stats->unhandled);
2811
2812 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2813 kfree(buf);
2814 return ret;
2815 }
2816
2817 static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2818 const char __user *user_buf,
2819 size_t count, loff_t *ppos)
2820 {
2821 struct iwl_trans *trans = file->private_data;
2822 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2823 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2824 u32 reset_flag;
2825 int ret;
2826
2827 ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2828 if (ret)
2829 return ret;
2830 if (reset_flag == 0)
2831 memset(isr_stats, 0, sizeof(*isr_stats));
2832
2833 return count;
2834 }
2835
2836 static ssize_t iwl_dbgfs_csr_write(struct file *file,
2837 const char __user *user_buf,
2838 size_t count, loff_t *ppos)
2839 {
2840 struct iwl_trans *trans = file->private_data;
2841
2842 iwl_pcie_dump_csr(trans);
2843
2844 return count;
2845 }
2846
2847 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2848 char __user *user_buf,
2849 size_t count, loff_t *ppos)
2850 {
2851 struct iwl_trans *trans = file->private_data;
2852 char *buf = NULL;
2853 ssize_t ret;
2854
2855 ret = iwl_dump_fh(trans, &buf);
2856 if (ret < 0)
2857 return ret;
2858 if (!buf)
2859 return -EINVAL;
2860 ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2861 kfree(buf);
2862 return ret;
2863 }
2864
2865 static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2866 char __user *user_buf,
2867 size_t count, loff_t *ppos)
2868 {
2869 struct iwl_trans *trans = file->private_data;
2870 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2871 char buf[100];
2872 int pos;
2873
2874 pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
2875 trans_pcie->debug_rfkill,
2876 !(iwl_read32(trans, CSR_GP_CNTRL) &
2877 CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
2878
2879 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2880 }
2881
2882 static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
2883 const char __user *user_buf,
2884 size_t count, loff_t *ppos)
2885 {
2886 struct iwl_trans *trans = file->private_data;
2887 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2888 bool new_value;
2889 int ret;
2890
2891 ret = kstrtobool_from_user(user_buf, count, &new_value);
2892 if (ret)
2893 return ret;
2894 if (new_value == trans_pcie->debug_rfkill)
2895 return count;
2896 IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2897 trans_pcie->debug_rfkill, new_value);
2898 trans_pcie->debug_rfkill = new_value;
2899 iwl_pcie_handle_rfkill_irq(trans, false);
2900
2901 return count;
2902 }
2903
2904 static int iwl_dbgfs_monitor_data_open(struct inode *inode,
2905 struct file *file)
2906 {
2907 struct iwl_trans *trans = inode->i_private;
2908 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2909
2910 if (!trans->dbg.dest_tlv ||
2911 trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
2912 IWL_ERR(trans, "Debug destination is not set to DRAM\n");
2913 return -ENOENT;
2914 }
2915
2916 if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2917 return -EBUSY;
2918
2919 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2920 return simple_open(inode, file);
2921 }
2922
2923 static int iwl_dbgfs_monitor_data_release(struct inode *inode,
2924 struct file *file)
2925 {
2926 struct iwl_trans_pcie *trans_pcie =
2927 IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
2928
2929 if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2930 trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
2931 return 0;
2932 }
2933
2934 static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
2935 void *buf, ssize_t *size,
2936 ssize_t *bytes_copied)
2937 {
2938 ssize_t buf_size_left = count - *bytes_copied;
2939
2940 buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2941 if (*size > buf_size_left)
2942 *size = buf_size_left;
2943
2944 *size -= copy_to_user(user_buf, buf, *size);
2945 *bytes_copied += *size;
2946
2947 if (buf_size_left == *size)
2948 return true;
2949 return false;
2950 }
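/*
 * Worked example (illustrative): with count = 10 and *bytes_copied = 0,
 * buf_size_left is rounded down to 8 (a multiple of sizeof(u32)); a
 * requested *size of 64 is clamped to 8, copied, and the function
 * returns true to signal that the user buffer is full.
 */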
2951
2952 static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
2953 char __user *user_buf,
2954 size_t count, loff_t *ppos)
2955 {
2956 struct iwl_trans *trans = file->private_data;
2957 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2958 u8 *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
2959 struct cont_rec *data = &trans_pcie->fw_mon_data;
2960 u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
2961 ssize_t size, bytes_copied = 0;
2962 bool b_full;
2963
2964 if (trans->dbg.dest_tlv) {
2965 write_ptr_addr =
2966 le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
2967 wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
2968 } else {
2969 write_ptr_addr = MON_BUFF_WRPTR;
2970 wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
2971 }
2972
2973 if (unlikely(!trans->dbg.rec_on))
2974 return 0;
2975
2976 mutex_lock(&data->mutex);
2977 if (data->state ==
2978 IWL_FW_MON_DBGFS_STATE_DISABLED) {
2979 mutex_unlock(&data->mutex);
2980 return 0;
2981 }
2982
2983 /* write_ptr position in bytes rather than DWs */
2984 write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
2985 wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
2986
2987 if (data->prev_wrap_cnt == wrap_cnt) {
2988 size = write_ptr - data->prev_wr_ptr;
2989 curr_buf = cpu_addr + data->prev_wr_ptr;
2990 b_full = iwl_write_to_user_buf(user_buf, count,
2991 curr_buf, &size,
2992 &bytes_copied);
2993 data->prev_wr_ptr += size;
2994
2995 } else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2996 write_ptr < data->prev_wr_ptr) {
2997 size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
2998 curr_buf = cpu_addr + data->prev_wr_ptr;
2999 b_full = iwl_write_to_user_buf(user_buf, count,
3000 curr_buf, &size,
3001 &bytes_copied);
3002 data->prev_wr_ptr += size;
3003
3004 if (!b_full) {
3005 size = write_ptr;
3006 b_full = iwl_write_to_user_buf(user_buf, count,
3007 cpu_addr, &size,
3008 &bytes_copied);
3009 data->prev_wr_ptr = size;
3010 data->prev_wrap_cnt++;
3011 }
3012 } else {
3013 if (data->prev_wrap_cnt == wrap_cnt - 1 &&
3014 write_ptr > data->prev_wr_ptr)
3015 IWL_WARN(trans,
3016 "write pointer passed previous write pointer, start copying from the beginning\n");
3017 else if (!unlikely(data->prev_wrap_cnt == 0 &&
3018 data->prev_wr_ptr == 0))
3019 IWL_WARN(trans,
3020 "monitor data is out of sync, start copying from the beginning\n");
3021
3022 size = write_ptr;
3023 b_full = iwl_write_to_user_buf(user_buf, count,
3024 cpu_addr, &size,
3025 &bytes_copied);
3026 data->prev_wr_ptr = size;
3027 data->prev_wrap_cnt = wrap_cnt;
3028 }
3029
3030 mutex_unlock(&data->mutex);
3031
3032 return bytes_copied;
3033 }
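/*
 * Reader walk-through (illustrative): if the wrap counter is unchanged,
 * only [prev_wr_ptr, write_ptr) is new data.  If the device wrapped
 * exactly once and write_ptr is behind prev_wr_ptr, the tail
 * [prev_wr_ptr, fw_mon.size) is copied first and, if room remains,
 * [0, write_ptr) follows.  Anything else means the reader lost sync,
 * so copying restarts from the beginning of the buffer.
 */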
3034
3035 static ssize_t iwl_dbgfs_rf_read(struct file *file,
3036 char __user *user_buf,
3037 size_t count, loff_t *ppos)
3038 {
3039 struct iwl_trans *trans = file->private_data;
3040 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3041
3042 if (!trans_pcie->rf_name[0])
3043 return -ENODEV;
3044
3045 return simple_read_from_buffer(user_buf, count, ppos,
3046 trans_pcie->rf_name,
3047 strlen(trans_pcie->rf_name));
3048 }
3049
3050 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
3051 DEBUGFS_READ_FILE_OPS(fh_reg);
3052 DEBUGFS_READ_FILE_OPS(rx_queue);
3053 DEBUGFS_WRITE_FILE_OPS(csr);
3054 DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
3055 DEBUGFS_READ_FILE_OPS(rf);
3056
3057 static const struct file_operations iwl_dbgfs_tx_queue_ops = {
3058 .owner = THIS_MODULE,
3059 .open = iwl_dbgfs_tx_queue_open,
3060 .read = seq_read,
3061 .llseek = seq_lseek,
3062 .release = seq_release_private,
3063 };
3064
3065 static const struct file_operations iwl_dbgfs_monitor_data_ops = {
3066 .read = iwl_dbgfs_monitor_data_read,
3067 .open = iwl_dbgfs_monitor_data_open,
3068 .release = iwl_dbgfs_monitor_data_release,
3069 };
3070
3071 /* Create the debugfs files and directories */
3072 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
3073 {
3074 struct dentry *dir = trans->dbgfs_dir;
3075
3076 DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
3077 DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
3078 DEBUGFS_ADD_FILE(interrupt, dir, 0600);
3079 DEBUGFS_ADD_FILE(csr, dir, 0200);
3080 DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
3081 DEBUGFS_ADD_FILE(rfkill, dir, 0600);
3082 DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
3083 DEBUGFS_ADD_FILE(rf, dir, 0400);
3084 }
3085
3086 void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
3087 {
3088 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3089 struct cont_rec *data = &trans_pcie->fw_mon_data;
3090
3091 mutex_lock(&data->mutex);
3092 data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
3093 mutex_unlock(&data->mutex);
3094 }
3095 #endif /*CONFIG_IWLWIFI_DEBUGFS */
3096
3097 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
3098 {
3099 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3100 u32 cmdlen = 0;
3101 int i;
3102
3103 for (i = 0; i < trans_pcie->txqs.tfd.max_tbs; i++)
3104 cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
3105
3106 return cmdlen;
3107 }
3108
3109 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
3110 struct iwl_fw_error_dump_data **data,
3111 int allocated_rb_nums)
3112 {
3113 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3114 int max_len = trans_pcie->rx_buf_bytes;
3115 /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
3116 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3117 u32 i, r, j, rb_len = 0;
3118
3119 spin_lock_bh(&rxq->lock);
3120
3121 r = iwl_get_closed_rb_stts(trans, rxq);
3122
3123 for (i = rxq->read, j = 0;
3124 i != r && j < allocated_rb_nums;
3125 i = (i + 1) & RX_QUEUE_MASK, j++) {
3126 struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
3127 struct iwl_fw_error_dump_rb *rb;
3128
3129 dma_sync_single_for_cpu(trans->dev, rxb->page_dma,
3130 max_len, DMA_FROM_DEVICE);
3131
3132 rb_len += sizeof(**data) + sizeof(*rb) + max_len;
3133
3134 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
3135 (*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
3136 rb = (void *)(*data)->data;
3137 rb->index = cpu_to_le32(i);
3138 memcpy(rb->data, page_address(rxb->page), max_len);
3139
3140 *data = iwl_fw_error_next_data(*data);
3141 }
3142
3143 spin_unlock_bh(&rxq->lock);
3144
3145 return rb_len;
3146 }
3147 #define IWL_CSR_TO_DUMP (0x250)
3148
3149 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
3150 struct iwl_fw_error_dump_data **data)
3151 {
3152 u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
3153 __le32 *val;
3154 int i;
3155
3156 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
3157 (*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
3158 val = (void *)(*data)->data;
3159
3160 for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
3161 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3162
3163 *data = iwl_fw_error_next_data(*data);
3164
3165 return csr_len;
3166 }
3167
3168 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
3169 struct iwl_fw_error_dump_data **data)
3170 {
3171 u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
3172 __le32 *val;
3173 int i;
3174
3175 if (!iwl_trans_grab_nic_access(trans))
3176 return 0;
3177
3178 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
3179 (*data)->len = cpu_to_le32(fh_regs_len);
3180 val = (void *)(*data)->data;
3181
3182 if (!trans->trans_cfg->gen2)
3183 for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
3184 i += sizeof(u32))
3185 *val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
3186 else
3187 for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
3188 i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
3189 i += sizeof(u32))
3190 *val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
3191 i));
3192
3193 iwl_trans_release_nic_access(trans);
3194
3195 *data = iwl_fw_error_next_data(*data);
3196
3197 return sizeof(**data) + fh_regs_len;
3198 }
3199
3200 static u32
3201 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
3202 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
3203 u32 monitor_len)
3204 {
3205 u32 buf_size_in_dwords = (monitor_len >> 2);
3206 u32 *buffer = (u32 *)fw_mon_data->data;
3207 u32 i;
3208
3209 if (!iwl_trans_grab_nic_access(trans))
3210 return 0;
3211
3212 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
3213 for (i = 0; i < buf_size_in_dwords; i++)
3214 buffer[i] = iwl_read_umac_prph_no_grab(trans,
3215 MON_DMARB_RD_DATA_ADDR);
3216 iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
3217
3218 iwl_trans_release_nic_access(trans);
3219
3220 return monitor_len;
3221 }
3222
3223 static void
3224 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3225 struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3226 {
3227 u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3228
3229 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3230 base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3231 base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3232 write_ptr = DBGC_CUR_DBGBUF_STATUS;
3233 wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3234 } else if (trans->dbg.dest_tlv) {
3235 write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3236 wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3237 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3238 } else {
3239 base = MON_BUFF_BASE_ADDR;
3240 write_ptr = MON_BUFF_WRPTR;
3241 wrap_cnt = MON_BUFF_CYCLE_CNT;
3242 }
3243
3244 write_ptr_val = iwl_read_prph(trans, write_ptr);
3245 fw_mon_data->fw_mon_cycle_cnt =
3246 cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3247 fw_mon_data->fw_mon_base_ptr =
3248 cpu_to_le32(iwl_read_prph(trans, base));
3249 if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3250 fw_mon_data->fw_mon_base_high_ptr =
3251 cpu_to_le32(iwl_read_prph(trans, base_high));
3252 write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3253 /* convert wrtPtr to DWs, to align with all HWs */
3254 write_ptr_val >>= 2;
3255 }
3256 fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3257 }
3258
3259 static u32
3260 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3261 struct iwl_fw_error_dump_data **data,
3262 u32 monitor_len)
3263 {
3264 struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3265 u32 len = 0;
3266
3267 if (trans->dbg.dest_tlv ||
3268 (fw_mon->size &&
3269 (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3270 trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3271 struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3272
3273 (*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3274 fw_mon_data = (void *)(*data)->data;
3275
3276 iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3277
3278 len += sizeof(**data) + sizeof(*fw_mon_data);
3279 if (fw_mon->size) {
3280 memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3281 monitor_len = fw_mon->size;
3282 } else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3283 u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3284 /*
3285 * Update pointers to reflect actual values after
3286 * shifting
3287 */
3288 if (trans->dbg.dest_tlv->version) {
3289 base = (iwl_read_prph(trans, base) &
3290 IWL_LDBG_M2S_BUF_BA_MSK) <<
3291 trans->dbg.dest_tlv->base_shift;
3292 base *= IWL_M2S_UNIT_SIZE;
3293 base += trans->cfg->smem_offset;
3294 } else {
3295 base = iwl_read_prph(trans, base) <<
3296 trans->dbg.dest_tlv->base_shift;
3297 }
3298
3299 iwl_trans_read_mem(trans, base, fw_mon_data->data,
3300 monitor_len / sizeof(u32));
3301 } else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3302 monitor_len =
3303 iwl_trans_pci_dump_marbh_monitor(trans,
3304 fw_mon_data,
3305 monitor_len);
3306 } else {
3307 /* Didn't match anything - output no monitor data */
3308 monitor_len = 0;
3309 }
3310
3311 len += monitor_len;
3312 (*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3313 }
3314
3315 return len;
3316 }
3317
3318 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3319 {
3320 if (trans->dbg.fw_mon.size) {
3321 *len += sizeof(struct iwl_fw_error_dump_data) +
3322 sizeof(struct iwl_fw_error_dump_fw_mon) +
3323 trans->dbg.fw_mon.size;
3324 return trans->dbg.fw_mon.size;
3325 } else if (trans->dbg.dest_tlv) {
3326 u32 base, end, cfg_reg, monitor_len;
3327
3328 if (trans->dbg.dest_tlv->version == 1) {
3329 cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3330 cfg_reg = iwl_read_prph(trans, cfg_reg);
3331 base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3332 trans->dbg.dest_tlv->base_shift;
3333 base *= IWL_M2S_UNIT_SIZE;
3334 base += trans->cfg->smem_offset;
3335
3336 monitor_len =
3337 (cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3338 trans->dbg.dest_tlv->end_shift;
3339 monitor_len *= IWL_M2S_UNIT_SIZE;
3340 } else {
3341 base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3342 end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3343
3344 base = iwl_read_prph(trans, base) <<
3345 trans->dbg.dest_tlv->base_shift;
3346 end = iwl_read_prph(trans, end) <<
3347 trans->dbg.dest_tlv->end_shift;
3348
3349 /* Make "end" point to the actual end */
3350 if (trans->trans_cfg->device_family >=
3351 IWL_DEVICE_FAMILY_8000 ||
3352 trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3353 end += (1 << trans->dbg.dest_tlv->end_shift);
3354 monitor_len = end - base;
3355 }
3356 *len += sizeof(struct iwl_fw_error_dump_data) +
3357 sizeof(struct iwl_fw_error_dump_fw_mon) +
3358 monitor_len;
3359 return monitor_len;
3360 }
3361 return 0;
3362 }
3363
3364 struct iwl_trans_dump_data *
3365 iwl_trans_pcie_dump_data(struct iwl_trans *trans, u32 dump_mask,
3366 const struct iwl_dump_sanitize_ops *sanitize_ops,
3367 void *sanitize_ctx)
3368 {
3369 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3370 struct iwl_fw_error_dump_data *data;
3371 struct iwl_txq *cmdq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
3372 struct iwl_fw_error_dump_txcmd *txcmd;
3373 struct iwl_trans_dump_data *dump_data;
3374 u32 len, num_rbs = 0, monitor_len = 0;
3375 int i, ptr;
3376 bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3377 !trans->trans_cfg->mq_rx_supported &&
3378 dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3379
3380 if (!dump_mask)
3381 return NULL;
3382
3383 /* transport dump header */
3384 len = sizeof(*dump_data);
3385
3386 /* host commands */
3387 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3388 len += sizeof(*data) +
3389 cmdq->n_window * (sizeof(*txcmd) +
3390 TFD_MAX_PAYLOAD_SIZE);
3391
3392 /* FW monitor */
3393 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3394 monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3395
3396 /* CSR registers */
3397 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3398 len += sizeof(*data) + IWL_CSR_TO_DUMP;
3399
3400 /* FH registers */
3401 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3402 if (trans->trans_cfg->gen2)
3403 len += sizeof(*data) +
3404 (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3405 iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3406 else
3407 len += sizeof(*data) +
3408 (FH_MEM_UPPER_BOUND -
3409 FH_MEM_LOWER_BOUND);
3410 }
3411
3412 if (dump_rbs) {
3413 /* Dumping RBs is supported only for pre-9000 devices (1 queue) */
3414 struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3415 /* RBs */
3416 spin_lock_bh(&rxq->lock);
3417 num_rbs = iwl_get_closed_rb_stts(trans, rxq);
3418 num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3419 spin_unlock_bh(&rxq->lock);
3420
3421 len += num_rbs * (sizeof(*data) +
3422 sizeof(struct iwl_fw_error_dump_rb) +
3423 (PAGE_SIZE << trans_pcie->rx_page_order));
3424 }
3425
3426 /* Paged memory for gen2 HW */
3427 if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3428 for (i = 0; i < trans->init_dram.paging_cnt; i++)
3429 len += sizeof(*data) +
3430 sizeof(struct iwl_fw_error_dump_paging) +
3431 trans->init_dram.paging[i].size;
3432
3433 dump_data = vzalloc(len);
3434 if (!dump_data)
3435 return NULL;
3436
3437 len = 0;
3438 data = (void *)dump_data->data;
3439
3440 if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
3441 u16 tfd_size = trans_pcie->txqs.tfd.size;
3442
3443 data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3444 txcmd = (void *)data->data;
3445 spin_lock_bh(&cmdq->lock);
3446 ptr = cmdq->write_ptr;
		for (i = 0; i < cmdq->n_window; i++) {
			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
			u8 tfdidx;
			u32 caplen, cmdlen;

			if (trans->trans_cfg->gen2)
				tfdidx = idx;
			else
				tfdidx = ptr;

			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
							   (u8 *)cmdq->tfds +
							   tfd_size * tfdidx);
			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);

			if (cmdlen) {
				len += sizeof(*txcmd) + caplen;
				txcmd->cmdlen = cpu_to_le32(cmdlen);
				txcmd->caplen = cpu_to_le32(caplen);
				memcpy(txcmd->data, cmdq->entries[idx].cmd,
				       caplen);
				if (sanitize_ops && sanitize_ops->frob_hcmd)
					sanitize_ops->frob_hcmd(sanitize_ctx,
								txcmd->data,
								caplen);
				txcmd = (void *)((u8 *)txcmd->data + caplen);
			}

			ptr = iwl_txq_dec_wrap(trans, ptr);
		}
		spin_unlock_bh(&cmdq->lock);

		data->len = cpu_to_le32(len);
		len += sizeof(*data);
		data = iwl_fw_error_next_data(data);
	}

	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
		len += iwl_trans_pcie_dump_csr(trans, &data);
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
	if (dump_rbs)
		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);

	/* Paged memory for gen2 HW */
	if (trans->trans_cfg->gen2 &&
	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
			struct iwl_fw_error_dump_paging *paging;
			u32 page_len = trans->init_dram.paging[i].size;

			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
			data->len = cpu_to_le32(sizeof(*paging) + page_len);
			paging = (void *)data->data;
			paging->index = cpu_to_le32(i);
			memcpy(paging->data,
			       trans->init_dram.paging[i].block, page_len);
			data = iwl_fw_error_next_data(data);

			len += sizeof(*data) + sizeof(*paging) + page_len;
		}
	}
	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);

	dump_data->len = len;

	return dump_data;
}

void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
{
	if (enable)
		iwl_enable_interrupts(trans);
	else
		iwl_disable_interrupts(trans);
}

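/* Select the interrupt-cause register and SW-error bit that match the
 * interrupt mode (MSI-X vs. legacy INTA) and device family, then hand
 * off to the common NMI synchronization helper.
 */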
void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
{
	u32 inta_addr, sw_err_bit;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
		else
			sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
	} else {
		inta_addr = CSR_INT;
		sw_err_bit = CSR_INT_BIT_SW_ERR;
	}

	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
}

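/* Allocate and initialize the PCIe transport: enable the PCI device,
 * size the TFD scheme for the hardware generation, set up locks, wait
 * queues, NAPI and interrupts, and map BAR0 for register access.
 */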
struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie, **priv;
	struct iwl_trans *trans;
	int ret, addr_size;
	void __iomem * const *table;
	u32 bar0;

	/* reassign our BAR 0 if invalid due to possible runtime PM races */
	pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &bar0);
	if (bar0 == PCI_BASE_ADDRESS_MEM_TYPE_64) {
		ret = pci_assign_resource(pdev, 0);
		if (ret)
			return ERR_PTR(ret);
	}

	ret = pcim_enable_device(pdev);
	if (ret)
		return ERR_PTR(ret);

	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev,
				cfg_trans);
	if (!trans)
		return ERR_PTR(-ENOMEM);

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans->trans_cfg->gen2) {
		trans_pcie->txqs.tfd.addr_size = 64;
		trans_pcie->txqs.tfd.max_tbs = IWL_TFH_NUM_TBS;
		trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfh_tfd);
	} else {
		trans_pcie->txqs.tfd.addr_size = 36;
		trans_pcie->txqs.tfd.max_tbs = IWL_NUM_OF_TBS;
		trans_pcie->txqs.tfd.size = sizeof(struct iwl_tfd);
	}
	trans->max_skb_frags = IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie);

	/* Set a short watchdog for the command queue */
	trans_pcie->txqs.cmd.wdg_timeout = IWL_DEF_WD_TIMEOUT;

	trans_pcie->txqs.tso_hdr_page = alloc_percpu(struct iwl_tso_hdr_page);
	if (!trans_pcie->txqs.tso_hdr_page) {
		ret = -ENOMEM;
		goto out_free_trans;
	}

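	/* Byte-count table entry size and depth differ by device
	 * family, so pick the matching table size before creating the
	 * DMA pool below.
	 */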
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		trans_pcie->txqs.bc_tbl_size =
			sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_BZ;
	else if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		trans_pcie->txqs.bc_tbl_size =
			sizeof(struct iwl_gen3_bc_tbl_entry) * TFD_QUEUE_BC_SIZE_GEN3_AX210;
	else
		trans_pcie->txqs.bc_tbl_size = sizeof(struct iwlagn_scd_bc_tbl);
	/*
	 * For gen2 devices, we use a single allocation for each byte-count
	 * table, but they're pretty small (1k) so use a DMA pool that we
	 * allocate here.
	 */
	if (trans->trans_cfg->gen2) {
		trans_pcie->txqs.bc_pool =
			dmam_pool_create("iwlwifi:bc", trans->dev,
					 trans_pcie->txqs.bc_tbl_size,
					 256, 0);
		if (!trans_pcie->txqs.bc_pool) {
			ret = -ENOMEM;
			goto out_free_tso;
		}
	}

	/* Some things must not change even if the config does */
	WARN_ON(trans_pcie->txqs.tfd.addr_size !=
		(trans->trans_cfg->gen2 ? 64 : 36));

	/* Initialize NAPI here - it should be before registering to mac80211
	 * in the opmode but after the HW struct is allocated.
	 */
	trans_pcie->napi_dev = alloc_netdev_dummy(sizeof(struct iwl_trans_pcie *));
	if (!trans_pcie->napi_dev) {
		ret = -ENOMEM;
		goto out_free_tso;
	}
	/* The private struct in netdev is a pointer to struct iwl_trans_pcie */
	priv = netdev_priv(trans_pcie->napi_dev);
	*priv = trans_pcie;

	trans_pcie->trans = trans;
	trans_pcie->opmode_down = true;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	spin_lock_init(&trans_pcie->alloc_page_lock);
	mutex_init(&trans_pcie->mutex);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
	init_waitqueue_head(&trans_pcie->fw_reset_waitq);
	init_waitqueue_head(&trans_pcie->imr_waitq);

	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
						   WQ_HIGHPRI | WQ_UNBOUND, 0);
	if (!trans_pcie->rba.alloc_wq) {
		ret = -ENOMEM;
		goto out_free_ndev;
	}
	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);

	trans_pcie->debug_rfkill = -1;

	if (!cfg_trans->base_params->pcie_l1_allowed) {
		/*
		 * W/A - seems to solve weird behavior. We need to remove this
		 * if we don't want to stay in L1 all the time. This wastes a
		 * lot of power.
		 */
		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
				       PCIE_LINK_STATE_L1 |
				       PCIE_LINK_STATE_CLKPM);
	}

	pci_set_master(pdev);

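	/* Try the DMA mask matching the hardware's TFD address width
	 * first (64-bit for gen2, 36-bit otherwise), falling back to
	 * 32-bit addressing if the platform can't satisfy it.
	 */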
	addr_size = trans_pcie->txqs.tfd.addr_size;
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(addr_size));
	if (ret) {
		ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (ret) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_no_pci;
		}
	}

	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
	if (ret) {
		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
		goto out_no_pci;
	}

	table = pcim_iomap_table(pdev);
	if (!table) {
		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto out_no_pci;
	}

	trans_pcie->hw_base = table[0];
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "couldn't find IO mem in first BAR\n");
		ret = -ENODEV;
		goto out_no_pci;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state
	 */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	trans_pcie->pci_dev = pdev;
	iwl_disable_interrupts(trans);

	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	if (trans->hw_rev == 0xffffffff) {
		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
		ret = -EIO;
		goto out_no_pci;
	}

	/*
	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV has
	 * changed, and now the revision step also includes bits 0-1 (no more
	 * "dash" value). To keep hw_rev backwards compatible, we store it
	 * in the old format.
	 */
	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
		trans->hw_rev_step = trans->hw_rev & 0xF;
	else
		trans->hw_rev_step = (trans->hw_rev & 0xC) >> 2;

	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);

	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	init_waitqueue_head(&trans_pcie->sx_waitq);

	ret = iwl_pcie_alloc_invalid_tx_cmd(trans);
	if (ret)
		goto out_no_pci;

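	/* With MSI-X, each vector gets its own handler; otherwise fall
	 * back to the ICT table plus a shared threaded legacy IRQ.
	 */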
	if (trans_pcie->msix_enabled) {
		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
		if (ret)
			goto out_no_pci;
	} else {
		ret = iwl_pcie_alloc_ict(trans);
		if (ret)
			goto out_no_pci;

		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
						iwl_pcie_isr,
						iwl_pcie_irq_handler,
						IRQF_SHARED, DRV_NAME, trans);
		if (ret) {
			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
			goto out_free_ict;
		}
	}

#ifdef CONFIG_IWLWIFI_DEBUGFS
	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
	mutex_init(&trans_pcie->fw_mon_data.mutex);
#endif

	iwl_dbg_tlv_init(trans);

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_no_pci:
	destroy_workqueue(trans_pcie->rba.alloc_wq);
out_free_ndev:
	free_netdev(trans_pcie->napi_dev);
out_free_tso:
	free_percpu(trans_pcie->txqs.tso_hdr_page);
out_free_trans:
	iwl_trans_free(trans);
	return ERR_PTR(ret);
}

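/* Program the TFH service DMA channel 0 for a DRAM-to-SRAM copy:
 * permanently halt the UMAC, set the SRAM destination address, the
 * 64-bit DRAM source address and the byte count, then kick off the
 * transfer. Completion is waited for by the caller.
 */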
void iwl_trans_pcie_copy_imr_fh(struct iwl_trans *trans,
				u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	iwl_write_prph(trans, IMR_UREG_CHICK,
		       iwl_read_prph(trans, IMR_UREG_CHICK) |
		       IMR_UREG_CHICK_HALT_UMAC_PERMANENTLY_MSK);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_SRAM_ADDR, dst_addr);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_LSB,
		       (u32)(src_addr & 0xFFFFFFFF));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_DRAM_ADDR_MSB,
		       iwl_get_dma_hi_addr(src_addr));
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_BC, byte_cnt);
	iwl_write_prph(trans, IMR_TFH_SRV_DMA_CHNL0_CTRL,
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_IRQ_TARGET_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_DMA_EN_POS |
		       IMR_TFH_SRV_DMA_CHNL0_CTRL_D2S_RS_MSK);
}

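/* Start the IMR copy and sleep (up to 5 seconds) until imr_status
 * leaves IMR_D2S_REQUESTED; both a timeout and an error status dump
 * the device registers and fail the copy with -ETIMEDOUT.
 *
 * Illustrative call (addresses and length are hypothetical):
 *	ret = iwl_trans_pcie_copy_imr(trans, sram_dst, dram_src, len);
 */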
int iwl_trans_pcie_copy_imr(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->imr_status = IMR_D2S_REQUESTED;
	iwl_trans_pcie_copy_imr_fh(trans, dst_addr, src_addr, byte_cnt);
	ret = wait_event_timeout(trans_pcie->imr_waitq,
				 trans_pcie->imr_status !=
				 IMR_D2S_REQUESTED, 5 * HZ);
	if (!ret || trans_pcie->imr_status == IMR_D2S_ERROR) {
		IWL_ERR(trans, "Failed to copy IMR Memory chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}
	trans_pcie->imr_status = IMR_D2S_IDLE;
	return 0;
}
