/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/pci.h>
#include <linux/pci-aspm.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-agn-hw.h"
#include "internal.h"

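/*
 * Low-level masked read-modify-write of a CSR register: only the bits in
 * @mask are replaced by @value.  The two inline wrappers below set or
 * clear all bits in @mask.  The double-underscore variants expect the
 * caller to hold reg_lock (see iwl_trans_pcie_set_bits_mask() below).
 */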
static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
					   u32 reg, u32 mask, u32 value)
{
	u32 v;

#ifdef CONFIG_IWLWIFI_DEBUG
	WARN_ON_ONCE(value & ~mask);
#endif

	v = iwl_read32(trans, reg);
	v &= ~mask;
	v |= value;
	iwl_write32(trans, reg, v);
}

static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
					      u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
}

static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
					    u32 reg, u32 mask)
{
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}

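/*
 * Select the NIC's power source: VAUX, if requested and the device can
 * wake the system from D3cold, otherwise VMAIN.  The inverted
 * APMG_PS_CTRL_MSK_PWR_SRC passed as the preserve-mask means only the
 * power-source field of APMG_PS_CTRL_REG is replaced.
 */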
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

static void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;

	/*
	 * HW bug W/A for instability in PCIe bus L0S->L1 transition.
	 * Check if BIOS (or OS) enabled L1-ASPM on this device.
	 * If so (likely), disable L0S, so device moves directly L0->L1;
	 *    costs negligible amount of power savings.
	 * If not (unlikely), enable L0S, so there is at least some
	 *    power savings, even without L1.
	 */
	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	if (lctl & PCI_EXP_LNKCTL_ASPM_L1) {
		/* L1-ASPM enabled; disable(!) L0S */
		iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Enabled; Disabling L0S\n");
	} else {
		/* L1-ASPM disabled; enable(!) L0S */
		iwl_clear_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
		dev_info(trans->dev, "L1 Disabled; Enabling L0S\n");
	}
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI workaround) */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->cfg->base_params->pll_cfg_val)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG,
			    trans->cfg->base_params->pll_cfg_val);

	/*
	 * Set "initialization complete" bit to move adapter from
	 * D0U* --> D0A* (powered-up active) state.
	 */
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * Wait for clock stabilization; once stabilized, access to
	 * device-internal resources is supported, e.g. iwl_write_prph()
	 * and accesses to uCode SRAM.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
	if (ret < 0) {
		IWL_DEBUG_INFO(trans, "Failed to init the card\n");
		goto out;
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
	 * do not disable clocks.  This preserves any hardware bits already
	 * set by default in "CLK_CTRL_REG" after reset.
	 */
	iwl_write_prph(trans, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
	udelay(20);

	/* Disable L1-Active */
	iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
			  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

	set_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

out:
	return ret;
}

static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret = 0;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	/* iwl_poll_bit() returns elapsed time (>= 0) on success */
	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");

	return ret;
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	/* Reset the entire device */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);

	udelay(10);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* nic_init */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_pcie_apm_init(trans);

	/* Set interrupt coalescing calibration timer to default (512 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);

	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	iwl_pcie_rx_init(trans);

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans))
		return -ENOMEM;

	if (trans->cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
static int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	/* If HW is not ready, prepare the conditions to check again */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PREPARE);

	do {
		ret = iwl_pcie_set_hw_ready(trans);
		if (ret >= 0)
			return 0;

		usleep_range(200, 1000);
		t += 200;
	} while (t < 150000);

	return ret;
}

/*
 * ucode
 */
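
/*
 * DMA one contiguous chunk of the firmware image into device SRAM: pause
 * the FH service channel, program it with the host buffer (phy_addr,
 * byte_cnt) and SRAM destination (dst_addr), re-enable it, and sleep
 * until the interrupt handler signals ucode_write_complete (5 s timeout).
 */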
static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans, u32 dst_addr,
					dma_addr_t phy_addr, u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write_direct32(trans,
			   FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
			   dst_addr);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
			   phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write_direct32(trans,
			   FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
			   (iwl_get_dma_hi_addr(phy_addr)
				<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM |
			   1 << FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX |
			   FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write_direct32(trans,
			   FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE	|
			   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE	|
			   FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		return -ETIMEDOUT;
	}

	return 0;
}

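/*
 * Copy one uCode section to the device, PAGE_SIZE bytes at a time,
 * through a single DMA-coherent bounce buffer; section->offset is the
 * SRAM load address of the section.
 */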
static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
				 const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset;
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, PAGE_SIZE, &p_addr, GFP_KERNEL);
	if (!v_addr)
		return -ENOMEM;

	for (offset = 0; offset < section->len; offset += PAGE_SIZE) {
		u32 copy_size;

		copy_size = min_t(u32, PAGE_SIZE, section->len - offset);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans,
						   section->offset + offset,
						   p_addr, copy_size);
		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, PAGE_SIZE, v_addr, p_addr);
	return ret;
}

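/*
 * Load all sections of the given firmware image in order, then clear
 * CSR_RESET so the embedded processor may start running the new uCode.
 */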
static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				     const struct fw_img *image)
{
	int i, ret = 0;

	for (i = 0; i < IWL_UCODE_SECTION_MAX; i++) {
		if (!image->sec[i].data)
			break;

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	/* Remove all resets to allow NIC to operate */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	bool hw_rfkill;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	clear_bit(STATUS_FW_ERROR, &trans_pcie->status);

	iwl_enable_rfkill_int(trans);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans_pcie->status);
	else
		clear_bit(STATUS_RFKILL, &trans_pcie->status);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	if (hw_rfkill && !run_in_rfkill)
		return -ERFKILL;

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		return ret;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
	iwl_enable_interrupts(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	return iwl_pcie_load_given_ucode(trans, fw);
}

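/*
 * Called once the firmware has reported it is alive: re-arm the ICT
 * interrupt table and start the Tx scheduler at the SRAM address the
 * firmware provided.
 */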
static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	/* tell the device to stop sending interrupts */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	/* device going down, stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status)) {
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		iwl_write_prph(trans, APMG_CLK_DIS_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(5);
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans);

	/* Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * Clear the interrupt again here.
	 */
	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_enable_rfkill_int(trans);

	/* stop and reset the on-board processor */
	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);

	/* clear all status bits */
	clear_bit(STATUS_HCMD_ACTIVE, &trans_pcie->status);
	clear_bit(STATUS_INT_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_DEVICE_ENABLED, &trans_pcie->status);
	clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	clear_bit(STATUS_RFKILL, &trans_pcie->status);
}

static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans)
{
	/* let the ucode operate on its own */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_SET,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	iwl_disable_interrupts(trans);
	iwl_pcie_disable_ict(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/*
	 * reset TX queues -- some of their registers reset during S3
	 * so if we don't reset everything here the D3 image would try
	 * to execute some invalid memory upon resume
	 */
	iwl_trans_pcie_tx_reset(trans);

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status)
{
	u32 val;
	int ret;

	iwl_pcie_set_pwr(trans, false);

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET) {
		*status = IWL_D3_STATUS_RESET;
		return 0;
	}

	/*
	 * Also enables interrupts - none will happen as the device doesn't
	 * know we're waking it up, only when the opmode actually tells it
	 * after this call.
	 */
	iwl_pcie_reset_ict(trans);

	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* iwl_poll_bit() returns elapsed time (>= 0) on success */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
			   25000);
	if (ret < 0) {
		IWL_ERR(trans, "Failed to resume the device (mac ready)\n");
		return ret;
	}

	iwl_trans_pcie_tx_reset(trans);

	ret = iwl_pcie_rx_init(trans);
	if (ret) {
		IWL_ERR(trans, "Failed to resume the device (RX reset)\n");
		return ret;
	}

	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_D3_CFG_COMPLETE);

	*status = IWL_D3_STATUS_ALIVE;
	return 0;
}

static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int err;

	err = iwl_pcie_prepare_card_hw(trans);
	if (err) {
		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
		return err;
	}

	iwl_pcie_apm_init(trans);

	/* From now on, the op_mode will be kept updated about RF kill state */
	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill)
		set_bit(STATUS_RFKILL, &trans_pcie->status);
	else
		clear_bit(STATUS_RFKILL, &trans_pcie->status);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}

static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
				   bool op_mode_leaving)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_apm_stop(trans);

	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
	iwl_disable_interrupts(trans);
	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);

	iwl_pcie_disable_ict(trans);

	if (!op_mode_leaving) {
		/*
		 * Even if we stop the HW, we still want the RF kill
		 * interrupt
		 */
		iwl_enable_rfkill_int(trans);

		/*
		 * Check again since the RF kill state may have changed while
		 * all the interrupts were disabled, in this case we couldn't
		 * receive the RF kill interrupt and update the state in the
		 * op_mode.
		 */
		hw_rfkill = iwl_is_rfkill_set(trans);
		if (hw_rfkill)
			set_bit(STATUS_RFKILL, &trans_pcie->status);
		else
			clear_bit(STATUS_RFKILL, &trans_pcie->status);
		iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
	}
}

static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
{
	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
}

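/*
 * Indirect periphery (PRPH) register access: write the target address to
 * the HBUS address register (the low 20 bits select the register; the
 * (3 << 24) bits are control bits the HBUS target interface expects),
 * then transfer the value through the matching data register.  Callers
 * normally hold NIC access around these (see iwl_read_prph() and friends).
 */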
static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
			       ((reg & 0x000FFFFF) | (3 << 24)));
	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
}

static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
				      u32 val)
{
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
			       ((addr & 0x000FFFFF) | (3 << 24)));
	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
}

static void iwl_trans_pcie_configure(struct iwl_trans *trans,
				     const struct iwl_trans_config *trans_cfg)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans_pcie->cmd_queue = trans_cfg->cmd_queue;
	trans_pcie->cmd_fifo = trans_cfg->cmd_fifo;
	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
		trans_pcie->n_no_reclaim_cmds = 0;
	else
		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
	if (trans_pcie->n_no_reclaim_cmds)
		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));

	trans_pcie->rx_buf_size_8k = trans_cfg->rx_buf_size_8k;
	if (trans_pcie->rx_buf_size_8k)
		trans_pcie->rx_page_order = get_order(8 * 1024);
	else
		trans_pcie->rx_page_order = get_order(4 * 1024);

	trans_pcie->wd_timeout =
		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);

	trans_pcie->command_names = trans_cfg->command_names;
	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
}

void iwl_trans_pcie_free(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	synchronize_irq(trans_pcie->pci_dev->irq);

	iwl_pcie_tx_free(trans);
	iwl_pcie_rx_free(trans);

	free_irq(trans_pcie->pci_dev->irq, trans);
	iwl_pcie_free_ict(trans);

	pci_disable_msi(trans_pcie->pci_dev);
	iounmap(trans_pcie->hw_base);
	pci_release_regions(trans_pcie->pci_dev);
	pci_disable_device(trans_pcie->pci_dev);
	kmem_cache_destroy(trans->dev_cmd_pool);

	kfree(trans);
}

static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (state)
		set_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
	else
		clear_bit(STATUS_TPOWER_PMI, &trans_pcie->status);
}

#ifdef CONFIG_PM_SLEEP
static int iwl_trans_pcie_suspend(struct iwl_trans *trans)
{
	return 0;
}

static int iwl_trans_pcie_resume(struct iwl_trans *trans)
{
	bool hw_rfkill;

	iwl_enable_rfkill_int(trans);

	hw_rfkill = iwl_is_rfkill_set(trans);
	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

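/*
 * Wake the NIC and hold it awake (and hold reg_lock) for a series of
 * register accesses; iwl_trans_pcie_release_nic_access() undoes both.
 * On a wake-up timeout the device is forced to NMI; unless @silent,
 * we also WARN and return false with the lock dropped.
 */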
static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
					   unsigned long *flags)
{
	int ret;
	struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);

	spin_lock_irqsave(&pcie_trans->reg_lock, *flags);

	/* this bit wakes up the NIC */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/*
	 * These bits say the device is running, and should keep running for
	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
	 * but they do not indicate that embedded SRAM is restored yet;
	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
	 * to/from host DRAM when sleeping/waking for power-saving.
	 * Each direction takes approximately 1/4 millisecond; with this
	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
	 * series of register accesses are expected (e.g. reading Event Log),
	 * to keep device from sleeping.
	 *
	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
	 * SRAM is okay/restored.  We don't check that here because this call
	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
	 *
	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
	 * and do not save/restore SRAM when power cycling.
	 */
	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
	if (unlikely(ret < 0)) {
		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
		if (!silent) {
			u32 val = iwl_read32(trans, CSR_GP_CNTRL);

			WARN_ONCE(1,
				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
				  val);
			spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
			return false;
		}
	}

	/*
	 * Fool sparse by faking we release the lock - sparse will
	 * track nic_access anyway.
	 */
	__release(&pcie_trans->reg_lock);
	return true;
}

static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
					      unsigned long *flags)
{
	struct iwl_trans_pcie *pcie_trans = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&pcie_trans->reg_lock);

	/*
	 * Fool sparse by faking that we acquire the lock - sparse will
	 * track nic_access anyway.
	 */
	__acquire(&pcie_trans->reg_lock);

	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	/*
	 * Above we read the CSR_GP_CNTRL register, which will flush
	 * any previous writes, but we need the write that clears the
	 * MAC_ACCESS_REQ bit to be performed before any other writes
	 * scheduled on different CPUs (after we drop reg_lock).
	 */
	mmiowb();
	spin_unlock_irqrestore(&pcie_trans->reg_lock, *flags);
}

static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
				   void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
				    void *buf, int dwords)
{
	unsigned long flags;
	int offs, ret = 0;
	u32 *vals = buf;

	if (iwl_trans_grab_nic_access(trans, false, &flags)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		for (offs = 0; offs < dwords; offs++)
			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
				    vals ? vals[offs] : 0);
		iwl_trans_release_nic_access(trans, &flags);
	} else {
		ret = -EBUSY;
	}
	return ret;
}

#define IWL_FLUSH_WAIT_MS	2000

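/*
 * Wait up to IWL_FLUSH_WAIT_MS per queue for every Tx queue except the
 * command queue to drain.  On timeout, dump scheduler and FH state for
 * the stuck queue before returning -ETIMEDOUT.
 */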
static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	int cnt;
	unsigned long now = jiffies;
	u32 scd_sram_addr;
	u8 buf[16];
	int ret = 0;

	/* waiting for all the tx frames to complete might take a while */
	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		if (cnt == trans_pcie->cmd_queue)
			continue;
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		while (q->read_ptr != q->write_ptr && !time_after(jiffies,
		       now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS)))
			msleep(1);

		if (q->read_ptr != q->write_ptr) {
			IWL_ERR(trans,
				"failed to flush all Tx FIFO queues, Q %d\n",
				cnt);
			ret = -ETIMEDOUT;
			break;
		}
	}

	if (!ret)
		return 0;

	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
		txq->q.read_ptr, txq->q.write_ptr);

	scd_sram_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq->q.id);
	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));

	iwl_print_hex_error(trans, buf, sizeof(buf));

	for (cnt = 0; cnt < FH_TCSR_CHNL_NUM; cnt++)
		IWL_ERR(trans, "FH TRBs(%d) = 0x%08x\n", cnt,
			iwl_read_direct32(trans, FH_TX_TRB_REG(cnt)));

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		u32 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(cnt));
		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
		u32 tbl_dw =
			iwl_trans_read_mem32(trans, trans_pcie->scd_base_addr +
					     SCD_TRANS_TBL_OFFSET_QUEUE(cnt));

		if (cnt & 0x1)
			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
		else
			tbl_dw = tbl_dw & 0x0000FFFF;

		IWL_ERR(trans,
			"Q %d is %sactive and mapped to fifo %d ra_tid 0x%04x [%d,%d]\n",
			cnt, active ? "" : "in", fifo, tbl_dw,
			iwl_read_prph(trans,
				      SCD_QUEUE_RDPTR(cnt)) & (txq->q.n_bd - 1),
			iwl_read_prph(trans, SCD_QUEUE_WRPTR(cnt)));
	}

	return ret;
}

static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
					 u32 mask, u32 value)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
}

static const char *get_fh_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(FH_RSCSR_CHNL0_STTS_WPTR_REG);
	IWL_CMD(FH_RSCSR_CHNL0_RBDCB_BASE_REG);
	IWL_CMD(FH_RSCSR_CHNL0_WPTR);
	IWL_CMD(FH_MEM_RCSR_CHNL0_CONFIG_REG);
	IWL_CMD(FH_MEM_RSSR_SHARED_CTRL_REG);
	IWL_CMD(FH_MEM_RSSR_RX_STATUS_REG);
	IWL_CMD(FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV);
	IWL_CMD(FH_TSSR_TX_STATUS_REG);
	IWL_CMD(FH_TSSR_TX_ERROR_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

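/*
 * Dump the FH (flow handler) registers.  With CONFIG_IWLWIFI_DEBUGFS and
 * a non-NULL @buf, the dump is written to a freshly allocated buffer
 * (returned through *buf, freed by the caller) and the byte count is
 * returned; otherwise the registers are printed to the error log.
 */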
int iwl_pcie_dump_fh(struct iwl_trans *trans, char **buf)
{
	int i;
	static const u32 fh_tbl[] = {
		FH_RSCSR_CHNL0_STTS_WPTR_REG,
		FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		FH_RSCSR_CHNL0_WPTR,
		FH_MEM_RCSR_CHNL0_CONFIG_REG,
		FH_MEM_RSSR_SHARED_CTRL_REG,
		FH_MEM_RSSR_RX_STATUS_REG,
		FH_MEM_RSSR_RX_ENABLE_ERR_IRQ2DRV,
		FH_TSSR_TX_STATUS_REG,
		FH_TSSR_TX_ERROR_REG
	};

#ifdef CONFIG_IWLWIFI_DEBUGFS
	if (buf) {
		int pos = 0;
		size_t bufsz = ARRAY_SIZE(fh_tbl) * 48 + 40;

		*buf = kmalloc(bufsz, GFP_KERNEL);
		if (!*buf)
			return -ENOMEM;

		pos += scnprintf(*buf + pos, bufsz - pos,
				"FH register values:\n");

		for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
			pos += scnprintf(*buf + pos, bufsz - pos,
				"  %34s: 0X%08x\n",
				get_fh_string(fh_tbl[i]),
				iwl_read_direct32(trans, fh_tbl[i]));

		return pos;
	}
#endif

	IWL_ERR(trans, "FH register values:\n");
	for (i = 0; i < ARRAY_SIZE(fh_tbl); i++)
		IWL_ERR(trans, "  %34s: 0X%08x\n",
			get_fh_string(fh_tbl[i]),
			iwl_read_direct32(trans, fh_tbl[i]));

	return 0;
}

static const char *get_csr_string(int cmd)
{
#define IWL_CMD(x) case x: return #x
	switch (cmd) {
	IWL_CMD(CSR_HW_IF_CONFIG_REG);
	IWL_CMD(CSR_INT_COALESCING);
	IWL_CMD(CSR_INT);
	IWL_CMD(CSR_INT_MASK);
	IWL_CMD(CSR_FH_INT_STATUS);
	IWL_CMD(CSR_GPIO_IN);
	IWL_CMD(CSR_RESET);
	IWL_CMD(CSR_GP_CNTRL);
	IWL_CMD(CSR_HW_REV);
	IWL_CMD(CSR_EEPROM_REG);
	IWL_CMD(CSR_EEPROM_GP);
	IWL_CMD(CSR_OTP_GP_REG);
	IWL_CMD(CSR_GIO_REG);
	IWL_CMD(CSR_GP_UCODE_REG);
	IWL_CMD(CSR_GP_DRIVER_REG);
	IWL_CMD(CSR_UCODE_DRV_GP1);
	IWL_CMD(CSR_UCODE_DRV_GP2);
	IWL_CMD(CSR_LED_REG);
	IWL_CMD(CSR_DRAM_INT_TBL_REG);
	IWL_CMD(CSR_GIO_CHICKEN_BITS);
	IWL_CMD(CSR_ANA_PLL_CFG);
	IWL_CMD(CSR_HW_REV_WA_REG);
	IWL_CMD(CSR_DBG_HPET_MEM_REG);
	default:
		return "UNKNOWN";
	}
#undef IWL_CMD
}

void iwl_pcie_dump_csr(struct iwl_trans *trans)
{
	int i;
	static const u32 csr_tbl[] = {
		CSR_HW_IF_CONFIG_REG,
		CSR_INT_COALESCING,
		CSR_INT,
		CSR_INT_MASK,
		CSR_FH_INT_STATUS,
		CSR_GPIO_IN,
		CSR_RESET,
		CSR_GP_CNTRL,
		CSR_HW_REV,
		CSR_EEPROM_REG,
		CSR_EEPROM_GP,
		CSR_OTP_GP_REG,
		CSR_GIO_REG,
		CSR_GP_UCODE_REG,
		CSR_GP_DRIVER_REG,
		CSR_UCODE_DRV_GP1,
		CSR_UCODE_DRV_GP2,
		CSR_LED_REG,
		CSR_DRAM_INT_TBL_REG,
		CSR_GIO_CHICKEN_BITS,
		CSR_ANA_PLL_CFG,
		CSR_HW_REV_WA_REG,
		CSR_DBG_HPET_MEM_REG
	};

	IWL_ERR(trans, "CSR values:\n");
	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
		"CSR_INT_PERIODIC_REG)\n");
	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
		IWL_ERR(trans, "  %25s: 0X%08x\n",
			get_csr_string(csr_tbl[i]),
			iwl_read32(trans, csr_tbl[i]));
	}
}

#ifdef CONFIG_IWLWIFI_DEBUGFS
/* creation and removal of debugfs files */
#define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
	if (!debugfs_create_file(#name, mode, parent, trans,		\
				 &iwl_dbgfs_##name##_ops))		\
		goto err;						\
} while (0)

/* file operations */
#define DEBUGFS_READ_FUNC(name)                                         \
static ssize_t iwl_dbgfs_##name##_read(struct file *file,               \
					char __user *user_buf,          \
					size_t count, loff_t *ppos);

#define DEBUGFS_WRITE_FUNC(name)                                        \
static ssize_t iwl_dbgfs_##name##_write(struct file *file,              \
					const char __user *user_buf,    \
					size_t count, loff_t *ppos);

#define DEBUGFS_READ_FILE_OPS(name)					\
	DEBUGFS_READ_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_WRITE_FILE_OPS(name)                                    \
	DEBUGFS_WRITE_FUNC(name);                                       \
static const struct file_operations iwl_dbgfs_##name##_ops = {          \
	.write = iwl_dbgfs_##name##_write,                              \
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

#define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
	DEBUGFS_READ_FUNC(name);					\
	DEBUGFS_WRITE_FUNC(name);					\
static const struct file_operations iwl_dbgfs_##name##_ops = {		\
	.write = iwl_dbgfs_##name##_write,				\
	.read = iwl_dbgfs_##name##_read,				\
	.open = simple_open,						\
	.llseek = generic_file_llseek,					\
};

static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	struct iwl_queue *q;
	char *buf;
	int pos = 0;
	int cnt;
	int ret;
	size_t bufsz;

	bufsz = sizeof(char) * 64 * trans->cfg->base_params->num_of_queues;

	if (!trans_pcie->txq)
		return -EAGAIN;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	for (cnt = 0; cnt < trans->cfg->base_params->num_of_queues; cnt++) {
		txq = &trans_pcie->txq[cnt];
		q = &txq->q;
		pos += scnprintf(buf + pos, bufsz - pos,
				"hwq %.2d: read=%u write=%u use=%d stop=%d\n",
				cnt, q->read_ptr, q->write_ptr,
				!!test_bit(cnt, trans_pcie->queue_used),
				!!test_bit(cnt, trans_pcie->queue_stopped));
	}
	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
				       char __user *user_buf,
				       size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rxq *rxq = &trans_pcie->rxq;
	char buf[256];
	int pos = 0;
	const size_t bufsz = sizeof(buf);

	pos += scnprintf(buf + pos, bufsz - pos, "read: %u\n",
						rxq->read);
	pos += scnprintf(buf + pos, bufsz - pos, "write: %u\n",
						rxq->write);
	pos += scnprintf(buf + pos, bufsz - pos, "free_count: %u\n",
						rxq->free_count);
	if (rxq->rb_stts) {
		pos += scnprintf(buf + pos, bufsz - pos, "closed_rb_num: %u\n",
			 le16_to_cpu(rxq->rb_stts->closed_rb_num) & 0x0FFF);
	} else {
		pos += scnprintf(buf + pos, bufsz - pos,
					"closed_rb_num: Not Allocated\n");
	}
	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}

static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
					char __user *user_buf,
					size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	int pos = 0;
	char *buf;
	int bufsz = 24 * 64; /* 24 items * 64 char per item */
	ssize_t ret;

	buf = kzalloc(bufsz, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	pos += scnprintf(buf + pos, bufsz - pos,
			"Interrupt Statistics Report:\n");

	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
		isr_stats->hw);
	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
		isr_stats->sw);
	if (isr_stats->sw || isr_stats->hw) {
		pos += scnprintf(buf + pos, bufsz - pos,
			"\tLast Restarting Code:  0x%X\n",
			isr_stats->err_code);
	}
#ifdef CONFIG_IWLWIFI_DEBUG
	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
		isr_stats->sch);
	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
		isr_stats->alive);
#endif
	pos += scnprintf(buf + pos, bufsz - pos,
		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);

	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
		isr_stats->ctkill);

	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
		isr_stats->wakeup);

	pos += scnprintf(buf + pos, bufsz - pos,
		"Rx command responses:\t\t %u\n", isr_stats->rx);

	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
		isr_stats->tx);

	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
		isr_stats->unhandled);

	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
	kfree(buf);
	return ret;
}

static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
					 const char __user *user_buf,
					 size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;

	char buf[8];
	int buf_size;
	u32 reset_flag;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%x", &reset_flag) != 1)
		return -EFAULT;
	if (reset_flag == 0)
		memset(isr_stats, 0, sizeof(*isr_stats));

	return count;
}

static ssize_t iwl_dbgfs_csr_write(struct file *file,
				   const char __user *user_buf,
				   size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char buf[8];
	int buf_size;
	int csr;

	memset(buf, 0, sizeof(buf));
	buf_size = min(count, sizeof(buf) - 1);
	if (copy_from_user(buf, user_buf, buf_size))
		return -EFAULT;
	if (sscanf(buf, "%d", &csr) != 1)
		return -EFAULT;

	iwl_pcie_dump_csr(trans);

	return count;
}

static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
				     char __user *user_buf,
				     size_t count, loff_t *ppos)
{
	struct iwl_trans *trans = file->private_data;
	char *buf = NULL;
	int pos = 0;
	ssize_t ret = -EFAULT;

	ret = pos = iwl_pcie_dump_fh(trans, &buf);
	if (buf) {
		ret = simple_read_from_buffer(user_buf,
					      count, ppos, buf, pos);
		kfree(buf);
	}

	return ret;
}

DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
DEBUGFS_READ_FILE_OPS(fh_reg);
DEBUGFS_READ_FILE_OPS(rx_queue);
DEBUGFS_READ_FILE_OPS(tx_queue);
DEBUGFS_WRITE_FILE_OPS(csr);

/*
 * Create the debugfs files and directories
 */
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	DEBUGFS_ADD_FILE(rx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(tx_queue, dir, S_IRUSR);
	DEBUGFS_ADD_FILE(interrupt, dir, S_IWUSR | S_IRUSR);
	DEBUGFS_ADD_FILE(csr, dir, S_IWUSR);
	DEBUGFS_ADD_FILE(fh_reg, dir, S_IRUSR);
	return 0;

err:
	IWL_ERR(trans, "failed to create the trans debugfs entry\n");
	return -ENOMEM;
}
#else
static int iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans,
					 struct dentry *dir)
{
	return 0;
}
#endif /* CONFIG_IWLWIFI_DEBUGFS */

static const struct iwl_trans_ops trans_ops_pcie = {
	.start_hw = iwl_trans_pcie_start_hw,
	.stop_hw = iwl_trans_pcie_stop_hw,
	.fw_alive = iwl_trans_pcie_fw_alive,
	.start_fw = iwl_trans_pcie_start_fw,
	.stop_device = iwl_trans_pcie_stop_device,

	.d3_suspend = iwl_trans_pcie_d3_suspend,
	.d3_resume = iwl_trans_pcie_d3_resume,

	.send_cmd = iwl_trans_pcie_send_hcmd,

	.tx = iwl_trans_pcie_tx,
	.reclaim = iwl_trans_pcie_reclaim,

	.txq_disable = iwl_trans_pcie_txq_disable,
	.txq_enable = iwl_trans_pcie_txq_enable,

	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

	.wait_tx_queue_empty = iwl_trans_pcie_wait_txq_empty,

#ifdef CONFIG_PM_SLEEP
	.suspend = iwl_trans_pcie_suspend,
	.resume = iwl_trans_pcie_resume,
#endif
	.write8 = iwl_trans_pcie_write8,
	.write32 = iwl_trans_pcie_write32,
	.read32 = iwl_trans_pcie_read32,
	.read_prph = iwl_trans_pcie_read_prph,
	.write_prph = iwl_trans_pcie_write_prph,
	.read_mem = iwl_trans_pcie_read_mem,
	.write_mem = iwl_trans_pcie_write_mem,
	.configure = iwl_trans_pcie_configure,
	.set_pmi = iwl_trans_pcie_set_pmi,
	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
	.release_nic_access = iwl_trans_pcie_release_nic_access,
	.set_bits_mask = iwl_trans_pcie_set_bits_mask,
};

struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
				       const struct pci_device_id *ent,
				       const struct iwl_cfg *cfg)
{
	struct iwl_trans_pcie *trans_pcie;
	struct iwl_trans *trans;
	u16 pci_cmd;
	int err;

	trans = kzalloc(sizeof(struct iwl_trans) +
			sizeof(struct iwl_trans_pcie), GFP_KERNEL);

	if (!trans)
		return NULL;

	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	trans->ops = &trans_ops_pcie;
	trans->cfg = cfg;
	trans_lockdep_init(trans);
	trans_pcie->trans = trans;
	spin_lock_init(&trans_pcie->irq_lock);
	spin_lock_init(&trans_pcie->reg_lock);
	init_waitqueue_head(&trans_pcie->ucode_write_waitq);

	/* W/A - seems to solve weird behavior. We need to remove this if we
	 * don't want to stay in L1 all the time. This wastes a lot of power */
	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
			       PCIE_LINK_STATE_CLKPM);

	if (pci_enable_device(pdev)) {
		err = -ENODEV;
		goto out_no_pci;
	}

	pci_set_master(pdev);

	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(36));
	if (!err)
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(36));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (!err)
			err = pci_set_consistent_dma_mask(pdev,
							  DMA_BIT_MASK(32));
		/* both attempts failed: */
		if (err) {
			dev_err(&pdev->dev, "No suitable DMA available\n");
			goto out_pci_disable_device;
		}
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		dev_err(&pdev->dev, "pci_request_regions failed\n");
		goto out_pci_disable_device;
	}

	trans_pcie->hw_base = pci_ioremap_bar(pdev, 0);
	if (!trans_pcie->hw_base) {
		dev_err(&pdev->dev, "pci_ioremap_bar failed\n");
		err = -ENODEV;
		goto out_pci_release_regions;
	}

	/* We disable the RETRY_TIMEOUT register (0x41) to keep
	 * PCI Tx retries from interfering with C3 CPU state */
	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);

	err = pci_enable_msi(pdev);
	if (err) {
		dev_err(&pdev->dev, "pci_enable_msi failed(0X%x)\n", err);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}

	trans->dev = &pdev->dev;
	trans_pcie->pci_dev = pdev;
	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);

	/* Initialize the wait queue for commands */
	init_waitqueue_head(&trans_pcie->wait_command_queue);

	snprintf(trans->dev_cmd_pool_name, sizeof(trans->dev_cmd_pool_name),
		 "iwl_cmd_pool:%s", dev_name(trans->dev));

	trans->dev_cmd_headroom = 0;
	trans->dev_cmd_pool =
		kmem_cache_create(trans->dev_cmd_pool_name,
				  sizeof(struct iwl_device_cmd)
				  + trans->dev_cmd_headroom,
				  sizeof(void *),
				  SLAB_HWCACHE_ALIGN,
				  NULL);

	if (!trans->dev_cmd_pool)
		goto out_pci_disable_msi;

	trans_pcie->inta_mask = CSR_INI_SET_MASK;

	if (iwl_pcie_alloc_ict(trans))
		goto out_free_cmd_pool;

	if (request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
				 iwl_pcie_irq_handler,
				 IRQF_SHARED, DRV_NAME, trans)) {
		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
		goto out_free_ict;
	}

	return trans;

out_free_ict:
	iwl_pcie_free_ict(trans);
out_free_cmd_pool:
	kmem_cache_destroy(trans->dev_cmd_pool);
out_pci_disable_msi:
	pci_disable_msi(pdev);
out_pci_release_regions:
	pci_release_regions(pdev);
out_pci_disable_device:
	pci_disable_device(pdev);
out_no_pci:
	kfree(trans);
	return NULL;
}
1592