/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 - 2020 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "fw/dbg.h"

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
int iwl_pcie_gen2_apm_init(struct iwl_trans *trans)
{
	int ret = 0;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/*
	 * Disable L0s without affecting L1;
	 * don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

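	/*
	 * iwl_finish_nic_init() (common trans code) is expected to set the
	 * INIT_DONE bit and poll until the MAC clock reports ready.
	 */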
	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

static void iwl_pcie_gen2_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

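	/*
	 * When leaving the op-mode, the APM may already have been stopped;
	 * bring it back up first, since the PREPARE/ENABLE_PME handshake
	 * with the ME below presumably needs the APM to be running.
	 */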
	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_gen2_apm_init(trans);

		/* inform ME that we are leaving */
		iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
			    CSR_RESET_LINK_PWR_MGMT_DISABLED);
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE |
			    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
		mdelay(1);
		iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
			      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	iwl_trans_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_txq_gen2_tx_stop(trans);
		iwl_pcie_rx_stop(trans);
	}

	iwl_pcie_ctxt_info_free_paging(trans);
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		iwl_pcie_ctxt_info_gen3_free(trans);
	else
		iwl_pcie_ctxt_info_free(trans);

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_gen2_apm_stop(trans, false);

	iwl_trans_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending the HW RF-kill interrupt until
	 * it is ACKed, even when the interrupt should be masked.
	 * Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

void iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_gen2_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

static int iwl_pcie_gen2_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,
			       trans->cfg->min_txq_size);

	/* TODO: most of the logic can be removed in A0 - but not in Z0 */
	spin_lock(&trans_pcie->irq_lock);
	iwl_pcie_gen2_apm_init(trans);
	spin_unlock(&trans_pcie->irq_lock);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	if (iwl_pcie_gen2_rx_init(trans))
		return -ENOMEM;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))
		return -ENOMEM;

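	/*
	 * The write below appears to enable the "shadow registers" feature,
	 * which lets the driver update queue pointers without having to
	 * wake up the MAC first.
	 */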
	/* enable shadow regs in HW */
	iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
	IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");

	return 0;
}

void iwl_trans_pcie_gen2_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

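	/*
	 * ICT is the interrupt cause table the device writes to DRAM (in
	 * non-MSI-X mode, as far as we understand); reset it for the newly
	 * started firmware.
	 */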
	iwl_pcie_reset_ict(trans);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* now that we got alive we can free the fw image & the context info.
	 * paging memory cannot be freed, though, since the FW will still
	 * use it
	 */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		iwl_pcie_ctxt_info_free(trans);

	/*
	 * Re-enable all the interrupts, including the RF-Kill one, now that
	 * the firmware is alive.
	 */
	iwl_enable_interrupts(trans);
	mutex_lock(&trans_pcie->mutex);
	iwl_pcie_check_hw_rf_kill(trans);
	mutex_unlock(&trans_pcie->mutex);
}

static void iwl_pcie_set_ltr(struct iwl_trans *trans)
{
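	/*
	 * LTR is the PCIe Latency Tolerance Reporting mechanism: the value
	 * programmed here tells the platform how much memory-access latency
	 * the device can tolerate.
	 */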
	u32 ltr_val = CSR_LTR_LONG_VAL_AD_NO_SNOOP_REQ |
		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_SCALE) |
		      u32_encode_bits(250,
				      CSR_LTR_LONG_VAL_AD_NO_SNOOP_VAL) |
		      CSR_LTR_LONG_VAL_AD_SNOOP_REQ |
		      u32_encode_bits(CSR_LTR_LONG_VAL_AD_SCALE_USEC,
				      CSR_LTR_LONG_VAL_AD_SNOOP_SCALE) |
		      u32_encode_bits(250, CSR_LTR_LONG_VAL_AD_SNOOP_VAL);

	/*
	 * To work around hardware latency issues during the boot process,
	 * initialize the LTR to ~250 usec (see ltr_val above).
	 * The firmware initializes this again later (to a smaller value).
	 */
	if ((trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_AX210 ||
	     trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) &&
	    !trans->trans_cfg->integrated) {
		iwl_write32(trans, CSR_LTR_LONG_VAL_AD, ltr_val);
	} else if (trans->trans_cfg->integrated &&
		   trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000) {
		iwl_write_prph(trans, HPM_MAC_LTR_CSR, HPM_MAC_LRT_ENABLE_ALL);
		iwl_write_prph(trans, HPM_UMAC_LTR, ltr_val);
	}
}

int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
				 const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		return -EIO;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_gen2_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

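	/*
	 * The context info describes the firmware image to the device; on
	 * these devices the firmware sections are understood to be fetched
	 * by the device via DMA rather than written by the driver.
	 */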
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
	else
		ret = iwl_pcie_ctxt_info_init(trans, fw);
	if (ret)
		goto out;

	iwl_pcie_set_ltr(trans);

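	/*
	 * Writing UREG_CPU_INIT_RUN presumably kicks the embedded CPU into
	 * running the image described by the context info.
	 */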
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		iwl_write_umac_prph(trans, UREG_CPU_INIT_RUN, 1);
	else
		iwl_write_prph(trans, UREG_CPU_INIT_RUN, 1);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}
394