/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
 * USA
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It provides
 * an abstraction of the underlying HW to the upper layer. The transport layer
 * doesn't provide any policy, algorithm or anything of this kind, but only
 * mechanisms to make the HW do something. It is not completely stateless but
 * close to it.
 * There is one implementation for each supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 *	1) A helper function is called during the module initialization and
 *	   registers the bus driver's ops with the transport's alloc function.
 *	2) The bus's probe function calls the transport layer's allocation
 *	   function. This function is, of course, bus specific.
 *	3) The allocation function spawns the upper layer, which registers
 *	   with mac80211.
 *
 *	4) At some point (e.g. mac80211's start call), the op_mode will call
 *	   the following sequence:
 *	   start_hw
 *	   start_fw
 *
 *	5) Then when finished (or reset):
 *	   stop_device
 *
 *	6) Eventually, the free function will be called.
 */
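
/*
 * A minimal sketch of steps 4 and 5 above as seen from the op_mode
 * (illustration only, not part of this API; the fw image pointer and the
 * error handling are assumptions, and the wrappers used here are defined
 * further down in this header):
 *
 *	ret = iwl_trans_start_hw(trans);
 *	if (!ret)
 *		ret = iwl_trans_start_fw(trans, fw, false);
 *	if (ret)
 *		iwl_trans_stop_device(trans);
 */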

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even when RF-kill is asserted
 *	(otherwise host commands are rejected with -ERFKILL while RF-kill
 *	is on).
 * @CMD_HIGH_PRIO: The command is high priority - it goes to the front of the
 *	command queue, but after other high priority commands. Valid only
 *	with CMD_ASYNC.
 * @CMD_SEND_IN_IDLE: The command should be sent even when the trans is idle.
 * @CMD_MAKE_TRANS_IDLE: The command response should mark the trans as idle.
 * @CMD_WAKE_UP_TRANS: The command response should wake up the trans
 *	(i.e. mark it as non-idle).
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_HIGH_PRIO		= BIT(3),
	CMD_SEND_IN_IDLE	= BIT(4),
	CMD_MAKE_TRANS_IDLE	= BIT(5),
	CMD_WAKE_UP_TRANS	= BIT(6),
	CMD_WANT_ASYNC_CALLBACK	= BIT(7),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
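
/*
 * A minimal sketch of building a synchronous host command that wants the
 * response buffer back (illustration only; the command id and payload are
 * placeholders, and iwl_trans_send_cmd() is declared later in this header):
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = EXAMPLE_CMD_ID,		// placeholder, not a real id
 *		.flags = CMD_WANT_SKB,
 *		.data = { &payload, },
 *		.len = { sizeof(payload), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &hcmd);
 *
 *	if (!ret) {
 *		// inspect hcmd.resp_pkt here ...
 *		iwl_free_resp(&hcmd);		// mandatory with CMD_WANT_SKB
 *	}
 */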

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
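/* worked example: IWL_MASK(0, 3) == 0xf and IWL_MASK(4, 7) == 0xf0, i.e. bits lo..hi set */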

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT	64
#define IWL_MAX_RX_HW_QUEUES	16

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(12 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
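
/*
 * A minimal sketch of how an op_mode can build the @command_groups table
 * passed in &struct iwl_trans_config below (illustration only; the command
 * ids and array names are placeholders, not definitions from this driver):
 *
 *	static const struct iwl_hcmd_names example_legacy_names[] = {
 *		HCMD_NAME(EXAMPLE_ALIVE_NOTIF),
 *		HCMD_NAME(EXAMPLE_SCAN_REQ_CMD),
 *	};
 *
 *	static const struct iwl_hcmd_arr example_command_groups[] = {
 *		[0x0] = HCMD_ARR(example_legacy_names),
 *	};
 *
 * The name arrays must be sorted by cmd_id; iwl_cmd_groups_verify_sorted()
 * checks this when the op_mode configures the transport.
 */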

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @sw_csum_tx: transport should compute the TCP checksum
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @sdio_adma_addr: the default address to set for the ADMA in SDIO mode until
 *	we get the ALIVE from the uCode
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	bool sw_csum_tx;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u32 sdio_adma_addr;

	u8 cb_data_offs;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
 *	out of a low power state. From that point on, the HW can send
 *	interrupts. May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kicks the fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. If low_power is true, the NIC will be put in low power state.
 *	From that point on, the HW will be stopped but will still issue an
 *	interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if %start_hw()
 *	was called but not %start_fw(). May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packet until ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @ref: grab a reference to the transport/FW layers, disallowing
 *	certain low power states
 * @unref: release a reference previously taken with @ref. Note that
 *	initially the reference count is 1, making an initial @unref
 *	necessary to allow low power states.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	int (*update_sf)(struct iwl_trans *trans,
			 struct iwl_sf_region *st_fwrd_space);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans, bool low_power);

	void (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* a000 functions */
	int (*txq_alloc)(struct iwl_trans *trans,
			 struct iwl_tx_queue_cfg_cmd *cmd,
			 int cmd_id,
			 unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	bool (*grab_nic_access)(struct iwl_trans *trans, unsigned long *flags);
	void (*release_nic_access)(struct iwl_trans *trans,
				   unsigned long *flags);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);
	void (*ref)(struct iwl_trans *trans);
	void (*unref)(struct iwl_trans *trans);
	int  (*suspend)(struct iwl_trans *trans);
	void (*resume)(struct iwl_trans *trans);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 const struct iwl_fw_dbg_trigger_tlv
						 *trigger);
};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: no fw has sent an alive response
 * @IWL_TRANS_FW_ALIVE: a fw has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW = 0,
	IWL_TRANS_FW_ALIVE = 1,
};

/**
 * DOC: Platform power management
 *
 * There are two types of platform power management: system-wide
 * (WoWLAN) and runtime.
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode).  The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * In runtime power management, only the devices which are themselves
 * idle enter a low power state.  This is done at runtime, which means
 * that the entire system is still running normally.  This mode is
 * usually triggered automatically by the device driver and requires
 * the ability to enter and exit the low power modes in a very short
 * time, so there is not much impact in usability.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *	- D0I3: the device is in low power mode and reacts to any
 *		activity (e.g. RX);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.  The NIC can be
 * in D0I3 mode even if, for instance, the PCI device is in D3 state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in idle mode (i.e. runtime power management) or when
 * in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device.  At runtime, this means that nothing happens and the
 *	device always remains active.  In system-wide suspend mode,
 *	it means that all connections will be closed automatically
 *	by mac80211 before the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 *	For runtime power management, this mode is not officially
 *	supported.
 * @IWL_PLAT_PM_MODE_D0I3: the device goes into D0I3 mode.
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
	IWL_PLAT_PM_MODE_D0I3,
};

/* Max time to wait for trans to become idle/non-idle on d0i3
 * enter/exit (in msecs).
 */
#define IWL_TRANS_IDLE_TIMEOUT 2000

/**
 * struct iwl_trans - transport common data
 *
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rev: the revision data of the HW
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @wide_cmd_header: true when ucode supports wide command header format
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @dbg_dest_tlv: points to the destination TLV for debug
 * @dbg_conf_tlv: array of pointers to configuration TLVs for debug
 * @dbg_trigger_tlv: array of pointers to triggers TLVs for debug
 * @dbg_dest_reg_num: num of reg_ops in %dbg_dest_tlv
 * @paging_req_addr: The location where the FW will upload / download the pages
 *	from. The address is set by the opmode
 * @paging_db: Pointer to the opmode paging data base, the pointer is set by
 *	the opmode.
 * @paging_download_buf: Buffer used for copying all of the pages before
 *	downloading them to the FW. The buffer is allocated in the opmode
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @runtime_pm_mode: the runtime power management mode in use.  This
 *	mode is set during the initialization phase and is not
 *	supposed to change during runtime.
 */
struct iwl_trans {
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rf_id;
	u32 hw_id;
	char hw_id_str[52];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	u8 num_rx_queues;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	u64 dflt_pwr_limit;

	const struct iwl_fw_dbg_dest_tlv *dbg_dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
	u8 dbg_dest_reg_num;

	/*
	 * Paging parameters - All of the parameters should be set by the
	 * opmode when paging is enabled
	 */
	u32 paging_req_addr;
	struct iwl_fw_paging *paging_db;
	void *paging_download_buf;

	enum iwl_plat_pm_mode system_pm_mode;
	enum iwl_plat_pm_mode runtime_pm_mode;
	bool suspending;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[0] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
{
	might_sleep();

	return trans->ops->start_hw(trans, low_power);
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	return trans->ops->start_hw(trans, true);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	return trans->ops->start_fw(trans, fw, run_in_rfkill);
}

static inline int iwl_trans_update_sf(struct iwl_trans *trans,
				      struct iwl_sf_region *st_fwrd_space)
{
	might_sleep();

	if (trans->ops->update_sf)
		return trans->ops->update_sf(trans, st_fwrd_space);

	return 0;
}

static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
					  bool low_power)
{
	might_sleep();

	trans->ops->stop_device(trans, low_power);

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	_iwl_trans_stop_device(trans, true);
}

static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
					bool reset)
{
	might_sleep();
	if (trans->ops->d3_suspend)
		trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return 0;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline void iwl_trans_ref(struct iwl_trans *trans)
{
	if (trans->ops->ref)
		trans->ops->ref(trans);
}

static inline void iwl_trans_unref(struct iwl_trans *trans)
{
	if (trans->ops->unref)
		trans->ops->unref(trans);
}

static inline int iwl_trans_suspend(struct iwl_trans *trans)
{
	if (!trans->ops->suspend)
		return 0;

	return trans->ops->suspend(trans);
}

static inline void iwl_trans_resume(struct iwl_trans *trans)
{
	if (trans->ops->resume)
		trans->ops->resume(trans);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans,
		    const struct iwl_fw_dbg_trigger_tlv *trigger)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, trigger);
}

static inline struct iwl_device_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_alloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    struct iwl_tx_queue_cfg_cmd *cmd,
		    int cmd_id,
		    unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, cmd, cmd_id, queue_wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans, flags)	\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans, flags)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans, unsigned long *flags)
{
	trans->ops->release_nic_access(trans, flags);
	__release(nic_access);
}
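
/*
 * A minimal sketch of the grab/release pairing above (illustration only;
 * EXAMPLE_REG is a placeholder register offset).  Sleeping is not allowed
 * between the two calls:
 *
 *	unsigned long flags;
 *	u32 val = 0;
 *
 *	if (iwl_trans_grab_nic_access(trans, &flags)) {
 *		val = iwl_trans_read_prph(trans, EXAMPLE_REG);
 *		iwl_trans_release_nic_access(trans, &flags);
 *	}
 */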

static inline void iwl_trans_fw_error(struct iwl_trans *trans)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status))
		iwl_op_mode_nic_error(trans->op_mode);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_cfg *cfg,
				  const struct iwl_trans_ops *ops);
void iwl_trans_free(struct iwl_trans *trans);

/*****************************************************
 * driver (transport) register/unregister functions
 *****************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);

#endif /* __iwl_trans_h__ */