/* SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause */
/*
 * Copyright (C) 2005-2014, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#ifndef __iwl_trans_h__
#define __iwl_trans_h__

#include <linux/ieee80211.h>
#include <linux/mm.h> /* for page_address */
#include <linux/lockdep.h>
#include <linux/kernel.h>

#include "iwl-debug.h"
#include "iwl-config.h"
#include "fw/img.h"
#include "iwl-op-mode.h"
#include <linux/firmware.h>
#include "fw/api/cmdhdr.h"
#include "fw/api/txq.h"
#include "fw/api/dbg-tlv.h"
#include "iwl-dbg-tlv.h"

/**
 * DOC: Transport layer - what is it?
 *
 * The transport layer is the layer that deals with the HW directly. It
 * provides an abstraction of the underlying HW to the upper layer. The
 * transport layer doesn't provide any policy, algorithm or anything of this
 * kind, but only mechanisms to make the HW do something. It is not completely
 * stateless but close to it.
 * There is one implementation for each supported bus.
 */

/**
 * DOC: Life cycle of the transport layer
 *
 * The transport layer has a very precise life cycle.
 *
 * 1) A helper function is called during the module initialization and
 *    registers the bus driver's ops with the transport's alloc function.
 * 2) The bus's probe function calls the transport layer's allocation
 *    function. This function is, of course, bus specific.
 * 3) The allocation function spawns the upper layer, which registers
 *    with mac80211.
 *
 * 4) At some point (i.e. mac80211's start call), the op_mode will call
 *    the following sequence:
 *    start_hw
 *    start_fw
 *
 * 5) Then when finished (or reset):
 *    stop_device
 *
 * 6) Eventually, the free function will be called.
 *    (See the sketch below for what this sequence looks like from the
 *    op_mode's side.)
 */
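
/*
 * Illustrative sketch only (not part of this header): roughly how an
 * op_mode drives steps 4 and 5 above. Error handling is minimal and the
 * fw image argument is a placeholder.
 *
 *	static int example_op_mode_start(struct iwl_trans *trans,
 *					 const struct fw_img *fw)
 *	{
 *		int ret;
 *
 *		ret = iwl_trans_start_hw(trans);	// step 4: HW first
 *		if (ret)
 *			return ret;
 *
 *		ret = iwl_trans_start_fw(trans, fw, false);	// then FW
 *		if (ret)
 *			iwl_trans_stop_device(trans);	// step 5 on failure
 *		return ret;
 *	}
 */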

/* default preset 0 (start from bit 16) */
#define IWL_FW_DBG_DOMAIN_POS	16
#define IWL_FW_DBG_DOMAIN	BIT(IWL_FW_DBG_DOMAIN_POS)

#define IWL_TRANS_FW_DBG_DOMAIN(trans)	IWL_FW_INI_DOMAIN_ALWAYS_ON

#define FH_RSCSR_FRAME_SIZE_MSK		0x00003FFF	/* bits 0-13 */
#define FH_RSCSR_FRAME_INVALID		0x55550000
#define FH_RSCSR_FRAME_ALIGN		0x40
#define FH_RSCSR_RPA_EN			BIT(25)
#define FH_RSCSR_RADA_EN		BIT(26)
#define FH_RSCSR_RXQ_POS		16
#define FH_RSCSR_RXQ_MASK		0x3F0000

struct iwl_rx_packet {
	/*
	 * The first 4 bytes of the RX frame header contain both the RX frame
	 * size and some flags.
	 * Bit fields:
	 * 31:    flag flush RB request
	 * 30:    flag ignore TC (terminal counter) request
	 * 29:    flag fast IRQ request
	 * 28-27: Reserved
	 * 26:    RADA enabled
	 * 25:    Offload enabled
	 * 24:    RPF enabled
	 * 23:    RSS enabled
	 * 22:    Checksum enabled
	 * 21-16: RX queue
	 * 15-14: Reserved
	 * 13-00: RX frame size
	 */
	__le32 len_n_flags;
	struct iwl_cmd_header hdr;
	u8 data[];
} __packed;

static inline u32 iwl_rx_packet_len(const struct iwl_rx_packet *pkt)
{
	return le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
}

static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
{
	return iwl_rx_packet_len(pkt) - sizeof(pkt->hdr);
}
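
/*
 * Example (illustrative only): given a received packet, the payload
 * length is the framed length minus the command header, so a
 * notification handler can bounds-check its payload struct like this
 * ("struct my_notif" is a placeholder):
 *
 *	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 *
 *	if (iwl_rx_packet_payload_len(pkt) < sizeof(struct my_notif))
 *		return;
 */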

/**
 * enum CMD_MODE - how to send the host commands?
 *
 * @CMD_ASYNC: Return right away and don't wait for the response
 * @CMD_WANT_SKB: Not valid with CMD_ASYNC. The caller needs the buffer of
 *	the response. The caller needs to call iwl_free_resp when done.
 * @CMD_SEND_IN_RFKILL: Send the command even if the NIC is in RF-kill.
 * @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
 *	called after this command completes. Valid only with CMD_ASYNC.
 * @CMD_SEND_IN_D3: Allow the command to be sent in D3 mode, relevant to
 *	SUSPEND and RESUME commands. We are in D3 mode when we set
 *	trans->system_pm_mode to IWL_PLAT_PM_MODE_D3.
 */
enum CMD_MODE {
	CMD_ASYNC		= BIT(0),
	CMD_WANT_SKB		= BIT(1),
	CMD_SEND_IN_RFKILL	= BIT(2),
	CMD_WANT_ASYNC_CALLBACK	= BIT(3),
	CMD_SEND_IN_D3		= BIT(4),
};

#define DEF_CMD_PAYLOAD_SIZE 320

/**
 * struct iwl_device_cmd
 *
 * For allocation of the command and tx queues, this establishes the overall
 * size of the largest command we send to uCode, except for commands that
 * aren't fully copied and use other TFD space.
 */
struct iwl_device_cmd {
	union {
		struct {
			struct iwl_cmd_header hdr;	/* uCode API */
			u8 payload[DEF_CMD_PAYLOAD_SIZE];
		};
		struct {
			struct iwl_cmd_header_wide hdr_wide;
			u8 payload_wide[DEF_CMD_PAYLOAD_SIZE -
					sizeof(struct iwl_cmd_header_wide) +
					sizeof(struct iwl_cmd_header)];
		};
	};
} __packed;

/**
 * struct iwl_device_tx_cmd - buffer for TX command
 * @hdr: the header
 * @payload: the payload placeholder
 *
 * The actual structure is sized dynamically according to need.
 */
struct iwl_device_tx_cmd {
	struct iwl_cmd_header hdr;
	u8 payload[];
} __packed;

#define TFD_MAX_PAYLOAD_SIZE (sizeof(struct iwl_device_cmd))

/*
 * number of transfer buffers (fragments) per transmit frame descriptor;
 * this is just the driver's idea, the hardware supports 20
 */
#define IWL_MAX_CMD_TBS_PER_TFD	2

/* We need 2 entries for the TX command and header, and another one might
 * be needed for potential data in the SKB's head. The remaining ones can
 * be used for frags.
 */
#define IWL_TRANS_MAX_FRAGS(trans) ((trans)->txqs.tfd.max_tbs - 3)

/**
 * enum iwl_hcmd_dataflag - flag for each one of the chunks of the command
 *
 * @IWL_HCMD_DFL_NOCOPY: By default, the command is copied to the host command's
 *	ring. The transport layer doesn't map the command's buffer to DMA, but
 *	rather copies it to a previously allocated DMA buffer. This flag tells
 *	the transport layer not to copy the command, but to map the existing
 *	buffer (that is passed in) instead. This saves the memcpy and allows
 *	commands that are bigger than the fixed buffer to be submitted.
 *	Note that a TFD entry after a NOCOPY one cannot be a normal copied one.
 * @IWL_HCMD_DFL_DUP: Only valid without NOCOPY, duplicate the memory for this
 *	chunk internally and free it again after the command completes. This
 *	can (currently) be used only once per command.
 *	Note that a TFD entry after a DUP one cannot be a normal copied one.
 */
enum iwl_hcmd_dataflag {
	IWL_HCMD_DFL_NOCOPY	= BIT(0),
	IWL_HCMD_DFL_DUP	= BIT(1),
};

enum iwl_error_event_table_status {
	IWL_ERROR_EVENT_TABLE_LMAC1 = BIT(0),
	IWL_ERROR_EVENT_TABLE_LMAC2 = BIT(1),
	IWL_ERROR_EVENT_TABLE_UMAC = BIT(2),
	IWL_ERROR_EVENT_TABLE_TCM1 = BIT(3),
	IWL_ERROR_EVENT_TABLE_TCM2 = BIT(4),
	IWL_ERROR_EVENT_TABLE_RCM1 = BIT(5),
	IWL_ERROR_EVENT_TABLE_RCM2 = BIT(6),
};

/**
 * struct iwl_host_cmd - Host command to the uCode
 *
 * @data: array of chunks that composes the data of the host command
 * @resp_pkt: response packet, if %CMD_WANT_SKB was set
 * @_rx_page_order: (internally used to free response packet)
 * @_rx_page_addr: (internally used to free response packet)
 * @flags: can be CMD_*
 * @len: array of the lengths of the chunks in data
 * @dataflags: IWL_HCMD_DFL_*
 * @id: command id of the host command, for wide commands encoding the
 *	version and group as well
 */
struct iwl_host_cmd {
	const void *data[IWL_MAX_CMD_TBS_PER_TFD];
	struct iwl_rx_packet *resp_pkt;
	unsigned long _rx_page_addr;
	u32 _rx_page_order;

	u32 flags;
	u32 id;
	u16 len[IWL_MAX_CMD_TBS_PER_TFD];
	u8 dataflags[IWL_MAX_CMD_TBS_PER_TFD];
};

static inline void iwl_free_resp(struct iwl_host_cmd *cmd)
{
	free_pages(cmd->_rx_page_addr, cmd->_rx_page_order);
}
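
/*
 * Example (illustrative sketch, not part of the API): sending a
 * synchronous command and freeing the response. The command ID and
 * payload struct below are placeholders, not real firmware API.
 *
 *	struct my_cmd_data data = {};		// hypothetical payload
 *	struct iwl_host_cmd cmd = {
 *		.id = MY_CMD_ID,		// hypothetical command ID
 *		.flags = CMD_WANT_SKB,		// keep the response packet
 *		.data = { &data, },
 *		.len = { sizeof(data), },
 *	};
 *	int ret = iwl_trans_send_cmd(trans, &cmd);
 *
 *	if (!ret) {
 *		// ... inspect cmd.resp_pkt ...
 *		iwl_free_resp(&cmd);	// mandatory with CMD_WANT_SKB
 *	}
 */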

struct iwl_rx_cmd_buffer {
	struct page *_page;
	int _offset;
	bool _page_stolen;
	u32 _rx_page_order;
	unsigned int truesize;
};

static inline void *rxb_addr(struct iwl_rx_cmd_buffer *r)
{
	return (void *)((unsigned long)page_address(r->_page) + r->_offset);
}

static inline int rxb_offset(struct iwl_rx_cmd_buffer *r)
{
	return r->_offset;
}

static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
{
	r->_page_stolen = true;
	get_page(r->_page);
	return r->_page;
}

static inline void iwl_free_rxb(struct iwl_rx_cmd_buffer *r)
{
	__free_pages(r->_page, r->_rx_page_order);
}

#define MAX_NO_RECLAIM_CMDS	6

#define IWL_MASK(lo, hi) ((1 << (hi)) | ((1 << (hi)) - (1 << (lo))))
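
/*
 * Worked example: IWL_MASK(lo, hi) sets bits lo..hi inclusive, e.g.
 *	IWL_MASK(0, 13) = (1 << 13) | ((1 << 13) - (1 << 0))
 *	                = 0x2000 | 0x1FFF = 0x3FFF
 * which matches FH_RSCSR_FRAME_SIZE_MSK above (bits 0-13).
 */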

/*
 * Maximum number of HW queues the transport layer
 * currently supports
 */
#define IWL_MAX_HW_QUEUES		32
#define IWL_MAX_TVQM_QUEUES		512

#define IWL_MAX_TID_COUNT	8
#define IWL_MGMT_TID		15
#define IWL_FRAME_LIMIT		64
#define IWL_MAX_RX_HW_QUEUES	16
#define IWL_9000_MAX_RX_HW_QUEUES	1

/**
 * enum iwl_d3_status - WoWLAN image/device status
 * @IWL_D3_STATUS_ALIVE: firmware is still running after resume
 * @IWL_D3_STATUS_RESET: device was reset while suspended
 */
enum iwl_d3_status {
	IWL_D3_STATUS_ALIVE,
	IWL_D3_STATUS_RESET,
};

/**
 * enum iwl_trans_status: transport status flags
 * @STATUS_SYNC_HCMD_ACTIVE: a SYNC command is being processed
 * @STATUS_DEVICE_ENABLED: APM is enabled
 * @STATUS_TPOWER_PMI: the device might be asleep (need to wake it up)
 * @STATUS_INT_ENABLED: interrupts are enabled
 * @STATUS_RFKILL_HW: the actual HW state of the RF-kill switch
 * @STATUS_RFKILL_OPMODE: RF-kill state reported to opmode
 * @STATUS_FW_ERROR: the fw is in error state
 * @STATUS_TRANS_GOING_IDLE: shutting down the trans, only special commands
 *	are sent
 * @STATUS_TRANS_IDLE: the trans is idle - general commands are not to be sent
 * @STATUS_TRANS_DEAD: trans is dead - avoid any read/write operation
 * @STATUS_SUPPRESS_CMD_ERROR_ONCE: suppress "FW error in SYNC CMD" once,
 *	e.g. for testing
 */
enum iwl_trans_status {
	STATUS_SYNC_HCMD_ACTIVE,
	STATUS_DEVICE_ENABLED,
	STATUS_TPOWER_PMI,
	STATUS_INT_ENABLED,
	STATUS_RFKILL_HW,
	STATUS_RFKILL_OPMODE,
	STATUS_FW_ERROR,
	STATUS_TRANS_GOING_IDLE,
	STATUS_TRANS_IDLE,
	STATUS_TRANS_DEAD,
	STATUS_SUPPRESS_CMD_ERROR_ONCE,
};

static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return get_order(2 * 1024);
	case IWL_AMSDU_4K:
		return get_order(4 * 1024);
	case IWL_AMSDU_8K:
		return get_order(8 * 1024);
	case IWL_AMSDU_12K:
		return get_order(16 * 1024);
	default:
		WARN_ON(1);
		return -1;
	}
}

static inline int
iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
{
	switch (rb_size) {
	case IWL_AMSDU_2K:
		return 2 * 1024;
	case IWL_AMSDU_4K:
		return 4 * 1024;
	case IWL_AMSDU_8K:
		return 8 * 1024;
	case IWL_AMSDU_12K:
		return 16 * 1024;
	default:
		WARN_ON(1);
		return 0;
	}
}
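
/*
 * Note that both helpers deliberately map IWL_AMSDU_12K to 16K values;
 * page-based buffers come in power-of-two sizes, so a 12K A-MSDU is
 * carried in a 16KB buffer. This is intentional, not a typo. E.g.:
 *
 *	iwl_trans_get_rb_size(IWL_AMSDU_4K)        == 4096
 *	iwl_trans_get_rb_size_order(IWL_AMSDU_12K) == get_order(16384)
 */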

struct iwl_hcmd_names {
	u8 cmd_id;
	const char *const cmd_name;
};

#define HCMD_NAME(x)	\
	{ .cmd_id = x, .cmd_name = #x }

struct iwl_hcmd_arr {
	const struct iwl_hcmd_names *arr;
	int size;
};

#define HCMD_ARR(x)	\
	{ .arr = x, .size = ARRAY_SIZE(x) }
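
/*
 * Example (illustrative, with made-up command names): an op_mode builds
 * per-group name tables for debugging like this; entries must be sorted
 * by cmd_id (see iwl_cmd_groups_verify_sorted() below).
 *
 *	static const struct iwl_hcmd_names example_legacy_names[] = {
 *		HCMD_NAME(MY_CMD_A),	// hypothetical, cmd_id 0x01
 *		HCMD_NAME(MY_CMD_B),	// hypothetical, cmd_id 0x02
 *	};
 *
 *	static const struct iwl_hcmd_arr example_groups[] = {
 *		[0x0] = HCMD_ARR(example_legacy_names),
 *	};
 */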

/**
 * struct iwl_dump_sanitize_ops - dump sanitization operations
 * @frob_txf: Scrub the TX FIFO data
 * @frob_hcmd: Scrub a host command, the %hcmd pointer is to the header
 *	but that might be short or long (&struct iwl_cmd_header or
 *	&struct iwl_cmd_header_wide)
 * @frob_mem: Scrub memory data
 */
struct iwl_dump_sanitize_ops {
	void (*frob_txf)(void *ctx, void *buf, size_t buflen);
	void (*frob_hcmd)(void *ctx, void *hcmd, size_t buflen);
	void (*frob_mem)(void *ctx, u32 mem_addr, void *mem, size_t buflen);
};

/**
 * struct iwl_trans_config - transport configuration
 *
 * @op_mode: pointer to the upper layer.
 * @cmd_queue: the index of the command queue.
 *	Must be set before start_fw.
 * @cmd_fifo: the fifo for host commands
 * @cmd_q_wdg_timeout: the timeout of the watchdog timer for the command queue.
 * @no_reclaim_cmds: Some devices erroneously don't set the
 *	SEQ_RX_FRAME bit on some notifications, this is the
 *	list of such notifications to filter. Max length is
 *	%MAX_NO_RECLAIM_CMDS.
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size: RX buffer size needed for A-MSDUs
 *	if unset 4k will be the RX buffer size
 * @bc_table_dword: set to true if the BC table expects the byte count to be
 *	in DWORD (as opposed to bytes)
 * @scd_set_active: should the transport configure the SCD for HCMD queue
 * @command_groups: array of command groups, each member is an array of the
 *	commands in the group; for debugging only
 * @command_groups_size: number of command groups, to avoid illegal access
 * @cb_data_offs: offset inside skb->cb to store transport data at, must have
 *	space for at least two pointers
 * @fw_reset_handshake: firmware supports reset flow handshake
 * @queue_alloc_cmd_ver: queue allocation command version, set to 0
 *	for using the older SCD_QUEUE_CFG, set to the version of
 *	SCD_QUEUE_CONFIG_CMD otherwise.
 */
struct iwl_trans_config {
	struct iwl_op_mode *op_mode;

	u8 cmd_queue;
	u8 cmd_fifo;
	unsigned int cmd_q_wdg_timeout;
	const u8 *no_reclaim_cmds;
	unsigned int n_no_reclaim_cmds;

	enum iwl_amsdu_size rx_buf_size;
	bool bc_table_dword;
	bool scd_set_active;
	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;

	u8 cb_data_offs;
	bool fw_reset_handshake;
	u8 queue_alloc_cmd_ver;
};

struct iwl_trans_dump_data {
	u32 len;
	u8 data[];
};

struct iwl_trans;

struct iwl_trans_txq_scd_cfg {
	u8 fifo;
	u8 sta_id;
	u8 tid;
	bool aggregate;
	int frame_limit;
};

/**
 * struct iwl_trans_rxq_dma_data - RX queue DMA data
 * @fr_bd_cb: DMA address of free BD cyclic buffer
 * @fr_bd_wid: Initial write index of the free BD cyclic buffer
 * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
 * @ur_bd_cb: DMA address of used BD cyclic buffer
 */
struct iwl_trans_rxq_dma_data {
	u64 fr_bd_cb;
	u32 fr_bd_wid;
	u64 urbd_stts_wrptr;
	u64 ur_bd_cb;
};

/* maximal number of DRAM MAP entries supported by FW */
#define IPC_DRAM_MAP_ENTRY_NUM_MAX 64

/**
 * struct iwl_pnvm_image - contains info about the parsed pnvm image
 * @chunks: array of pointers to pnvm payloads and their sizes
 * @n_chunks: the number of the pnvm payloads.
 * @version: the version of the loaded PNVM image
 */
struct iwl_pnvm_image {
	struct {
		const void *data;
		u32 len;
	} chunks[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	u32 n_chunks;
	u32 version;
};

/**
 * struct iwl_trans_ops - transport specific operations
 *
 * All the handlers MUST be implemented
 *
 * @start_hw: starts the HW. From that point on, the HW can send interrupts.
 *	May sleep.
 * @op_mode_leave: Turn off the HW RF kill indication if on
 *	May sleep
 * @start_fw: allocates and inits all the resources for the transport
 *	layer. Also kicks a fw image.
 *	May sleep
 * @fw_alive: called when the fw sends alive notification. If the fw provides
 *	the SCD base address in SRAM, then provide it here, or 0 otherwise.
 *	May sleep
 * @stop_device: stops the whole device (embedded CPU put to reset) and stops
 *	the HW. From that point on, the HW will be stopped but will still issue
 *	an interrupt if the HW RF kill switch is triggered.
 *	This callback must do the right thing and not crash even if @start_hw
 *	was called but not @start_fw. May sleep.
 * @d3_suspend: put the device into the correct mode for WoWLAN during
 *	suspend. This is optional, if not implemented WoWLAN will not be
 *	supported. This callback may sleep.
 * @d3_resume: resume the device after WoWLAN, enabling the opmode to
 *	talk to the WoWLAN image to get its status. This is optional, if not
 *	implemented WoWLAN will not be supported. This callback may sleep.
 * @send_cmd: send a host command. Must return -ERFKILL if RFkill is asserted.
 *	If RFkill is asserted in the middle of a SYNC host command, it must
 *	return -ERFKILL straight away.
 *	May sleep only if CMD_ASYNC is not set
 * @tx: send an skb. The transport relies on the op_mode to zero the
 *	ieee80211_tx_info->driver_data. If the MPDU is an A-MSDU, all
 *	the CSUM will be taken care of (TCP CSUM and IP header in case of
 *	IPv4). If the MPDU is a single MSDU, the op_mode must compute the IP
 *	header if it is IPv4.
 *	Must be atomic
 * @reclaim: free packets until ssn. Returns a list of freed packets.
 *	Must be atomic
 * @txq_enable: setup a queue. To setup an AC queue, use the
 *	iwl_trans_ac_txq_enable wrapper. fw_alive must have been called before
 *	this one. The op_mode must not configure the HCMD queue. The scheduler
 *	configuration may be %NULL, in which case the hardware will not be
 *	configured. If true is returned, the operation mode needs to increment
 *	the sequence number of the packets routed to this queue because of a
 *	hardware scheduler bug. May sleep.
 * @txq_disable: de-configure a Tx queue to send AMPDUs
 *	Must be atomic
 * @txq_set_shared_mode: change Tx queue shared/unshared marking
 * @wait_tx_queues_empty: wait until tx queues are empty. May sleep.
 * @wait_txq_empty: wait until specific tx queue is empty. May sleep.
 * @freeze_txq_timer: prevents the timer of the queue from firing until the
 *	queue is set to awake. Must be atomic.
 * @block_txq_ptrs: stop updating the write pointers of the Tx queues. Note
 *	that the transport needs to refcount the calls since this function
 *	will be called several times with block = true, and then the queues
 *	need to be unblocked only after the same number of calls with
 *	block = false.
 * @write8: write a u8 to a register at offset ofs from the BAR
 * @write32: write a u32 to a register at offset ofs from the BAR
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
 * @read_mem: read device's SRAM in DWORD
 * @write_mem: write device's SRAM in DWORD. If %buf is %NULL, then the memory
 *	will be zeroed.
 * @read_config32: read a u32 value from the device's config space at
 *	the given offset.
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
 * @grab_nic_access: wake the NIC to be able to access non-HBUS regs.
 *	Sleeping is not allowed between grab_nic_access and
 *	release_nic_access.
 * @release_nic_access: let the NIC go to sleep. The "flags" parameter
 *	must be the same one that was sent before to the grab_nic_access.
 * @set_bits_mask: set SRAM register according to value and mask.
 * @dump_data: return a vmalloc'ed buffer with debug data, maybe containing last
 *	TX'ed commands and similar. The buffer will be vfree'd by the caller.
 *	Note that the transport must fill in the proper file headers.
 * @debugfs_cleanup: used in the driver unload flow to make a proper cleanup
 *	of the trans debugfs
 * @load_pnvm: save the pnvm data in DRAM
 * @set_pnvm: set the pnvm data in the prph scratch buffer, inside the
 *	context info.
 * @load_reduce_power: copy reduce power table to the corresponding DRAM memory
 * @set_reduce_power: set reduce power table addresses in the scratch buffer
 * @interrupts: disable/enable interrupts to transport
 */
struct iwl_trans_ops {

	int (*start_hw)(struct iwl_trans *iwl_trans);
	void (*op_mode_leave)(struct iwl_trans *iwl_trans);
	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
			bool run_in_rfkill);
	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
	void (*stop_device)(struct iwl_trans *trans);

	int (*d3_suspend)(struct iwl_trans *trans, bool test, bool reset);
	int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
			 bool test, bool reset);

	int (*send_cmd)(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

	int (*tx)(struct iwl_trans *trans, struct sk_buff *skb,
		  struct iwl_device_tx_cmd *dev_cmd, int queue);
	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
			struct sk_buff_head *skbs, bool is_flush);

	void (*set_q_ptrs)(struct iwl_trans *trans, int queue, int ptr);

	bool (*txq_enable)(struct iwl_trans *trans, int queue, u16 ssn,
			   const struct iwl_trans_txq_scd_cfg *cfg,
			   unsigned int queue_wdg_timeout);
	void (*txq_disable)(struct iwl_trans *trans, int queue,
			    bool configure_scd);
	/* 22000 functions */
	int (*txq_alloc)(struct iwl_trans *trans, u32 flags,
			 u32 sta_mask, u8 tid,
			 int size, unsigned int queue_wdg_timeout);
	void (*txq_free)(struct iwl_trans *trans, int queue);
	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
			    struct iwl_trans_rxq_dma_data *data);

	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
				    bool shared);

	int (*wait_tx_queues_empty)(struct iwl_trans *trans, u32 txq_bm);
	int (*wait_txq_empty)(struct iwl_trans *trans, int queue);
	void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
				 bool freeze);
	void (*block_txq_ptrs)(struct iwl_trans *trans, bool block);

	void (*write8)(struct iwl_trans *trans, u32 ofs, u8 val);
	void (*write32)(struct iwl_trans *trans, u32 ofs, u32 val);
	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
	int (*read_mem)(struct iwl_trans *trans, u32 addr,
			void *buf, int dwords);
	int (*write_mem)(struct iwl_trans *trans, u32 addr,
			 const void *buf, int dwords);
	int (*read_config32)(struct iwl_trans *trans, u32 ofs, u32 *val);
	void (*configure)(struct iwl_trans *trans,
			  const struct iwl_trans_config *trans_cfg);
	void (*set_pmi)(struct iwl_trans *trans, bool state);
	int (*sw_reset)(struct iwl_trans *trans, bool retake_ownership);
	bool (*grab_nic_access)(struct iwl_trans *trans);
	void (*release_nic_access)(struct iwl_trans *trans);
	void (*set_bits_mask)(struct iwl_trans *trans, u32 reg, u32 mask,
			      u32 value);

	struct iwl_trans_dump_data *(*dump_data)(struct iwl_trans *trans,
						 u32 dump_mask,
						 const struct iwl_dump_sanitize_ops *sanitize_ops,
						 void *sanitize_ctx);
	void (*debugfs_cleanup)(struct iwl_trans *trans);
	void (*sync_nmi)(struct iwl_trans *trans);
	int (*load_pnvm)(struct iwl_trans *trans,
			 const struct iwl_pnvm_image *pnvm_payloads,
			 const struct iwl_ucode_capabilities *capa);
	void (*set_pnvm)(struct iwl_trans *trans,
			 const struct iwl_ucode_capabilities *capa);
	int (*load_reduce_power)(struct iwl_trans *trans,
				 const struct iwl_pnvm_image *payloads,
				 const struct iwl_ucode_capabilities *capa);
	void (*set_reduce_power)(struct iwl_trans *trans,
				 const struct iwl_ucode_capabilities *capa);

	void (*interrupts)(struct iwl_trans *trans, bool enable);
	int (*imr_dma_data)(struct iwl_trans *trans,
			    u32 dst_addr, u64 src_addr,
			    u32 byte_cnt);

};

/**
 * enum iwl_trans_state - state of the transport layer
 *
 * @IWL_TRANS_NO_FW: firmware wasn't started yet, or crashed
 * @IWL_TRANS_FW_STARTED: FW was started, but not alive yet
 * @IWL_TRANS_FW_ALIVE: FW has sent an alive response
 */
enum iwl_trans_state {
	IWL_TRANS_NO_FW,
	IWL_TRANS_FW_STARTED,
	IWL_TRANS_FW_ALIVE,
};

/**
 * DOC: Platform power management
 *
 * In system-wide power management the entire platform goes into a low
 * power state (e.g. idle or suspend to RAM) at the same time and the
 * device is configured as a wakeup source for the entire platform.
 * This is usually triggered by userspace activity (e.g. the user
 * presses the suspend button or a power management daemon decides to
 * put the platform in low power mode). The device's behavior in this
 * mode is dictated by the wake-on-WLAN configuration.
 *
 * The terms used for the device's behavior are as follows:
 *
 *	- D0: the device is fully powered and the host is awake;
 *	- D3: the device is in low power mode and only reacts to
 *		specific events (e.g. magic-packet received or scan
 *		results found);
 *
 * These terms reflect the power modes in the firmware and are not to
 * be confused with the physical device power state.
 */

/**
 * enum iwl_plat_pm_mode - platform power management mode
 *
 * This enumeration describes the device's platform power management
 * behavior when in system-wide suspend (i.e. WoWLAN).
 *
 * @IWL_PLAT_PM_MODE_DISABLED: power management is disabled for this
 *	device. In system-wide suspend mode, it means that all
 *	connections will be closed automatically by mac80211 before
 *	the platform is suspended.
 * @IWL_PLAT_PM_MODE_D3: the device goes into D3 mode (i.e. WoWLAN).
 */
enum iwl_plat_pm_mode {
	IWL_PLAT_PM_MODE_DISABLED,
	IWL_PLAT_PM_MODE_D3,
};

/**
 * enum iwl_ini_cfg_state
 * @IWL_INI_CFG_STATE_NOT_LOADED: no debug cfg was given
 * @IWL_INI_CFG_STATE_LOADED: debug cfg was found and loaded
 * @IWL_INI_CFG_STATE_CORRUPTED: debug cfg was found and some of the TLVs
 *	are corrupted. The rest of the debug TLVs will still be used
 */
enum iwl_ini_cfg_state {
	IWL_INI_CFG_STATE_NOT_LOADED,
	IWL_INI_CFG_STATE_LOADED,
	IWL_INI_CFG_STATE_CORRUPTED,
};

/* Max time to wait for nmi interrupt */
#define IWL_TRANS_NMI_TIMEOUT (HZ / 4)

/**
 * struct iwl_dram_data
 * @physical: page phy pointer
 * @block: pointer to the allocated block/page
 * @size: size of the block/page
 */
struct iwl_dram_data {
	dma_addr_t physical;
	void *block;
	int size;
};

/**
 * struct iwl_dram_regions - DRAM regions container structure
 * @drams: array of several DRAM areas that contains the pnvm and power
 *	reduction table payloads.
 * @n_regions: number of DRAM regions that were allocated
 * @prph_scratch_mem_desc: points to a structure allocated in dram,
 *	designed to show FW where all the payloads are.
 */
struct iwl_dram_regions {
	struct iwl_dram_data drams[IPC_DRAM_MAP_ENTRY_NUM_MAX];
	struct iwl_dram_data prph_scratch_mem_desc;
	u8 n_regions;
};

/**
 * struct iwl_fw_mon - fw monitor per allocation id
 * @num_frags: number of fragments
 * @frags: an array of DRAM buffer fragments
 */
struct iwl_fw_mon {
	u32 num_frags;
	struct iwl_dram_data *frags;
};

/**
 * struct iwl_self_init_dram - dram data used by self init process
 * @fw: lmac and umac dram data
 * @fw_cnt: total number of items in array
 * @paging: paging dram data
 * @paging_cnt: total number of items in array
 */
struct iwl_self_init_dram {
	struct iwl_dram_data *fw;
	int fw_cnt;
	struct iwl_dram_data *paging;
	int paging_cnt;
};

/**
 * struct iwl_imr_data - imr dram data used during debug process
 * @imr_enable: imr enable status received from fw
 * @imr_size: imr dram size received from fw
 * @sram_addr: sram address from debug tlv
 * @sram_size: sram size from debug tlv
 * @imr2sram_remainbyte: size remaining after each dma transfer
 * @imr_curr_addr: current dst address used during dma transfer
 * @imr_base_addr: imr address received from fw
 */
struct iwl_imr_data {
	u32 imr_enable;
	u32 imr_size;
	u32 sram_addr;
	u32 sram_size;
	u32 imr2sram_remainbyte;
	u64 imr_curr_addr;
	__le64 imr_base_addr;
};

#define IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES	32

/**
 * struct iwl_pc_data - program counter details
 * @pc_name: cpu name
 * @pc_address: cpu program counter
 */
struct iwl_pc_data {
	u8 pc_name[IWL_TRANS_CURRENT_PC_NAME_MAX_BYTES];
	u32 pc_address;
};

/**
 * struct iwl_trans_debug - transport debug related data
 *
 * @n_dest_reg: num of reg_ops in %dbg_dest_tlv
 * @rec_on: true iff there is a fw debug recording currently active
 * @dest_tlv: points to the destination TLV for debug
 * @conf_tlv: array of pointers to configuration TLVs for debug
 * @trigger_tlv: array of pointers to triggers TLVs for debug
 * @lmac_error_event_table: addrs of lmacs error tables
 * @umac_error_event_table: addr of umac error table
 * @tcm_error_event_table: address(es) of TCM error table(s)
 * @rcm_error_event_table: address(es) of RCM error table(s)
 * @error_event_table_tlv_status: bitmap that indicates which error table
 *	pointers were received via TLV. uses enum &iwl_error_event_table_status
 * @internal_ini_cfg: internal debug cfg state. Uses &enum iwl_ini_cfg_state
 * @external_ini_cfg: external debug cfg state. Uses &enum iwl_ini_cfg_state
 * @fw_mon_cfg: debug buffer allocation configuration
 * @fw_mon_ini: DRAM buffer fragments per allocation id
 * @fw_mon: DRAM buffer for firmware monitor
 * @hw_error: equals true if hw error interrupt was received from the FW
 * @ini_dest: debug monitor destination uses &enum iwl_fw_ini_buffer_location
 * @active_regions: active regions
 * @debug_info_tlv_list: list of debug info TLVs
 * @time_point: array of debug time points
 * @periodic_trig_list: periodic triggers list
 * @domains_bitmap: bitmap of active domains other than &IWL_FW_INI_DOMAIN_ALWAYS_ON
 * @ucode_preset: preset based on ucode
 * @dump_file_name_ext: dump file name extension
 * @dump_file_name_ext_valid: whether the dump file name extension is valid
 * @num_pc: number of program counters for cpu
 * @pc_data: details of the program counter
 */
struct iwl_trans_debug {
	u8 n_dest_reg;
	bool rec_on;

	const struct iwl_fw_dbg_dest_tlv_v1 *dest_tlv;
	const struct iwl_fw_dbg_conf_tlv *conf_tlv[FW_DBG_CONF_MAX];
	struct iwl_fw_dbg_trigger_tlv * const *trigger_tlv;

	u32 lmac_error_event_table[2];
	u32 umac_error_event_table;
	u32 tcm_error_event_table[2];
	u32 rcm_error_event_table[2];
	unsigned int error_event_table_tlv_status;

	enum iwl_ini_cfg_state internal_ini_cfg;
	enum iwl_ini_cfg_state external_ini_cfg;

	struct iwl_fw_ini_allocation_tlv fw_mon_cfg[IWL_FW_INI_ALLOCATION_NUM];
	struct iwl_fw_mon fw_mon_ini[IWL_FW_INI_ALLOCATION_NUM];

	struct iwl_dram_data fw_mon;

	bool hw_error;
	enum iwl_fw_ini_buffer_location ini_dest;

	u64 unsupported_region_msk;
	struct iwl_ucode_tlv *active_regions[IWL_FW_INI_MAX_REGION_ID];
	struct list_head debug_info_tlv_list;
	struct iwl_dbg_tlv_time_point_data
		time_point[IWL_FW_INI_TIME_POINT_NUM];
	struct list_head periodic_trig_list;

	u32 domains_bitmap;
	u32 ucode_preset;
	bool restart_required;
	u32 last_tp_resetfw;
	struct iwl_imr_data imr_data;
	u8 dump_file_name_ext[IWL_FW_INI_MAX_NAME];
	bool dump_file_name_ext_valid;
	u32 num_pc;
	struct iwl_pc_data *pc_data;
};

struct iwl_dma_ptr {
	dma_addr_t dma;
	void *addr;
	size_t size;
};

struct iwl_cmd_meta {
	/* only for SYNC commands, iff the reply skb is wanted */
	struct iwl_host_cmd *source;
	u32 flags;
	u32 tbs;
};

/*
 * The FH will write back to the first TB only, so we need to copy some data
 * into the buffer regardless of whether it should be mapped or not.
 * This indicates how big the first TB must be to include the scratch buffer
 * and the assigned PN.
 * Since PN location is 8 bytes at offset 12, it's 20 now.
 * If we make it bigger then allocations will be bigger and copy slower, so
 * that's probably not useful.
 */
#define IWL_FIRST_TB_SIZE	20
#define IWL_FIRST_TB_SIZE_ALIGN ALIGN(IWL_FIRST_TB_SIZE, 64)

struct iwl_pcie_txq_entry {
	void *cmd;
	struct sk_buff *skb;
	/* buffer to free after command completes */
	const void *free_buf;
	struct iwl_cmd_meta meta;
};

struct iwl_pcie_first_tb_buf {
	u8 buf[IWL_FIRST_TB_SIZE_ALIGN];
};

/**
 * struct iwl_txq - Tx Queue for DMA
 * @q: generic Rx/Tx queue descriptor
 * @tfds: transmit frame descriptors (DMA memory)
 * @first_tb_bufs: start of command headers, including scratch buffers, for
 *	the writeback -- this is DMA memory and an array holding one buffer
 *	for each command on the queue
 * @first_tb_dma: DMA address for the first_tb_bufs start
 * @entries: transmit entries (driver state)
 * @lock: queue lock
 * @stuck_timer: timer that fires if queue gets stuck
 * @trans: pointer back to transport (for timer)
 * @need_update: indicates need to update read/write index
 * @ampdu: true if this queue is an ampdu queue for a specific RA/TID
 * @wd_timeout: queue watchdog timeout (jiffies) - per queue
 * @frozen: tx stuck queue timer is frozen
 * @frozen_expiry_remainder: remember how long until the timer fires
 * @bc_tbl: byte count table of the queue (relevant only for gen2 transport)
 * @write_ptr: first empty entry (index) host_w
 * @read_ptr: last used entry (index) host_r
 * @dma_addr: physical addr for BD's
 * @n_window: safe queue window
 * @id: queue id
 * @low_mark: low watermark, resume queue if free space more than this
 * @high_mark: high watermark, stop queue if free space less than this
 *
 * A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
 * descriptors) and required locking structures.
 *
 * Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
 * always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
 * there might be HW changes in the future). For the normal TX
 * queues, n_window, which is the size of the software queue data,
 * is also 256; however, for the command queue, n_window is only
 * 32 since we don't need so many commands pending. Since the HW
 * still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
 * This means that we end up with the following:
 *	HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
 *	SW entries: | 0 | ... | 31 |
 * where N is a number between 0 and 7. This means that the SW
 * data is a window overlaid over the HW queue.
 */
struct iwl_txq {
	void *tfds;
	struct iwl_pcie_first_tb_buf *first_tb_bufs;
	dma_addr_t first_tb_dma;
	struct iwl_pcie_txq_entry *entries;
	/* lock for syncing changes on the queue */
	spinlock_t lock;
	unsigned long frozen_expiry_remainder;
	struct timer_list stuck_timer;
	struct iwl_trans *trans;
	bool need_update;
	bool frozen;
	bool ampdu;
	int block;
	unsigned long wd_timeout;
	struct sk_buff_head overflow_q;
	struct iwl_dma_ptr bc_tbl;

	int write_ptr;
	int read_ptr;
	dma_addr_t dma_addr;
	int n_window;
	u32 id;
	int low_mark;
	int high_mark;

	bool overflow_tx;
};
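
/*
 * Worked example for the n_window comment above: on the command queue
 * (n_window == 32), HW descriptor index and SW entry index are related
 * by a power-of-two wrap, i.e. sw_idx = hw_idx % n_window, so HW indices
 * 3, 35, 67, ..., 227 all map to SW entry 3. (The driver's queue code
 * performs this wrap with a mask, since n_window is a power of two.)
 */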

/**
 * struct iwl_trans_txqs - transport tx queues data
 *
 * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @page_offs: offset from skb->cb to mac header page pointer
 * @dev_cmd_offs: offset from skb->cb to iwl_device_tx_cmd pointer
 * @queue_used: bit mask of used queues
 * @queue_stopped: bit mask of stopped queues
 * @scd_bc_tbls: gen1 pointer to the byte count table of the scheduler
 * @queue_alloc_cmd_ver: queue allocation command version
 */
struct iwl_trans_txqs {
	unsigned long queue_used[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	unsigned long queue_stopped[BITS_TO_LONGS(IWL_MAX_TVQM_QUEUES)];
	struct iwl_txq *txq[IWL_MAX_TVQM_QUEUES];
	struct dma_pool *bc_pool;
	size_t bc_tbl_size;
	bool bc_table_dword;
	u8 page_offs;
	u8 dev_cmd_offs;
	struct iwl_tso_hdr_page __percpu *tso_hdr_page;

	struct {
		u8 fifo;
		u8 q_id;
		unsigned int wdg_timeout;
	} cmd;

	struct {
		u8 max_tbs;
		u16 size;
		u8 addr_size;
	} tfd;

	struct iwl_dma_ptr scd_bc_tbls;

	u8 queue_alloc_cmd_ver;
};

/**
 * struct iwl_trans - transport common data
 *
 * @csme_own: true if we couldn't get ownership on the device
 * @ops: pointer to iwl_trans_ops
 * @op_mode: pointer to the op_mode
 * @trans_cfg: the trans-specific configuration part
 * @cfg: pointer to the configuration
 * @drv: pointer to iwl_drv
 * @status: a bit-mask of transport status flags
 * @dev: pointer to struct device * that represents the device
 * @max_skb_frags: maximum number of fragments an SKB can have when transmitted.
 *	0 indicates that frag SKBs (NETIF_F_SG) aren't supported.
 * @hw_rf_id: a u32 with the device RF ID
 * @hw_crf_id: a u32 with the device CRF ID
 * @hw_wfpm_id: a u32 with the device wfpm ID
 * @hw_id: a u32 with the ID of the device / sub-device.
 *	Set during transport allocation.
 * @hw_id_str: a string with info about HW ID. Set during transport allocation.
 * @hw_rev_step: The mac step of the HW
 * @pm_support: set to true in start_hw if link pm is supported
 * @ltr_enabled: set to true if the LTR is enabled
 * @fail_to_parse_pnvm_image: set to true if pnvm parsing failed
 * @failed_to_load_reduce_power_image: set to true if pnvm loading failed
 * @wide_cmd_header: true when ucode supports wide command header format
 * @wait_command_queue: wait queue for sync commands
 * @num_rx_queues: number of RX queues allocated by the transport;
 *	the transport must set this before calling iwl_drv_start()
 * @iml_len: the length of the image loader
 * @iml: a pointer to the image loader itself
 * @dev_cmd_pool: pool for Tx cmd allocation - for internal use only.
 *	The user should use iwl_trans_{alloc,free}_tx_cmd.
 * @rx_mpdu_cmd: MPDU RX command ID, must be assigned by opmode before
 *	starting the firmware, used for tracing
 * @rx_mpdu_cmd_hdr_size: used for tracing, amount of data before the
 *	start of the 802.11 header in the @rx_mpdu_cmd
 * @dflt_pwr_limit: default power limit fetched from the platform (ACPI)
 * @system_pm_mode: the system-wide power management mode in use.
 *	This mode is set dynamically, depending on the WoWLAN values
 *	configured from the userspace at runtime.
 * @txqs: transport tx queues data.
 * @mbx_addr_0_step: step address data 0
 * @mbx_addr_1_step: step address data 1
 * @pcie_link_speed: current PCIe link speed (%PCI_EXP_LNKSTA_CLS_*),
 *	only valid for discrete (not integrated) NICs
 * @invalid_tx_cmd: invalid TX command buffer
 */
struct iwl_trans {
	bool csme_own;
	const struct iwl_trans_ops *ops;
	struct iwl_op_mode *op_mode;
	const struct iwl_cfg_trans_params *trans_cfg;
	const struct iwl_cfg *cfg;
	struct iwl_drv *drv;
	enum iwl_trans_state state;
	unsigned long status;

	struct device *dev;
	u32 max_skb_frags;
	u32 hw_rev;
	u32 hw_rev_step;
	u32 hw_rf_id;
	u32 hw_crf_id;
	u32 hw_cnv_id;
	u32 hw_wfpm_id;
	u32 hw_id;
	char hw_id_str[52];
	u32 sku_id[3];

	u8 rx_mpdu_cmd, rx_mpdu_cmd_hdr_size;

	bool pm_support;
	bool ltr_enabled;
	u8 pnvm_loaded:1;
	u8 fail_to_parse_pnvm_image:1;
	u8 reduce_power_loaded:1;
	u8 failed_to_load_reduce_power_image:1;

	const struct iwl_hcmd_arr *command_groups;
	int command_groups_size;
	bool wide_cmd_header;

	wait_queue_head_t wait_command_queue;
	u8 num_rx_queues;

	size_t iml_len;
	u8 *iml;

	/* The following fields are internal only */
	struct kmem_cache *dev_cmd_pool;
	char dev_cmd_pool_name[50];

	struct dentry *dbgfs_dir;

#ifdef CONFIG_LOCKDEP
	struct lockdep_map sync_cmd_lockdep_map;
#endif

	struct iwl_trans_debug dbg;
	struct iwl_self_init_dram init_dram;

	enum iwl_plat_pm_mode system_pm_mode;

	const char *name;
	struct iwl_trans_txqs txqs;
	u32 mbx_addr_0_step;
	u32 mbx_addr_1_step;

	u8 pcie_link_speed;

	struct iwl_dma_ptr invalid_tx_cmd;

	/* pointer to trans specific struct */
	/* Ensure that this pointer will always be aligned to sizeof pointer */
	char trans_specific[] __aligned(sizeof(void *));
};

const char *iwl_get_cmd_string(struct iwl_trans *trans, u32 id);
int iwl_cmd_groups_verify_sorted(const struct iwl_trans_config *trans);

static inline void iwl_trans_configure(struct iwl_trans *trans,
				       const struct iwl_trans_config *trans_cfg)
{
	trans->op_mode = trans_cfg->op_mode;

	trans->ops->configure(trans, trans_cfg);
	WARN_ON(iwl_cmd_groups_verify_sorted(trans_cfg));
}

static inline int iwl_trans_start_hw(struct iwl_trans *trans)
{
	might_sleep();

	return trans->ops->start_hw(trans);
}

static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
{
	might_sleep();

	if (trans->ops->op_mode_leave)
		trans->ops->op_mode_leave(trans);

	trans->op_mode = NULL;

	trans->state = IWL_TRANS_NO_FW;
}

static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	might_sleep();

	trans->state = IWL_TRANS_FW_ALIVE;

	trans->ops->fw_alive(trans, scd_addr);
}

static inline int iwl_trans_start_fw(struct iwl_trans *trans,
				     const struct fw_img *fw,
				     bool run_in_rfkill)
{
	int ret;

	might_sleep();

	WARN_ON_ONCE(!trans->rx_mpdu_cmd);

	clear_bit(STATUS_FW_ERROR, &trans->status);
	ret = trans->ops->start_fw(trans, fw, run_in_rfkill);
	if (ret == 0)
		trans->state = IWL_TRANS_FW_STARTED;

	return ret;
}

static inline void iwl_trans_stop_device(struct iwl_trans *trans)
{
	might_sleep();

	trans->ops->stop_device(trans);

	trans->state = IWL_TRANS_NO_FW;
}

static inline int iwl_trans_d3_suspend(struct iwl_trans *trans, bool test,
				       bool reset)
{
	might_sleep();
	if (!trans->ops->d3_suspend)
		return -EOPNOTSUPP;

	return trans->ops->d3_suspend(trans, test, reset);
}

static inline int iwl_trans_d3_resume(struct iwl_trans *trans,
				      enum iwl_d3_status *status,
				      bool test, bool reset)
{
	might_sleep();
	if (!trans->ops->d3_resume)
		return -EOPNOTSUPP;

	return trans->ops->d3_resume(trans, status, test, reset);
}

static inline struct iwl_trans_dump_data *
iwl_trans_dump_data(struct iwl_trans *trans, u32 dump_mask,
		    const struct iwl_dump_sanitize_ops *sanitize_ops,
		    void *sanitize_ctx)
{
	if (!trans->ops->dump_data)
		return NULL;
	return trans->ops->dump_data(trans, dump_mask,
				     sanitize_ops, sanitize_ctx);
}

static inline struct iwl_device_tx_cmd *
iwl_trans_alloc_tx_cmd(struct iwl_trans *trans)
{
	return kmem_cache_zalloc(trans->dev_cmd_pool, GFP_ATOMIC);
}

int iwl_trans_send_cmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd);

static inline void iwl_trans_free_tx_cmd(struct iwl_trans *trans,
					 struct iwl_device_tx_cmd *dev_cmd)
{
	kmem_cache_free(trans->dev_cmd_pool, dev_cmd);
}

static inline int iwl_trans_tx(struct iwl_trans *trans, struct sk_buff *skb,
			       struct iwl_device_tx_cmd *dev_cmd, int queue)
{
	if (unlikely(test_bit(STATUS_FW_ERROR, &trans->status)))
		return -EIO;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->tx(trans, skb, dev_cmd, queue);
}

static inline void iwl_trans_reclaim(struct iwl_trans *trans, int queue,
				     int ssn, struct sk_buff_head *skbs,
				     bool is_flush)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->reclaim(trans, queue, ssn, skbs, is_flush);
}

static inline void iwl_trans_set_q_ptrs(struct iwl_trans *trans, int queue,
					int ptr)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	trans->ops->set_q_ptrs(trans, queue, ptr);
}

static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue,
					 bool configure_scd)
{
	trans->ops->txq_disable(trans, queue, configure_scd);
}

static inline bool
iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
			 const struct iwl_trans_txq_scd_cfg *cfg,
			 unsigned int queue_wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return false;
	}

	return trans->ops->txq_enable(trans, queue, ssn,
				      cfg, queue_wdg_timeout);
}

static inline int
iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
			   struct iwl_trans_rxq_dma_data *data)
{
	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
		return -ENOTSUPP;

	return trans->ops->rxq_dma_data(trans, queue, data);
}

static inline void
iwl_trans_txq_free(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->txq_free))
		return;

	trans->ops->txq_free(trans, queue);
}

static inline int
iwl_trans_txq_alloc(struct iwl_trans *trans,
		    u32 flags, u32 sta_mask, u8 tid,
		    int size, unsigned int wdg_timeout)
{
	might_sleep();

	if (WARN_ON_ONCE(!trans->ops->txq_alloc))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->txq_alloc(trans, flags, sta_mask, tid,
				     size, wdg_timeout);
}

static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
						 int queue, bool shared_mode)
{
	if (trans->ops->txq_set_shared_mode)
		trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}

static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
					int fifo, int sta_id, int tid,
					int frame_limit, u16 ssn,
					unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = tid,
		.frame_limit = frame_limit,
		.aggregate = sta_id >= 0,
	};

	iwl_trans_txq_enable_cfg(trans, queue, ssn, &cfg, queue_wdg_timeout);
}

static inline
void iwl_trans_ac_txq_enable(struct iwl_trans *trans, int queue, int fifo,
			     unsigned int queue_wdg_timeout)
{
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = -1,
		.tid = IWL_MAX_TID_COUNT,
		.frame_limit = IWL_FRAME_LIMIT,
		.aggregate = false,
	};

	iwl_trans_txq_enable_cfg(trans, queue, 0, &cfg, queue_wdg_timeout);
}

static inline void iwl_trans_freeze_txq_timer(struct iwl_trans *trans,
					      unsigned long txqs,
					      bool freeze)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->freeze_txq_timer)
		trans->ops->freeze_txq_timer(trans, txqs, freeze);
}

static inline void iwl_trans_block_txq_ptrs(struct iwl_trans *trans,
					    bool block)
{
	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return;
	}

	if (trans->ops->block_txq_ptrs)
		trans->ops->block_txq_ptrs(trans, block);
}

static inline int iwl_trans_wait_tx_queues_empty(struct iwl_trans *trans,
						 u32 txqs)
{
	if (WARN_ON_ONCE(!trans->ops->wait_tx_queues_empty))
		return -ENOTSUPP;

	/* No need to wait if the firmware is not alive */
	if (trans->state != IWL_TRANS_FW_ALIVE) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_tx_queues_empty(trans, txqs);
}

static inline int iwl_trans_wait_txq_empty(struct iwl_trans *trans, int queue)
{
	if (WARN_ON_ONCE(!trans->ops->wait_txq_empty))
		return -ENOTSUPP;

	if (WARN_ON_ONCE(trans->state != IWL_TRANS_FW_ALIVE)) {
		IWL_ERR(trans, "%s bad state = %d\n", __func__, trans->state);
		return -EIO;
	}

	return trans->ops->wait_txq_empty(trans, queue);
}

static inline void iwl_trans_write8(struct iwl_trans *trans, u32 ofs, u8 val)
{
	trans->ops->write8(trans, ofs, val);
}

static inline void iwl_trans_write32(struct iwl_trans *trans, u32 ofs, u32 val)
{
	trans->ops->write32(trans, ofs, val);
}

static inline u32 iwl_trans_read32(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read32(trans, ofs);
}

static inline u32 iwl_trans_read_prph(struct iwl_trans *trans, u32 ofs)
{
	return trans->ops->read_prph(trans, ofs);
}

static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
					u32 val)
{
	return trans->ops->write_prph(trans, ofs, val);
}

static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
				     void *buf, int dwords)
{
	return trans->ops->read_mem(trans, addr, buf, dwords);
}

#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
	do {								      \
		if (__builtin_constant_p(bufsize))			      \
			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
	} while (0)
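
/*
 * Example (illustrative only): reading a whole structure out of device
 * SRAM; the address and struct below are placeholders. The macro insists
 * that a compile-time-constant size be a multiple of 4, since SRAM is
 * accessed in DWORDs.
 *
 *	struct my_sram_layout layout;	// hypothetical, sizeof % 4 == 0
 *
 *	iwl_trans_read_mem_bytes(trans, MY_SRAM_ADDR,	// hypothetical addr
 *				 &layout, sizeof(layout));
 */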

static inline int iwl_trans_write_imr_mem(struct iwl_trans *trans,
					  u32 dst_addr, u64 src_addr,
					  u32 byte_cnt)
{
	if (trans->ops->imr_dma_data)
		return trans->ops->imr_dma_data(trans, dst_addr, src_addr,
						byte_cnt);
	return 0;
}

static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
{
	u32 value;

	if (iwl_trans_read_mem(trans, addr, &value, 1))
		return 0xa5a5a5a5;

	return value;
}

static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
				      const void *buf, int dwords)
{
	return trans->ops->write_mem(trans, addr, buf, dwords);
}

static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
					u32 val)
{
	return iwl_trans_write_mem(trans, addr, &val, 1);
}

static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
{
	if (trans->ops->set_pmi)
		trans->ops->set_pmi(trans, state);
}

static inline int iwl_trans_sw_reset(struct iwl_trans *trans,
				     bool retake_ownership)
{
	if (trans->ops->sw_reset)
		return trans->ops->sw_reset(trans, retake_ownership);
	return 0;
}

static inline void
iwl_trans_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value)
{
	trans->ops->set_bits_mask(trans, reg, mask, value);
}

#define iwl_trans_grab_nic_access(trans)		\
	__cond_lock(nic_access,				\
		    likely((trans)->ops->grab_nic_access(trans)))

static inline void __releases(nic_access)
iwl_trans_release_nic_access(struct iwl_trans *trans)
{
	trans->ops->release_nic_access(trans);
	__release(nic_access);
}
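
/*
 * Example (illustrative only): the canonical grab/release pattern for
 * accessing non-HBUS registers. No sleeping is allowed in between, and
 * every successful grab must be paired with a release. MY_PRPH_REG is
 * a hypothetical register.
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		u32 val = iwl_trans_read_prph(trans, MY_PRPH_REG);
 *
 *		iwl_trans_release_nic_access(trans);
 *	}
 */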

static inline void iwl_trans_fw_error(struct iwl_trans *trans, bool sync)
{
	if (WARN_ON_ONCE(!trans->op_mode))
		return;

	/* prevent double restarts due to the same erroneous FW */
	if (!test_and_set_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_op_mode_nic_error(trans->op_mode, sync);
		trans->state = IWL_TRANS_NO_FW;
	}
}

static inline bool iwl_trans_fw_running(struct iwl_trans *trans)
{
	return trans->state == IWL_TRANS_FW_ALIVE;
}

static inline void iwl_trans_sync_nmi(struct iwl_trans *trans)
{
	if (trans->ops->sync_nmi)
		trans->ops->sync_nmi(trans);
}

void iwl_trans_sync_nmi_with_addr(struct iwl_trans *trans, u32 inta_addr,
				  u32 sw_err_bit);

static inline int iwl_trans_load_pnvm(struct iwl_trans *trans,
				      const struct iwl_pnvm_image *pnvm_data,
				      const struct iwl_ucode_capabilities *capa)
{
	return trans->ops->load_pnvm(trans, pnvm_data, capa);
}

static inline void iwl_trans_set_pnvm(struct iwl_trans *trans,
				      const struct iwl_ucode_capabilities *capa)
{
	if (trans->ops->set_pnvm)
		trans->ops->set_pnvm(trans, capa);
}

static inline int
iwl_trans_load_reduce_power(struct iwl_trans *trans,
			    const struct iwl_pnvm_image *payloads,
			    const struct iwl_ucode_capabilities *capa)
{
	return trans->ops->load_reduce_power(trans, payloads, capa);
}

static inline void
iwl_trans_set_reduce_power(struct iwl_trans *trans,
			   const struct iwl_ucode_capabilities *capa)
{
	if (trans->ops->set_reduce_power)
		trans->ops->set_reduce_power(trans, capa);
}

static inline bool iwl_trans_dbg_ini_valid(struct iwl_trans *trans)
{
	return trans->dbg.internal_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED ||
		trans->dbg.external_ini_cfg != IWL_INI_CFG_STATE_NOT_LOADED;
}

static inline void iwl_trans_interrupts(struct iwl_trans *trans, bool enable)
{
	if (trans->ops->interrupts)
		trans->ops->interrupts(trans, enable);
}

/*****************************************************
 * transport helper functions
 *****************************************************/
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
				  struct device *dev,
				  const struct iwl_trans_ops *ops,
				  const struct iwl_cfg_trans_params *cfg_trans);
int iwl_trans_init(struct iwl_trans *trans);
void iwl_trans_free(struct iwl_trans *trans);

static inline bool iwl_trans_is_hw_error_value(u32 val)
{
	return ((val & ~0xf) == 0xa5a5a5a0) || ((val & ~0xf) == 0x5a5a5a50);
}

/*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
int __must_check iwl_pci_register_driver(void);
void iwl_pci_unregister_driver(void);
void iwl_trans_pcie_remove(struct iwl_trans *trans, bool rescan);

#endif /* __iwl_trans_h__ */