/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2006 Fen Systems Ltd.
 * Copyright 2005-2013 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

/* Common definitions for all Efx net driver code */

#ifndef EFX_NET_DRIVER_H
#define EFX_NET_DRIVER_H

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/if_vlan.h>
#include <linux/timer.h>
#include <linux/mdio.h>
#include <linux/list.h>
#include <linux/pci.h>
#include <linux/device.h>
#include <linux/highmem.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/i2c.h>
#include <linux/mtd/mtd.h>
#include <net/busy_poll.h>

#include "enum.h"
#include "bitfield.h"
#include "filter.h"

/**************************************************************************
 *
 * Build definitions
 *
 **************************************************************************/

#define EFX_DRIVER_VERSION "4.0"

#ifdef DEBUG
#define EFX_BUG_ON_PARANOID(x) BUG_ON(x)
#define EFX_WARN_ON_PARANOID(x) WARN_ON(x)
#else
#define EFX_BUG_ON_PARANOID(x) do {} while (0)
#define EFX_WARN_ON_PARANOID(x) do {} while (0)
#endif

/**************************************************************************
 *
 * Efx data structures
 *
 **************************************************************************/

#define EFX_MAX_CHANNELS 32U
#define EFX_MAX_RX_QUEUES EFX_MAX_CHANNELS
#define EFX_EXTRA_CHANNEL_IOV 0
#define EFX_EXTRA_CHANNEL_PTP 1
#define EFX_MAX_EXTRA_CHANNELS 2U

/* Checksum generation is a per-queue option in hardware, so each
 * queue visible to the networking core is backed by two hardware TX
 * queues. */
#define EFX_MAX_TX_TC 2
#define EFX_MAX_CORE_TX_QUEUES (EFX_MAX_TX_TC * EFX_MAX_CHANNELS)
#define EFX_TXQ_TYPE_OFFLOAD 1 /* flag */
#define EFX_TXQ_TYPE_HIGHPRI 2 /* flag */
#define EFX_TXQ_TYPES 4
#define EFX_MAX_TX_QUEUES (EFX_TXQ_TYPES * EFX_MAX_CHANNELS)
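
/* Illustrative note (not driver code): the type flags live in the low
 * bits of a TX queue's DMA queue number, so a hypothetical queue on
 * channel 3 carrying checksum-offloaded, high-priority traffic would be
 * numbered:
 *
 *	3 * EFX_TXQ_TYPES + (EFX_TXQ_TYPE_OFFLOAD | EFX_TXQ_TYPE_HIGHPRI)
 *
 * which is why code such as efx_tx_queue_used() later in this header can
 * test tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI directly.
 */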

/* Maximum possible MTU the driver supports */
#define EFX_MAX_MTU (9 * 1024)

/* Minimum MTU, from RFC791 (IP) */
#define EFX_MIN_MTU 68

/* Size of an RX scatter buffer. Small enough to pack 2 into a 4K page,
 * and should be a multiple of the cache line size.
 */
#define EFX_RX_USR_BUF_SIZE (2048 - 256)

/* If possible, we should ensure cache line alignment at start and end
 * of every buffer. Otherwise, we just need to ensure 4-byte
 * alignment of the network header.
 */
#if NET_IP_ALIGN == 0
#define EFX_RX_BUF_ALIGNMENT L1_CACHE_BYTES
#else
#define EFX_RX_BUF_ALIGNMENT 4
#endif

/* Forward declare Precision Time Protocol (PTP) support structure. */
struct efx_ptp_data;
struct hwtstamp_config;

struct efx_self_tests;

/**
 * struct efx_buffer - A general-purpose DMA buffer
 * @addr: host base address of the buffer
 * @dma_addr: DMA base address of the buffer
 * @len: Buffer length, in bytes
 *
 * The NIC uses these buffers for its interrupt status registers and
 * MAC stats dumps.
 */
struct efx_buffer {
	void *addr;
	dma_addr_t dma_addr;
	unsigned int len;
};
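
/* Illustrative sketch (not driver code): a struct efx_buffer is typically
 * backed by coherent DMA memory, along the lines of:
 *
 *	buffer->addr = dma_alloc_coherent(&efx->pci_dev->dev, len,
 *					  &buffer->dma_addr, GFP_KERNEL);
 *	if (!buffer->addr)
 *		return -ENOMEM;
 *	buffer->len = len;
 */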

/**
 * struct efx_special_buffer - DMA buffer entered into buffer table
 * @buf: Standard &struct efx_buffer
 * @index: Buffer index within controller's buffer table
 * @entries: Number of buffer table entries
 *
 * The NIC has a buffer table that maps buffers of size %EFX_BUF_SIZE.
 * Event and descriptor rings are addressed via one or more buffer
 * table entries (and so can be physically non-contiguous, although we
 * currently do not take advantage of that). On Falcon and Siena we
 * have to take care of allocating and initialising the entries
 * ourselves. On later hardware this is managed by the firmware and
 * @index and @entries are left as 0.
 */
struct efx_special_buffer {
	struct efx_buffer buf;
	unsigned int index;
	unsigned int entries;
};

/**
 * struct efx_tx_buffer - buffer state for a TX descriptor
 * @skb: When @flags & %EFX_TX_BUF_SKB, the associated socket buffer to be
 *	freed when descriptor completes
 * @heap_buf: When @flags & %EFX_TX_BUF_HEAP, the associated heap buffer to be
 *	freed when descriptor completes.
 * @option: When @flags & %EFX_TX_BUF_OPTION, a NIC-specific option descriptor.
 * @dma_addr: DMA address of the fragment.
 * @flags: Flags for allocation and DMA mapping type
 * @len: Length of this fragment.
 *	This field is zero when the queue slot is empty.
 * @unmap_len: Length of this fragment to unmap
 * @dma_offset: Offset of @dma_addr from the address of the backing DMA mapping.
 *	Only valid if @unmap_len != 0.
 */
struct efx_tx_buffer {
	union {
		const struct sk_buff *skb;
		void *heap_buf;
	};
	union {
		efx_qword_t option;
		dma_addr_t dma_addr;
	};
	unsigned short flags;
	unsigned short len;
	unsigned short unmap_len;
	unsigned short dma_offset;
};
#define EFX_TX_BUF_CONT		1	/* not last descriptor of packet */
#define EFX_TX_BUF_SKB		2	/* buffer is last part of skb */
#define EFX_TX_BUF_HEAP		4	/* buffer was allocated with kmalloc() */
#define EFX_TX_BUF_MAP_SINGLE	8	/* buffer was mapped with dma_map_single() */
#define EFX_TX_BUF_OPTION	0x10	/* empty buffer for option descriptor */

/**
 * struct efx_tx_queue - An Efx TX queue
 *
 * This is a ring buffer of TX fragments.
 * Since the TX completion path always executes on the same
 * CPU and the xmit path can operate on different CPUs,
 * performance is increased by ensuring that the completion
 * path and the xmit path operate on different cache lines.
 * This is particularly important if the xmit path is always
 * executing on one CPU which is different from the completion
 * path. There is also a cache line for members which are
 * read but not written on the fast path.
 *
 * @efx: The associated Efx NIC
 * @queue: DMA queue number
 * @tso_version: Version of TSO in use for this queue.
 * @channel: The associated channel
 * @core_txq: The networking core TX queue structure
 * @buffer: The software buffer ring
 * @tsoh_page: Array of pages of TSO header buffers
 * @txd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @piobuf: PIO buffer region for this TX queue (shared with its partner).
 *	Size of the region is efx_piobuf_size.
 * @piobuf_offset: Buffer offset to be specified in PIO descriptors
 * @initialised: Has hardware queue been initialised?
 * @read_count: Current read pointer.
 *	This is the number of buffers that have been removed from both rings.
 * @old_write_count: The value of @write_count when last checked.
 *	This is here for performance reasons. The xmit path will
 *	only get the up-to-date value of @write_count if this
 *	variable indicates that the queue is empty. This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @merge_events: Number of TX merged completion events
 * @bytes_compl: Number of bytes completed, to be reported to BQL
 * @pkts_compl: Number of packets completed, to be reported to BQL
 * @insert_count: Current insert pointer
 *	This is the number of buffers that have been added to the
 *	software ring.
 * @write_count: Current write pointer
 *	This is the number of buffers that have been added to the
 *	hardware ring.
 * @old_read_count: The value of read_count when last checked.
 *	This is here for performance reasons. The xmit path will
 *	only get the up-to-date value of read_count if this
 *	variable indicates that the queue is full. This is to
 *	avoid cache-line ping-pong between the xmit path and the
 *	completion path.
 * @tso_bursts: Number of times TSO xmit invoked by kernel
 * @tso_long_headers: Number of packets with headers too long for standard
 *	blocks
 * @tso_packets: Number of packets via the TSO xmit path
 * @pushes: Number of times the TX push feature has been used
 * @pio_packets: Number of times the TX PIO feature has been used
 * @xmit_more_available: Are any packets waiting to be pushed to the NIC
 * @tx_packets: Number of packets sent, to supplement MAC statistics
 * @empty_read_count: If the completion path has seen the queue as empty
 *	and the transmission path has not yet checked this, the value of
 *	@read_count bitwise-ORed with %EFX_EMPTY_COUNT_VALID; otherwise 0.
 * @flush_outstanding: non-zero if a flush of this queue has been requested
 *	but has not yet completed
 */
struct efx_tx_queue {
	/* Members which don't change on the fast path */
	struct efx_nic *efx ____cacheline_aligned_in_smp;
	unsigned queue;
	unsigned int tso_version;
	struct efx_channel *channel;
	struct netdev_queue *core_txq;
	struct efx_tx_buffer *buffer;
	struct efx_buffer *tsoh_page;
	struct efx_special_buffer txd;
	unsigned int ptr_mask;
	void __iomem *piobuf;
	unsigned int piobuf_offset;
	bool initialised;

	/* Members used mainly on the completion path */
	unsigned int read_count ____cacheline_aligned_in_smp;
	unsigned int old_write_count;
	unsigned int merge_events;
	unsigned int bytes_compl;
	unsigned int pkts_compl;

	/* Members used only on the xmit path */
	unsigned int insert_count ____cacheline_aligned_in_smp;
	unsigned int write_count;
	unsigned int old_read_count;
	unsigned int tso_bursts;
	unsigned int tso_long_headers;
	unsigned int tso_packets;
	unsigned int pushes;
	unsigned int pio_packets;
	bool xmit_more_available;
	/* Statistics to supplement MAC stats */
	unsigned long tx_packets;

	/* Members shared between paths and sometimes updated */
	unsigned int empty_read_count ____cacheline_aligned_in_smp;
#define EFX_EMPTY_COUNT_VALID 0x80000000
	atomic_t flush_outstanding;
};
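
/* Simplified sketch (not the driver's exact code) of the
 * @empty_read_count handshake described above.  The completion path
 * publishes the point at which it saw the queue empty; the xmit path
 * only then refreshes its cached read pointer:
 *
 *	completion path:
 *		if (tx_queue->read_count == tx_queue->write_count)
 *			tx_queue->empty_read_count =
 *				tx_queue->read_count | EFX_EMPTY_COUNT_VALID;
 *
 *	xmit path:
 *		if (tx_queue->empty_read_count & EFX_EMPTY_COUNT_VALID)
 *			tx_queue->old_read_count = tx_queue->read_count;
 */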

/**
 * struct efx_rx_buffer - An Efx RX data buffer
 * @dma_addr: DMA base address of the buffer
 * @page: The associated page buffer.
 *	Will be %NULL if the buffer slot is currently free.
 * @page_offset: If pending: offset in @page of DMA base address.
 *	If completed: offset in @page of Ethernet header.
 * @len: If pending: length for DMA descriptor.
 *	If completed: received length, excluding hash prefix.
 * @flags: Flags for buffer and packet state. These are only set on the
 *	first buffer of a scattered packet.
 */
struct efx_rx_buffer {
	dma_addr_t dma_addr;
	struct page *page;
	u16 page_offset;
	u16 len;
	u16 flags;
};
#define EFX_RX_BUF_LAST_IN_PAGE	0x0001
#define EFX_RX_PKT_CSUMMED	0x0002
#define EFX_RX_PKT_DISCARD	0x0004
#define EFX_RX_PKT_TCP		0x0040
#define EFX_RX_PKT_PREFIX_LEN	0x0080	/* length is in prefix only */

/**
 * struct efx_rx_page_state - Page-based rx buffer state
 *
 * Inserted at the start of every page allocated for receive buffers.
 * Used to facilitate sharing dma mappings between recycled rx buffers
 * and those passed up to the kernel.
 *
 * @dma_addr: The dma address of this page.
 */
struct efx_rx_page_state {
	dma_addr_t dma_addr;

	unsigned int __pad[0] ____cacheline_aligned;
};
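
/* Illustrative (not driver code): because the state lives at the start of
 * each page, it can be recovered from any buffer's page pointer, e.g.:
 *
 *	struct efx_rx_page_state *state = page_address(rx_buf->page);
 *	dma_addr_t page_dma_addr = state->dma_addr;
 */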

/**
 * struct efx_rx_queue - An Efx RX queue
 * @efx: The associated Efx NIC
 * @core_index: Index of network core RX queue. Will be >= 0 iff this
 *	is associated with a real RX queue.
 * @buffer: The software buffer ring
 * @rxd: The hardware descriptor ring
 * @ptr_mask: The size of the ring minus 1.
 * @refill_enabled: Enable refill whenever fill level is low
 * @flush_pending: Set when a RX flush is pending. Has the same lifetime as
 *	@rxq_flush_pending.
 * @added_count: Number of buffers added to the receive queue.
 * @notified_count: Number of buffers given to NIC (<= @added_count).
 * @removed_count: Number of buffers removed from the receive queue.
 * @scatter_n: Used by NIC specific receive code.
 * @scatter_len: Used by NIC specific receive code.
 * @page_ring: The ring to store DMA mapped pages for reuse.
 * @page_add: Counter to calculate the write pointer for the recycle ring.
 * @page_remove: Counter to calculate the read pointer for the recycle ring.
 * @page_recycle_count: The number of pages that have been recycled.
 * @page_recycle_failed: The number of pages that couldn't be recycled because
 *	the kernel still held a reference to them.
 * @page_recycle_full: The number of pages that were released because the
 *	recycle ring was full.
 * @page_ptr_mask: The number of pages in the RX recycle ring minus 1.
 * @max_fill: RX descriptor maximum fill level (<= ring size)
 * @fast_fill_trigger: RX descriptor fill level that will trigger a fast fill
 *	(<= @max_fill)
 * @min_fill: RX descriptor minimum non-zero fill level.
 *	This records the minimum fill level observed when a ring
 *	refill was triggered.
 * @recycle_count: RX buffer recycle counter.
 * @slow_fill: Timer used to defer efx_nic_generate_fill_event().
 */
struct efx_rx_queue {
	struct efx_nic *efx;
	int core_index;
	struct efx_rx_buffer *buffer;
	struct efx_special_buffer rxd;
	unsigned int ptr_mask;
	bool refill_enabled;
	bool flush_pending;

	unsigned int added_count;
	unsigned int notified_count;
	unsigned int removed_count;
	unsigned int scatter_n;
	unsigned int scatter_len;
	struct page **page_ring;
	unsigned int page_add;
	unsigned int page_remove;
	unsigned int page_recycle_count;
	unsigned int page_recycle_failed;
	unsigned int page_recycle_full;
	unsigned int page_ptr_mask;
	unsigned int max_fill;
	unsigned int fast_fill_trigger;
	unsigned int min_fill;
	unsigned int min_overfill;
	unsigned int recycle_count;
	struct timer_list slow_fill;
	unsigned int slow_fill_count;
	/* Statistics to supplement MAC stats */
	unsigned long rx_packets;
};

enum efx_sync_events_state {
	SYNC_EVENTS_DISABLED = 0,
	SYNC_EVENTS_QUIESCENT,
	SYNC_EVENTS_REQUESTED,
	SYNC_EVENTS_VALID,
};

/**
 * struct efx_channel - An Efx channel
 *
 * A channel comprises an event queue, at least one TX queue, at least
 * one RX queue, and an associated tasklet for processing the event
 * queue.
 *
 * @efx: Associated Efx NIC
 * @channel: Channel instance number
 * @type: Channel type definition
 * @eventq_init: Event queue initialised flag
 * @enabled: Channel enabled indicator
 * @irq: IRQ number (MSI and MSI-X only)
 * @irq_moderation_us: IRQ moderation value (in microseconds)
 * @napi_dev: Net device used with NAPI
 * @napi_str: NAPI control structure
 * @busy_poll_state: State for NAPI vs busy polling; only present if
 *	CONFIG_NET_RX_BUSY_POLL is set (see &enum efx_channel_busy_poll_state)
 * @eventq: Event queue buffer
 * @eventq_mask: Event queue pointer mask
 * @eventq_read_ptr: Event queue read pointer
 * @event_test_cpu: Last CPU to handle interrupt or test event for this channel
 * @irq_count: Number of IRQs since last adaptive moderation decision
 * @irq_mod_score: IRQ moderation score
 * @rps_flow_id: Flow IDs of filters allocated for accelerated RFS,
 *	indexed by filter ID
 * @n_rx_tobe_disc: Count of RX_TOBE_DISC errors
 * @n_rx_ip_hdr_chksum_err: Count of RX IP header checksum errors
 * @n_rx_tcp_udp_chksum_err: Count of RX TCP and UDP checksum errors
 * @n_rx_mcast_mismatch: Count of unmatched multicast frames
 * @n_rx_frm_trunc: Count of RX_FRM_TRUNC errors
 * @n_rx_overlength: Count of RX_OVERLENGTH errors
 * @n_skbuff_leaks: Count of skbuffs leaked due to RX overrun
 * @n_rx_nodesc_trunc: Number of RX packets truncated and then dropped due to
 *	lack of descriptors
 * @n_rx_merge_events: Number of RX merged completion events
 * @n_rx_merge_packets: Number of RX packets completed by merged events
 * @rx_pkt_n_frags: Number of fragments in next packet to be delivered by
 *	__efx_rx_packet(), or zero if there is none
 * @rx_pkt_index: Ring index of first buffer for next packet to be delivered
 *	by __efx_rx_packet(), if @rx_pkt_n_frags != 0
 * @rx_queue: RX queue for this channel
 * @tx_queue: TX queues for this channel
 * @sync_events_state: Current state of sync events on this channel
 * @sync_timestamp_major: Major part of the last ptp sync event
 * @sync_timestamp_minor: Minor part of the last ptp sync event
 */
struct efx_channel {
	struct efx_nic *efx;
	int channel;
	const struct efx_channel_type *type;
	bool eventq_init;
	bool enabled;
	int irq;
	unsigned int irq_moderation_us;
	struct net_device *napi_dev;
	struct napi_struct napi_str;
#ifdef CONFIG_NET_RX_BUSY_POLL
	unsigned long busy_poll_state;
#endif
	struct efx_special_buffer eventq;
	unsigned int eventq_mask;
	unsigned int eventq_read_ptr;
	int event_test_cpu;

	unsigned int irq_count;
	unsigned int irq_mod_score;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rfs_filters_added;
#define RPS_FLOW_ID_INVALID 0xFFFFFFFF
	u32 *rps_flow_id;
#endif

	unsigned n_rx_tobe_disc;
	unsigned n_rx_ip_hdr_chksum_err;
	unsigned n_rx_tcp_udp_chksum_err;
	unsigned n_rx_mcast_mismatch;
	unsigned n_rx_frm_trunc;
	unsigned n_rx_overlength;
	unsigned n_skbuff_leaks;
	unsigned int n_rx_nodesc_trunc;
	unsigned int n_rx_merge_events;
	unsigned int n_rx_merge_packets;

	unsigned int rx_pkt_n_frags;
	unsigned int rx_pkt_index;

	struct efx_rx_queue rx_queue;
	struct efx_tx_queue tx_queue[EFX_TXQ_TYPES];

	enum efx_sync_events_state sync_events_state;
	u32 sync_timestamp_major;
	u32 sync_timestamp_minor;
};

#ifdef CONFIG_NET_RX_BUSY_POLL
enum efx_channel_busy_poll_state {
	EFX_CHANNEL_STATE_IDLE = 0,
	EFX_CHANNEL_STATE_NAPI = BIT(0),
	EFX_CHANNEL_STATE_NAPI_REQ_BIT = 1,
	EFX_CHANNEL_STATE_NAPI_REQ = BIT(1),
	EFX_CHANNEL_STATE_POLL_BIT = 2,
	EFX_CHANNEL_STATE_POLL = BIT(2),
	EFX_CHANNEL_STATE_DISABLE_BIT = 3,
};

static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
	WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}

/* Called from the device poll routine to get ownership of a channel. */
static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
	unsigned long prev, old = READ_ONCE(channel->busy_poll_state);

	while (1) {
		switch (old) {
		case EFX_CHANNEL_STATE_POLL:
			/* Ensure efx_channel_try_lock_poll() won't starve us */
			set_bit(EFX_CHANNEL_STATE_NAPI_REQ_BIT,
				&channel->busy_poll_state);
			/* fallthrough */
		case EFX_CHANNEL_STATE_POLL | EFX_CHANNEL_STATE_NAPI_REQ:
			return false;
		default:
			break;
		}
		prev = cmpxchg(&channel->busy_poll_state, old,
			       EFX_CHANNEL_STATE_NAPI);
		if (unlikely(prev != old)) {
			/* This is likely to mean we've just entered polling
			 * state. Go back round to set the REQ bit.
			 */
			old = prev;
			continue;
		}
		return true;
	}
}

static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
	/* Make sure write has completed from efx_channel_lock_napi() */
	smp_wmb();
	WRITE_ONCE(channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE);
}

/* Called from efx_busy_poll(). */
static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
	return cmpxchg(&channel->busy_poll_state, EFX_CHANNEL_STATE_IDLE,
		       EFX_CHANNEL_STATE_POLL) == EFX_CHANNEL_STATE_IDLE;
}

static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
	clear_bit_unlock(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}

static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
	return test_bit(EFX_CHANNEL_STATE_POLL_BIT, &channel->busy_poll_state);
}

static inline void efx_channel_enable(struct efx_channel *channel)
{
	clear_bit_unlock(EFX_CHANNEL_STATE_DISABLE_BIT,
			 &channel->busy_poll_state);
}

/* Stop further polling or napi access.
 * Returns false if the channel is currently busy polling.
 */
static inline bool efx_channel_disable(struct efx_channel *channel)
{
	set_bit(EFX_CHANNEL_STATE_DISABLE_BIT, &channel->busy_poll_state);
	/* Implicit barrier in efx_channel_busy_polling() */
	return !efx_channel_busy_polling(channel);
}
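
/* Illustrative usage sketch (not driver code): a NAPI poll routine takes
 * the channel lock before processing events, and yields to a busy poller
 * by returning its full budget:
 *
 *	static int example_poll(struct napi_struct *napi, int budget)
 *	{
 *		struct efx_channel *channel =
 *			container_of(napi, struct efx_channel, napi_str);
 *		int spent;
 *
 *		if (!efx_channel_lock_napi(channel))
 *			return budget;
 *		spent = ... process up to budget events ...;
 *		efx_channel_unlock_napi(channel);
 *		return spent;
 *	}
 */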

#else /* CONFIG_NET_RX_BUSY_POLL */

static inline void efx_channel_busy_poll_init(struct efx_channel *channel)
{
}

static inline bool efx_channel_lock_napi(struct efx_channel *channel)
{
	return true;
}

static inline void efx_channel_unlock_napi(struct efx_channel *channel)
{
}

static inline bool efx_channel_try_lock_poll(struct efx_channel *channel)
{
	return false;
}

static inline void efx_channel_unlock_poll(struct efx_channel *channel)
{
}

static inline bool efx_channel_busy_polling(struct efx_channel *channel)
{
	return false;
}

static inline void efx_channel_enable(struct efx_channel *channel)
{
}

static inline bool efx_channel_disable(struct efx_channel *channel)
{
	return true;
}
#endif /* CONFIG_NET_RX_BUSY_POLL */

/**
 * struct efx_msi_context - Context for each MSI
 * @efx: The associated NIC
 * @index: Index of the channel/IRQ
 * @name: Name of the channel/IRQ
 *
 * Unlike &struct efx_channel, this is never reallocated and is always
 * safe for the IRQ handler to access.
 */
struct efx_msi_context {
	struct efx_nic *efx;
	unsigned int index;
	char name[IFNAMSIZ + 6];
};

/**
 * struct efx_channel_type - distinguishes traffic and extra channels
 * @handle_no_channel: Handle failure to allocate an extra channel
 * @pre_probe: Set up extra state prior to initialisation
 * @post_remove: Tear down extra state after finalisation, if allocated.
 *	May be called on channels that have not been probed.
 * @get_name: Generate the channel's name (used for its IRQ handler)
 * @copy: Copy the channel state prior to reallocation. May be %NULL if
 *	reallocation is not supported.
 * @receive_skb: Handle an skb ready to be passed to netif_receive_skb()
 * @keep_eventq: Flag for whether event queue should be kept initialised
 *	while the device is stopped
 */
struct efx_channel_type {
	void (*handle_no_channel)(struct efx_nic *);
	int (*pre_probe)(struct efx_channel *);
	void (*post_remove)(struct efx_channel *);
	void (*get_name)(struct efx_channel *, char *buf, size_t len);
	struct efx_channel *(*copy)(const struct efx_channel *);
	bool (*receive_skb)(struct efx_channel *, struct sk_buff *);
	bool keep_eventq;
};

enum efx_led_mode {
	EFX_LED_OFF = 0,
	EFX_LED_ON = 1,
	EFX_LED_DEFAULT = 2
};

#define STRING_TABLE_LOOKUP(val, member) \
	((val) < member ## _max) ? member ## _names[val] : "(invalid)"

extern const char *const efx_loopback_mode_names[];
extern const unsigned int efx_loopback_mode_max;
#define LOOPBACK_MODE(efx) \
	STRING_TABLE_LOOKUP((efx)->loopback_mode, efx_loopback_mode)

extern const char *const efx_reset_type_names[];
extern const unsigned int efx_reset_type_max;
#define RESET_TYPE(type) \
	STRING_TABLE_LOOKUP(type, efx_reset_type)
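
/* Illustrative (not driver code): these string tables are typically used
 * when logging, e.g.:
 *
 *	netif_info(efx, drv, efx->net_dev, "resetting (%s)\n",
 *		   RESET_TYPE(method));
 */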

enum efx_int_mode {
	/* Be careful if altering to correct macro below */
	EFX_INT_MODE_MSIX = 0,
	EFX_INT_MODE_MSI = 1,
	EFX_INT_MODE_LEGACY = 2,
	EFX_INT_MODE_MAX	/* Insert any new items before this */
};
#define EFX_INT_MODE_USE_MSI(x) (((x)->interrupt_mode) <= EFX_INT_MODE_MSI)

enum nic_state {
	STATE_UNINIT = 0,	/* device being probed/removed or is frozen */
	STATE_READY = 1,	/* hardware ready and netdev registered */
	STATE_DISABLED = 2,	/* device disabled due to hardware errors */
	STATE_RECOVERY = 3,	/* device recovering from PCI error */
};

/* Forward declaration */
struct efx_nic;

/* Pseudo bit-mask flow control field */
#define EFX_FC_RX	FLOW_CTRL_RX
#define EFX_FC_TX	FLOW_CTRL_TX
#define EFX_FC_AUTO	4

/**
 * struct efx_link_state - Current state of the link
 * @up: Link is up
 * @fd: Link is full-duplex
 * @fc: Actual flow control flags
 * @speed: Link speed (Mbps)
 */
struct efx_link_state {
	bool up;
	bool fd;
	u8 fc;
	unsigned int speed;
};

static inline bool efx_link_state_equal(const struct efx_link_state *left,
					const struct efx_link_state *right)
{
	return left->up == right->up && left->fd == right->fd &&
		left->fc == right->fc && left->speed == right->speed;
}

/**
 * struct efx_phy_operations - Efx PHY operations table
 * @probe: Probe PHY and initialise efx->mdio.mode_support, efx->mdio.mmds,
 *	efx->loopback_modes.
 * @init: Initialise PHY
 * @fini: Shut down PHY
 * @remove: Free resources allocated by probe()
 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
 * @poll: Update @link_state and report whether it changed.
 *	Serialised by the mac_lock.
 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
 * @set_npage_adv: Set abilities advertised in (Extended) Next Page
 *	(only needed where AN bit is set in mmds)
 * @test_alive: Test that PHY is 'alive' (online)
 * @test_name: Get the name of a PHY-specific test/result
 * @run_tests: Run tests and record results as appropriate (offline).
 *	Flags are the ethtool tests flags.
 * @get_module_eeprom: Read the contents of a PHY module's EEPROM
 * @get_module_info: Describe the type and size of a PHY module's EEPROM
 */
struct efx_phy_operations {
	int (*probe) (struct efx_nic *efx);
	int (*init) (struct efx_nic *efx);
	void (*fini) (struct efx_nic *efx);
	void (*remove) (struct efx_nic *efx);
	int (*reconfigure) (struct efx_nic *efx);
	bool (*poll) (struct efx_nic *efx);
	void (*get_settings) (struct efx_nic *efx,
			      struct ethtool_cmd *ecmd);
	int (*set_settings) (struct efx_nic *efx,
			     struct ethtool_cmd *ecmd);
	void (*set_npage_adv) (struct efx_nic *efx, u32);
	int (*test_alive) (struct efx_nic *efx);
	const char *(*test_name) (struct efx_nic *efx, unsigned int index);
	int (*run_tests) (struct efx_nic *efx, int *results, unsigned flags);
	int (*get_module_eeprom) (struct efx_nic *efx,
				  struct ethtool_eeprom *ee,
				  u8 *data);
	int (*get_module_info) (struct efx_nic *efx,
				struct ethtool_modinfo *modinfo);
};

/**
 * enum efx_phy_mode - PHY operating mode flags
 * @PHY_MODE_NORMAL: on and should pass traffic
 * @PHY_MODE_TX_DISABLED: on with TX disabled
 * @PHY_MODE_LOW_POWER: set to low power through MDIO
 * @PHY_MODE_OFF: switched off through external control
 * @PHY_MODE_SPECIAL: on but will not pass traffic
 */
enum efx_phy_mode {
	PHY_MODE_NORMAL		= 0,
	PHY_MODE_TX_DISABLED	= 1,
	PHY_MODE_LOW_POWER	= 2,
	PHY_MODE_OFF		= 4,
	PHY_MODE_SPECIAL	= 8,
};

static inline bool efx_phy_mode_disabled(enum efx_phy_mode mode)
{
	return !!(mode & ~PHY_MODE_TX_DISABLED);
}

/**
 * struct efx_hw_stat_desc - Description of a hardware statistic
 * @name: Name of the statistic as visible through ethtool, or %NULL if
 *	it should not be exposed
 * @dma_width: Width in bits (0 for non-DMA statistics)
 * @offset: Offset within stats (ignored for non-DMA statistics)
 */
struct efx_hw_stat_desc {
	const char *name;
	u16 dma_width;
	u16 offset;
};

/* Number of bits used in a multicast filter hash address */
#define EFX_MCAST_HASH_BITS 8

/* Number of (single-bit) entries in a multicast filter hash */
#define EFX_MCAST_HASH_ENTRIES (1 << EFX_MCAST_HASH_BITS)

/* An Efx multicast filter hash */
union efx_multicast_hash {
	u8 byte[EFX_MCAST_HASH_ENTRIES / 8];
	efx_oword_t oword[EFX_MCAST_HASH_ENTRIES / sizeof(efx_oword_t) / 8];
};
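
/* Illustrative (not driver code): a multicast address whose hash value is
 * h (0 .. %EFX_MCAST_HASH_ENTRIES - 1) enables bit h of the table, which
 * in byte terms amounts to:
 *
 *	union efx_multicast_hash *mc_hash = &efx->multicast_hash;
 *
 *	mc_hash->byte[h / 8] |= 1 << (h % 8);
 */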

struct vfdi_status;

/**
 * struct efx_nic - an Efx NIC
 * @name: Device name (net device name or bus id before net device registered)
 * @pci_dev: The PCI device
 * @node: List node for maintaining primary/secondary function lists
 * @primary: &struct efx_nic instance for the primary function of this
 *	controller. May be the same structure, and may be %NULL if no
 *	primary function is bound. Serialised by rtnl_lock.
 * @secondary_list: List of &struct efx_nic instances for the secondary PCI
 *	functions of the controller, if this is for the primary function.
 *	Serialised by rtnl_lock.
 * @type: Controller type attributes
 * @legacy_irq: IRQ number
 * @workqueue: Workqueue for port reconfigures and the HW monitor.
 *	Work items do not hold and must not acquire RTNL.
 * @workqueue_name: Name of workqueue
 * @reset_work: Scheduled reset workitem
 * @membase_phys: Memory BAR value as physical address
 * @membase: Memory BAR value
 * @interrupt_mode: Interrupt mode
 * @timer_quantum_ns: Interrupt timer quantum, in nanoseconds
 * @timer_max_ns: Interrupt timer maximum value, in nanoseconds
 * @irq_rx_adaptive: Adaptive IRQ moderation enabled for RX event queues
 * @irq_mod_step_us: Step size for IRQ moderation for RX event queues
 * @irq_rx_moderation_us: IRQ moderation time for RX event queues
 * @msg_enable: Log message enable flags
 * @state: Device state number (%STATE_*). Serialised by the rtnl_lock.
 * @reset_pending: Bitmask for pending resets
 * @channel: Channels
 * @msi_context: Context for each MSI
 * @extra_channel_type: Types of extra (non-traffic) channels that
 *	should be allocated for this NIC
 * @rxq_entries: Size of receive queues requested by user.
 * @txq_entries: Size of transmit queues requested by user.
 * @txq_stop_thresh: TX queue fill level at or above which we stop it.
 * @txq_wake_thresh: TX queue fill level at or below which we wake it.
 * @tx_dc_base: Base qword address in SRAM of TX queue descriptor caches
 * @rx_dc_base: Base qword address in SRAM of RX queue descriptor caches
 * @sram_lim_qw: Qword address limit of SRAM
 * @next_buffer_table: First available buffer table id
 * @n_channels: Number of channels in use
 * @n_rx_channels: Number of channels used for RX (= number of RX queues)
 * @n_tx_channels: Number of channels used for TX
 * @rx_ip_align: RX DMA address offset to have IP header aligned in
 *	accordance with NET_IP_ALIGN
 * @rx_dma_len: Current maximum RX DMA length
 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
 * @rx_buffer_truesize: Amortised allocation size of an RX buffer,
 *	for use in sk_buff::truesize
 * @rx_prefix_size: Size of RX prefix before packet data
 * @rx_packet_hash_offset: Offset of RX flow hash from start of packet data
 *	(valid only if @rx_prefix_size != 0; always negative)
 * @rx_packet_len_offset: Offset of RX packet length from start of packet data
 *	(valid only for NICs that set %EFX_RX_PKT_PREFIX_LEN; always negative)
 * @rx_packet_ts_offset: Offset of timestamp from start of packet data
 *	(valid only if channel->sync_timestamps_enabled; always negative)
 * @rx_hash_key: Toeplitz hash key for RSS
 * @rx_indir_table: Indirection table for RSS
 * @rx_scatter: Scatter mode enabled for receives
 * @int_error_count: Number of internal errors seen recently
 * @int_error_expire: Time at which error count will be expired
 * @irq_soft_enabled: Are IRQs soft-enabled? If not, IRQ handler will
 *	acknowledge but do nothing else.
 * @irq_status: Interrupt status buffer
 * @irq_zero_count: Number of legacy IRQs seen with queue flags == 0
 * @irq_level: IRQ level/index for IRQs not triggered by an event queue
 * @selftest_work: Work item for asynchronous self-test
 * @mtd_list: List of MTDs attached to the NIC
 * @nic_data: Hardware dependent state
 * @mcdi: Management-Controller-to-Driver Interface state
 * @mac_lock: MAC access lock. Protects @port_enabled, @phy_mode,
 *	efx_monitor() and efx_reconfigure_port()
 * @port_enabled: Port enabled indicator.
 *	Serialises efx_stop_all(), efx_start_all(), efx_monitor() and
 *	efx_mac_work() with kernel interfaces. Safe to read under any
 *	one of the rtnl_lock, mac_lock, or netif_tx_lock, but all three must
 *	be held to modify it.
 * @port_initialized: Port initialized?
 * @net_dev: Operating system network device. Consider holding the rtnl lock
 * @fixed_features: Features which cannot be turned off
 * @stats_buffer: DMA buffer for statistics
 * @phy_type: PHY type
 * @phy_op: PHY interface
 * @phy_data: PHY private data (including PHY-specific stats)
 * @mdio: PHY MDIO interface
 * @mdio_bus: PHY MDIO bus ID (only used by Siena)
 * @phy_mode: PHY operating mode. Serialised by @mac_lock.
 * @link_advertising: Autonegotiation advertising flags
 * @link_state: Current state of the link
 * @n_link_state_changes: Number of times the link has changed state
 * @unicast_filter: Flag for Falcon-arch simple unicast filter.
 *	Protected by @mac_lock.
 * @multicast_hash: Multicast hash table for Falcon-arch.
 *	Protected by @mac_lock.
 * @wanted_fc: Wanted flow control flags
 * @fc_disable: When non-zero flow control is disabled. Typically used to
 *	ensure that network back pressure doesn't delay dma queue flushes.
 *	Serialised by the rtnl lock.
 * @mac_work: Work item for changing MAC promiscuity and multicast hash
 * @loopback_mode: Loopback status
 * @loopback_modes: Supported loopback mode bitmask
 * @loopback_selftest: Offline self-test private state
 * @filter_sem: Filter table rw_semaphore, for freeing the table
 * @filter_lock: Filter table lock, for mere content changes
 * @filter_state: Architecture-dependent filter table state
 * @rps_expire_channel: Next channel to check for expiry
 * @rps_expire_index: Next index to check for expiry in
 *	@rps_expire_channel's @rps_flow_id
 * @active_queues: Count of RX and TX queues that haven't been flushed and drained.
 * @rxq_flush_pending: Count of number of receive queues that need to be flushed.
 *	Decremented when efx_flush_rx_queue() is called.
 * @rxq_flush_outstanding: Count of number of RX flushes started but not yet
 *	completed (either success or failure). Not used when MCDI is used to
 *	flush receive queues.
 * @flush_wq: wait queue used by efx_nic_flush_queues() to wait for flush completions.
 * @vf_count: Number of VFs intended to be enabled.
 * @vf_init_count: Number of VFs that have been fully initialised.
 * @vi_scale: log2 number of vnics per VF.
 * @ptp_data: PTP state data
 * @vpd_sn: Serial number read from VPD
 * @monitor_work: Hardware monitor workitem
 * @biu_lock: BIU (bus interface unit) lock
 * @last_irq_cpu: Last CPU to handle a possible test interrupt. This
 *	field is used by efx_test_interrupts() to verify that an
 *	interrupt has occurred.
 * @stats_lock: Statistics update lock. Must be held when calling
 *	efx_nic_type::{update,start,stop}_stats.
 * @n_rx_noskb_drops: Count of RX packets dropped due to failure to allocate an skb
 *
 * This is stored in the private area of the &struct net_device.
 */
struct efx_nic {
	/* The following fields should be written very rarely */

	char name[IFNAMSIZ];
	struct list_head node;
	struct efx_nic *primary;
	struct list_head secondary_list;
	struct pci_dev *pci_dev;
	unsigned int port_num;
	const struct efx_nic_type *type;
	int legacy_irq;
	bool eeh_disabled_legacy_irq;
	struct workqueue_struct *workqueue;
	char workqueue_name[16];
	struct work_struct reset_work;
	resource_size_t membase_phys;
	void __iomem *membase;

	enum efx_int_mode interrupt_mode;
	unsigned int timer_quantum_ns;
	unsigned int timer_max_ns;
	bool irq_rx_adaptive;
	unsigned int irq_mod_step_us;
	unsigned int irq_rx_moderation_us;
	u32 msg_enable;

	enum nic_state state;
	unsigned long reset_pending;

	struct efx_channel *channel[EFX_MAX_CHANNELS];
	struct efx_msi_context msi_context[EFX_MAX_CHANNELS];
	const struct efx_channel_type *
	extra_channel_type[EFX_MAX_EXTRA_CHANNELS];

	unsigned rxq_entries;
	unsigned txq_entries;
	unsigned int txq_stop_thresh;
	unsigned int txq_wake_thresh;

	unsigned tx_dc_base;
	unsigned rx_dc_base;
	unsigned sram_lim_qw;
	unsigned next_buffer_table;

	unsigned int max_channels;
	unsigned int max_tx_channels;
	unsigned n_channels;
	unsigned n_rx_channels;
	unsigned rss_spread;
	unsigned tx_channel_offset;
	unsigned n_tx_channels;
	unsigned int rx_ip_align;
	unsigned int rx_dma_len;
	unsigned int rx_buffer_order;
	unsigned int rx_buffer_truesize;
	unsigned int rx_page_buf_step;
	unsigned int rx_bufs_per_page;
	unsigned int rx_pages_per_batch;
	unsigned int rx_prefix_size;
	int rx_packet_hash_offset;
	int rx_packet_len_offset;
	int rx_packet_ts_offset;
	u8 rx_hash_key[40];
	u32 rx_indir_table[128];
	bool rx_scatter;

	unsigned int_error_count;
	unsigned long int_error_expire;

	bool irq_soft_enabled;
	struct efx_buffer irq_status;
	unsigned irq_zero_count;
	unsigned irq_level;
	struct delayed_work selftest_work;

#ifdef CONFIG_SFC_MTD
	struct list_head mtd_list;
#endif

	void *nic_data;
	struct efx_mcdi_data *mcdi;

	struct mutex mac_lock;
	struct work_struct mac_work;
	bool port_enabled;

	bool mc_bist_for_other_fn;
	bool port_initialized;
	struct net_device *net_dev;

	netdev_features_t fixed_features;

	struct efx_buffer stats_buffer;
	u64 rx_nodesc_drops_total;
	u64 rx_nodesc_drops_while_down;
	bool rx_nodesc_drops_prev_state;

	unsigned int phy_type;
	const struct efx_phy_operations *phy_op;
	void *phy_data;
	struct mdio_if_info mdio;
	unsigned int mdio_bus;
	enum efx_phy_mode phy_mode;

	u32 link_advertising;
	struct efx_link_state link_state;
	unsigned int n_link_state_changes;

	bool unicast_filter;
	union efx_multicast_hash multicast_hash;
	u8 wanted_fc;
	unsigned fc_disable;

	atomic_t rx_reset;
	enum efx_loopback_mode loopback_mode;
	u64 loopback_modes;

	void *loopback_selftest;

	struct rw_semaphore filter_sem;
	spinlock_t filter_lock;
	void *filter_state;
#ifdef CONFIG_RFS_ACCEL
	unsigned int rps_expire_channel;
	unsigned int rps_expire_index;
#endif

	atomic_t active_queues;
	atomic_t rxq_flush_pending;
	atomic_t rxq_flush_outstanding;
	wait_queue_head_t flush_wq;

#ifdef CONFIG_SFC_SRIOV
	unsigned vf_count;
	unsigned vf_init_count;
	unsigned vi_scale;
#endif

	struct efx_ptp_data *ptp_data;

	char *vpd_sn;

	/* The following fields may be written more often */

	struct delayed_work monitor_work ____cacheline_aligned_in_smp;
	spinlock_t biu_lock;
	int last_irq_cpu;
	spinlock_t stats_lock;
	atomic_t n_rx_noskb_drops;
};

static inline int efx_dev_registered(struct efx_nic *efx)
{
	return efx->net_dev->reg_state == NETREG_REGISTERED;
}

static inline unsigned int efx_port_num(struct efx_nic *efx)
{
	return efx->port_num;
}

struct efx_mtd_partition {
	struct list_head node;
	struct mtd_info mtd;
	const char *dev_type_name;
	const char *type_name;
	char name[IFNAMSIZ + 20];
};

/**
 * struct efx_nic_type - Efx device type definition
 * @mem_bar: Get the memory BAR
 * @mem_map_size: Get memory BAR mapped size
 * @probe: Probe the controller
 * @remove: Free resources allocated by probe()
 * @init: Initialise the controller
 * @dimension_resources: Dimension controller resources (buffer table,
 *	and VIs once the available interrupt resources are clear)
 * @fini: Shut down the controller
 * @monitor: Periodic function for polling link state and hardware monitor
 * @map_reset_reason: Map ethtool reset reason to a reset method
 * @map_reset_flags: Map ethtool reset flags to a reset method, if possible
 * @reset: Reset the controller hardware and possibly the PHY. This will
 *	be called while the controller is uninitialised.
 * @probe_port: Probe the MAC and PHY
 * @remove_port: Free resources allocated by probe_port()
 * @handle_global_event: Handle a "global" event (may be %NULL)
 * @fini_dmaq: Flush and finalise DMA queues (RX and TX queues)
 * @prepare_flush: Prepare the hardware for flushing the DMA queues
 *	(for Falcon architecture)
 * @finish_flush: Clean up after flushing the DMA queues (for Falcon
 *	architecture)
 * @prepare_flr: Prepare for an FLR
 * @finish_flr: Clean up after an FLR
 * @describe_stats: Describe statistics for ethtool
 * @update_stats: Update statistics not provided by event handling.
 *	Either argument may be %NULL.
 * @start_stats: Start the regular fetching of statistics
 * @pull_stats: Pull stats from the NIC and wait until they arrive.
 * @stop_stats: Stop the regular fetching of statistics
 * @set_id_led: Set state of identifying LED or revert to automatic function
 * @push_irq_moderation: Apply interrupt moderation value
 * @reconfigure_port: Push loopback/power/txdis changes to the MAC and PHY
 * @prepare_enable_fc_tx: Prepare MAC to enable pause frame TX (may be %NULL)
 * @reconfigure_mac: Push MAC address, MTU, flow control and filter settings
 *	to the hardware. Serialised by the mac_lock.
 * @check_mac_fault: Check MAC fault state. True if fault present.
 * @get_wol: Get WoL configuration from driver state
 * @set_wol: Push WoL configuration to the NIC
 * @resume_wol: Synchronise WoL state between driver and MC (e.g. after resume)
 * @test_chip: Test registers. May use efx_farch_test_registers(), and is
 *	expected to reset the NIC.
 * @test_nvram: Test validity of NVRAM contents
 * @mcdi_request: Send an MCDI request with the given header and SDU.
 *	The SDU length may be any value from 0 up to the protocol-
 *	defined maximum, but its buffer will be padded to a multiple
 *	of 4 bytes.
 * @mcdi_poll_response: Test whether an MCDI response is available.
 * @mcdi_read_response: Read the MCDI response PDU. The offset will
 *	be a multiple of 4. The length may not be, but the buffer
 *	will be padded so it is safe to round up.
 * @mcdi_poll_reboot: Test whether the MCDI has rebooted. If so,
 *	return an appropriate error code for aborting any current
 *	request; otherwise return 0.
 * @irq_enable_master: Enable IRQs on the NIC. Each event queue must
 *	be separately enabled after this.
 * @irq_test_generate: Generate a test IRQ
 * @irq_disable_non_ev: Disable non-event IRQs on the NIC. Each event
 *	queue must be separately disabled before this.
 * @irq_handle_msi: Handle MSI for a channel. The @dev_id argument is
 *	a pointer to the &struct efx_msi_context for the channel.
 * @irq_handle_legacy: Handle legacy interrupt. The @dev_id argument
 *	is a pointer to the &struct efx_nic.
 * @tx_probe: Allocate resources for TX queue
 * @tx_init: Initialise TX queue on the NIC
 * @tx_remove: Free resources for TX queue
 * @tx_write: Write TX descriptors and doorbell
 * @rx_push_rss_config: Write RSS hash key and indirection table to the NIC
 * @rx_probe: Allocate resources for RX queue
 * @rx_init: Initialise RX queue on the NIC
 * @rx_remove: Free resources for RX queue
 * @rx_write: Write RX descriptors and doorbell
 * @rx_defer_refill: Generate a refill reminder event
 * @ev_probe: Allocate resources for event queue
 * @ev_init: Initialise event queue on the NIC
 * @ev_fini: Deinitialise event queue on the NIC
 * @ev_remove: Free resources for event queue
 * @ev_process: Process events for a queue, up to the given NAPI quota
 * @ev_read_ack: Acknowledge read events on a queue, rearming its IRQ
 * @ev_test_generate: Generate a test event
 * @filter_table_probe: Probe filter capabilities and set up filter software state
 * @filter_table_restore: Restore filters removed from hardware
 * @filter_table_remove: Remove filters from hardware and tear down software state
 * @filter_update_rx_scatter: Update filters after change to rx scatter setting
 * @filter_insert: add or replace a filter
 * @filter_remove_safe: remove a filter by ID, carefully
 * @filter_get_safe: retrieve a filter by ID, carefully
 * @filter_clear_rx: Remove all RX filters whose priority is less than or
 *	equal to the given priority and is not %EFX_FILTER_PRI_AUTO
 * @filter_count_rx_used: Get the number of filters in use at a given priority
 * @filter_get_rx_id_limit: Get maximum value of a filter id, plus 1
 * @filter_get_rx_ids: Get list of RX filters at a given priority
 * @filter_rfs_insert: Add or replace a filter for RFS. This must be
 *	atomic. The hardware change may be asynchronous but should
 *	not be delayed for long. It may fail if this can't be done
 *	atomically.
 * @filter_rfs_expire_one: Consider expiring a filter inserted for RFS.
 *	This must check whether the specified table entry is used by RFS
 *	and that rps_may_expire_flow() returns true for it.
 * @mtd_probe: Probe and add MTD partitions associated with this net device,
 *	using efx_mtd_add()
 * @mtd_rename: Set an MTD partition name using the net device name
 * @mtd_read: Read from an MTD partition
 * @mtd_erase: Erase part of an MTD partition
 * @mtd_write: Write to an MTD partition
 * @mtd_sync: Wait for write-back to complete on MTD partition. This
 *	also notifies the driver that a writer has finished using this
 *	partition.
 * @ptp_write_host_time: Send host time to MC as part of sync protocol
 * @ptp_set_ts_sync_events: Enable or disable sync events for inline RX
 *	timestamping, possibly only temporarily for the purposes of a reset.
 * @ptp_set_ts_config: Set hardware timestamp configuration. The flags
 *	and tx_type will already have been validated but this operation
 *	must validate and update rx_filter.
 * @set_mac_address: Set the MAC address of the device
 * @revision: Hardware architecture revision
 * @txd_ptr_tbl_base: TX descriptor ring base address
 * @rxd_ptr_tbl_base: RX descriptor ring base address
 * @buf_tbl_base: Buffer table base address
 * @evq_ptr_tbl_base: Event queue pointer table base address
 * @evq_rptr_tbl_base: Event queue read-pointer table base address
 * @max_dma_mask: Maximum possible DMA mask
 * @rx_prefix_size: Size of RX prefix before packet data
 * @rx_hash_offset: Offset of RX flow hash within prefix
 * @rx_ts_offset: Offset of timestamp within prefix
 * @rx_buffer_padding: Size of padding at end of RX packet
 * @can_rx_scatter: NIC is able to scatter packets to multiple buffers
 * @always_rx_scatter: NIC will always scatter packets to multiple buffers
 * @max_interrupt_mode: Highest capability interrupt mode supported
 *	from &enum efx_int_mode.
 * @timer_period_max: Maximum period of interrupt timer (in ticks)
 * @offload_features: net_device feature flags for protocol offload
 *	features implemented in hardware
 * @mcdi_max_ver: Maximum MCDI version supported
 * @hwtstamp_filters: Mask of hardware timestamp filter types supported
 */
struct efx_nic_type {
	bool is_vf;
	unsigned int mem_bar;
	unsigned int (*mem_map_size)(struct efx_nic *efx);
	int (*probe)(struct efx_nic *efx);
	void (*remove)(struct efx_nic *efx);
	int (*init)(struct efx_nic *efx);
	int (*dimension_resources)(struct efx_nic *efx);
	void (*fini)(struct efx_nic *efx);
	void (*monitor)(struct efx_nic *efx);
	enum reset_type (*map_reset_reason)(enum reset_type reason);
	int (*map_reset_flags)(u32 *flags);
	int (*reset)(struct efx_nic *efx, enum reset_type method);
	int (*probe_port)(struct efx_nic *efx);
	void (*remove_port)(struct efx_nic *efx);
	bool (*handle_global_event)(struct efx_channel *channel, efx_qword_t *);
	int (*fini_dmaq)(struct efx_nic *efx);
	void (*prepare_flush)(struct efx_nic *efx);
	void (*finish_flush)(struct efx_nic *efx);
	void (*prepare_flr)(struct efx_nic *efx);
	void (*finish_flr)(struct efx_nic *efx);
	size_t (*describe_stats)(struct efx_nic *efx, u8 *names);
	size_t (*update_stats)(struct efx_nic *efx, u64 *full_stats,
			       struct rtnl_link_stats64 *core_stats);
	void (*start_stats)(struct efx_nic *efx);
	void (*pull_stats)(struct efx_nic *efx);
	void (*stop_stats)(struct efx_nic *efx);
	void (*set_id_led)(struct efx_nic *efx, enum efx_led_mode mode);
	void (*push_irq_moderation)(struct efx_channel *channel);
	int (*reconfigure_port)(struct efx_nic *efx);
	void (*prepare_enable_fc_tx)(struct efx_nic *efx);
	int (*reconfigure_mac)(struct efx_nic *efx);
	bool (*check_mac_fault)(struct efx_nic *efx);
	void (*get_wol)(struct efx_nic *efx, struct ethtool_wolinfo *wol);
	int (*set_wol)(struct efx_nic *efx, u32 type);
	void (*resume_wol)(struct efx_nic *efx);
	int (*test_chip)(struct efx_nic *efx, struct efx_self_tests *tests);
	int (*test_nvram)(struct efx_nic *efx);
	void (*mcdi_request)(struct efx_nic *efx,
			     const efx_dword_t *hdr, size_t hdr_len,
			     const efx_dword_t *sdu, size_t sdu_len);
	bool (*mcdi_poll_response)(struct efx_nic *efx);
	void (*mcdi_read_response)(struct efx_nic *efx, efx_dword_t *pdu,
				   size_t pdu_offset, size_t pdu_len);
	int (*mcdi_poll_reboot)(struct efx_nic *efx);
	void (*mcdi_reboot_detected)(struct efx_nic *efx);
	void (*irq_enable_master)(struct efx_nic *efx);
	int (*irq_test_generate)(struct efx_nic *efx);
	void (*irq_disable_non_ev)(struct efx_nic *efx);
	irqreturn_t (*irq_handle_msi)(int irq, void *dev_id);
	irqreturn_t (*irq_handle_legacy)(int irq, void *dev_id);
	int (*tx_probe)(struct efx_tx_queue *tx_queue);
	void (*tx_init)(struct efx_tx_queue *tx_queue);
	void (*tx_remove)(struct efx_tx_queue *tx_queue);
	void (*tx_write)(struct efx_tx_queue *tx_queue);
	int (*rx_push_rss_config)(struct efx_nic *efx, bool user,
				  const u32 *rx_indir_table);
	int (*rx_probe)(struct efx_rx_queue *rx_queue);
	void (*rx_init)(struct efx_rx_queue *rx_queue);
	void (*rx_remove)(struct efx_rx_queue *rx_queue);
	void (*rx_write)(struct efx_rx_queue *rx_queue);
	void (*rx_defer_refill)(struct efx_rx_queue *rx_queue);
	int (*ev_probe)(struct efx_channel *channel);
	int (*ev_init)(struct efx_channel *channel);
	void (*ev_fini)(struct efx_channel *channel);
	void (*ev_remove)(struct efx_channel *channel);
	int (*ev_process)(struct efx_channel *channel, int quota);
	void (*ev_read_ack)(struct efx_channel *channel);
	void (*ev_test_generate)(struct efx_channel *channel);
	int (*filter_table_probe)(struct efx_nic *efx);
	void (*filter_table_restore)(struct efx_nic *efx);
	void (*filter_table_remove)(struct efx_nic *efx);
	void (*filter_update_rx_scatter)(struct efx_nic *efx);
	s32 (*filter_insert)(struct efx_nic *efx,
			     struct efx_filter_spec *spec, bool replace);
	int (*filter_remove_safe)(struct efx_nic *efx,
				  enum efx_filter_priority priority,
				  u32 filter_id);
	int (*filter_get_safe)(struct efx_nic *efx,
			       enum efx_filter_priority priority,
			       u32 filter_id, struct efx_filter_spec *);
	int (*filter_clear_rx)(struct efx_nic *efx,
			       enum efx_filter_priority priority);
	u32 (*filter_count_rx_used)(struct efx_nic *efx,
				    enum efx_filter_priority priority);
	u32 (*filter_get_rx_id_limit)(struct efx_nic *efx);
	s32 (*filter_get_rx_ids)(struct efx_nic *efx,
				 enum efx_filter_priority priority,
				 u32 *buf, u32 size);
#ifdef CONFIG_RFS_ACCEL
	s32 (*filter_rfs_insert)(struct efx_nic *efx,
				 struct efx_filter_spec *spec);
	bool (*filter_rfs_expire_one)(struct efx_nic *efx, u32 flow_id,
				      unsigned int index);
#endif
#ifdef CONFIG_SFC_MTD
	int (*mtd_probe)(struct efx_nic *efx);
	void (*mtd_rename)(struct efx_mtd_partition *part);
	int (*mtd_read)(struct mtd_info *mtd, loff_t start, size_t len,
			size_t *retlen, u8 *buffer);
	int (*mtd_erase)(struct mtd_info *mtd, loff_t start, size_t len);
	int (*mtd_write)(struct mtd_info *mtd, loff_t start, size_t len,
			 size_t *retlen, const u8 *buffer);
	int (*mtd_sync)(struct mtd_info *mtd);
#endif
	void (*ptp_write_host_time)(struct efx_nic *efx, u32 host_time);
	int (*ptp_set_ts_sync_events)(struct efx_nic *efx, bool en, bool temp);
	int (*ptp_set_ts_config)(struct efx_nic *efx,
				 struct hwtstamp_config *init);
	int (*sriov_configure)(struct efx_nic *efx, int num_vfs);
	int (*vlan_rx_add_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
	int (*vlan_rx_kill_vid)(struct efx_nic *efx, __be16 proto, u16 vid);
	int (*sriov_init)(struct efx_nic *efx);
	void (*sriov_fini)(struct efx_nic *efx);
	bool (*sriov_wanted)(struct efx_nic *efx);
	void (*sriov_reset)(struct efx_nic *efx);
	void (*sriov_flr)(struct efx_nic *efx, unsigned vf_i);
	int (*sriov_set_vf_mac)(struct efx_nic *efx, int vf_i, u8 *mac);
	int (*sriov_set_vf_vlan)(struct efx_nic *efx, int vf_i, u16 vlan,
				 u8 qos);
	int (*sriov_set_vf_spoofchk)(struct efx_nic *efx, int vf_i,
				     bool spoofchk);
	int (*sriov_get_vf_config)(struct efx_nic *efx, int vf_i,
				   struct ifla_vf_info *ivi);
	int (*sriov_set_vf_link_state)(struct efx_nic *efx, int vf_i,
				       int link_state);
	int (*sriov_get_phys_port_id)(struct efx_nic *efx,
				      struct netdev_phys_item_id *ppid);
	int (*vswitching_probe)(struct efx_nic *efx);
	int (*vswitching_restore)(struct efx_nic *efx);
	void (*vswitching_remove)(struct efx_nic *efx);
	int (*get_mac_address)(struct efx_nic *efx, unsigned char *perm_addr);
	int (*set_mac_address)(struct efx_nic *efx);

	int revision;
	unsigned int txd_ptr_tbl_base;
	unsigned int rxd_ptr_tbl_base;
	unsigned int buf_tbl_base;
	unsigned int evq_ptr_tbl_base;
	unsigned int evq_rptr_tbl_base;
	u64 max_dma_mask;
	unsigned int rx_prefix_size;
	unsigned int rx_hash_offset;
	unsigned int rx_ts_offset;
	unsigned int rx_buffer_padding;
	bool can_rx_scatter;
	bool always_rx_scatter;
	unsigned int max_interrupt_mode;
	unsigned int timer_period_max;
	netdev_features_t offload_features;
	int mcdi_max_ver;
	unsigned int max_rx_ip_filters;
	u32 hwtstamp_filters;
};

/**************************************************************************
 *
 * Prototypes and inline functions
 *
 *************************************************************************/

static inline struct efx_channel *
efx_get_channel(struct efx_nic *efx, unsigned index)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_channels);
	return efx->channel[index];
}

/* Iterate over all used channels */
#define efx_for_each_channel(_channel, _efx)				\
	for (_channel = (_efx)->channel[0];				\
	     _channel;							\
	     _channel = (_channel->channel + 1 < (_efx)->n_channels) ?	\
		     (_efx)->channel[_channel->channel + 1] : NULL)
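
/* Illustrative (not driver code): iterating all channels, e.g. to reset
 * a per-channel counter:
 *
 *	struct efx_channel *channel;
 *
 *	efx_for_each_channel(channel, efx)
 *		channel->irq_count = 0;
 */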

/* Iterate over all used channels in reverse */
#define efx_for_each_channel_rev(_channel, _efx)			\
	for (_channel = (_efx)->channel[(_efx)->n_channels - 1];	\
	     _channel;							\
	     _channel = _channel->channel ?				\
		     (_efx)->channel[_channel->channel - 1] : NULL)

static inline struct efx_tx_queue *
efx_get_tx_queue(struct efx_nic *efx, unsigned index, unsigned type)
{
	EFX_BUG_ON_PARANOID(index >= efx->n_tx_channels ||
			    type >= EFX_TXQ_TYPES);
	return &efx->channel[efx->tx_channel_offset + index]->tx_queue[type];
}

static inline bool efx_channel_has_tx_queues(struct efx_channel *channel)
{
	return channel->channel - channel->efx->tx_channel_offset <
		channel->efx->n_tx_channels;
}

static inline struct efx_tx_queue *
efx_channel_get_tx_queue(struct efx_channel *channel, unsigned type)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_tx_queues(channel) ||
			    type >= EFX_TXQ_TYPES);
	return &channel->tx_queue[type];
}

static inline bool efx_tx_queue_used(struct efx_tx_queue *tx_queue)
{
	return !(tx_queue->efx->net_dev->num_tc < 2 &&
		 tx_queue->queue & EFX_TXQ_TYPE_HIGHPRI);
}

/* Iterate over all TX queues belonging to a channel */
#define efx_for_each_channel_tx_queue(_tx_queue, _channel)		\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES && \
			     efx_tx_queue_used(_tx_queue);		\
		     _tx_queue++)

/* Iterate over all possible TX queues belonging to a channel */
#define efx_for_each_possible_channel_tx_queue(_tx_queue, _channel)	\
	if (!efx_channel_has_tx_queues(_channel))			\
		;							\
	else								\
		for (_tx_queue = (_channel)->tx_queue;			\
		     _tx_queue < (_channel)->tx_queue + EFX_TXQ_TYPES;	\
		     _tx_queue++)

static inline bool efx_channel_has_rx_queue(struct efx_channel *channel)
{
	return channel->rx_queue.core_index >= 0;
}

static inline struct efx_rx_queue *
efx_channel_get_rx_queue(struct efx_channel *channel)
{
	EFX_BUG_ON_PARANOID(!efx_channel_has_rx_queue(channel));
	return &channel->rx_queue;
}

/* Iterate over all RX queues belonging to a channel */
#define efx_for_each_channel_rx_queue(_rx_queue, _channel)		\
	if (!efx_channel_has_rx_queue(_channel))			\
		;							\
	else								\
		for (_rx_queue = &(_channel)->rx_queue;			\
		     _rx_queue;						\
		     _rx_queue = NULL)

static inline struct efx_channel *
efx_rx_queue_channel(struct efx_rx_queue *rx_queue)
{
	return container_of(rx_queue, struct efx_channel, rx_queue);
}

static inline int efx_rx_queue_index(struct efx_rx_queue *rx_queue)
{
	return efx_rx_queue_channel(rx_queue)->channel;
}

/* Returns a pointer to the specified receive buffer in the RX
 * descriptor queue.
 */
static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
						  unsigned int index)
{
	return &rx_queue->buffer[index];
}

/**
 * EFX_MAX_FRAME_LEN - calculate maximum frame length
 *
 * This calculates the maximum frame length that will be used for a
 * given MTU. The frame length will be equal to the MTU plus a
 * constant amount of header space and padding. This is the quantity
 * that the net driver will program into the MAC as the maximum frame
 * length.
 *
 * The 10G MAC requires 8-byte alignment on the frame
 * length, so we round up to the nearest 8.
 *
 * Re-clocking by the XGXS on RX can reduce an IPG to 32 bits (half an
 * XGMII cycle). If the frame length reaches the maximum value in the
 * same cycle, the XMAC can miss the IPG altogether. We work around
 * this by adding a further 16 bytes.
 */
#define EFX_FRAME_PAD	16
#define EFX_MAX_FRAME_LEN(mtu) \
	(ALIGN(((mtu) + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN + EFX_FRAME_PAD), 8))
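
/* Worked example: for mtu = 1500,
 *	1500 + 14 (ETH_HLEN) + 4 (VLAN_HLEN) + 4 (ETH_FCS_LEN) + 16 (pad)
 *	= 1538 bytes, rounded up to a multiple of 8 => 1544 bytes.
 */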

static inline bool efx_xmit_with_hwtstamp(struct sk_buff *skb)
{
	return skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP;
}
static inline void efx_xmit_hwtstamp_pending(struct sk_buff *skb)
{
	skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
}

/* Get all supported features.
 * If a feature is not fixed, it is present in hw_features.
 * If a feature is fixed, it is not present in hw_features, but
 * is always present in features.
 */
static inline netdev_features_t efx_supported_features(const struct efx_nic *efx)
{
	const struct net_device *net_dev = efx->net_dev;

	return net_dev->features | net_dev->hw_features;
}

#endif /* EFX_NET_DRIVER_H */