1 /* SPDX-License-Identifier: (GPL-2.0 OR MIT)
2  * Google virtual Ethernet (gve) driver
3  *
4  * Copyright (C) 2015-2021 Google, Inc.
5  */
6 
7 #ifndef _GVE_H_
8 #define _GVE_H_
9 
10 #include <linux/dma-mapping.h>
11 #include <linux/netdevice.h>
12 #include <linux/pci.h>
13 #include <linux/u64_stats_sync.h>
14 
15 #include "gve_desc.h"
16 #include "gve_desc_dqo.h"
17 
18 #ifndef PCI_VENDOR_ID_GOOGLE
19 #define PCI_VENDOR_ID_GOOGLE	0x1ae0
20 #endif
21 
22 #define PCI_DEV_ID_GVNIC	0x0042
23 
24 #define GVE_REGISTER_BAR	0
25 #define GVE_DOORBELL_BAR	2
26 
27 /* Driver can alloc up to 2 segments for the header and 2 for the payload. */
28 #define GVE_TX_MAX_IOVEC	4
29 /* 1 for management, 1 for rx, 1 for tx */
30 #define GVE_MIN_MSIX 3
31 
32 /* Numbers of gve tx/rx stats in stats report. */
33 #define GVE_TX_STATS_REPORT_NUM	6
34 #define GVE_RX_STATS_REPORT_NUM	2
35 
36 /* Interval to schedule a stats report update, 20000ms. */
37 #define GVE_STATS_REPORT_TIMER_PERIOD	20000
38 
39 /* Numbers of NIC tx/rx stats in stats report. */
40 #define NIC_TX_STATS_REPORT_NUM	0
41 #define NIC_RX_STATS_REPORT_NUM	4
42 
43 #define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
44 
45 /* PTYPEs are always 10 bits. */
46 #define GVE_NUM_PTYPES	1024
47 
48 #define GVE_RX_BUFFER_SIZE_DQO 2048
49 
50 #define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182
51 
52 /* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
53 struct gve_rx_desc_queue {
54 	struct gve_rx_desc *desc_ring; /* the descriptor ring */
55 	dma_addr_t bus; /* the bus for the desc_ring */
56 	u8 seqno; /* the next expected seqno for this desc */
57 };
58 
59 /* The page info for a single slot in the RX data queue */
60 struct gve_rx_slot_page_info {
61 	struct page *page;
62 	void *page_address;
63 	u32 page_offset; /* offset to write to in page */
64 	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
65 	u8 can_flip;
66 };
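
/* Illustrative sketch only (not part of the upstream file): one way the
 * pagecnt_bias scheme can be used to decide whether a page may be reused.
 * The driver takes a known number of page references up front; if the
 * page's refcount has dropped back to that bias, the networking stack no
 * longer holds the page and its buffer can be handed out ("flipped")
 * again. Assumes page_count() from <linux/mm.h> is visible here.
 */
static inline bool gve_page_info_can_reuse(struct gve_rx_slot_page_info *page_info)
{
	return page_count(page_info->page) == page_info->pagecnt_bias;
}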
67 
68 /* A list of pages registered with the device during setup and used by a queue
69  * as buffers
70  */
71 struct gve_queue_page_list {
72 	u32 id; /* unique id */
73 	u32 num_entries;
74 	struct page **pages; /* list of num_entries pages */
75 	dma_addr_t *page_buses; /* the dma addrs of the pages */
76 };
77 
78 /* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
79 struct gve_rx_data_queue {
80 	union gve_rx_data_slot *data_ring; /* read by NIC */
81 	dma_addr_t data_bus; /* dma mapping of the slots */
82 	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
83 	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
84 	u8 raw_addressing; /* use raw_addressing? */
85 };
86 
87 struct gve_priv;
88 
89 /* RX buffer queue for posting buffers to HW.
90  * Each RX (completion) queue has a corresponding buffer queue.
91  */
92 struct gve_rx_buf_queue_dqo {
93 	struct gve_rx_desc_dqo *desc_ring;
94 	dma_addr_t bus;
95 	u32 head; /* Pointer to start cleaning buffers at. */
96 	u32 tail; /* Last posted buffer index + 1 */
97 	u32 mask; /* Mask for indices to the size of the ring */
98 };
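
/* Illustrative sketch only (not part of the upstream file): the buffer
 * queue is a power-of-two ring, so indices wrap by masking rather than by
 * modulo. Posting a buffer advances `tail`; the number of buffers posted
 * but not yet cleaned is the masked distance between `tail` and `head`.
 */
static inline u32 gve_rx_bufq_outstanding(const struct gve_rx_buf_queue_dqo *bufq)
{
	return (bufq->tail - bufq->head) & bufq->mask;
}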
99 
100 /* RX completion queue to receive packets from HW. */
101 struct gve_rx_compl_queue_dqo {
102 	struct gve_rx_compl_desc_dqo *desc_ring;
103 	dma_addr_t bus;
104 
105 	/* Number of slots which did not have a buffer posted yet. We should not
106 	 * post more buffers than the queue size to avoid HW overrunning the
107 	 * queue.
108 	 */
109 	int num_free_slots;
110 
111 	/* HW uses a "generation bit" to notify SW of new descriptors. When a
112 	 * descriptor's generation bit is different from the current generation,
113 	 * that descriptor is ready to be consumed by SW.
114 	 */
115 	u8 cur_gen_bit;
116 
117 	/* Pointer into desc_ring where the next completion descriptor will be
118 	 * received.
119 	 */
120 	u32 head;
121 	u32 mask; /* Mask for indices to the size of the ring */
122 };
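
/* Illustrative sketch only (not part of the upstream file), following the
 * comment above: HW alternates the generation bit it writes on every pass
 * over the ring, so a descriptor whose bit differs from `cur_gen_bit` was
 * written after SW's last pass and is ready to consume. When `head` wraps
 * past the end of the ring, SW flips `cur_gen_bit` so the next pass of
 * descriptors is recognized as new.
 */
static inline bool gve_compl_desc_is_new(u8 desc_gen_bit, u8 cur_gen_bit)
{
	return desc_gen_bit != cur_gen_bit;
}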
123 
124 /* Stores state for tracking buffers posted to HW */
125 struct gve_rx_buf_state_dqo {
126 	/* The page posted to HW. */
127 	struct gve_rx_slot_page_info page_info;
128 
129 	/* The DMA address corresponding to `page_info`. */
130 	dma_addr_t addr;
131 
132 	/* Last offset into the page when it only had a single reference, at
133 	 * which point every other offset is free to be reused.
134 	 */
135 	u32 last_single_ref_offset;
136 
137 	/* Linked list index to next element in the list, or -1 if none */
138 	s16 next;
139 };
140 
141 /* `head` and `tail` are indices into an array, or -1 if empty. */
142 struct gve_index_list {
143 	s16 head;
144 	s16 tail;
145 };
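
/* Illustrative sketch only (not part of the upstream file): these lists
 * chain array elements together by index instead of by pointer, which
 * keeps each link at 16 bits. Appending buffer state `idx` amounts to
 * patching the previous tail's `next` index (or `head` when the list is
 * empty) and updating `tail`.
 */
static inline void gve_index_list_push_tail(struct gve_index_list *list,
					    struct gve_rx_buf_state_dqo *buf_states,
					    s16 idx)
{
	buf_states[idx].next = -1;
	if (list->head == -1)
		list->head = idx;
	else
		buf_states[list->tail].next = idx;
	list->tail = idx;
}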
146 
147 /* A single received packet split across multiple buffers may be
148  * reconstructed using the information in this structure.
149  */
150 struct gve_rx_ctx {
151 	/* head and tail of skb chain for the current packet or NULL if none */
152 	struct sk_buff *skb_head;
153 	struct sk_buff *skb_tail;
154 	u16 total_expected_size;
155 	u8 expected_frag_cnt;
156 	u8 curr_frag_cnt;
157 	u8 reuse_frags;
158 };
159 
160 /* Contains datapath state used to represent an RX queue. */
161 struct gve_rx_ring {
162 	struct gve_priv *gve;
163 	union {
164 		/* GQI fields */
165 		struct {
166 			struct gve_rx_desc_queue desc;
167 			struct gve_rx_data_queue data;
168 
169 			/* threshold for posting new buffs and descs */
170 			u32 db_threshold;
171 			u16 packet_buffer_size;
172 		};
173 
174 		/* DQO fields. */
175 		struct {
176 			struct gve_rx_buf_queue_dqo bufq;
177 			struct gve_rx_compl_queue_dqo complq;
178 
179 			struct gve_rx_buf_state_dqo *buf_states;
180 			u16 num_buf_states;
181 
182 			/* Linked list of gve_rx_buf_state_dqo. Index into
183 			 * buf_states, or -1 if empty.
184 			 */
185 			s16 free_buf_states;
186 
187 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
188 			 * buf_states, or -1 if empty.
189 			 *
190 			 * This list contains buf_states which are pointing to
191 			 * valid buffers.
192 			 *
193 			 * We use a FIFO here in order to increase the
194 			 * probability that buffers can be reused by increasing
195 			 * the time between usages.
196 			 */
197 			struct gve_index_list recycled_buf_states;
198 
199 			/* Linked list of gve_rx_buf_state_dqo. Indexes into
200 			 * buf_states, or -1 if empty.
201 			 *
202 			 * This list contains buf_states which have buffers
203 			 * which cannot be reused yet.
204 			 */
205 			struct gve_index_list used_buf_states;
206 		} dqo;
207 	};
208 
209 	u64 rbytes; /* free-running bytes received */
210 	u64 rpackets; /* free-running packets received */
211 	u32 cnt; /* free-running total number of completed packets */
212 	u32 fill_cnt; /* free-running total number of descs and buffs posted */
213 	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
214 	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
215 	u64 rx_copied_pkt; /* free-running total number of copied packets */
216 	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
217 	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
218 	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
219 	u64 rx_cont_packet_cnt; /* free-running multi-fragment packets received */
220 	u64 rx_frag_flip_cnt; /* free-running count of rx segments where page_flip was used */
221 	u64 rx_frag_copy_cnt; /* free-running count of rx segments copied into skb linear portion */
222 	u32 q_num; /* queue index */
223 	u32 ntfy_id; /* notification block index */
224 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
225 	dma_addr_t q_resources_bus; /* dma address for the queue resources */
226 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
227 
228 	struct gve_rx_ctx ctx; /* Info for packet currently being processed in this ring. */
229 };
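
/* Illustrative sketch only (not part of the upstream file): `statss`
 * exists so the 64-bit counters above can be read consistently on 32-bit
 * machines. A datapath writer brackets its updates with
 * u64_stats_update_begin/end; a reader retries until it observes a stable
 * snapshot, as below.
 */
static inline u64 gve_rx_read_rpackets(struct gve_rx_ring *rx)
{
	unsigned int start;
	u64 packets;

	do {
		start = u64_stats_fetch_begin(&rx->statss);
		packets = rx->rpackets;
	} while (u64_stats_fetch_retry(&rx->statss, start));

	return packets;
}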
230 
231 /* A TX desc ring entry */
232 union gve_tx_desc {
233 	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
234 	struct gve_tx_mtd_desc mtd; /* optional metadata descriptor */
235 	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
236 };
237 
238 /* Tracks the memory in the fifo occupied by a segment of a packet */
239 struct gve_tx_iovec {
240 	u32 iov_offset; /* offset into this segment */
241 	u32 iov_len; /* length */
242 	u32 iov_padding; /* padding associated with this segment */
243 };
244 
245 /* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
246  * ring entry but only used for a pkt_desc not a seg_desc
247  */
248 struct gve_tx_buffer_state {
249 	struct sk_buff *skb; /* skb for this pkt */
250 	union {
251 		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
252 		struct {
253 			DEFINE_DMA_UNMAP_ADDR(dma);
254 			DEFINE_DMA_UNMAP_LEN(len);
255 		};
256 	};
257 };
258 
259 /* A TX buffer - each queue has one */
260 struct gve_tx_fifo {
261 	void *base; /* address of base of FIFO */
262 	u32 size; /* total size */
263 	atomic_t available; /* how much space is still available */
264 	u32 head; /* offset to write at */
265 	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
266 };
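
/* Illustrative sketch only (not part of the upstream file): the FIFO is a
 * contiguous region carved out of the queue's QPL pages. The transmit path
 * claims space by checking the atomically tracked free byte count, copies
 * at `head`, and wraps `head` back to the start when it runs past `size`;
 * completions later return the space by adding the freed length back to
 * `available`.
 */
static inline bool gve_tx_fifo_has_room(struct gve_tx_fifo *fifo, u32 bytes)
{
	return atomic_read(&fifo->available) >= (int)bytes;
}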
267 
268 /* TX descriptor for DQO format */
269 union gve_tx_desc_dqo {
270 	struct gve_tx_pkt_desc_dqo pkt;
271 	struct gve_tx_tso_context_desc_dqo tso_ctx;
272 	struct gve_tx_general_context_desc_dqo general_ctx;
273 };
274 
275 enum gve_packet_state {
276 	/* Packet is in free list, available to be allocated.
277 	 * This should always be zero since state is not explicitly initialized.
278 	 */
279 	GVE_PACKET_STATE_UNALLOCATED,
280 	/* Packet is expecting a regular data completion or miss completion */
281 	GVE_PACKET_STATE_PENDING_DATA_COMPL,
282 	/* Packet has received a miss completion and is expecting a
283 	 * re-injection completion.
284 	 */
285 	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
286 	/* No valid completion received within the specified timeout. */
287 	GVE_PACKET_STATE_TIMED_OUT_COMPL,
288 };
289 
290 struct gve_tx_pending_packet_dqo {
291 	struct sk_buff *skb; /* skb for this packet */
292 
293 	/* 0th element corresponds to the linear portion of `skb`, should be
294 	 * unmapped with `dma_unmap_single`.
295 	 *
296 	 * All others correspond to `skb`'s frags and should be unmapped with
297 	 * `dma_unmap_page`.
298 	 */
299 	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
300 	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
301 	u16 num_bufs;
302 
303 	/* Linked list index to next element in the list, or -1 if none */
304 	s16 next;
305 
306 	/* Linked list index to prev element in the list, or -1 if none.
307 	 * Used for tracking either outstanding miss completions or prematurely
308 	 * freed packets.
309 	 */
310 	s16 prev;
311 
312 	/* Identifies the current state of the packet as defined in
313 	 * `enum gve_packet_state`.
314 	 */
315 	u8 state;
316 
317 	/* If packet is an outstanding miss completion, then the packet is
318 	 * freed if the corresponding re-injection completion is not received
319 	 * before kernel jiffies exceeds timeout_jiffies.
320 	 */
321 	unsigned long timeout_jiffies;
322 };
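
/* Illustrative sketch only (not part of the upstream file): how the
 * buffers of a completed packet would be unmapped per the comment above.
 * Buffer 0 is the skb's linear area and pairs with dma_unmap_single();
 * every other buffer is a page fragment and pairs with dma_unmap_page().
 */
static inline void gve_unmap_pending_packet_bufs(struct device *dev,
						 struct gve_tx_pending_packet_dqo *pkt)
{
	int i;

	for (i = 0; i < pkt->num_bufs; i++) {
		if (i == 0)
			dma_unmap_single(dev, dma_unmap_addr(pkt, dma[i]),
					 dma_unmap_len(pkt, len[i]),
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev, dma_unmap_addr(pkt, dma[i]),
				       dma_unmap_len(pkt, len[i]),
				       DMA_TO_DEVICE);
	}
}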
323 
324 /* Contains datapath state used to represent a TX queue. */
325 struct gve_tx_ring {
326 	/* Cacheline 0 -- Accessed & dirtied during transmit */
327 	union {
328 		/* GQI fields */
329 		struct {
330 			struct gve_tx_fifo tx_fifo;
331 			u32 req; /* driver tracked head pointer */
332 			u32 done; /* driver tracked tail pointer */
333 		};
334 
335 		/* DQO fields. */
336 		struct {
337 			/* Linked list of gve_tx_pending_packet_dqo. Index into
338 			 * pending_packets, or -1 if empty.
339 			 *
340 			 * This is a consumer list owned by the TX path. When it
341 			 * runs out, the producer list is stolen from the
342 			 * completion handling path
343 			 * (dqo_compl.free_pending_packets).
344 			 */
345 			s16 free_pending_packets;
346 
347 			/* Cached value of `dqo_compl.hw_tx_head` */
348 			u32 head;
349 			u32 tail; /* Last posted buffer index + 1 */
350 
351 			/* Index of the last descriptor with "report event" bit
352 			 * set.
353 			 */
354 			u32 last_re_idx;
355 		} dqo_tx;
356 	};
357 
358 	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
359 	union {
360 		/* GQI fields */
361 		struct {
362 			/* Spinlock for when cleanup in progress */
363 			spinlock_t clean_lock;
364 		};
365 
366 		/* DQO fields. */
367 		struct {
368 			u32 head; /* Last read on compl_desc */
369 
370 			/* Tracks the current gen bit of compl_q */
371 			u8 cur_gen_bit;
372 
373 			/* Linked list of gve_tx_pending_packet_dqo. Index into
374 			 * pending_packets, or -1 if empty.
375 			 *
376 			 * This is the producer list, owned by the completion
377 			 * handling path. When the consumer list
378 			 * (dqo_tx.free_pending_packets) runs out, this list
379 			 * will be stolen.
380 			 */
381 			atomic_t free_pending_packets;
382 
383 			/* Last TX ring index fetched by HW */
384 			atomic_t hw_tx_head;
385 
386 			/* List to track pending packets which received a miss
387 			 * completion but not a corresponding reinjection.
388 			 */
389 			struct gve_index_list miss_completions;
390 
391 			/* List to track pending packets that were completed
392 			 * before receiving a valid completion because they
393 			 * reached a specified timeout.
394 			 */
395 			struct gve_index_list timed_out_completions;
396 		} dqo_compl;
397 	} ____cacheline_aligned;
398 	u64 pkt_done; /* free-running - total packets completed */
399 	u64 bytes_done; /* free-running - total bytes completed */
400 	u64 dropped_pkt; /* free-running - total packets dropped */
401 	u64 dma_mapping_error; /* count of dma mapping errors */
402 
403 	/* Cacheline 2 -- Read-mostly fields */
404 	union {
405 		/* GQI fields */
406 		struct {
407 			union gve_tx_desc *desc;
408 
409 			/* Maps 1:1 to a desc */
410 			struct gve_tx_buffer_state *info;
411 		};
412 
413 		/* DQO fields. */
414 		struct {
415 			union gve_tx_desc_dqo *tx_ring;
416 			struct gve_tx_compl_desc *compl_ring;
417 
418 			struct gve_tx_pending_packet_dqo *pending_packets;
419 			s16 num_pending_packets;
420 
421 			u32 complq_mask; /* complq size is complq_mask + 1 */
422 		} dqo;
423 	} ____cacheline_aligned;
424 	struct netdev_queue *netdev_txq;
425 	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
426 	struct device *dev;
427 	u32 mask; /* masks req and done down to queue size */
428 	u8 raw_addressing; /* use raw_addressing? */
429 
430 	/* Slow-path fields */
431 	u32 q_num ____cacheline_aligned; /* queue idx */
432 	u32 stop_queue; /* count of queue stops */
433 	u32 wake_queue; /* count of queue wakes */
434 	u32 queue_timeout; /* count of queue timeouts */
435 	u32 ntfy_id; /* notification block index */
436 	u32 last_kick_msec; /* Last time the queue was kicked */
437 	dma_addr_t bus; /* dma address of the descr ring */
438 	dma_addr_t q_resources_bus; /* dma address of the queue resources */
439 	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
440 	struct u64_stats_sync statss; /* sync stats for 32bit archs */
441 } ____cacheline_aligned;
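
/* Illustrative sketch only (not part of the upstream file): how the two
 * pending-packet free lists cooperate. The TX path allocates from its
 * private `dqo_tx.free_pending_packets` list without locking; only when
 * that list is empty does it atomically steal whatever the completion path
 * has accumulated in `dqo_compl.free_pending_packets`, leaving -1 (empty)
 * behind for the producer to refill.
 */
static inline s16 gve_tx_steal_free_list(struct gve_tx_ring *tx)
{
	if (tx->dqo_tx.free_pending_packets == -1)
		tx->dqo_tx.free_pending_packets =
			atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);

	return tx->dqo_tx.free_pending_packets;
}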
442 
443 /* Wraps the info for one irq including the napi struct and the queues
444  * associated with that irq.
445  */
446 struct gve_notify_block {
447 	__be32 *irq_db_index; /* pointer to idx into Bar2 */
448 	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
449 	struct napi_struct napi; /* kernel napi struct for this block */
450 	struct gve_priv *priv;
451 	struct gve_tx_ring *tx; /* tx rings on this block */
452 	struct gve_rx_ring *rx; /* rx rings on this block */
453 };
454 
455 /* Tracks allowed and current queue settings */
456 struct gve_queue_config {
457 	u16 max_queues;
458 	u16 num_queues; /* current */
459 };
460 
461 /* Tracks the available and used qpl IDs */
462 struct gve_qpl_config {
463 	u32 qpl_map_size; /* map memory size */
464 	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
465 };
466 
467 struct gve_options_dqo_rda {
468 	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
469 	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
470 };
471 
472 struct gve_irq_db {
473 	__be32 index;
474 } ____cacheline_aligned;
475 
476 struct gve_ptype {
477 	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
478 	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
479 };
480 
481 struct gve_ptype_lut {
482 	struct gve_ptype ptypes[GVE_NUM_PTYPES];
483 };
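
/* Illustrative sketch only (not part of the upstream file): because packet
 * types are 10 bits, the value reported by a completion descriptor can be
 * used directly as an index into the 1024-entry lookup table to recover
 * the L3/L4 protocol.
 */
static inline struct gve_ptype gve_ptype_lookup(const struct gve_ptype_lut *lut,
						u16 ptype)
{
	return lut->ptypes[ptype & (GVE_NUM_PTYPES - 1)];
}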
484 
485 /* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
486  * when the entire configure_device_resources command is zeroed out and the
487  * queue_format is not specified.
488  */
489 enum gve_queue_format {
490 	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
491 	GVE_GQI_RDA_FORMAT		= 0x1,
492 	GVE_GQI_QPL_FORMAT		= 0x2,
493 	GVE_DQO_RDA_FORMAT		= 0x3,
494 };
495 
496 struct gve_priv {
497 	struct net_device *dev;
498 	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
499 	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
500 	struct gve_queue_page_list *qpls; /* array of num qpls */
501 	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
502 	struct gve_irq_db *irq_db_indices; /* array of num_ntfy_blks */
503 	dma_addr_t irq_db_indices_bus;
504 	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
505 	char mgmt_msix_name[IFNAMSIZ + 16];
506 	u32 mgmt_msix_idx;
507 	__be32 *counter_array; /* array of num_event_counters */
508 	dma_addr_t counter_array_bus;
509 
510 	u16 num_event_counters;
511 	u16 tx_desc_cnt; /* num desc per ring */
512 	u16 rx_desc_cnt; /* num desc per ring */
513 	u16 tx_pages_per_qpl; /* tx buffer length */
514 	u16 rx_data_slot_cnt; /* rx buffer length */
515 	u64 max_registered_pages;
516 	u64 num_registered_pages; /* num pages registered with NIC */
517 	u32 rx_copybreak; /* copy packets smaller than this */
518 	u16 default_num_queues; /* default num queues to set up */
519 
520 	struct gve_queue_config tx_cfg;
521 	struct gve_queue_config rx_cfg;
522 	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
523 	u32 num_ntfy_blks; /* split between TX and RX so must be even */
524 
525 	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
526 	__be32 __iomem *db_bar2; /* "array" of doorbells */
527 	u32 msg_enable;	/* level for netif* netdev print macros	*/
528 	struct pci_dev *pdev;
529 
530 	/* metrics */
531 	u32 tx_timeo_cnt;
532 
533 	/* Admin queue - see gve_adminq.h */
534 	union gve_adminq_command *adminq;
535 	dma_addr_t adminq_bus_addr;
536 	u32 adminq_mask; /* masks prod_cnt to adminq size */
537 	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
538 	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
539 	u32 adminq_timeouts; /* free-running count of AQ cmds timeouts */
540 	/* free-running count of per AQ cmd executed */
541 	u32 adminq_describe_device_cnt;
542 	u32 adminq_cfg_device_resources_cnt;
543 	u32 adminq_register_page_list_cnt;
544 	u32 adminq_unregister_page_list_cnt;
545 	u32 adminq_create_tx_queue_cnt;
546 	u32 adminq_create_rx_queue_cnt;
547 	u32 adminq_destroy_tx_queue_cnt;
548 	u32 adminq_destroy_rx_queue_cnt;
549 	u32 adminq_dcfg_device_resources_cnt;
550 	u32 adminq_set_driver_parameter_cnt;
551 	u32 adminq_report_stats_cnt;
552 	u32 adminq_report_link_speed_cnt;
553 	u32 adminq_get_ptype_map_cnt;
554 
555 	/* Global stats */
556 	u32 interface_up_cnt; /* count of times interface turned up since last reset */
557 	u32 interface_down_cnt; /* count of times interface turned down since last reset */
558 	u32 reset_cnt; /* count of reset */
559 	u32 page_alloc_fail; /* count of page alloc fails */
560 	u32 dma_mapping_error; /* count of dma mapping errors */
561 	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
562 	u32 suspend_cnt; /* count of times suspended */
563 	u32 resume_cnt; /* count of times resumed */
564 	struct workqueue_struct *gve_wq;
565 	struct work_struct service_task;
566 	struct work_struct stats_report_task;
567 	unsigned long service_task_flags;
568 	unsigned long state_flags;
569 
570 	struct gve_stats_report *stats_report;
571 	u64 stats_report_len;
572 	dma_addr_t stats_report_bus; /* dma address for the stats report */
573 	unsigned long ethtool_flags;
574 
575 	unsigned long stats_report_timer_period;
576 	struct timer_list stats_report_timer;
577 
578 	/* Gvnic device link speed from hypervisor. */
579 	u64 link_speed;
580 	bool up_before_suspend; /* True if dev was up before suspend */
581 
582 	struct gve_options_dqo_rda options_dqo_rda;
583 	struct gve_ptype_lut *ptype_lut_dqo;
584 
585 	/* Must be a power of two. */
586 	int data_buffer_size_dqo;
587 
588 	enum gve_queue_format queue_format;
589 
590 	/* Interrupt coalescing settings */
591 	u32 tx_coalesce_usecs;
592 	u32 rx_coalesce_usecs;
593 };
594 
595 enum gve_service_task_flags_bit {
596 	GVE_PRIV_FLAGS_DO_RESET			= 1,
597 	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
598 	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
599 	GVE_PRIV_FLAGS_DO_REPORT_STATS = 4,
600 };
601 
602 enum gve_state_flags_bit {
603 	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
604 	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
605 	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
606 	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
607 };
608 
609 enum gve_ethtool_flags_bit {
610 	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
611 };
612 
613 static inline bool gve_get_do_reset(struct gve_priv *priv)
614 {
615 	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
616 }
617 
618 static inline void gve_set_do_reset(struct gve_priv *priv)
619 {
620 	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
621 }
622 
623 static inline void gve_clear_do_reset(struct gve_priv *priv)
624 {
625 	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
626 }
627 
628 static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
629 {
630 	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
631 			&priv->service_task_flags);
632 }
633 
634 static inline void gve_set_reset_in_progress(struct gve_priv *priv)
635 {
636 	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
637 }
638 
639 static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
640 {
641 	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
642 }
643 
644 static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
645 {
646 	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
647 			&priv->service_task_flags);
648 }
649 
650 static inline void gve_set_probe_in_progress(struct gve_priv *priv)
651 {
652 	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
653 }
654 
655 static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
656 {
657 	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
658 }
659 
660 static inline bool gve_get_do_report_stats(struct gve_priv *priv)
661 {
662 	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
663 			&priv->service_task_flags);
664 }
665 
666 static inline void gve_set_do_report_stats(struct gve_priv *priv)
667 {
668 	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
669 }
670 
671 static inline void gve_clear_do_report_stats(struct gve_priv *priv)
672 {
673 	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
674 }
675 
676 static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
677 {
678 	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
679 }
680 
681 static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
682 {
683 	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
684 }
685 
686 static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
687 {
688 	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
689 }
690 
691 static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
692 {
693 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
694 }
695 
696 static inline void gve_set_device_resources_ok(struct gve_priv *priv)
697 {
698 	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
699 }
700 
701 static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
702 {
703 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
704 }
705 
706 static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
707 {
708 	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
709 }
710 
711 static inline void gve_set_device_rings_ok(struct gve_priv *priv)
712 {
713 	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
714 }
715 
716 static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
717 {
718 	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
719 }
720 
721 static inline bool gve_get_napi_enabled(struct gve_priv *priv)
722 {
723 	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
724 }
725 
726 static inline void gve_set_napi_enabled(struct gve_priv *priv)
727 {
728 	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
729 }
730 
731 static inline void gve_clear_napi_enabled(struct gve_priv *priv)
732 {
733 	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
734 }
735 
736 static inline bool gve_get_report_stats(struct gve_priv *priv)
737 {
738 	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
739 }
740 
741 static inline void gve_clear_report_stats(struct gve_priv *priv)
742 {
743 	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
744 }
745 
746 /* Returns the address of the ntfy_blocks irq doorbell
747  */
748 static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
749 					       struct gve_notify_block *block)
750 {
751 	return &priv->db_bar2[be32_to_cpu(*block->irq_db_index)];
752 }
753 
754 /* Returns the index into ntfy_blocks of the given tx ring's block
755  */
756 static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
757 {
758 	return queue_idx;
759 }
760 
761 /* Returns the index into ntfy_blocks of the given rx ring's block
762  */
763 static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
764 {
765 	return (priv->num_ntfy_blks / 2) + queue_idx;
766 }
767 
768 /* Returns the number of tx queue page lists
769  */
770 static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
771 {
772 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
773 		return 0;
774 
775 	return priv->tx_cfg.num_queues;
776 }
777 
778 /* Returns the number of rx queue page lists
779  */
780 static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
781 {
782 	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
783 		return 0;
784 
785 	return priv->rx_cfg.num_queues;
786 }
787 
788 /* Returns a pointer to the next available tx qpl in the list of qpls
789  */
790 static inline
791 struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
792 {
793 	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
794 				     priv->qpl_cfg.qpl_map_size);
795 
796 	/* we are out of tx qpls */
797 	if (id >= gve_num_tx_qpls(priv))
798 		return NULL;
799 
800 	set_bit(id, priv->qpl_cfg.qpl_id_map);
801 	return &priv->qpls[id];
802 }
803 
804 /* Returns a pointer to the next available rx qpl in the list of qpls
805  */
806 static inline
807 struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
808 {
809 	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
810 				    priv->qpl_cfg.qpl_map_size,
811 				    gve_num_tx_qpls(priv));
812 
813 	/* we are out of rx qpls */
814 	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
815 		return NULL;
816 
817 	set_bit(id, priv->qpl_cfg.qpl_id_map);
818 	return &priv->qpls[id];
819 }
820 
821 /* Unassigns the qpl with the given id
822  */
823 static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
824 {
825 	clear_bit(id, priv->qpl_cfg.qpl_id_map);
826 }
827 
828 /* Returns the correct dma direction for tx and rx qpls
829  */
830 static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
831 						      int id)
832 {
833 	if (id < gve_num_tx_qpls(priv))
834 		return DMA_TO_DEVICE;
835 	else
836 		return DMA_FROM_DEVICE;
837 }
838 
839 static inline bool gve_is_gqi(struct gve_priv *priv)
840 {
841 	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
842 		priv->queue_format == GVE_GQI_QPL_FORMAT;
843 }
844 
845 /* buffers */
846 int gve_alloc_page(struct gve_priv *priv, struct device *dev,
847 		   struct page **page, dma_addr_t *dma,
848 		   enum dma_data_direction, gfp_t gfp_flags);
849 void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
850 		   enum dma_data_direction);
851 /* tx handling */
852 netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
853 bool gve_tx_poll(struct gve_notify_block *block, int budget);
854 int gve_tx_alloc_rings(struct gve_priv *priv);
855 void gve_tx_free_rings_gqi(struct gve_priv *priv);
856 u32 gve_tx_load_event_counter(struct gve_priv *priv,
857 			      struct gve_tx_ring *tx);
858 bool gve_tx_clean_pending(struct gve_priv *priv, struct gve_tx_ring *tx);
859 /* rx handling */
860 void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
861 int gve_rx_poll(struct gve_notify_block *block, int budget);
862 bool gve_rx_work_pending(struct gve_rx_ring *rx);
863 int gve_rx_alloc_rings(struct gve_priv *priv);
864 void gve_rx_free_rings_gqi(struct gve_priv *priv);
865 /* Reset */
866 void gve_schedule_reset(struct gve_priv *priv);
867 int gve_reset(struct gve_priv *priv, bool attempt_teardown);
868 int gve_adjust_queues(struct gve_priv *priv,
869 		      struct gve_queue_config new_rx_config,
870 		      struct gve_queue_config new_tx_config);
871 /* report stats handling */
872 void gve_handle_report_stats(struct gve_priv *priv);
873 /* exported by ethtool.c */
874 extern const struct ethtool_ops gve_ethtool_ops;
875 /* needed by ethtool */
876 extern const char gve_version_str[];
877 #endif /* _GVE_H_ */
878