/* SPDX-License-Identifier: (GPL-2.0 OR MIT)
 * Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#ifndef _GVE_H_
#define _GVE_H_

#include <linux/dma-mapping.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/u64_stats_sync.h>

#include "gve_desc.h"
#include "gve_desc_dqo.h"

#ifndef PCI_VENDOR_ID_GOOGLE
#define PCI_VENDOR_ID_GOOGLE	0x1ae0
#endif

#define PCI_DEV_ID_GVNIC	0x0042

#define GVE_REGISTER_BAR	0
#define GVE_DOORBELL_BAR	2

/* Driver can alloc up to 2 segments for the header and 2 for the payload. */
#define GVE_TX_MAX_IOVEC	4
/* 1 for management, 1 for rx, 1 for tx */
#define GVE_MIN_MSIX 3

/* Numbers of gve tx/rx stats in stats report. */
#define GVE_TX_STATS_REPORT_NUM	6
#define GVE_RX_STATS_REPORT_NUM	2

/* Interval to schedule a stats report update, 20000ms. */
#define GVE_STATS_REPORT_TIMER_PERIOD	20000

/* Numbers of NIC tx/rx stats in stats report. */
#define NIC_TX_STATS_REPORT_NUM	0
#define NIC_RX_STATS_REPORT_NUM	4

#define GVE_DATA_SLOT_ADDR_PAGE_MASK (~(PAGE_SIZE - 1))
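
/* A data slot address can thus be split into its page-aligned base and
 * its intra-page offset. A minimal sketch (assuming `addr` is an
 * unsigned-long-sized address):
 *
 *	base   = addr & GVE_DATA_SLOT_ADDR_PAGE_MASK;
 *	offset = addr & (PAGE_SIZE - 1);
 */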

/* PTYPEs are always 10 bits. */
#define GVE_NUM_PTYPES	1024

#define GVE_RX_BUFFER_SIZE_DQO 2048

#define GVE_GQ_TX_MIN_PKT_DESC_BYTES 182

/* Each slot in the desc ring has a 1:1 mapping to a slot in the data ring */
struct gve_rx_desc_queue {
	struct gve_rx_desc *desc_ring; /* the descriptor ring */
	dma_addr_t bus; /* the bus for the desc_ring */
	u8 seqno; /* the next expected seqno for this desc */
};

/* The page info for a single slot in the RX data queue */
struct gve_rx_slot_page_info {
	struct page *page;
	void *page_address;
	u32 page_offset; /* offset to write to in page */
	int pagecnt_bias; /* expected pagecnt if only the driver has a ref */
	u8 can_flip;
};

/* A list of pages registered with the device during setup and used by a queue
 * as buffers
 */
struct gve_queue_page_list {
	u32 id; /* unique id */
	u32 num_entries;
	struct page **pages; /* list of num_entries pages */
	dma_addr_t *page_buses; /* the dma addrs of the pages */
};

/* Each slot in the data ring has a 1:1 mapping to a slot in the desc ring */
struct gve_rx_data_queue {
	union gve_rx_data_slot *data_ring; /* read by NIC */
	dma_addr_t data_bus; /* dma mapping of the slots */
	struct gve_rx_slot_page_info *page_info; /* page info of the buffers */
	struct gve_queue_page_list *qpl; /* qpl assigned to this queue */
	u8 raw_addressing; /* use raw_addressing? */
};

struct gve_priv;

/* RX buffer queue for posting buffers to HW.
 * Each RX (completion) queue has a corresponding buffer queue.
 */
struct gve_rx_buf_queue_dqo {
	struct gve_rx_desc_dqo *desc_ring;
	dma_addr_t bus;
	u32 head; /* Pointer to start cleaning buffers at. */
	u32 tail; /* Last posted buffer index + 1 */
	u32 mask; /* Mask for indices to the size of the ring */
};
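
/* The ring size is a power of two, so `mask` (ring size - 1) wraps
 * indices cheaply. A minimal sketch of posting one buffer (hypothetical
 * code, not a driver function):
 *
 *	bufq->desc_ring[bufq->tail] = <filled descriptor>;
 *	bufq->tail = (bufq->tail + 1) & bufq->mask;
 */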

/* RX completion queue to receive packets from HW. */
struct gve_rx_compl_queue_dqo {
	struct gve_rx_compl_desc_dqo *desc_ring;
	dma_addr_t bus;

	/* Number of slots which did not have a buffer posted yet. We should not
	 * post more buffers than the queue size to avoid HW overrunning the
	 * queue.
	 */
	int num_free_slots;

	/* HW uses a "generation bit" to notify SW of new descriptors. When a
	 * descriptor's generation bit is different from the current generation,
	 * that descriptor is ready to be consumed by SW.
	 */
	u8 cur_gen_bit;

	/* Pointer into desc_ring where the next completion descriptor will be
	 * received.
	 */
	u32 head;
	u32 mask; /* Mask for indices to the size of the ring */
};
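
/* A sketch of consuming completions with the generation bit described
 * above (hypothetical code, assuming the completion descriptor exposes
 * a `generation` field):
 *
 *	desc = &complq->desc_ring[complq->head];
 *	if (desc->generation == complq->cur_gen_bit)
 *		return;	(HW has not written this slot yet)
 *	... process desc ...
 *	complq->head = (complq->head + 1) & complq->mask;
 *	if (complq->head == 0)
 *		complq->cur_gen_bit ^= 1;	(flip on wrap)
 */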

/* Stores state for tracking buffers posted to HW */
struct gve_rx_buf_state_dqo {
	/* The page posted to HW. */
	struct gve_rx_slot_page_info page_info;

	/* The DMA address corresponding to `page_info`. */
	dma_addr_t addr;

	/* Last offset into the page when it only had a single reference, at
	 * which point every other offset is free to be reused.
	 */
	u32 last_single_ref_offset;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;
};

/* `head` and `tail` are indices into an array, or -1 if empty. */
struct gve_index_list {
	s16 head;
	s16 tail;
};
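
/* These lists chain elements through their `s16 next` field in a
 * backing array (e.g. buf_states or pending_packets below) instead of
 * pointers. A sketch of walking one such list (hypothetical loop):
 *
 *	for (i = list->head; i != -1; i = arr[i].next)
 *		... visit arr[i] ...
 */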

/* Contains datapath state used to represent an RX queue. */
struct gve_rx_ring {
	struct gve_priv *gve;
	union {
		/* GQI fields */
		struct {
			struct gve_rx_desc_queue desc;
			struct gve_rx_data_queue data;

			/* threshold for posting new buffs and descs */
			u32 db_threshold;
		};

		/* DQO fields. */
		struct {
			struct gve_rx_buf_queue_dqo bufq;
			struct gve_rx_compl_queue_dqo complq;

			struct gve_rx_buf_state_dqo *buf_states;
			u16 num_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Index into
			 * buf_states, or -1 if empty.
			 */
			s16 free_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which are pointing to
			 * valid buffers.
			 *
			 * We use a FIFO here in order to increase the
			 * probability that buffers can be reused by increasing
			 * the time between usages.
			 */
			struct gve_index_list recycled_buf_states;

			/* Linked list of gve_rx_buf_state_dqo. Indexes into
			 * buf_states, or -1 if empty.
			 *
			 * This list contains buf_states which have buffers
			 * which cannot be reused yet.
			 */
			struct gve_index_list used_buf_states;
		} dqo;
	};

	u64 rbytes; /* free-running bytes received */
	u64 rpackets; /* free-running packets received */
	u32 cnt; /* free-running total number of completed packets */
	u32 fill_cnt; /* free-running total number of descs and buffs posted */
	u32 mask; /* masks the cnt and fill_cnt to the size of the ring */
	u64 rx_copybreak_pkt; /* free-running count of copybreak packets */
	u64 rx_copied_pkt; /* free-running total number of copied packets */
	u64 rx_skb_alloc_fail; /* free-running count of skb alloc fails */
	u64 rx_buf_alloc_fail; /* free-running count of buffer alloc fails */
	u64 rx_desc_err_dropped_pkt; /* free-running count of packets dropped by descriptor error */
	u32 q_num; /* queue index */
	u32 ntfy_id; /* notification block index */
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	dma_addr_t q_resources_bus; /* dma address for the queue resources */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */

	/* head and tail of skb chain for the current packet or NULL if none */
	struct sk_buff *skb_head;
	struct sk_buff *skb_tail;
};

/* A TX desc ring entry */
union gve_tx_desc {
	struct gve_tx_pkt_desc pkt; /* first desc for a packet */
	struct gve_tx_seg_desc seg; /* subsequent descs for a packet */
};

/* Tracks the memory in the fifo occupied by a segment of a packet */
struct gve_tx_iovec {
	u32 iov_offset; /* offset into this segment */
	u32 iov_len; /* length */
	u32 iov_padding; /* padding associated with this segment */
};

/* Tracks the memory in the fifo occupied by the skb. Mapped 1:1 to a desc
 * ring entry but only used for a pkt_desc not a seg_desc
 */
struct gve_tx_buffer_state {
	struct sk_buff *skb; /* skb for this pkt */
	union {
		struct gve_tx_iovec iov[GVE_TX_MAX_IOVEC]; /* segments of this pkt */
		struct {
			DEFINE_DMA_UNMAP_ADDR(dma);
			DEFINE_DMA_UNMAP_LEN(len);
		};
	};
};

/* A TX buffer - each queue has one */
struct gve_tx_fifo {
	void *base; /* address of base of FIFO */
	u32 size; /* total size */
	atomic_t available; /* how much space is still available */
	u32 head; /* offset to write at */
	struct gve_queue_page_list *qpl; /* QPL mapped into this FIFO */
};
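
/* A sketch of how space is carved from the FIFO (hypothetical; the real
 * allocator lives in the TX datapath and also handles padding): `head`
 * advances by the request size, wrapping modulo `size`, while
 * `available` is adjusted atomically so the completion path can return
 * space concurrently.
 *
 *	offset = fifo->head;
 *	fifo->head = (fifo->head + bytes) % fifo->size;
 *	atomic_sub(bytes, &fifo->available);
 */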

/* TX descriptor for DQO format */
union gve_tx_desc_dqo {
	struct gve_tx_pkt_desc_dqo pkt;
	struct gve_tx_tso_context_desc_dqo tso_ctx;
	struct gve_tx_general_context_desc_dqo general_ctx;
};

enum gve_packet_state {
	/* Packet is in free list, available to be allocated.
	 * This should always be zero since state is not explicitly initialized.
	 */
	GVE_PACKET_STATE_UNALLOCATED,
	/* Packet is expecting a regular data completion or miss completion */
	GVE_PACKET_STATE_PENDING_DATA_COMPL,
	/* Packet has received a miss completion and is expecting a
	 * re-injection completion.
	 */
	GVE_PACKET_STATE_PENDING_REINJECT_COMPL,
	/* No valid completion received within the specified timeout. */
	GVE_PACKET_STATE_TIMED_OUT_COMPL,
};

struct gve_tx_pending_packet_dqo {
	struct sk_buff *skb; /* skb for this packet */

	/* 0th element corresponds to the linear portion of `skb`, should be
	 * unmapped with `dma_unmap_single`.
	 *
	 * All others correspond to `skb`'s frags and should be unmapped with
	 * `dma_unmap_page`.
	 */
	DEFINE_DMA_UNMAP_ADDR(dma[MAX_SKB_FRAGS + 1]);
	DEFINE_DMA_UNMAP_LEN(len[MAX_SKB_FRAGS + 1]);
	u16 num_bufs;

	/* Linked list index to next element in the list, or -1 if none */
	s16 next;

	/* Linked list index to prev element in the list, or -1 if none.
	 * Used for tracking either outstanding miss completions or prematurely
	 * freed packets.
	 */
	s16 prev;

	/* Identifies the current state of the packet as defined in
	 * `enum gve_packet_state`.
	 */
	u8 state;

	/* If packet is an outstanding miss completion, then the packet is
	 * freed if the corresponding re-injection completion is not received
	 * before kernel jiffies exceeds timeout_jiffies.
	 */
	unsigned long timeout_jiffies;
};
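
/* A sketch of the timeout check implied above (hypothetical, not a
 * driver function): a packet waiting on a re-injection completion is
 * reclaimed once jiffies passes timeout_jiffies.
 *
 *	if (pkt->state == GVE_PACKET_STATE_PENDING_REINJECT_COMPL &&
 *	    time_after(jiffies, pkt->timeout_jiffies))
 *		pkt->state = GVE_PACKET_STATE_TIMED_OUT_COMPL;
 */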

/* Contains datapath state used to represent a TX queue. */
struct gve_tx_ring {
	/* Cacheline 0 -- Accessed & dirtied during transmit */
	union {
		/* GQI fields */
		struct {
			struct gve_tx_fifo tx_fifo;
			u32 req; /* driver tracked head pointer */
			u32 done; /* driver tracked tail pointer */
		};

		/* DQO fields. */
		struct {
			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is a consumer list owned by the TX path. When it
			 * runs out, the producer list is stolen from the
			 * completion handling path
			 * (dqo_compl.free_pending_packets).
			 */
			s16 free_pending_packets;

			/* Cached value of `dqo_compl.hw_tx_head` */
			u32 head;
			u32 tail; /* Last posted buffer index + 1 */

			/* Index of the last descriptor with "report event" bit
			 * set.
			 */
			u32 last_re_idx;
		} dqo_tx;
	};

	/* Cacheline 1 -- Accessed & dirtied during gve_clean_tx_done */
	union {
		/* GQI fields */
		struct {
			/* NIC tail pointer */
			__be32 last_nic_done;
		};

		/* DQO fields. */
		struct {
			u32 head; /* Last read on compl_desc */

			/* Tracks the current gen bit of compl_q */
			u8 cur_gen_bit;

			/* Linked list of gve_tx_pending_packet_dqo. Index into
			 * pending_packets, or -1 if empty.
			 *
			 * This is the producer list, owned by the completion
			 * handling path. When the consumer list
			 * (dqo_tx.free_pending_packets) runs out, this list
			 * will be stolen.
			 */
			atomic_t free_pending_packets;

			/* Last TX ring index fetched by HW */
			atomic_t hw_tx_head;

			/* List to track pending packets which received a miss
			 * completion but not a corresponding reinjection.
			 */
			struct gve_index_list miss_completions;

			/* List to track pending packets that were completed
			 * before receiving a valid completion because they
			 * reached a specified timeout.
			 */
			struct gve_index_list timed_out_completions;
		} dqo_compl;
	} ____cacheline_aligned;
	u64 pkt_done; /* free-running - total packets completed */
	u64 bytes_done; /* free-running - total bytes completed */
	u64 dropped_pkt; /* free-running - total packets dropped */
	u64 dma_mapping_error; /* count of dma mapping errors */

	/* Cacheline 2 -- Read-mostly fields */
	union {
		/* GQI fields */
		struct {
			union gve_tx_desc *desc;

			/* Maps 1:1 to a desc */
			struct gve_tx_buffer_state *info;
		};

		/* DQO fields. */
		struct {
			union gve_tx_desc_dqo *tx_ring;
			struct gve_tx_compl_desc *compl_ring;

			struct gve_tx_pending_packet_dqo *pending_packets;
			s16 num_pending_packets;

			u32 complq_mask; /* complq size is complq_mask + 1 */
		} dqo;
	} ____cacheline_aligned;
	struct netdev_queue *netdev_txq;
	struct gve_queue_resources *q_resources; /* head and tail pointer idx */
	struct device *dev;
	u32 mask; /* masks req and done down to queue size */
	u8 raw_addressing; /* use raw_addressing? */

	/* Slow-path fields */
	u32 q_num ____cacheline_aligned; /* queue idx */
	u32 stop_queue; /* count of queue stops */
	u32 wake_queue; /* count of queue wakes */
	u32 queue_timeout; /* count of queue timeouts */
	u32 ntfy_id; /* notification block index */
	u32 last_kick_msec; /* Last time the queue was kicked */
	dma_addr_t bus; /* dma address of the descr ring */
	dma_addr_t q_resources_bus; /* dma address of the queue resources */
	dma_addr_t complq_bus_dqo; /* dma address of the dqo.compl_ring */
	struct u64_stats_sync statss; /* sync stats for 32bit archs */
} ____cacheline_aligned;
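
/* A sketch of the free-list "stealing" described for dqo_tx/dqo_compl
 * above (hypothetical code): the TX path consumes its private list and,
 * when that is empty, atomically takes the entire list the completion
 * path has been building.
 *
 *	s16 index = tx->dqo_tx.free_pending_packets;
 *	if (index == -1)
 *		index = atomic_xchg(&tx->dqo_compl.free_pending_packets, -1);
 */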

/* Wraps the info for one irq including the napi struct and the queues
 * associated with that irq.
 */
struct gve_notify_block {
	__be32 irq_db_index; /* idx into Bar2 - set by device, must be 1st */
	char name[IFNAMSIZ + 16]; /* name registered with the kernel */
	struct napi_struct napi; /* kernel napi struct for this block */
	struct gve_priv *priv;
	struct gve_tx_ring *tx; /* tx rings on this block */
	struct gve_rx_ring *rx; /* rx rings on this block */
} ____cacheline_aligned;

/* Tracks allowed and current queue settings */
struct gve_queue_config {
	u16 max_queues;
	u16 num_queues; /* current */
};

/* Tracks the available and used qpl IDs */
struct gve_qpl_config {
	u32 qpl_map_size; /* map memory size */
	unsigned long *qpl_id_map; /* bitmap of used qpl ids */
};

struct gve_options_dqo_rda {
	u16 tx_comp_ring_entries; /* number of tx_comp descriptors */
	u16 rx_buff_ring_entries; /* number of rx_buff descriptors */
};

struct gve_ptype {
	u8 l3_type;  /* `gve_l3_type` in gve_adminq.h */
	u8 l4_type;  /* `gve_l4_type` in gve_adminq.h */
};

struct gve_ptype_lut {
	struct gve_ptype ptypes[GVE_NUM_PTYPES];
};

/* GVE_QUEUE_FORMAT_UNSPECIFIED must be zero since 0 is the default value
 * when the entire configure_device_resources command is zeroed out and the
 * queue_format is not specified.
 */
enum gve_queue_format {
	GVE_QUEUE_FORMAT_UNSPECIFIED	= 0x0,
	GVE_GQI_RDA_FORMAT		= 0x1,
	GVE_GQI_QPL_FORMAT		= 0x2,
	GVE_DQO_RDA_FORMAT		= 0x3,
};

struct gve_priv {
	struct net_device *dev;
	struct gve_tx_ring *tx; /* array of tx_cfg.num_queues */
	struct gve_rx_ring *rx; /* array of rx_cfg.num_queues */
	struct gve_queue_page_list *qpls; /* array of num qpls */
	struct gve_notify_block *ntfy_blocks; /* array of num_ntfy_blks */
	dma_addr_t ntfy_block_bus;
	struct msix_entry *msix_vectors; /* array of num_ntfy_blks + 1 */
	char mgmt_msix_name[IFNAMSIZ + 16];
	u32 mgmt_msix_idx;
	__be32 *counter_array; /* array of num_event_counters */
	dma_addr_t counter_array_bus;

	u16 num_event_counters;
	u16 tx_desc_cnt; /* num desc per ring */
	u16 rx_desc_cnt; /* num desc per ring */
	u16 tx_pages_per_qpl; /* num pages per tx qpl */
	u16 rx_data_slot_cnt; /* num rx data slots per ring */
	u64 max_registered_pages;
	u64 num_registered_pages; /* num pages registered with NIC */
	u32 rx_copybreak; /* copy packets smaller than this */
	u16 default_num_queues; /* default num queues to set up */

	struct gve_queue_config tx_cfg;
	struct gve_queue_config rx_cfg;
	struct gve_qpl_config qpl_cfg; /* map used QPL ids */
	u32 num_ntfy_blks; /* split between TX and RX so must be even */

	struct gve_registers __iomem *reg_bar0; /* see gve_register.h */
	__be32 __iomem *db_bar2; /* "array" of doorbells */
	u32 msg_enable;	/* level for netif* netdev print macros */
	struct pci_dev *pdev;

	/* metrics */
	u32 tx_timeo_cnt;

	/* Admin queue - see gve_adminq.h */
	union gve_adminq_command *adminq;
	dma_addr_t adminq_bus_addr;
	u32 adminq_mask; /* masks prod_cnt to adminq size */
	u32 adminq_prod_cnt; /* free-running count of AQ cmds executed */
	u32 adminq_cmd_fail; /* free-running count of AQ cmds failed */
	u32 adminq_timeouts; /* free-running count of AQ cmd timeouts */
	/* free-running counts of each AQ cmd executed */
	u32 adminq_describe_device_cnt;
	u32 adminq_cfg_device_resources_cnt;
	u32 adminq_register_page_list_cnt;
	u32 adminq_unregister_page_list_cnt;
	u32 adminq_create_tx_queue_cnt;
	u32 adminq_create_rx_queue_cnt;
	u32 adminq_destroy_tx_queue_cnt;
	u32 adminq_destroy_rx_queue_cnt;
	u32 adminq_dcfg_device_resources_cnt;
	u32 adminq_set_driver_parameter_cnt;
	u32 adminq_report_stats_cnt;
	u32 adminq_report_link_speed_cnt;
	u32 adminq_get_ptype_map_cnt;

	/* Global stats */
	u32 interface_up_cnt; /* count of times interface turned up since last reset */
	u32 interface_down_cnt; /* count of times interface turned down since last reset */
	u32 reset_cnt; /* count of resets */
	u32 page_alloc_fail; /* count of page alloc fails */
	u32 dma_mapping_error; /* count of dma mapping errors */
	u32 stats_report_trigger_cnt; /* count of device-requested stats-reports since last reset */
	struct workqueue_struct *gve_wq;
	struct work_struct service_task;
	struct work_struct stats_report_task;
	unsigned long service_task_flags;
	unsigned long state_flags;

	struct gve_stats_report *stats_report;
	u64 stats_report_len;
	dma_addr_t stats_report_bus; /* dma address for the stats report */
	unsigned long ethtool_flags;

	unsigned long stats_report_timer_period;
	struct timer_list stats_report_timer;

	/* Gvnic device link speed from hypervisor. */
	u64 link_speed;

	struct gve_options_dqo_rda options_dqo_rda;
	struct gve_ptype_lut *ptype_lut_dqo;

	/* Must be a power of two. */
	int data_buffer_size_dqo;

	enum gve_queue_format queue_format;
};

enum gve_service_task_flags_bit {
	GVE_PRIV_FLAGS_DO_RESET			= 1,
	GVE_PRIV_FLAGS_RESET_IN_PROGRESS	= 2,
	GVE_PRIV_FLAGS_PROBE_IN_PROGRESS	= 3,
	GVE_PRIV_FLAGS_DO_REPORT_STATS		= 4,
};

enum gve_state_flags_bit {
	GVE_PRIV_FLAGS_ADMIN_QUEUE_OK		= 1,
	GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK	= 2,
	GVE_PRIV_FLAGS_DEVICE_RINGS_OK		= 3,
	GVE_PRIV_FLAGS_NAPI_ENABLED		= 4,
};

enum gve_ethtool_flags_bit {
	GVE_PRIV_FLAGS_REPORT_STATS		= 0,
};

static inline bool gve_get_do_reset(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_set_do_reset(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline void gve_clear_do_reset(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_RESET, &priv->service_task_flags);
}

static inline bool gve_get_reset_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_reset_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_reset_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_RESET_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_probe_in_progress(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS,
			&priv->service_task_flags);
}

static inline void gve_set_probe_in_progress(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline void gve_clear_probe_in_progress(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_PROBE_IN_PROGRESS, &priv->service_task_flags);
}

static inline bool gve_get_do_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS,
			&priv->service_task_flags);
}

static inline void gve_set_do_report_stats(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline void gve_clear_do_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DO_REPORT_STATS, &priv->service_task_flags);
}

static inline bool gve_get_admin_queue_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_set_admin_queue_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline void gve_clear_admin_queue_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_ADMIN_QUEUE_OK, &priv->state_flags);
}

static inline bool gve_get_device_resources_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_set_device_resources_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline void gve_clear_device_resources_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RESOURCES_OK, &priv->state_flags);
}

static inline bool gve_get_device_rings_ok(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_set_device_rings_ok(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline void gve_clear_device_rings_ok(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_DEVICE_RINGS_OK, &priv->state_flags);
}

static inline bool gve_get_napi_enabled(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_set_napi_enabled(struct gve_priv *priv)
{
	set_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline void gve_clear_napi_enabled(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_NAPI_ENABLED, &priv->state_flags);
}

static inline bool gve_get_report_stats(struct gve_priv *priv)
{
	return test_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

static inline void gve_clear_report_stats(struct gve_priv *priv)
{
	clear_bit(GVE_PRIV_FLAGS_REPORT_STATS, &priv->ethtool_flags);
}

/* Returns the address of the ntfy_blocks irq doorbell
 */
static inline __be32 __iomem *gve_irq_doorbell(struct gve_priv *priv,
					       struct gve_notify_block *block)
{
	return &priv->db_bar2[be32_to_cpu(block->irq_db_index)];
}

/* Returns the index into ntfy_blocks of the given tx ring's block
 */
static inline u32 gve_tx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return queue_idx;
}

/* Returns the index into ntfy_blocks of the given rx ring's block
 */
static inline u32 gve_rx_idx_to_ntfy(struct gve_priv *priv, u32 queue_idx)
{
	return (priv->num_ntfy_blks / 2) + queue_idx;
}
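
/* For example, with num_ntfy_blks == 8 the blocks are split evenly:
 * TX queues 0-3 map to blocks 0-3 and RX queues 0-3 map to blocks 4-7.
 */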

/* Returns the number of tx queue page lists
 */
static inline u32 gve_num_tx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->tx_cfg.num_queues;
}

/* Returns the number of rx queue page lists
 */
static inline u32 gve_num_rx_qpls(struct gve_priv *priv)
{
	if (priv->queue_format != GVE_GQI_QPL_FORMAT)
		return 0;

	return priv->rx_cfg.num_queues;
}

/* Returns a pointer to the next available tx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_tx_qpl(struct gve_priv *priv)
{
	int id = find_first_zero_bit(priv->qpl_cfg.qpl_id_map,
				     priv->qpl_cfg.qpl_map_size);

	/* we are out of tx qpls */
	if (id >= gve_num_tx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Returns a pointer to the next available rx qpl in the list of qpls
 */
static inline
struct gve_queue_page_list *gve_assign_rx_qpl(struct gve_priv *priv)
{
	int id = find_next_zero_bit(priv->qpl_cfg.qpl_id_map,
				    priv->qpl_cfg.qpl_map_size,
				    gve_num_tx_qpls(priv));

	/* we are out of rx qpls */
	if (id == gve_num_tx_qpls(priv) + gve_num_rx_qpls(priv))
		return NULL;

	set_bit(id, priv->qpl_cfg.qpl_id_map);
	return &priv->qpls[id];
}

/* Unassigns the qpl with the given id
 */
static inline void gve_unassign_qpl(struct gve_priv *priv, int id)
{
	clear_bit(id, priv->qpl_cfg.qpl_id_map);
}
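
/* The bitmap above implies the id layout: ids [0, num_tx_qpls) belong
 * to TX and [num_tx_qpls, num_tx_qpls + num_rx_qpls) to RX. A sketch of
 * a (hypothetical) assign/unassign pairing:
 *
 *	struct gve_queue_page_list *qpl = gve_assign_rx_qpl(priv);
 *
 *	if (!qpl)
 *		return -ENOMEM;	(out of rx qpls)
 *	...
 *	gve_unassign_qpl(priv, qpl->id);
 */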

/* Returns the correct dma direction for tx and rx qpls
 */
static inline enum dma_data_direction gve_qpl_dma_dir(struct gve_priv *priv,
						      int id)
{
	if (id < gve_num_tx_qpls(priv))
		return DMA_TO_DEVICE;
	else
		return DMA_FROM_DEVICE;
}

static inline bool gve_is_gqi(struct gve_priv *priv)
{
	return priv->queue_format == GVE_GQI_RDA_FORMAT ||
		priv->queue_format == GVE_GQI_QPL_FORMAT;
}

/* buffers */
int gve_alloc_page(struct gve_priv *priv, struct device *dev,
		   struct page **page, dma_addr_t *dma,
		   enum dma_data_direction, gfp_t gfp_flags);
void gve_free_page(struct device *dev, struct page *page, dma_addr_t dma,
		   enum dma_data_direction);
/* tx handling */
netdev_tx_t gve_tx(struct sk_buff *skb, struct net_device *dev);
bool gve_tx_poll(struct gve_notify_block *block, int budget);
int gve_tx_alloc_rings(struct gve_priv *priv);
void gve_tx_free_rings_gqi(struct gve_priv *priv);
__be32 gve_tx_load_event_counter(struct gve_priv *priv,
				 struct gve_tx_ring *tx);
/* rx handling */
void gve_rx_write_doorbell(struct gve_priv *priv, struct gve_rx_ring *rx);
bool gve_rx_poll(struct gve_notify_block *block, int budget);
int gve_rx_alloc_rings(struct gve_priv *priv);
void gve_rx_free_rings_gqi(struct gve_priv *priv);
bool gve_clean_rx_done(struct gve_rx_ring *rx, int budget,
		       netdev_features_t feat);
/* Reset */
void gve_schedule_reset(struct gve_priv *priv);
int gve_reset(struct gve_priv *priv, bool attempt_teardown);
int gve_adjust_queues(struct gve_priv *priv,
		      struct gve_queue_config new_rx_config,
		      struct gve_queue_config new_tx_config);
/* report stats handling */
void gve_handle_report_stats(struct gve_priv *priv);
/* exported by ethtool.c */
extern const struct ethtool_ops gve_ethtool_ops;
/* needed by ethtool */
extern const char gve_version_str[];
#endif /* _GVE_H_ */