1 // SPDX-License-Identifier: (GPL-2.0 OR MIT)
2 /* Google virtual Ethernet (gve) driver
3 *
4 * Copyright (C) 2015-2021 Google, Inc.
5 */
6
7 #include <linux/etherdevice.h>
8 #include <linux/pci.h>
9 #include "gve.h"
10 #include "gve_adminq.h"
11 #include "gve_register.h"
12
13 #define GVE_MAX_ADMINQ_RELEASE_CHECK 500
14 #define GVE_ADMINQ_SLEEP_LEN 20
15 #define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100
16
17 #define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
18 "Expected: length=%d, feature_mask=%x.\n" \
19 "Actual: length=%d, feature_mask=%x.\n"
20
21 #define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"
22
23 static
24 struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
25 struct gve_device_option *option)
26 {
27 void *option_end, *descriptor_end;
28
29 option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
30 descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);
31
32 return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
33 }
34
35 #define GVE_DEVICE_OPTION_NO_MIN_RING_SIZE 8
36
37 static
38 void gve_parse_device_option(struct gve_priv *priv,
39 struct gve_device_descriptor *device_descriptor,
40 struct gve_device_option *option,
41 struct gve_device_option_gqi_rda **dev_op_gqi_rda,
42 struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
43 struct gve_device_option_dqo_rda **dev_op_dqo_rda,
44 struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
45 struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
46 struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
47 struct gve_device_option_flow_steering **dev_op_flow_steering,
48 struct gve_device_option_rss_config **dev_op_rss_config,
49 struct gve_device_option_modify_ring **dev_op_modify_ring)
50 {
51 u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
52 u16 option_length = be16_to_cpu(option->option_length);
53 u16 option_id = be16_to_cpu(option->option_id);
54
55 /* If the length or feature mask doesn't match, continue without
56 * enabling the feature.
57 */
58 switch (option_id) {
59 case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
60 if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
61 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
62 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
63 "Raw Addressing",
64 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
65 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
66 option_length, req_feat_mask);
67 break;
68 }
69
70 dev_info(&priv->pdev->dev,
71 "Gqi raw addressing device option enabled.\n");
72 priv->queue_format = GVE_GQI_RDA_FORMAT;
73 break;
74 case GVE_DEV_OPT_ID_GQI_RDA:
75 if (option_length < sizeof(**dev_op_gqi_rda) ||
76 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
77 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
78 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
79 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
80 option_length, req_feat_mask);
81 break;
82 }
83
84 if (option_length > sizeof(**dev_op_gqi_rda)) {
85 dev_warn(&priv->pdev->dev,
86 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
87 }
88 *dev_op_gqi_rda = (void *)(option + 1);
89 break;
90 case GVE_DEV_OPT_ID_GQI_QPL:
91 if (option_length < sizeof(**dev_op_gqi_qpl) ||
92 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
93 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
94 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
95 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
96 option_length, req_feat_mask);
97 break;
98 }
99
100 if (option_length > sizeof(**dev_op_gqi_qpl)) {
101 dev_warn(&priv->pdev->dev,
102 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
103 }
104 *dev_op_gqi_qpl = (void *)(option + 1);
105 break;
106 case GVE_DEV_OPT_ID_DQO_RDA:
107 if (option_length < sizeof(**dev_op_dqo_rda) ||
108 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
109 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
110 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
111 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
112 option_length, req_feat_mask);
113 break;
114 }
115
116 if (option_length > sizeof(**dev_op_dqo_rda)) {
117 dev_warn(&priv->pdev->dev,
118 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
119 }
120 *dev_op_dqo_rda = (void *)(option + 1);
121 break;
122 case GVE_DEV_OPT_ID_DQO_QPL:
123 if (option_length < sizeof(**dev_op_dqo_qpl) ||
124 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL) {
125 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
126 "DQO QPL", (int)sizeof(**dev_op_dqo_qpl),
127 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_QPL,
128 option_length, req_feat_mask);
129 break;
130 }
131
132 if (option_length > sizeof(**dev_op_dqo_qpl)) {
133 dev_warn(&priv->pdev->dev,
134 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO QPL");
135 }
136 *dev_op_dqo_qpl = (void *)(option + 1);
137 break;
138 case GVE_DEV_OPT_ID_JUMBO_FRAMES:
139 if (option_length < sizeof(**dev_op_jumbo_frames) ||
140 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
141 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
142 "Jumbo Frames",
143 (int)sizeof(**dev_op_jumbo_frames),
144 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
145 option_length, req_feat_mask);
146 break;
147 }
148
149 if (option_length > sizeof(**dev_op_jumbo_frames)) {
150 dev_warn(&priv->pdev->dev,
151 GVE_DEVICE_OPTION_TOO_BIG_FMT,
152 "Jumbo Frames");
153 }
154 *dev_op_jumbo_frames = (void *)(option + 1);
155 break;
156 case GVE_DEV_OPT_ID_BUFFER_SIZES:
157 if (option_length < sizeof(**dev_op_buffer_sizes) ||
158 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES) {
159 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
160 "Buffer Sizes",
161 (int)sizeof(**dev_op_buffer_sizes),
162 GVE_DEV_OPT_REQ_FEAT_MASK_BUFFER_SIZES,
163 option_length, req_feat_mask);
164 break;
165 }
166
167 if (option_length > sizeof(**dev_op_buffer_sizes))
168 dev_warn(&priv->pdev->dev,
169 GVE_DEVICE_OPTION_TOO_BIG_FMT,
170 "Buffer Sizes");
171 *dev_op_buffer_sizes = (void *)(option + 1);
172 break;
173 case GVE_DEV_OPT_ID_MODIFY_RING:
174 if (option_length < GVE_DEVICE_OPTION_NO_MIN_RING_SIZE ||
175 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING) {
176 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
177 "Modify Ring", (int)sizeof(**dev_op_modify_ring),
178 GVE_DEV_OPT_REQ_FEAT_MASK_MODIFY_RING,
179 option_length, req_feat_mask);
180 break;
181 }
182
183 if (option_length > sizeof(**dev_op_modify_ring)) {
184 dev_warn(&priv->pdev->dev,
185 GVE_DEVICE_OPTION_TOO_BIG_FMT, "Modify Ring");
186 }
187
188 *dev_op_modify_ring = (void *)(option + 1);
189
190 /* device has not provided min ring size */
191 if (option_length == GVE_DEVICE_OPTION_NO_MIN_RING_SIZE)
192 priv->default_min_ring_size = true;
193 break;
194 case GVE_DEV_OPT_ID_FLOW_STEERING:
195 if (option_length < sizeof(**dev_op_flow_steering) ||
196 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING) {
197 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
198 "Flow Steering",
199 (int)sizeof(**dev_op_flow_steering),
200 GVE_DEV_OPT_REQ_FEAT_MASK_FLOW_STEERING,
201 option_length, req_feat_mask);
202 break;
203 }
204
205 if (option_length > sizeof(**dev_op_flow_steering))
206 dev_warn(&priv->pdev->dev,
207 GVE_DEVICE_OPTION_TOO_BIG_FMT,
208 "Flow Steering");
209 *dev_op_flow_steering = (void *)(option + 1);
210 break;
211 case GVE_DEV_OPT_ID_RSS_CONFIG:
212 if (option_length < sizeof(**dev_op_rss_config) ||
213 req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG) {
214 dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
215 "RSS config",
216 (int)sizeof(**dev_op_rss_config),
217 GVE_DEV_OPT_REQ_FEAT_MASK_RSS_CONFIG,
218 option_length, req_feat_mask);
219 break;
220 }
221
222 if (option_length > sizeof(**dev_op_rss_config))
223 dev_warn(&priv->pdev->dev,
224 GVE_DEVICE_OPTION_TOO_BIG_FMT,
225 "RSS config");
226 *dev_op_rss_config = (void *)(option + 1);
227 break;
228 default:
229 /* If we don't recognize the option just continue
230 * without doing anything.
231 */
232 dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
233 option_id);
234 }
235 }
236
237 /* Process all device options for a given describe device call. */
238 static int
239 gve_process_device_options(struct gve_priv *priv,
240 struct gve_device_descriptor *descriptor,
241 struct gve_device_option_gqi_rda **dev_op_gqi_rda,
242 struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
243 struct gve_device_option_dqo_rda **dev_op_dqo_rda,
244 struct gve_device_option_jumbo_frames **dev_op_jumbo_frames,
245 struct gve_device_option_dqo_qpl **dev_op_dqo_qpl,
246 struct gve_device_option_buffer_sizes **dev_op_buffer_sizes,
247 struct gve_device_option_flow_steering **dev_op_flow_steering,
248 struct gve_device_option_rss_config **dev_op_rss_config,
249 struct gve_device_option_modify_ring **dev_op_modify_ring)
250 {
251 const int num_options = be16_to_cpu(descriptor->num_device_options);
252 struct gve_device_option *dev_opt;
253 int i;
254
255 /* The options struct directly follows the device descriptor. */
256 dev_opt = (void *)(descriptor + 1);
257 for (i = 0; i < num_options; i++) {
258 struct gve_device_option *next_opt;
259
260 next_opt = gve_get_next_option(descriptor, dev_opt);
261 if (!next_opt) {
262 dev_err(&priv->dev->dev,
263 "options exceed device_descriptor's total length.\n");
264 return -EINVAL;
265 }
266
267 gve_parse_device_option(priv, descriptor, dev_opt,
268 dev_op_gqi_rda, dev_op_gqi_qpl,
269 dev_op_dqo_rda, dev_op_jumbo_frames,
270 dev_op_dqo_qpl, dev_op_buffer_sizes,
271 dev_op_flow_steering, dev_op_rss_config,
272 dev_op_modify_ring);
273 dev_opt = next_opt;
274 }
275
276 return 0;
277 }
278
279 int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
280 {
281 priv->adminq_pool = dma_pool_create("adminq_pool", dev,
282 GVE_ADMINQ_BUFFER_SIZE, 0, 0);
283 if (unlikely(!priv->adminq_pool))
284 return -ENOMEM;
285 priv->adminq = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
286 &priv->adminq_bus_addr);
287 if (unlikely(!priv->adminq)) {
288 dma_pool_destroy(priv->adminq_pool);
289 return -ENOMEM;
290 }
291
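/* The admin queue buffer holds a power-of-two number of commands, so the
 * command count minus one doubles as a mask for wrapping producer and event
 * counters into ring indices.
 */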
292 priv->adminq_mask =
293 (GVE_ADMINQ_BUFFER_SIZE / sizeof(union gve_adminq_command)) - 1;
294 priv->adminq_prod_cnt = 0;
295 priv->adminq_cmd_fail = 0;
296 priv->adminq_timeouts = 0;
297 priv->adminq_describe_device_cnt = 0;
298 priv->adminq_cfg_device_resources_cnt = 0;
299 priv->adminq_register_page_list_cnt = 0;
300 priv->adminq_unregister_page_list_cnt = 0;
301 priv->adminq_create_tx_queue_cnt = 0;
302 priv->adminq_create_rx_queue_cnt = 0;
303 priv->adminq_destroy_tx_queue_cnt = 0;
304 priv->adminq_destroy_rx_queue_cnt = 0;
305 priv->adminq_dcfg_device_resources_cnt = 0;
306 priv->adminq_set_driver_parameter_cnt = 0;
307 priv->adminq_report_stats_cnt = 0;
308 priv->adminq_report_link_speed_cnt = 0;
309 priv->adminq_get_ptype_map_cnt = 0;
310 priv->adminq_query_flow_rules_cnt = 0;
311 priv->adminq_cfg_flow_rule_cnt = 0;
312 priv->adminq_cfg_rss_cnt = 0;
313 priv->adminq_query_rss_cnt = 0;
314
315 /* Setup Admin queue with the device */
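/* Revision 0 devices take the admin queue as a single page frame number;
 * later revisions take an explicit length and a 64-bit base address, and are
 * then told to run via the driver status register.
 */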
316 if (priv->pdev->revision < 0x1) {
317 iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
318 &priv->reg_bar0->adminq_pfn);
319 } else {
320 iowrite16be(GVE_ADMINQ_BUFFER_SIZE,
321 &priv->reg_bar0->adminq_length);
322 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
323 iowrite32be(priv->adminq_bus_addr >> 32,
324 &priv->reg_bar0->adminq_base_address_hi);
325 #endif
326 iowrite32be(priv->adminq_bus_addr,
327 &priv->reg_bar0->adminq_base_address_lo);
328 iowrite32be(GVE_DRIVER_STATUS_RUN_MASK, &priv->reg_bar0->driver_status);
329 }
330 mutex_init(&priv->adminq_lock);
331 gve_set_admin_queue_ok(priv);
332 return 0;
333 }
334
335 void gve_adminq_release(struct gve_priv *priv)
336 {
337 int i = 0;
338
339 /* Tell the device the adminq is leaving */
340 if (priv->pdev->revision < 0x1) {
341 iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
342 while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
343 /* If this is reached the device is unrecoverable and still
344 * holding memory. Continue looping to avoid memory corruption,
345 * but WARN so it is visible what is going on.
346 */
347 if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
348 WARN(1, "Unrecoverable platform error!");
349 i++;
350 msleep(GVE_ADMINQ_SLEEP_LEN);
351 }
352 } else {
353 iowrite32be(GVE_DRIVER_STATUS_RESET_MASK, &priv->reg_bar0->driver_status);
354 while (!(ioread32be(&priv->reg_bar0->device_status)
355 & GVE_DEVICE_STATUS_DEVICE_IS_RESET)) {
356 if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
357 WARN(1, "Unrecoverable platform error!");
358 i++;
359 msleep(GVE_ADMINQ_SLEEP_LEN);
360 }
361 }
362 gve_clear_device_rings_ok(priv);
363 gve_clear_device_resources_ok(priv);
364 gve_clear_admin_queue_ok(priv);
365 }
366
367 void gve_adminq_free(struct device *dev, struct gve_priv *priv)
368 {
369 if (!gve_get_admin_queue_ok(priv))
370 return;
371 gve_adminq_release(priv);
372 dma_pool_free(priv->adminq_pool, priv->adminq, priv->adminq_bus_addr);
373 dma_pool_destroy(priv->adminq_pool);
374 gve_clear_admin_queue_ok(priv);
375 }
376
377 static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
378 {
379 iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
380 }
381
382 static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
383 {
384 int i;
385
386 for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
387 if (ioread32be(&priv->reg_bar0->adminq_event_counter)
388 == prod_cnt)
389 return true;
390 msleep(GVE_ADMINQ_SLEEP_LEN);
391 }
392
393 return false;
394 }
395
396 static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
397 {
398 if (status != GVE_ADMINQ_COMMAND_PASSED &&
399 status != GVE_ADMINQ_COMMAND_UNSET) {
400 dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
401 priv->adminq_cmd_fail++;
402 }
403 switch (status) {
404 case GVE_ADMINQ_COMMAND_PASSED:
405 return 0;
406 case GVE_ADMINQ_COMMAND_UNSET:
407 dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
408 return -EINVAL;
409 case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
410 case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
411 case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
412 case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
413 case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
414 return -EAGAIN;
415 case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
416 case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
417 case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
418 case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
419 case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
420 case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
421 return -EINVAL;
422 case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
423 return -ETIME;
424 case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
425 case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
426 return -EACCES;
427 case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
428 return -ENOMEM;
429 case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
430 return -EOPNOTSUPP;
431 default:
432 dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
433 return -EINVAL;
434 }
435 }
436
437 /* Flushes all AQ commands currently queued and waits for them to complete.
438 * If there are failures, it will return the first error.
439 */
440 static int gve_adminq_kick_and_wait(struct gve_priv *priv)
441 {
442 int tail, head;
443 int i;
444
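/* Commands in [tail, head) have been issued but not yet consumed by the device. */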
445 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
446 head = priv->adminq_prod_cnt;
447
448 gve_adminq_kick_cmd(priv, head);
449 if (!gve_adminq_wait_for_cmd(priv, head)) {
450 dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
451 priv->adminq_timeouts++;
452 return -ENOTRECOVERABLE;
453 }
454
455 for (i = tail; i < head; i++) {
456 union gve_adminq_command *cmd;
457 u32 status, err;
458
459 cmd = &priv->adminq[i & priv->adminq_mask];
460 status = be32_to_cpu(READ_ONCE(cmd->status));
461 err = gve_adminq_parse_err(priv, status);
462 if (err)
463 // Return the first error if we failed.
464 return err;
465 }
466
467 return 0;
468 }
469
470 /* This function is not threadsafe - the caller is responsible for any
471 * necessary locks.
472 */
473 static int gve_adminq_issue_cmd(struct gve_priv *priv,
474 union gve_adminq_command *cmd_orig)
475 {
476 union gve_adminq_command *cmd;
477 u32 opcode;
478 u32 tail;
479
480 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
481
482 // Check if next command will overflow the buffer.
483 if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
484 (tail & priv->adminq_mask)) {
485 int err;
486
487 // Flush existing commands to make room.
488 err = gve_adminq_kick_and_wait(priv);
489 if (err)
490 return err;
491
492 // Retry.
493 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
494 if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
495 (tail & priv->adminq_mask)) {
496 // This should never happen. We just flushed the
497 // command queue so there should be enough space.
498 return -ENOMEM;
499 }
500 }
501
502 cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
503 priv->adminq_prod_cnt++;
504
505 memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
506 opcode = be32_to_cpu(READ_ONCE(cmd->opcode));
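// Extended commands wrap an inner command; count stats against the inner opcode.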
507 if (opcode == GVE_ADMINQ_EXTENDED_COMMAND)
508 opcode = be32_to_cpu(cmd->extended_command.inner_opcode);
509
510 switch (opcode) {
511 case GVE_ADMINQ_DESCRIBE_DEVICE:
512 priv->adminq_describe_device_cnt++;
513 break;
514 case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
515 priv->adminq_cfg_device_resources_cnt++;
516 break;
517 case GVE_ADMINQ_REGISTER_PAGE_LIST:
518 priv->adminq_register_page_list_cnt++;
519 break;
520 case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
521 priv->adminq_unregister_page_list_cnt++;
522 break;
523 case GVE_ADMINQ_CREATE_TX_QUEUE:
524 priv->adminq_create_tx_queue_cnt++;
525 break;
526 case GVE_ADMINQ_CREATE_RX_QUEUE:
527 priv->adminq_create_rx_queue_cnt++;
528 break;
529 case GVE_ADMINQ_DESTROY_TX_QUEUE:
530 priv->adminq_destroy_tx_queue_cnt++;
531 break;
532 case GVE_ADMINQ_DESTROY_RX_QUEUE:
533 priv->adminq_destroy_rx_queue_cnt++;
534 break;
535 case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
536 priv->adminq_dcfg_device_resources_cnt++;
537 break;
538 case GVE_ADMINQ_SET_DRIVER_PARAMETER:
539 priv->adminq_set_driver_parameter_cnt++;
540 break;
541 case GVE_ADMINQ_REPORT_STATS:
542 priv->adminq_report_stats_cnt++;
543 break;
544 case GVE_ADMINQ_REPORT_LINK_SPEED:
545 priv->adminq_report_link_speed_cnt++;
546 break;
547 case GVE_ADMINQ_GET_PTYPE_MAP:
548 priv->adminq_get_ptype_map_cnt++;
549 break;
550 case GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY:
551 priv->adminq_verify_driver_compatibility_cnt++;
552 break;
553 case GVE_ADMINQ_QUERY_FLOW_RULES:
554 priv->adminq_query_flow_rules_cnt++;
555 break;
556 case GVE_ADMINQ_CONFIGURE_FLOW_RULE:
557 priv->adminq_cfg_flow_rule_cnt++;
558 break;
559 case GVE_ADMINQ_CONFIGURE_RSS:
560 priv->adminq_cfg_rss_cnt++;
561 break;
562 case GVE_ADMINQ_QUERY_RSS:
563 priv->adminq_query_rss_cnt++;
564 break;
565 default:
566 dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
567 return -EINVAL;
568 }
569
570 return 0;
571 }
572
573 static int gve_adminq_execute_cmd(struct gve_priv *priv,
574 union gve_adminq_command *cmd_orig)
575 {
576 u32 tail, head;
577 int err;
578
579 mutex_lock(&priv->adminq_lock);
580 tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
581 head = priv->adminq_prod_cnt;
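/* Only issue a command when the queue is empty, i.e. the device has
 * consumed every previously issued command.
 */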
582 if (tail != head) {
583 err = -EINVAL;
584 goto out;
585 }
586
587 err = gve_adminq_issue_cmd(priv, cmd_orig);
588 if (err)
589 goto out;
590
591 err = gve_adminq_kick_and_wait(priv);
592
593 out:
594 mutex_unlock(&priv->adminq_lock);
595 return err;
596 }
597
598 static int gve_adminq_execute_extended_cmd(struct gve_priv *priv, u32 opcode,
599 size_t cmd_size, void *cmd_orig)
600 {
601 union gve_adminq_command cmd;
602 dma_addr_t inner_cmd_bus;
603 void *inner_cmd;
604 int err;
605
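/* The inner command is copied into a DMA-coherent buffer that the wrapping
 * extended command references by bus address.
 */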
606 inner_cmd = dma_alloc_coherent(&priv->pdev->dev, cmd_size,
607 &inner_cmd_bus, GFP_KERNEL);
608 if (!inner_cmd)
609 return -ENOMEM;
610
611 memcpy(inner_cmd, cmd_orig, cmd_size);
612
613 memset(&cmd, 0, sizeof(cmd));
614 cmd.opcode = cpu_to_be32(GVE_ADMINQ_EXTENDED_COMMAND);
615 cmd.extended_command = (struct gve_adminq_extended_command) {
616 .inner_opcode = cpu_to_be32(opcode),
617 .inner_length = cpu_to_be32(cmd_size),
618 .inner_command_addr = cpu_to_be64(inner_cmd_bus),
619 };
620
621 err = gve_adminq_execute_cmd(priv, &cmd);
622
623 dma_free_coherent(&priv->pdev->dev, cmd_size, inner_cmd, inner_cmd_bus);
624 return err;
625 }
626
627 /* The device specifies that the management vector can either be the first irq
628 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
629 * the ntfy blks. If it is 0 then the management vector is last; if it is 1 then
630 * the management vector is first.
631 *
632 * gve arranges the msix vectors so that the management vector is last.
633 */
634 #define GVE_NTFY_BLK_BASE_MSIX_IDX 0
635 int gve_adminq_configure_device_resources(struct gve_priv *priv,
636 dma_addr_t counter_array_bus_addr,
637 u32 num_counters,
638 dma_addr_t db_array_bus_addr,
639 u32 num_ntfy_blks)
640 {
641 union gve_adminq_command cmd;
642
643 memset(&cmd, 0, sizeof(cmd));
644 cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
645 cmd.configure_device_resources =
646 (struct gve_adminq_configure_device_resources) {
647 .counter_array = cpu_to_be64(counter_array_bus_addr),
648 .num_counters = cpu_to_be32(num_counters),
649 .irq_db_addr = cpu_to_be64(db_array_bus_addr),
650 .num_irq_dbs = cpu_to_be32(num_ntfy_blks),
651 .irq_db_stride = cpu_to_be32(sizeof(*priv->irq_db_indices)),
652 .ntfy_blk_msix_base_idx =
653 cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
654 .queue_format = priv->queue_format,
655 };
656
657 return gve_adminq_execute_cmd(priv, &cmd);
658 }
659
660 int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
661 {
662 union gve_adminq_command cmd;
663
664 memset(&cmd, 0, sizeof(cmd));
665 cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);
666
667 return gve_adminq_execute_cmd(priv, &cmd);
668 }
669
670 static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
671 {
672 struct gve_tx_ring *tx = &priv->tx[queue_index];
673 union gve_adminq_command cmd;
674
675 memset(&cmd, 0, sizeof(cmd));
676 cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
677 cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
678 .queue_id = cpu_to_be32(queue_index),
679 .queue_resources_addr =
680 cpu_to_be64(tx->q_resources_bus),
681 .tx_ring_addr = cpu_to_be64(tx->bus),
682 .ntfy_id = cpu_to_be32(tx->ntfy_id),
683 .tx_ring_size = cpu_to_be16(priv->tx_desc_cnt),
684 };
685
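/* Raw addressing (RDA) formats use the sentinel GVE_RAW_ADDRESSING_QPL_ID;
 * QPL formats reference the ring's registered queue page list.
 */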
686 if (gve_is_gqi(priv)) {
687 u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
688 GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;
689
690 cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
691 } else {
692 u32 qpl_id = 0;
693
694 if (priv->queue_format == GVE_DQO_RDA_FORMAT)
695 qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
696 else
697 qpl_id = tx->dqo.qpl->id;
698 cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
699 cmd.create_tx_queue.tx_comp_ring_addr =
700 cpu_to_be64(tx->complq_bus_dqo);
701 cmd.create_tx_queue.tx_comp_ring_size =
702 cpu_to_be16(priv->tx_desc_cnt);
703 }
704
705 return gve_adminq_issue_cmd(priv, &cmd);
706 }
707
708 int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
709 {
710 int err;
711 int i;
712
713 for (i = start_id; i < start_id + num_queues; i++) {
714 err = gve_adminq_create_tx_queue(priv, i);
715 if (err)
716 return err;
717 }
718
719 return gve_adminq_kick_and_wait(priv);
720 }
721
722 static void gve_adminq_get_create_rx_queue_cmd(struct gve_priv *priv,
723 union gve_adminq_command *cmd,
724 u32 queue_index)
725 {
726 struct gve_rx_ring *rx = &priv->rx[queue_index];
727
728 memset(cmd, 0, sizeof(*cmd));
729 cmd->opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
730 cmd->create_rx_queue = (struct gve_adminq_create_rx_queue) {
731 .queue_id = cpu_to_be32(queue_index),
732 .ntfy_id = cpu_to_be32(rx->ntfy_id),
733 .queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
734 .rx_ring_size = cpu_to_be16(priv->rx_desc_cnt),
735 };
736
737 if (gve_is_gqi(priv)) {
738 u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
739 GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;
740
741 cmd->create_rx_queue.rx_desc_ring_addr =
742 cpu_to_be64(rx->desc.bus);
743 cmd->create_rx_queue.rx_data_ring_addr =
744 cpu_to_be64(rx->data.data_bus);
745 cmd->create_rx_queue.index = cpu_to_be32(queue_index);
746 cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
747 cmd->create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
748 } else {
749 u32 qpl_id = 0;
750
751 if (priv->queue_format == GVE_DQO_RDA_FORMAT)
752 qpl_id = GVE_RAW_ADDRESSING_QPL_ID;
753 else
754 qpl_id = rx->dqo.qpl->id;
755 cmd->create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
756 cmd->create_rx_queue.rx_desc_ring_addr =
757 cpu_to_be64(rx->dqo.complq.bus);
758 cmd->create_rx_queue.rx_data_ring_addr =
759 cpu_to_be64(rx->dqo.bufq.bus);
760 cmd->create_rx_queue.packet_buffer_size =
761 cpu_to_be16(priv->data_buffer_size_dqo);
762 cmd->create_rx_queue.rx_buff_ring_size =
763 cpu_to_be16(priv->rx_desc_cnt);
764 cmd->create_rx_queue.enable_rsc =
765 !!(priv->dev->features & NETIF_F_LRO);
766 if (priv->header_split_enabled)
767 cmd->create_rx_queue.header_buffer_size =
768 cpu_to_be16(priv->header_buf_size);
769 }
770 }
771
772 static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
773 {
774 union gve_adminq_command cmd;
775
776 gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
777 return gve_adminq_issue_cmd(priv, &cmd);
778 }
779
780 /* Unlike gve_adminq_create_rx_queue, this actually rings the doorbell */
781 int gve_adminq_create_single_rx_queue(struct gve_priv *priv, u32 queue_index)
782 {
783 union gve_adminq_command cmd;
784
785 gve_adminq_get_create_rx_queue_cmd(priv, &cmd, queue_index);
786 return gve_adminq_execute_cmd(priv, &cmd);
787 }
788
789 int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
790 {
791 int err;
792 int i;
793
794 for (i = 0; i < num_queues; i++) {
795 err = gve_adminq_create_rx_queue(priv, i);
796 if (err)
797 return err;
798 }
799
800 return gve_adminq_kick_and_wait(priv);
801 }
802
803 static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
804 {
805 union gve_adminq_command cmd;
806 int err;
807
808 memset(&cmd, 0, sizeof(cmd));
809 cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
810 cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
811 .queue_id = cpu_to_be32(queue_index),
812 };
813
814 err = gve_adminq_issue_cmd(priv, &cmd);
815 if (err)
816 return err;
817
818 return 0;
819 }
820
821 int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 start_id, u32 num_queues)
822 {
823 int err;
824 int i;
825
826 for (i = start_id; i < start_id + num_queues; i++) {
827 err = gve_adminq_destroy_tx_queue(priv, i);
828 if (err)
829 return err;
830 }
831
832 return gve_adminq_kick_and_wait(priv);
833 }
834
835 static void gve_adminq_make_destroy_rx_queue_cmd(union gve_adminq_command *cmd,
836 u32 queue_index)
837 {
838 memset(cmd, 0, sizeof(*cmd));
839 cmd->opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
840 cmd->destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
841 .queue_id = cpu_to_be32(queue_index),
842 };
843 }
844
845 static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
846 {
847 union gve_adminq_command cmd;
848
849 gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
850 return gve_adminq_issue_cmd(priv, &cmd);
851 }
852
853 /* Unlike gve_adminq_destroy_rx_queue, this actually rings the doorbell */
854 int gve_adminq_destroy_single_rx_queue(struct gve_priv *priv, u32 queue_index)
855 {
856 union gve_adminq_command cmd;
857
858 gve_adminq_make_destroy_rx_queue_cmd(&cmd, queue_index);
859 return gve_adminq_execute_cmd(priv, &cmd);
860 }
861
862 int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
863 {
864 int err;
865 int i;
866
867 for (i = 0; i < num_queues; i++) {
868 err = gve_adminq_destroy_rx_queue(priv, i);
869 if (err)
870 return err;
871 }
872
873 return gve_adminq_kick_and_wait(priv);
874 }
875
876 static void gve_set_default_desc_cnt(struct gve_priv *priv,
877 const struct gve_device_descriptor *descriptor)
878 {
879 priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
880 priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
881
882 /* set default ranges */
883 priv->max_tx_desc_cnt = priv->tx_desc_cnt;
884 priv->max_rx_desc_cnt = priv->rx_desc_cnt;
885 priv->min_tx_desc_cnt = priv->tx_desc_cnt;
886 priv->min_rx_desc_cnt = priv->rx_desc_cnt;
887 }
888
889 static void gve_enable_supported_features(struct gve_priv *priv,
890 u32 supported_features_mask,
891 const struct gve_device_option_jumbo_frames
892 *dev_op_jumbo_frames,
893 const struct gve_device_option_dqo_qpl
894 *dev_op_dqo_qpl,
895 const struct gve_device_option_buffer_sizes
896 *dev_op_buffer_sizes,
897 const struct gve_device_option_flow_steering
898 *dev_op_flow_steering,
899 const struct gve_device_option_rss_config
900 *dev_op_rss_config,
901 const struct gve_device_option_modify_ring
902 *dev_op_modify_ring)
903 {
904 /* Before control reaches this point, the page-size-capped max MTU from
905 * the gve_device_descriptor field has already been stored in
906 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
907 */
908 if (dev_op_jumbo_frames &&
909 (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
910 dev_info(&priv->pdev->dev,
911 "JUMBO FRAMES device option enabled.\n");
912 priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
913 }
914
915 /* Override pages for qpl for DQO-QPL */
916 if (dev_op_dqo_qpl) {
917 priv->tx_pages_per_qpl =
918 be16_to_cpu(dev_op_dqo_qpl->tx_pages_per_qpl);
919 if (priv->tx_pages_per_qpl == 0)
920 priv->tx_pages_per_qpl = DQO_QPL_DEFAULT_TX_PAGES;
921 }
922
923 if (dev_op_buffer_sizes &&
924 (supported_features_mask & GVE_SUP_BUFFER_SIZES_MASK)) {
925 priv->max_rx_buffer_size =
926 be16_to_cpu(dev_op_buffer_sizes->packet_buffer_size);
927 priv->header_buf_size =
928 be16_to_cpu(dev_op_buffer_sizes->header_buffer_size);
929 dev_info(&priv->pdev->dev,
930 "BUFFER SIZES device option enabled with max_rx_buffer_size of %u, header_buf_size of %u.\n",
931 priv->max_rx_buffer_size, priv->header_buf_size);
932 }
933
934 /* Read and store ring size ranges given by device */
935 if (dev_op_modify_ring &&
936 (supported_features_mask & GVE_SUP_MODIFY_RING_MASK)) {
937 priv->modify_ring_size_enabled = true;
938
939 /* max ring size for DQO QPL should not be overwritten because of device limit */
940 if (priv->queue_format != GVE_DQO_QPL_FORMAT) {
941 priv->max_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_rx_ring_size);
942 priv->max_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->max_tx_ring_size);
943 }
944 if (priv->default_min_ring_size) {
945 /* If device hasn't provided minimums, use default minimums */
946 priv->min_tx_desc_cnt = GVE_DEFAULT_MIN_TX_RING_SIZE;
947 priv->min_rx_desc_cnt = GVE_DEFAULT_MIN_RX_RING_SIZE;
948 } else {
949 priv->min_rx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_rx_ring_size);
950 priv->min_tx_desc_cnt = be16_to_cpu(dev_op_modify_ring->min_tx_ring_size);
951 }
952 }
953
954 if (dev_op_flow_steering &&
955 (supported_features_mask & GVE_SUP_FLOW_STEERING_MASK)) {
956 if (dev_op_flow_steering->max_flow_rules) {
957 priv->max_flow_rules =
958 be32_to_cpu(dev_op_flow_steering->max_flow_rules);
959 priv->dev->hw_features |= NETIF_F_NTUPLE;
960 dev_info(&priv->pdev->dev,
961 "FLOW STEERING device option enabled with max rule limit of %u.\n",
962 priv->max_flow_rules);
963 }
964 }
965
966 if (dev_op_rss_config &&
967 (supported_features_mask & GVE_SUP_RSS_CONFIG_MASK)) {
968 priv->rss_key_size =
969 be16_to_cpu(dev_op_rss_config->hash_key_size);
970 priv->rss_lut_size =
971 be16_to_cpu(dev_op_rss_config->hash_lut_size);
972 }
973 }
974
975 int gve_adminq_describe_device(struct gve_priv *priv)
976 {
977 struct gve_device_option_flow_steering *dev_op_flow_steering = NULL;
978 struct gve_device_option_buffer_sizes *dev_op_buffer_sizes = NULL;
979 struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
980 struct gve_device_option_modify_ring *dev_op_modify_ring = NULL;
981 struct gve_device_option_rss_config *dev_op_rss_config = NULL;
982 struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
983 struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
984 struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
985 struct gve_device_option_dqo_qpl *dev_op_dqo_qpl = NULL;
986 struct gve_device_descriptor *descriptor;
987 u32 supported_features_mask = 0;
988 union gve_adminq_command cmd;
989 dma_addr_t descriptor_bus;
990 int err = 0;
991 u8 *mac;
992 u16 mtu;
993
994 memset(&cmd, 0, sizeof(cmd));
995 descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL,
996 &descriptor_bus);
997 if (!descriptor)
998 return -ENOMEM;
999 cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
1000 cmd.describe_device.device_descriptor_addr =
1001 cpu_to_be64(descriptor_bus);
1002 cmd.describe_device.device_descriptor_version =
1003 cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
1004 cmd.describe_device.available_length =
1005 cpu_to_be32(GVE_ADMINQ_BUFFER_SIZE);
1006
1007 err = gve_adminq_execute_cmd(priv, &cmd);
1008 if (err)
1009 goto free_device_descriptor;
1010
1011 err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
1012 &dev_op_gqi_qpl, &dev_op_dqo_rda,
1013 &dev_op_jumbo_frames, &dev_op_dqo_qpl,
1014 &dev_op_buffer_sizes,
1015 &dev_op_flow_steering,
1016 &dev_op_rss_config,
1017 &dev_op_modify_ring);
1018 if (err)
1019 goto free_device_descriptor;
1020
1021 /* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
1022 * is not set to GqiRda, choose the queue format in a priority order:
1023 * DqoRda, DqoQpl, GqiRda, GqiQpl. Use GqiQpl as default.
1024 */
1025 if (dev_op_dqo_rda) {
1026 priv->queue_format = GVE_DQO_RDA_FORMAT;
1027 dev_info(&priv->pdev->dev,
1028 "Driver is running with DQO RDA queue format.\n");
1029 supported_features_mask =
1030 be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
1031 } else if (dev_op_dqo_qpl) {
1032 priv->queue_format = GVE_DQO_QPL_FORMAT;
1033 supported_features_mask =
1034 be32_to_cpu(dev_op_dqo_qpl->supported_features_mask);
1035 } else if (dev_op_gqi_rda) {
1036 priv->queue_format = GVE_GQI_RDA_FORMAT;
1037 dev_info(&priv->pdev->dev,
1038 "Driver is running with GQI RDA queue format.\n");
1039 supported_features_mask =
1040 be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
1041 } else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
1042 dev_info(&priv->pdev->dev,
1043 "Driver is running with GQI RDA queue format.\n");
1044 } else {
1045 priv->queue_format = GVE_GQI_QPL_FORMAT;
1046 if (dev_op_gqi_qpl)
1047 supported_features_mask =
1048 be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
1049 dev_info(&priv->pdev->dev,
1050 "Driver is running with GQI QPL queue format.\n");
1051 }
1052
1053 /* set default descriptor counts */
1054 gve_set_default_desc_cnt(priv, descriptor);
1055
1056 /* DQO supports LRO. */
1057 if (!gve_is_gqi(priv))
1058 priv->dev->hw_features |= NETIF_F_LRO;
1059
1060 priv->max_registered_pages =
1061 be64_to_cpu(descriptor->max_registered_pages);
1062 mtu = be16_to_cpu(descriptor->mtu);
1063 if (mtu < ETH_MIN_MTU) {
1064 dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
1065 err = -EINVAL;
1066 goto free_device_descriptor;
1067 }
1068 priv->dev->max_mtu = mtu;
1069 priv->num_event_counters = be16_to_cpu(descriptor->counters);
1070 eth_hw_addr_set(priv->dev, descriptor->mac);
1071 mac = descriptor->mac;
1072 dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
1073 priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
1074 priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);
1075
1076 gve_enable_supported_features(priv, supported_features_mask,
1077 dev_op_jumbo_frames, dev_op_dqo_qpl,
1078 dev_op_buffer_sizes, dev_op_flow_steering,
1079 dev_op_rss_config, dev_op_modify_ring);
1080
1081 free_device_descriptor:
1082 dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
1083 return err;
1084 }
1085
1086 int gve_adminq_register_page_list(struct gve_priv *priv,
1087 struct gve_queue_page_list *qpl)
1088 {
1089 struct device *hdev = &priv->pdev->dev;
1090 u32 num_entries = qpl->num_entries;
1091 u32 size = num_entries * sizeof(qpl->page_buses[0]);
1092 union gve_adminq_command cmd;
1093 dma_addr_t page_list_bus;
1094 __be64 *page_list;
1095 int err;
1096 int i;
1097
1098 memset(&cmd, 0, sizeof(cmd));
1099 page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
1100 if (!page_list)
1101 return -ENOMEM;
1102
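/* The device expects the page bus addresses as an array of big-endian 64-bit values. */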
1103 for (i = 0; i < num_entries; i++)
1104 page_list[i] = cpu_to_be64(qpl->page_buses[i]);
1105
1106 cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
1107 cmd.reg_page_list = (struct gve_adminq_register_page_list) {
1108 .page_list_id = cpu_to_be32(qpl->id),
1109 .num_pages = cpu_to_be32(num_entries),
1110 .page_address_list_addr = cpu_to_be64(page_list_bus),
1111 .page_size = cpu_to_be64(PAGE_SIZE),
1112 };
1113
1114 err = gve_adminq_execute_cmd(priv, &cmd);
1115 dma_free_coherent(hdev, size, page_list, page_list_bus);
1116 return err;
1117 }
1118
1119 int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
1120 {
1121 union gve_adminq_command cmd;
1122
1123 memset(&cmd, 0, sizeof(cmd));
1124 cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
1125 cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
1126 .page_list_id = cpu_to_be32(page_list_id),
1127 };
1128
1129 return gve_adminq_execute_cmd(priv, &cmd);
1130 }
1131
1132 int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
1133 {
1134 union gve_adminq_command cmd;
1135
1136 memset(&cmd, 0, sizeof(cmd));
1137 cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
1138 cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
1139 .parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
1140 .parameter_value = cpu_to_be64(mtu),
1141 };
1142
1143 return gve_adminq_execute_cmd(priv, &cmd);
1144 }
1145
1146 int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
1147 dma_addr_t stats_report_addr, u64 interval)
1148 {
1149 union gve_adminq_command cmd;
1150
1151 memset(&cmd, 0, sizeof(cmd));
1152 cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
1153 cmd.report_stats = (struct gve_adminq_report_stats) {
1154 .stats_report_len = cpu_to_be64(stats_report_len),
1155 .stats_report_addr = cpu_to_be64(stats_report_addr),
1156 .interval = cpu_to_be64(interval),
1157 };
1158
1159 return gve_adminq_execute_cmd(priv, &cmd);
1160 }
1161
1162 int gve_adminq_verify_driver_compatibility(struct gve_priv *priv,
1163 u64 driver_info_len,
1164 dma_addr_t driver_info_addr)
1165 {
1166 union gve_adminq_command cmd;
1167
1168 memset(&cmd, 0, sizeof(cmd));
1169 cmd.opcode = cpu_to_be32(GVE_ADMINQ_VERIFY_DRIVER_COMPATIBILITY);
1170 cmd.verify_driver_compatibility = (struct gve_adminq_verify_driver_compatibility) {
1171 .driver_info_len = cpu_to_be64(driver_info_len),
1172 .driver_info_addr = cpu_to_be64(driver_info_addr),
1173 };
1174
1175 return gve_adminq_execute_cmd(priv, &cmd);
1176 }
1177
1178 int gve_adminq_report_link_speed(struct gve_priv *priv)
1179 {
1180 union gve_adminq_command gvnic_cmd;
1181 dma_addr_t link_speed_region_bus;
1182 __be64 *link_speed_region;
1183 int err;
1184
1185 link_speed_region =
1186 dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
1187 &link_speed_region_bus, GFP_KERNEL);
1188
1189 if (!link_speed_region)
1190 return -ENOMEM;
1191
1192 memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
1193 gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
1194 gvnic_cmd.report_link_speed.link_speed_address =
1195 cpu_to_be64(link_speed_region_bus);
1196
1197 err = gve_adminq_execute_cmd(priv, &gvnic_cmd);
1198
1199 priv->link_speed = be64_to_cpu(*link_speed_region);
1200 dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
1201 link_speed_region_bus);
1202 return err;
1203 }
1204
1205 int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
1206 struct gve_ptype_lut *ptype_lut)
1207 {
1208 struct gve_ptype_map *ptype_map;
1209 union gve_adminq_command cmd;
1210 dma_addr_t ptype_map_bus;
1211 int err = 0;
1212 int i;
1213
1214 memset(&cmd, 0, sizeof(cmd));
1215 ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
1216 &ptype_map_bus, GFP_KERNEL);
1217 if (!ptype_map)
1218 return -ENOMEM;
1219
1220 cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
1221 cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
1222 .ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
1223 .ptype_map_addr = cpu_to_be64(ptype_map_bus),
1224 };
1225
1226 err = gve_adminq_execute_cmd(priv, &cmd);
1227 if (err)
1228 goto err;
1229
1230 /* Populate ptype_lut. */
1231 for (i = 0; i < GVE_NUM_PTYPES; i++) {
1232 ptype_lut->ptypes[i].l3_type =
1233 ptype_map->ptypes[i].l3_type;
1234 ptype_lut->ptypes[i].l4_type =
1235 ptype_map->ptypes[i].l4_type;
1236 }
1237 err:
1238 dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
1239 ptype_map_bus);
1240 return err;
1241 }
1242
1243 static int
1244 gve_adminq_configure_flow_rule(struct gve_priv *priv,
1245 struct gve_adminq_configure_flow_rule *flow_rule_cmd)
1246 {
1247 int err = gve_adminq_execute_extended_cmd(priv,
1248 GVE_ADMINQ_CONFIGURE_FLOW_RULE,
1249 sizeof(struct gve_adminq_configure_flow_rule),
1250 flow_rule_cmd);
1251
1252 if (err == -ETIME) {
1253 dev_err(&priv->pdev->dev, "Timeout to configure the flow rule, trigger reset");
1254 gve_reset(priv, true);
1255 } else if (!err) {
1256 priv->flow_rules_cache.rules_cache_synced = false;
1257 }
1258
1259 return err;
1260 }
1261
1262 int gve_adminq_add_flow_rule(struct gve_priv *priv, struct gve_adminq_flow_rule *rule, u32 loc)
1263 {
1264 struct gve_adminq_configure_flow_rule flow_rule_cmd = {
1265 .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_ADD),
1266 .location = cpu_to_be32(loc),
1267 .rule = *rule,
1268 };
1269
1270 return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
1271 }
1272
1273 int gve_adminq_del_flow_rule(struct gve_priv *priv, u32 loc)
1274 {
1275 struct gve_adminq_configure_flow_rule flow_rule_cmd = {
1276 .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_DEL),
1277 .location = cpu_to_be32(loc),
1278 };
1279
1280 return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
1281 }
1282
1283 int gve_adminq_reset_flow_rules(struct gve_priv *priv)
1284 {
1285 struct gve_adminq_configure_flow_rule flow_rule_cmd = {
1286 .opcode = cpu_to_be16(GVE_FLOW_RULE_CFG_RESET),
1287 };
1288
1289 return gve_adminq_configure_flow_rule(priv, &flow_rule_cmd);
1290 }
1291
1292 int gve_adminq_configure_rss(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
1293 {
1294 dma_addr_t lut_bus = 0, key_bus = 0;
1295 u16 key_size = 0, lut_size = 0;
1296 union gve_adminq_command cmd;
1297 __be32 *lut = NULL;
1298 u8 hash_alg = 0;
1299 u8 *key = NULL;
1300 int err = 0;
1301 u16 i;
1302
1303 switch (rxfh->hfunc) {
1304 case ETH_RSS_HASH_NO_CHANGE:
1305 break;
1306 case ETH_RSS_HASH_TOP:
1307 hash_alg = ETH_RSS_HASH_TOP;
1308 break;
1309 default:
1310 return -EOPNOTSUPP;
1311 }
1312
1313 if (rxfh->indir) {
1314 lut_size = priv->rss_lut_size;
1315 lut = dma_alloc_coherent(&priv->pdev->dev,
1316 lut_size * sizeof(*lut),
1317 &lut_bus, GFP_KERNEL);
1318 if (!lut)
1319 return -ENOMEM;
1320
1321 for (i = 0; i < priv->rss_lut_size; i++)
1322 lut[i] = cpu_to_be32(rxfh->indir[i]);
1323 }
1324
1325 if (rxfh->key) {
1326 key_size = priv->rss_key_size;
1327 key = dma_alloc_coherent(&priv->pdev->dev,
1328 key_size, &key_bus, GFP_KERNEL);
1329 if (!key) {
1330 err = -ENOMEM;
1331 goto out;
1332 }
1333
1334 memcpy(key, rxfh->key, key_size);
1335 }
1336
1337 /* Zero-valued fields in the cmd.configure_rss instruct the device to
1338 * not update those fields.
1339 */
1340 memset(&cmd, 0, sizeof(cmd));
1341 cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_RSS);
1342 cmd.configure_rss = (struct gve_adminq_configure_rss) {
1343 .hash_types = cpu_to_be16(BIT(GVE_RSS_HASH_TCPV4) |
1344 BIT(GVE_RSS_HASH_UDPV4) |
1345 BIT(GVE_RSS_HASH_TCPV6) |
1346 BIT(GVE_RSS_HASH_UDPV6)),
1347 .hash_alg = hash_alg,
1348 .hash_key_size = cpu_to_be16(key_size),
1349 .hash_lut_size = cpu_to_be16(lut_size),
1350 .hash_key_addr = cpu_to_be64(key_bus),
1351 .hash_lut_addr = cpu_to_be64(lut_bus),
1352 };
1353
1354 err = gve_adminq_execute_cmd(priv, &cmd);
1355
1356 out:
1357 if (lut)
1358 dma_free_coherent(&priv->pdev->dev,
1359 lut_size * sizeof(*lut),
1360 lut, lut_bus);
1361 if (key)
1362 dma_free_coherent(&priv->pdev->dev,
1363 key_size, key, key_bus);
1364 return err;
1365 }
1366
1367 /* In the DMA memory that the driver allocated for the device to query flow rules, the device
1368 * first writes a struct gve_query_flow_rules_descriptor. Immediately after it, the device
1369 * writes an array of rules or rule ids, with the count specified in the descriptor.
1370 * For GVE_FLOW_RULE_QUERY_STATS, the device will only write the descriptor.
1371 */
1372 static int gve_adminq_process_flow_rules_query(struct gve_priv *priv, u16 query_opcode,
1373 struct gve_query_flow_rules_descriptor *descriptor)
1374 {
1375 struct gve_flow_rules_cache *flow_rules_cache = &priv->flow_rules_cache;
1376 u32 num_queried_rules, total_memory_len, rule_info_len;
1377 void *rule_info;
1378
1379 total_memory_len = be32_to_cpu(descriptor->total_length);
1380 num_queried_rules = be32_to_cpu(descriptor->num_queried_rules);
1381 rule_info = (void *)(descriptor + 1);
1382
1383 switch (query_opcode) {
1384 case GVE_FLOW_RULE_QUERY_RULES:
1385 rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rules_cache);
1386 if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
1387 dev_err(&priv->dev->dev, "flow rules query is out of memory.\n");
1388 return -ENOMEM;
1389 }
1390
1391 memcpy(flow_rules_cache->rules_cache, rule_info, rule_info_len);
1392 flow_rules_cache->rules_cache_num = num_queried_rules;
1393 break;
1394 case GVE_FLOW_RULE_QUERY_IDS:
1395 rule_info_len = num_queried_rules * sizeof(*flow_rules_cache->rule_ids_cache);
1396 if (sizeof(*descriptor) + rule_info_len != total_memory_len) {
1397 dev_err(&priv->dev->dev, "flow rule ids query is out of memory.\n");
1398 return -ENOMEM;
1399 }
1400
1401 memcpy(flow_rules_cache->rule_ids_cache, rule_info, rule_info_len);
1402 flow_rules_cache->rule_ids_cache_num = num_queried_rules;
1403 break;
1404 case GVE_FLOW_RULE_QUERY_STATS:
1405 priv->num_flow_rules = be32_to_cpu(descriptor->num_flow_rules);
1406 priv->max_flow_rules = be32_to_cpu(descriptor->max_flow_rules);
1407 return 0;
1408 default:
1409 return -EINVAL;
1410 }
1411
1412 return 0;
1413 }
1414
1415 int gve_adminq_query_flow_rules(struct gve_priv *priv, u16 query_opcode, u32 starting_loc)
1416 {
1417 struct gve_query_flow_rules_descriptor *descriptor;
1418 union gve_adminq_command cmd;
1419 dma_addr_t descriptor_bus;
1420 int err = 0;
1421
1422 memset(&cmd, 0, sizeof(cmd));
1423 descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
1424 if (!descriptor)
1425 return -ENOMEM;
1426
1427 cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_FLOW_RULES);
1428 cmd.query_flow_rules = (struct gve_adminq_query_flow_rules) {
1429 .opcode = cpu_to_be16(query_opcode),
1430 .starting_rule_id = cpu_to_be32(starting_loc),
1431 .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
1432 .rule_descriptor_addr = cpu_to_be64(descriptor_bus),
1433 };
1434 err = gve_adminq_execute_cmd(priv, &cmd);
1435 if (err)
1436 goto out;
1437
1438 err = gve_adminq_process_flow_rules_query(priv, query_opcode, descriptor);
1439
1440 out:
1441 dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
1442 return err;
1443 }
1444
1445 static int gve_adminq_process_rss_query(struct gve_priv *priv,
1446 struct gve_query_rss_descriptor *descriptor,
1447 struct ethtool_rxfh_param *rxfh)
1448 {
1449 u32 total_memory_length;
1450 u16 hash_lut_length;
1451 void *rss_info_addr;
1452 __be32 *lut;
1453 u16 i;
1454
1455 total_memory_length = be32_to_cpu(descriptor->total_length);
1456 hash_lut_length = priv->rss_lut_size * sizeof(*rxfh->indir);
1457
1458 if (sizeof(*descriptor) + priv->rss_key_size + hash_lut_length != total_memory_length) {
1459 dev_err(&priv->dev->dev,
1460 "rss query desc from device has invalid length parameter.\n");
1461 return -EINVAL;
1462 }
1463
1464 rxfh->hfunc = descriptor->hash_alg;
1465
1466 rss_info_addr = (void *)(descriptor + 1);
1467 if (rxfh->key)
1468 memcpy(rxfh->key, rss_info_addr, priv->rss_key_size);
1469
1470 rss_info_addr += priv->rss_key_size;
1471 lut = (__be32 *)rss_info_addr;
1472 if (rxfh->indir) {
1473 for (i = 0; i < priv->rss_lut_size; i++)
1474 rxfh->indir[i] = be32_to_cpu(lut[i]);
1475 }
1476
1477 return 0;
1478 }
1479
1480 int gve_adminq_query_rss_config(struct gve_priv *priv, struct ethtool_rxfh_param *rxfh)
1481 {
1482 struct gve_query_rss_descriptor *descriptor;
1483 union gve_adminq_command cmd;
1484 dma_addr_t descriptor_bus;
1485 int err = 0;
1486
1487 descriptor = dma_pool_alloc(priv->adminq_pool, GFP_KERNEL, &descriptor_bus);
1488 if (!descriptor)
1489 return -ENOMEM;
1490
1491 memset(&cmd, 0, sizeof(cmd));
1492 cmd.opcode = cpu_to_be32(GVE_ADMINQ_QUERY_RSS);
1493 cmd.query_rss = (struct gve_adminq_query_rss) {
1494 .available_length = cpu_to_be64(GVE_ADMINQ_BUFFER_SIZE),
1495 .rss_descriptor_addr = cpu_to_be64(descriptor_bus),
1496 };
1497 err = gve_adminq_execute_cmd(priv, &cmd);
1498 if (err)
1499 goto out;
1500
1501 err = gve_adminq_process_rss_query(priv, descriptor, rxfh);
1502
1503 out:
1504 dma_pool_free(priv->adminq_pool, descriptor, descriptor_bus);
1505 return err;
1506 }
1507