// SPDX-License-Identifier: (GPL-2.0 OR MIT)
/* Google virtual Ethernet (gve) driver
 *
 * Copyright (C) 2015-2021 Google, Inc.
 */

#include <linux/etherdevice.h>
#include <linux/pci.h>
#include "gve.h"
#include "gve_adminq.h"
#include "gve_register.h"

#define GVE_MAX_ADMINQ_RELEASE_CHECK 500
#define GVE_ADMINQ_SLEEP_LEN 20
#define GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK 100

#define GVE_DEVICE_OPTION_ERROR_FMT "%s option error:\n" \
"Expected: length=%d, feature_mask=%x.\n" \
"Actual: length=%d, feature_mask=%x.\n"

#define GVE_DEVICE_OPTION_TOO_BIG_FMT "Length of %s option larger than expected. Possible older version of guest driver.\n"

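/* Return the option that follows @option within @descriptor, or NULL if the
 * next option would extend past the descriptor's total_length.
 */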
static
struct gve_device_option *gve_get_next_option(struct gve_device_descriptor *descriptor,
					      struct gve_device_option *option)
{
	void *option_end, *descriptor_end;

	option_end = (void *)(option + 1) + be16_to_cpu(option->option_length);
	descriptor_end = (void *)descriptor + be16_to_cpu(descriptor->total_length);

	return option_end > descriptor_end ? NULL : (struct gve_device_option *)option_end;
}

static
void gve_parse_device_option(struct gve_priv *priv,
			     struct gve_device_descriptor *device_descriptor,
			     struct gve_device_option *option,
			     struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			     struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			     struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			     struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	u32 req_feat_mask = be32_to_cpu(option->required_features_mask);
	u16 option_length = be16_to_cpu(option->option_length);
	u16 option_id = be16_to_cpu(option->option_id);

	/* If the length or feature mask doesn't match, continue without
	 * enabling the feature.
	 */
	switch (option_id) {
	case GVE_DEV_OPT_ID_GQI_RAW_ADDRESSING:
		if (option_length != GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Raw Addressing",
				 GVE_DEV_OPT_LEN_GQI_RAW_ADDRESSING,
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RAW_ADDRESSING,
				 option_length, req_feat_mask);
			break;
		}

		dev_info(&priv->pdev->dev,
			 "Gqi raw addressing device option enabled.\n");
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		break;
	case GVE_DEV_OPT_ID_GQI_RDA:
		if (option_length < sizeof(**dev_op_gqi_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI RDA", (int)sizeof(**dev_op_gqi_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI RDA");
		}
		*dev_op_gqi_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_GQI_QPL:
		if (option_length < sizeof(**dev_op_gqi_qpl) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "GQI QPL", (int)sizeof(**dev_op_gqi_qpl),
				 GVE_DEV_OPT_REQ_FEAT_MASK_GQI_QPL,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_gqi_qpl)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "GQI QPL");
		}
		*dev_op_gqi_qpl = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_DQO_RDA:
		if (option_length < sizeof(**dev_op_dqo_rda) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "DQO RDA", (int)sizeof(**dev_op_dqo_rda),
				 GVE_DEV_OPT_REQ_FEAT_MASK_DQO_RDA,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_dqo_rda)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT, "DQO RDA");
		}
		*dev_op_dqo_rda = (void *)(option + 1);
		break;
	case GVE_DEV_OPT_ID_JUMBO_FRAMES:
		if (option_length < sizeof(**dev_op_jumbo_frames) ||
		    req_feat_mask != GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES) {
			dev_warn(&priv->pdev->dev, GVE_DEVICE_OPTION_ERROR_FMT,
				 "Jumbo Frames",
				 (int)sizeof(**dev_op_jumbo_frames),
				 GVE_DEV_OPT_REQ_FEAT_MASK_JUMBO_FRAMES,
				 option_length, req_feat_mask);
			break;
		}

		if (option_length > sizeof(**dev_op_jumbo_frames)) {
			dev_warn(&priv->pdev->dev,
				 GVE_DEVICE_OPTION_TOO_BIG_FMT,
				 "Jumbo Frames");
		}
		*dev_op_jumbo_frames = (void *)(option + 1);
		break;
	default:
		/* If we don't recognize the option just continue
		 * without doing anything.
		 */
		dev_dbg(&priv->pdev->dev, "Unrecognized device option 0x%hx not enabled.\n",
			option_id);
	}
}

/* Process all device options for a given describe device call. */
static int
gve_process_device_options(struct gve_priv *priv,
			   struct gve_device_descriptor *descriptor,
			   struct gve_device_option_gqi_rda **dev_op_gqi_rda,
			   struct gve_device_option_gqi_qpl **dev_op_gqi_qpl,
			   struct gve_device_option_dqo_rda **dev_op_dqo_rda,
			   struct gve_device_option_jumbo_frames **dev_op_jumbo_frames)
{
	const int num_options = be16_to_cpu(descriptor->num_device_options);
	struct gve_device_option *dev_opt;
	int i;

	/* The options struct directly follows the device descriptor. */
	dev_opt = (void *)(descriptor + 1);
	for (i = 0; i < num_options; i++) {
		struct gve_device_option *next_opt;

		next_opt = gve_get_next_option(descriptor, dev_opt);
		if (!next_opt) {
			dev_err(&priv->dev->dev,
				"options exceed device_descriptor's total length.\n");
			return -EINVAL;
		}

		gve_parse_device_option(priv, descriptor, dev_opt,
					dev_op_gqi_rda, dev_op_gqi_qpl,
					dev_op_dqo_rda, dev_op_jumbo_frames);
		dev_opt = next_opt;
	}

	return 0;
}

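/* Allocate a page of DMA-coherent memory for the admin queue, zero the
 * per-command counters, and report the queue's page frame number to the
 * device via the adminq_pfn register.
 */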
int gve_adminq_alloc(struct device *dev, struct gve_priv *priv)
{
	priv->adminq = dma_alloc_coherent(dev, PAGE_SIZE,
					  &priv->adminq_bus_addr, GFP_KERNEL);
	if (unlikely(!priv->adminq))
		return -ENOMEM;

	priv->adminq_mask = (PAGE_SIZE / sizeof(union gve_adminq_command)) - 1;
	priv->adminq_prod_cnt = 0;
	priv->adminq_cmd_fail = 0;
	priv->adminq_timeouts = 0;
	priv->adminq_describe_device_cnt = 0;
	priv->adminq_cfg_device_resources_cnt = 0;
	priv->adminq_register_page_list_cnt = 0;
	priv->adminq_unregister_page_list_cnt = 0;
	priv->adminq_create_tx_queue_cnt = 0;
	priv->adminq_create_rx_queue_cnt = 0;
	priv->adminq_destroy_tx_queue_cnt = 0;
	priv->adminq_destroy_rx_queue_cnt = 0;
	priv->adminq_dcfg_device_resources_cnt = 0;
	priv->adminq_set_driver_parameter_cnt = 0;
	priv->adminq_report_stats_cnt = 0;
	priv->adminq_report_link_speed_cnt = 0;
	priv->adminq_get_ptype_map_cnt = 0;

	/* Setup Admin queue with the device */
	iowrite32be(priv->adminq_bus_addr / PAGE_SIZE,
		    &priv->reg_bar0->adminq_pfn);

	gve_set_admin_queue_ok(priv);
	return 0;
}

void gve_adminq_release(struct gve_priv *priv)
{
	int i = 0;

	/* Tell the device the adminq is leaving */
	iowrite32be(0x0, &priv->reg_bar0->adminq_pfn);
	while (ioread32be(&priv->reg_bar0->adminq_pfn)) {
		/* If this is reached the device is unrecoverable and still
		 * holding memory. Continue looping to avoid memory
		 * corruption, but WARN so it is visible what is going on.
		 */
		if (i == GVE_MAX_ADMINQ_RELEASE_CHECK)
			WARN(1, "Unrecoverable platform error!");
		i++;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}
	gve_clear_device_rings_ok(priv);
	gve_clear_device_resources_ok(priv);
	gve_clear_admin_queue_ok(priv);
}

void gve_adminq_free(struct device *dev, struct gve_priv *priv)
{
	if (!gve_get_admin_queue_ok(priv))
		return;
	gve_adminq_release(priv);
	dma_free_coherent(dev, PAGE_SIZE, priv->adminq, priv->adminq_bus_addr);
	gve_clear_admin_queue_ok(priv);
}

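/* Ring the admin queue doorbell with the current producer count. */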
static void gve_adminq_kick_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	iowrite32be(prod_cnt, &priv->reg_bar0->adminq_doorbell);
}

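/* Poll the device's admin queue event counter until it reaches prod_cnt,
 * sleeping between reads; returns false if the retry budget is exhausted.
 */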
static bool gve_adminq_wait_for_cmd(struct gve_priv *priv, u32 prod_cnt)
{
	int i;

	for (i = 0; i < GVE_MAX_ADMINQ_EVENT_COUNTER_CHECK; i++) {
		if (ioread32be(&priv->reg_bar0->adminq_event_counter)
		    == prod_cnt)
			return true;
		msleep(GVE_ADMINQ_SLEEP_LEN);
	}

	return false;
}

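/* Map an admin queue command status reported by the device onto a kernel
 * errno, counting any failure in adminq_cmd_fail.
 */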
static int gve_adminq_parse_err(struct gve_priv *priv, u32 status)
{
	if (status != GVE_ADMINQ_COMMAND_PASSED &&
	    status != GVE_ADMINQ_COMMAND_UNSET) {
		dev_err(&priv->pdev->dev, "AQ command failed with status %d\n", status);
		priv->adminq_cmd_fail++;
	}
	switch (status) {
	case GVE_ADMINQ_COMMAND_PASSED:
		return 0;
	case GVE_ADMINQ_COMMAND_UNSET:
		dev_err(&priv->pdev->dev, "parse_aq_err: err and status both unset, this should not be possible.\n");
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_ABORTED:
	case GVE_ADMINQ_COMMAND_ERROR_CANCELLED:
	case GVE_ADMINQ_COMMAND_ERROR_DATALOSS:
	case GVE_ADMINQ_COMMAND_ERROR_FAILED_PRECONDITION:
	case GVE_ADMINQ_COMMAND_ERROR_UNAVAILABLE:
		return -EAGAIN;
	case GVE_ADMINQ_COMMAND_ERROR_ALREADY_EXISTS:
	case GVE_ADMINQ_COMMAND_ERROR_INTERNAL_ERROR:
	case GVE_ADMINQ_COMMAND_ERROR_INVALID_ARGUMENT:
	case GVE_ADMINQ_COMMAND_ERROR_NOT_FOUND:
	case GVE_ADMINQ_COMMAND_ERROR_OUT_OF_RANGE:
	case GVE_ADMINQ_COMMAND_ERROR_UNKNOWN_ERROR:
		return -EINVAL;
	case GVE_ADMINQ_COMMAND_ERROR_DEADLINE_EXCEEDED:
		return -ETIME;
	case GVE_ADMINQ_COMMAND_ERROR_PERMISSION_DENIED:
	case GVE_ADMINQ_COMMAND_ERROR_UNAUTHENTICATED:
		return -EACCES;
	case GVE_ADMINQ_COMMAND_ERROR_RESOURCE_EXHAUSTED:
		return -ENOMEM;
	case GVE_ADMINQ_COMMAND_ERROR_UNIMPLEMENTED:
		return -ENOTSUPP;
	default:
		dev_err(&priv->pdev->dev, "parse_aq_err: unknown status code %d\n", status);
		return -EINVAL;
	}
}

/* Flushes all AQ commands currently queued and waits for them to complete.
 * If there are failures, it will return the first error.
 */
static int gve_adminq_kick_and_wait(struct gve_priv *priv)
{
	u32 tail, head;
	int i;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;

	gve_adminq_kick_cmd(priv, head);
	if (!gve_adminq_wait_for_cmd(priv, head)) {
		dev_err(&priv->pdev->dev, "AQ commands timed out, need to reset AQ\n");
		priv->adminq_timeouts++;
		return -ENOTRECOVERABLE;
	}

	for (i = tail; i < head; i++) {
		union gve_adminq_command *cmd;
		u32 status, err;

		cmd = &priv->adminq[i & priv->adminq_mask];
		status = be32_to_cpu(READ_ONCE(cmd->status));
		err = gve_adminq_parse_err(priv, status);
		if (err)
			// Return the error if any of the commands failed.
			return err;
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 */
static int gve_adminq_issue_cmd(struct gve_priv *priv,
				union gve_adminq_command *cmd_orig)
{
	union gve_adminq_command *cmd;
	u32 opcode;
	u32 tail;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);

	// Check if next command will overflow the buffer.
	if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
	    (tail & priv->adminq_mask)) {
		int err;

		// Flush existing commands to make room.
		err = gve_adminq_kick_and_wait(priv);
		if (err)
			return err;

		// Retry.
		tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
		if (((priv->adminq_prod_cnt + 1) & priv->adminq_mask) ==
		    (tail & priv->adminq_mask)) {
			// This should never happen. We just flushed the
			// commands.
			return -ENOMEM;
		}
	}

	cmd = &priv->adminq[priv->adminq_prod_cnt & priv->adminq_mask];
	priv->adminq_prod_cnt++;

	memcpy(cmd, cmd_orig, sizeof(*cmd_orig));
	opcode = be32_to_cpu(READ_ONCE(cmd->opcode));

	switch (opcode) {
	case GVE_ADMINQ_DESCRIBE_DEVICE:
		priv->adminq_describe_device_cnt++;
		break;
	case GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES:
		priv->adminq_cfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_REGISTER_PAGE_LIST:
		priv->adminq_register_page_list_cnt++;
		break;
	case GVE_ADMINQ_UNREGISTER_PAGE_LIST:
		priv->adminq_unregister_page_list_cnt++;
		break;
	case GVE_ADMINQ_CREATE_TX_QUEUE:
		priv->adminq_create_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_CREATE_RX_QUEUE:
		priv->adminq_create_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_TX_QUEUE:
		priv->adminq_destroy_tx_queue_cnt++;
		break;
	case GVE_ADMINQ_DESTROY_RX_QUEUE:
		priv->adminq_destroy_rx_queue_cnt++;
		break;
	case GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES:
		priv->adminq_dcfg_device_resources_cnt++;
		break;
	case GVE_ADMINQ_SET_DRIVER_PARAMETER:
		priv->adminq_set_driver_parameter_cnt++;
		break;
	case GVE_ADMINQ_REPORT_STATS:
		priv->adminq_report_stats_cnt++;
		break;
	case GVE_ADMINQ_REPORT_LINK_SPEED:
		priv->adminq_report_link_speed_cnt++;
		break;
	case GVE_ADMINQ_GET_PTYPE_MAP:
		priv->adminq_get_ptype_map_cnt++;
		break;
	default:
		dev_err(&priv->pdev->dev, "unknown AQ command opcode %d\n", opcode);
	}

	return 0;
}

/* This function is not threadsafe - the caller is responsible for any
 * necessary locks.
 * The caller is also responsible for making sure there are no commands
 * waiting to be executed.
 */
static int gve_adminq_execute_cmd(struct gve_priv *priv,
				  union gve_adminq_command *cmd_orig)
{
	u32 tail, head;
	int err;

	tail = ioread32be(&priv->reg_bar0->adminq_event_counter);
	head = priv->adminq_prod_cnt;
	if (tail != head)
		// This is not a valid path
		return -EINVAL;

	err = gve_adminq_issue_cmd(priv, cmd_orig);
	if (err)
		return err;

	return gve_adminq_kick_and_wait(priv);
}

/* The device specifies that the management vector can either be the first irq
 * or the last irq. ntfy_blk_msix_base_idx indicates the first irq assigned to
 * the ntfy blks. If it is 0 the management vector is last, if it is 1 then
 * the management vector is first.
 *
 * gve arbitrarily picks the management vector to be the first irq.
 */
#define GVE_NTFY_BLK_BASE_MSIX_IDX 0
int gve_adminq_configure_device_resources(struct gve_priv *priv,
					  dma_addr_t counter_array_bus_addr,
					  u32 num_counters,
					  dma_addr_t db_array_bus_addr,
					  u32 num_ntfy_blks)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CONFIGURE_DEVICE_RESOURCES);
	cmd.configure_device_resources =
		(struct gve_adminq_configure_device_resources) {
		.counter_array = cpu_to_be64(counter_array_bus_addr),
		.num_counters = cpu_to_be32(num_counters),
		.irq_db_addr = cpu_to_be64(db_array_bus_addr),
		.num_irq_dbs = cpu_to_be32(num_ntfy_blks),
		.irq_db_stride = cpu_to_be32(sizeof(priv->ntfy_blocks[0])),
		.ntfy_blk_msix_base_idx =
			cpu_to_be32(GVE_NTFY_BLK_BASE_MSIX_IDX),
		.queue_format = priv->queue_format,
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_deconfigure_device_resources(struct gve_priv *priv)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DECONFIGURE_DEVICE_RESOURCES);

	return gve_adminq_execute_cmd(priv, &cmd);
}

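/* Queue (without waiting for) a CREATE_TX_QUEUE command for the given queue.
 * GQI formats pass a queue page list id (or the raw-addressing sentinel);
 * DQO instead supplies the ring size and the completion ring.
 */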
static int gve_adminq_create_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_tx_ring *tx = &priv->tx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_TX_QUEUE);
	cmd.create_tx_queue = (struct gve_adminq_create_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.queue_resources_addr =
			cpu_to_be64(tx->q_resources_bus),
		.tx_ring_addr = cpu_to_be64(tx->bus),
		.ntfy_id = cpu_to_be32(tx->ntfy_id),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : tx->tx_fifo.qpl->id;

		cmd.create_tx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
	} else {
		cmd.create_tx_queue.tx_ring_size =
			cpu_to_be16(priv->tx_desc_cnt);
		cmd.create_tx_queue.tx_comp_ring_addr =
			cpu_to_be64(tx->complq_bus_dqo);
		cmd.create_tx_queue.tx_comp_ring_size =
			cpu_to_be16(priv->options_dqo_rda.tx_comp_ring_entries);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_tx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_create_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	struct gve_rx_ring *rx = &priv->rx[queue_index];
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_CREATE_RX_QUEUE);
	cmd.create_rx_queue = (struct gve_adminq_create_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
		.ntfy_id = cpu_to_be32(rx->ntfy_id),
		.queue_resources_addr = cpu_to_be64(rx->q_resources_bus),
	};

	if (gve_is_gqi(priv)) {
		u32 qpl_id = priv->queue_format == GVE_GQI_RDA_FORMAT ?
			GVE_RAW_ADDRESSING_QPL_ID : rx->data.qpl->id;

		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->desc.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->data.data_bus);
		cmd.create_rx_queue.index = cpu_to_be32(queue_index);
		cmd.create_rx_queue.queue_page_list_id = cpu_to_be32(qpl_id);
		cmd.create_rx_queue.packet_buffer_size = cpu_to_be16(rx->packet_buffer_size);
	} else {
		cmd.create_rx_queue.rx_ring_size =
			cpu_to_be16(priv->rx_desc_cnt);
		cmd.create_rx_queue.rx_desc_ring_addr =
			cpu_to_be64(rx->dqo.complq.bus);
		cmd.create_rx_queue.rx_data_ring_addr =
			cpu_to_be64(rx->dqo.bufq.bus);
		cmd.create_rx_queue.packet_buffer_size =
			cpu_to_be16(priv->data_buffer_size_dqo);
		cmd.create_rx_queue.rx_buff_ring_size =
			cpu_to_be16(priv->options_dqo_rda.rx_buff_ring_entries);
		cmd.create_rx_queue.enable_rsc =
			!!(priv->dev->features & NETIF_F_LRO);
	}

	return gve_adminq_issue_cmd(priv, &cmd);
}

int gve_adminq_create_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_create_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_tx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_TX_QUEUE);
	cmd.destroy_tx_queue = (struct gve_adminq_destroy_tx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_tx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_tx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

static int gve_adminq_destroy_rx_queue(struct gve_priv *priv, u32 queue_index)
{
	union gve_adminq_command cmd;
	int err;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESTROY_RX_QUEUE);
	cmd.destroy_rx_queue = (struct gve_adminq_destroy_rx_queue) {
		.queue_id = cpu_to_be32(queue_index),
	};

	err = gve_adminq_issue_cmd(priv, &cmd);
	if (err)
		return err;

	return 0;
}

int gve_adminq_destroy_rx_queues(struct gve_priv *priv, u32 num_queues)
{
	int err;
	int i;

	for (i = 0; i < num_queues; i++) {
		err = gve_adminq_destroy_rx_queue(priv, i);
		if (err)
			return err;
	}

	return gve_adminq_kick_and_wait(priv);
}

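/* Record the GQI descriptor ring sizes advertised in the device descriptor,
 * rejecting rings whose descriptors do not fill at least one page.
 */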
static int gve_set_desc_cnt(struct gve_priv *priv,
			    struct gve_device_descriptor *descriptor)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	if (priv->tx_desc_cnt * sizeof(priv->tx->desc[0]) < PAGE_SIZE) {
		dev_err(&priv->pdev->dev, "Tx desc count %d too low\n",
			priv->tx_desc_cnt);
		return -EINVAL;
	}
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	if (priv->rx_desc_cnt * sizeof(priv->rx->desc.desc_ring[0])
	    < PAGE_SIZE) {
		dev_err(&priv->pdev->dev, "Rx desc count %d too low\n",
			priv->rx_desc_cnt);
		return -EINVAL;
	}
	return 0;
}

static int
gve_set_desc_cnt_dqo(struct gve_priv *priv,
		     const struct gve_device_descriptor *descriptor,
		     const struct gve_device_option_dqo_rda *dev_op_dqo_rda)
{
	priv->tx_desc_cnt = be16_to_cpu(descriptor->tx_queue_entries);
	priv->options_dqo_rda.tx_comp_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->tx_comp_ring_entries);
	priv->rx_desc_cnt = be16_to_cpu(descriptor->rx_queue_entries);
	priv->options_dqo_rda.rx_buff_ring_entries =
		be16_to_cpu(dev_op_dqo_rda->rx_buff_ring_entries);

	return 0;
}

static void gve_enable_supported_features(struct gve_priv *priv,
					  u32 supported_features_mask,
					  const struct gve_device_option_jumbo_frames
					  *dev_op_jumbo_frames)
{
	/* Before control reaches this point, the page-size-capped max MTU from
	 * the gve_device_descriptor field has already been stored in
	 * priv->dev->max_mtu. We overwrite it with the true max MTU below.
	 */
	if (dev_op_jumbo_frames &&
	    (supported_features_mask & GVE_SUP_JUMBO_FRAMES_MASK)) {
		dev_info(&priv->pdev->dev,
			 "JUMBO FRAMES device option enabled.\n");
		priv->dev->max_mtu = be16_to_cpu(dev_op_jumbo_frames->max_mtu);
	}
}

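/* Query the device descriptor, choose a queue format from the advertised
 * device options, and record device limits (ring sizes, MTU, counters, MAC
 * address, queue page list sizes) in @priv.
 */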
int gve_adminq_describe_device(struct gve_priv *priv)
{
	struct gve_device_option_jumbo_frames *dev_op_jumbo_frames = NULL;
	struct gve_device_option_gqi_rda *dev_op_gqi_rda = NULL;
	struct gve_device_option_gqi_qpl *dev_op_gqi_qpl = NULL;
	struct gve_device_option_dqo_rda *dev_op_dqo_rda = NULL;
	struct gve_device_descriptor *descriptor;
	u32 supported_features_mask = 0;
	union gve_adminq_command cmd;
	dma_addr_t descriptor_bus;
	int err = 0;
	u8 *mac;
	u16 mtu;

	memset(&cmd, 0, sizeof(cmd));
	descriptor = dma_alloc_coherent(&priv->pdev->dev, PAGE_SIZE,
					&descriptor_bus, GFP_KERNEL);
	if (!descriptor)
		return -ENOMEM;
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_DESCRIBE_DEVICE);
	cmd.describe_device.device_descriptor_addr =
		cpu_to_be64(descriptor_bus);
	cmd.describe_device.device_descriptor_version =
		cpu_to_be32(GVE_ADMINQ_DEVICE_DESCRIPTOR_VERSION);
	cmd.describe_device.available_length = cpu_to_be32(PAGE_SIZE);

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto free_device_descriptor;

	err = gve_process_device_options(priv, descriptor, &dev_op_gqi_rda,
					 &dev_op_gqi_qpl, &dev_op_dqo_rda,
					 &dev_op_jumbo_frames);
	if (err)
		goto free_device_descriptor;

	/* If the GQI_RAW_ADDRESSING option is not enabled and the queue format
	 * is not set to GqiRda, choose the queue format in a priority order:
	 * DqoRda, GqiRda, GqiQpl. Use GqiQpl as default.
	 */
	if (dev_op_dqo_rda) {
		priv->queue_format = GVE_DQO_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with DQO RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_dqo_rda->supported_features_mask);
	} else if (dev_op_gqi_rda) {
		priv->queue_format = GVE_GQI_RDA_FORMAT;
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
		supported_features_mask =
			be32_to_cpu(dev_op_gqi_rda->supported_features_mask);
	} else if (priv->queue_format == GVE_GQI_RDA_FORMAT) {
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI RDA queue format.\n");
	} else {
		priv->queue_format = GVE_GQI_QPL_FORMAT;
		if (dev_op_gqi_qpl)
			supported_features_mask =
				be32_to_cpu(dev_op_gqi_qpl->supported_features_mask);
		dev_info(&priv->pdev->dev,
			 "Driver is running with GQI QPL queue format.\n");
	}
	if (gve_is_gqi(priv)) {
		err = gve_set_desc_cnt(priv, descriptor);
	} else {
		/* DQO supports LRO. */
		priv->dev->hw_features |= NETIF_F_LRO;
		err = gve_set_desc_cnt_dqo(priv, descriptor, dev_op_dqo_rda);
	}
	if (err)
		goto free_device_descriptor;

	priv->max_registered_pages =
		be64_to_cpu(descriptor->max_registered_pages);
	mtu = be16_to_cpu(descriptor->mtu);
	if (mtu < ETH_MIN_MTU) {
		dev_err(&priv->pdev->dev, "MTU %d below minimum MTU\n", mtu);
		err = -EINVAL;
		goto free_device_descriptor;
	}
	priv->dev->max_mtu = mtu;
	priv->num_event_counters = be16_to_cpu(descriptor->counters);
	eth_hw_addr_set(priv->dev, descriptor->mac);
	mac = descriptor->mac;
	dev_info(&priv->pdev->dev, "MAC addr: %pM\n", mac);
	priv->tx_pages_per_qpl = be16_to_cpu(descriptor->tx_pages_per_qpl);
	priv->rx_data_slot_cnt = be16_to_cpu(descriptor->rx_pages_per_qpl);

	if (gve_is_gqi(priv) && priv->rx_data_slot_cnt < priv->rx_desc_cnt) {
		dev_err(&priv->pdev->dev, "rx_data_slot_cnt cannot be smaller than rx_desc_cnt, setting rx_desc_cnt down to %d.\n",
			priv->rx_data_slot_cnt);
		priv->rx_desc_cnt = priv->rx_data_slot_cnt;
	}
	priv->default_num_queues = be16_to_cpu(descriptor->default_num_queues);

	gve_enable_supported_features(priv, supported_features_mask,
				      dev_op_jumbo_frames);

free_device_descriptor:
	dma_free_coherent(&priv->pdev->dev, PAGE_SIZE, descriptor,
			  descriptor_bus);
	return err;
}

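/* Register a queue page list with the device: a temporary DMA-coherent array
 * of big-endian page bus addresses is built and handed to the device via
 * REGISTER_PAGE_LIST, then freed once the command completes.
 */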
int gve_adminq_register_page_list(struct gve_priv *priv,
				  struct gve_queue_page_list *qpl)
{
	struct device *hdev = &priv->pdev->dev;
	u32 num_entries = qpl->num_entries;
	u32 size = num_entries * sizeof(qpl->page_buses[0]);
	union gve_adminq_command cmd;
	dma_addr_t page_list_bus;
	__be64 *page_list;
	int err;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	page_list = dma_alloc_coherent(hdev, size, &page_list_bus, GFP_KERNEL);
	if (!page_list)
		return -ENOMEM;

	for (i = 0; i < num_entries; i++)
		page_list[i] = cpu_to_be64(qpl->page_buses[i]);

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REGISTER_PAGE_LIST);
	cmd.reg_page_list = (struct gve_adminq_register_page_list) {
		.page_list_id = cpu_to_be32(qpl->id),
		.num_pages = cpu_to_be32(num_entries),
		.page_address_list_addr = cpu_to_be64(page_list_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	dma_free_coherent(hdev, size, page_list, page_list_bus);
	return err;
}

int gve_adminq_unregister_page_list(struct gve_priv *priv, u32 page_list_id)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_UNREGISTER_PAGE_LIST);
	cmd.unreg_page_list = (struct gve_adminq_unregister_page_list) {
		.page_list_id = cpu_to_be32(page_list_id),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_set_mtu(struct gve_priv *priv, u64 mtu)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_SET_DRIVER_PARAMETER);
	cmd.set_driver_param = (struct gve_adminq_set_driver_parameter) {
		.parameter_type = cpu_to_be32(GVE_SET_PARAM_MTU),
		.parameter_value = cpu_to_be64(mtu),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

int gve_adminq_report_stats(struct gve_priv *priv, u64 stats_report_len,
			    dma_addr_t stats_report_addr, u64 interval)
{
	union gve_adminq_command cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_STATS);
	cmd.report_stats = (struct gve_adminq_report_stats) {
		.stats_report_len = cpu_to_be64(stats_report_len),
		.stats_report_addr = cpu_to_be64(stats_report_addr),
		.interval = cpu_to_be64(interval),
	};

	return gve_adminq_execute_cmd(priv, &cmd);
}

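/* Ask the device to write the current link speed into a DMA'd __be64 and
 * cache the result in priv->link_speed.
 */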
int gve_adminq_report_link_speed(struct gve_priv *priv)
{
	union gve_adminq_command gvnic_cmd;
	dma_addr_t link_speed_region_bus;
	__be64 *link_speed_region;
	int err;

	link_speed_region =
		dma_alloc_coherent(&priv->pdev->dev, sizeof(*link_speed_region),
				   &link_speed_region_bus, GFP_KERNEL);

	if (!link_speed_region)
		return -ENOMEM;

	memset(&gvnic_cmd, 0, sizeof(gvnic_cmd));
	gvnic_cmd.opcode = cpu_to_be32(GVE_ADMINQ_REPORT_LINK_SPEED);
	gvnic_cmd.report_link_speed.link_speed_address =
		cpu_to_be64(link_speed_region_bus);

	err = gve_adminq_execute_cmd(priv, &gvnic_cmd);

	priv->link_speed = be64_to_cpu(*link_speed_region);
	dma_free_coherent(&priv->pdev->dev, sizeof(*link_speed_region), link_speed_region,
			  link_speed_region_bus);
	return err;
}

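/* Fetch the device's packet type map used by the DQO format and copy the
 * l3/l4 type of each entry into the caller's lookup table.
 */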
int gve_adminq_get_ptype_map_dqo(struct gve_priv *priv,
				 struct gve_ptype_lut *ptype_lut)
{
	struct gve_ptype_map *ptype_map;
	union gve_adminq_command cmd;
	dma_addr_t ptype_map_bus;
	int err = 0;
	int i;

	memset(&cmd, 0, sizeof(cmd));
	ptype_map = dma_alloc_coherent(&priv->pdev->dev, sizeof(*ptype_map),
				       &ptype_map_bus, GFP_KERNEL);
	if (!ptype_map)
		return -ENOMEM;

	cmd.opcode = cpu_to_be32(GVE_ADMINQ_GET_PTYPE_MAP);
	cmd.get_ptype_map = (struct gve_adminq_get_ptype_map) {
		.ptype_map_len = cpu_to_be64(sizeof(*ptype_map)),
		.ptype_map_addr = cpu_to_be64(ptype_map_bus),
	};

	err = gve_adminq_execute_cmd(priv, &cmd);
	if (err)
		goto err;

	/* Populate ptype_lut. */
	for (i = 0; i < GVE_NUM_PTYPES; i++) {
		ptype_lut->ptypes[i].l3_type =
			ptype_map->ptypes[i].l3_type;
		ptype_lut->ptypes[i].l4_type =
			ptype_map->ptypes[i].l4_type;
	}
err:
	dma_free_coherent(&priv->pdev->dev, sizeof(*ptype_map), ptype_map,
			  ptype_map_bus);
	return err;
}