#include "ena_com.h"

#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_MIN_ADMIN_POLL_US 100

#define ENA_MAX_ADMIN_POLL_US 5000

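/* Lifecycle of an admin command as tracked by the driver: submitted to the
 * admin SQ, completed by the device, or aborted by the driver (e.g. on
 * admin queue teardown).
 */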
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

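/* Split a DMA address into the low/high words of an ena_common_mem_addr,
 * rejecting addresses that exceed the DMA mask advertised by the device.
 */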
static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		netdev_err(ena_dev->net_device,
			   "DMA address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	u16 size = ADMIN_SQ_SIZE(admin_queue->q_depth);

	sq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
					 &sq->dma_addr, GFP_KERNEL);

	if (!sq->entries) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	u16 size = ADMIN_CQ_SIZE(admin_queue->q_depth);

	cq->entries = dma_alloc_coherent(admin_queue->q_dmadev, size,
					 &cq->dma_addr, GFP_KERNEL);

	if (!cq->entries) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

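/* Allocate the async event notification queue (AENQ) and program its base
 * address, depth and entry size into the device registers.
 */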
static int ena_com_admin_init_aenq(struct ena_com_dev *ena_dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	ena_dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_alloc_coherent(ena_dev->dmadev, size,
					   &aenq->dma_addr, GFP_KERNEL);

	if (!aenq->entries) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= ena_dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, ena_dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		netdev_err(ena_dev->net_device,
			   "AENQ handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *admin_queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= admin_queue->q_depth)) {
		netdev_err(admin_queue->ena_dev->net_device,
			   "Command id is larger than the queue size. cmd_id: %u queue size %d\n",
			   command_id, admin_queue->q_depth);
		return NULL;
	}

	if (unlikely(!admin_queue->comp_ctx)) {
		netdev_err(admin_queue->ena_dev->net_device,
			   "Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(admin_queue->comp_ctx[command_id].occupied && capture)) {
		netdev_err(admin_queue->ena_dev->net_device,
			   "Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&admin_queue->outstanding_cmds);
		admin_queue->comp_ctx[command_id].occupied = true;
	}

	return &admin_queue->comp_ctx[command_id];
}

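/* Build and submit one admin command. Called with the admin queue lock held:
 * the phase bit and command id are derived from the producer state, the
 * descriptor is copied into the admin SQ and the doorbell register is rung.
 */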
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
							struct ena_admin_aq_entry *cmd,
							size_t cmd_size_in_bytes,
							struct ena_admin_acq_entry *comp,
							size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		netdev_dbg(admin_queue->ena_dev->net_device,
			   "Admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *admin_queue)
{
	struct ena_com_dev *ena_dev = admin_queue->ena_dev;
	size_t size = admin_queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	admin_queue->comp_ctx =
		devm_kzalloc(admin_queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!admin_queue->comp_ctx)) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}

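/* Allocate submission queue memory for an IO queue. For host placement the
 * descriptor ring lives in DMA-coherent memory (allocated NUMA-aware, with a
 * fallback to the default node); for device placement (LLQ) only bounce
 * buffers are allocated, since descriptors are pushed through the LLQ BAR.
 */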
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
			sizeof(struct ena_eth_io_tx_desc) :
			sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_sq->desc_addr.phys_addr,
					   GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_alloc_coherent(ena_dev->dmadev, size,
						   &io_sq->desc_addr.phys_addr,
						   GFP_KERNEL);
		}

		if (!io_sq->desc_addr.virt_addr) {
			netdev_err(ena_dev->net_device,
				   "Memory allocation failed\n");
			return -ENOMEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->bounce_buf_ctrl.base_buffer =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			netdev_err(ena_dev->net_device,
				   "Bounce buffer memory allocation failed\n");
			return -ENOMEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		/* Initiate the first bounce buffer */
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
		io_sq->disable_meta_caching =
			io_sq->llq_info.disable_meta_caching;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
			sizeof(struct ena_eth_io_tx_cdesc) :
			sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, size,
				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_cq->cdesc_addr.phys_addr,
					   GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		netdev_err(ena_dev->net_device, "Memory allocation failed\n");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		netdev_err(admin_queue->ena_dev->net_device,
			   "comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

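/* Map a device completion status code to a negative errno. Any status the
 * driver does not recognize is reported as -EINVAL.
 */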
static int ena_com_comp_status_to_errno(struct ena_com_admin_queue *admin_queue,
					u8 comp_status)
{
	if (unlikely(comp_status != 0))
		netdev_err(admin_queue->ena_dev->net_device,
			   "Admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	case ENA_ADMIN_RESOURCE_BUSY:
		return -EAGAIN;
	}

	return -EINVAL;
}

static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
	usleep_range(delay_us, 2 * delay_us);
}

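/* Polling-mode wait: repeatedly drain the admin completion queue with an
 * exponential backoff between iterations until the command completes, is
 * aborted, or the completion timeout expires.
 */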
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	unsigned long timeout;
	int ret;
	u32 exp = 0;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			netdev_err(admin_queue->ena_dev->net_device,
				   "Wait for completion (polling) timeout\n");

			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		ena_delay_exponential_backoff_us(exp++,
						 admin_queue->ena_dev->ena_min_poll_delay_us);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		netdev_err(admin_queue->ena_dev->net_device,
			   "Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

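/* Push the negotiated LLQ configuration to the device via SET_FEATURE. */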
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	cmd.u.llq.accel_mode.u.set.enabled_flags =
		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
		BIT(ENA_ADMIN_LIMIT_TX_BURST);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to set LLQ configurations: %d\n", ret);

	return ret;
}

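/* Negotiate the low-latency queue (LLQ) configuration: start from the driver
 * defaults and, for each setting the device does not support, fall back to a
 * supported value (logging the substitution) or fail with -EINVAL.
 */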
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	struct ena_admin_accel_mode_get llq_accel_mode_get;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		netdev_err(ena_dev->net_device,
			   "Invalid header location control, supported: 0x%x\n",
			   supported_feat);
		return -EINVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				netdev_err(ena_dev->net_device,
					   "Invalid desc_stride_ctrl, supported: 0x%x\n",
					   supported_feat);
				return -EINVAL;
			}

			netdev_err(ena_dev->net_device,
				   "Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
				   llq_default_cfg->llq_stride_ctrl,
				   supported_feat, llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			netdev_err(ena_dev->net_device,
				   "Invalid entry_size_ctrl, supported: 0x%x\n",
				   supported_feat);
			return -EINVAL;
		}

		netdev_err(ena_dev->net_device,
			   "Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			   llq_default_cfg->llq_ring_entry_size, supported_feat,
			   llq_info->desc_list_entry_size);
	}
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8.
		 * This requirement comes from the device.
		 */
		netdev_err(ena_dev->net_device, "Illegal entry size %d\n",
			   llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			netdev_err(ena_dev->net_device,
				   "Invalid descs_num_before_header, supported: 0x%x\n",
				   supported_feat);
			return -EINVAL;
		}

		netdev_err(ena_dev->net_device,
			   "Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			   llq_default_cfg->llq_num_decs_before_header,
			   supported_feat, llq_info->descs_num_before_header);
	}

	llq_accel_mode_get = llq_features->accel_mode.u.get;

	llq_info->disable_meta_caching =
		!!(llq_accel_mode_get.supported_flags &
		   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		llq_info->max_entries_in_tx_burst =
			llq_accel_mode_get.max_tx_burst_size /
			llq_default_cfg->llq_ring_entry_size_value;

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		netdev_err(ena_dev->net_device,
			   "Cannot set LLQ configuration: %d\n", rc);

	return rc;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(admin_queue->completion_timeout));

	/* In case the command wasn't completed, find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is a completion but the device didn't get any MSI-X interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			netdev_err(admin_queue->ena_dev->net_device,
				   "The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
				   comp_ctx->cmd_opcode,
				   admin_queue->auto_polling ? "ON" : "OFF");

			/* Fall back to polling if auto-polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			netdev_err(admin_queue->ena_dev->net_device,
				   "The ena device didn't send a completion for the admin cmd %d status %d\n",
				   comp_ctx->cmd_opcode, comp_ctx->status);
		}

		/* If the queue did not shift to polling mode, report the
		 * timeout and mark the admin queue as not running.
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = -ETIME;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(admin_queue, comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

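/* Read a device register. When readless reads are supported the request is
 * posted through the MMIO_REG_READ register and the device DMAs the value
 * into the read response buffer; otherwise a plain readl() of the register
 * BAR is used. Returns ENA_MMIO_READ_TIMEOUT on failure.
 */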
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		netdev_err(ena_dev->net_device,
			   "Reading reg failed for timeout. expected: req id[%u] offset[%u] actual: req id[%u] offset[%u]\n",
			   mmio_read->seq_num, offset, read_resp->req_id,
			   read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		netdev_err(ena_dev->net_device,
			   "Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

/* Wait for an admin command to complete, either by polling the completion
 * queue or by waiting for the completion event signalled from the MSI-X
 * handler, depending on the admin queue mode.
 */
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					      struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		netdev_err(ena_dev->net_device,
			   "Failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_sq->desc_addr.virt_addr,
				  io_sq->desc_addr.phys_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, exp = 0;
	unsigned long timeout_stamp;

	/* Convert timeout from resolution of 100ms to us resolution. */
	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);

	while (1) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			netdev_err(ena_dev->net_device,
				   "Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		if (time_is_before_jiffies(timeout_stamp))
			return -ETIME;

		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
	}
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes is always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

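/* Issue an ENA_ADMIN_GET_FEATURE command. A control buffer may be supplied
 * for features whose response does not fit in the completion entry; in that
 * case the buffer address is passed indirectly.
 */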
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
			   feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to submit get_feature command %d error: %d\n",
			   feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
}

int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
{
	return ena_dev->rss.hash_func;
}

static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		(ena_dev->rss).hash_key;

	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));
	/* The key buffer is stored in the device in an array of
	 * uint32 elements.
	 */
	hash_key->key_parts = ENA_ADMIN_RSS_KEY_PARTS;
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION))
		return -EOPNOTSUPP;

	rss->hash_key =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				   &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		netdev_err(ena_dev->net_device,
			   "Indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
			   1 << log_size, 1 << get_resp.u.ind_table.min_size,
			   1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

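/* Create an IO submission queue on the device. The direction, placement
 * policy and completion policy are encoded into the capability fields of the
 * CREATE_SQ command; on success the doorbell (and, for LLQ, the header and
 * descriptor windows in the memory BAR) are resolved from the completion.
 */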
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			netdev_err(ena_dev->net_device,
				   "Memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device,
			   "Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	netdev_dbg(ena_dev->net_device, "Created sq[%u], depth[%u]\n",
		   io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

	if (unlikely(!intr_delay_resolution)) {
		netdev_err(ena_dev->net_device,
			   "Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	/* update Rx */
	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}

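/* Synchronously execute one admin command and wait for its completion (by
 * polling or via MSI-X, depending on the queue mode). Callers typically zero
 * a command struct on the stack, fill in the opcode and payload, and pass
 * stack completion storage, e.g. (as done by ena_com_set_llq() above):
 *
 *	memset(&cmd, 0x0, sizeof(cmd));
 *	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 *	ret = ena_com_execute_admin_command(admin_queue,
 *					    (struct ena_admin_aq_entry *)&cmd,
 *					    sizeof(cmd),
 *					    (struct ena_admin_acq_entry *)&resp,
 *					    sizeof(resp));
 */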
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		ret = PTR_ERR(comp_ctx);
		if (ret == -ENODEV)
			netdev_dbg(admin_queue->ena_dev->net_device,
				   "Failed to submit command [%d]\n", ret);
		else
			netdev_err(admin_queue->ena_dev->net_device,
				   "Failed to submit command [%d]\n", ret);

		return ret;
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			netdev_err(admin_queue->ena_dev->net_device,
				   "Failed to process command. ret = %d\n", ret);
		else
			netdev_dbg(admin_queue->ena_dev->net_device,
				   "Failed to process command. ret = %d\n", ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device, "Memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		netdev_err(ena_dev->net_device,
			   "Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	netdev_dbg(ena_dev->net_device, "Created cq[%u], depth[%u]\n",
		   io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		netdev_err(ena_dev->net_device,
			   "Invalid queue number %d but the max is %d\n", qid,
			   ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;
	u32 exp = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ena_delay_exponential_backoff_us(exp++,
						 ena_dev->ena_min_poll_delay_us);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		netdev_err(ena_dev->net_device,
			   "Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		dev_info(ena_dev->dmadev, "Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		netdev_warn(ena_dev->net_device,
			    "Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			    get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		netdev_err(ena_dev->net_device,
			   "Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	u32 width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	netdev_dbg(ena_dev->net_device, "ENA dma width: %d\n", width);

	if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
		netdev_err(ena_dev->net_device, "DMA width illegal value: %d\n",
			   width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * the version the driver supports
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
		return -ETIME;
	}

	dev_info(ena_dev->dmadev, "ENA device version: %d.%d\n",
		 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	dev_info(ena_dev->dmadev,
		 "ENA controller version: %d.%d.%d implementation version %d\n",
		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version without the implementation ID */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		netdev_err(ena_dev->net_device,
			   "ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

static void
ena_com_free_ena_admin_queue_comp_ctx(struct ena_com_dev *ena_dev,
				      struct ena_com_admin_queue *admin_queue)
{
	if (!admin_queue->comp_ctx)
		return;

	devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);

	admin_queue->comp_ctx = NULL;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	ena_com_free_ena_admin_queue_comp_ctx(ena_dev, admin_queue);

	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}

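/* Allocate the DMA-coherent buffer used for readless register reads and
 * program its address into the device. Readless mode can later be disabled
 * with ena_com_set_mmio_read_mode().
 */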
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(ena_dev->dmadev,
				   sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:

	return -ENOMEM;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

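/* Bring up the admin queue: verify the device is ready, allocate the
 * completion contexts, admin SQ/CQ and AENQ, and program their base
 * addresses and capability registers.
 */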
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		netdev_err(ena_dev->net_device, "Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		netdev_err(ena_dev->net_device,
			   "Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->ena_dev = ena_dev;
	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

1870int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1871 struct ena_com_create_io_ctx *ctx)
1872{
1873 struct ena_com_io_sq *io_sq;
1874 struct ena_com_io_cq *io_cq;
1875 int ret;
1876
1877 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1878 netdev_err(ena_dev->net_device,
1879 "Qid (%d) is bigger than max num of queues (%d)\n",
1880 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1881 return -EINVAL;
1882 }
1883
1884 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1885 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1886
1887 memset(io_sq, 0x0, sizeof(*io_sq));
1888 memset(io_cq, 0x0, sizeof(*io_cq));
1889
1890
1891 io_cq->q_depth = ctx->queue_size;
1892 io_cq->direction = ctx->direction;
1893 io_cq->qid = ctx->qid;
1894
1895 io_cq->msix_vector = ctx->msix_vector;
1896
1897 io_sq->q_depth = ctx->queue_size;
1898 io_sq->direction = ctx->direction;
1899 io_sq->qid = ctx->qid;
1900
1901 io_sq->mem_queue_type = ctx->mem_queue_type;
1902
1903 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1904 /* the inline TX header size is capped at SZ_256 bytes */
1905 io_sq->tx_max_header_size =
1906 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1907
1908 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1909 if (ret)
1910 goto error;
1911 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1912 if (ret)
1913 goto error;
1914
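 /* Create the CQ first - its index is needed when creating the SQ */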
1915 ret = ena_com_create_io_cq(ena_dev, io_cq);
1916 if (ret)
1917 goto error;
1918
1919 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1920 if (ret)
1921 goto destroy_io_cq;
1922
1923 return 0;
1924
1925destroy_io_cq:
1926 ena_com_destroy_io_cq(ena_dev, io_cq);
1927error:
1928 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1929 return ret;
1930}
1931
1932void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1933{
1934 struct ena_com_io_sq *io_sq;
1935 struct ena_com_io_cq *io_cq;
1936
1937 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1938 netdev_err(ena_dev->net_device,
1939 "Qid (%d) is bigger than max num of queues (%d)\n",
1940 qid, ENA_TOTAL_NUM_QUEUES);
1941 return;
1942 }
1943
1944 io_sq = &ena_dev->io_sq_queues[qid];
1945 io_cq = &ena_dev->io_cq_queues[qid];
1946
1947 ena_com_destroy_io_sq(ena_dev, io_sq);
1948 ena_com_destroy_io_cq(ena_dev, io_cq);
1949
1950 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1951}
1952
1953int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1954 struct ena_admin_get_feat_resp *resp)
1955{
1956 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1957}
1958
1959int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1960 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1961{
1962 struct ena_admin_get_feat_resp get_resp;
1963 int rc;
1964
1965 rc = ena_com_get_feature(ena_dev, &get_resp,
1966 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1967 if (rc)
1968 return rc;
1969
1970 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1971 sizeof(get_resp.u.dev_attr));
1972
1973 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1974
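 /* Prefer the extended max-queues feature when the device advertises it */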
1975 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1976 rc = ena_com_get_feature(ena_dev, &get_resp,
1977 ENA_ADMIN_MAX_QUEUES_EXT,
1978 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1979 if (rc)
1980 return rc;
1981
1982 if (get_resp.u.max_queue_ext.version !=
1983 ENA_FEATURE_MAX_QUEUE_EXT_VER)
1984 return -EINVAL;
1985
1986 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1987 sizeof(get_resp.u.max_queue_ext));
1988 ena_dev->tx_max_header_size =
1989 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1990 } else {
1991 rc = ena_com_get_feature(ena_dev, &get_resp,
1992 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1993 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1994 sizeof(get_resp.u.max_queue));
1995 ena_dev->tx_max_header_size =
1996 get_resp.u.max_queue.max_header_size;
1997
1998 if (rc)
1999 return rc;
2000 }
2001
2002 rc = ena_com_get_feature(ena_dev, &get_resp,
2003 ENA_ADMIN_AENQ_CONFIG, 0);
2004 if (rc)
2005 return rc;
2006
2007 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
2008 sizeof(get_resp.u.aenq));
2009
2010 rc = ena_com_get_feature(ena_dev, &get_resp,
2011 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2012 if (rc)
2013 return rc;
2014
2015 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
2016 sizeof(get_resp.u.offload));
2017
2018 /* HW hints and LLQ are optional features: when the device reports
2019  * -EOPNOTSUPP for them, zeroed defaults are used instead of failing.
2020  */
2021 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
2022
2023 if (!rc)
2024 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
2025 sizeof(get_resp.u.hw_hints));
2026 else if (rc == -EOPNOTSUPP)
2027 memset(&get_feat_ctx->hw_hints, 0x0,
2028 sizeof(get_feat_ctx->hw_hints));
2029 else
2030 return rc;
2031
2032 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
2033 if (!rc)
2034 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
2035 sizeof(get_resp.u.llq));
2036 else if (rc == -EOPNOTSUPP)
2037 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
2038 else
2039 return rc;
2040
2041 return 0;
2042}
2043
2044void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2045{
2046 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2047}
2048
2049/* ena_com_get_specific_aenq_cb - return the AENQ handler registered for
2050 * the given event group, or the unimplemented handler if none exists.
2051 */
2052static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *ena_dev,
2053 u16 group)
2054{
2055 struct ena_aenq_handlers *aenq_handlers = ena_dev->aenq.aenq_handlers;
2056
2057 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2058 return aenq_handlers->handlers[group];
2059
2060 return aenq_handlers->unimplemented_handler;
2061}
2062
2063/* ena_com_aenq_intr_handler - go over all the pending AENQ events, call
2064 * the registered handler for each one and update the AENQ head doorbell.
2065 * @data: opaque pointer passed through to the registered AENQ handlers.
2066 */
2067void ena_com_aenq_intr_handler(struct ena_com_dev *ena_dev, void *data)
2068{
2069 struct ena_admin_aenq_entry *aenq_e;
2070 struct ena_admin_aenq_common_desc *aenq_common;
2071 struct ena_com_aenq *aenq = &ena_dev->aenq;
2072 u64 timestamp;
2073 ena_aenq_handler handler_cb;
2074 u16 masked_head, processed = 0;
2075 u8 phase;
2076
2077 masked_head = aenq->head & (aenq->q_depth - 1);
2078 phase = aenq->phase;
2079 aenq_e = &aenq->entries[masked_head];
2080 aenq_common = &aenq_e->aenq_common_desc;
2081
2082 /* Go over all the pending events */
2083 while ((READ_ONCE(aenq_common->flags) &
2084 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2085
2086 /* Make sure the device finished writing the descriptor before
2087  * reading the rest of it (the phase bit is written last). */
2088 dma_rmb();
2089
2090 timestamp = (u64)aenq_common->timestamp_low |
2091 ((u64)aenq_common->timestamp_high << 32);
2092
2093 netdev_dbg(ena_dev->net_device,
2094 "AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
2095 aenq_common->group, aenq_common->syndrome, timestamp);
2096
2097 /* Dispatch the event to the handler registered for its group */
2098 handler_cb = ena_com_get_specific_aenq_cb(ena_dev,
2099 aenq_common->group);
2100 handler_cb(data, aenq_e);
2101
2102 /* Get the next event entry */
2103 masked_head++;
2104 processed++;
2105
2106 if (unlikely(masked_head == aenq->q_depth)) {
2107 masked_head = 0;
2108 phase = !phase;
2109 }
2110 aenq_e = &aenq->entries[masked_head];
2111 aenq_common = &aenq_e->aenq_common_desc;
2112 }
2113
2114 aenq->head += processed;
2115 aenq->phase = phase;
2116
2117 /* Don't write the AENQ doorbell if no events were processed */
2118 if (!processed)
2119 return;
2120
2121 /* Barrier: complete the descriptor reads before the relaxed doorbell write */
2122 mb();
2123 writel_relaxed((u32)aenq->head,
2124 ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2125}
2126
2127int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2128 enum ena_regs_reset_reason_types reset_reason)
2129{
2130 u32 stat, timeout, cap, reset_val;
2131 int rc;
2132
2133 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2134 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2135
2136 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2137 (cap == ENA_MMIO_READ_TIMEOUT))) {
2138 netdev_err(ena_dev->net_device, "Reg read32 timeout occurred\n");
2139 return -ETIME;
2140 }
2141
2142 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2143 netdev_err(ena_dev->net_device,
2144 "Device isn't ready, can't reset device\n");
2145 return -EINVAL;
2146 }
2147
2148 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2149 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2150 if (timeout == 0) {
2151 netdev_err(ena_dev->net_device, "Invalid timeout value\n");
2152 return -EINVAL;
2153 }
2154
2155 /* Start the reset: set the reset bit and encode the reset reason */
2156 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2157 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2158 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2159 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2160
2161 /* Write the MMIO read response address to the device again */
2162 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2163
2164 rc = wait_for_reset_state(ena_dev, timeout,
2165 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2166 if (rc != 0) {
2167 netdev_err(ena_dev->net_device,
2168 "Reset indication didn't turn on\n");
2169 return rc;
2170 }
2171
2172 /* Clear the reset request and wait for the reset indication to turn off */
2173 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2174 rc = wait_for_reset_state(ena_dev, timeout, 0);
2175 if (rc != 0) {
2176 netdev_err(ena_dev->net_device,
2177 "Reset indication didn't turn off\n");
2178 return rc;
2179 }
2180
2181 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2182 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2183 if (timeout)
2184 /* the device reports this timeout in units of 100ms */
2185 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2186 else
2187 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2188
2189 return 0;
2190}
2191
2192static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2193 struct ena_com_stats_ctx *ctx,
2194 enum ena_admin_get_stats_type type)
2195{
2196 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2197 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2198 struct ena_com_admin_queue *admin_queue;
2199 int ret;
2200
2201 admin_queue = &ena_dev->admin_queue;
2202
2203 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2204 get_cmd->aq_common_descriptor.flags = 0;
2205 get_cmd->type = type;
2206
2207 ret = ena_com_execute_admin_command(admin_queue,
2208 (struct ena_admin_aq_entry *)get_cmd,
2209 sizeof(*get_cmd),
2210 (struct ena_admin_acq_entry *)get_resp,
2211 sizeof(*get_resp));
2212
2213 if (unlikely(ret))
2214 netdev_err(ena_dev->net_device,
2215 "Failed to get stats. error: %d\n", ret);
2216
2217 return ret;
2218}
2219
2220int ena_com_get_eni_stats(struct ena_com_dev *ena_dev,
2221 struct ena_admin_eni_stats *stats)
2222{
2223 struct ena_com_stats_ctx ctx;
2224 int ret;
2225
2226 memset(&ctx, 0x0, sizeof(ctx));
2227 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_ENI);
2228 if (likely(ret == 0))
2229 memcpy(stats, &ctx.get_resp.u.eni_stats,
2230 sizeof(ctx.get_resp.u.eni_stats));
2231
2232 return ret;
2233}
2234
2235int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2236 struct ena_admin_basic_stats *stats)
2237{
2238 struct ena_com_stats_ctx ctx;
2239 int ret;
2240
2241 memset(&ctx, 0x0, sizeof(ctx));
2242 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2243 if (likely(ret == 0))
2244 memcpy(stats, &ctx.get_resp.u.basic_stats,
2245 sizeof(ctx.get_resp.u.basic_stats));
2246
2247 return ret;
2248}
2249
2250int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, u32 mtu)
2251{
2252 struct ena_com_admin_queue *admin_queue;
2253 struct ena_admin_set_feat_cmd cmd;
2254 struct ena_admin_set_feat_resp resp;
2255 int ret;
2256
2257 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2258 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2259 ENA_ADMIN_MTU);
2260 return -EOPNOTSUPP;
2261 }
2262
2263 memset(&cmd, 0x0, sizeof(cmd));
2264 admin_queue = &ena_dev->admin_queue;
2265
2266 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2267 cmd.aq_common_descriptor.flags = 0;
2268 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2269 cmd.u.mtu.mtu = mtu;
2270
2271 ret = ena_com_execute_admin_command(admin_queue,
2272 (struct ena_admin_aq_entry *)&cmd,
2273 sizeof(cmd),
2274 (struct ena_admin_acq_entry *)&resp,
2275 sizeof(resp));
2276
2277 if (unlikely(ret))
2278 netdev_err(ena_dev->net_device,
2279 "Failed to set mtu %d. error: %d\n", mtu, ret);
2280
2281 return ret;
2282}
2283
2284int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2285 struct ena_admin_feature_offload_desc *offload)
2286{
2287 int ret;
2288 struct ena_admin_get_feat_resp resp;
2289
2290 ret = ena_com_get_feature(ena_dev, &resp,
2291 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2292 if (unlikely(ret)) {
2293 netdev_err(ena_dev->net_device,
2294 "Failed to get offload capabilities %d\n", ret);
2295 return ret;
2296 }
2297
2298 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2299
2300 return 0;
2301}
2302
2303int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2304{
2305 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2306 struct ena_rss *rss = &ena_dev->rss;
2307 struct ena_admin_set_feat_cmd cmd;
2308 struct ena_admin_set_feat_resp resp;
2309 struct ena_admin_get_feat_resp get_resp;
2310 int ret;
2311
2312 if (!ena_com_check_supported_feature_id(ena_dev,
2313 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2314 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2315 ENA_ADMIN_RSS_HASH_FUNCTION);
2316 return -EOPNOTSUPP;
2317 }
2318
2319 /* Validate that the requested hash function is supported by the device */
2320 ret = ena_com_get_feature(ena_dev, &get_resp,
2321 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2322 if (unlikely(ret))
2323 return ret;
2324
2325 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2326 netdev_err(ena_dev->net_device,
2327 "Func hash %d isn't supported by device, abort\n",
2328 rss->hash_func);
2329 return -EOPNOTSUPP;
2330 }
2331
2332 memset(&cmd, 0x0, sizeof(cmd));
2333
2334 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2335 cmd.aq_common_descriptor.flags =
2336 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2337 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2338 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2339 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2340
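 /* The hash key is passed to the device through an indirect control buffer */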
2341 ret = ena_com_mem_addr_set(ena_dev,
2342 &cmd.control_buffer.address,
2343 rss->hash_key_dma_addr);
2344 if (unlikely(ret)) {
2345 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2346 return ret;
2347 }
2348
2349 cmd.control_buffer.length = sizeof(*rss->hash_key);
2350
2351 ret = ena_com_execute_admin_command(admin_queue,
2352 (struct ena_admin_aq_entry *)&cmd,
2353 sizeof(cmd),
2354 (struct ena_admin_acq_entry *)&resp,
2355 sizeof(resp));
2356 if (unlikely(ret)) {
2357 netdev_err(ena_dev->net_device,
2358 "Failed to set hash function %d. error: %d\n",
2359 rss->hash_func, ret);
2360 return -EINVAL;
2361 }
2362
2363 return 0;
2364}
2365
2366int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2367 enum ena_admin_hash_functions func,
2368 const u8 *key, u16 key_len, u32 init_val)
2369{
2370 struct ena_admin_feature_rss_flow_hash_control *hash_key;
2371 struct ena_admin_get_feat_resp get_resp;
2372 enum ena_admin_hash_functions old_func;
2373 struct ena_rss *rss = &ena_dev->rss;
2374 int rc;
2375
2376 hash_key = rss->hash_key;
2377
2378 /* Make sure the key length is a multiple of DWORDs (4 bytes) */
2379 if (unlikely(key_len & 0x3))
2380 return -EINVAL;
2381
2382 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2383 ENA_ADMIN_RSS_HASH_FUNCTION,
2384 rss->hash_key_dma_addr,
2385 sizeof(*rss->hash_key), 0);
2386 if (unlikely(rc))
2387 return rc;
2388
2389 if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2390 netdev_err(ena_dev->net_device,
2391 "Flow hash function %d isn't supported\n", func);
2392 return -EOPNOTSUPP;
2393 }
2394
2395 switch (func) {
2396 case ENA_ADMIN_TOEPLITZ:
2397 if (key) {
2398 if (key_len != sizeof(hash_key->key)) {
2399 netdev_err(ena_dev->net_device,
2400 "key len (%u) doesn't equal the supported size (%zu)\n",
2401 key_len, sizeof(hash_key->key));
2402 return -EINVAL;
2403 }
2404 memcpy(hash_key->key, key, key_len);
2405 rss->hash_init_val = init_val;
2406 hash_key->key_parts = key_len / sizeof(hash_key->key[0]);
2407 }
2408 break;
2409 case ENA_ADMIN_CRC32:
2410 rss->hash_init_val = init_val;
2411 break;
2412 default:
2413 netdev_err(ena_dev->net_device, "Invalid hash function (%d)\n",
2414 func);
2415 return -EINVAL;
2416 }
2417
2418 old_func = rss->hash_func;
2419 rss->hash_func = func;
2420 rc = ena_com_set_hash_function(ena_dev);
2421
2422 /* Restore the old function in case of failure */
2423 if (unlikely(rc))
2424 rss->hash_func = old_func;
2425
2426 return rc;
2427}
2428
2429int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2430 enum ena_admin_hash_functions *func)
2431{
2432 struct ena_rss *rss = &ena_dev->rss;
2433 struct ena_admin_get_feat_resp get_resp;
2434 int rc;
2435
2436 if (unlikely(!func))
2437 return -EINVAL;
2438
2439 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2440 ENA_ADMIN_RSS_HASH_FUNCTION,
2441 rss->hash_key_dma_addr,
2442 sizeof(*rss->hash_key), 0);
2443 if (unlikely(rc))
2444 return rc;
2445
2446 /* ffs() is 1-based; convert it to a 0-based hash function index */
2447 rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2448 if (rss->hash_func)
2449 rss->hash_func--;
2450
2451 *func = rss->hash_func;
2452
2453 return 0;
2454}
2455
2456int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2457{
2458 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2459 ena_dev->rss.hash_key;
2460
2461 if (key)
2462 memcpy(key, hash_key->key,
2463 (size_t)(hash_key->key_parts) * sizeof(hash_key->key[0]));
2464
2465 return 0;
2466}
2467
2468int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2469 enum ena_admin_flow_hash_proto proto,
2470 u16 *fields)
2471{
2472 struct ena_rss *rss = &ena_dev->rss;
2473 struct ena_admin_get_feat_resp get_resp;
2474 int rc;
2475
2476 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2477 ENA_ADMIN_RSS_HASH_INPUT,
2478 rss->hash_ctrl_dma_addr,
2479 sizeof(*rss->hash_ctrl), 0);
2480 if (unlikely(rc))
2481 return rc;
2482
2483 if (fields)
2484 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2485
2486 return 0;
2487}
2488
2489int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2490{
2491 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2492 struct ena_rss *rss = &ena_dev->rss;
2493 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2494 struct ena_admin_set_feat_cmd cmd;
2495 struct ena_admin_set_feat_resp resp;
2496 int ret;
2497
2498 if (!ena_com_check_supported_feature_id(ena_dev,
2499 ENA_ADMIN_RSS_HASH_INPUT)) {
2500 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2501 ENA_ADMIN_RSS_HASH_INPUT);
2502 return -EOPNOTSUPP;
2503 }
2504
2505 memset(&cmd, 0x0, sizeof(cmd));
2506
2507 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2508 cmd.aq_common_descriptor.flags =
2509 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2510 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2511 cmd.u.flow_hash_input.enabled_input_sort =
2512 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2513 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2514
2515 ret = ena_com_mem_addr_set(ena_dev,
2516 &cmd.control_buffer.address,
2517 rss->hash_ctrl_dma_addr);
2518 if (unlikely(ret)) {
2519 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2520 return ret;
2521 }
2522 cmd.control_buffer.length = sizeof(*hash_ctrl);
2523
2524 ret = ena_com_execute_admin_command(admin_queue,
2525 (struct ena_admin_aq_entry *)&cmd,
2526 sizeof(cmd),
2527 (struct ena_admin_acq_entry *)&resp,
2528 sizeof(resp));
2529 if (unlikely(ret))
2530 netdev_err(ena_dev->net_device,
2531 "Failed to set hash input. error: %d\n", ret);
2532
2533 return ret;
2534}
2535
2536int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2537{
2538 struct ena_rss *rss = &ena_dev->rss;
2539 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2540 rss->hash_ctrl;
2541 u16 available_fields = 0;
2542 int rc, i;
2543
2544 /* Fetch the current hash control (and supported fields) from the device */
2545 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2546 if (unlikely(rc))
2547 return rc;
2548
2549 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2550 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2551 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2552
2553 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2554 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2555 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2556
2557 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2558 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2559 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2560
2561 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2562 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2563 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2564
2565 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2566 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2567
2568 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2569 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2570
2571 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2572 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2573
2574 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2575 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2576
2577 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2578 available_fields = hash_ctrl->selected_fields[i].fields &
2579 hash_ctrl->supported_fields[i].fields;
2580 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2581 netdev_err(ena_dev->net_device,
2582 "Hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2583 i, hash_ctrl->supported_fields[i].fields,
2584 hash_ctrl->selected_fields[i].fields);
2585 return -EOPNOTSUPP;
2586 }
2587 }
2588
2589 rc = ena_com_set_hash_ctrl(ena_dev);
2590
2591 /* In case of failure, restore the old hash control */
2592 if (unlikely(rc))
2593 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2594
2595 return rc;
2596}
2597
2598int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2599 enum ena_admin_flow_hash_proto proto,
2600 u16 hash_fields)
2601{
2602 struct ena_rss *rss = &ena_dev->rss;
2603 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2604 u16 supported_fields;
2605 int rc;
2606
2607 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2608 netdev_err(ena_dev->net_device, "Invalid proto num (%u)\n",
2609 proto);
2610 return -EINVAL;
2611 }
2612
2613 /* Get the current hash control from the device */
2614 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2615 if (unlikely(rc))
2616 return rc;
2617
2618 /* Warn (but don't fail) if some of the requested fields aren't supported */
2619 supported_fields = hash_ctrl->supported_fields[proto].fields;
2620 if ((hash_fields & supported_fields) != hash_fields) {
2621 netdev_err(ena_dev->net_device,
2622 "Proto %d doesn't support the required fields %x. supports only: %x\n",
2623 proto, hash_fields, supported_fields);
2624 }
2625
2626 hash_ctrl->selected_fields[proto].fields = hash_fields;
2627
2628 rc = ena_com_set_hash_ctrl(ena_dev);
2629
2630 /* In case of failure, restore the old hash control */
2631 if (unlikely(rc))
2632 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2633
2634 return 0;
2635}
2636
2637int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2638 u16 entry_idx, u16 entry_value)
2639{
2640 struct ena_rss *rss = &ena_dev->rss;
2641
2642 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2643 return -EINVAL;
2644
2645 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2646 return -EINVAL;
2647
2648 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2649
2650 return 0;
2651}
2652
2653int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2654{
2655 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2656 struct ena_rss *rss = &ena_dev->rss;
2657 struct ena_admin_set_feat_cmd cmd;
2658 struct ena_admin_set_feat_resp resp;
2659 int ret;
2660
2661 if (!ena_com_check_supported_feature_id(
2662 ena_dev, ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG)) {
2663 netdev_dbg(ena_dev->net_device, "Feature %d isn't supported\n",
2664 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG);
2665 return -EOPNOTSUPP;
2666 }
2667
2668 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2669 if (ret) {
2670 netdev_err(ena_dev->net_device,
2671 "Failed to convert host indirection table to device table\n");
2672 return ret;
2673 }
2674
2675 memset(&cmd, 0x0, sizeof(cmd));
2676
2677 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2678 cmd.aq_common_descriptor.flags =
2679 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2680 cmd.feat_common.feature_id = ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG;
2681 cmd.u.ind_table.size = rss->tbl_log_size;
2682 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2683
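 /* The indirection table is passed to the device through an indirect control buffer */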
2684 ret = ena_com_mem_addr_set(ena_dev,
2685 &cmd.control_buffer.address,
2686 rss->rss_ind_tbl_dma_addr);
2687 if (unlikely(ret)) {
2688 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2689 return ret;
2690 }
2691
2692 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2693 sizeof(struct ena_admin_rss_ind_table_entry);
2694
2695 ret = ena_com_execute_admin_command(admin_queue,
2696 (struct ena_admin_aq_entry *)&cmd,
2697 sizeof(cmd),
2698 (struct ena_admin_acq_entry *)&resp,
2699 sizeof(resp));
2700
2701 if (unlikely(ret))
2702 netdev_err(ena_dev->net_device,
2703 "Failed to set indirect table. error: %d\n", ret);
2704
2705 return ret;
2706}
2707
2708int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2709{
2710 struct ena_rss *rss = &ena_dev->rss;
2711 struct ena_admin_get_feat_resp get_resp;
2712 u32 tbl_size;
2713 int i, rc;
2714
2715 tbl_size = (1ULL << rss->tbl_log_size) *
2716 sizeof(struct ena_admin_rss_ind_table_entry);
2717
2718 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2719 ENA_ADMIN_RSS_INDIRECTION_TABLE_CONFIG,
2720 rss->rss_ind_tbl_dma_addr,
2721 tbl_size, 0);
2722 if (unlikely(rc))
2723 return rc;
2724
2725 if (!ind_tbl)
2726 return 0;
2727
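 /* Copy the host copy of the indirection table to the caller's buffer */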
2728 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2729 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2730
2731 return 0;
2732}
2733
2734int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2735{
2736 int rc;
2737
2738 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2739
2740 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2741 if (unlikely(rc))
2742 goto err_indr_tbl;
2743
2744 /* The device may not support configuring the RSS hash key/function;
2745  * ena_com_hash_key_allocate() then returns -EOPNOTSUPP, which is
2746  * tolerated here so that indirection table support is kept.
2747  */
2748 rc = ena_com_hash_key_allocate(ena_dev);
2749 if (likely(!rc))
2750 ena_com_hash_key_fill_default_key(ena_dev);
2751 else if (rc != -EOPNOTSUPP)
2752 goto err_hash_key;
2753
2754 rc = ena_com_hash_ctrl_init(ena_dev);
2755 if (unlikely(rc))
2756 goto err_hash_ctrl;
2757
2758 return 0;
2759
2760err_hash_ctrl:
2761 ena_com_hash_key_destroy(ena_dev);
2762err_hash_key:
2763 ena_com_indirect_table_destroy(ena_dev);
2764err_indr_tbl:
2765
2766 return rc;
2767}
2768
2769void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2770{
2771 ena_com_indirect_table_destroy(ena_dev);
2772 ena_com_hash_key_destroy(ena_dev);
2773 ena_com_hash_ctrl_destroy(ena_dev);
2774
2775 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2776}
2777
2778int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2779{
2780 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2781
2782 host_attr->host_info =
2783 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2784 &host_attr->host_info_dma_addr, GFP_KERNEL);
2785 if (unlikely(!host_attr->host_info))
2786 return -ENOMEM;
2787
2788 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2789 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2790 (ENA_COMMON_SPEC_VERSION_MINOR));
2791
2792 return 0;
2793}
2794
2795int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2796 u32 debug_area_size)
2797{
2798 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2799
2800 host_attr->debug_area_virt_addr =
2801 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2802 &host_attr->debug_area_dma_addr, GFP_KERNEL);
2803 if (unlikely(!host_attr->debug_area_virt_addr)) {
2804 host_attr->debug_area_size = 0;
2805 return -ENOMEM;
2806 }
2807
2808 host_attr->debug_area_size = debug_area_size;
2809
2810 return 0;
2811}
2812
2813void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2814{
2815 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2816
2817 if (host_attr->host_info) {
2818 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2819 host_attr->host_info_dma_addr);
2820 host_attr->host_info = NULL;
2821 }
2822}
2823
2824void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2825{
2826 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2827
2828 if (host_attr->debug_area_virt_addr) {
2829 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2830 host_attr->debug_area_virt_addr,
2831 host_attr->debug_area_dma_addr);
2832 host_attr->debug_area_virt_addr = NULL;
2833 }
2834}
2835
2836int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2837{
2838 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2839 struct ena_com_admin_queue *admin_queue;
2840 struct ena_admin_set_feat_cmd cmd;
2841 struct ena_admin_set_feat_resp resp;
2842
2843 int ret;
2844
2845 /* Unlike other set-feature helpers there is no
2846  * ena_com_check_supported_feature_id() call here: host attributes are
2847  * configured before the device features are queried.
2848  */
2849 memset(&cmd, 0x0, sizeof(cmd));
2850 admin_queue = &ena_dev->admin_queue;
2851
2852 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2853 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2854
2855 ret = ena_com_mem_addr_set(ena_dev,
2856 &cmd.u.host_attr.debug_ba,
2857 host_attr->debug_area_dma_addr);
2858 if (unlikely(ret)) {
2859 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2860 return ret;
2861 }
2862
2863 ret = ena_com_mem_addr_set(ena_dev,
2864 &cmd.u.host_attr.os_info_ba,
2865 host_attr->host_info_dma_addr);
2866 if (unlikely(ret)) {
2867 netdev_err(ena_dev->net_device, "Memory address set failed\n");
2868 return ret;
2869 }
2870
2871 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2872
2873 ret = ena_com_execute_admin_command(admin_queue,
2874 (struct ena_admin_aq_entry *)&cmd,
2875 sizeof(cmd),
2876 (struct ena_admin_acq_entry *)&resp,
2877 sizeof(resp));
2878
2879 if (unlikely(ret))
2880 netdev_err(ena_dev->net_device,
2881 "Failed to set host attributes: %d\n", ret);
2882
2883 return ret;
2884}
2885
2886/* Interrupt moderation */
2887bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2888{
2889 return ena_com_check_supported_feature_id(ena_dev,
2890 ENA_ADMIN_INTERRUPT_MODERATION);
2891}
2892
2893static int ena_com_update_nonadaptive_moderation_interval(struct ena_com_dev *ena_dev,
2894 u32 coalesce_usecs,
2895 u32 intr_delay_resolution,
2896 u32 *intr_moder_interval)
2897{
2898 if (!intr_delay_resolution) {
2899 netdev_err(ena_dev->net_device,
2900 "Illegal interrupt delay granularity value\n");
2901 return -EFAULT;
2902 }
2903
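 /* Convert microseconds to device interrupt moderation interval units */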
2904 *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2905
2906 return 0;
2907}
2908
2909int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2910 u32 tx_coalesce_usecs)
2911{
2912 return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2913 tx_coalesce_usecs,
2914 ena_dev->intr_delay_resolution,
2915 &ena_dev->intr_moder_tx_interval);
2916}
2917
2918int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2919 u32 rx_coalesce_usecs)
2920{
2921 return ena_com_update_nonadaptive_moderation_interval(ena_dev,
2922 rx_coalesce_usecs,
2923 ena_dev->intr_delay_resolution,
2924 &ena_dev->intr_moder_rx_interval);
2925}
2926
2927int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2928{
2929 struct ena_admin_get_feat_resp get_resp;
2930 u16 delay_resolution;
2931 int rc;
2932
2933 rc = ena_com_get_feature(ena_dev, &get_resp,
2934 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2935
2936 if (rc) {
2937 if (rc == -EOPNOTSUPP) {
2938 netdev_dbg(ena_dev->net_device,
2939 "Feature %d isn't supported\n",
2940 ENA_ADMIN_INTERRUPT_MODERATION);
2941 rc = 0;
2942 } else {
2943 netdev_err(ena_dev->net_device,
2944 "Failed to get interrupt moderation admin cmd. rc: %d\n",
2945 rc);
2946 }
2947
2948 /* Interrupt moderation is unavailable - make sure adaptive moderation is disabled */
2949 ena_com_disable_adaptive_moderation(ena_dev);
2950 return rc;
2951 }
2952
2953 /* Moderation is supported - save the delay resolution reported by the device */
2954 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2955 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2956
2957 /* Start with adaptive interrupt moderation disabled */
2958 ena_com_disable_adaptive_moderation(ena_dev);
2959
2960 return 0;
2961}
2962
2963unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2964{
2965 return ena_dev->intr_moder_tx_interval;
2966}
2967
2968unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2969{
2970 return ena_dev->intr_moder_rx_interval;
2971}
2972
2973int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2974 struct ena_admin_feature_llq_desc *llq_features,
2975 struct ena_llq_configurations *llq_default_cfg)
2976{
2977 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2978 int rc;
2979
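 /* Without LLQ support, TX descriptors are placed in host memory */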
2980 if (!llq_features->max_llq_num) {
2981 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2982 return 0;
2983 }
2984
2985 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2986 if (rc)
2987 return rc;
2988
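 /* Max pushed TX header = LLQ entry size minus the descriptors placed before it */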
2989 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2990 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2991
2992 if (unlikely(ena_dev->tx_max_header_size == 0)) {
2993 netdev_err(ena_dev->net_device,
2994 "The size of the LLQ entry is smaller than needed\n");
2995 return -EINVAL;
2996 }
2997
2998 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2999
3000 return 0;
3001}
3002