1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33#include "ena_com.h"
34
35
36
37
38
39#define ADMIN_CMD_TIMEOUT_US (3000000)
40
41#define ENA_ASYNC_QUEUE_DEPTH 16
42#define ENA_ADMIN_QUEUE_DEPTH 32
43
44
45#define ENA_CTRL_MAJOR 0
46#define ENA_CTRL_MINOR 0
47#define ENA_CTRL_SUB_MINOR 1
48
49#define MIN_ENA_CTRL_VER \
50 (((ENA_CTRL_MAJOR) << \
51 (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
52 ((ENA_CTRL_MINOR) << \
53 (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
54 (ENA_CTRL_SUB_MINOR))
55
56#define ENA_DMA_ADDR_TO_UINT32_LOW(x) ((u32)((u64)(x)))
57#define ENA_DMA_ADDR_TO_UINT32_HIGH(x) ((u32)(((u64)(x)) >> 32))
58
59#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF
60
61#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4
62
63#define ENA_REGS_ADMIN_INTR_MASK 1
64
65#define ENA_POLL_MS 5
66
67
68
69
70
71enum ena_cmd_status {
72 ENA_CMD_SUBMITTED,
73 ENA_CMD_COMPLETED,
74
75 ENA_CMD_ABORTED,
76};
77
78struct ena_comp_ctx {
79 struct completion wait_event;
80 struct ena_admin_acq_entry *user_cqe;
81 u32 comp_size;
82 enum ena_cmd_status status;
83
84 u8 comp_status;
85 u8 cmd_opcode;
86 bool occupied;
87};
88
89struct ena_com_stats_ctx {
90 struct ena_admin_aq_get_stats_cmd get_cmd;
91 struct ena_admin_acq_get_stats_resp get_resp;
92};
93
94static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
95 struct ena_common_mem_addr *ena_addr,
96 dma_addr_t addr)
97{
98 if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
99 pr_err("dma address has more bits that the device supports\n");
100 return -EINVAL;
101 }
102
103 ena_addr->mem_addr_low = lower_32_bits(addr);
104 ena_addr->mem_addr_high = (u16)upper_32_bits(addr);
105
106 return 0;
107}
108
109static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
110{
111 struct ena_com_admin_sq *sq = &queue->sq;
112 u16 size = ADMIN_SQ_SIZE(queue->q_depth);
113
114 sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
115 GFP_KERNEL);
116
117 if (!sq->entries) {
118 pr_err("memory allocation failed\n");
119 return -ENOMEM;
120 }
121
122 sq->head = 0;
123 sq->tail = 0;
124 sq->phase = 1;
125
126 sq->db_addr = NULL;
127
128 return 0;
129}
130
131static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
132{
133 struct ena_com_admin_cq *cq = &queue->cq;
134 u16 size = ADMIN_CQ_SIZE(queue->q_depth);
135
136 cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
137 GFP_KERNEL);
138
139 if (!cq->entries) {
140 pr_err("memory allocation failed\n");
141 return -ENOMEM;
142 }
143
144 cq->head = 0;
145 cq->phase = 1;
146
147 return 0;
148}
149
150static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
151 struct ena_aenq_handlers *aenq_handlers)
152{
153 struct ena_com_aenq *aenq = &dev->aenq;
154 u32 addr_low, addr_high, aenq_caps;
155 u16 size;
156
157 dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
158 size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
159 aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
160 GFP_KERNEL);
161
162 if (!aenq->entries) {
163 pr_err("memory allocation failed\n");
164 return -ENOMEM;
165 }
166
167 aenq->head = aenq->q_depth;
168 aenq->phase = 1;
169
170 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
171 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);
172
173 writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
174 writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);
175
176 aenq_caps = 0;
177 aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
178 aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
179 << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
180 ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
181 writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);
182
183 if (unlikely(!aenq_handlers)) {
184 pr_err("aenq handlers pointer is NULL\n");
185 return -EINVAL;
186 }
187
188 aenq->aenq_handlers = aenq_handlers;
189
190 return 0;
191}
192
193static void comp_ctxt_release(struct ena_com_admin_queue *queue,
194 struct ena_comp_ctx *comp_ctx)
195{
196 comp_ctx->occupied = false;
197 atomic_dec(&queue->outstanding_cmds);
198}
199
200static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
201 u16 command_id, bool capture)
202{
203 if (unlikely(command_id >= queue->q_depth)) {
204 pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
205 command_id, queue->q_depth);
206 return NULL;
207 }
208
209 if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
210 pr_err("Completion context is occupied\n");
211 return NULL;
212 }
213
214 if (capture) {
215 atomic_inc(&queue->outstanding_cmds);
216 queue->comp_ctx[command_id].occupied = true;
217 }
218
219 return &queue->comp_ctx[command_id];
220}
221
222static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
223 struct ena_admin_aq_entry *cmd,
224 size_t cmd_size_in_bytes,
225 struct ena_admin_acq_entry *comp,
226 size_t comp_size_in_bytes)
227{
228 struct ena_comp_ctx *comp_ctx;
229 u16 tail_masked, cmd_id;
230 u16 queue_size_mask;
231 u16 cnt;
232
233 queue_size_mask = admin_queue->q_depth - 1;
234
235 tail_masked = admin_queue->sq.tail & queue_size_mask;
236
237
238 cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
239 if (cnt >= admin_queue->q_depth) {
240 pr_debug("admin queue is full.\n");
241 admin_queue->stats.out_of_space++;
242 return ERR_PTR(-ENOSPC);
243 }
244
245 cmd_id = admin_queue->curr_cmd_id;
246
247 cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
248 ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;
249
250 cmd->aq_common_descriptor.command_id |= cmd_id &
251 ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;
252
253 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
254 if (unlikely(!comp_ctx))
255 return ERR_PTR(-EINVAL);
256
257 comp_ctx->status = ENA_CMD_SUBMITTED;
258 comp_ctx->comp_size = (u32)comp_size_in_bytes;
259 comp_ctx->user_cqe = comp;
260 comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;
261
262 reinit_completion(&comp_ctx->wait_event);
263
264 memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);
265
266 admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
267 queue_size_mask;
268
269 admin_queue->sq.tail++;
270 admin_queue->stats.submitted_cmd++;
271
272 if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
273 admin_queue->sq.phase = !admin_queue->sq.phase;
274
275 writel(admin_queue->sq.tail, admin_queue->sq.db_addr);
276
277 return comp_ctx;
278}
279
280static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
281{
282 size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
283 struct ena_comp_ctx *comp_ctx;
284 u16 i;
285
286 queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
287 if (unlikely(!queue->comp_ctx)) {
288 pr_err("memory allocation failed\n");
289 return -ENOMEM;
290 }
291
292 for (i = 0; i < queue->q_depth; i++) {
293 comp_ctx = get_comp_ctxt(queue, i, false);
294 if (comp_ctx)
295 init_completion(&comp_ctx->wait_event);
296 }
297
298 return 0;
299}
300
301static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
302 struct ena_admin_aq_entry *cmd,
303 size_t cmd_size_in_bytes,
304 struct ena_admin_acq_entry *comp,
305 size_t comp_size_in_bytes)
306{
307 unsigned long flags = 0;
308 struct ena_comp_ctx *comp_ctx;
309
310 spin_lock_irqsave(&admin_queue->q_lock, flags);
311 if (unlikely(!admin_queue->running_state)) {
312 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
313 return ERR_PTR(-ENODEV);
314 }
315 comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
316 cmd_size_in_bytes,
317 comp,
318 comp_size_in_bytes);
319 if (IS_ERR(comp_ctx))
320 admin_queue->running_state = false;
321 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
322
323 return comp_ctx;
324}
325
326static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
327 struct ena_com_create_io_ctx *ctx,
328 struct ena_com_io_sq *io_sq)
329{
330 size_t size;
331 int dev_node = 0;
332
333 memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));
334
335 io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
336 io_sq->desc_entry_size =
337 (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
338 sizeof(struct ena_eth_io_tx_desc) :
339 sizeof(struct ena_eth_io_rx_desc);
340
341 size = io_sq->desc_entry_size * io_sq->q_depth;
342
343 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
344 dev_node = dev_to_node(ena_dev->dmadev);
345 set_dev_node(ena_dev->dmadev, ctx->numa_node);
346 io_sq->desc_addr.virt_addr =
347 dma_alloc_coherent(ena_dev->dmadev, size,
348 &io_sq->desc_addr.phys_addr,
349 GFP_KERNEL);
350 set_dev_node(ena_dev->dmadev, dev_node);
351 if (!io_sq->desc_addr.virt_addr) {
352 io_sq->desc_addr.virt_addr =
353 dma_alloc_coherent(ena_dev->dmadev, size,
354 &io_sq->desc_addr.phys_addr,
355 GFP_KERNEL);
356 }
357
358 if (!io_sq->desc_addr.virt_addr) {
359 pr_err("memory allocation failed\n");
360 return -ENOMEM;
361 }
362 }
363
364 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
365
366 io_sq->bounce_buf_ctrl.buffer_size =
367 ena_dev->llq_info.desc_list_entry_size;
368 io_sq->bounce_buf_ctrl.buffers_num =
369 ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
370 io_sq->bounce_buf_ctrl.next_to_use = 0;
371
372 size = io_sq->bounce_buf_ctrl.buffer_size *
373 io_sq->bounce_buf_ctrl.buffers_num;
374
375 dev_node = dev_to_node(ena_dev->dmadev);
376 set_dev_node(ena_dev->dmadev, ctx->numa_node);
377 io_sq->bounce_buf_ctrl.base_buffer =
378 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
379 set_dev_node(ena_dev->dmadev, dev_node);
380 if (!io_sq->bounce_buf_ctrl.base_buffer)
381 io_sq->bounce_buf_ctrl.base_buffer =
382 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
383
384 if (!io_sq->bounce_buf_ctrl.base_buffer) {
385 pr_err("bounce buffer memory allocation failed\n");
386 return -ENOMEM;
387 }
388
389 memcpy(&io_sq->llq_info, &ena_dev->llq_info,
390 sizeof(io_sq->llq_info));
391
392
393 io_sq->llq_buf_ctrl.curr_bounce_buf =
394 ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
395 memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
396 0x0, io_sq->llq_info.desc_list_entry_size);
397 io_sq->llq_buf_ctrl.descs_left_in_line =
398 io_sq->llq_info.descs_num_before_header;
399
400 if (io_sq->llq_info.max_entries_in_tx_burst > 0)
401 io_sq->entries_in_tx_burst_left =
402 io_sq->llq_info.max_entries_in_tx_burst;
403 }
404
405 io_sq->tail = 0;
406 io_sq->next_to_comp = 0;
407 io_sq->phase = 1;
408
409 return 0;
410}
411
412static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
413 struct ena_com_create_io_ctx *ctx,
414 struct ena_com_io_cq *io_cq)
415{
416 size_t size;
417 int prev_node = 0;
418
419 memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));
420
421
422 io_cq->cdesc_entry_size_in_bytes =
423 (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
424 sizeof(struct ena_eth_io_tx_cdesc) :
425 sizeof(struct ena_eth_io_rx_cdesc_base);
426
427 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
428
429 prev_node = dev_to_node(ena_dev->dmadev);
430 set_dev_node(ena_dev->dmadev, ctx->numa_node);
431 io_cq->cdesc_addr.virt_addr =
432 dma_alloc_coherent(ena_dev->dmadev, size,
433 &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
434 set_dev_node(ena_dev->dmadev, prev_node);
435 if (!io_cq->cdesc_addr.virt_addr) {
436 io_cq->cdesc_addr.virt_addr =
437 dma_alloc_coherent(ena_dev->dmadev, size,
438 &io_cq->cdesc_addr.phys_addr,
439 GFP_KERNEL);
440 }
441
442 if (!io_cq->cdesc_addr.virt_addr) {
443 pr_err("memory allocation failed\n");
444 return -ENOMEM;
445 }
446
447 io_cq->phase = 1;
448 io_cq->head = 0;
449
450 return 0;
451}
452
453static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
454 struct ena_admin_acq_entry *cqe)
455{
456 struct ena_comp_ctx *comp_ctx;
457 u16 cmd_id;
458
459 cmd_id = cqe->acq_common_descriptor.command &
460 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;
461
462 comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
463 if (unlikely(!comp_ctx)) {
464 pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
465 admin_queue->running_state = false;
466 return;
467 }
468
469 comp_ctx->status = ENA_CMD_COMPLETED;
470 comp_ctx->comp_status = cqe->acq_common_descriptor.status;
471
472 if (comp_ctx->user_cqe)
473 memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);
474
475 if (!admin_queue->polling)
476 complete(&comp_ctx->wait_event);
477}
478
479static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
480{
481 struct ena_admin_acq_entry *cqe = NULL;
482 u16 comp_num = 0;
483 u16 head_masked;
484 u8 phase;
485
486 head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
487 phase = admin_queue->cq.phase;
488
489 cqe = &admin_queue->cq.entries[head_masked];
490
491
492 while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
493 ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
494
495
496
497 dma_rmb();
498 ena_com_handle_single_admin_completion(admin_queue, cqe);
499
500 head_masked++;
501 comp_num++;
502 if (unlikely(head_masked == admin_queue->q_depth)) {
503 head_masked = 0;
504 phase = !phase;
505 }
506
507 cqe = &admin_queue->cq.entries[head_masked];
508 }
509
510 admin_queue->cq.head += comp_num;
511 admin_queue->cq.phase = phase;
512 admin_queue->sq.head += comp_num;
513 admin_queue->stats.completed_cmd += comp_num;
514}
515
516static int ena_com_comp_status_to_errno(u8 comp_status)
517{
518 if (unlikely(comp_status != 0))
519 pr_err("admin command failed[%u]\n", comp_status);
520
521 if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
522 return -EINVAL;
523
524 switch (comp_status) {
525 case ENA_ADMIN_SUCCESS:
526 return 0;
527 case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
528 return -ENOMEM;
529 case ENA_ADMIN_UNSUPPORTED_OPCODE:
530 return -EOPNOTSUPP;
531 case ENA_ADMIN_BAD_OPCODE:
532 case ENA_ADMIN_MALFORMED_REQUEST:
533 case ENA_ADMIN_ILLEGAL_PARAMETER:
534 case ENA_ADMIN_UNKNOWN_ERROR:
535 return -EINVAL;
536 }
537
538 return 0;
539}
540
541static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
542 struct ena_com_admin_queue *admin_queue)
543{
544 unsigned long flags = 0;
545 unsigned long timeout;
546 int ret;
547
548 timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);
549
550 while (1) {
551 spin_lock_irqsave(&admin_queue->q_lock, flags);
552 ena_com_handle_admin_completion(admin_queue);
553 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
554
555 if (comp_ctx->status != ENA_CMD_SUBMITTED)
556 break;
557
558 if (time_is_before_jiffies(timeout)) {
559 pr_err("Wait for completion (polling) timeout\n");
560
561 spin_lock_irqsave(&admin_queue->q_lock, flags);
562 admin_queue->stats.no_completion++;
563 admin_queue->running_state = false;
564 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
565
566 ret = -ETIME;
567 goto err;
568 }
569
570 msleep(ENA_POLL_MS);
571 }
572
573 if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
574 pr_err("Command was aborted\n");
575 spin_lock_irqsave(&admin_queue->q_lock, flags);
576 admin_queue->stats.aborted_cmd++;
577 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
578 ret = -ENODEV;
579 goto err;
580 }
581
582 WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
583 comp_ctx->status);
584
585 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
586err:
587 comp_ctxt_release(admin_queue, comp_ctx);
588 return ret;
589}
590
591
592
593
594
595
596
597static int ena_com_set_llq(struct ena_com_dev *ena_dev)
598{
599 struct ena_com_admin_queue *admin_queue;
600 struct ena_admin_set_feat_cmd cmd;
601 struct ena_admin_set_feat_resp resp;
602 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
603 int ret;
604
605 memset(&cmd, 0x0, sizeof(cmd));
606 admin_queue = &ena_dev->admin_queue;
607
608 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
609 cmd.feat_common.feature_id = ENA_ADMIN_LLQ;
610
611 cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
612 cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
613 cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
614 cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;
615
616 ret = ena_com_execute_admin_command(admin_queue,
617 (struct ena_admin_aq_entry *)&cmd,
618 sizeof(cmd),
619 (struct ena_admin_acq_entry *)&resp,
620 sizeof(resp));
621
622 if (unlikely(ret))
623 pr_err("Failed to set LLQ configurations: %d\n", ret);
624
625 return ret;
626}
627
628static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
629 struct ena_admin_feature_llq_desc *llq_features,
630 struct ena_llq_configurations *llq_default_cfg)
631{
632 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
633 u16 supported_feat;
634 int rc;
635
636 memset(llq_info, 0, sizeof(*llq_info));
637
638 supported_feat = llq_features->header_location_ctrl_supported;
639
640 if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
641 llq_info->header_location_ctrl =
642 llq_default_cfg->llq_header_location;
643 } else {
644 pr_err("Invalid header location control, supported: 0x%x\n",
645 supported_feat);
646 return -EINVAL;
647 }
648
649 if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
650 supported_feat = llq_features->descriptors_stride_ctrl_supported;
651 if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
652 llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
653 } else {
654 if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
655 llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
656 } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
657 llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
658 } else {
659 pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
660 supported_feat);
661 return -EINVAL;
662 }
663
664 pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
665 llq_default_cfg->llq_stride_ctrl, supported_feat,
666 llq_info->desc_stride_ctrl);
667 }
668 } else {
669 llq_info->desc_stride_ctrl = 0;
670 }
671
672 supported_feat = llq_features->entry_size_ctrl_supported;
673 if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
674 llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
675 llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
676 } else {
677 if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
678 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
679 llq_info->desc_list_entry_size = 128;
680 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
681 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
682 llq_info->desc_list_entry_size = 192;
683 } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
684 llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
685 llq_info->desc_list_entry_size = 256;
686 } else {
687 pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
688 supported_feat);
689 return -EINVAL;
690 }
691
692 pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
693 llq_default_cfg->llq_ring_entry_size, supported_feat,
694 llq_info->desc_list_entry_size);
695 }
696 if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
697
698
699
700 pr_err("illegal entry size %d\n",
701 llq_info->desc_list_entry_size);
702 return -EINVAL;
703 }
704
705 if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
706 llq_info->descs_per_entry = llq_info->desc_list_entry_size /
707 sizeof(struct ena_eth_io_tx_desc);
708 else
709 llq_info->descs_per_entry = 1;
710
711 supported_feat = llq_features->desc_num_before_header_supported;
712 if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
713 llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
714 } else {
715 if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
716 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
717 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
718 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
719 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
720 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
721 } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
722 llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
723 } else {
724 pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
725 supported_feat);
726 return -EINVAL;
727 }
728
729 pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
730 llq_default_cfg->llq_num_decs_before_header,
731 supported_feat, llq_info->descs_num_before_header);
732 }
733
734 llq_info->max_entries_in_tx_burst =
735 (u16)(llq_features->max_tx_burst_size / llq_default_cfg->llq_ring_entry_size_value);
736
737 rc = ena_com_set_llq(ena_dev);
738 if (rc)
739 pr_err("Cannot set LLQ configuration: %d\n", rc);
740
741 return rc;
742}
743
744static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
745 struct ena_com_admin_queue *admin_queue)
746{
747 unsigned long flags = 0;
748 int ret;
749
750 wait_for_completion_timeout(&comp_ctx->wait_event,
751 usecs_to_jiffies(
752 admin_queue->completion_timeout));
753
754
755
756
757
758
759 if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
760 spin_lock_irqsave(&admin_queue->q_lock, flags);
761 ena_com_handle_admin_completion(admin_queue);
762 admin_queue->stats.no_completion++;
763 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
764
765 if (comp_ctx->status == ENA_CMD_COMPLETED) {
766 pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
767 comp_ctx->cmd_opcode,
768 admin_queue->auto_polling ? "ON" : "OFF");
769
770 if (admin_queue->auto_polling)
771 admin_queue->polling = true;
772 } else {
773 pr_err("The ena device doesn't send a completion for the admin cmd %d status %d\n",
774 comp_ctx->cmd_opcode, comp_ctx->status);
775 }
776
777
778
779
780 if (!admin_queue->polling) {
781 admin_queue->running_state = false;
782 ret = -ETIME;
783 goto err;
784 }
785 }
786
787 ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
788err:
789 comp_ctxt_release(admin_queue, comp_ctx);
790 return ret;
791}
792
793
794
795
796
797static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
798{
799 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
800 volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
801 mmio_read->read_resp;
802 u32 mmio_read_reg, ret, i;
803 unsigned long flags = 0;
804 u32 timeout = mmio_read->reg_read_to;
805
806 might_sleep();
807
808 if (timeout == 0)
809 timeout = ENA_REG_READ_TIMEOUT;
810
811
812 if (!mmio_read->readless_supported)
813 return readl(ena_dev->reg_bar + offset);
814
815 spin_lock_irqsave(&mmio_read->lock, flags);
816 mmio_read->seq_num++;
817
818 read_resp->req_id = mmio_read->seq_num + 0xDEAD;
819 mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
820 ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
821 mmio_read_reg |= mmio_read->seq_num &
822 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;
823
824 writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);
825
826 for (i = 0; i < timeout; i++) {
827 if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
828 break;
829
830 udelay(1);
831 }
832
833 if (unlikely(i == timeout)) {
834 pr_err("reading reg failed for timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
835 mmio_read->seq_num, offset, read_resp->req_id,
836 read_resp->reg_off);
837 ret = ENA_MMIO_READ_TIMEOUT;
838 goto err;
839 }
840
841 if (read_resp->reg_off != offset) {
842 pr_err("Read failure: wrong offset provided\n");
843 ret = ENA_MMIO_READ_TIMEOUT;
844 } else {
845 ret = read_resp->reg_val;
846 }
847err:
848 spin_unlock_irqrestore(&mmio_read->lock, flags);
849
850 return ret;
851}
852
853
854
855
856
857
858
859
860static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
861 struct ena_com_admin_queue *admin_queue)
862{
863 if (admin_queue->polling)
864 return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
865 admin_queue);
866
867 return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
868 admin_queue);
869}
870
871static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
872 struct ena_com_io_sq *io_sq)
873{
874 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
875 struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
876 struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
877 u8 direction;
878 int ret;
879
880 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
881
882 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
883 direction = ENA_ADMIN_SQ_DIRECTION_TX;
884 else
885 direction = ENA_ADMIN_SQ_DIRECTION_RX;
886
887 destroy_cmd.sq.sq_identity |= (direction <<
888 ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
889 ENA_ADMIN_SQ_SQ_DIRECTION_MASK;
890
891 destroy_cmd.sq.sq_idx = io_sq->idx;
892 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;
893
894 ret = ena_com_execute_admin_command(admin_queue,
895 (struct ena_admin_aq_entry *)&destroy_cmd,
896 sizeof(destroy_cmd),
897 (struct ena_admin_acq_entry *)&destroy_resp,
898 sizeof(destroy_resp));
899
900 if (unlikely(ret && (ret != -ENODEV)))
901 pr_err("failed to destroy io sq error: %d\n", ret);
902
903 return ret;
904}
905
906static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
907 struct ena_com_io_sq *io_sq,
908 struct ena_com_io_cq *io_cq)
909{
910 size_t size;
911
912 if (io_cq->cdesc_addr.virt_addr) {
913 size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
914
915 dma_free_coherent(ena_dev->dmadev, size,
916 io_cq->cdesc_addr.virt_addr,
917 io_cq->cdesc_addr.phys_addr);
918
919 io_cq->cdesc_addr.virt_addr = NULL;
920 }
921
922 if (io_sq->desc_addr.virt_addr) {
923 size = io_sq->desc_entry_size * io_sq->q_depth;
924
925 dma_free_coherent(ena_dev->dmadev, size,
926 io_sq->desc_addr.virt_addr,
927 io_sq->desc_addr.phys_addr);
928
929 io_sq->desc_addr.virt_addr = NULL;
930 }
931
932 if (io_sq->bounce_buf_ctrl.base_buffer) {
933 devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
934 io_sq->bounce_buf_ctrl.base_buffer = NULL;
935 }
936}
937
938static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
939 u16 exp_state)
940{
941 u32 val, i;
942
943
944 timeout = (timeout * 100) / ENA_POLL_MS;
945
946 for (i = 0; i < timeout; i++) {
947 val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
948
949 if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
950 pr_err("Reg read timeout occurred\n");
951 return -ETIME;
952 }
953
954 if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
955 exp_state)
956 return 0;
957
958 msleep(ENA_POLL_MS);
959 }
960
961 return -ETIME;
962}
963
964static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
965 enum ena_admin_aq_feature_id feature_id)
966{
967 u32 feature_mask = 1 << feature_id;
968
969
970 if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
971 !(ena_dev->supported_features & feature_mask))
972 return false;
973
974 return true;
975}
976
977static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
978 struct ena_admin_get_feat_resp *get_resp,
979 enum ena_admin_aq_feature_id feature_id,
980 dma_addr_t control_buf_dma_addr,
981 u32 control_buff_size,
982 u8 feature_ver)
983{
984 struct ena_com_admin_queue *admin_queue;
985 struct ena_admin_get_feat_cmd get_cmd;
986 int ret;
987
988 if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
989 pr_debug("Feature %d isn't supported\n", feature_id);
990 return -EOPNOTSUPP;
991 }
992
993 memset(&get_cmd, 0x0, sizeof(get_cmd));
994 admin_queue = &ena_dev->admin_queue;
995
996 get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;
997
998 if (control_buff_size)
999 get_cmd.aq_common_descriptor.flags =
1000 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
1001 else
1002 get_cmd.aq_common_descriptor.flags = 0;
1003
1004 ret = ena_com_mem_addr_set(ena_dev,
1005 &get_cmd.control_buffer.address,
1006 control_buf_dma_addr);
1007 if (unlikely(ret)) {
1008 pr_err("memory address set failed\n");
1009 return ret;
1010 }
1011
1012 get_cmd.control_buffer.length = control_buff_size;
1013 get_cmd.feat_common.feature_version = feature_ver;
1014 get_cmd.feat_common.feature_id = feature_id;
1015
1016 ret = ena_com_execute_admin_command(admin_queue,
1017 (struct ena_admin_aq_entry *)
1018 &get_cmd,
1019 sizeof(get_cmd),
1020 (struct ena_admin_acq_entry *)
1021 get_resp,
1022 sizeof(*get_resp));
1023
1024 if (unlikely(ret))
1025 pr_err("Failed to submit get_feature command %d error: %d\n",
1026 feature_id, ret);
1027
1028 return ret;
1029}
1030
1031static int ena_com_get_feature(struct ena_com_dev *ena_dev,
1032 struct ena_admin_get_feat_resp *get_resp,
1033 enum ena_admin_aq_feature_id feature_id,
1034 u8 feature_ver)
1035{
1036 return ena_com_get_feature_ex(ena_dev,
1037 get_resp,
1038 feature_id,
1039 0,
1040 0,
1041 feature_ver);
1042}
1043
1044static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
1045{
1046 struct ena_rss *rss = &ena_dev->rss;
1047
1048 rss->hash_key =
1049 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1050 &rss->hash_key_dma_addr, GFP_KERNEL);
1051
1052 if (unlikely(!rss->hash_key))
1053 return -ENOMEM;
1054
1055 return 0;
1056}
1057
1058static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
1059{
1060 struct ena_rss *rss = &ena_dev->rss;
1061
1062 if (rss->hash_key)
1063 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
1064 rss->hash_key, rss->hash_key_dma_addr);
1065 rss->hash_key = NULL;
1066}
1067
1068static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
1069{
1070 struct ena_rss *rss = &ena_dev->rss;
1071
1072 rss->hash_ctrl =
1073 dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1074 &rss->hash_ctrl_dma_addr, GFP_KERNEL);
1075
1076 if (unlikely(!rss->hash_ctrl))
1077 return -ENOMEM;
1078
1079 return 0;
1080}
1081
1082static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
1083{
1084 struct ena_rss *rss = &ena_dev->rss;
1085
1086 if (rss->hash_ctrl)
1087 dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
1088 rss->hash_ctrl, rss->hash_ctrl_dma_addr);
1089 rss->hash_ctrl = NULL;
1090}
1091
1092static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
1093 u16 log_size)
1094{
1095 struct ena_rss *rss = &ena_dev->rss;
1096 struct ena_admin_get_feat_resp get_resp;
1097 size_t tbl_size;
1098 int ret;
1099
1100 ret = ena_com_get_feature(ena_dev, &get_resp,
1101 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
1102 if (unlikely(ret))
1103 return ret;
1104
1105 if ((get_resp.u.ind_table.min_size > log_size) ||
1106 (get_resp.u.ind_table.max_size < log_size)) {
1107 pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
1108 1 << log_size, 1 << get_resp.u.ind_table.min_size,
1109 1 << get_resp.u.ind_table.max_size);
1110 return -EINVAL;
1111 }
1112
1113 tbl_size = (1ULL << log_size) *
1114 sizeof(struct ena_admin_rss_ind_table_entry);
1115
1116 rss->rss_ind_tbl =
1117 dma_alloc_coherent(ena_dev->dmadev, tbl_size,
1118 &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
1119 if (unlikely(!rss->rss_ind_tbl))
1120 goto mem_err1;
1121
1122 tbl_size = (1ULL << log_size) * sizeof(u16);
1123 rss->host_rss_ind_tbl =
1124 devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
1125 if (unlikely(!rss->host_rss_ind_tbl))
1126 goto mem_err2;
1127
1128 rss->tbl_log_size = log_size;
1129
1130 return 0;
1131
1132mem_err2:
1133 tbl_size = (1ULL << log_size) *
1134 sizeof(struct ena_admin_rss_ind_table_entry);
1135
1136 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1137 rss->rss_ind_tbl_dma_addr);
1138 rss->rss_ind_tbl = NULL;
1139mem_err1:
1140 rss->tbl_log_size = 0;
1141 return -ENOMEM;
1142}
1143
1144static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
1145{
1146 struct ena_rss *rss = &ena_dev->rss;
1147 size_t tbl_size = (1ULL << rss->tbl_log_size) *
1148 sizeof(struct ena_admin_rss_ind_table_entry);
1149
1150 if (rss->rss_ind_tbl)
1151 dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
1152 rss->rss_ind_tbl_dma_addr);
1153 rss->rss_ind_tbl = NULL;
1154
1155 if (rss->host_rss_ind_tbl)
1156 devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
1157 rss->host_rss_ind_tbl = NULL;
1158}
1159
1160static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
1161 struct ena_com_io_sq *io_sq, u16 cq_idx)
1162{
1163 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1164 struct ena_admin_aq_create_sq_cmd create_cmd;
1165 struct ena_admin_acq_create_sq_resp_desc cmd_completion;
1166 u8 direction;
1167 int ret;
1168
1169 memset(&create_cmd, 0x0, sizeof(create_cmd));
1170
1171 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;
1172
1173 if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1174 direction = ENA_ADMIN_SQ_DIRECTION_TX;
1175 else
1176 direction = ENA_ADMIN_SQ_DIRECTION_RX;
1177
1178 create_cmd.sq_identity |= (direction <<
1179 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
1180 ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;
1181
1182 create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
1183 ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;
1184
1185 create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
1186 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
1187 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;
1188
1189 create_cmd.sq_caps_3 |=
1190 ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;
1191
1192 create_cmd.cq_idx = cq_idx;
1193 create_cmd.sq_depth = io_sq->q_depth;
1194
1195 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
1196 ret = ena_com_mem_addr_set(ena_dev,
1197 &create_cmd.sq_ba,
1198 io_sq->desc_addr.phys_addr);
1199 if (unlikely(ret)) {
1200 pr_err("memory address set failed\n");
1201 return ret;
1202 }
1203 }
1204
1205 ret = ena_com_execute_admin_command(admin_queue,
1206 (struct ena_admin_aq_entry *)&create_cmd,
1207 sizeof(create_cmd),
1208 (struct ena_admin_acq_entry *)&cmd_completion,
1209 sizeof(cmd_completion));
1210 if (unlikely(ret)) {
1211 pr_err("Failed to create IO SQ. error: %d\n", ret);
1212 return ret;
1213 }
1214
1215 io_sq->idx = cmd_completion.sq_idx;
1216
1217 io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1218 (uintptr_t)cmd_completion.sq_doorbell_offset);
1219
1220 if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
1221 io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
1222 + cmd_completion.llq_headers_offset);
1223
1224 io_sq->desc_addr.pbuf_dev_addr =
1225 (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
1226 cmd_completion.llq_descriptors_offset);
1227 }
1228
1229 pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);
1230
1231 return ret;
1232}
1233
1234static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
1235{
1236 struct ena_rss *rss = &ena_dev->rss;
1237 struct ena_com_io_sq *io_sq;
1238 u16 qid;
1239 int i;
1240
1241 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1242 qid = rss->host_rss_ind_tbl[i];
1243 if (qid >= ENA_TOTAL_NUM_QUEUES)
1244 return -EINVAL;
1245
1246 io_sq = &ena_dev->io_sq_queues[qid];
1247
1248 if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
1249 return -EINVAL;
1250
1251 rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
1252 }
1253
1254 return 0;
1255}
1256
1257static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
1258{
1259 u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
1260 struct ena_rss *rss = &ena_dev->rss;
1261 u8 idx;
1262 u16 i;
1263
1264 for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
1265 dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;
1266
1267 for (i = 0; i < 1 << rss->tbl_log_size; i++) {
1268 if (rss->rss_ind_tbl[i].cq_idx > ENA_TOTAL_NUM_QUEUES)
1269 return -EINVAL;
1270 idx = (u8)rss->rss_ind_tbl[i].cq_idx;
1271
1272 if (dev_idx_to_host_tbl[idx] > ENA_TOTAL_NUM_QUEUES)
1273 return -EINVAL;
1274
1275 rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
1276 }
1277
1278 return 0;
1279}
1280
1281static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
1282{
1283 size_t size;
1284
1285 size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;
1286
1287 ena_dev->intr_moder_tbl =
1288 devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
1289 if (!ena_dev->intr_moder_tbl)
1290 return -ENOMEM;
1291
1292 ena_com_config_default_interrupt_moderation_table(ena_dev);
1293
1294 return 0;
1295}
1296
1297static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
1298 u16 intr_delay_resolution)
1299{
1300 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
1301 unsigned int i;
1302
1303 if (!intr_delay_resolution) {
1304 pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
1305 intr_delay_resolution = 1;
1306 }
1307 ena_dev->intr_delay_resolution = intr_delay_resolution;
1308
1309
1310 for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
1311 intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;
1312
1313
1314 ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
1315}
1316
1317
1318
1319
1320
1321int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
1322 struct ena_admin_aq_entry *cmd,
1323 size_t cmd_size,
1324 struct ena_admin_acq_entry *comp,
1325 size_t comp_size)
1326{
1327 struct ena_comp_ctx *comp_ctx;
1328 int ret;
1329
1330 comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
1331 comp, comp_size);
1332 if (IS_ERR(comp_ctx)) {
1333 if (comp_ctx == ERR_PTR(-ENODEV))
1334 pr_debug("Failed to submit command [%ld]\n",
1335 PTR_ERR(comp_ctx));
1336 else
1337 pr_err("Failed to submit command [%ld]\n",
1338 PTR_ERR(comp_ctx));
1339
1340 return PTR_ERR(comp_ctx);
1341 }
1342
1343 ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
1344 if (unlikely(ret)) {
1345 if (admin_queue->running_state)
1346 pr_err("Failed to process command. ret = %d\n", ret);
1347 else
1348 pr_debug("Failed to process command. ret = %d\n", ret);
1349 }
1350 return ret;
1351}
1352
1353int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
1354 struct ena_com_io_cq *io_cq)
1355{
1356 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1357 struct ena_admin_aq_create_cq_cmd create_cmd;
1358 struct ena_admin_acq_create_cq_resp_desc cmd_completion;
1359 int ret;
1360
1361 memset(&create_cmd, 0x0, sizeof(create_cmd));
1362
1363 create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;
1364
1365 create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
1366 ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
1367 create_cmd.cq_caps_1 |=
1368 ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;
1369
1370 create_cmd.msix_vector = io_cq->msix_vector;
1371 create_cmd.cq_depth = io_cq->q_depth;
1372
1373 ret = ena_com_mem_addr_set(ena_dev,
1374 &create_cmd.cq_ba,
1375 io_cq->cdesc_addr.phys_addr);
1376 if (unlikely(ret)) {
1377 pr_err("memory address set failed\n");
1378 return ret;
1379 }
1380
1381 ret = ena_com_execute_admin_command(admin_queue,
1382 (struct ena_admin_aq_entry *)&create_cmd,
1383 sizeof(create_cmd),
1384 (struct ena_admin_acq_entry *)&cmd_completion,
1385 sizeof(cmd_completion));
1386 if (unlikely(ret)) {
1387 pr_err("Failed to create IO CQ. error: %d\n", ret);
1388 return ret;
1389 }
1390
1391 io_cq->idx = cmd_completion.cq_idx;
1392
1393 io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1394 cmd_completion.cq_interrupt_unmask_register_offset);
1395
1396 if (cmd_completion.cq_head_db_register_offset)
1397 io_cq->cq_head_db_reg =
1398 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1399 cmd_completion.cq_head_db_register_offset);
1400
1401 if (cmd_completion.numa_node_register_offset)
1402 io_cq->numa_node_cfg_reg =
1403 (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1404 cmd_completion.numa_node_register_offset);
1405
1406 pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);
1407
1408 return ret;
1409}
1410
1411int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
1412 struct ena_com_io_sq **io_sq,
1413 struct ena_com_io_cq **io_cq)
1414{
1415 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1416 pr_err("Invalid queue number %d but the max is %d\n", qid,
1417 ENA_TOTAL_NUM_QUEUES);
1418 return -EINVAL;
1419 }
1420
1421 *io_sq = &ena_dev->io_sq_queues[qid];
1422 *io_cq = &ena_dev->io_cq_queues[qid];
1423
1424 return 0;
1425}
1426
1427void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
1428{
1429 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1430 struct ena_comp_ctx *comp_ctx;
1431 u16 i;
1432
1433 if (!admin_queue->comp_ctx)
1434 return;
1435
1436 for (i = 0; i < admin_queue->q_depth; i++) {
1437 comp_ctx = get_comp_ctxt(admin_queue, i, false);
1438 if (unlikely(!comp_ctx))
1439 break;
1440
1441 comp_ctx->status = ENA_CMD_ABORTED;
1442
1443 complete(&comp_ctx->wait_event);
1444 }
1445}
1446
1447void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
1448{
1449 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1450 unsigned long flags = 0;
1451
1452 spin_lock_irqsave(&admin_queue->q_lock, flags);
1453 while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
1454 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1455 msleep(ENA_POLL_MS);
1456 spin_lock_irqsave(&admin_queue->q_lock, flags);
1457 }
1458 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1459}
1460
1461int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
1462 struct ena_com_io_cq *io_cq)
1463{
1464 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1465 struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
1466 struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
1467 int ret;
1468
1469 memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));
1470
1471 destroy_cmd.cq_idx = io_cq->idx;
1472 destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;
1473
1474 ret = ena_com_execute_admin_command(admin_queue,
1475 (struct ena_admin_aq_entry *)&destroy_cmd,
1476 sizeof(destroy_cmd),
1477 (struct ena_admin_acq_entry *)&destroy_resp,
1478 sizeof(destroy_resp));
1479
1480 if (unlikely(ret && (ret != -ENODEV)))
1481 pr_err("Failed to destroy IO CQ. error: %d\n", ret);
1482
1483 return ret;
1484}
1485
1486bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
1487{
1488 return ena_dev->admin_queue.running_state;
1489}
1490
1491void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
1492{
1493 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1494 unsigned long flags = 0;
1495
1496 spin_lock_irqsave(&admin_queue->q_lock, flags);
1497 ena_dev->admin_queue.running_state = state;
1498 spin_unlock_irqrestore(&admin_queue->q_lock, flags);
1499}
1500
1501void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
1502{
1503 u16 depth = ena_dev->aenq.q_depth;
1504
1505 WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");
1506
1507
1508
1509
1510 writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
1511}
1512
1513int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
1514{
1515 struct ena_com_admin_queue *admin_queue;
1516 struct ena_admin_set_feat_cmd cmd;
1517 struct ena_admin_set_feat_resp resp;
1518 struct ena_admin_get_feat_resp get_resp;
1519 int ret;
1520
1521 ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
1522 if (ret) {
1523 pr_info("Can't get aenq configuration\n");
1524 return ret;
1525 }
1526
1527 if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
1528 pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
1529 get_resp.u.aenq.supported_groups, groups_flag);
1530 return -EOPNOTSUPP;
1531 }
1532
1533 memset(&cmd, 0x0, sizeof(cmd));
1534 admin_queue = &ena_dev->admin_queue;
1535
1536 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
1537 cmd.aq_common_descriptor.flags = 0;
1538 cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
1539 cmd.u.aenq.enabled_groups = groups_flag;
1540
1541 ret = ena_com_execute_admin_command(admin_queue,
1542 (struct ena_admin_aq_entry *)&cmd,
1543 sizeof(cmd),
1544 (struct ena_admin_acq_entry *)&resp,
1545 sizeof(resp));
1546
1547 if (unlikely(ret))
1548 pr_err("Failed to config AENQ ret: %d\n", ret);
1549
1550 return ret;
1551}
1552
1553int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
1554{
1555 u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
1556 int width;
1557
1558 if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
1559 pr_err("Reg read timeout occurred\n");
1560 return -ETIME;
1561 }
1562
1563 width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
1564 ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;
1565
1566 pr_debug("ENA dma width: %d\n", width);
1567
1568 if ((width < 32) || width > ENA_MAX_PHYS_ADDR_SIZE_BITS) {
1569 pr_err("DMA width illegal value: %d\n", width);
1570 return -EINVAL;
1571 }
1572
1573 ena_dev->dma_addr_bits = width;
1574
1575 return width;
1576}
1577
1578int ena_com_validate_version(struct ena_com_dev *ena_dev)
1579{
1580 u32 ver;
1581 u32 ctrl_ver;
1582 u32 ctrl_ver_masked;
1583
1584
1585
1586
1587 ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
1588 ctrl_ver = ena_com_reg_bar_read32(ena_dev,
1589 ENA_REGS_CONTROLLER_VERSION_OFF);
1590
1591 if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
1592 (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
1593 pr_err("Reg read timeout occurred\n");
1594 return -ETIME;
1595 }
1596
1597 pr_info("ena device version: %d.%d\n",
1598 (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
1599 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
1600 ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);
1601
1602 pr_info("ena controller version: %d.%d.%d implementation version %d\n",
1603 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
1604 ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
1605 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
1606 ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
1607 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
1608 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
1609 ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);
1610
1611 ctrl_ver_masked =
1612 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
1613 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
1614 (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);
1615
1616
1617 if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
1618 pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
1619 return -1;
1620 }
1621
1622 return 0;
1623}
1624
1625void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
1626{
1627 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1628 struct ena_com_admin_cq *cq = &admin_queue->cq;
1629 struct ena_com_admin_sq *sq = &admin_queue->sq;
1630 struct ena_com_aenq *aenq = &ena_dev->aenq;
1631 u16 size;
1632
1633 if (admin_queue->comp_ctx)
1634 devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
1635 admin_queue->comp_ctx = NULL;
1636 size = ADMIN_SQ_SIZE(admin_queue->q_depth);
1637 if (sq->entries)
1638 dma_free_coherent(ena_dev->dmadev, size, sq->entries,
1639 sq->dma_addr);
1640 sq->entries = NULL;
1641
1642 size = ADMIN_CQ_SIZE(admin_queue->q_depth);
1643 if (cq->entries)
1644 dma_free_coherent(ena_dev->dmadev, size, cq->entries,
1645 cq->dma_addr);
1646 cq->entries = NULL;
1647
1648 size = ADMIN_AENQ_SIZE(aenq->q_depth);
1649 if (ena_dev->aenq.entries)
1650 dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
1651 aenq->dma_addr);
1652 aenq->entries = NULL;
1653}
1654
1655void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
1656{
1657 u32 mask_value = 0;
1658
1659 if (polling)
1660 mask_value = ENA_REGS_ADMIN_INTR_MASK;
1661
1662 writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
1663 ena_dev->admin_queue.polling = polling;
1664}
1665
1666void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
1667 bool polling)
1668{
1669 ena_dev->admin_queue.auto_polling = polling;
1670}
1671
1672int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
1673{
1674 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1675
1676 spin_lock_init(&mmio_read->lock);
1677 mmio_read->read_resp =
1678 dma_alloc_coherent(ena_dev->dmadev,
1679 sizeof(*mmio_read->read_resp),
1680 &mmio_read->read_resp_dma_addr, GFP_KERNEL);
1681 if (unlikely(!mmio_read->read_resp))
1682 goto err;
1683
1684 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
1685
1686 mmio_read->read_resp->req_id = 0x0;
1687 mmio_read->seq_num = 0x0;
1688 mmio_read->readless_supported = true;
1689
1690 return 0;
1691
1692err:
1693
1694 return -ENOMEM;
1695}
1696
1697void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
1698{
1699 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1700
1701 mmio_read->readless_supported = readless_supported;
1702}
1703
1704void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
1705{
1706 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1707
1708 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1709 writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1710
1711 dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
1712 mmio_read->read_resp, mmio_read->read_resp_dma_addr);
1713
1714 mmio_read->read_resp = NULL;
1715}
1716
1717void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
1718{
1719 struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
1720 u32 addr_low, addr_high;
1721
1722 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
1723 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);
1724
1725 writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
1726 writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
1727}
1728
1729int ena_com_admin_init(struct ena_com_dev *ena_dev,
1730 struct ena_aenq_handlers *aenq_handlers)
1731{
1732 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
1733 u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
1734 int ret;
1735
1736 dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
1737
1738 if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
1739 pr_err("Reg read timeout occurred\n");
1740 return -ETIME;
1741 }
1742
1743 if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
1744 pr_err("Device isn't ready, abort com init\n");
1745 return -ENODEV;
1746 }
1747
1748 admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;
1749
1750 admin_queue->q_dmadev = ena_dev->dmadev;
1751 admin_queue->polling = false;
1752 admin_queue->curr_cmd_id = 0;
1753
1754 atomic_set(&admin_queue->outstanding_cmds, 0);
1755
1756 spin_lock_init(&admin_queue->q_lock);
1757
1758 ret = ena_com_init_comp_ctxt(admin_queue);
1759 if (ret)
1760 goto error;
1761
1762 ret = ena_com_admin_init_sq(admin_queue);
1763 if (ret)
1764 goto error;
1765
1766 ret = ena_com_admin_init_cq(admin_queue);
1767 if (ret)
1768 goto error;
1769
1770 admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
1771 ENA_REGS_AQ_DB_OFF);
1772
1773 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
1774 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);
1775
1776 writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
1777 writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);
1778
1779 addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
1780 addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);
1781
1782 writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
1783 writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);
1784
1785 aq_caps = 0;
1786 aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
1787 aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
1788 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
1789 ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;
1790
1791 acq_caps = 0;
1792 acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
1793 acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
1794 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
1795 ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;
1796
1797 writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
1798 writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
1799 ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
1800 if (ret)
1801 goto error;
1802
1803 admin_queue->running_state = true;
1804
1805 return 0;
1806error:
1807 ena_com_admin_destroy(ena_dev);
1808
1809 return ret;
1810}
1811
1812int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
1813 struct ena_com_create_io_ctx *ctx)
1814{
1815 struct ena_com_io_sq *io_sq;
1816 struct ena_com_io_cq *io_cq;
1817 int ret;
1818
1819 if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
1820 pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
1821 ctx->qid, ENA_TOTAL_NUM_QUEUES);
1822 return -EINVAL;
1823 }
1824
1825 io_sq = &ena_dev->io_sq_queues[ctx->qid];
1826 io_cq = &ena_dev->io_cq_queues[ctx->qid];
1827
1828 memset(io_sq, 0x0, sizeof(*io_sq));
1829 memset(io_cq, 0x0, sizeof(*io_cq));
1830
1831
1832 io_cq->q_depth = ctx->queue_size;
1833 io_cq->direction = ctx->direction;
1834 io_cq->qid = ctx->qid;
1835
1836 io_cq->msix_vector = ctx->msix_vector;
1837
1838 io_sq->q_depth = ctx->queue_size;
1839 io_sq->direction = ctx->direction;
1840 io_sq->qid = ctx->qid;
1841
1842 io_sq->mem_queue_type = ctx->mem_queue_type;
1843
1844 if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
1845
1846 io_sq->tx_max_header_size =
1847 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1848
1849 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1850 if (ret)
1851 goto error;
1852 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1853 if (ret)
1854 goto error;
1855
1856 ret = ena_com_create_io_cq(ena_dev, io_cq);
1857 if (ret)
1858 goto error;
1859
1860 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1861 if (ret)
1862 goto destroy_io_cq;
1863
1864 return 0;
1865
1866destroy_io_cq:
1867 ena_com_destroy_io_cq(ena_dev, io_cq);
1868error:
1869 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1870 return ret;
1871}
1872
1873void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1874{
1875 struct ena_com_io_sq *io_sq;
1876 struct ena_com_io_cq *io_cq;
1877
1878 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1879 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1880 ENA_TOTAL_NUM_QUEUES);
1881 return;
1882 }
1883
1884 io_sq = &ena_dev->io_sq_queues[qid];
1885 io_cq = &ena_dev->io_cq_queues[qid];
1886
1887 ena_com_destroy_io_sq(ena_dev, io_sq);
1888 ena_com_destroy_io_cq(ena_dev, io_cq);
1889
1890 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1891}
1892
1893int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1894 struct ena_admin_get_feat_resp *resp)
1895{
1896 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1897}
1898
1899int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1900 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1901{
1902 struct ena_admin_get_feat_resp get_resp;
1903 int rc;
1904
1905 rc = ena_com_get_feature(ena_dev, &get_resp,
1906 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1907 if (rc)
1908 return rc;
1909
1910 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1911 sizeof(get_resp.u.dev_attr));
1912 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1913
1914 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1915 rc = ena_com_get_feature(ena_dev, &get_resp,
1916 ENA_ADMIN_MAX_QUEUES_EXT,
1917 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1918 if (rc)
1919 return rc;
1920
1921 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1922 return -EINVAL;
1923
1924 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1925 sizeof(get_resp.u.max_queue_ext));
1926 ena_dev->tx_max_header_size =
1927 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
1928 } else {
1929 rc = ena_com_get_feature(ena_dev, &get_resp,
1930 ENA_ADMIN_MAX_QUEUES_NUM, 0);
1931 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1932 sizeof(get_resp.u.max_queue));
1933 ena_dev->tx_max_header_size =
1934 get_resp.u.max_queue.max_header_size;
1935
1936 if (rc)
1937 return rc;
1938 }
1939
1940 rc = ena_com_get_feature(ena_dev, &get_resp,
1941 ENA_ADMIN_AENQ_CONFIG, 0);
1942 if (rc)
1943 return rc;
1944
1945 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1946 sizeof(get_resp.u.aenq));
1947
1948 rc = ena_com_get_feature(ena_dev, &get_resp,
1949 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1950 if (rc)
1951 return rc;
1952
1953 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1954 sizeof(get_resp.u.offload));
1955
1956
1957
1958
1959 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1960
1961 if (!rc)
1962 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1963 sizeof(get_resp.u.hw_hints));
1964 else if (rc == -EOPNOTSUPP)
1965 memset(&get_feat_ctx->hw_hints, 0x0,
1966 sizeof(get_feat_ctx->hw_hints));
1967 else
1968 return rc;
1969
1970 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1971 if (!rc)
1972 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1973 sizeof(get_resp.u.llq));
1974 else if (rc == -EOPNOTSUPP)
1975 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1976 else
1977 return rc;
1978
1979 return 0;
1980}
1981
1982void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1983{
1984 ena_com_handle_admin_completion(&ena_dev->admin_queue);
1985}
1986
1987
1988
1989
1990static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1991 u16 group)
1992{
1993 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1994
1995 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1996 return aenq_handlers->handlers[group];
1997
1998 return aenq_handlers->unimplemented_handler;
1999}
2000
2001
2002
2003
2004
2005void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
2006{
2007 struct ena_admin_aenq_entry *aenq_e;
2008 struct ena_admin_aenq_common_desc *aenq_common;
2009 struct ena_com_aenq *aenq = &dev->aenq;
2010 unsigned long long timestamp;
2011 ena_aenq_handler handler_cb;
2012 u16 masked_head, processed = 0;
2013 u8 phase;
2014
2015 masked_head = aenq->head & (aenq->q_depth - 1);
2016 phase = aenq->phase;
2017 aenq_e = &aenq->entries[masked_head];
2018 aenq_common = &aenq_e->aenq_common_desc;
2019
2020
2021 while ((READ_ONCE(aenq_common->flags) &
2022 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2023
2024
2025
2026 dma_rmb();
2027
2028 timestamp =
2029 (unsigned long long)aenq_common->timestamp_low |
2030 ((unsigned long long)aenq_common->timestamp_high << 32);
2031 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
2032 aenq_common->group, aenq_common->syndrom, timestamp);
2033
2034
2035 handler_cb = ena_com_get_specific_aenq_cb(dev,
2036 aenq_common->group);
2037 handler_cb(data, aenq_e);
2038
2039
2040 masked_head++;
2041 processed++;
2042
2043 if (unlikely(masked_head == aenq->q_depth)) {
2044 masked_head = 0;
2045 phase = !phase;
2046 }
2047 aenq_e = &aenq->entries[masked_head];
2048 aenq_common = &aenq_e->aenq_common_desc;
2049 }
2050
2051 aenq->head += processed;
2052 aenq->phase = phase;
2053
2054
2055 if (!processed)
2056 return;
2057
2058
2059 mb();
2060 writel_relaxed((u32)aenq->head,
2061 dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2062}
2063
2064int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2065 enum ena_regs_reset_reason_types reset_reason)
2066{
2067 u32 stat, timeout, cap, reset_val;
2068 int rc;
2069
2070 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2071 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2072
2073 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2074 (cap == ENA_MMIO_READ_TIMEOUT))) {
2075 pr_err("Reg read32 timeout occurred\n");
2076 return -ETIME;
2077 }
2078
2079 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2080 pr_err("Device isn't ready, can't reset device\n");
2081 return -EINVAL;
2082 }
2083
2084 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2085 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2086 if (timeout == 0) {
2087 pr_err("Invalid timeout value\n");
2088 return -EINVAL;
2089 }
2090
2091
2092 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2093 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2094 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2095 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2096
2097
2098 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2099
2100 rc = wait_for_reset_state(ena_dev, timeout,
2101 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2102 if (rc != 0) {
2103 pr_err("Reset indication didn't turn on\n");
2104 return rc;
2105 }
2106
2107
2108 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2109 rc = wait_for_reset_state(ena_dev, timeout, 0);
2110 if (rc != 0) {
2111 pr_err("Reset indication didn't turn off\n");
2112 return rc;
2113 }
2114
2115 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2116 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
2117 if (timeout)
2118
2119 ena_dev->admin_queue.completion_timeout = timeout * 100000;
2120 else
2121 ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2122
2123 return 0;
2124}
2125
2126static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2127 struct ena_com_stats_ctx *ctx,
2128 enum ena_admin_get_stats_type type)
2129{
2130 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2131 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2132 struct ena_com_admin_queue *admin_queue;
2133 int ret;
2134
2135 admin_queue = &ena_dev->admin_queue;
2136
2137 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2138 get_cmd->aq_common_descriptor.flags = 0;
2139 get_cmd->type = type;
2140
2141 ret = ena_com_execute_admin_command(admin_queue,
2142 (struct ena_admin_aq_entry *)get_cmd,
2143 sizeof(*get_cmd),
2144 (struct ena_admin_acq_entry *)get_resp,
2145 sizeof(*get_resp));
2146
2147 if (unlikely(ret))
2148 pr_err("Failed to get stats. error: %d\n", ret);
2149
2150 return ret;
2151}
2152
2153int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2154 struct ena_admin_basic_stats *stats)
2155{
2156 struct ena_com_stats_ctx ctx;
2157 int ret;
2158
2159 memset(&ctx, 0x0, sizeof(ctx));
2160 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2161 if (likely(ret == 0))
2162 memcpy(stats, &ctx.get_resp.basic_stats,
2163 sizeof(ctx.get_resp.basic_stats));
2164
2165 return ret;
2166}
2167
2168int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2169{
2170 struct ena_com_admin_queue *admin_queue;
2171 struct ena_admin_set_feat_cmd cmd;
2172 struct ena_admin_set_feat_resp resp;
2173 int ret;
2174
2175 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2176 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2177 return -EOPNOTSUPP;
2178 }
2179
2180 memset(&cmd, 0x0, sizeof(cmd));
2181 admin_queue = &ena_dev->admin_queue;
2182
2183 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2184 cmd.aq_common_descriptor.flags = 0;
2185 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2186 cmd.u.mtu.mtu = mtu;
2187
2188 ret = ena_com_execute_admin_command(admin_queue,
2189 (struct ena_admin_aq_entry *)&cmd,
2190 sizeof(cmd),
2191 (struct ena_admin_acq_entry *)&resp,
2192 sizeof(resp));
2193
2194 if (unlikely(ret))
2195 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2196
2197 return ret;
2198}
2199
2200int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2201 struct ena_admin_feature_offload_desc *offload)
2202{
2203 int ret;
2204 struct ena_admin_get_feat_resp resp;
2205
2206 ret = ena_com_get_feature(ena_dev, &resp,
2207 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2208 if (unlikely(ret)) {
2209 pr_err("Failed to get offload capabilities %d\n", ret);
2210 return ret;
2211 }
2212
2213 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2214
2215 return 0;
2216}
2217
2218int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2219{
2220 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2221 struct ena_rss *rss = &ena_dev->rss;
2222 struct ena_admin_set_feat_cmd cmd;
2223 struct ena_admin_set_feat_resp resp;
2224 struct ena_admin_get_feat_resp get_resp;
2225 int ret;
2226
2227 if (!ena_com_check_supported_feature_id(ena_dev,
2228 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2229 pr_debug("Feature %d isn't supported\n",
2230 ENA_ADMIN_RSS_HASH_FUNCTION);
2231 return -EOPNOTSUPP;
2232 }
2233
2234
2235 ret = ena_com_get_feature(ena_dev, &get_resp,
2236 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2237 if (unlikely(ret))
2238 return ret;
2239
2240 if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
2241 pr_err("Func hash %d isn't supported by device, abort\n",
2242 rss->hash_func);
2243 return -EOPNOTSUPP;
2244 }
2245
2246 memset(&cmd, 0x0, sizeof(cmd));
2247
2248 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2249 cmd.aq_common_descriptor.flags =
2250 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2251 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2252 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2253 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2254
2255 ret = ena_com_mem_addr_set(ena_dev,
2256 &cmd.control_buffer.address,
2257 rss->hash_key_dma_addr);
2258 if (unlikely(ret)) {
2259 pr_err("memory address set failed\n");
2260 return ret;
2261 }
2262
2263 cmd.control_buffer.length = sizeof(*rss->hash_key);
2264
2265 ret = ena_com_execute_admin_command(admin_queue,
2266 (struct ena_admin_aq_entry *)&cmd,
2267 sizeof(cmd),
2268 (struct ena_admin_acq_entry *)&resp,
2269 sizeof(resp));
2270 if (unlikely(ret)) {
2271 pr_err("Failed to set hash function %d. error: %d\n",
2272 rss->hash_func, ret);
2273 return -EINVAL;
2274 }
2275
2276 return 0;
2277}
2278
2279int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2280 enum ena_admin_hash_functions func,
2281 const u8 *key, u16 key_len, u32 init_val)
2282{
2283 struct ena_rss *rss = &ena_dev->rss;
2284 struct ena_admin_get_feat_resp get_resp;
2285 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2286 rss->hash_key;
2287 int rc;
2288
2289
2290 if (unlikely(key_len & 0x3))
2291 return -EINVAL;
2292
2293 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2294 ENA_ADMIN_RSS_HASH_FUNCTION,
2295 rss->hash_key_dma_addr,
2296 sizeof(*rss->hash_key), 0);
2297 if (unlikely(rc))
2298 return rc;
2299
2300 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2301 pr_err("Flow hash function %d isn't supported\n", func);
2302 return -EOPNOTSUPP;
2303 }
2304
2305 switch (func) {
2306 case ENA_ADMIN_TOEPLITZ:
2307 if (key_len > sizeof(hash_key->key)) {
2308 pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
2309 key_len, sizeof(hash_key->key));
2310 return -EINVAL;
2311 }
2312
2313 memcpy(hash_key->key, key, key_len);
2314 rss->hash_init_val = init_val;
2315 hash_key->keys_num = key_len >> 2;
2316 break;
2317 case ENA_ADMIN_CRC32:
2318 rss->hash_init_val = init_val;
2319 break;
2320 default:
2321 pr_err("Invalid hash function (%d)\n", func);
2322 return -EINVAL;
2323 }
2324
2325 rss->hash_func = func;
2326 rc = ena_com_set_hash_function(ena_dev);
2327
2328
2329 if (unlikely(rc))
2330 ena_com_get_hash_function(ena_dev, NULL, NULL);
2331
2332 return rc;
2333}
2334
2335int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2336 enum ena_admin_hash_functions *func,
2337 u8 *key)
2338{
2339 struct ena_rss *rss = &ena_dev->rss;
2340 struct ena_admin_get_feat_resp get_resp;
2341 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2342 rss->hash_key;
2343 int rc;
2344
2345 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2346 ENA_ADMIN_RSS_HASH_FUNCTION,
2347 rss->hash_key_dma_addr,
2348 sizeof(*rss->hash_key), 0);
2349 if (unlikely(rc))
2350 return rc;
2351
2352 rss->hash_func = get_resp.u.flow_hash_func.selected_func;
2353 if (func)
2354 *func = rss->hash_func;
2355
2356 if (key)
2357 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2358
2359 return 0;
2360}
2361
2362int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2363 enum ena_admin_flow_hash_proto proto,
2364 u16 *fields)
2365{
2366 struct ena_rss *rss = &ena_dev->rss;
2367 struct ena_admin_get_feat_resp get_resp;
2368 int rc;
2369
2370 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2371 ENA_ADMIN_RSS_HASH_INPUT,
2372 rss->hash_ctrl_dma_addr,
2373 sizeof(*rss->hash_ctrl), 0);
2374 if (unlikely(rc))
2375 return rc;
2376
2377 if (fields)
2378 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2379
2380 return 0;
2381}
2382
2383int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2384{
2385 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2386 struct ena_rss *rss = &ena_dev->rss;
2387 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2388 struct ena_admin_set_feat_cmd cmd;
2389 struct ena_admin_set_feat_resp resp;
2390 int ret;
2391
2392 if (!ena_com_check_supported_feature_id(ena_dev,
2393 ENA_ADMIN_RSS_HASH_INPUT)) {
2394 pr_debug("Feature %d isn't supported\n",
2395 ENA_ADMIN_RSS_HASH_INPUT);
2396 return -EOPNOTSUPP;
2397 }
2398
2399 memset(&cmd, 0x0, sizeof(cmd));
2400
2401 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2402 cmd.aq_common_descriptor.flags =
2403 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2404 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2405 cmd.u.flow_hash_input.enabled_input_sort =
2406 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2407 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2408
2409 ret = ena_com_mem_addr_set(ena_dev,
2410 &cmd.control_buffer.address,
2411 rss->hash_ctrl_dma_addr);
2412 if (unlikely(ret)) {
2413 pr_err("memory address set failed\n");
2414 return ret;
2415 }
2416 cmd.control_buffer.length = sizeof(*hash_ctrl);
2417
2418 ret = ena_com_execute_admin_command(admin_queue,
2419 (struct ena_admin_aq_entry *)&cmd,
2420 sizeof(cmd),
2421 (struct ena_admin_acq_entry *)&resp,
2422 sizeof(resp));
2423 if (unlikely(ret))
2424 pr_err("Failed to set hash input. error: %d\n", ret);
2425
2426 return ret;
2427}
2428
2429int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2430{
2431 struct ena_rss *rss = &ena_dev->rss;
2432 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2433 rss->hash_ctrl;
2434 u16 available_fields = 0;
2435 int rc, i;
2436
2437
2438 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2439 if (unlikely(rc))
2440 return rc;
2441
2442 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2443 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2444 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2445
2446 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2447 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2448 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2449
2450 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2451 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2452 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2453
2454 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2455 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2456 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2457
2458 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2459 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2460
2461 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2462 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2463
2464 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2465 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2466
2467 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2468 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2469
2470 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2471 available_fields = hash_ctrl->selected_fields[i].fields &
2472 hash_ctrl->supported_fields[i].fields;
2473 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2474 pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2475 i, hash_ctrl->supported_fields[i].fields,
2476 hash_ctrl->selected_fields[i].fields);
2477 return -EOPNOTSUPP;
2478 }
2479 }
2480
2481 rc = ena_com_set_hash_ctrl(ena_dev);
2482
2483
2484 if (unlikely(rc))
2485 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2486
2487 return rc;
2488}
2489
2490int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2491 enum ena_admin_flow_hash_proto proto,
2492 u16 hash_fields)
2493{
2494 struct ena_rss *rss = &ena_dev->rss;
2495 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2496 u16 supported_fields;
2497 int rc;
2498
2499 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2500 pr_err("Invalid proto num (%u)\n", proto);
2501 return -EINVAL;
2502 }
2503
2504
2505 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2506 if (unlikely(rc))
2507 return rc;
2508
2509
2510 supported_fields = hash_ctrl->supported_fields[proto].fields;
2511 if ((hash_fields & supported_fields) != hash_fields) {
2512 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2513 proto, hash_fields, supported_fields);
2514 }
2515
2516 hash_ctrl->selected_fields[proto].fields = hash_fields;
2517
2518 rc = ena_com_set_hash_ctrl(ena_dev);
2519
2520
2521 if (unlikely(rc))
2522 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2523
2524 return 0;
2525}
2526
2527int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2528 u16 entry_idx, u16 entry_value)
2529{
2530 struct ena_rss *rss = &ena_dev->rss;
2531
2532 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2533 return -EINVAL;
2534
2535 if (unlikely((entry_value > ENA_TOTAL_NUM_QUEUES)))
2536 return -EINVAL;
2537
2538 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2539
2540 return 0;
2541}
2542
2543int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2544{
2545 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2546 struct ena_rss *rss = &ena_dev->rss;
2547 struct ena_admin_set_feat_cmd cmd;
2548 struct ena_admin_set_feat_resp resp;
2549 int ret;
2550
2551 if (!ena_com_check_supported_feature_id(
2552 ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2553 pr_debug("Feature %d isn't supported\n",
2554 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2555 return -EOPNOTSUPP;
2556 }
2557
2558 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2559 if (ret) {
2560 pr_err("Failed to convert host indirection table to device table\n");
2561 return ret;
2562 }
2563
2564 memset(&cmd, 0x0, sizeof(cmd));
2565
2566 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2567 cmd.aq_common_descriptor.flags =
2568 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2569 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2570 cmd.u.ind_table.size = rss->tbl_log_size;
2571 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2572
2573 ret = ena_com_mem_addr_set(ena_dev,
2574 &cmd.control_buffer.address,
2575 rss->rss_ind_tbl_dma_addr);
2576 if (unlikely(ret)) {
2577 pr_err("memory address set failed\n");
2578 return ret;
2579 }
2580
2581 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2582 sizeof(struct ena_admin_rss_ind_table_entry);
2583
2584 ret = ena_com_execute_admin_command(admin_queue,
2585 (struct ena_admin_aq_entry *)&cmd,
2586 sizeof(cmd),
2587 (struct ena_admin_acq_entry *)&resp,
2588 sizeof(resp));
2589
2590 if (unlikely(ret))
2591 pr_err("Failed to set indirect table. error: %d\n", ret);
2592
2593 return ret;
2594}
2595
2596int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2597{
2598 struct ena_rss *rss = &ena_dev->rss;
2599 struct ena_admin_get_feat_resp get_resp;
2600 u32 tbl_size;
2601 int i, rc;
2602
2603 tbl_size = (1ULL << rss->tbl_log_size) *
2604 sizeof(struct ena_admin_rss_ind_table_entry);
2605
2606 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2607 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2608 rss->rss_ind_tbl_dma_addr,
2609 tbl_size, 0);
2610 if (unlikely(rc))
2611 return rc;
2612
2613 if (!ind_tbl)
2614 return 0;
2615
2616 rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2617 if (unlikely(rc))
2618 return rc;
2619
2620 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2621 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2622
2623 return 0;
2624}
2625
2626int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2627{
2628 int rc;
2629
2630 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2631
2632 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2633 if (unlikely(rc))
2634 goto err_indr_tbl;
2635
2636 rc = ena_com_hash_key_allocate(ena_dev);
2637 if (unlikely(rc))
2638 goto err_hash_key;
2639
2640 rc = ena_com_hash_ctrl_init(ena_dev);
2641 if (unlikely(rc))
2642 goto err_hash_ctrl;
2643
2644 return 0;
2645
2646err_hash_ctrl:
2647 ena_com_hash_key_destroy(ena_dev);
2648err_hash_key:
2649 ena_com_indirect_table_destroy(ena_dev);
2650err_indr_tbl:
2651
2652 return rc;
2653}
2654
2655void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2656{
2657 ena_com_indirect_table_destroy(ena_dev);
2658 ena_com_hash_key_destroy(ena_dev);
2659 ena_com_hash_ctrl_destroy(ena_dev);
2660
2661 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2662}
2663
2664int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2665{
2666 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2667
2668 host_attr->host_info =
2669 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2670 &host_attr->host_info_dma_addr, GFP_KERNEL);
2671 if (unlikely(!host_attr->host_info))
2672 return -ENOMEM;
2673
2674 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2675 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2676 (ENA_COMMON_SPEC_VERSION_MINOR));
2677
2678 return 0;
2679}
2680
2681int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2682 u32 debug_area_size)
2683{
2684 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2685
2686 host_attr->debug_area_virt_addr =
2687 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2688 &host_attr->debug_area_dma_addr,
2689 GFP_KERNEL);
2690 if (unlikely(!host_attr->debug_area_virt_addr)) {
2691 host_attr->debug_area_size = 0;
2692 return -ENOMEM;
2693 }
2694
2695 host_attr->debug_area_size = debug_area_size;
2696
2697 return 0;
2698}
2699
2700void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2701{
2702 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2703
2704 if (host_attr->host_info) {
2705 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2706 host_attr->host_info_dma_addr);
2707 host_attr->host_info = NULL;
2708 }
2709}
2710
2711void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2712{
2713 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2714
2715 if (host_attr->debug_area_virt_addr) {
2716 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2717 host_attr->debug_area_virt_addr,
2718 host_attr->debug_area_dma_addr);
2719 host_attr->debug_area_virt_addr = NULL;
2720 }
2721}
2722
2723int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2724{
2725 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2726 struct ena_com_admin_queue *admin_queue;
2727 struct ena_admin_set_feat_cmd cmd;
2728 struct ena_admin_set_feat_resp resp;
2729
2730 int ret;
2731
2732
2733
2734
2735
2736 memset(&cmd, 0x0, sizeof(cmd));
2737 admin_queue = &ena_dev->admin_queue;
2738
2739 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2740 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2741
2742 ret = ena_com_mem_addr_set(ena_dev,
2743 &cmd.u.host_attr.debug_ba,
2744 host_attr->debug_area_dma_addr);
2745 if (unlikely(ret)) {
2746 pr_err("memory address set failed\n");
2747 return ret;
2748 }
2749
2750 ret = ena_com_mem_addr_set(ena_dev,
2751 &cmd.u.host_attr.os_info_ba,
2752 host_attr->host_info_dma_addr);
2753 if (unlikely(ret)) {
2754 pr_err("memory address set failed\n");
2755 return ret;
2756 }
2757
2758 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2759
2760 ret = ena_com_execute_admin_command(admin_queue,
2761 (struct ena_admin_aq_entry *)&cmd,
2762 sizeof(cmd),
2763 (struct ena_admin_acq_entry *)&resp,
2764 sizeof(resp));
2765
2766 if (unlikely(ret))
2767 pr_err("Failed to set host attributes: %d\n", ret);
2768
2769 return ret;
2770}
2771
2772
2773bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2774{
2775 return ena_com_check_supported_feature_id(ena_dev,
2776 ENA_ADMIN_INTERRUPT_MODERATION);
2777}
2778
2779int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2780 u32 tx_coalesce_usecs)
2781{
2782 if (!ena_dev->intr_delay_resolution) {
2783 pr_err("Illegal interrupt delay granularity value\n");
2784 return -EFAULT;
2785 }
2786
2787 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2788 ena_dev->intr_delay_resolution;
2789
2790 return 0;
2791}
2792
2793int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2794 u32 rx_coalesce_usecs)
2795{
2796 if (!ena_dev->intr_delay_resolution) {
2797 pr_err("Illegal interrupt delay granularity value\n");
2798 return -EFAULT;
2799 }
2800
2801
2802
2803
2804 ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2805 rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2806
2807 return 0;
2808}
2809
2810void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2811{
2812 if (ena_dev->intr_moder_tbl)
2813 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2814 ena_dev->intr_moder_tbl = NULL;
2815}
2816
2817int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2818{
2819 struct ena_admin_get_feat_resp get_resp;
2820 u16 delay_resolution;
2821 int rc;
2822
2823 rc = ena_com_get_feature(ena_dev, &get_resp,
2824 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2825
2826 if (rc) {
2827 if (rc == -EOPNOTSUPP) {
2828 pr_debug("Feature %d isn't supported\n",
2829 ENA_ADMIN_INTERRUPT_MODERATION);
2830 rc = 0;
2831 } else {
2832 pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2833 rc);
2834 }
2835
2836
2837 ena_com_disable_adaptive_moderation(ena_dev);
2838 return rc;
2839 }
2840
2841 rc = ena_com_init_interrupt_moderation_table(ena_dev);
2842 if (rc)
2843 goto err;
2844
2845
2846 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2847 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2848
2849
2850
2851
2852 ena_com_disable_adaptive_moderation(ena_dev);
2853
2854 return 0;
2855err:
2856 ena_com_destroy_interrupt_moderation(ena_dev);
2857 return rc;
2858}
2859
2860void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2861{
2862 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2863
2864 if (!intr_moder_tbl)
2865 return;
2866
2867 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2868 ENA_INTR_LOWEST_USECS;
2869 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2870 ENA_INTR_LOWEST_PKTS;
2871 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2872 ENA_INTR_LOWEST_BYTES;
2873
2874 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2875 ENA_INTR_LOW_USECS;
2876 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2877 ENA_INTR_LOW_PKTS;
2878 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2879 ENA_INTR_LOW_BYTES;
2880
2881 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2882 ENA_INTR_MID_USECS;
2883 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2884 ENA_INTR_MID_PKTS;
2885 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2886 ENA_INTR_MID_BYTES;
2887
2888 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2889 ENA_INTR_HIGH_USECS;
2890 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2891 ENA_INTR_HIGH_PKTS;
2892 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2893 ENA_INTR_HIGH_BYTES;
2894
2895 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2896 ENA_INTR_HIGHEST_USECS;
2897 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2898 ENA_INTR_HIGHEST_PKTS;
2899 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2900 ENA_INTR_HIGHEST_BYTES;
2901}
2902
2903unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2904{
2905 return ena_dev->intr_moder_tx_interval;
2906}
2907
2908unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2909{
2910 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2911
2912 if (intr_moder_tbl)
2913 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
2914
2915 return 0;
2916}
2917
2918void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2919 enum ena_intr_moder_level level,
2920 struct ena_intr_moder_entry *entry)
2921{
2922 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2923
2924 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2925 return;
2926
2927 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
2928 if (ena_dev->intr_delay_resolution)
2929 intr_moder_tbl[level].intr_moder_interval /=
2930 ena_dev->intr_delay_resolution;
2931 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;
2932
2933
2934 if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
2935 intr_moder_tbl[level].bytes_per_interval = entry->bytes_per_interval;
2936}
2937
2938void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2939 enum ena_intr_moder_level level,
2940 struct ena_intr_moder_entry *entry)
2941{
2942 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2943
2944 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2945 return;
2946
2947 entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
2948 if (ena_dev->intr_delay_resolution)
2949 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2950 entry->pkts_per_interval =
2951 intr_moder_tbl[level].pkts_per_interval;
2952 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
2953}
2954
2955int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2956 struct ena_admin_feature_llq_desc *llq_features,
2957 struct ena_llq_configurations *llq_default_cfg)
2958{
2959 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2960 int rc;
2961
2962 if (!llq_features->max_llq_num) {
2963 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2964 return 0;
2965 }
2966
2967 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2968 if (rc)
2969 return rc;
2970
2971 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2972 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2973
2974 if (unlikely(ena_dev->tx_max_header_size == 0)) {
2975 pr_err("the size of the LLQ entry is smaller than needed\n");
2976 return -EINVAL;
2977 }
2978
2979 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2980
2981 return 0;
2982}
2983