#include "ena_com.h"

#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR          0
#define ENA_CTRL_MINOR          0
#define ENA_CTRL_SUB_MINOR      1

#define MIN_ENA_CTRL_VER \
        (((ENA_CTRL_MAJOR) << \
        (ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
        ((ENA_CTRL_MINOR) << \
        (ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
        (ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)   ((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)  ((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT 4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS 5

enum ena_cmd_status {
        ENA_CMD_SUBMITTED,
        ENA_CMD_COMPLETED,
        /* Abort - canceled by the driver */
        ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
        struct completion wait_event;
        struct ena_admin_acq_entry *user_cqe;
        u32 comp_size;
        enum ena_cmd_status status;
        /* status from the device */
        u8 comp_status;
        u8 cmd_opcode;
        bool occupied;
};

struct ena_com_stats_ctx {
        struct ena_admin_aq_get_stats_cmd get_cmd;
        struct ena_admin_acq_get_stats_resp get_resp;
};

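/* Split a dma_addr_t into the low/high parts the device descriptor expects,
 * after verifying the address fits within the DMA mask the device reported.
 */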
static inline int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
                                       struct ena_common_mem_addr *ena_addr,
                                       dma_addr_t addr)
{
        if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
                pr_err("dma address has more bits than the device supports\n");
                return -EINVAL;
        }

        ena_addr->mem_addr_low = lower_32_bits(addr);
        ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

        return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
        struct ena_com_admin_sq *sq = &queue->sq;
        u16 size = ADMIN_SQ_SIZE(queue->q_depth);

        sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
                                         GFP_KERNEL);

        if (!sq->entries) {
                pr_err("memory allocation failed\n");
                return -ENOMEM;
        }

        sq->head = 0;
        sq->tail = 0;
        sq->phase = 1;

        sq->db_addr = NULL;

        return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
        struct ena_com_admin_cq *cq = &queue->cq;
        u16 size = ADMIN_CQ_SIZE(queue->q_depth);

        cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
                                         GFP_KERNEL);

        if (!cq->entries) {
                pr_err("memory allocation failed\n");
                return -ENOMEM;
        }

        cq->head = 0;
        cq->phase = 1;

        return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
                                   struct ena_aenq_handlers *aenq_handlers)
{
        struct ena_com_aenq *aenq = &dev->aenq;
        u32 addr_low, addr_high, aenq_caps;
        u16 size;

        dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
        size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
        aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
                                           GFP_KERNEL);

        if (!aenq->entries) {
                pr_err("memory allocation failed\n");
                return -ENOMEM;
        }

        aenq->head = aenq->q_depth;
        aenq->phase = 1;

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

        writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
        writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

        aenq_caps = 0;
        aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
        aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
                      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
                     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
        writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

        if (unlikely(!aenq_handlers)) {
                pr_err("aenq handlers pointer is NULL\n");
                return -EINVAL;
        }

        aenq->aenq_handlers = aenq_handlers;

        return 0;
}

static inline void comp_ctxt_release(struct ena_com_admin_queue *queue,
                                     struct ena_comp_ctx *comp_ctx)
{
        comp_ctx->occupied = false;
        atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
                                          u16 command_id, bool capture)
{
        if (unlikely(command_id >= queue->q_depth)) {
                pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
                       command_id, queue->q_depth);
                return NULL;
        }

        if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
                pr_err("Completion context is occupied\n");
                return NULL;
        }

        if (capture) {
                atomic_inc(&queue->outstanding_cmds);
                queue->comp_ctx[command_id].occupied = true;
        }

        return &queue->comp_ctx[command_id];
}

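/* Queue one admin command: stamp the current phase and a fresh command id into
 * the descriptor, copy it into the SQ ring, and ring the SQ doorbell. Called
 * with the admin queue lock held.
 */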
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                       struct ena_admin_aq_entry *cmd,
                                                       size_t cmd_size_in_bytes,
                                                       struct ena_admin_acq_entry *comp,
                                                       size_t comp_size_in_bytes)
{
        struct ena_comp_ctx *comp_ctx;
        u16 tail_masked, cmd_id;
        u16 queue_size_mask;
        u16 cnt;

        queue_size_mask = admin_queue->q_depth - 1;

        tail_masked = admin_queue->sq.tail & queue_size_mask;

        /* In case of queue FULL */
        cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
        if (cnt >= admin_queue->q_depth) {
                pr_debug("admin queue is full.\n");
                admin_queue->stats.out_of_space++;
                return ERR_PTR(-ENOSPC);
        }

        cmd_id = admin_queue->curr_cmd_id;

        cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
                ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

        cmd->aq_common_descriptor.command_id |= cmd_id &
                ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

        comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
        if (unlikely(!comp_ctx))
                return ERR_PTR(-EINVAL);

        comp_ctx->status = ENA_CMD_SUBMITTED;
        comp_ctx->comp_size = (u32)comp_size_in_bytes;
        comp_ctx->user_cqe = comp;
        comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

        reinit_completion(&comp_ctx->wait_event);

        memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

        admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
                queue_size_mask;

        admin_queue->sq.tail++;
        admin_queue->stats.submitted_cmd++;

        if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
                admin_queue->sq.phase = !admin_queue->sq.phase;

        writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

        return comp_ctx;
}

static inline int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
        size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
        struct ena_comp_ctx *comp_ctx;
        u16 i;

        queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
        if (unlikely(!queue->comp_ctx)) {
                pr_err("memory allocation failed\n");
                return -ENOMEM;
        }

        for (i = 0; i < queue->q_depth; i++) {
                comp_ctx = get_comp_ctxt(queue, i, false);
                if (comp_ctx)
                        init_completion(&comp_ctx->wait_event);
        }

        return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
                                                     struct ena_admin_aq_entry *cmd,
                                                     size_t cmd_size_in_bytes,
                                                     struct ena_admin_acq_entry *comp,
                                                     size_t comp_size_in_bytes)
{
        unsigned long flags = 0;
        struct ena_comp_ctx *comp_ctx;

        spin_lock_irqsave(&admin_queue->q_lock, flags);
        if (unlikely(!admin_queue->running_state)) {
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
                return ERR_PTR(-ENODEV);
        }
        comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
                                              cmd_size_in_bytes,
                                              comp,
                                              comp_size_in_bytes);
        if (IS_ERR(comp_ctx))
                admin_queue->running_state = false;
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);

        return comp_ctx;
}

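/* Allocate the descriptor ring for an IO SQ. Host-memory queues get a
 * DMA-coherent ring (preferably on the requested NUMA node, falling back to
 * any node); device-memory (LLQ) queues get host bounce buffers instead.
 */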
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_sq *io_sq)
{
        size_t size;
        int dev_node = 0;

        memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

        io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
        io_sq->desc_entry_size =
                (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_desc) :
                sizeof(struct ena_eth_io_rx_desc);

        size = io_sq->desc_entry_size * io_sq->q_depth;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
                dev_node = dev_to_node(ena_dev->dmadev);
                set_dev_node(ena_dev->dmadev, ctx->numa_node);
                io_sq->desc_addr.virt_addr =
                        dma_alloc_coherent(ena_dev->dmadev, size,
                                           &io_sq->desc_addr.phys_addr,
                                           GFP_KERNEL);
                set_dev_node(ena_dev->dmadev, dev_node);
                if (!io_sq->desc_addr.virt_addr) {
                        io_sq->desc_addr.virt_addr =
                                dma_alloc_coherent(ena_dev->dmadev, size,
                                                   &io_sq->desc_addr.phys_addr,
                                                   GFP_KERNEL);
                }

                if (!io_sq->desc_addr.virt_addr) {
                        pr_err("memory allocation failed\n");
                        return -ENOMEM;
                }
        }

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                /* Allocate bounce buffers */
                io_sq->bounce_buf_ctrl.buffer_size =
                        ena_dev->llq_info.desc_list_entry_size;
                io_sq->bounce_buf_ctrl.buffers_num =
                        ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
                io_sq->bounce_buf_ctrl.next_to_use = 0;

                size = io_sq->bounce_buf_ctrl.buffer_size *
                       io_sq->bounce_buf_ctrl.buffers_num;

                dev_node = dev_to_node(ena_dev->dmadev);
                set_dev_node(ena_dev->dmadev, ctx->numa_node);
                io_sq->bounce_buf_ctrl.base_buffer =
                        devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
                set_dev_node(ena_dev->dmadev, dev_node);
                if (!io_sq->bounce_buf_ctrl.base_buffer)
                        io_sq->bounce_buf_ctrl.base_buffer =
                                devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

                if (!io_sq->bounce_buf_ctrl.base_buffer) {
                        pr_err("bounce buffer memory allocation failed\n");
                        return -ENOMEM;
                }

                memcpy(&io_sq->llq_info, &ena_dev->llq_info,
                       sizeof(io_sq->llq_info));

                /* Initiate the first bounce buffer */
                io_sq->llq_buf_ctrl.curr_bounce_buf =
                        ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
                memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
                       0x0, io_sq->llq_info.desc_list_entry_size);
                io_sq->llq_buf_ctrl.descs_left_in_line =
                        io_sq->llq_info.descs_num_before_header;
        }

        io_sq->tail = 0;
        io_sq->next_to_comp = 0;
        io_sq->phase = 1;

        return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
                              struct ena_com_create_io_ctx *ctx,
                              struct ena_com_io_cq *io_cq)
{
        size_t size;
        int prev_node = 0;

        memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

        /* Use the basic completion descriptor for Rx */
        io_cq->cdesc_entry_size_in_bytes =
                (io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
                sizeof(struct ena_eth_io_tx_cdesc) :
                sizeof(struct ena_eth_io_rx_cdesc_base);

        size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

        prev_node = dev_to_node(ena_dev->dmadev);
        set_dev_node(ena_dev->dmadev, ctx->numa_node);
        io_cq->cdesc_addr.virt_addr =
                dma_alloc_coherent(ena_dev->dmadev, size,
                                   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
        set_dev_node(ena_dev->dmadev, prev_node);
        if (!io_cq->cdesc_addr.virt_addr) {
                io_cq->cdesc_addr.virt_addr =
                        dma_alloc_coherent(ena_dev->dmadev, size,
                                           &io_cq->cdesc_addr.phys_addr,
                                           GFP_KERNEL);
        }

        if (!io_cq->cdesc_addr.virt_addr) {
                pr_err("memory allocation failed\n");
                return -ENOMEM;
        }

        io_cq->phase = 1;
        io_cq->head = 0;

        return 0;
}

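/* Match one ACQ completion entry back to its waiting context by command id,
 * copy the completion to the caller's buffer, and wake the waiter when the
 * queue is in interrupt mode.
 */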
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
                                                   struct ena_admin_acq_entry *cqe)
{
        struct ena_comp_ctx *comp_ctx;
        u16 cmd_id;

        cmd_id = cqe->acq_common_descriptor.command &
                 ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

        comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
        if (unlikely(!comp_ctx)) {
                pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
                admin_queue->running_state = false;
                return;
        }

        comp_ctx->status = ENA_CMD_COMPLETED;
        comp_ctx->comp_status = cqe->acq_common_descriptor.status;

        if (comp_ctx->user_cqe)
                memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

        if (!admin_queue->polling)
                complete(&comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
        struct ena_admin_acq_entry *cqe = NULL;
        u16 comp_num = 0;
        u16 head_masked;
        u8 phase;

        head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
        phase = admin_queue->cq.phase;

        cqe = &admin_queue->cq.entries[head_masked];

        /* Go over all the completions */
        while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
                ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Do not read the rest of the completion entry before the
                 * phase bit has been validated.
                 */
                dma_rmb();
                ena_com_handle_single_admin_completion(admin_queue, cqe);

                head_masked++;
                comp_num++;
                if (unlikely(head_masked == admin_queue->q_depth)) {
                        head_masked = 0;
                        phase = !phase;
                }

                cqe = &admin_queue->cq.entries[head_masked];
        }

        admin_queue->cq.head += comp_num;
        admin_queue->cq.phase = phase;
        admin_queue->sq.head += comp_num;
        admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
        if (unlikely(comp_status != 0))
                pr_err("admin command failed[%u]\n", comp_status);

        if (unlikely(comp_status > ENA_ADMIN_UNKNOWN_ERROR))
                return -EINVAL;

        switch (comp_status) {
        case ENA_ADMIN_SUCCESS:
                return 0;
        case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
                return -ENOMEM;
        case ENA_ADMIN_UNSUPPORTED_OPCODE:
                return -EOPNOTSUPP;
        case ENA_ADMIN_BAD_OPCODE:
        case ENA_ADMIN_MALFORMED_REQUEST:
        case ENA_ADMIN_ILLEGAL_PARAMETER:
        case ENA_ADMIN_UNKNOWN_ERROR:
                return -EINVAL;
        }

        return 0;
}

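/* Busy-poll the ACQ until the command completes or the admin timeout expires,
 * sleeping ENA_POLL_MS between polls. On timeout the queue is marked as not
 * running so subsequent submissions fail fast.
 */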
static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
                                                     struct ena_com_admin_queue *admin_queue)
{
        unsigned long flags = 0;
        unsigned long timeout;
        int ret;

        timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

        while (1) {
                spin_lock_irqsave(&admin_queue->q_lock, flags);
                ena_com_handle_admin_completion(admin_queue);
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);

                if (comp_ctx->status != ENA_CMD_SUBMITTED)
                        break;

                if (time_is_before_jiffies(timeout)) {
                        pr_err("Wait for completion (polling) timeout\n");
                        /* ENA didn't have any completion */
                        spin_lock_irqsave(&admin_queue->q_lock, flags);
                        admin_queue->stats.no_completion++;
                        admin_queue->running_state = false;
                        spin_unlock_irqrestore(&admin_queue->q_lock, flags);

                        ret = -ETIME;
                        goto err;
                }

                msleep(ENA_POLL_MS);
        }

        if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
                pr_err("Command was aborted\n");
                spin_lock_irqsave(&admin_queue->q_lock, flags);
                admin_queue->stats.aborted_cmd++;
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
                ret = -ENODEV;
                goto err;
        }

        WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
             comp_ctx->status);

        ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
        comp_ctxt_release(admin_queue, comp_ctx);
        return ret;
}

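/* Push the chosen LLQ (low-latency queue) configuration to the device via a
 * SET_FEATURE admin command; the device is expected to validate that the
 * enabled values are ones it supports.
 */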
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
        int ret;

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

        cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
        cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
        cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
        cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to set LLQ configurations: %d\n", ret);

        return ret;
}

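/* Reconcile the driver's default LLQ configuration with the feature set the
 * device advertises: take the default when supported, otherwise fall back to
 * the best supported alternative (logging the fallback), or fail with -EINVAL
 * when nothing usable is supported.
 */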
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
                                   struct ena_admin_feature_llq_desc *llq_features,
                                   struct ena_llq_configurations *llq_default_cfg)
{
        struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
        u16 supported_feat;
        int rc;

        memset(llq_info, 0, sizeof(*llq_info));

        supported_feat = llq_features->header_location_ctrl_supported;

        if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
                llq_info->header_location_ctrl =
                        llq_default_cfg->llq_header_location;
        } else {
                pr_err("Invalid header location control, supported: 0x%x\n",
                       supported_feat);
                return -EINVAL;
        }

        if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
                supported_feat = llq_features->descriptors_stride_ctrl_supported;
                if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
                        llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
                } else {
                        if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
                                llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
                        } else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
                                llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
                        } else {
                                pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
                                       supported_feat);
                                return -EINVAL;
                        }

                        pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                               llq_default_cfg->llq_stride_ctrl, supported_feat,
                               llq_info->desc_stride_ctrl);
                }
        } else {
                llq_info->desc_stride_ctrl = 0;
        }

        supported_feat = llq_features->entry_size_ctrl_supported;
        if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
                llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
                llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
        } else {
                if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
                        llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
                        llq_info->desc_list_entry_size = 128;
                } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
                        llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
                        llq_info->desc_list_entry_size = 192;
                } else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
                        llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
                        llq_info->desc_list_entry_size = 256;
                } else {
                        pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
                               supported_feat);
                        return -EINVAL;
                }

                pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                       llq_default_cfg->llq_ring_entry_size, supported_feat,
                       llq_info->desc_list_entry_size);
        }
        if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
                /* The desc list entry size should be a whole multiple of 8 */
                pr_err("illegal entry size %d\n",
                       llq_info->desc_list_entry_size);
                return -EINVAL;
        }

        if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
                llq_info->descs_per_entry = llq_info->desc_list_entry_size /
                        sizeof(struct ena_eth_io_tx_desc);
        else
                llq_info->descs_per_entry = 1;

        supported_feat = llq_features->desc_num_before_header_supported;
        if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
                llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
        } else {
                if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
                        llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
                } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
                        llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
                } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
                        llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
                } else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
                        llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
                } else {
                        pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
                               supported_feat);
                        return -EINVAL;
                }

                pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
                       llq_default_cfg->llq_num_decs_before_header,
                       supported_feat, llq_info->descs_num_before_header);
        }

        rc = ena_com_set_llq(ena_dev);
        if (rc)
                pr_err("Cannot set LLQ configuration: %d\n", rc);

        return rc;
}

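/* Sleep on the command's completion object until the ISR signals it or the
 * admin timeout expires. On timeout, poll the ACQ once more to distinguish a
 * lost MSI-X interrupt from a device that never completed the command.
 */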
static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
                                                        struct ena_com_admin_queue *admin_queue)
{
        unsigned long flags = 0;
        int ret;

        wait_for_completion_timeout(&comp_ctx->wait_event,
                                    usecs_to_jiffies(
                                            admin_queue->completion_timeout));

        /* In case the command wasn't completed, find out the root cause.
         * There might be 2 kinds of errors:
         * 1) No completion (timeout reached)
         * 2) There is a completion but the device didn't get any MSI-X
         *    interrupt.
         */
        if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
                spin_lock_irqsave(&admin_queue->q_lock, flags);
                ena_com_handle_admin_completion(admin_queue);
                admin_queue->stats.no_completion++;
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);

                if (comp_ctx->status == ENA_CMD_COMPLETED)
                        pr_err("The ena device has a completion but the driver didn't receive any MSI-X interrupt (cmd %d)\n",
                               comp_ctx->cmd_opcode);
                else
                        pr_err("The ena device didn't send any completion for the admin cmd %d status %d\n",
                               comp_ctx->cmd_opcode, comp_ctx->status);

                admin_queue->running_state = false;
                ret = -ETIME;
                goto err;
        }

        ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
        comp_ctxt_release(admin_queue, comp_ctx);
        return ret;
}

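/* Read a device register. When "readless" reads are supported, the read is
 * posted through the MMIO_REG_READ register and the device writes the value
 * into a host response buffer, which this routine polls; otherwise a plain
 * readl() is used.
 */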
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
                mmio_read->read_resp;
        u32 mmio_read_reg, ret, i;
        unsigned long flags = 0;
        u32 timeout = mmio_read->reg_read_to;

        might_sleep();

        if (timeout == 0)
                timeout = ENA_REG_READ_TIMEOUT;

        /* If readless is disabled, perform a regular read */
        if (!mmio_read->readless_supported)
                return readl(ena_dev->reg_bar + offset);

        spin_lock_irqsave(&mmio_read->lock, flags);
        mmio_read->seq_num++;

        read_resp->req_id = mmio_read->seq_num + 0xDEAD;
        mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
                        ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
        mmio_read_reg |= mmio_read->seq_num &
                         ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

        writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

        for (i = 0; i < timeout; i++) {
                if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
                        break;

                udelay(1);
        }

        if (unlikely(i == timeout)) {
                pr_err("reading reg failed: timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
                       mmio_read->seq_num, offset, read_resp->req_id,
                       read_resp->reg_off);
                ret = ENA_MMIO_READ_TIMEOUT;
                goto err;
        }

        if (read_resp->reg_off != offset) {
                pr_err("Read failure: wrong offset provided\n");
                ret = ENA_MMIO_READ_TIMEOUT;
        } else {
                ret = read_resp->reg_val;
        }
err:
        spin_unlock_irqrestore(&mmio_read->lock, flags);

        return ret;
}

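/* There are two ways to wait for an admin completion: polling mode, where the
 * ACQ is polled directly, and interrupt mode, where the caller sleeps until
 * the MSI-X handler marks the completion.
 */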
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
                                             struct ena_com_admin_queue *admin_queue)
{
        if (admin_queue->polling)
                return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
                                                                 admin_queue);

        return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
                                                            admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
                                 struct ena_com_io_sq *io_sq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
        struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
        u8 direction;
        int ret;

        memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

        if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                direction = ENA_ADMIN_SQ_DIRECTION_TX;
        else
                direction = ENA_ADMIN_SQ_DIRECTION_RX;

        destroy_cmd.sq.sq_identity |= (direction <<
                ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
                ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

        destroy_cmd.sq.sq_idx = io_sq->idx;
        destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&destroy_cmd,
                                            sizeof(destroy_cmd),
                                            (struct ena_admin_acq_entry *)&destroy_resp,
                                            sizeof(destroy_resp));

        if (unlikely(ret && (ret != -ENODEV)))
                pr_err("failed to destroy io sq error: %d\n", ret);

        return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
                                  struct ena_com_io_sq *io_sq,
                                  struct ena_com_io_cq *io_cq)
{
        size_t size;

        if (io_cq->cdesc_addr.virt_addr) {
                size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

                dma_free_coherent(ena_dev->dmadev, size,
                                  io_cq->cdesc_addr.virt_addr,
                                  io_cq->cdesc_addr.phys_addr);

                io_cq->cdesc_addr.virt_addr = NULL;
        }

        if (io_sq->desc_addr.virt_addr) {
                size = io_sq->desc_entry_size * io_sq->q_depth;

                dma_free_coherent(ena_dev->dmadev, size,
                                  io_sq->desc_addr.virt_addr,
                                  io_sq->desc_addr.phys_addr);

                io_sq->desc_addr.virt_addr = NULL;
        }

        if (io_sq->bounce_buf_ctrl.base_buffer) {
                devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
                io_sq->bounce_buf_ctrl.base_buffer = NULL;
        }
}

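/* Poll the device status register until the reset-in-progress bit matches
 * exp_state or the timeout (given in units of 100ms) expires.
 */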
static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
                                u16 exp_state)
{
        u32 val, i;

        /* Convert timeout from resolution of 100ms to ENA_POLL_MS */
        timeout = (timeout * 100) / ENA_POLL_MS;

        for (i = 0; i < timeout; i++) {
                val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

                if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
                        pr_err("Reg read timeout occurred\n");
                        return -ETIME;
                }

                if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
                    exp_state)
                        return 0;

                msleep(ENA_POLL_MS);
        }

        return -ETIME;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
                                               enum ena_admin_aq_feature_id feature_id)
{
        u32 feature_mask = 1 << feature_id;

        /* Device attributes is always supported */
        if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
            !(ena_dev->supported_features & feature_mask))
                return false;

        return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
                                  struct ena_admin_get_feat_resp *get_resp,
                                  enum ena_admin_aq_feature_id feature_id,
                                  dma_addr_t control_buf_dma_addr,
                                  u32 control_buff_size)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_get_feat_cmd get_cmd;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
                pr_debug("Feature %d isn't supported\n", feature_id);
                return -EOPNOTSUPP;
        }

        memset(&get_cmd, 0x0, sizeof(get_cmd));
        admin_queue = &ena_dev->admin_queue;

        get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

        if (control_buff_size)
                get_cmd.aq_common_descriptor.flags =
                        ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        else
                get_cmd.aq_common_descriptor.flags = 0;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &get_cmd.control_buffer.address,
                                   control_buf_dma_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        get_cmd.control_buffer.length = control_buff_size;

        get_cmd.feat_common.feature_id = feature_id;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)
                                            &get_cmd,
                                            sizeof(get_cmd),
                                            (struct ena_admin_acq_entry *)
                                            get_resp,
                                            sizeof(*get_resp));

        if (unlikely(ret))
                pr_err("Failed to submit get_feature command %d error: %d\n",
                       feature_id, ret);

        return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
                               struct ena_admin_get_feat_resp *get_resp,
                               enum ena_admin_aq_feature_id feature_id)
{
        return ena_com_get_feature_ex(ena_dev,
                                      get_resp,
                                      feature_id,
                                      0,
                                      0);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        rss->hash_key =
                dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
                                   &rss->hash_key_dma_addr, GFP_KERNEL);

        if (unlikely(!rss->hash_key))
                return -ENOMEM;

        return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (rss->hash_key)
                dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
                                  rss->hash_key, rss->hash_key_dma_addr);
        rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        rss->hash_ctrl =
                dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
                                   &rss->hash_ctrl_dma_addr, GFP_KERNEL);

        if (unlikely(!rss->hash_ctrl))
                return -ENOMEM;

        return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (rss->hash_ctrl)
                dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
                                  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
        rss->hash_ctrl = NULL;
}

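/* Validate the requested RSS indirection table size against the device's
 * min/max and allocate both the DMA table shared with the device and the
 * host-side shadow table.
 */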
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
                                           u16 log_size)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        size_t tbl_size;
        int ret;

        ret = ena_com_get_feature(ena_dev, &get_resp,
                                  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
        if (unlikely(ret))
                return ret;

        if ((get_resp.u.ind_table.min_size > log_size) ||
            (get_resp.u.ind_table.max_size < log_size)) {
                pr_err("indirect table size doesn't fit. requested size: %d while min is:%d and max %d\n",
                       1 << log_size, 1 << get_resp.u.ind_table.min_size,
                       1 << get_resp.u.ind_table.max_size);
                return -EINVAL;
        }

        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        rss->rss_ind_tbl =
                dma_alloc_coherent(ena_dev->dmadev, tbl_size,
                                   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
        if (unlikely(!rss->rss_ind_tbl))
                goto mem_err1;

        tbl_size = (1ULL << log_size) * sizeof(u16);
        rss->host_rss_ind_tbl =
                devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
        if (unlikely(!rss->host_rss_ind_tbl))
                goto mem_err2;

        rss->tbl_log_size = log_size;

        return 0;

mem_err2:
        tbl_size = (1ULL << log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
                          rss->rss_ind_tbl_dma_addr);
        rss->rss_ind_tbl = NULL;
mem_err1:
        rss->tbl_log_size = 0;
        return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        size_t tbl_size = (1ULL << rss->tbl_log_size) *
                sizeof(struct ena_admin_rss_ind_table_entry);

        if (rss->rss_ind_tbl)
                dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
                                  rss->rss_ind_tbl_dma_addr);
        rss->rss_ind_tbl = NULL;

        if (rss->host_rss_ind_tbl)
                devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
        rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
                                struct ena_com_io_sq *io_sq, u16 cq_idx)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_create_sq_cmd create_cmd;
        struct ena_admin_acq_create_sq_resp_desc cmd_completion;
        u8 direction;
        int ret;

        memset(&create_cmd, 0x0, sizeof(create_cmd));

        create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

        if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                direction = ENA_ADMIN_SQ_DIRECTION_TX;
        else
                direction = ENA_ADMIN_SQ_DIRECTION_RX;

        create_cmd.sq_identity |= (direction <<
                ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

        create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

        create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
                ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
                ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

        create_cmd.sq_caps_3 |=
                ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

        create_cmd.cq_idx = cq_idx;
        create_cmd.sq_depth = io_sq->q_depth;

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
                ret = ena_com_mem_addr_set(ena_dev,
                                           &create_cmd.sq_ba,
                                           io_sq->desc_addr.phys_addr);
                if (unlikely(ret)) {
                        pr_err("memory address set failed\n");
                        return ret;
                }
        }

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&create_cmd,
                                            sizeof(create_cmd),
                                            (struct ena_admin_acq_entry *)&cmd_completion,
                                            sizeof(cmd_completion));
        if (unlikely(ret)) {
                pr_err("Failed to create IO SQ. error: %d\n", ret);
                return ret;
        }

        io_sq->idx = cmd_completion.sq_idx;

        io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                (uintptr_t)cmd_completion.sq_doorbell_offset);

        if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
                io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
                                + cmd_completion.llq_headers_offset);

                io_sq->desc_addr.pbuf_dev_addr =
                        (u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
                        cmd_completion.llq_descriptors_offset);
        }

        pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

        return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_com_io_sq *io_sq;
        u16 qid;
        int i;

        for (i = 0; i < 1 << rss->tbl_log_size; i++) {
                qid = rss->host_rss_ind_tbl[i];
                if (qid >= ENA_TOTAL_NUM_QUEUES)
                        return -EINVAL;

                io_sq = &ena_dev->io_sq_queues[qid];

                if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
                        return -EINVAL;

                rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
        }

        return 0;
}

static int ena_com_ind_tbl_convert_from_device(struct ena_com_dev *ena_dev)
{
        u16 dev_idx_to_host_tbl[ENA_TOTAL_NUM_QUEUES] = { (u16)-1 };
        struct ena_rss *rss = &ena_dev->rss;
        u8 idx;
        u16 i;

        for (i = 0; i < ENA_TOTAL_NUM_QUEUES; i++)
                dev_idx_to_host_tbl[ena_dev->io_sq_queues[i].idx] = i;

        for (i = 0; i < 1 << rss->tbl_log_size; i++) {
                if (rss->rss_ind_tbl[i].cq_idx >= ENA_TOTAL_NUM_QUEUES)
                        return -EINVAL;
                idx = (u8)rss->rss_ind_tbl[i].cq_idx;

                if (dev_idx_to_host_tbl[idx] >= ENA_TOTAL_NUM_QUEUES)
                        return -EINVAL;

                rss->host_rss_ind_tbl[i] = dev_idx_to_host_tbl[idx];
        }

        return 0;
}

static int ena_com_init_interrupt_moderation_table(struct ena_com_dev *ena_dev)
{
        size_t size;

        size = sizeof(struct ena_intr_moder_entry) * ENA_INTR_MAX_NUM_OF_LEVELS;

        ena_dev->intr_moder_tbl =
                devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
        if (!ena_dev->intr_moder_tbl)
                return -ENOMEM;

        ena_com_config_default_interrupt_moderation_table(ena_dev);

        return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
                                                 u16 intr_delay_resolution)
{
        struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
        unsigned int i;

        if (!intr_delay_resolution) {
                pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
                intr_delay_resolution = 1;
        }
        ena_dev->intr_delay_resolution = intr_delay_resolution;

        /* update Rx */
        for (i = 0; i < ENA_INTR_MAX_NUM_OF_LEVELS; i++)
                intr_moder_tbl[i].intr_moder_interval /= intr_delay_resolution;

        /* update Tx */
        ena_dev->intr_moder_tx_interval /= intr_delay_resolution;
}

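/* Submit an admin command and block until its completion is processed,
 * returning the errno-translated completion status.
 */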
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
                                  struct ena_admin_aq_entry *cmd,
                                  size_t cmd_size,
                                  struct ena_admin_acq_entry *comp,
                                  size_t comp_size)
{
        struct ena_comp_ctx *comp_ctx;
        int ret;

        comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
                                            comp, comp_size);
        if (IS_ERR(comp_ctx)) {
                if (comp_ctx == ERR_PTR(-ENODEV))
                        pr_debug("Failed to submit command [%ld]\n",
                                 PTR_ERR(comp_ctx));
                else
                        pr_err("Failed to submit command [%ld]\n",
                               PTR_ERR(comp_ctx));

                return PTR_ERR(comp_ctx);
        }

        ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
        if (unlikely(ret)) {
                if (admin_queue->running_state)
                        pr_err("Failed to process command. ret = %d\n", ret);
                else
                        pr_debug("Failed to process command. ret = %d\n", ret);
        }
        return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
                         struct ena_com_io_cq *io_cq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_create_cq_cmd create_cmd;
        struct ena_admin_acq_create_cq_resp_desc cmd_completion;
        int ret;

        memset(&create_cmd, 0x0, sizeof(create_cmd));

        create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

        create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
                ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
        create_cmd.cq_caps_1 |=
                ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

        create_cmd.msix_vector = io_cq->msix_vector;
        create_cmd.cq_depth = io_cq->q_depth;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &create_cmd.cq_ba,
                                   io_cq->cdesc_addr.phys_addr);
        if (unlikely(ret)) {
                pr_err("memory address set failed\n");
                return ret;
        }

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&create_cmd,
                                            sizeof(create_cmd),
                                            (struct ena_admin_acq_entry *)&cmd_completion,
                                            sizeof(cmd_completion));
        if (unlikely(ret)) {
                pr_err("Failed to create IO CQ. error: %d\n", ret);
                return ret;
        }

        io_cq->idx = cmd_completion.cq_idx;

        io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                cmd_completion.cq_interrupt_unmask_register_offset);

        if (cmd_completion.cq_head_db_register_offset)
                io_cq->cq_head_db_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.cq_head_db_register_offset);

        if (cmd_completion.numa_node_register_offset)
                io_cq->numa_node_cfg_reg =
                        (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                        cmd_completion.numa_node_register_offset);

        pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

        return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
                            struct ena_com_io_sq **io_sq,
                            struct ena_com_io_cq **io_cq)
{
        if (qid >= ENA_TOTAL_NUM_QUEUES) {
                pr_err("Invalid queue number %d but the max is %d\n", qid,
                       ENA_TOTAL_NUM_QUEUES);
                return -EINVAL;
        }

        *io_sq = &ena_dev->io_sq_queues[qid];
        *io_cq = &ena_dev->io_cq_queues[qid];

        return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_comp_ctx *comp_ctx;
        u16 i;

        if (!admin_queue->comp_ctx)
                return;

        for (i = 0; i < admin_queue->q_depth; i++) {
                comp_ctx = get_comp_ctxt(admin_queue, i, false);
                if (unlikely(!comp_ctx))
                        break;

                comp_ctx->status = ENA_CMD_ABORTED;

                complete(&comp_ctx->wait_event);
        }
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags = 0;

        spin_lock_irqsave(&admin_queue->q_lock, flags);
        while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
                spin_unlock_irqrestore(&admin_queue->q_lock, flags);
                msleep(ENA_POLL_MS);
                spin_lock_irqsave(&admin_queue->q_lock, flags);
        }
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
                          struct ena_com_io_cq *io_cq)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
        struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
        int ret;

        memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

        destroy_cmd.cq_idx = io_cq->idx;
        destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&destroy_cmd,
                                            sizeof(destroy_cmd),
                                            (struct ena_admin_acq_entry *)&destroy_resp,
                                            sizeof(destroy_resp));

        if (unlikely(ret && (ret != -ENODEV)))
                pr_err("Failed to destroy IO CQ. error: %d\n", ret);

        return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
        return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        unsigned long flags = 0;

        spin_lock_irqsave(&admin_queue->q_lock, flags);
        ena_dev->admin_queue.running_state = state;
        spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
        u16 depth = ena_dev->aenq.q_depth;

        WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

        /* Init head_db to mark that all entries in the queue
         * are initially available
         */
        writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_admin_get_feat_resp get_resp;
        int ret;

        ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG);
        if (ret) {
                pr_info("Can't get aenq configuration\n");
                return ret;
        }

        if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
                pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
                        get_resp.u.aenq.supported_groups, groups_flag);
                return -EOPNOTSUPP;
        }

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags = 0;
        cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
        cmd.u.aenq.enabled_groups = groups_flag;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                pr_err("Failed to config AENQ ret: %d\n", ret);

        return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
        u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
        int width;

        if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
                pr_err("Reg read timeout occurred\n");
                return -ETIME;
        }

        width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
                ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

        pr_debug("ENA dma width: %d\n", width);

        if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
                pr_err("DMA width illegal value: %d\n", width);
                return -EINVAL;
        }

        ena_dev->dma_addr_bits = width;

        return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
        u32 ver;
        u32 ctrl_ver;
        u32 ctrl_ver_masked;

        /* Make sure the ENA version and the controller version are at least
         * the versions the driver supports
         */
        ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
        ctrl_ver = ena_com_reg_bar_read32(ena_dev,
                                          ENA_REGS_CONTROLLER_VERSION_OFF);

        if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
                     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
                pr_err("Reg read timeout occurred\n");
                return -ETIME;
        }

        pr_info("ena device version: %d.%d\n",
                (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
                        ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
                ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

        pr_info("ena controller version: %d.%d.%d implementation version %d\n",
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
                        ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
                        ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
                        ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

        ctrl_ver_masked =
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
                (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

        /* Validate the ctrl version without the implementation ID */
        if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
                pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
                return -1;
        }

        return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_com_admin_cq *cq = &admin_queue->cq;
        struct ena_com_admin_sq *sq = &admin_queue->sq;
        struct ena_com_aenq *aenq = &ena_dev->aenq;
        u16 size;

        if (admin_queue->comp_ctx)
                devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
        admin_queue->comp_ctx = NULL;
        size = ADMIN_SQ_SIZE(admin_queue->q_depth);
        if (sq->entries)
                dma_free_coherent(ena_dev->dmadev, size, sq->entries,
                                  sq->dma_addr);
        sq->entries = NULL;

        size = ADMIN_CQ_SIZE(admin_queue->q_depth);
        if (cq->entries)
                dma_free_coherent(ena_dev->dmadev, size, cq->entries,
                                  cq->dma_addr);
        cq->entries = NULL;

        size = ADMIN_AENQ_SIZE(aenq->q_depth);
        if (ena_dev->aenq.entries)
                dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
                                  aenq->dma_addr);
        aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
        u32 mask_value = 0;

        if (polling)
                mask_value = ENA_REGS_ADMIN_INTR_MASK;

        writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
        ena_dev->admin_queue.polling = polling;
}

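/* Allocate the DMA-coherent response buffer used for readless register reads
 * and publish its address to the device.
 */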
int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        spin_lock_init(&mmio_read->lock);
        mmio_read->read_resp =
                dma_alloc_coherent(ena_dev->dmadev,
                                   sizeof(*mmio_read->read_resp),
                                   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
        if (unlikely(!mmio_read->read_resp))
                goto err;

        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

        mmio_read->read_resp->req_id = 0x0;
        mmio_read->seq_num = 0x0;
        mmio_read->readless_supported = true;

        return 0;

err:
        return -ENOMEM;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

        writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
        writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

        dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
                          mmio_read->read_resp, mmio_read->read_resp_dma_addr);

        mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
        struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
        u32 addr_low, addr_high;

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

        writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
        writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

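/* Bring up the admin queue: verify the device is ready, allocate the SQ, CQ,
 * completion contexts and AENQ, program their base addresses and capabilities
 * registers, and mark the queue as running.
 */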
int ena_com_admin_init(struct ena_com_dev *ena_dev,
                       struct ena_aenq_handlers *aenq_handlers)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
        int ret;

        dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

        if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
                pr_err("Reg read timeout occurred\n");
                return -ETIME;
        }

        if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
                pr_err("Device isn't ready, abort com init\n");
                return -ENODEV;
        }

        admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

        admin_queue->q_dmadev = ena_dev->dmadev;
        admin_queue->polling = false;
        admin_queue->curr_cmd_id = 0;

        atomic_set(&admin_queue->outstanding_cmds, 0);

        spin_lock_init(&admin_queue->q_lock);

        ret = ena_com_init_comp_ctxt(admin_queue);
        if (ret)
                goto error;

        ret = ena_com_admin_init_sq(admin_queue);
        if (ret)
                goto error;

        ret = ena_com_admin_init_cq(admin_queue);
        if (ret)
                goto error;

        admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
                ENA_REGS_AQ_DB_OFF);

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

        writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
        writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

        addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
        addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

        writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
        writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

        aq_caps = 0;
        aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
        aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
                    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
                   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

        acq_caps = 0;
        acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
        acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
                     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
                    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

        writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
        writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
        ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
        if (ret)
                goto error;

        admin_queue->running_state = true;

        return 0;
error:
        ena_com_admin_destroy(ena_dev);

        return ret;
}

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
                            struct ena_com_create_io_ctx *ctx)
{
        struct ena_com_io_sq *io_sq;
        struct ena_com_io_cq *io_cq;
        int ret;

        if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
                pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
                       ctx->qid, ENA_TOTAL_NUM_QUEUES);
                return -EINVAL;
        }

        io_sq = &ena_dev->io_sq_queues[ctx->qid];
        io_cq = &ena_dev->io_cq_queues[ctx->qid];

        memset(io_sq, 0x0, sizeof(*io_sq));
        memset(io_cq, 0x0, sizeof(*io_cq));

        /* Init CQ */
        io_cq->q_depth = ctx->queue_size;
        io_cq->direction = ctx->direction;
        io_cq->qid = ctx->qid;

        io_cq->msix_vector = ctx->msix_vector;

        io_sq->q_depth = ctx->queue_size;
        io_sq->direction = ctx->direction;
        io_sq->qid = ctx->qid;

        io_sq->mem_queue_type = ctx->mem_queue_type;

        if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
                /* header length is limited to 8 bits */
                io_sq->tx_max_header_size =
                        min_t(u32, ena_dev->tx_max_header_size, SZ_256);

        ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
        if (ret)
                goto error;
        ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
        if (ret)
                goto error;

        ret = ena_com_create_io_cq(ena_dev, io_cq);
        if (ret)
                goto error;

        ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
        if (ret)
                goto destroy_io_cq;

        return 0;

destroy_io_cq:
        ena_com_destroy_io_cq(ena_dev, io_cq);
error:
        ena_com_io_queue_free(ena_dev, io_sq, io_cq);
        return ret;
}

1847void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1848{
1849 struct ena_com_io_sq *io_sq;
1850 struct ena_com_io_cq *io_cq;
1851
1852 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1853 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1854 ENA_TOTAL_NUM_QUEUES);
1855 return;
1856 }
1857
1858 io_sq = &ena_dev->io_sq_queues[qid];
1859 io_cq = &ena_dev->io_cq_queues[qid];
1860
1861 ena_com_destroy_io_sq(ena_dev, io_sq);
1862 ena_com_destroy_io_cq(ena_dev, io_cq);
1863
1864 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1865}
1866
1867int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1868 struct ena_admin_get_feat_resp *resp)
1869{
1870 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG);
1871}
1872
1873int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1874 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1875{
1876 struct ena_admin_get_feat_resp get_resp;
1877 int rc;
1878
1879 rc = ena_com_get_feature(ena_dev, &get_resp,
1880 ENA_ADMIN_DEVICE_ATTRIBUTES);
1881 if (rc)
1882 return rc;
1883
1884 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1885 sizeof(get_resp.u.dev_attr));
1886 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1887
1888 rc = ena_com_get_feature(ena_dev, &get_resp,
1889 ENA_ADMIN_MAX_QUEUES_NUM);
1890 if (rc)
1891 return rc;
1892
1893 memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
1894 sizeof(get_resp.u.max_queue));
1895 ena_dev->tx_max_header_size = get_resp.u.max_queue.max_header_size;
1896
1897 rc = ena_com_get_feature(ena_dev, &get_resp,
1898 ENA_ADMIN_AENQ_CONFIG);
1899 if (rc)
1900 return rc;
1901
1902 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1903 sizeof(get_resp.u.aenq));
1904
1905 rc = ena_com_get_feature(ena_dev, &get_resp,
1906 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
1907 if (rc)
1908 return rc;
1909
1910 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1911 sizeof(get_resp.u.offload));
1912
1913
1914
1915
1916 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS);
1917
1918 if (!rc)
1919 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1920 sizeof(get_resp.u.hw_hints));
1921 else if (rc == -EOPNOTSUPP)
1922 memset(&get_feat_ctx->hw_hints, 0x0,
1923 sizeof(get_feat_ctx->hw_hints));
1924 else
1925 return rc;
1926
1927 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ);
1928 if (!rc)
1929 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1930 sizeof(get_resp.u.llq));
1931 else if (rc == -EOPNOTSUPP)
1932 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1933 else
1934 return rc;
1935
1936 return 0;
1937}
1938
1939void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
1940{
1941 ena_com_handle_admin_completion(&ena_dev->admin_queue);
1942}
1943
1944
1945
1946
1947static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
1948 u16 group)
1949{
1950 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
1951
1952 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
1953 return aenq_handlers->handlers[group];
1954
1955 return aenq_handlers->unimplemented_handler;
1956}
1957
/* ena_com_aenq_intr_handler - handle AENQ events: pop all pending entries
 * off the queue, dispatch each to its group handler, then write the new
 * head to the AENQ doorbell.
 */
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
1964 struct ena_admin_aenq_entry *aenq_e;
1965 struct ena_admin_aenq_common_desc *aenq_common;
1966 struct ena_com_aenq *aenq = &dev->aenq;
1967 unsigned long long timestamp;
1968 ena_aenq_handler handler_cb;
1969 u16 masked_head, processed = 0;
1970 u8 phase;
1971
1972 masked_head = aenq->head & (aenq->q_depth - 1);
1973 phase = aenq->phase;
1974 aenq_e = &aenq->entries[masked_head];
1975 aenq_common = &aenq_e->aenq_common_desc;
1976
	/* Go over all the pending events */
	while ((READ_ONCE(aenq_common->flags) &
		ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Make sure the phase bit (ownership) is as expected
		 * before reading the rest of the descriptor.
		 */
		dma_rmb();

		timestamp =
			(unsigned long long)aenq_common->timestamp_low |
			((unsigned long long)aenq_common->timestamp_high << 32);
		pr_debug("AENQ! Group[%x] Syndrome[%x] timestamp: [%llus]\n",
			 aenq_common->group, aenq_common->syndrom, timestamp);

		/* Handle the specific event */
		handler_cb = ena_com_get_specific_aenq_cb(dev,
							  aenq_common->group);
		handler_cb(data, aenq_e);

		/* Get next event entry */
		masked_head++;
		processed++;
1999
2000 if (unlikely(masked_head == aenq->q_depth)) {
2001 masked_head = 0;
2002 phase = !phase;
2003 }
2004 aenq_e = &aenq->entries[masked_head];
2005 aenq_common = &aenq_e->aenq_common_desc;
2006 }
2007
2008 aenq->head += processed;
2009 aenq->phase = phase;

	/* Don't update the aenq doorbell if no events were processed */
	if (!processed)
		return;

	/* Make sure the aenq entry reads complete before updating the
	 * doorbell; writel_relaxed() relies on these explicit barriers.
	 */
	mb();
	writel_relaxed((u32)aenq->head,
		       dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
	mmiowb();
2020}
2021
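/* ena_com_dev_reset - run the device reset handshake: verify the device
 * is ready, read the reset timeout from the CAPS register, assert the
 * reset bit (with the reset reason) until RESET_IN_PROGRESS turns on,
 * then deassert it and wait for the indication to clear. Finally, derive
 * the admin completion timeout from CAPS, falling back to
 * ADMIN_CMD_TIMEOUT_US.
 */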
2022int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2023 enum ena_regs_reset_reason_types reset_reason)
2024{
2025 u32 stat, timeout, cap, reset_val;
2026 int rc;
2027
2028 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2029 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2030
2031 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2032 (cap == ENA_MMIO_READ_TIMEOUT))) {
2033 pr_err("Reg read32 timeout occurred\n");
2034 return -ETIME;
2035 }
2036
2037 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2038 pr_err("Device isn't ready, can't reset device\n");
2039 return -EINVAL;
2040 }
2041
2042 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2043 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2044 if (timeout == 0) {
2045 pr_err("Invalid timeout value\n");
2046 return -EINVAL;
2047 }

	/* Start the reset, encoding the reset reason in the control reg */
	reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
	reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
		     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
	writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

	/* Write again the MMIO read request address */
	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2057
2058 rc = wait_for_reset_state(ena_dev, timeout,
2059 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2060 if (rc != 0) {
2061 pr_err("Reset indication didn't turn on\n");
2062 return rc;
2063 }

	/* Reset acknowledged; clear the reset bit */
	writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2067 rc = wait_for_reset_state(ena_dev, timeout, 0);
2068 if (rc != 0) {
2069 pr_err("Reset indication didn't turn off\n");
2070 return rc;
2071 }
2072
2073 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2074 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* The resolution of the timeout reg is 100ms */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2080
2081 return 0;
2082}
2083
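/* ena_get_dev_stats - issue an ENA_ADMIN_GET_STATS admin command of the
 * requested type and leave the response in ctx->get_resp.
 */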
2084static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2085 struct ena_com_stats_ctx *ctx,
2086 enum ena_admin_get_stats_type type)
2087{
2088 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2089 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2090 struct ena_com_admin_queue *admin_queue;
2091 int ret;
2092
2093 admin_queue = &ena_dev->admin_queue;
2094
2095 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2096 get_cmd->aq_common_descriptor.flags = 0;
2097 get_cmd->type = type;
2098
2099 ret = ena_com_execute_admin_command(admin_queue,
2100 (struct ena_admin_aq_entry *)get_cmd,
2101 sizeof(*get_cmd),
2102 (struct ena_admin_acq_entry *)get_resp,
2103 sizeof(*get_resp));
2104
2105 if (unlikely(ret))
2106 pr_err("Failed to get stats. error: %d\n", ret);
2107
2108 return ret;
2109}
2110
2111int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2112 struct ena_admin_basic_stats *stats)
2113{
2114 struct ena_com_stats_ctx ctx;
2115 int ret;
2116
2117 memset(&ctx, 0x0, sizeof(ctx));
2118 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2119 if (likely(ret == 0))
2120 memcpy(stats, &ctx.get_resp.basic_stats,
2121 sizeof(ctx.get_resp.basic_stats));
2122
2123 return ret;
2124}
2125
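/* ena_com_set_dev_mtu - push a new MTU to the device via SET_FEATURE;
 * returns -EOPNOTSUPP when the device doesn't advertise the MTU feature.
 */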
2126int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2127{
2128 struct ena_com_admin_queue *admin_queue;
2129 struct ena_admin_set_feat_cmd cmd;
2130 struct ena_admin_set_feat_resp resp;
2131 int ret;
2132
2133 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2134 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2135 return -EOPNOTSUPP;
2136 }
2137
2138 memset(&cmd, 0x0, sizeof(cmd));
2139 admin_queue = &ena_dev->admin_queue;
2140
2141 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2142 cmd.aq_common_descriptor.flags = 0;
2143 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2144 cmd.u.mtu.mtu = mtu;
2145
2146 ret = ena_com_execute_admin_command(admin_queue,
2147 (struct ena_admin_aq_entry *)&cmd,
2148 sizeof(cmd),
2149 (struct ena_admin_acq_entry *)&resp,
2150 sizeof(resp));
2151
2152 if (unlikely(ret))
2153 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2154
2155 return ret;
2156}
2157
2158int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2159 struct ena_admin_feature_offload_desc *offload)
2160{
2161 int ret;
2162 struct ena_admin_get_feat_resp resp;
2163
2164 ret = ena_com_get_feature(ena_dev, &resp,
2165 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG);
2166 if (unlikely(ret)) {
2167 pr_err("Failed to get offload capabilities %d\n", ret);
2168 return ret;
2169 }
2170
2171 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2172
2173 return 0;
2174}
2175
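/* ena_com_set_hash_function - commit rss->hash_func and the hash key
 * (already staged in the hash-key DMA buffer) to the device, after
 * checking the function against the device's supported_func bitmask.
 */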
2176int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2177{
2178 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2179 struct ena_rss *rss = &ena_dev->rss;
2180 struct ena_admin_set_feat_cmd cmd;
2181 struct ena_admin_set_feat_resp resp;
2182 struct ena_admin_get_feat_resp get_resp;
2183 int ret;
2184
2185 if (!ena_com_check_supported_feature_id(ena_dev,
2186 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2187 pr_debug("Feature %d isn't supported\n",
2188 ENA_ADMIN_RSS_HASH_FUNCTION);
2189 return -EOPNOTSUPP;
2190 }

	/* Validate the hash function is supported */
	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_HASH_FUNCTION);
2195 if (unlikely(ret))
2196 return ret;
2197
	if (!(get_resp.u.flow_hash_func.supported_func &
	      (1 << rss->hash_func))) {
		pr_err("Func hash %d isn't supported by device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}
2203
2204 memset(&cmd, 0x0, sizeof(cmd));
2205
2206 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2207 cmd.aq_common_descriptor.flags =
2208 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2209 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2210 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2211 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2212
2213 ret = ena_com_mem_addr_set(ena_dev,
2214 &cmd.control_buffer.address,
2215 rss->hash_key_dma_addr);
2216 if (unlikely(ret)) {
2217 pr_err("memory address set failed\n");
2218 return ret;
2219 }
2220
2221 cmd.control_buffer.length = sizeof(*rss->hash_key);
2222
2223 ret = ena_com_execute_admin_command(admin_queue,
2224 (struct ena_admin_aq_entry *)&cmd,
2225 sizeof(cmd),
2226 (struct ena_admin_acq_entry *)&resp,
2227 sizeof(resp));
2228 if (unlikely(ret)) {
2229 pr_err("Failed to set hash function %d. error: %d\n",
2230 rss->hash_func, ret);
2231 return -EINVAL;
2232 }
2233
2234 return 0;
2235}
2236
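/* ena_com_fill_hash_function - stage a hash function/key in the host-side
 * RSS context and commit it via ena_com_set_hash_function(); on failure
 * the device configuration is re-read so host state stays in sync.
 *
 * A minimal usage sketch (the key length and values are illustrative,
 * not recommended settings):
 *
 *	u8 key[40] = { 0 };
 *
 *	rc = ena_com_fill_hash_function(ena_dev, ENA_ADMIN_TOEPLITZ,
 *					key, sizeof(key), 0);
 */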
2237int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2238 enum ena_admin_hash_functions func,
2239 const u8 *key, u16 key_len, u32 init_val)
2240{
2241 struct ena_rss *rss = &ena_dev->rss;
2242 struct ena_admin_get_feat_resp get_resp;
2243 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2244 rss->hash_key;
2245 int rc;

	/* The key is stored as an array of 32-bit words, so the length
	 * must be a multiple of 4 bytes.
	 */
	if (unlikely(key_len & 0x3))
		return -EINVAL;
2250
2251 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2252 ENA_ADMIN_RSS_HASH_FUNCTION,
2253 rss->hash_key_dma_addr,
2254 sizeof(*rss->hash_key));
2255 if (unlikely(rc))
2256 return rc;
2257
2258 if (!((1 << func) & get_resp.u.flow_hash_func.supported_func)) {
2259 pr_err("Flow hash function %d isn't supported\n", func);
2260 return -EOPNOTSUPP;
2261 }
2262
2263 switch (func) {
2264 case ENA_ADMIN_TOEPLITZ:
2265 if (key_len > sizeof(hash_key->key)) {
2266 pr_err("key len (%hu) is bigger than the max supported (%zu)\n",
2267 key_len, sizeof(hash_key->key));
2268 return -EINVAL;
2269 }
2270
2271 memcpy(hash_key->key, key, key_len);
2272 rss->hash_init_val = init_val;
2273 hash_key->keys_num = key_len >> 2;
2274 break;
2275 case ENA_ADMIN_CRC32:
2276 rss->hash_init_val = init_val;
2277 break;
2278 default:
2279 pr_err("Invalid hash function (%d)\n", func);
2280 return -EINVAL;
2281 }
2282
2283 rc = ena_com_set_hash_function(ena_dev);

	/* In case of failure, restore the old hash function */
	if (unlikely(rc))
		ena_com_get_hash_function(ena_dev, NULL, NULL);
2288
2289 return rc;
2290}
2291
2292int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2293 enum ena_admin_hash_functions *func,
2294 u8 *key)
2295{
2296 struct ena_rss *rss = &ena_dev->rss;
2297 struct ena_admin_get_feat_resp get_resp;
2298 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2299 rss->hash_key;
2300 int rc;
2301
2302 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2303 ENA_ADMIN_RSS_HASH_FUNCTION,
2304 rss->hash_key_dma_addr,
2305 sizeof(*rss->hash_key));
2306 if (unlikely(rc))
2307 return rc;
2308
	/* The device reports the selected function as a bitmask (it was
	 * programmed as 1 << hash_func); decode it back to an index.
	 * ffs() returns 1 for the least significant set bit.
	 */
	rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
	if (rss->hash_func)
		rss->hash_func--;
	if (func)
		*func = rss->hash_func;
2312
2313 if (key)
2314 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2315
2316 return 0;
2317}
2318
2319int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2320 enum ena_admin_flow_hash_proto proto,
2321 u16 *fields)
2322{
2323 struct ena_rss *rss = &ena_dev->rss;
2324 struct ena_admin_get_feat_resp get_resp;
2325 int rc;
2326
2327 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2328 ENA_ADMIN_RSS_HASH_INPUT,
2329 rss->hash_ctrl_dma_addr,
2330 sizeof(*rss->hash_ctrl));
2331 if (unlikely(rc))
2332 return rc;
2333
2334 if (fields)
2335 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2336
2337 return 0;
2338}
2339
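/* ena_com_set_hash_ctrl - push the staged per-protocol hash field
 * selection (rss->hash_ctrl) to the device through an indirect control
 * buffer, with L3 and L4 input sorting enabled.
 */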
2340int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2341{
2342 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2343 struct ena_rss *rss = &ena_dev->rss;
2344 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2345 struct ena_admin_set_feat_cmd cmd;
2346 struct ena_admin_set_feat_resp resp;
2347 int ret;
2348
2349 if (!ena_com_check_supported_feature_id(ena_dev,
2350 ENA_ADMIN_RSS_HASH_INPUT)) {
2351 pr_debug("Feature %d isn't supported\n",
2352 ENA_ADMIN_RSS_HASH_INPUT);
2353 return -EOPNOTSUPP;
2354 }
2355
2356 memset(&cmd, 0x0, sizeof(cmd));
2357
2358 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2359 cmd.aq_common_descriptor.flags =
2360 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2361 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2362 cmd.u.flow_hash_input.enabled_input_sort =
2363 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2364 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2365
2366 ret = ena_com_mem_addr_set(ena_dev,
2367 &cmd.control_buffer.address,
2368 rss->hash_ctrl_dma_addr);
2369 if (unlikely(ret)) {
2370 pr_err("memory address set failed\n");
2371 return ret;
2372 }
2373 cmd.control_buffer.length = sizeof(*hash_ctrl);
2374
2375 ret = ena_com_execute_admin_command(admin_queue,
2376 (struct ena_admin_aq_entry *)&cmd,
2377 sizeof(cmd),
2378 (struct ena_admin_acq_entry *)&resp,
2379 sizeof(resp));
2380 if (unlikely(ret))
2381 pr_err("Failed to set hash input. error: %d\n", ret);
2382
2383 return ret;
2384}
2385
2386int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2387{
2388 struct ena_rss *rss = &ena_dev->rss;
2389 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2390 rss->hash_ctrl;
2391 u16 available_fields = 0;
2392 int rc, i;

	/* Get the supported hash input configuration from the device */
	rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2396 if (unlikely(rc))
2397 return rc;
2398
2399 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2400 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2401 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2402
2403 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2404 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2405 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2406
2407 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2408 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2409 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2410
2411 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2412 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2413 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2414
2415 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2416 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2417
2418 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2419 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2420
2421 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2422 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2423
2424 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2425 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2426
2427 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2428 available_fields = hash_ctrl->selected_fields[i].fields &
2429 hash_ctrl->supported_fields[i].fields;
2430 if (available_fields != hash_ctrl->selected_fields[i].fields) {
			pr_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
			       i, hash_ctrl->supported_fields[i].fields,
			       hash_ctrl->selected_fields[i].fields);
2434 return -EOPNOTSUPP;
2435 }
2436 }
2437
2438 rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2443
2444 return rc;
2445}
2446
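/* ena_com_fill_hash_ctrl - select the hashed fields for one protocol.
 * Unsupported field bits are reported but don't fail the call; a
 * device-side failure restores the previous hash ctrl.
 */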
2447int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2448 enum ena_admin_flow_hash_proto proto,
2449 u16 hash_fields)
2450{
2451 struct ena_rss *rss = &ena_dev->rss;
2452 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2453 u16 supported_fields;
2454 int rc;
2455
2456 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2457 pr_err("Invalid proto num (%u)\n", proto);
2458 return -EINVAL;
2459 }

	/* Get the ctrl table */
	rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2463 if (unlikely(rc))
2464 return rc;

	/* Make sure all the requested fields are supported; unsupported
	 * bits are only reported, the request still proceeds.
	 */
	supported_fields = hash_ctrl->supported_fields[proto].fields;
	if ((hash_fields & supported_fields) != hash_fields) {
		pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
		       proto, hash_fields, supported_fields);
	}
2472
2473 hash_ctrl->selected_fields[proto].fields = hash_fields;
2474
	rc = ena_com_set_hash_ctrl(ena_dev);

	/* In case of failure, restore the old hash ctrl */
	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
2482}
2483
2484int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2485 u16 entry_idx, u16 entry_value)
2486{
2487 struct ena_rss *rss = &ena_dev->rss;
2488
2489 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2490 return -EINVAL;
2491
	/* entry_value is later used as a queue index, so it must be
	 * strictly below the total number of queues
	 */
	if (unlikely(entry_value >= ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;
2494
2495 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2496
2497 return 0;
2498}
2499
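/* ena_com_indirect_table_set - convert the host indirection table to the
 * device format and push it via an indirect control buffer sized for all
 * 2^tbl_log_size entries.
 */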
2500int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2501{
2502 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2503 struct ena_rss *rss = &ena_dev->rss;
2504 struct ena_admin_set_feat_cmd cmd;
2505 struct ena_admin_set_feat_resp resp;
2506 int ret;
2507
2508 if (!ena_com_check_supported_feature_id(
2509 ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2510 pr_debug("Feature %d isn't supported\n",
2511 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2512 return -EOPNOTSUPP;
2513 }
2514
2515 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2516 if (ret) {
2517 pr_err("Failed to convert host indirection table to device table\n");
2518 return ret;
2519 }
2520
2521 memset(&cmd, 0x0, sizeof(cmd));
2522
2523 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2524 cmd.aq_common_descriptor.flags =
2525 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2526 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2527 cmd.u.ind_table.size = rss->tbl_log_size;
2528 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2529
2530 ret = ena_com_mem_addr_set(ena_dev,
2531 &cmd.control_buffer.address,
2532 rss->rss_ind_tbl_dma_addr);
2533 if (unlikely(ret)) {
2534 pr_err("memory address set failed\n");
2535 return ret;
2536 }
2537
2538 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2539 sizeof(struct ena_admin_rss_ind_table_entry);
2540
2541 ret = ena_com_execute_admin_command(admin_queue,
2542 (struct ena_admin_aq_entry *)&cmd,
2543 sizeof(cmd),
2544 (struct ena_admin_acq_entry *)&resp,
2545 sizeof(resp));
2546
2547 if (unlikely(ret))
2548 pr_err("Failed to set indirect table. error: %d\n", ret);
2549
2550 return ret;
2551}
2552
2553int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2554{
2555 struct ena_rss *rss = &ena_dev->rss;
2556 struct ena_admin_get_feat_resp get_resp;
2557 u32 tbl_size;
2558 int i, rc;
2559
2560 tbl_size = (1ULL << rss->tbl_log_size) *
2561 sizeof(struct ena_admin_rss_ind_table_entry);
2562
2563 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2564 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2565 rss->rss_ind_tbl_dma_addr,
2566 tbl_size);
2567 if (unlikely(rc))
2568 return rc;
2569
2570 if (!ind_tbl)
2571 return 0;
2572
2573 rc = ena_com_ind_tbl_convert_from_device(ena_dev);
2574 if (unlikely(rc))
2575 return rc;
2576
2577 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2578 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2579
2580 return 0;
2581}
2582
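/* ena_com_rss_init - allocate the RSS resources (indirection table, hash
 * key, hash ctrl) in order, unwinding earlier allocations when a later
 * one fails.
 */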
2583int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2584{
2585 int rc;
2586
2587 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2588
2589 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2590 if (unlikely(rc))
2591 goto err_indr_tbl;
2592
2593 rc = ena_com_hash_key_allocate(ena_dev);
2594 if (unlikely(rc))
2595 goto err_hash_key;
2596
2597 rc = ena_com_hash_ctrl_init(ena_dev);
2598 if (unlikely(rc))
2599 goto err_hash_ctrl;
2600
2601 return 0;
2602
2603err_hash_ctrl:
2604 ena_com_hash_key_destroy(ena_dev);
2605err_hash_key:
2606 ena_com_indirect_table_destroy(ena_dev);
2607err_indr_tbl:
2608
2609 return rc;
2610}
2611
2612void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2613{
2614 ena_com_indirect_table_destroy(ena_dev);
2615 ena_com_hash_key_destroy(ena_dev);
2616 ena_com_hash_ctrl_destroy(ena_dev);
2617
2618 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2619}
2620
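/* ena_com_allocate_host_info - allocate the 4KB host-info page shared
 * with the device and stamp it with the common spec version.
 */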
2621int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2622{
2623 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2624
2625 host_attr->host_info =
2626 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2627 &host_attr->host_info_dma_addr, GFP_KERNEL);
2628 if (unlikely(!host_attr->host_info))
2629 return -ENOMEM;
2630
2631 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2632 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2633 (ENA_COMMON_SPEC_VERSION_MINOR));
2634
2635 return 0;
2636}
2637
2638int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2639 u32 debug_area_size)
2640{
2641 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2642
2643 host_attr->debug_area_virt_addr =
2644 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2645 &host_attr->debug_area_dma_addr,
2646 GFP_KERNEL);
2647 if (unlikely(!host_attr->debug_area_virt_addr)) {
2648 host_attr->debug_area_size = 0;
2649 return -ENOMEM;
2650 }
2651
2652 host_attr->debug_area_size = debug_area_size;
2653
2654 return 0;
2655}
2656
2657void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2658{
2659 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2660
2661 if (host_attr->host_info) {
2662 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2663 host_attr->host_info_dma_addr);
2664 host_attr->host_info = NULL;
2665 }
2666}
2667
2668void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2669{
2670 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2671
2672 if (host_attr->debug_area_virt_addr) {
2673 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2674 host_attr->debug_area_virt_addr,
2675 host_attr->debug_area_dma_addr);
2676 host_attr->debug_area_virt_addr = NULL;
2677 }
2678}
2679
2680int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2681{
2682 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2683 struct ena_com_admin_queue *admin_queue;
2684 struct ena_admin_set_feat_cmd cmd;
2685 struct ena_admin_set_feat_resp resp;

	int ret;

	/* Host attribute config is called before ena_com_get_dev_attr_feat,
	 * so ena_com can't check here whether the feature is supported.
	 */
	memset(&cmd, 0x0, sizeof(cmd));
2694 admin_queue = &ena_dev->admin_queue;
2695
2696 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2697 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2698
2699 ret = ena_com_mem_addr_set(ena_dev,
2700 &cmd.u.host_attr.debug_ba,
2701 host_attr->debug_area_dma_addr);
2702 if (unlikely(ret)) {
2703 pr_err("memory address set failed\n");
2704 return ret;
2705 }
2706
2707 ret = ena_com_mem_addr_set(ena_dev,
2708 &cmd.u.host_attr.os_info_ba,
2709 host_attr->host_info_dma_addr);
2710 if (unlikely(ret)) {
2711 pr_err("memory address set failed\n");
2712 return ret;
2713 }
2714
2715 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2716
2717 ret = ena_com_execute_admin_command(admin_queue,
2718 (struct ena_admin_aq_entry *)&cmd,
2719 sizeof(cmd),
2720 (struct ena_admin_acq_entry *)&resp,
2721 sizeof(resp));
2722
2723 if (unlikely(ret))
2724 pr_err("Failed to set host attributes: %d\n", ret);
2725
2726 return ret;
2727}
2728
2729
2730bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2731{
2732 return ena_com_check_supported_feature_id(ena_dev,
2733 ENA_ADMIN_INTERRUPT_MODERATION);
2734}
2735
2736int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2737 u32 tx_coalesce_usecs)
2738{
2739 if (!ena_dev->intr_delay_resolution) {
2740 pr_err("Illegal interrupt delay granularity value\n");
2741 return -EFAULT;
2742 }
2743
2744 ena_dev->intr_moder_tx_interval = tx_coalesce_usecs /
2745 ena_dev->intr_delay_resolution;
2746
2747 return 0;
2748}
2749
2750int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2751 u32 rx_coalesce_usecs)
2752{
2753 if (!ena_dev->intr_delay_resolution) {
2754 pr_err("Illegal interrupt delay granularity value\n");
2755 return -EFAULT;
2756 }

	/* We use the LOWEST entry of the moderation table to store the
	 * nonadaptive RX interrupt interval.
	 */
	ena_dev->intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
		rx_coalesce_usecs / ena_dev->intr_delay_resolution;
2763
2764 return 0;
2765}
2766
2767void ena_com_destroy_interrupt_moderation(struct ena_com_dev *ena_dev)
2768{
2769 if (ena_dev->intr_moder_tbl)
2770 devm_kfree(ena_dev->dmadev, ena_dev->intr_moder_tbl);
2771 ena_dev->intr_moder_tbl = NULL;
2772}
2773
2774int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2775{
2776 struct ena_admin_get_feat_resp get_resp;
2777 u16 delay_resolution;
2778 int rc;
2779
2780 rc = ena_com_get_feature(ena_dev, &get_resp,
2781 ENA_ADMIN_INTERRUPT_MODERATION);
2782
2783 if (rc) {
2784 if (rc == -EOPNOTSUPP) {
2785 pr_debug("Feature %d isn't supported\n",
2786 ENA_ADMIN_INTERRUPT_MODERATION);
2787 rc = 0;
2788 } else {
2789 pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2790 rc);
2791 }

		/* No moderation is supported by the device; make sure
		 * adaptive moderation is disabled.
		 */
		ena_com_disable_adaptive_moderation(ena_dev);
2795 return rc;
2796 }
2797
2798 rc = ena_com_init_interrupt_moderation_table(ena_dev);
2799 if (rc)
2800 goto err;

	/* Moderation is supported; record the device's delay resolution
	 * and enable adaptive moderation.
	 */
	delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
	ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2805 ena_com_enable_adaptive_moderation(ena_dev);
2806
2807 return 0;
2808err:
2809 ena_com_destroy_interrupt_moderation(ena_dev);
2810 return rc;
2811}
2812
2813void ena_com_config_default_interrupt_moderation_table(struct ena_com_dev *ena_dev)
2814{
2815 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2816
2817 if (!intr_moder_tbl)
2818 return;
2819
2820 intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval =
2821 ENA_INTR_LOWEST_USECS;
2822 intr_moder_tbl[ENA_INTR_MODER_LOWEST].pkts_per_interval =
2823 ENA_INTR_LOWEST_PKTS;
2824 intr_moder_tbl[ENA_INTR_MODER_LOWEST].bytes_per_interval =
2825 ENA_INTR_LOWEST_BYTES;
2826
2827 intr_moder_tbl[ENA_INTR_MODER_LOW].intr_moder_interval =
2828 ENA_INTR_LOW_USECS;
2829 intr_moder_tbl[ENA_INTR_MODER_LOW].pkts_per_interval =
2830 ENA_INTR_LOW_PKTS;
2831 intr_moder_tbl[ENA_INTR_MODER_LOW].bytes_per_interval =
2832 ENA_INTR_LOW_BYTES;
2833
2834 intr_moder_tbl[ENA_INTR_MODER_MID].intr_moder_interval =
2835 ENA_INTR_MID_USECS;
2836 intr_moder_tbl[ENA_INTR_MODER_MID].pkts_per_interval =
2837 ENA_INTR_MID_PKTS;
2838 intr_moder_tbl[ENA_INTR_MODER_MID].bytes_per_interval =
2839 ENA_INTR_MID_BYTES;
2840
2841 intr_moder_tbl[ENA_INTR_MODER_HIGH].intr_moder_interval =
2842 ENA_INTR_HIGH_USECS;
2843 intr_moder_tbl[ENA_INTR_MODER_HIGH].pkts_per_interval =
2844 ENA_INTR_HIGH_PKTS;
2845 intr_moder_tbl[ENA_INTR_MODER_HIGH].bytes_per_interval =
2846 ENA_INTR_HIGH_BYTES;
2847
2848 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].intr_moder_interval =
2849 ENA_INTR_HIGHEST_USECS;
2850 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].pkts_per_interval =
2851 ENA_INTR_HIGHEST_PKTS;
2852 intr_moder_tbl[ENA_INTR_MODER_HIGHEST].bytes_per_interval =
2853 ENA_INTR_HIGHEST_BYTES;
2854}
2855
2856unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2857{
2858 return ena_dev->intr_moder_tx_interval;
2859}
2860
2861unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2862{
2863 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2864
2865 if (intr_moder_tbl)
2866 return intr_moder_tbl[ENA_INTR_MODER_LOWEST].intr_moder_interval;
2867
2868 return 0;
2869}
2870
2871void ena_com_init_intr_moderation_entry(struct ena_com_dev *ena_dev,
2872 enum ena_intr_moder_level level,
2873 struct ena_intr_moder_entry *entry)
2874{
2875 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2876
2877 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2878 return;
2879
2880 intr_moder_tbl[level].intr_moder_interval = entry->intr_moder_interval;
2881 if (ena_dev->intr_delay_resolution)
2882 intr_moder_tbl[level].intr_moder_interval /=
2883 ena_dev->intr_delay_resolution;
2884 intr_moder_tbl[level].pkts_per_interval = entry->pkts_per_interval;

	/* Leave bytes_per_interval untouched when the caller marks it as
	 * not supported.
	 */
	if (entry->bytes_per_interval != ENA_INTR_BYTE_COUNT_NOT_SUPPORTED)
		intr_moder_tbl[level].bytes_per_interval =
			entry->bytes_per_interval;
2889}
2890
2891void ena_com_get_intr_moderation_entry(struct ena_com_dev *ena_dev,
2892 enum ena_intr_moder_level level,
2893 struct ena_intr_moder_entry *entry)
2894{
2895 struct ena_intr_moder_entry *intr_moder_tbl = ena_dev->intr_moder_tbl;
2896
2897 if (level >= ENA_INTR_MAX_NUM_OF_LEVELS)
2898 return;
2899
2900 entry->intr_moder_interval = intr_moder_tbl[level].intr_moder_interval;
2901 if (ena_dev->intr_delay_resolution)
2902 entry->intr_moder_interval *= ena_dev->intr_delay_resolution;
2903 entry->pkts_per_interval =
2904 intr_moder_tbl[level].pkts_per_interval;
2905 entry->bytes_per_interval = intr_moder_tbl[level].bytes_per_interval;
2906}
2907
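/* ena_com_config_dev_mode - choose between host and device (LLQ) TX
 * placement: LLQ is selected only when the device advertises it and the
 * maximal header plus the preceding descriptors fit in one LLQ entry.
 */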
2908int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2909 struct ena_admin_feature_llq_desc *llq_features,
2910 struct ena_llq_configurations *llq_default_cfg)
2911{
2912 int rc;
2913 int size;
2914
2915 if (!llq_features->max_llq_num) {
2916 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2917 return 0;
2918 }
2919
2920 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2921 if (rc)
2922 return rc;

	/* Validate that the maximal header plus its preceding descriptors
	 * fit within a single LLQ entry.
	 */
	size = ena_dev->tx_max_header_size;
	size += ena_dev->llq_info.descs_num_before_header *
		sizeof(struct ena_eth_io_tx_desc);
2928
2929 if (unlikely(ena_dev->llq_info.desc_list_entry_size < size)) {
2930 pr_err("the size of the LLQ entry is smaller than needed\n");
2931 return -EINVAL;
2932 }
2933
2934 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2935
2936 return 0;
2937}
2938