#include "ena_com.h"

#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_MIN_ADMIN_POLL_US 100

#define ENA_MAX_ADMIN_POLL_US 5000
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	struct completion wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		pr_err("dma address has more bits than the device supports\n");
		return -EINVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	sq->entries = dma_alloc_coherent(queue->q_dmadev, size, &sq->dma_addr,
					 GFP_KERNEL);

	if (!sq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	cq->entries = dma_alloc_coherent(queue->q_dmadev, size, &cq->dma_addr,
					 GFP_KERNEL);

	if (!cq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	aenq->entries = dma_alloc_coherent(dev->dmadev, size, &aenq->dma_addr,
					   GFP_KERNEL);

	if (!aenq->entries) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	writel(addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	writel(addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry)
		      << ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	writel(aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		pr_err("aenq handlers pointer is NULL\n");
		return -EINVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	atomic_dec(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		pr_err("command id is larger than the queue size. cmd_id: %u queue size %d\n",
		       command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(!queue->comp_ctx)) {
		pr_err("Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		pr_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		atomic_inc(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

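	/* In case of queue FULL */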
	cnt = (u16)atomic_read(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		pr_debug("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(-ENOSPC);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(-EINVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	reinit_completion(&comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	writel(admin_queue->sq.tail, admin_queue->sq.db_addr);

	return comp_ctx;
}

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = devm_kzalloc(queue->q_dmadev, size, GFP_KERNEL);
	if (unlikely(!queue->comp_ctx)) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			init_completion(&comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		return ERR_PTR(-ENODEV);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);

	return comp_ctx;
}

static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->desc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_sq->desc_addr.phys_addr,
					   GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			io_sq->desc_addr.virt_addr =
				dma_alloc_coherent(ena_dev->dmadev, size,
						   &io_sq->desc_addr.phys_addr,
						   GFP_KERNEL);
		}

		if (!io_sq->desc_addr.virt_addr) {
			pr_err("memory allocation failed\n");
			return -ENOMEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		dev_node = dev_to_node(ena_dev->dmadev);
		set_dev_node(ena_dev->dmadev, ctx->numa_node);
		io_sq->bounce_buf_ctrl.base_buffer =
			devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);
		set_dev_node(ena_dev->dmadev, dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer =
				devm_kzalloc(ena_dev->dmadev, size, GFP_KERNEL);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			pr_err("bounce buffer memory allocation failed\n");
			return -ENOMEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

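		/* Initiate the first bounce buffer */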
		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
		io_sq->disable_meta_caching =
			io_sq->llq_info.disable_meta_caching;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

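	/* Use the basic completion descriptor for Rx */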
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

	prev_node = dev_to_node(ena_dev->dmadev);
	set_dev_node(ena_dev->dmadev, ctx->numa_node);
	io_cq->cdesc_addr.virt_addr =
		dma_alloc_coherent(ena_dev->dmadev, size,
				   &io_cq->cdesc_addr.phys_addr, GFP_KERNEL);
	set_dev_node(ena_dev->dmadev, prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		io_cq->cdesc_addr.virt_addr =
			dma_alloc_coherent(ena_dev->dmadev, size,
					   &io_cq->cdesc_addr.phys_addr,
					   GFP_KERNEL);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		pr_err("memory allocation failed\n");
		return -ENOMEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		pr_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		complete(&comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

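	/* Go over all the completions */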
	while ((READ_ONCE(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
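		/* Do not read the rest of the completion entry before the
		 * phase bit was validated
		 */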
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		pr_err("admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return 0;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return -ENOMEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return -EOPNOTSUPP;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return -EINVAL;
	}

	return -EINVAL;
}

static void ena_delay_exponential_backoff_us(u32 exp, u32 delay_us)
{
	delay_us = max_t(u32, ENA_MIN_ADMIN_POLL_US, delay_us);
	delay_us = min_t(u32, delay_us * (1U << exp), ENA_MAX_ADMIN_POLL_US);
	usleep_range(delay_us, 2 * delay_us);
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	unsigned long timeout;
	int ret;
	u32 exp = 0;

	timeout = jiffies + usecs_to_jiffies(admin_queue->completion_timeout);

	while (1) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (time_is_before_jiffies(timeout)) {
			pr_err("Wait for completion (polling) timeout\n");

			spin_lock_irqsave(&admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			spin_unlock_irqrestore(&admin_queue->q_lock, flags);

			ret = -ETIME;
			goto err;
		}

		ena_delay_exponential_backoff_us(exp++,
						 admin_queue->ena_dev->ena_min_poll_delay_us);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		pr_err("Command was aborted\n");
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ret = -ENODEV;
		goto err;
	}

	WARN(comp_ctx->status != ENA_CMD_COMPLETED, "Invalid comp status %d\n",
	     comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

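/* Push the negotiated LLQ configuration to the device. The driver provides
 * only the enabled feature values; the device, in turn, checks whether it
 * supports them.
 */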
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	cmd.u.llq.accel_mode.u.set.enabled_flags =
		BIT(ENA_ADMIN_DISABLE_META_CACHING) |
		BIT(ENA_ADMIN_LIMIT_TX_BURST);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to set LLQ configurations: %d\n", ret);

	return ret;
}

static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	struct ena_admin_accel_mode_get llq_accel_mode_get;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		pr_err("Invalid header location control, supported: 0x%x\n",
		       supported_feat);
		return -EINVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				pr_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
				       supported_feat);
				return -EINVAL;
			}

			pr_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			       llq_default_cfg->llq_stride_ctrl, supported_feat,
			       llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			pr_err("Invalid entry_size_ctrl, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_ring_entry_size, supported_feat,
		       llq_info->desc_list_entry_size);
	}
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
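		/* The desc list entry size should be whole multiply of 8
		 * This requirement comes from __iowrite64_copy()
		 */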
		pr_err("illegal entry size %d\n", llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			pr_err("Invalid descs_num_before_header, supported: 0x%x\n",
			       supported_feat);
			return -EINVAL;
		}

		pr_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
		       llq_default_cfg->llq_num_decs_before_header,
		       supported_feat, llq_info->descs_num_before_header);
	}

	llq_accel_mode_get = llq_features->accel_mode.u.get;

	llq_info->disable_meta_caching =
		!!(llq_accel_mode_get.supported_flags &
		   BIT(ENA_ADMIN_DISABLE_META_CACHING));

	if (llq_accel_mode_get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		llq_info->max_entries_in_tx_burst =
			llq_accel_mode_get.max_tx_burst_size /
			llq_default_cfg->llq_ring_entry_size_value;

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		pr_err("Cannot set LLQ configuration: %d\n", rc);

	return rc;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	wait_for_completion_timeout(&comp_ctx->wait_event,
				    usecs_to_jiffies(
					    admin_queue->completion_timeout));

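	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */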
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		spin_lock_irqsave(&admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			pr_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
			       comp_ctx->cmd_opcode,
			       admin_queue->auto_polling ? "ON" : "OFF");

			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			pr_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
			       comp_ctx->cmd_opcode, comp_ctx->status);
		}

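		/* Check if the queue fell back to polling mode; if not, the
		 * admin queue can no longer be trusted and must be stopped.
		 */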
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = -ETIME;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

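/* This method reads the hardware device register through posting writes
 * and waiting for response
 * On timeout the function returns ENA_MMIO_READ_TIMEOUT
 */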
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	might_sleep();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

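	/* If readless is disabled, perform regular read */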
	if (!mmio_read->readless_supported)
		return readl(ena_dev->reg_bar + offset);

	spin_lock_irqsave(&mmio_read->lock, flags);
	mmio_read->seq_num++;

	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	writel(mmio_read_reg, ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE(read_resp->req_id) == mmio_read->seq_num)
			break;

		udelay(1);
	}

	if (unlikely(i == timeout)) {
		pr_err("reading reg failed due to timeout. expected: req id[%hu] offset[%hu] actual: req id[%hu] offset[%hu]\n",
		       mmio_read->seq_num, offset, read_resp->req_id,
		       read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		pr_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	spin_unlock_irqrestore(&mmio_read->lock, flags);

	return ret;
}

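/* There are two types to wait for completion.
 * Polling mode - wait until the completion is available.
 * Async mode - wait on wait queue until the completion is ready
 * (or the timeout expired).
 * It is expected that the IRQ called ena_com_handle_admin_completion
 * to mark the completions.
 */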
static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
				       ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
				      ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("failed to destroy io sq error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_cq->cdesc_addr.virt_addr,
				  io_cq->cdesc_addr.phys_addr);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		dma_free_coherent(ena_dev->dmadev, size,
				  io_sq->desc_addr.virt_addr,
				  io_sq->desc_addr.phys_addr);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		devm_kfree(ena_dev->dmadev, io_sq->bounce_buf_ctrl.base_buffer);
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, exp = 0;
	unsigned long timeout_stamp;

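	/* Convert timeout from resolution of 100ms to us resolution. */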
	timeout_stamp = jiffies + usecs_to_jiffies(100 * 1000 * timeout);

	while (1) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			pr_err("Reg read timeout occurred\n");
			return -ETIME;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		if (time_is_before_jiffies(timeout_stamp))
			return -ETIME;

		ena_delay_exponential_backoff_us(exp++, ena_dev->ena_min_poll_delay_us);
	}
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

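	/* Device attributes is always supported */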
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		pr_debug("Feature %d isn't supported\n", feature_id);
		return -EOPNOTSUPP;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		pr_err("Failed to submit get_feature command %d error: %d\n",
		       feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
}

int ena_com_get_current_hash_function(struct ena_com_dev *ena_dev)
{
	return ena_dev->rss.hash_func;
}

static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		(ena_dev->rss).hash_key;

	netdev_rss_key_fill(&hash_key->key, sizeof(hash_key->key));

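	/* The key buffer is stored in the device in an array of
	 * u32 elements, so report the number of u32 words.
	 */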
	hash_key->keys_num = sizeof(hash_key->key) / sizeof(u32);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (!ena_com_check_supported_feature_id(ena_dev,
						ENA_ADMIN_RSS_HASH_FUNCTION))
		return -EOPNOTSUPP;

	rss->hash_key =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				   &rss->hash_key_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_key))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_key),
				  rss->hash_key, rss->hash_key_dma_addr);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	rss->hash_ctrl =
		dma_alloc_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				   &rss->hash_ctrl_dma_addr, GFP_KERNEL);

	if (unlikely(!rss->hash_ctrl))
		return -ENOMEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		dma_free_coherent(ena_dev->dmadev, sizeof(*rss->hash_ctrl),
				  rss->hash_ctrl, rss->hash_ctrl_dma_addr);
	rss->hash_ctrl = NULL;
}

static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		pr_err("indirect table size doesn't fit. requested size: %d, while min is: %d and max is: %d\n",
		       1 << log_size, 1 << get_resp.u.ind_table.min_size,
		       1 << get_resp.u.ind_table.max_size);
		return -EINVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	rss->rss_ind_tbl =
		dma_alloc_coherent(ena_dev->dmadev, tbl_size,
				   &rss->rss_ind_tbl_dma_addr, GFP_KERNEL);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		devm_kzalloc(ena_dev->dmadev, tbl_size, GFP_KERNEL);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
			  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return -ENOMEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		dma_free_coherent(ena_dev->dmadev, tbl_size, rss->rss_ind_tbl,
				  rss->rss_ind_tbl_dma_addr);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		devm_kfree(ena_dev->dmadev, rss->host_rss_ind_tbl);
	rss->host_rss_ind_tbl = NULL;
}

static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
				   ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
				  ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
				ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
				 ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
				ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			pr_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	pr_debug("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return -EINVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return -EINVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

	if (unlikely(!intr_delay_resolution)) {
		pr_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}

int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(-ENODEV))
			pr_debug("Failed to submit command [%ld]\n",
				 PTR_ERR(comp_ctx));
		else
			pr_err("Failed to submit command [%ld]\n",
			       PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			pr_err("Failed to process command. ret = %d\n", ret);
		else
			pr_debug("Failed to process command. ret = %d\n", ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		pr_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		pr_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	pr_debug("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Invalid queue number %d but the max is %d\n", qid,
		       ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		complete(&comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;
	u32 exp = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	while (atomic_read(&admin_queue->outstanding_cmds) != 0) {
		spin_unlock_irqrestore(&admin_queue->q_lock, flags);
		ena_delay_exponential_backoff_us(exp++,
						 ena_dev->ena_min_poll_delay_us);
		spin_lock_irqsave(&admin_queue->q_lock, flags);
	}
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != -ENODEV)))
		pr_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	spin_lock_irqsave(&admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	spin_unlock_irqrestore(&admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

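	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */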
	writel(depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		pr_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		pr_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			get_resp.u.aenq.supported_groups, groups_flag);
		return -EOPNOTSUPP;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		pr_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	pr_debug("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		pr_err("DMA width illegal value: %d\n", width);
		return -EINVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

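	/* Make sure the ENA version and the controller version are at least
	 * the minimum required version
	 */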
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	pr_info("ena device version: %d.%d\n",
		(ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	pr_info("ena controller version: %d.%d.%d implementation version %d\n",
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
			ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

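	/* Validate the ctrl version against the minimal ctrl version the
	 * driver supports
	 */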
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		pr_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	if (admin_queue->comp_ctx)
		devm_kfree(ena_dev->dmadev, admin_queue->comp_ctx);
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		dma_free_coherent(ena_dev->dmadev, size, sq->entries,
				  sq->dma_addr);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		dma_free_coherent(ena_dev->dmadev, size, cq->entries,
				  cq->dma_addr);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		dma_free_coherent(ena_dev->dmadev, size, aenq->entries,
				  aenq->dma_addr);
	aenq->entries = NULL;
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	writel(mask_value, ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	spin_lock_init(&mmio_read->lock);
	mmio_read->read_resp =
		dma_alloc_coherent(ena_dev->dmadev,
				   sizeof(*mmio_read->read_resp),
				   &mmio_read->read_resp_dma_addr, GFP_KERNEL);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:
	return -ENOMEM;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	dma_free_coherent(ena_dev->dmadev, sizeof(*mmio_read->read_resp),
			  mmio_read->read_resp, mmio_read->read_resp_dma_addr);

	mmio_read->read_resp = NULL;
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		pr_err("Reg read timeout occurred\n");
		return -ETIME;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		pr_err("Device isn't ready, abort com init\n");
		return -ENODEV;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	atomic_set(&admin_queue->outstanding_cmds, 0);

	spin_lock_init(&admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	writel(addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	writel(addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	writel(aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	writel(acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->ena_dev = ena_dev;
	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		pr_err("Qid (%d) is bigger than max num of queues (%d)\n",
		       ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return -EINVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
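		/* header length is limited to 8 bits */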
1865 io_sq->tx_max_header_size =
1866 min_t(u32, ena_dev->tx_max_header_size, SZ_256);
1867
1868 ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
1869 if (ret)
1870 goto error;
1871 ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
1872 if (ret)
1873 goto error;
1874
1875 ret = ena_com_create_io_cq(ena_dev, io_cq);
1876 if (ret)
1877 goto error;
1878
1879 ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
1880 if (ret)
1881 goto destroy_io_cq;
1882
1883 return 0;
1884
1885destroy_io_cq:
1886 ena_com_destroy_io_cq(ena_dev, io_cq);
1887error:
1888 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1889 return ret;
1890}
1891
1892void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
1893{
1894 struct ena_com_io_sq *io_sq;
1895 struct ena_com_io_cq *io_cq;
1896
1897 if (qid >= ENA_TOTAL_NUM_QUEUES) {
1898 pr_err("Qid (%d) is bigger than max num of queues (%d)\n", qid,
1899 ENA_TOTAL_NUM_QUEUES);
1900 return;
1901 }
1902
1903 io_sq = &ena_dev->io_sq_queues[qid];
1904 io_cq = &ena_dev->io_cq_queues[qid];
1905
1906 ena_com_destroy_io_sq(ena_dev, io_sq);
1907 ena_com_destroy_io_cq(ena_dev, io_cq);
1908
1909 ena_com_io_queue_free(ena_dev, io_sq, io_cq);
1910}
1911
1912int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1913 struct ena_admin_get_feat_resp *resp)
1914{
1915 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1916}
1917
1918int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
1919 struct ena_com_dev_get_features_ctx *get_feat_ctx)
1920{
1921 struct ena_admin_get_feat_resp get_resp;
1922 int rc;
1923
1924 rc = ena_com_get_feature(ena_dev, &get_resp,
1925 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
1926 if (rc)
1927 return rc;
1928
1929 memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
1930 sizeof(get_resp.u.dev_attr));
1931 ena_dev->supported_features = get_resp.u.dev_attr.supported_features;
1932
1933 if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
1934 rc = ena_com_get_feature(ena_dev, &get_resp,
1935 ENA_ADMIN_MAX_QUEUES_EXT,
1936 ENA_FEATURE_MAX_QUEUE_EXT_VER);
1937 if (rc)
1938 return rc;
1939
1940 if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
1941 return -EINVAL;
1942
1943 memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
1944 sizeof(get_resp.u.max_queue_ext));
1945 ena_dev->tx_max_header_size =
1946 get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
	} else {
		rc = ena_com_get_feature(ena_dev, &get_resp,
					 ENA_ADMIN_MAX_QUEUES_NUM, 0);
		if (rc)
			return rc;

		memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
		       sizeof(get_resp.u.max_queue));
		ena_dev->tx_max_header_size =
			get_resp.u.max_queue.max_header_size;
	}
1958
1959 rc = ena_com_get_feature(ena_dev, &get_resp,
1960 ENA_ADMIN_AENQ_CONFIG, 0);
1961 if (rc)
1962 return rc;
1963
1964 memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
1965 sizeof(get_resp.u.aenq));
1966
1967 rc = ena_com_get_feature(ena_dev, &get_resp,
1968 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
1969 if (rc)
1970 return rc;
1971
1972 memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
1973 sizeof(get_resp.u.offload));
1974
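	/* HW hints are not a mandatory feature: fall back to zeroed hints
	 * when the device reports the command as unsupported.
	 */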
1978 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);
1979
1980 if (!rc)
1981 memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
1982 sizeof(get_resp.u.hw_hints));
1983 else if (rc == -EOPNOTSUPP)
1984 memset(&get_feat_ctx->hw_hints, 0x0,
1985 sizeof(get_feat_ctx->hw_hints));
1986 else
1987 return rc;
1988
1989 rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
1990 if (!rc)
1991 memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
1992 sizeof(get_resp.u.llq));
1993 else if (rc == -EOPNOTSUPP)
1994 memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
1995 else
1996 return rc;
1997
1998 return 0;
1999}
2000
2001void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
2002{
2003 ena_com_handle_admin_completion(&ena_dev->admin_queue);
2004}
2005
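/* Return the AENQ handler registered for the given group, or the
 * unimplemented-handler callback when no specific handler exists.
 */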
2009static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
2010 u16 group)
2011{
2012 struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;
2013
2014 if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
2015 return aenq_handlers->handlers[group];
2016
2017 return aenq_handlers->unimplemented_handler;
2018}
2019
2020
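/* AENQ interrupt handler: pop the pending events from the async event
 * notification queue and dispatch each one to its group handler.
 */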
2024void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
2025{
2026 struct ena_admin_aenq_entry *aenq_e;
2027 struct ena_admin_aenq_common_desc *aenq_common;
2028 struct ena_com_aenq *aenq = &dev->aenq;
2029 u64 timestamp;
2030 ena_aenq_handler handler_cb;
2031 u16 masked_head, processed = 0;
2032 u8 phase;
2033
2034 masked_head = aenq->head & (aenq->q_depth - 1);
2035 phase = aenq->phase;
2036 aenq_e = &aenq->entries[masked_head];
2037 aenq_common = &aenq_e->aenq_common_desc;
2038
2039
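	/* Go over all the pending events */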
2040 while ((READ_ONCE(aenq_common->flags) &
2041 ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
2044
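		/* Do not read the rest of the descriptor before the phase bit
		 * has been validated (the read barrier orders the phase check
		 * against the payload reads).
		 */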
2045 dma_rmb();
2046
2047 timestamp = (u64)aenq_common->timestamp_low |
2048 ((u64)aenq_common->timestamp_high << 32);
2049 pr_debug("AENQ! Group[%x] Syndrom[%x] timestamp: [%llus]\n",
2050 aenq_common->group, aenq_common->syndrom, timestamp);
2051
2052
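		/* Handle the specific event */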
2053 handler_cb = ena_com_get_specific_aenq_cb(dev,
2054 aenq_common->group);
2055 handler_cb(data, aenq_e);
2056
2057
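		/* Get next event entry */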
2058 masked_head++;
2059 processed++;
2060
2061 if (unlikely(masked_head == aenq->q_depth)) {
2062 masked_head = 0;
2063 phase = !phase;
2064 }
2065 aenq_e = &aenq->entries[masked_head];
2066 aenq_common = &aenq_e->aenq_common_desc;
2067 }
2068
2069 aenq->head += processed;
2070 aenq->phase = phase;
2071
2072
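	/* Don't update the AENQ doorbell if no events were processed */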
2073 if (!processed)
2074 return;
2075
2076
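	/* write the AENQ doorbell only after all the descriptors were read */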
2077 mb();
2078 writel_relaxed((u32)aenq->head, dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
2079}
2080
2081int ena_com_dev_reset(struct ena_com_dev *ena_dev,
2082 enum ena_regs_reset_reason_types reset_reason)
2083{
2084 u32 stat, timeout, cap, reset_val;
2085 int rc;
2086
2087 stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
2088 cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
2089
2090 if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
2091 (cap == ENA_MMIO_READ_TIMEOUT))) {
2092 pr_err("Reg read32 timeout occurred\n");
2093 return -ETIME;
2094 }
2095
2096 if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
2097 pr_err("Device isn't ready, can't reset device\n");
2098 return -EINVAL;
2099 }
2100
2101 timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
2102 ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
2103 if (timeout == 0) {
2104 pr_err("Invalid timeout value\n");
2105 return -EINVAL;
2106 }
2107
2108
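	/* start the reset: set the reset bit and encode the reset reason */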
2109 reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
2110 reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
2111 ENA_REGS_DEV_CTL_RESET_REASON_MASK;
2112 writel(reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2113
2114
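	/* Write again the MMIO read request address */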
2115 ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);
2116
2117 rc = wait_for_reset_state(ena_dev, timeout,
2118 ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
2119 if (rc != 0) {
2120 pr_err("Reset indication didn't turn on\n");
2121 return rc;
2122 }
2123
2124
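	/* reset acknowledged: clear the reset bit and wait for the device to
	 * deassert the reset-in-progress indication
	 */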
2125 writel(0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
2126 rc = wait_for_reset_state(ena_dev, timeout, 0);
2127 if (rc != 0) {
2128 pr_err("Reset indication didn't turn off\n");
2129 return rc;
2130 }
2131
2132 timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
2133 ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
	if (timeout)
		/* the resolution of the timeout register is 100 ms;
		 * convert its value to microseconds
		 */
		ena_dev->admin_queue.completion_timeout = timeout * 100000;
	else
		ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;
2139
2140 return 0;
2141}
2142
2143static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
2144 struct ena_com_stats_ctx *ctx,
2145 enum ena_admin_get_stats_type type)
2146{
2147 struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
2148 struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
2149 struct ena_com_admin_queue *admin_queue;
2150 int ret;
2151
2152 admin_queue = &ena_dev->admin_queue;
2153
2154 get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
2155 get_cmd->aq_common_descriptor.flags = 0;
2156 get_cmd->type = type;
2157
2158 ret = ena_com_execute_admin_command(admin_queue,
2159 (struct ena_admin_aq_entry *)get_cmd,
2160 sizeof(*get_cmd),
2161 (struct ena_admin_acq_entry *)get_resp,
2162 sizeof(*get_resp));
2163
2164 if (unlikely(ret))
2165 pr_err("Failed to get stats. error: %d\n", ret);
2166
2167 return ret;
2168}
2169
2170int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
2171 struct ena_admin_basic_stats *stats)
2172{
2173 struct ena_com_stats_ctx ctx;
2174 int ret;
2175
2176 memset(&ctx, 0x0, sizeof(ctx));
2177 ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
2178 if (likely(ret == 0))
2179 memcpy(stats, &ctx.get_resp.basic_stats,
2180 sizeof(ctx.get_resp.basic_stats));
2181
2182 return ret;
2183}
2184
2185int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
2186{
2187 struct ena_com_admin_queue *admin_queue;
2188 struct ena_admin_set_feat_cmd cmd;
2189 struct ena_admin_set_feat_resp resp;
2190 int ret;
2191
2192 if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
2193 pr_debug("Feature %d isn't supported\n", ENA_ADMIN_MTU);
2194 return -EOPNOTSUPP;
2195 }
2196
2197 memset(&cmd, 0x0, sizeof(cmd));
2198 admin_queue = &ena_dev->admin_queue;
2199
2200 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2201 cmd.aq_common_descriptor.flags = 0;
2202 cmd.feat_common.feature_id = ENA_ADMIN_MTU;
2203 cmd.u.mtu.mtu = mtu;
2204
2205 ret = ena_com_execute_admin_command(admin_queue,
2206 (struct ena_admin_aq_entry *)&cmd,
2207 sizeof(cmd),
2208 (struct ena_admin_acq_entry *)&resp,
2209 sizeof(resp));
2210
2211 if (unlikely(ret))
2212 pr_err("Failed to set mtu %d. error: %d\n", mtu, ret);
2213
2214 return ret;
2215}
2216
2217int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
2218 struct ena_admin_feature_offload_desc *offload)
2219{
2220 int ret;
2221 struct ena_admin_get_feat_resp resp;
2222
2223 ret = ena_com_get_feature(ena_dev, &resp,
2224 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
2225 if (unlikely(ret)) {
2226 pr_err("Failed to get offload capabilities %d\n", ret);
2227 return ret;
2228 }
2229
2230 memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));
2231
2232 return 0;
2233}
2234
2235int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
2236{
2237 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2238 struct ena_rss *rss = &ena_dev->rss;
2239 struct ena_admin_set_feat_cmd cmd;
2240 struct ena_admin_set_feat_resp resp;
2241 struct ena_admin_get_feat_resp get_resp;
2242 int ret;
2243
2244 if (!ena_com_check_supported_feature_id(ena_dev,
2245 ENA_ADMIN_RSS_HASH_FUNCTION)) {
2246 pr_debug("Feature %d isn't supported\n",
2247 ENA_ADMIN_RSS_HASH_FUNCTION);
2248 return -EOPNOTSUPP;
2249 }
2250
2251
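	/* Validate the hash function is supported by the device */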
2252 ret = ena_com_get_feature(ena_dev, &get_resp,
2253 ENA_ADMIN_RSS_HASH_FUNCTION, 0);
2254 if (unlikely(ret))
2255 return ret;
2256
	if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
		pr_err("Flow hash function %d isn't supported by the device, abort\n",
		       rss->hash_func);
		return -EOPNOTSUPP;
	}
2262
2263 memset(&cmd, 0x0, sizeof(cmd));
2264
2265 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2266 cmd.aq_common_descriptor.flags =
2267 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2268 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
2269 cmd.u.flow_hash_func.init_val = rss->hash_init_val;
2270 cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;
2271
2272 ret = ena_com_mem_addr_set(ena_dev,
2273 &cmd.control_buffer.address,
2274 rss->hash_key_dma_addr);
2275 if (unlikely(ret)) {
2276 pr_err("memory address set failed\n");
2277 return ret;
2278 }
2279
2280 cmd.control_buffer.length = sizeof(*rss->hash_key);
2281
2282 ret = ena_com_execute_admin_command(admin_queue,
2283 (struct ena_admin_aq_entry *)&cmd,
2284 sizeof(cmd),
2285 (struct ena_admin_acq_entry *)&resp,
2286 sizeof(resp));
2287 if (unlikely(ret)) {
2288 pr_err("Failed to set hash function %d. error: %d\n",
2289 rss->hash_func, ret);
2290 return -EINVAL;
2291 }
2292
2293 return 0;
2294}
2295
2296int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
2297 enum ena_admin_hash_functions func,
2298 const u8 *key, u16 key_len, u32 init_val)
2299{
2300 struct ena_admin_feature_rss_flow_hash_control *hash_key;
2301 struct ena_admin_get_feat_resp get_resp;
2302 enum ena_admin_hash_functions old_func;
2303 struct ena_rss *rss = &ena_dev->rss;
2304 int rc;
2305
2306 hash_key = rss->hash_key;
2307
2308
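	/* Make sure the key length is a multiple of DWORDs */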
2309 if (unlikely(key_len & 0x3))
2310 return -EINVAL;
2311
2312 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2313 ENA_ADMIN_RSS_HASH_FUNCTION,
2314 rss->hash_key_dma_addr,
2315 sizeof(*rss->hash_key), 0);
2316 if (unlikely(rc))
2317 return rc;
2318
2319 if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
2320 pr_err("Flow hash function %d isn't supported\n", func);
2321 return -EOPNOTSUPP;
2322 }
2323
2324 switch (func) {
2325 case ENA_ADMIN_TOEPLITZ:
2326 if (key) {
2327 if (key_len != sizeof(hash_key->key)) {
2328 pr_err("key len (%hu) doesn't equal the supported size (%zu)\n",
2329 key_len, sizeof(hash_key->key));
2330 return -EINVAL;
2331 }
2332 memcpy(hash_key->key, key, key_len);
2333 rss->hash_init_val = init_val;
2334 hash_key->keys_num = key_len >> 2;
2335 }
2336 break;
2337 case ENA_ADMIN_CRC32:
2338 rss->hash_init_val = init_val;
2339 break;
2340 default:
2341 pr_err("Invalid hash function (%d)\n", func);
2342 return -EINVAL;
2343 }
2344
2345 old_func = rss->hash_func;
2346 rss->hash_func = func;
2347 rc = ena_com_set_hash_function(ena_dev);
2348
2349
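	/* Restore the old function in case of failure */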
2350 if (unlikely(rc))
2351 rss->hash_func = old_func;
2352
2353 return rc;
2354}
2355
2356int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
2357 enum ena_admin_hash_functions *func)
2358{
2359 struct ena_rss *rss = &ena_dev->rss;
2360 struct ena_admin_get_feat_resp get_resp;
2361 int rc;
2362
2363 if (unlikely(!func))
2364 return -EINVAL;
2365
2366 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2367 ENA_ADMIN_RSS_HASH_FUNCTION,
2368 rss->hash_key_dma_addr,
2369 sizeof(*rss->hash_key), 0);
2370 if (unlikely(rc))
2371 return rc;
2372
2373
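	/* ffs() returns 1 for the least significant set bit, so decrement
	 * to get a zero-based hash function index
	 */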
2374 rss->hash_func = ffs(get_resp.u.flow_hash_func.selected_func);
2375 if (rss->hash_func)
2376 rss->hash_func--;
2377
2378 *func = rss->hash_func;
2379
2380 return 0;
2381}
2382
2383int ena_com_get_hash_key(struct ena_com_dev *ena_dev, u8 *key)
2384{
2385 struct ena_admin_feature_rss_flow_hash_control *hash_key =
2386 ena_dev->rss.hash_key;
2387
2388 if (key)
2389 memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);
2390
2391 return 0;
2392}
2393
2394int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
2395 enum ena_admin_flow_hash_proto proto,
2396 u16 *fields)
2397{
2398 struct ena_rss *rss = &ena_dev->rss;
2399 struct ena_admin_get_feat_resp get_resp;
2400 int rc;
2401
2402 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2403 ENA_ADMIN_RSS_HASH_INPUT,
2404 rss->hash_ctrl_dma_addr,
2405 sizeof(*rss->hash_ctrl), 0);
2406 if (unlikely(rc))
2407 return rc;
2408
2409 if (fields)
2410 *fields = rss->hash_ctrl->selected_fields[proto].fields;
2411
2412 return 0;
2413}
2414
2415int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
2416{
2417 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2418 struct ena_rss *rss = &ena_dev->rss;
2419 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2420 struct ena_admin_set_feat_cmd cmd;
2421 struct ena_admin_set_feat_resp resp;
2422 int ret;
2423
2424 if (!ena_com_check_supported_feature_id(ena_dev,
2425 ENA_ADMIN_RSS_HASH_INPUT)) {
2426 pr_debug("Feature %d isn't supported\n",
2427 ENA_ADMIN_RSS_HASH_INPUT);
2428 return -EOPNOTSUPP;
2429 }
2430
2431 memset(&cmd, 0x0, sizeof(cmd));
2432
2433 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2434 cmd.aq_common_descriptor.flags =
2435 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2436 cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
2437 cmd.u.flow_hash_input.enabled_input_sort =
2438 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
2439 ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;
2440
2441 ret = ena_com_mem_addr_set(ena_dev,
2442 &cmd.control_buffer.address,
2443 rss->hash_ctrl_dma_addr);
2444 if (unlikely(ret)) {
2445 pr_err("memory address set failed\n");
2446 return ret;
2447 }
2448 cmd.control_buffer.length = sizeof(*hash_ctrl);
2449
2450 ret = ena_com_execute_admin_command(admin_queue,
2451 (struct ena_admin_aq_entry *)&cmd,
2452 sizeof(cmd),
2453 (struct ena_admin_acq_entry *)&resp,
2454 sizeof(resp));
2455 if (unlikely(ret))
2456 pr_err("Failed to set hash input. error: %d\n", ret);
2457
2458 return ret;
2459}
2460
2461int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
2462{
2463 struct ena_rss *rss = &ena_dev->rss;
2464 struct ena_admin_feature_rss_hash_control *hash_ctrl =
2465 rss->hash_ctrl;
2466 u16 available_fields = 0;
2467 int rc, i;
2468
2469
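	/* Get the supported hash input from the device */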
2470 rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2471 if (unlikely(rc))
2472 return rc;
2473
2474 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
2475 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2476 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2477
2478 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
2479 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2480 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2481
2482 hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
2483 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2484 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2485
2486 hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
2487 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
2488 ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;
2489
2490 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
2491 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2492
2493 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
2494 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2495
2496 hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
2497 ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;
2498
2499 hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
2500 ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;
2501
2502 for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
2503 available_fields = hash_ctrl->selected_fields[i].fields &
2504 hash_ctrl->supported_fields[i].fields;
2505 if (available_fields != hash_ctrl->selected_fields[i].fields) {
2506 pr_err("hash control doesn't support all the desire configuration. proto %x supported %x selected %x\n",
2507 i, hash_ctrl->supported_fields[i].fields,
2508 hash_ctrl->selected_fields[i].fields);
2509 return -EOPNOTSUPP;
2510 }
2511 }
2512
2513 rc = ena_com_set_hash_ctrl(ena_dev);
2514
2515
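	/* In case of failure, restore the old hash ctrl */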
2516 if (unlikely(rc))
2517 ena_com_get_hash_ctrl(ena_dev, 0, NULL);
2518
2519 return rc;
2520}
2521
2522int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
2523 enum ena_admin_flow_hash_proto proto,
2524 u16 hash_fields)
2525{
2526 struct ena_rss *rss = &ena_dev->rss;
2527 struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
2528 u16 supported_fields;
2529 int rc;
2530
2531 if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
2532 pr_err("Invalid proto num (%u)\n", proto);
2533 return -EINVAL;
2534 }
2535
2536
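	/* Get the ctrl table */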
2537 rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
2538 if (unlikely(rc))
2539 return rc;
2540
2541
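	/* Make sure all the requested fields are supported */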
2542 supported_fields = hash_ctrl->supported_fields[proto].fields;
2543 if ((hash_fields & supported_fields) != hash_fields) {
2544 pr_err("proto %d doesn't support the required fields %x. supports only: %x\n",
2545 proto, hash_fields, supported_fields);
2546 }
2547
2548 hash_ctrl->selected_fields[proto].fields = hash_fields;
2549
	rc = ena_com_set_hash_ctrl(ena_dev);

	if (unlikely(rc))
		ena_com_get_hash_ctrl(ena_dev, 0, NULL);

	return rc;
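	/* In case of failure, restore the old hash ctrl */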
2557}
2558
2559int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
2560 u16 entry_idx, u16 entry_value)
2561{
2562 struct ena_rss *rss = &ena_dev->rss;
2563
2564 if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
2565 return -EINVAL;
2566
	if (unlikely(entry_value >= ENA_TOTAL_NUM_QUEUES))
		return -EINVAL;
2569
2570 rss->host_rss_ind_tbl[entry_idx] = entry_value;
2571
2572 return 0;
2573}
2574
2575int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
2576{
2577 struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
2578 struct ena_rss *rss = &ena_dev->rss;
2579 struct ena_admin_set_feat_cmd cmd;
2580 struct ena_admin_set_feat_resp resp;
2581 int ret;
2582
2583 if (!ena_com_check_supported_feature_id(
2584 ena_dev, ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
2585 pr_debug("Feature %d isn't supported\n",
2586 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
2587 return -EOPNOTSUPP;
2588 }
2589
2590 ret = ena_com_ind_tbl_convert_to_device(ena_dev);
2591 if (ret) {
2592 pr_err("Failed to convert host indirection table to device table\n");
2593 return ret;
2594 }
2595
2596 memset(&cmd, 0x0, sizeof(cmd));
2597
2598 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2599 cmd.aq_common_descriptor.flags =
2600 ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
2601 cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
2602 cmd.u.ind_table.size = rss->tbl_log_size;
2603 cmd.u.ind_table.inline_index = 0xFFFFFFFF;
2604
2605 ret = ena_com_mem_addr_set(ena_dev,
2606 &cmd.control_buffer.address,
2607 rss->rss_ind_tbl_dma_addr);
2608 if (unlikely(ret)) {
2609 pr_err("memory address set failed\n");
2610 return ret;
2611 }
2612
2613 cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
2614 sizeof(struct ena_admin_rss_ind_table_entry);
2615
2616 ret = ena_com_execute_admin_command(admin_queue,
2617 (struct ena_admin_aq_entry *)&cmd,
2618 sizeof(cmd),
2619 (struct ena_admin_acq_entry *)&resp,
2620 sizeof(resp));
2621
2622 if (unlikely(ret))
2623 pr_err("Failed to set indirect table. error: %d\n", ret);
2624
2625 return ret;
2626}
2627
2628int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
2629{
2630 struct ena_rss *rss = &ena_dev->rss;
2631 struct ena_admin_get_feat_resp get_resp;
2632 u32 tbl_size;
2633 int i, rc;
2634
2635 tbl_size = (1ULL << rss->tbl_log_size) *
2636 sizeof(struct ena_admin_rss_ind_table_entry);
2637
2638 rc = ena_com_get_feature_ex(ena_dev, &get_resp,
2639 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
2640 rss->rss_ind_tbl_dma_addr,
2641 tbl_size, 0);
2642 if (unlikely(rc))
2643 return rc;
2644
2645 if (!ind_tbl)
2646 return 0;
2647
2648 for (i = 0; i < (1 << rss->tbl_log_size); i++)
2649 ind_tbl[i] = rss->host_rss_ind_tbl[i];
2650
2651 return 0;
2652}
2653
2654int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
2655{
2656 int rc;
2657
2658 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2659
2660 rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
2661 if (unlikely(rc))
2662 goto err_indr_tbl;
2663
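	/* The hash key is optional: if allocating it isn't supported by the
	 * device (-EOPNOTSUPP), RSS init continues without a configurable
	 * key; any other error aborts the init.
	 */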
2668 rc = ena_com_hash_key_allocate(ena_dev);
2669 if (likely(!rc))
2670 ena_com_hash_key_fill_default_key(ena_dev);
2671 else if (rc != -EOPNOTSUPP)
2672 goto err_hash_key;
2673
2674 rc = ena_com_hash_ctrl_init(ena_dev);
2675 if (unlikely(rc))
2676 goto err_hash_ctrl;
2677
2678 return 0;
2679
2680err_hash_ctrl:
2681 ena_com_hash_key_destroy(ena_dev);
2682err_hash_key:
2683 ena_com_indirect_table_destroy(ena_dev);
2684err_indr_tbl:
2685
2686 return rc;
2687}
2688
2689void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
2690{
2691 ena_com_indirect_table_destroy(ena_dev);
2692 ena_com_hash_key_destroy(ena_dev);
2693 ena_com_hash_ctrl_destroy(ena_dev);
2694
2695 memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
2696}
2697
2698int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
2699{
2700 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2701
2702 host_attr->host_info =
2703 dma_alloc_coherent(ena_dev->dmadev, SZ_4K,
2704 &host_attr->host_info_dma_addr, GFP_KERNEL);
2705 if (unlikely(!host_attr->host_info))
2706 return -ENOMEM;
2707
2708 host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
2709 ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
2710 (ENA_COMMON_SPEC_VERSION_MINOR));
2711
2712 return 0;
2713}
2714
2715int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
2716 u32 debug_area_size)
2717{
2718 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2719
2720 host_attr->debug_area_virt_addr =
2721 dma_alloc_coherent(ena_dev->dmadev, debug_area_size,
2722 &host_attr->debug_area_dma_addr,
2723 GFP_KERNEL);
2724 if (unlikely(!host_attr->debug_area_virt_addr)) {
2725 host_attr->debug_area_size = 0;
2726 return -ENOMEM;
2727 }
2728
2729 host_attr->debug_area_size = debug_area_size;
2730
2731 return 0;
2732}
2733
2734void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
2735{
2736 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2737
2738 if (host_attr->host_info) {
2739 dma_free_coherent(ena_dev->dmadev, SZ_4K, host_attr->host_info,
2740 host_attr->host_info_dma_addr);
2741 host_attr->host_info = NULL;
2742 }
2743}
2744
2745void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
2746{
2747 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2748
2749 if (host_attr->debug_area_virt_addr) {
2750 dma_free_coherent(ena_dev->dmadev, host_attr->debug_area_size,
2751 host_attr->debug_area_virt_addr,
2752 host_attr->debug_area_dma_addr);
2753 host_attr->debug_area_virt_addr = NULL;
2754 }
2755}
2756
2757int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
2758{
2759 struct ena_host_attribute *host_attr = &ena_dev->host_attr;
2760 struct ena_com_admin_queue *admin_queue;
2761 struct ena_admin_set_feat_cmd cmd;
2762 struct ena_admin_set_feat_resp resp;
2763
2764 int ret;
2765
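	/* Host attribute config is set before ena_com_get_dev_attr_feat runs,
	 * so the supported-features bitmap can't be consulted here.
	 */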
2770 memset(&cmd, 0x0, sizeof(cmd));
2771 admin_queue = &ena_dev->admin_queue;
2772
2773 cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
2774 cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;
2775
2776 ret = ena_com_mem_addr_set(ena_dev,
2777 &cmd.u.host_attr.debug_ba,
2778 host_attr->debug_area_dma_addr);
2779 if (unlikely(ret)) {
2780 pr_err("memory address set failed\n");
2781 return ret;
2782 }
2783
2784 ret = ena_com_mem_addr_set(ena_dev,
2785 &cmd.u.host_attr.os_info_ba,
2786 host_attr->host_info_dma_addr);
2787 if (unlikely(ret)) {
2788 pr_err("memory address set failed\n");
2789 return ret;
2790 }
2791
2792 cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;
2793
2794 ret = ena_com_execute_admin_command(admin_queue,
2795 (struct ena_admin_aq_entry *)&cmd,
2796 sizeof(cmd),
2797 (struct ena_admin_acq_entry *)&resp,
2798 sizeof(resp));
2799
2800 if (unlikely(ret))
2801 pr_err("Failed to set host attributes: %d\n", ret);
2802
2803 return ret;
2804}
2805
2806
2807bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
2808{
2809 return ena_com_check_supported_feature_id(ena_dev,
2810 ENA_ADMIN_INTERRUPT_MODERATION);
2811}
2812
2813static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
2814 u32 intr_delay_resolution,
2815 u32 *intr_moder_interval)
2816{
2817 if (!intr_delay_resolution) {
2818 pr_err("Illegal interrupt delay granularity value\n");
2819 return -EFAULT;
2820 }
2821
2822 *intr_moder_interval = coalesce_usecs / intr_delay_resolution;
2823
2824 return 0;
2825}
2826
2827int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
2828 u32 tx_coalesce_usecs)
2829{
2830 return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
2831 ena_dev->intr_delay_resolution,
2832 &ena_dev->intr_moder_tx_interval);
2833}
2834
2835int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
2836 u32 rx_coalesce_usecs)
2837{
2838 return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
2839 ena_dev->intr_delay_resolution,
2840 &ena_dev->intr_moder_rx_interval);
2841}
2842
2843int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
2844{
2845 struct ena_admin_get_feat_resp get_resp;
2846 u16 delay_resolution;
2847 int rc;
2848
2849 rc = ena_com_get_feature(ena_dev, &get_resp,
2850 ENA_ADMIN_INTERRUPT_MODERATION, 0);
2851
2852 if (rc) {
2853 if (rc == -EOPNOTSUPP) {
2854 pr_debug("Feature %d isn't supported\n",
2855 ENA_ADMIN_INTERRUPT_MODERATION);
2856 rc = 0;
2857 } else {
2858 pr_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
2859 rc);
2860 }
2861
2862
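		/* no moderation is supported, disable adaptive moderation */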
2863 ena_com_disable_adaptive_moderation(ena_dev);
2864 return rc;
2865 }
2866
2867
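	/* moderation is supported; pick up the device's delay resolution */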
2868 delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
2869 ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);
2870
2871
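	/* Disable adaptive moderation by default - can be enabled later */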
2872 ena_com_disable_adaptive_moderation(ena_dev);
2873
2874 return 0;
2875}
2876
2877unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
2878{
2879 return ena_dev->intr_moder_tx_interval;
2880}
2881
2882unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
2883{
2884 return ena_dev->intr_moder_rx_interval;
2885}
2886
2887int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
2888 struct ena_admin_feature_llq_desc *llq_features,
2889 struct ena_llq_configurations *llq_default_cfg)
2890{
2891 struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
2892 int rc;
2893
2894 if (!llq_features->max_llq_num) {
2895 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
2896 return 0;
2897 }
2898
2899 rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
2900 if (rc)
2901 return rc;
2902
2903 ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
2904 (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));
2905
2906 if (unlikely(ena_dev->tx_max_header_size == 0)) {
2907 pr_err("the size of the LLQ entry is smaller than needed\n");
2908 return -EINVAL;
2909 }
2910
2911 ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;
2912
2913 return 0;
2914}
2915