#include "ena_com.h"

#define ADMIN_CMD_TIMEOUT_US (3000000)

#define ENA_ASYNC_QUEUE_DEPTH 16
#define ENA_ADMIN_QUEUE_DEPTH 32

#define ENA_CTRL_MAJOR		0
#define ENA_CTRL_MINOR		0
#define ENA_CTRL_SUB_MINOR	1

#define MIN_ENA_CTRL_VER \
	(((ENA_CTRL_MAJOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT)) | \
	((ENA_CTRL_MINOR) << \
	(ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT)) | \
	(ENA_CTRL_SUB_MINOR))

#define ENA_DMA_ADDR_TO_UINT32_LOW(x)	((u32)((u64)(x)))
#define ENA_DMA_ADDR_TO_UINT32_HIGH(x)	((u32)(((u64)(x)) >> 32))

#define ENA_MMIO_READ_TIMEOUT 0xFFFFFFFF

#define ENA_COM_BOUNCE_BUFFER_CNTRL_CNT	4

#define ENA_REGS_ADMIN_INTR_MASK 1

#define ENA_POLL_MS	5
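
/* Admin command lifecycle, as implemented below: a caller posts a command
 * to the admin submission queue (SQ), a per-command struct ena_comp_ctx
 * tracks its state, and the matching entry is reaped from the admin
 * completion queue (CQ) either by polling or from the MSI-X interrupt
 * path. The status values below mirror that flow.
 */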
enum ena_cmd_status {
	ENA_CMD_SUBMITTED,
	ENA_CMD_COMPLETED,
	/* Abort - canceled by the driver */
	ENA_CMD_ABORTED,
};

struct ena_comp_ctx {
	ena_wait_event_t wait_event;
	struct ena_admin_acq_entry *user_cqe;
	u32 comp_size;
	enum ena_cmd_status status;
	/* status from the device */
	u8 comp_status;
	u8 cmd_opcode;
	bool occupied;
};

struct ena_com_stats_ctx {
	struct ena_admin_aq_get_stats_cmd get_cmd;
	struct ena_admin_acq_get_stats_resp get_resp;
};

static int ena_com_mem_addr_set(struct ena_com_dev *ena_dev,
				struct ena_common_mem_addr *ena_addr,
				dma_addr_t addr)
{
	if ((addr & GENMASK_ULL(ena_dev->dma_addr_bits - 1, 0)) != addr) {
		ena_trc_err("dma address has more bits than the device supports\n");
		return ENA_COM_INVAL;
	}

	ena_addr->mem_addr_low = lower_32_bits(addr);
	ena_addr->mem_addr_high = (u16)upper_32_bits(addr);

	return 0;
}

static int ena_com_admin_init_sq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_sq *sq = &queue->sq;
	u16 size = ADMIN_SQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, sq->entries, sq->dma_addr,
			       sq->mem_handle);

	if (!sq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	sq->head = 0;
	sq->tail = 0;
	sq->phase = 1;

	/* The doorbell address is assigned later, during admin init */
	sq->db_addr = NULL;

	return 0;
}

static int ena_com_admin_init_cq(struct ena_com_admin_queue *queue)
{
	struct ena_com_admin_cq *cq = &queue->cq;
	u16 size = ADMIN_CQ_SIZE(queue->q_depth);

	ENA_MEM_ALLOC_COHERENT(queue->q_dmadev, size, cq->entries, cq->dma_addr,
			       cq->mem_handle);

	if (!cq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	cq->head = 0;
	cq->phase = 1;

	return 0;
}

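/* The AENQ (asynchronous event notification queue) is a device-to-driver
 * ring: the driver allocates the entries, programs their base address into
 * AENQ_BASE_LO/HI, and packs the depth and entry size into AENQ_CAPS.
 * head is initialized to q_depth so every entry starts out available to
 * the device.
 */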
static int ena_com_admin_init_aenq(struct ena_com_dev *dev,
				   struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_aenq *aenq = &dev->aenq;
	u32 addr_low, addr_high, aenq_caps;
	u16 size;

	dev->aenq.q_depth = ENA_ASYNC_QUEUE_DEPTH;
	size = ADMIN_AENQ_SIZE(ENA_ASYNC_QUEUE_DEPTH);
	ENA_MEM_ALLOC_COHERENT(dev->dmadev, size,
			       aenq->entries,
			       aenq->dma_addr,
			       aenq->mem_handle);

	if (!aenq->entries) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	aenq->head = aenq->q_depth;
	aenq->phase = 1;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(aenq->dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(aenq->dma_addr);

	ENA_REG_WRITE32(dev->bus, addr_low, dev->reg_bar + ENA_REGS_AENQ_BASE_LO_OFF);
	ENA_REG_WRITE32(dev->bus, addr_high, dev->reg_bar + ENA_REGS_AENQ_BASE_HI_OFF);

	aenq_caps = 0;
	aenq_caps |= dev->aenq.q_depth & ENA_REGS_AENQ_CAPS_AENQ_DEPTH_MASK;
	aenq_caps |= (sizeof(struct ena_admin_aenq_entry) <<
		      ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_SHIFT) &
		     ENA_REGS_AENQ_CAPS_AENQ_ENTRY_SIZE_MASK;
	ENA_REG_WRITE32(dev->bus, aenq_caps, dev->reg_bar + ENA_REGS_AENQ_CAPS_OFF);

	if (unlikely(!aenq_handlers)) {
		ena_trc_err("aenq handlers pointer is NULL\n");
		return ENA_COM_INVAL;
	}

	aenq->aenq_handlers = aenq_handlers;

	return 0;
}

static void comp_ctxt_release(struct ena_com_admin_queue *queue,
			      struct ena_comp_ctx *comp_ctx)
{
	comp_ctx->occupied = false;
	ATOMIC32_DEC(&queue->outstanding_cmds);
}

static struct ena_comp_ctx *get_comp_ctxt(struct ena_com_admin_queue *queue,
					  u16 command_id, bool capture)
{
	if (unlikely(command_id >= queue->q_depth)) {
		ena_trc_err("command id is larger than the queue size. cmd_id: %u, queue size: %u\n",
			    command_id, queue->q_depth);
		return NULL;
	}

	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("Completion context is NULL\n");
		return NULL;
	}

	if (unlikely(queue->comp_ctx[command_id].occupied && capture)) {
		ena_trc_err("Completion context is occupied\n");
		return NULL;
	}

	if (capture) {
		ATOMIC32_INC(&queue->outstanding_cmds);
		queue->comp_ctx[command_id].occupied = true;
	}

	return &queue->comp_ctx[command_id];
}

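/* Builds and publishes a single admin descriptor: the current phase bit
 * and a command id are folded into the common descriptor, the entry is
 * copied into the SQ ring at tail & (depth - 1), and the unmasked tail is
 * written to the doorbell. The phase bit flips on every ring wrap so the
 * device can tell fresh descriptors from stale ones. Callers must hold the
 * admin queue lock (see ena_com_submit_admin_cmd below).
 */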
static struct ena_comp_ctx *__ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						       struct ena_admin_aq_entry *cmd,
						       size_t cmd_size_in_bytes,
						       struct ena_admin_acq_entry *comp,
						       size_t comp_size_in_bytes)
{
	struct ena_comp_ctx *comp_ctx;
	u16 tail_masked, cmd_id;
	u16 queue_size_mask;
	u16 cnt;

	queue_size_mask = admin_queue->q_depth - 1;

	tail_masked = admin_queue->sq.tail & queue_size_mask;

	/* In case of queue FULL */
	cnt = (u16)ATOMIC32_READ(&admin_queue->outstanding_cmds);
	if (cnt >= admin_queue->q_depth) {
		ena_trc_dbg("admin queue is full.\n");
		admin_queue->stats.out_of_space++;
		return ERR_PTR(ENA_COM_NO_SPACE);
	}

	cmd_id = admin_queue->curr_cmd_id;

	cmd->aq_common_descriptor.flags |= admin_queue->sq.phase &
		ENA_ADMIN_AQ_COMMON_DESC_PHASE_MASK;

	cmd->aq_common_descriptor.command_id |= cmd_id &
		ENA_ADMIN_AQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, true);
	if (unlikely(!comp_ctx))
		return ERR_PTR(ENA_COM_INVAL);

	comp_ctx->status = ENA_CMD_SUBMITTED;
	comp_ctx->comp_size = (u32)comp_size_in_bytes;
	comp_ctx->user_cqe = comp;
	comp_ctx->cmd_opcode = cmd->aq_common_descriptor.opcode;

	ENA_WAIT_EVENT_CLEAR(comp_ctx->wait_event);

	memcpy(&admin_queue->sq.entries[tail_masked], cmd, cmd_size_in_bytes);

	admin_queue->curr_cmd_id = (admin_queue->curr_cmd_id + 1) &
		queue_size_mask;

	admin_queue->sq.tail++;
	admin_queue->stats.submitted_cmd++;

	if (unlikely((admin_queue->sq.tail & queue_size_mask) == 0))
		admin_queue->sq.phase = !admin_queue->sq.phase;

	ENA_DB_SYNC(&admin_queue->sq.mem_handle);
	ENA_REG_WRITE32(admin_queue->bus, admin_queue->sq.tail,
			admin_queue->sq.db_addr);

	return comp_ctx;
}

static int ena_com_init_comp_ctxt(struct ena_com_admin_queue *queue)
{
	size_t size = queue->q_depth * sizeof(struct ena_comp_ctx);
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	queue->comp_ctx = ENA_MEM_ALLOC(queue->q_dmadev, size);
	if (unlikely(!queue->comp_ctx)) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	for (i = 0; i < queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(queue, i, false);
		if (comp_ctx)
			ENA_WAIT_EVENT_INIT(comp_ctx->wait_event);
	}

	return 0;
}

static struct ena_comp_ctx *ena_com_submit_admin_cmd(struct ena_com_admin_queue *admin_queue,
						     struct ena_admin_aq_entry *cmd,
						     size_t cmd_size_in_bytes,
						     struct ena_admin_acq_entry *comp,
						     size_t comp_size_in_bytes)
{
	unsigned long flags = 0;
	struct ena_comp_ctx *comp_ctx;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	if (unlikely(!admin_queue->running_state)) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		return ERR_PTR(ENA_COM_NO_DEVICE);
	}
	comp_ctx = __ena_com_submit_admin_cmd(admin_queue, cmd,
					      cmd_size_in_bytes,
					      comp,
					      comp_size_in_bytes);
	if (IS_ERR(comp_ctx))
		admin_queue->running_state = false;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

	return comp_ctx;
}

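/* IO SQ setup depends on the placement policy: with
 * ENA_ADMIN_PLACEMENT_POLICY_HOST the descriptor ring lives in host DMA
 * memory (NUMA-local when possible, with a fallback to any node), while
 * with ENA_ADMIN_PLACEMENT_POLICY_DEV (LLQ) descriptors are staged through
 * bounce buffers sized to the negotiated LLQ entry size.
 */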
static int ena_com_init_io_sq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_sq *io_sq)
{
	size_t size;
	int dev_node = 0;

	memset(&io_sq->desc_addr, 0x0, sizeof(io_sq->desc_addr));

	io_sq->dma_addr_bits = (u8)ena_dev->dma_addr_bits;
	io_sq->desc_entry_size =
		(io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_desc) :
		sizeof(struct ena_eth_io_rx_desc);

	size = io_sq->desc_entry_size * io_sq->q_depth;
	io_sq->bus = ena_dev->bus;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
					    size,
					    io_sq->desc_addr.virt_addr,
					    io_sq->desc_addr.phys_addr,
					    io_sq->desc_addr.mem_handle,
					    ctx->numa_node,
					    dev_node);
		if (!io_sq->desc_addr.virt_addr) {
			ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
					       size,
					       io_sq->desc_addr.virt_addr,
					       io_sq->desc_addr.phys_addr,
					       io_sq->desc_addr.mem_handle);
		}

		if (!io_sq->desc_addr.virt_addr) {
			ena_trc_err("memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}
	}

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		/* Allocate bounce buffers for the LLQ descriptor lists */
		io_sq->bounce_buf_ctrl.buffer_size =
			ena_dev->llq_info.desc_list_entry_size;
		io_sq->bounce_buf_ctrl.buffers_num =
			ENA_COM_BOUNCE_BUFFER_CNTRL_CNT;
		io_sq->bounce_buf_ctrl.next_to_use = 0;

		size = io_sq->bounce_buf_ctrl.buffer_size *
			io_sq->bounce_buf_ctrl.buffers_num;

		ENA_MEM_ALLOC_NODE(ena_dev->dmadev,
				   size,
				   io_sq->bounce_buf_ctrl.base_buffer,
				   ctx->numa_node,
				   dev_node);
		if (!io_sq->bounce_buf_ctrl.base_buffer)
			io_sq->bounce_buf_ctrl.base_buffer = ENA_MEM_ALLOC(ena_dev->dmadev, size);

		if (!io_sq->bounce_buf_ctrl.base_buffer) {
			ena_trc_err("bounce buffer memory allocation failed\n");
			return ENA_COM_NO_MEM;
		}

		memcpy(&io_sq->llq_info, &ena_dev->llq_info,
		       sizeof(io_sq->llq_info));

		io_sq->llq_buf_ctrl.curr_bounce_buf =
			ena_com_get_next_bounce_buffer(&io_sq->bounce_buf_ctrl);
		memset(io_sq->llq_buf_ctrl.curr_bounce_buf,
		       0x0, io_sq->llq_info.desc_list_entry_size);
		io_sq->llq_buf_ctrl.descs_left_in_line =
			io_sq->llq_info.descs_num_before_header;
		io_sq->disable_meta_caching =
			io_sq->llq_info.disable_meta_caching;

		if (io_sq->llq_info.max_entries_in_tx_burst > 0)
			io_sq->entries_in_tx_burst_left =
				io_sq->llq_info.max_entries_in_tx_burst;
	}

	io_sq->tail = 0;
	io_sq->next_to_comp = 0;
	io_sq->phase = 1;

	return 0;
}

static int ena_com_init_io_cq(struct ena_com_dev *ena_dev,
			      struct ena_com_create_io_ctx *ctx,
			      struct ena_com_io_cq *io_cq)
{
	size_t size;
	int prev_node = 0;

	memset(&io_cq->cdesc_addr, 0x0, sizeof(io_cq->cdesc_addr));

	/* Use the basic completion descriptor for Rx */
	io_cq->cdesc_entry_size_in_bytes =
		(io_cq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX) ?
		sizeof(struct ena_eth_io_tx_cdesc) :
		sizeof(struct ena_eth_io_rx_cdesc_base);

	size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;
	io_cq->bus = ena_dev->bus;

	ENA_MEM_ALLOC_COHERENT_NODE(ena_dev->dmadev,
				    size,
				    io_cq->cdesc_addr.virt_addr,
				    io_cq->cdesc_addr.phys_addr,
				    io_cq->cdesc_addr.mem_handle,
				    ctx->numa_node,
				    prev_node);
	if (!io_cq->cdesc_addr.virt_addr) {
		ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
				       size,
				       io_cq->cdesc_addr.virt_addr,
				       io_cq->cdesc_addr.phys_addr,
				       io_cq->cdesc_addr.mem_handle);
	}

	if (!io_cq->cdesc_addr.virt_addr) {
		ena_trc_err("memory allocation failed\n");
		return ENA_COM_NO_MEM;
	}

	io_cq->phase = 1;
	io_cq->head = 0;

	return 0;
}

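/* Completion path: the command id carried in each CQ entry selects the
 * matching completion context, the device status is recorded, the user's
 * completion buffer (if any) is filled in, and waiters are signaled when
 * the queue is in interrupt mode.
 */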
static void ena_com_handle_single_admin_completion(struct ena_com_admin_queue *admin_queue,
						   struct ena_admin_acq_entry *cqe)
{
	struct ena_comp_ctx *comp_ctx;
	u16 cmd_id;

	cmd_id = cqe->acq_common_descriptor.command &
		ENA_ADMIN_ACQ_COMMON_DESC_COMMAND_ID_MASK;

	comp_ctx = get_comp_ctxt(admin_queue, cmd_id, false);
	if (unlikely(!comp_ctx)) {
		ena_trc_err("comp_ctx is NULL. Changing the admin queue running state\n");
		admin_queue->running_state = false;
		return;
	}

	comp_ctx->status = ENA_CMD_COMPLETED;
	comp_ctx->comp_status = cqe->acq_common_descriptor.status;

	if (comp_ctx->user_cqe)
		memcpy(comp_ctx->user_cqe, (void *)cqe, comp_ctx->comp_size);

	if (!admin_queue->polling)
		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
}

static void ena_com_handle_admin_completion(struct ena_com_admin_queue *admin_queue)
{
	struct ena_admin_acq_entry *cqe = NULL;
	u16 comp_num = 0;
	u16 head_masked;
	u8 phase;

	head_masked = admin_queue->cq.head & (admin_queue->q_depth - 1);
	phase = admin_queue->cq.phase;

	cqe = &admin_queue->cq.entries[head_masked];

	/* Go over all the completions */
	while ((READ_ONCE8(cqe->acq_common_descriptor.flags) &
		ENA_ADMIN_ACQ_COMMON_DESC_PHASE_MASK) == phase) {
		/* Do not read the rest of the completion entry before the
		 * phase bit has been validated
		 */
		dma_rmb();
		ena_com_handle_single_admin_completion(admin_queue, cqe);

		head_masked++;
		comp_num++;
		if (unlikely(head_masked == admin_queue->q_depth)) {
			head_masked = 0;
			phase = !phase;
		}

		cqe = &admin_queue->cq.entries[head_masked];
	}

	admin_queue->cq.head += comp_num;
	admin_queue->cq.phase = phase;
	admin_queue->sq.head += comp_num;
	admin_queue->stats.completed_cmd += comp_num;
}

static int ena_com_comp_status_to_errno(u8 comp_status)
{
	if (unlikely(comp_status != 0))
		ena_trc_err("admin command failed[%u]\n", comp_status);

	switch (comp_status) {
	case ENA_ADMIN_SUCCESS:
		return ENA_COM_OK;
	case ENA_ADMIN_RESOURCE_ALLOCATION_FAILURE:
		return ENA_COM_NO_MEM;
	case ENA_ADMIN_UNSUPPORTED_OPCODE:
		return ENA_COM_UNSUPPORTED;
	case ENA_ADMIN_BAD_OPCODE:
	case ENA_ADMIN_MALFORMED_REQUEST:
	case ENA_ADMIN_ILLEGAL_PARAMETER:
	case ENA_ADMIN_UNKNOWN_ERROR:
		return ENA_COM_INVAL;
	}

	return ENA_COM_INVAL;
}

static int ena_com_wait_and_process_admin_cq_polling(struct ena_comp_ctx *comp_ctx,
						     struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	ena_time_t timeout;
	int ret;

	timeout = ENA_GET_SYSTEM_TIMEOUT(admin_queue->completion_timeout);

	while (1) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status != ENA_CMD_SUBMITTED)
			break;

		if (ENA_TIME_EXPIRE(timeout)) {
			ena_trc_err("Wait for completion (polling) timeout\n");

			ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
			admin_queue->stats.no_completion++;
			admin_queue->running_state = false;
			ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}

		ENA_MSLEEP(ENA_POLL_MS);
	}

	if (unlikely(comp_ctx->status == ENA_CMD_ABORTED)) {
		ena_trc_err("Command was aborted\n");
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		admin_queue->stats.aborted_cmd++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ret = ENA_COM_NO_DEVICE;
		goto err;
	}

	ENA_WARN(comp_ctx->status != ENA_CMD_COMPLETED,
		 "Invalid comp status %d\n", comp_ctx->status);

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

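/* Pushes the LLQ configuration negotiated by ena_com_config_llq_info() to
 * the device via a SET_FEATURE(ENA_ADMIN_LLQ) admin command.
 */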
static int ena_com_set_llq(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	int ret;

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.feat_common.feature_id = ENA_ADMIN_LLQ;

	cmd.u.llq.header_location_ctrl_enabled = llq_info->header_location_ctrl;
	cmd.u.llq.entry_size_ctrl_enabled = llq_info->desc_list_entry_size_ctrl;
	cmd.u.llq.desc_num_before_header_enabled = llq_info->descs_num_before_header;
	cmd.u.llq.descriptors_stride_ctrl_enabled = llq_info->desc_stride_ctrl;

	if (llq_info->disable_meta_caching)
		cmd.u.llq.accel_mode.u.set.enabled_flags |=
			BIT(ENA_ADMIN_DISABLE_META_CACHING);

	if (llq_info->max_entries_in_tx_burst)
		cmd.u.llq.accel_mode.u.set.enabled_flags |=
			BIT(ENA_ADMIN_LIMIT_TX_BURST);

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to set LLQ configurations: %d\n", ret);

	return ret;
}

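/* Negotiates the LLQ settings: for each knob (header location, descriptor
 * stride, ring entry size, descriptors before header) the driver prefers
 * the requested default and otherwise falls back to whatever the device
 * advertises as supported, failing with -EINVAL only when the two sets do
 * not intersect at all.
 */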
static int ena_com_config_llq_info(struct ena_com_dev *ena_dev,
				   struct ena_admin_feature_llq_desc *llq_features,
				   struct ena_llq_configurations *llq_default_cfg)
{
	struct ena_com_llq_info *llq_info = &ena_dev->llq_info;
	u16 supported_feat;
	int rc;

	memset(llq_info, 0, sizeof(*llq_info));

	supported_feat = llq_features->header_location_ctrl_supported;

	if (likely(supported_feat & llq_default_cfg->llq_header_location)) {
		llq_info->header_location_ctrl =
			llq_default_cfg->llq_header_location;
	} else {
		ena_trc_err("Invalid header location control, supported: 0x%x\n",
			    supported_feat);
		return -EINVAL;
	}

	if (likely(llq_info->header_location_ctrl == ENA_ADMIN_INLINE_HEADER)) {
		supported_feat = llq_features->descriptors_stride_ctrl_supported;
		if (likely(supported_feat & llq_default_cfg->llq_stride_ctrl)) {
			llq_info->desc_stride_ctrl = llq_default_cfg->llq_stride_ctrl;
		} else {
			if (supported_feat & ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY;
			} else if (supported_feat & ENA_ADMIN_SINGLE_DESC_PER_ENTRY) {
				llq_info->desc_stride_ctrl = ENA_ADMIN_SINGLE_DESC_PER_ENTRY;
			} else {
				ena_trc_err("Invalid desc_stride_ctrl, supported: 0x%x\n",
					    supported_feat);
				return -EINVAL;
			}

			ena_trc_err("Default llq stride ctrl is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
				    llq_default_cfg->llq_stride_ctrl,
				    supported_feat,
				    llq_info->desc_stride_ctrl);
		}
	} else {
		llq_info->desc_stride_ctrl = 0;
	}

	supported_feat = llq_features->entry_size_ctrl_supported;
	if (likely(supported_feat & llq_default_cfg->llq_ring_entry_size)) {
		llq_info->desc_list_entry_size_ctrl = llq_default_cfg->llq_ring_entry_size;
		llq_info->desc_list_entry_size = llq_default_cfg->llq_ring_entry_size_value;
	} else {
		if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_128B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_128B;
			llq_info->desc_list_entry_size = 128;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_192B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_192B;
			llq_info->desc_list_entry_size = 192;
		} else if (supported_feat & ENA_ADMIN_LIST_ENTRY_SIZE_256B) {
			llq_info->desc_list_entry_size_ctrl = ENA_ADMIN_LIST_ENTRY_SIZE_256B;
			llq_info->desc_list_entry_size = 256;
		} else {
			ena_trc_err("Invalid entry_size_ctrl, supported: 0x%x\n", supported_feat);
			return -EINVAL;
		}

		ena_trc_err("Default llq ring entry size is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_ring_entry_size,
			    supported_feat,
			    llq_info->desc_list_entry_size);
	}
	if (unlikely(llq_info->desc_list_entry_size & 0x7)) {
		/* The desc list entry size should be a whole multiple of 8;
		 * this requirement comes from __iowrite64_copy()
		 */
		ena_trc_err("illegal entry size %d\n",
			    llq_info->desc_list_entry_size);
		return -EINVAL;
	}

	if (llq_info->desc_stride_ctrl == ENA_ADMIN_MULTIPLE_DESCS_PER_ENTRY)
		llq_info->descs_per_entry = llq_info->desc_list_entry_size /
			sizeof(struct ena_eth_io_tx_desc);
	else
		llq_info->descs_per_entry = 1;

	supported_feat = llq_features->desc_num_before_header_supported;
	if (likely(supported_feat & llq_default_cfg->llq_num_decs_before_header)) {
		llq_info->descs_num_before_header = llq_default_cfg->llq_num_decs_before_header;
	} else {
		if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_2;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_1;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_4;
		} else if (supported_feat & ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8) {
			llq_info->descs_num_before_header = ENA_ADMIN_LLQ_NUM_DESCS_BEFORE_HEADER_8;
		} else {
			ena_trc_err("Invalid descs_num_before_header, supported: 0x%x\n",
				    supported_feat);
			return -EINVAL;
		}

		ena_trc_err("Default llq num descs before header is not supported, performing fallback, default: 0x%x, supported: 0x%x, used: 0x%x\n",
			    llq_default_cfg->llq_num_decs_before_header,
			    supported_feat,
			    llq_info->descs_num_before_header);
	}

	llq_info->disable_meta_caching =
		llq_features->accel_mode.u.get.supported_flags &
		BIT(ENA_ADMIN_DISABLE_META_CACHING);

	if (llq_features->accel_mode.u.get.supported_flags & BIT(ENA_ADMIN_LIMIT_TX_BURST))
		llq_info->max_entries_in_tx_burst =
			llq_features->accel_mode.u.get.max_tx_burst_size /
			llq_default_cfg->llq_ring_entry_size_value;

	rc = ena_com_set_llq(ena_dev);
	if (rc)
		ena_trc_err("Cannot set LLQ configuration: %d\n", rc);

	return rc;
}

static int ena_com_wait_and_process_admin_cq_interrupts(struct ena_comp_ctx *comp_ctx,
							struct ena_com_admin_queue *admin_queue)
{
	unsigned long flags = 0;
	int ret;

	ENA_WAIT_EVENT_WAIT(comp_ctx->wait_event,
			    admin_queue->completion_timeout);

	/* In case the command wasn't completed find out the root cause.
	 * There might be 2 kinds of errors:
	 * 1) No completion (timeout reached)
	 * 2) There is completion but the device didn't get any msi-x interrupt.
	 */
	if (unlikely(comp_ctx->status == ENA_CMD_SUBMITTED)) {
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
		ena_com_handle_admin_completion(admin_queue);
		admin_queue->stats.no_completion++;
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);

		if (comp_ctx->status == ENA_CMD_COMPLETED) {
			ena_trc_err("The ena device sent a completion but the driver didn't receive a MSI-X interrupt (cmd %d), autopolling mode is %s\n",
				    comp_ctx->cmd_opcode, admin_queue->auto_polling ? "ON" : "OFF");

			/* Check if fallback to polling is enabled */
			if (admin_queue->auto_polling)
				admin_queue->polling = true;
		} else {
			ena_trc_err("The ena device didn't send a completion for the admin cmd %d status %d\n",
				    comp_ctx->cmd_opcode, comp_ctx->status);
		}

		/* Check if shifted to polling mode. This happens when there is
		 * a completion without an interrupt and autopolling is enabled;
		 * in that case normal execution continues.
		 */
		if (!admin_queue->polling) {
			admin_queue->running_state = false;
			ret = ENA_COM_TIMER_EXPIRED;
			goto err;
		}
	}

	ret = ena_com_comp_status_to_errno(comp_ctx->comp_status);
err:
	comp_ctxt_release(admin_queue, comp_ctx);
	return ret;
}

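/* Readless register read: rather than issuing an MMIO read (which the
 * device may not support), the driver writes the register offset plus a
 * sequence number into ENA_REGS_MMIO_REG_READ_OFF and the device writes
 * the value back into the pre-registered read_resp DMA buffer. The
 * seq_num/req_id handshake filters out stale responses, and the busy-wait
 * is bounded by reg_read_to microseconds.
 */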
static u32 ena_com_reg_bar_read32(struct ena_com_dev *ena_dev, u16 offset)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	volatile struct ena_admin_ena_mmio_req_read_less_resp *read_resp =
		mmio_read->read_resp;
	u32 mmio_read_reg, ret, i;
	unsigned long flags = 0;
	u32 timeout = mmio_read->reg_read_to;

	ENA_MIGHT_SLEEP();

	if (timeout == 0)
		timeout = ENA_REG_READ_TIMEOUT;

	/* If readless is disabled, perform regular read */
	if (!mmio_read->readless_supported)
		return ENA_REG_READ32(ena_dev->bus, ena_dev->reg_bar + offset);

	ENA_SPINLOCK_LOCK(mmio_read->lock, flags);
	mmio_read->seq_num++;

	/* Trash the response req_id so a stale value can't match the new seq_num */
	read_resp->req_id = mmio_read->seq_num + 0xDEAD;
	mmio_read_reg = (offset << ENA_REGS_MMIO_REG_READ_REG_OFF_SHIFT) &
			ENA_REGS_MMIO_REG_READ_REG_OFF_MASK;
	mmio_read_reg |= mmio_read->seq_num &
			 ENA_REGS_MMIO_REG_READ_REQ_ID_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mmio_read_reg,
			ena_dev->reg_bar + ENA_REGS_MMIO_REG_READ_OFF);

	for (i = 0; i < timeout; i++) {
		if (READ_ONCE16(read_resp->req_id) == mmio_read->seq_num)
			break;

		ENA_UDELAY(1);
	}

	if (unlikely(i == timeout)) {
		ena_trc_err("Reading reg failed due to timeout. expected: req id[%hu] offset[%hu], actual: req id[%hu] offset[%hu]\n",
			    mmio_read->seq_num,
			    offset,
			    read_resp->req_id,
			    read_resp->reg_off);
		ret = ENA_MMIO_READ_TIMEOUT;
		goto err;
	}

	if (read_resp->reg_off != offset) {
		ena_trc_err("Read failure: wrong offset provided\n");
		ret = ENA_MMIO_READ_TIMEOUT;
	} else {
		ret = read_resp->reg_val;
	}
err:
	ENA_SPINLOCK_UNLOCK(mmio_read->lock, flags);

	return ret;
}

static int ena_com_wait_and_process_admin_cq(struct ena_comp_ctx *comp_ctx,
					     struct ena_com_admin_queue *admin_queue)
{
	if (admin_queue->polling)
		return ena_com_wait_and_process_admin_cq_polling(comp_ctx,
								 admin_queue);

	return ena_com_wait_and_process_admin_cq_interrupts(comp_ctx,
							    admin_queue);
}

static int ena_com_destroy_io_sq(struct ena_com_dev *ena_dev,
				 struct ena_com_io_sq *io_sq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_sq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_sq_resp_desc destroy_resp;
	u8 direction;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	destroy_cmd.sq.sq_identity |= (direction <<
		ENA_ADMIN_SQ_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_SQ_SQ_DIRECTION_MASK;

	destroy_cmd.sq.sq_idx = io_sq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_SQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO SQ. error: %d\n", ret);

	return ret;
}

static void ena_com_io_queue_free(struct ena_com_dev *ena_dev,
				  struct ena_com_io_sq *io_sq,
				  struct ena_com_io_cq *io_cq)
{
	size_t size;

	if (io_cq->cdesc_addr.virt_addr) {
		size = io_cq->cdesc_entry_size_in_bytes * io_cq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_cq->cdesc_addr.virt_addr,
				      io_cq->cdesc_addr.phys_addr,
				      io_cq->cdesc_addr.mem_handle);

		io_cq->cdesc_addr.virt_addr = NULL;
	}

	if (io_sq->desc_addr.virt_addr) {
		size = io_sq->desc_entry_size * io_sq->q_depth;

		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      size,
				      io_sq->desc_addr.virt_addr,
				      io_sq->desc_addr.phys_addr,
				      io_sq->desc_addr.mem_handle);

		io_sq->desc_addr.virt_addr = NULL;
	}

	if (io_sq->bounce_buf_ctrl.base_buffer) {
		ENA_MEM_FREE(ena_dev->dmadev,
			     io_sq->bounce_buf_ctrl.base_buffer,
			     (io_sq->llq_info.desc_list_entry_size * ENA_COM_BOUNCE_BUFFER_CNTRL_CNT));
		io_sq->bounce_buf_ctrl.base_buffer = NULL;
	}
}

static int wait_for_reset_state(struct ena_com_dev *ena_dev, u32 timeout,
				u16 exp_state)
{
	u32 val, i;

	/* Convert the timeout from resolution of 100ms to ENA_POLL_MS */
	timeout = (timeout * 100) / ENA_POLL_MS;

	for (i = 0; i < timeout; i++) {
		val = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

		if (unlikely(val == ENA_MMIO_READ_TIMEOUT)) {
			ena_trc_err("Reg read timeout occurred\n");
			return ENA_COM_TIMER_EXPIRED;
		}

		if ((val & ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK) ==
		    exp_state)
			return 0;

		ENA_MSLEEP(ENA_POLL_MS);
	}

	return ENA_COM_TIMER_EXPIRED;
}

static bool ena_com_check_supported_feature_id(struct ena_com_dev *ena_dev,
					       enum ena_admin_aq_feature_id feature_id)
{
	u32 feature_mask = 1 << feature_id;

	/* Device attributes is always supported */
	if ((feature_id != ENA_ADMIN_DEVICE_ATTRIBUTES) &&
	    !(ena_dev->supported_features & feature_mask))
		return false;

	return true;
}

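/* Generic GET_FEATURE wrapper: when a control buffer is supplied, its DMA
 * address is attached and the CTRL_DATA_INDIRECT flag is set; otherwise
 * the feature data is returned inline in the completion. Most callers use
 * ena_com_get_feature() below, which passes no control buffer.
 */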
static int ena_com_get_feature_ex(struct ena_com_dev *ena_dev,
				  struct ena_admin_get_feat_resp *get_resp,
				  enum ena_admin_aq_feature_id feature_id,
				  dma_addr_t control_buf_dma_addr,
				  u32 control_buff_size,
				  u8 feature_ver)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_get_feat_cmd get_cmd;
	int ret;

	if (!ena_com_check_supported_feature_id(ena_dev, feature_id)) {
		ena_trc_dbg("Feature %d isn't supported\n", feature_id);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&get_cmd, 0x0, sizeof(get_cmd));
	admin_queue = &ena_dev->admin_queue;

	get_cmd.aq_common_descriptor.opcode = ENA_ADMIN_GET_FEATURE;

	if (control_buff_size)
		get_cmd.aq_common_descriptor.flags =
			ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
	else
		get_cmd.aq_common_descriptor.flags = 0;

	ret = ena_com_mem_addr_set(ena_dev,
				   &get_cmd.control_buffer.address,
				   control_buf_dma_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	get_cmd.control_buffer.length = control_buff_size;
	get_cmd.feat_common.feature_version = feature_ver;
	get_cmd.feat_common.feature_id = feature_id;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)
					    &get_cmd,
					    sizeof(get_cmd),
					    (struct ena_admin_acq_entry *)
					    get_resp,
					    sizeof(*get_resp));

	if (unlikely(ret))
		ena_trc_err("Failed to submit get_feature command %d error: %d\n",
			    feature_id, ret);

	return ret;
}

static int ena_com_get_feature(struct ena_com_dev *ena_dev,
			       struct ena_admin_get_feat_resp *get_resp,
			       enum ena_admin_aq_feature_id feature_id,
			       u8 feature_ver)
{
	return ena_com_get_feature_ex(ena_dev,
				      get_resp,
				      feature_id,
				      0,
				      0,
				      feature_ver);
}

static void ena_com_hash_key_fill_default_key(struct ena_com_dev *ena_dev)
{
	struct ena_admin_feature_rss_flow_hash_control *hash_key =
		(ena_dev->rss).hash_key;

	ENA_RSS_FILL_KEY(&hash_key->key, sizeof(hash_key->key));

	/* The key is stored in the device as an array of u32 words, so its
	 * length is reported in u32 units as well
	 */
	hash_key->keys_num = sizeof(hash_key->key) / sizeof(uint32_t);
}

static int ena_com_hash_key_allocate(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_key),
			       rss->hash_key,
			       rss->hash_key_dma_addr,
			       rss->hash_key_mem_handle);

	if (unlikely(!rss->hash_key))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_key_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_key)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_key),
				      rss->hash_key,
				      rss->hash_key_dma_addr,
				      rss->hash_key_mem_handle);
	rss->hash_key = NULL;
}

static int ena_com_hash_ctrl_init(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*rss->hash_ctrl),
			       rss->hash_ctrl,
			       rss->hash_ctrl_dma_addr,
			       rss->hash_ctrl_mem_handle);

	if (unlikely(!rss->hash_ctrl))
		return ENA_COM_NO_MEM;

	return 0;
}

static void ena_com_hash_ctrl_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;

	if (rss->hash_ctrl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      sizeof(*rss->hash_ctrl),
				      rss->hash_ctrl,
				      rss->hash_ctrl_dma_addr,
				      rss->hash_ctrl_mem_handle);
	rss->hash_ctrl = NULL;
}

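/* The RSS indirection table is kept in two forms: a device-visible DMA
 * table of ena_admin_rss_ind_table_entry and a host-order shadow
 * (host_rss_ind_tbl) of queue ids. The requested log size must fall inside
 * the min/max window reported by the device.
 */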
static int ena_com_indirect_table_allocate(struct ena_com_dev *ena_dev,
					   u16 log_size)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_admin_get_feat_resp get_resp;
	size_t tbl_size;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp,
				  ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
	if (unlikely(ret))
		return ret;

	if ((get_resp.u.ind_table.min_size > log_size) ||
	    (get_resp.u.ind_table.max_size < log_size)) {
		ena_trc_err("indirect table size doesn't fit. requested size: %d, min: %d, max: %d\n",
			    1 << log_size,
			    1 << get_resp.u.ind_table.min_size,
			    1 << get_resp.u.ind_table.max_size);
		return ENA_COM_INVAL;
	}

	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       tbl_size,
			       rss->rss_ind_tbl,
			       rss->rss_ind_tbl_dma_addr,
			       rss->rss_ind_tbl_mem_handle);
	if (unlikely(!rss->rss_ind_tbl))
		goto mem_err1;

	tbl_size = (1ULL << log_size) * sizeof(u16);
	rss->host_rss_ind_tbl =
		ENA_MEM_ALLOC(ena_dev->dmadev, tbl_size);
	if (unlikely(!rss->host_rss_ind_tbl))
		goto mem_err2;

	rss->tbl_log_size = log_size;

	return 0;

mem_err2:
	tbl_size = (1ULL << log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      tbl_size,
			      rss->rss_ind_tbl,
			      rss->rss_ind_tbl_dma_addr,
			      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;
mem_err1:
	rss->tbl_log_size = 0;
	return ENA_COM_NO_MEM;
}

static void ena_com_indirect_table_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	size_t tbl_size = (1ULL << rss->tbl_log_size) *
		sizeof(struct ena_admin_rss_ind_table_entry);

	if (rss->rss_ind_tbl)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
				      tbl_size,
				      rss->rss_ind_tbl,
				      rss->rss_ind_tbl_dma_addr,
				      rss->rss_ind_tbl_mem_handle);
	rss->rss_ind_tbl = NULL;

	if (rss->host_rss_ind_tbl)
		ENA_MEM_FREE(ena_dev->dmadev,
			     rss->host_rss_ind_tbl,
			     ((1ULL << rss->tbl_log_size) * sizeof(u16)));
	rss->host_rss_ind_tbl = NULL;
}

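/* Issues CREATE_SQ for an IO submission queue. The command encodes the
 * direction, placement policy and completion policy; for host-memory SQs
 * the ring base address is passed as well. On success the device returns
 * the SQ index and doorbell offset, and for LLQ also the offsets of the
 * header and descriptor windows inside the device memory BAR.
 */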
static int ena_com_create_io_sq(struct ena_com_dev *ena_dev,
				struct ena_com_io_sq *io_sq, u16 cq_idx)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_sq_cmd create_cmd;
	struct ena_admin_acq_create_sq_resp_desc cmd_completion;
	u8 direction;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_SQ;

	if (io_sq->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		direction = ENA_ADMIN_SQ_DIRECTION_TX;
	else
		direction = ENA_ADMIN_SQ_DIRECTION_RX;

	create_cmd.sq_identity |= (direction <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_SQ_DIRECTION_MASK;

	create_cmd.sq_caps_2 |= io_sq->mem_queue_type &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_PLACEMENT_POLICY_MASK;

	create_cmd.sq_caps_2 |= (ENA_ADMIN_COMPLETION_POLICY_DESC <<
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_SHIFT) &
		ENA_ADMIN_AQ_CREATE_SQ_CMD_COMPLETION_POLICY_MASK;

	create_cmd.sq_caps_3 |=
		ENA_ADMIN_AQ_CREATE_SQ_CMD_IS_PHYSICALLY_CONTIGUOUS_MASK;

	create_cmd.cq_idx = cq_idx;
	create_cmd.sq_depth = io_sq->q_depth;

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_HOST) {
		ret = ena_com_mem_addr_set(ena_dev,
					   &create_cmd.sq_ba,
					   io_sq->desc_addr.phys_addr);
		if (unlikely(ret)) {
			ena_trc_err("memory address set failed\n");
			return ret;
		}
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO SQ. error: %d\n", ret);
		return ret;
	}

	io_sq->idx = cmd_completion.sq_idx;

	io_sq->db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		(uintptr_t)cmd_completion.sq_doorbell_offset);

	if (io_sq->mem_queue_type == ENA_ADMIN_PLACEMENT_POLICY_DEV) {
		io_sq->header_addr = (u8 __iomem *)((uintptr_t)ena_dev->mem_bar
				+ cmd_completion.llq_headers_offset);

		io_sq->desc_addr.pbuf_dev_addr =
			(u8 __iomem *)((uintptr_t)ena_dev->mem_bar +
			cmd_completion.llq_descriptors_offset);
	}

	ena_trc_dbg("created sq[%u], depth[%u]\n", io_sq->idx, io_sq->q_depth);

	return ret;
}

static int ena_com_ind_tbl_convert_to_device(struct ena_com_dev *ena_dev)
{
	struct ena_rss *rss = &ena_dev->rss;
	struct ena_com_io_sq *io_sq;
	u16 qid;
	int i;

	for (i = 0; i < 1 << rss->tbl_log_size; i++) {
		qid = rss->host_rss_ind_tbl[i];
		if (qid >= ENA_TOTAL_NUM_QUEUES)
			return ENA_COM_INVAL;

		io_sq = &ena_dev->io_sq_queues[qid];

		if (io_sq->direction != ENA_COM_IO_QUEUE_DIRECTION_RX)
			return ENA_COM_INVAL;

		rss->rss_ind_tbl[i].cq_idx = io_sq->idx;
	}

	return 0;
}

static void ena_com_update_intr_delay_resolution(struct ena_com_dev *ena_dev,
						 u16 intr_delay_resolution)
{
	u16 prev_intr_delay_resolution = ena_dev->intr_delay_resolution;

	if (unlikely(!intr_delay_resolution)) {
		ena_trc_err("Illegal intr_delay_resolution provided. Going to use default 1 usec resolution\n");
		intr_delay_resolution = ENA_DEFAULT_INTR_DELAY_RESOLUTION;
	}

	/* update Rx */
	ena_dev->intr_moder_rx_interval =
		ena_dev->intr_moder_rx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	/* update Tx */
	ena_dev->intr_moder_tx_interval =
		ena_dev->intr_moder_tx_interval *
		prev_intr_delay_resolution /
		intr_delay_resolution;

	ena_dev->intr_delay_resolution = intr_delay_resolution;
}

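/* Public entry point for admin commands: submit, then wait for the
 * completion in either polling or interrupt mode. A minimal caller sketch
 * (my_cmd/my_resp are illustrative names only; real callers use concrete
 * admin structures exactly as ena_com_set_aenq_config() does below):
 *
 *	struct ena_admin_set_feat_cmd my_cmd = {0};
 *	struct ena_admin_set_feat_resp my_resp = {0};
 *
 *	my_cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
 *	ret = ena_com_execute_admin_command(&ena_dev->admin_queue,
 *					    (struct ena_admin_aq_entry *)&my_cmd,
 *					    sizeof(my_cmd),
 *					    (struct ena_admin_acq_entry *)&my_resp,
 *					    sizeof(my_resp));
 */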
int ena_com_execute_admin_command(struct ena_com_admin_queue *admin_queue,
				  struct ena_admin_aq_entry *cmd,
				  size_t cmd_size,
				  struct ena_admin_acq_entry *comp,
				  size_t comp_size)
{
	struct ena_comp_ctx *comp_ctx;
	int ret;

	comp_ctx = ena_com_submit_admin_cmd(admin_queue, cmd, cmd_size,
					    comp, comp_size);
	if (IS_ERR(comp_ctx)) {
		if (comp_ctx == ERR_PTR(ENA_COM_NO_DEVICE))
			ena_trc_dbg("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));
		else
			ena_trc_err("Failed to submit command [%ld]\n",
				    PTR_ERR(comp_ctx));

		return PTR_ERR(comp_ctx);
	}

	ret = ena_com_wait_and_process_admin_cq(comp_ctx, admin_queue);
	if (unlikely(ret)) {
		if (admin_queue->running_state)
			ena_trc_err("Failed to process command. ret = %d\n",
				    ret);
		else
			ena_trc_dbg("Failed to process command. ret = %d\n",
				    ret);
	}
	return ret;
}

int ena_com_create_io_cq(struct ena_com_dev *ena_dev,
			 struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_create_cq_cmd create_cmd;
	struct ena_admin_acq_create_cq_resp_desc cmd_completion;
	int ret;

	memset(&create_cmd, 0x0, sizeof(create_cmd));

	create_cmd.aq_common_descriptor.opcode = ENA_ADMIN_CREATE_CQ;

	create_cmd.cq_caps_2 |= (io_cq->cdesc_entry_size_in_bytes / 4) &
		ENA_ADMIN_AQ_CREATE_CQ_CMD_CQ_ENTRY_SIZE_WORDS_MASK;
	create_cmd.cq_caps_1 |=
		ENA_ADMIN_AQ_CREATE_CQ_CMD_INTERRUPT_MODE_ENABLED_MASK;

	create_cmd.msix_vector = io_cq->msix_vector;
	create_cmd.cq_depth = io_cq->q_depth;

	ret = ena_com_mem_addr_set(ena_dev,
				   &create_cmd.cq_ba,
				   io_cq->cdesc_addr.phys_addr);
	if (unlikely(ret)) {
		ena_trc_err("memory address set failed\n");
		return ret;
	}

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&create_cmd,
					    sizeof(create_cmd),
					    (struct ena_admin_acq_entry *)&cmd_completion,
					    sizeof(cmd_completion));
	if (unlikely(ret)) {
		ena_trc_err("Failed to create IO CQ. error: %d\n", ret);
		return ret;
	}

	io_cq->idx = cmd_completion.cq_idx;

	io_cq->unmask_reg = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		cmd_completion.cq_interrupt_unmask_register_offset);

	if (cmd_completion.cq_head_db_register_offset)
		io_cq->cq_head_db_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.cq_head_db_register_offset);

	if (cmd_completion.numa_node_register_offset)
		io_cq->numa_node_cfg_reg =
			(u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
			cmd_completion.numa_node_register_offset);

	ena_trc_dbg("created cq[%u], depth[%u]\n", io_cq->idx, io_cq->q_depth);

	return ret;
}

int ena_com_get_io_handlers(struct ena_com_dev *ena_dev, u16 qid,
			    struct ena_com_io_sq **io_sq,
			    struct ena_com_io_cq **io_cq)
{
	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Invalid queue number %d but the max is %d\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	*io_sq = &ena_dev->io_sq_queues[qid];
	*io_cq = &ena_dev->io_cq_queues[qid];

	return 0;
}

void ena_com_abort_admin_commands(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_comp_ctx *comp_ctx;
	u16 i;

	if (!admin_queue->comp_ctx)
		return;

	for (i = 0; i < admin_queue->q_depth; i++) {
		comp_ctx = get_comp_ctxt(admin_queue, i, false);
		if (unlikely(!comp_ctx))
			break;

		comp_ctx->status = ENA_CMD_ABORTED;

		ENA_WAIT_EVENT_SIGNAL(comp_ctx->wait_event);
	}
}

void ena_com_wait_for_abort_completion(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	while (ATOMIC32_READ(&admin_queue->outstanding_cmds) != 0) {
		ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
		ENA_MSLEEP(ENA_POLL_MS);
		ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	}
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

int ena_com_destroy_io_cq(struct ena_com_dev *ena_dev,
			  struct ena_com_io_cq *io_cq)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_admin_aq_destroy_cq_cmd destroy_cmd;
	struct ena_admin_acq_destroy_cq_resp_desc destroy_resp;
	int ret;

	memset(&destroy_cmd, 0x0, sizeof(destroy_cmd));

	destroy_cmd.cq_idx = io_cq->idx;
	destroy_cmd.aq_common_descriptor.opcode = ENA_ADMIN_DESTROY_CQ;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&destroy_cmd,
					    sizeof(destroy_cmd),
					    (struct ena_admin_acq_entry *)&destroy_resp,
					    sizeof(destroy_resp));

	if (unlikely(ret && (ret != ENA_COM_NO_DEVICE)))
		ena_trc_err("Failed to destroy IO CQ. error: %d\n", ret);

	return ret;
}

bool ena_com_get_admin_running_state(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.running_state;
}

void ena_com_set_admin_running_state(struct ena_com_dev *ena_dev, bool state)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	unsigned long flags = 0;

	ENA_SPINLOCK_LOCK(admin_queue->q_lock, flags);
	ena_dev->admin_queue.running_state = state;
	ENA_SPINLOCK_UNLOCK(admin_queue->q_lock, flags);
}

void ena_com_admin_aenq_enable(struct ena_com_dev *ena_dev)
{
	u16 depth = ena_dev->aenq.q_depth;

	ENA_WARN(ena_dev->aenq.head != depth, "Invalid AENQ state\n");

	/* Init head_db to mark that all entries in the queue
	 * are initially available
	 */
	ENA_REG_WRITE32(ena_dev->bus, depth, ena_dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
}

int ena_com_set_aenq_config(struct ena_com_dev *ena_dev, u32 groups_flag)
{
	struct ena_com_admin_queue *admin_queue;
	struct ena_admin_set_feat_cmd cmd;
	struct ena_admin_set_feat_resp resp;
	struct ena_admin_get_feat_resp get_resp;
	int ret;

	ret = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_AENQ_CONFIG, 0);
	if (ret) {
		ena_trc_info("Can't get aenq configuration\n");
		return ret;
	}

	if ((get_resp.u.aenq.supported_groups & groups_flag) != groups_flag) {
		ena_trc_warn("Trying to set unsupported aenq events. supported flag: 0x%x asked flag: 0x%x\n",
			     get_resp.u.aenq.supported_groups,
			     groups_flag);
		return ENA_COM_UNSUPPORTED;
	}

	memset(&cmd, 0x0, sizeof(cmd));
	admin_queue = &ena_dev->admin_queue;

	cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
	cmd.aq_common_descriptor.flags = 0;
	cmd.feat_common.feature_id = ENA_ADMIN_AENQ_CONFIG;
	cmd.u.aenq.enabled_groups = groups_flag;

	ret = ena_com_execute_admin_command(admin_queue,
					    (struct ena_admin_aq_entry *)&cmd,
					    sizeof(cmd),
					    (struct ena_admin_acq_entry *)&resp,
					    sizeof(resp));

	if (unlikely(ret))
		ena_trc_err("Failed to config AENQ ret: %d\n", ret);

	return ret;
}

int ena_com_get_dma_width(struct ena_com_dev *ena_dev)
{
	u32 caps = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);
	int width;

	if (unlikely(caps == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	width = (caps & ENA_REGS_CAPS_DMA_ADDR_WIDTH_MASK) >>
		ENA_REGS_CAPS_DMA_ADDR_WIDTH_SHIFT;

	ena_trc_dbg("ENA dma width: %d\n", width);

	if ((width < 32) || (width > ENA_MAX_PHYS_ADDR_SIZE_BITS)) {
		ena_trc_err("DMA width illegal value: %d\n", width);
		return ENA_COM_INVAL;
	}

	ena_dev->dma_addr_bits = width;

	return width;
}

int ena_com_validate_version(struct ena_com_dev *ena_dev)
{
	u32 ver;
	u32 ctrl_ver;
	u32 ctrl_ver_masked;

	/* Make sure the ENA version and the controller version are at least
	 * the minimum required version
	 */
	ver = ena_com_reg_bar_read32(ena_dev, ENA_REGS_VERSION_OFF);
	ctrl_ver = ena_com_reg_bar_read32(ena_dev,
					  ENA_REGS_CONTROLLER_VERSION_OFF);

	if (unlikely((ver == ENA_MMIO_READ_TIMEOUT) ||
		     (ctrl_ver == ENA_MMIO_READ_TIMEOUT))) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	ena_trc_info("ena device version: %d.%d\n",
		     (ver & ENA_REGS_VERSION_MAJOR_VERSION_MASK) >>
		     ENA_REGS_VERSION_MAJOR_VERSION_SHIFT,
		     ver & ENA_REGS_VERSION_MINOR_VERSION_MASK);

	ena_trc_info("ena controller version: %d.%d.%d implementation version %d\n",
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK)
		     >> ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_SHIFT,
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK),
		     (ctrl_ver & ENA_REGS_CONTROLLER_VERSION_IMPL_ID_MASK) >>
		     ENA_REGS_CONTROLLER_VERSION_IMPL_ID_SHIFT);

	ctrl_ver_masked =
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MAJOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_MINOR_VERSION_MASK) |
		(ctrl_ver & ENA_REGS_CONTROLLER_VERSION_SUBMINOR_VERSION_MASK);

	/* Validate the ctrl version value */
	if (ctrl_ver_masked < MIN_ENA_CTRL_VER) {
		ena_trc_err("ENA ctrl version is lower than the minimal ctrl version the driver supports\n");
		return -1;
	}

	return 0;
}

void ena_com_admin_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	struct ena_com_admin_cq *cq = &admin_queue->cq;
	struct ena_com_admin_sq *sq = &admin_queue->sq;
	struct ena_com_aenq *aenq = &ena_dev->aenq;
	u16 size;

	/* Only touch the wait event if the contexts were actually allocated */
	if (admin_queue->comp_ctx) {
		ENA_WAIT_EVENT_DESTROY(admin_queue->comp_ctx->wait_event);
		ENA_MEM_FREE(ena_dev->dmadev,
			     admin_queue->comp_ctx,
			     (admin_queue->q_depth * sizeof(struct ena_comp_ctx)));
	}
	admin_queue->comp_ctx = NULL;
	size = ADMIN_SQ_SIZE(admin_queue->q_depth);
	if (sq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, sq->entries,
				      sq->dma_addr, sq->mem_handle);
	sq->entries = NULL;

	size = ADMIN_CQ_SIZE(admin_queue->q_depth);
	if (cq->entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, cq->entries,
				      cq->dma_addr, cq->mem_handle);
	cq->entries = NULL;

	size = ADMIN_AENQ_SIZE(aenq->q_depth);
	if (ena_dev->aenq.entries)
		ENA_MEM_FREE_COHERENT(ena_dev->dmadev, size, aenq->entries,
				      aenq->dma_addr, aenq->mem_handle);
	aenq->entries = NULL;
	ENA_SPINLOCK_DESTROY(admin_queue->q_lock);
}

void ena_com_set_admin_polling_mode(struct ena_com_dev *ena_dev, bool polling)
{
	u32 mask_value = 0;

	if (polling)
		mask_value = ENA_REGS_ADMIN_INTR_MASK;

	ENA_REG_WRITE32(ena_dev->bus, mask_value,
			ena_dev->reg_bar + ENA_REGS_INTR_MASK_OFF);
	ena_dev->admin_queue.polling = polling;
}

bool ena_com_get_admin_polling_mode(struct ena_com_dev *ena_dev)
{
	return ena_dev->admin_queue.polling;
}

void ena_com_set_admin_auto_polling_mode(struct ena_com_dev *ena_dev,
					 bool polling)
{
	ena_dev->admin_queue.auto_polling = polling;
}

int ena_com_mmio_reg_read_request_init(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_SPINLOCK_INIT(mmio_read->lock);
	ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
			       sizeof(*mmio_read->read_resp),
			       mmio_read->read_resp,
			       mmio_read->read_resp_dma_addr,
			       mmio_read->read_resp_mem_handle);
	if (unlikely(!mmio_read->read_resp))
		goto err;

	ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

	mmio_read->read_resp->req_id = 0x0;
	mmio_read->seq_num = 0x0;
	mmio_read->readless_supported = true;

	return 0;

err:
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
	return ENA_COM_NO_MEM;
}

void ena_com_set_mmio_read_mode(struct ena_com_dev *ena_dev, bool readless_supported)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	mmio_read->readless_supported = readless_supported;
}

void ena_com_mmio_reg_read_request_destroy(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;

	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, 0x0, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);

	ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
			      sizeof(*mmio_read->read_resp),
			      mmio_read->read_resp,
			      mmio_read->read_resp_dma_addr,
			      mmio_read->read_resp_mem_handle);

	mmio_read->read_resp = NULL;
	ENA_SPINLOCK_DESTROY(mmio_read->lock);
}

void ena_com_mmio_reg_read_request_write_dev_addr(struct ena_com_dev *ena_dev)
{
	struct ena_com_mmio_read *mmio_read = &ena_dev->mmio_read;
	u32 addr_low, addr_high;

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(mmio_read->read_resp_dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(mmio_read->read_resp_dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_MMIO_RESP_HI_OFF);
}

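/* Admin queue bring-up order: verify the device reports READY, allocate
 * the completion contexts and the SQ/CQ rings, program their base
 * addresses and the AQ/ACQ caps registers (depth plus entry size), and
 * finally set up the AENQ. Any failure unwinds through
 * ena_com_admin_destroy().
 */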
int ena_com_admin_init(struct ena_com_dev *ena_dev,
		       struct ena_aenq_handlers *aenq_handlers)
{
	struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
	u32 aq_caps, acq_caps, dev_sts, addr_low, addr_high;
	int ret;

	dev_sts = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);

	if (unlikely(dev_sts == ENA_MMIO_READ_TIMEOUT)) {
		ena_trc_err("Reg read timeout occurred\n");
		return ENA_COM_TIMER_EXPIRED;
	}

	if (!(dev_sts & ENA_REGS_DEV_STS_READY_MASK)) {
		ena_trc_err("Device isn't ready, abort com init\n");
		return ENA_COM_NO_DEVICE;
	}

	admin_queue->q_depth = ENA_ADMIN_QUEUE_DEPTH;

	admin_queue->bus = ena_dev->bus;
	admin_queue->q_dmadev = ena_dev->dmadev;
	admin_queue->polling = false;
	admin_queue->curr_cmd_id = 0;

	ATOMIC32_SET(&admin_queue->outstanding_cmds, 0);

	ENA_SPINLOCK_INIT(admin_queue->q_lock);

	ret = ena_com_init_comp_ctxt(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_sq(admin_queue);
	if (ret)
		goto error;

	ret = ena_com_admin_init_cq(admin_queue);
	if (ret)
		goto error;

	admin_queue->sq.db_addr = (u32 __iomem *)((uintptr_t)ena_dev->reg_bar +
		ENA_REGS_AQ_DB_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->sq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->sq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_AQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_AQ_BASE_HI_OFF);

	addr_low = ENA_DMA_ADDR_TO_UINT32_LOW(admin_queue->cq.dma_addr);
	addr_high = ENA_DMA_ADDR_TO_UINT32_HIGH(admin_queue->cq.dma_addr);

	ENA_REG_WRITE32(ena_dev->bus, addr_low, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_LO_OFF);
	ENA_REG_WRITE32(ena_dev->bus, addr_high, ena_dev->reg_bar + ENA_REGS_ACQ_BASE_HI_OFF);

	aq_caps = 0;
	aq_caps |= admin_queue->q_depth & ENA_REGS_AQ_CAPS_AQ_DEPTH_MASK;
	aq_caps |= (sizeof(struct ena_admin_aq_entry) <<
		    ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_SHIFT) &
		   ENA_REGS_AQ_CAPS_AQ_ENTRY_SIZE_MASK;

	acq_caps = 0;
	acq_caps |= admin_queue->q_depth & ENA_REGS_ACQ_CAPS_ACQ_DEPTH_MASK;
	acq_caps |= (sizeof(struct ena_admin_acq_entry) <<
		     ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_SHIFT) &
		    ENA_REGS_ACQ_CAPS_ACQ_ENTRY_SIZE_MASK;

	ENA_REG_WRITE32(ena_dev->bus, aq_caps, ena_dev->reg_bar + ENA_REGS_AQ_CAPS_OFF);
	ENA_REG_WRITE32(ena_dev->bus, acq_caps, ena_dev->reg_bar + ENA_REGS_ACQ_CAPS_OFF);
	ret = ena_com_admin_init_aenq(ena_dev, aenq_handlers);
	if (ret)
		goto error;

	admin_queue->running_state = true;

	return 0;
error:
	ena_com_admin_destroy(ena_dev);

	return ret;
}

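/* Creates one IO queue pair. The CQ is created first so its index can be
 * referenced by the CREATE_SQ command. A minimal caller sketch (the field
 * values are illustrative, not required):
 *
 *	struct ena_com_create_io_ctx ctx = {
 *		.qid		= 0,
 *		.direction	= ENA_COM_IO_QUEUE_DIRECTION_TX,
 *		.mem_queue_type	= ENA_ADMIN_PLACEMENT_POLICY_HOST,
 *		.msix_vector	= 0,
 *		.queue_size	= 1024,
 *		.numa_node	= 0,
 *	};
 *
 *	rc = ena_com_create_io_queue(ena_dev, &ctx);
 */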
int ena_com_create_io_queue(struct ena_com_dev *ena_dev,
			    struct ena_com_create_io_ctx *ctx)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;
	int ret;

	if (ctx->qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    ctx->qid, ENA_TOTAL_NUM_QUEUES);
		return ENA_COM_INVAL;
	}

	io_sq = &ena_dev->io_sq_queues[ctx->qid];
	io_cq = &ena_dev->io_cq_queues[ctx->qid];

	memset(io_sq, 0x0, sizeof(*io_sq));
	memset(io_cq, 0x0, sizeof(*io_cq));

	/* Init CQ */
	io_cq->q_depth = ctx->queue_size;
	io_cq->direction = ctx->direction;
	io_cq->qid = ctx->qid;

	io_cq->msix_vector = ctx->msix_vector;

	io_sq->q_depth = ctx->queue_size;
	io_sq->direction = ctx->direction;
	io_sq->qid = ctx->qid;

	io_sq->mem_queue_type = ctx->mem_queue_type;

	if (ctx->direction == ENA_COM_IO_QUEUE_DIRECTION_TX)
		/* header length is limited to 8 bits */
		io_sq->tx_max_header_size =
			ENA_MIN32(ena_dev->tx_max_header_size, SZ_256);

	ret = ena_com_init_io_sq(ena_dev, ctx, io_sq);
	if (ret)
		goto error;
	ret = ena_com_init_io_cq(ena_dev, ctx, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_cq(ena_dev, io_cq);
	if (ret)
		goto error;

	ret = ena_com_create_io_sq(ena_dev, io_sq, io_cq->idx);
	if (ret)
		goto destroy_io_cq;

	return 0;

destroy_io_cq:
	ena_com_destroy_io_cq(ena_dev, io_cq);
error:
	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
	return ret;
}

void ena_com_destroy_io_queue(struct ena_com_dev *ena_dev, u16 qid)
{
	struct ena_com_io_sq *io_sq;
	struct ena_com_io_cq *io_cq;

	if (qid >= ENA_TOTAL_NUM_QUEUES) {
		ena_trc_err("Qid (%d) is bigger than max num of queues (%d)\n",
			    qid, ENA_TOTAL_NUM_QUEUES);
		return;
	}

	io_sq = &ena_dev->io_sq_queues[qid];
	io_cq = &ena_dev->io_cq_queues[qid];

	ena_com_destroy_io_sq(ena_dev, io_sq);
	ena_com_destroy_io_cq(ena_dev, io_cq);

	ena_com_io_queue_free(ena_dev, io_sq, io_cq);
}

1918int ena_com_get_link_params(struct ena_com_dev *ena_dev,
1919 struct ena_admin_get_feat_resp *resp)
1920{
1921 return ena_com_get_feature(ena_dev, resp, ENA_ADMIN_LINK_CONFIG, 0);
1922}
1923
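/* ena_com_get_dev_attr_feat - collect the device attributes and feature
 * capabilities (queue limits, AENQ groups, stateless offloads) into
 * @get_feat_ctx. Optional features (HW hints, LLQ, RSS indirection table)
 * that the device reports as unsupported are zeroed rather than treated
 * as an error.
 */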
int ena_com_get_dev_attr_feat(struct ena_com_dev *ena_dev,
                              struct ena_com_dev_get_features_ctx *get_feat_ctx)
{
        struct ena_admin_get_feat_resp get_resp;
        int rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_DEVICE_ATTRIBUTES, 0);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->dev_attr, &get_resp.u.dev_attr,
               sizeof(get_resp.u.dev_attr));
        ena_dev->supported_features = get_resp.u.dev_attr.supported_features;

        if (ena_dev->supported_features & BIT(ENA_ADMIN_MAX_QUEUES_EXT)) {
                rc = ena_com_get_feature(ena_dev, &get_resp,
                                         ENA_ADMIN_MAX_QUEUES_EXT,
                                         ENA_FEATURE_MAX_QUEUE_EXT_VER);
                if (rc)
                        return rc;

                if (get_resp.u.max_queue_ext.version != ENA_FEATURE_MAX_QUEUE_EXT_VER)
                        return ENA_COM_INVAL;

                memcpy(&get_feat_ctx->max_queue_ext, &get_resp.u.max_queue_ext,
                       sizeof(get_resp.u.max_queue_ext));
                ena_dev->tx_max_header_size =
                        get_resp.u.max_queue_ext.max_queue_ext.max_tx_header_size;
        } else {
                rc = ena_com_get_feature(ena_dev, &get_resp,
                                         ENA_ADMIN_MAX_QUEUES_NUM, 0);
                if (rc)
                        return rc;

                memcpy(&get_feat_ctx->max_queues, &get_resp.u.max_queue,
                       sizeof(get_resp.u.max_queue));
                ena_dev->tx_max_header_size =
                        get_resp.u.max_queue.max_header_size;
        }

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_AENQ_CONFIG, 0);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->aenq, &get_resp.u.aenq,
               sizeof(get_resp.u.aenq));

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
        if (rc)
                return rc;

        memcpy(&get_feat_ctx->offload, &get_resp.u.offload,
               sizeof(get_resp.u.offload));

        /* The following features are optional. If the device doesn't
         * support them, zero the corresponding context instead of failing.
         */
        rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_HW_HINTS, 0);

        if (!rc)
                memcpy(&get_feat_ctx->hw_hints, &get_resp.u.hw_hints,
                       sizeof(get_resp.u.hw_hints));
        else if (rc == ENA_COM_UNSUPPORTED)
                memset(&get_feat_ctx->hw_hints, 0x0, sizeof(get_feat_ctx->hw_hints));
        else
                return rc;

        rc = ena_com_get_feature(ena_dev, &get_resp, ENA_ADMIN_LLQ, 0);
        if (!rc)
                memcpy(&get_feat_ctx->llq, &get_resp.u.llq,
                       sizeof(get_resp.u.llq));
        else if (rc == ENA_COM_UNSUPPORTED)
                memset(&get_feat_ctx->llq, 0x0, sizeof(get_feat_ctx->llq));
        else
                return rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG, 0);
        if (!rc)
                memcpy(&get_feat_ctx->ind_table, &get_resp.u.ind_table,
                       sizeof(get_resp.u.ind_table));
        else if (rc == ENA_COM_UNSUPPORTED)
                memset(&get_feat_ctx->ind_table, 0x0,
                       sizeof(get_feat_ctx->ind_table));
        else
                return rc;

        return 0;
}

void ena_com_admin_q_comp_intr_handler(struct ena_com_dev *ena_dev)
{
        ena_com_handle_admin_completion(&ena_dev->admin_queue);
}

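/* Retrieve the callback registered for the given AENQ group, falling back
 * to the unimplemented_handler stub when no specific handler was set.
 */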
static ena_aenq_handler ena_com_get_specific_aenq_cb(struct ena_com_dev *dev,
                                                     u16 group)
{
        struct ena_aenq_handlers *aenq_handlers = dev->aenq.aenq_handlers;

        if ((group < ENA_MAX_HANDLERS) && aenq_handlers->handlers[group])
                return aenq_handlers->handlers[group];

        return aenq_handlers->unimplemented_handler;
}

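/* ena_com_aenq_intr_handler - AENQ (async event notification queue)
 * interrupt handler. Walks the AENQ entries whose phase bit matches the
 * current phase, dispatches each event to its group handler, and finally
 * writes the updated head back to the device doorbell.
 */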
void ena_com_aenq_intr_handler(struct ena_com_dev *dev, void *data)
{
        struct ena_admin_aenq_entry *aenq_e;
        struct ena_admin_aenq_common_desc *aenq_common;
        struct ena_com_aenq *aenq = &dev->aenq;
        u64 timestamp;
        ena_aenq_handler handler_cb;
        u16 masked_head, processed = 0;
        u8 phase;

        masked_head = aenq->head & (aenq->q_depth - 1);
        phase = aenq->phase;
        aenq_e = &aenq->entries[masked_head];
        aenq_common = &aenq_e->aenq_common_desc;

        /* Go over all the events */
        while ((READ_ONCE8(aenq_common->flags) &
                ENA_ADMIN_AENQ_COMMON_DESC_PHASE_MASK) == phase) {
                /* Make sure the device finished writing the rest of the
                 * descriptor before reading it; the phase bit is read first.
                 */
                dma_rmb();

                timestamp = (u64)aenq_common->timestamp_low |
                            ((u64)aenq_common->timestamp_high << 32);
                /* ENA_TOUCH marks timestamp as used in case the trace
                 * print below is compiled out
                 */
                ENA_TOUCH(timestamp);
                ena_trc_dbg("AENQ! Group[%x] Syndrome[%x] timestamp: [%" ENA_PRIu64 "s]\n",
                            aenq_common->group,
                            aenq_common->syndrom,
                            timestamp);

                /* Handle the specific event */
                handler_cb = ena_com_get_specific_aenq_cb(dev,
                                                          aenq_common->group);
                handler_cb(data, aenq_e);

                /* Get next event entry */
                masked_head++;
                processed++;

                if (unlikely(masked_head == aenq->q_depth)) {
                        masked_head = 0;
                        phase = !phase;
                }
                aenq_e = &aenq->entries[masked_head];
                aenq_common = &aenq_e->aenq_common_desc;
        }

        aenq->head += processed;
        aenq->phase = phase;

        /* Don't update the aenq doorbell if no events were processed */
        if (!processed)
                return;

        /* Write the aenq doorbell after all AENQ descriptors were read */
        mb();
        ENA_REG_WRITE32_RELAXED(dev->bus, (u32)aenq->head,
                                dev->reg_bar + ENA_REGS_AENQ_HEAD_DB_OFF);
#ifndef MMIOWB_NOT_DEFINED
        mmiowb();
#endif
}

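/* ena_com_dev_reset - trigger a device reset: assert the reset bit with
 * the given reason, wait for the device to report reset-in-progress and
 * then reset-done, and refresh the admin completion timeout from the caps
 * register.
 */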
int ena_com_dev_reset(struct ena_com_dev *ena_dev,
                      enum ena_regs_reset_reason_types reset_reason)
{
        u32 stat, timeout, cap, reset_val;
        int rc;

        stat = ena_com_reg_bar_read32(ena_dev, ENA_REGS_DEV_STS_OFF);
        cap = ena_com_reg_bar_read32(ena_dev, ENA_REGS_CAPS_OFF);

        if (unlikely((stat == ENA_MMIO_READ_TIMEOUT) ||
                     (cap == ENA_MMIO_READ_TIMEOUT))) {
                ena_trc_err("Reg read32 timeout occurred\n");
                return ENA_COM_TIMER_EXPIRED;
        }

        if ((stat & ENA_REGS_DEV_STS_READY_MASK) == 0) {
                ena_trc_err("Device isn't ready, can't reset device\n");
                return ENA_COM_INVAL;
        }

        timeout = (cap & ENA_REGS_CAPS_RESET_TIMEOUT_MASK) >>
                  ENA_REGS_CAPS_RESET_TIMEOUT_SHIFT;
        if (timeout == 0) {
                ena_trc_err("Invalid timeout value\n");
                return ENA_COM_INVAL;
        }

        /* Start reset */
        reset_val = ENA_REGS_DEV_CTL_DEV_RESET_MASK;
        reset_val |= (reset_reason << ENA_REGS_DEV_CTL_RESET_REASON_SHIFT) &
                     ENA_REGS_DEV_CTL_RESET_REASON_MASK;
        ENA_REG_WRITE32(ena_dev->bus, reset_val, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);

        /* Write again the MMIO read request address */
        ena_com_mmio_reg_read_request_write_dev_addr(ena_dev);

        rc = wait_for_reset_state(ena_dev, timeout,
                                  ENA_REGS_DEV_STS_RESET_IN_PROGRESS_MASK);
        if (rc != 0) {
                ena_trc_err("Reset indication didn't turn on\n");
                return rc;
        }

        /* Reset done */
        ENA_REG_WRITE32(ena_dev->bus, 0, ena_dev->reg_bar + ENA_REGS_DEV_CTL_OFF);
        rc = wait_for_reset_state(ena_dev, timeout, 0);
        if (rc != 0) {
                ena_trc_err("Reset indication didn't turn off\n");
                return rc;
        }

        timeout = (cap & ENA_REGS_CAPS_ADMIN_CMD_TO_MASK) >>
                  ENA_REGS_CAPS_ADMIN_CMD_TO_SHIFT;
        if (timeout)
                /* the resolution of the timeout field is 100ms */
                ena_dev->admin_queue.completion_timeout = timeout * 100000;
        else
                ena_dev->admin_queue.completion_timeout = ADMIN_CMD_TIMEOUT_US;

        return 0;
}

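/* Issue an ENA_ADMIN_GET_STATS admin command of the given type and place
 * the response in @ctx.
 */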
static int ena_get_dev_stats(struct ena_com_dev *ena_dev,
                             struct ena_com_stats_ctx *ctx,
                             enum ena_admin_get_stats_type type)
{
        struct ena_admin_aq_get_stats_cmd *get_cmd = &ctx->get_cmd;
        struct ena_admin_acq_get_stats_resp *get_resp = &ctx->get_resp;
        struct ena_com_admin_queue *admin_queue;
        int ret;

        admin_queue = &ena_dev->admin_queue;

        get_cmd->aq_common_descriptor.opcode = ENA_ADMIN_GET_STATS;
        get_cmd->aq_common_descriptor.flags = 0;
        get_cmd->type = type;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)get_cmd,
                                            sizeof(*get_cmd),
                                            (struct ena_admin_acq_entry *)get_resp,
                                            sizeof(*get_resp));

        if (unlikely(ret))
                ena_trc_err("Failed to get stats. error: %d\n", ret);

        return ret;
}

int ena_com_get_dev_basic_stats(struct ena_com_dev *ena_dev,
                                struct ena_admin_basic_stats *stats)
{
        struct ena_com_stats_ctx ctx;
        int ret;

        memset(&ctx, 0x0, sizeof(ctx));
        ret = ena_get_dev_stats(ena_dev, &ctx, ENA_ADMIN_GET_STATS_TYPE_BASIC);
        if (likely(ret == 0))
                memcpy(stats, &ctx.get_resp.basic_stats,
                       sizeof(ctx.get_resp.basic_stats));

        return ret;
}

int ena_com_set_dev_mtu(struct ena_com_dev *ena_dev, int mtu)
{
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev, ENA_ADMIN_MTU)) {
                ena_trc_dbg("Feature %d isn't supported\n", ENA_ADMIN_MTU);
                return ENA_COM_UNSUPPORTED;
        }

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags = 0;
        cmd.feat_common.feature_id = ENA_ADMIN_MTU;
        cmd.u.mtu.mtu = mtu;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                ena_trc_err("Failed to set mtu %d. error: %d\n", mtu, ret);

        return ret;
}

int ena_com_get_offload_settings(struct ena_com_dev *ena_dev,
                                 struct ena_admin_feature_offload_desc *offload)
{
        int ret;
        struct ena_admin_get_feat_resp resp;

        ret = ena_com_get_feature(ena_dev, &resp,
                                  ENA_ADMIN_STATELESS_OFFLOAD_CONFIG, 0);
        if (unlikely(ret)) {
                ena_trc_err("Failed to get offload capabilities %d\n", ret);
                return ret;
        }

        memcpy(offload, &resp.u.offload, sizeof(resp.u.offload));

        return 0;
}

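/* ena_com_set_hash_function - program the RSS hash function and key that
 * were previously stored in ena_dev->rss into the device. The command is
 * rejected if the device doesn't advertise support for the selected
 * function.
 */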
int ena_com_set_hash_function(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        struct ena_admin_get_feat_resp get_resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_FUNCTION)) {
                ena_trc_dbg("Feature %d isn't supported\n",
                            ENA_ADMIN_RSS_HASH_FUNCTION);
                return ENA_COM_UNSUPPORTED;
        }

        /* Validate that the selected hash function is supported */
        ret = ena_com_get_feature(ena_dev, &get_resp,
                                  ENA_ADMIN_RSS_HASH_FUNCTION, 0);
        if (unlikely(ret))
                return ret;

        if (!(get_resp.u.flow_hash_func.supported_func & BIT(rss->hash_func))) {
                ena_trc_err("Func hash %d isn't supported by device, abort\n",
                            rss->hash_func);
                return ENA_COM_UNSUPPORTED;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_FUNCTION;
        cmd.u.flow_hash_func.init_val = rss->hash_init_val;
        cmd.u.flow_hash_func.selected_func = 1 << rss->hash_func;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->hash_key_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        cmd.control_buffer.length = sizeof(*rss->hash_key);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret)) {
                ena_trc_err("Failed to set hash function %d. error: %d\n",
                            rss->hash_func, ret);
                return ENA_COM_INVAL;
        }

        return 0;
}

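/* ena_com_fill_hash_function - store a new hash function, key and initial
 * value in the host copy and push them to the device; on failure the
 * previous hash function is restored.
 */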
int ena_com_fill_hash_function(struct ena_com_dev *ena_dev,
                               enum ena_admin_hash_functions func,
                               const u8 *key, u16 key_len, u32 init_val)
{
        struct ena_admin_feature_rss_flow_hash_control *hash_key;
        struct ena_admin_get_feat_resp get_resp;
        enum ena_admin_hash_functions old_func;
        struct ena_rss *rss = &ena_dev->rss;
        int rc;

        hash_key = rss->hash_key;

        /* Make sure the key length is a multiple of DWORDs */
        if (unlikely(key_len & 0x3))
                return ENA_COM_INVAL;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_FUNCTION,
                                    rss->hash_key_dma_addr,
                                    sizeof(*rss->hash_key), 0);
        if (unlikely(rc))
                return rc;

        if (!(BIT(func) & get_resp.u.flow_hash_func.supported_func)) {
                ena_trc_err("Flow hash function %d isn't supported\n", func);
                return ENA_COM_UNSUPPORTED;
        }

        switch (func) {
        case ENA_ADMIN_TOEPLITZ:
                if (key) {
                        if (key_len != sizeof(hash_key->key)) {
                                ena_trc_err("key len (%hu) doesn't equal the supported size (%zu)\n",
                                            key_len, sizeof(hash_key->key));
                                return ENA_COM_INVAL;
                        }
                        memcpy(hash_key->key, key, key_len);
                        rss->hash_init_val = init_val;
                        hash_key->keys_num = key_len / sizeof(u32);
                }
                break;
        case ENA_ADMIN_CRC32:
                rss->hash_init_val = init_val;
                break;
        default:
                ena_trc_err("Invalid hash function (%d)\n", func);
                return ENA_COM_INVAL;
        }

        old_func = rss->hash_func;
        rss->hash_func = func;
        rc = ena_com_set_hash_function(ena_dev);

        /* Restore the old function in case of failure */
        if (unlikely(rc))
                rss->hash_func = old_func;

        return rc;
}

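/* ena_com_get_hash_function - read the current hash function and key from
 * the device; the selected-function bitmask is converted back to an enum
 * value via ENA_FFS().
 */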
int ena_com_get_hash_function(struct ena_com_dev *ena_dev,
                              enum ena_admin_hash_functions *func,
                              u8 *key)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        struct ena_admin_feature_rss_flow_hash_control *hash_key =
                rss->hash_key;
        int rc;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_FUNCTION,
                                    rss->hash_key_dma_addr,
                                    sizeof(*rss->hash_key), 0);
        if (unlikely(rc))
                return rc;

        /* ENA_FFS() returns a 1-based bit index, or 0 if no bit is set */
        rss->hash_func = ENA_FFS(get_resp.u.flow_hash_func.selected_func);
        if (rss->hash_func)
                rss->hash_func--;

        if (func)
                *func = rss->hash_func;

        if (key)
                memcpy(key, hash_key->key, (size_t)(hash_key->keys_num) << 2);

        return 0;
}

int ena_com_get_hash_ctrl(struct ena_com_dev *ena_dev,
                          enum ena_admin_flow_hash_proto proto,
                          u16 *fields)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        int rc;

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_HASH_INPUT,
                                    rss->hash_ctrl_dma_addr,
                                    sizeof(*rss->hash_ctrl), 0);
        if (unlikely(rc))
                return rc;

        if (fields)
                *fields = rss->hash_ctrl->selected_fields[proto].fields;

        return 0;
}

int ena_com_set_hash_ctrl(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_HASH_INPUT)) {
                ena_trc_dbg("Feature %d isn't supported\n",
                            ENA_ADMIN_RSS_HASH_INPUT);
                return ENA_COM_UNSUPPORTED;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_HASH_INPUT;
        cmd.u.flow_hash_input.enabled_input_sort =
                ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L3_SORT_MASK |
                ENA_ADMIN_FEATURE_RSS_FLOW_HASH_INPUT_L4_SORT_MASK;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->hash_ctrl_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }
        cmd.control_buffer.length = sizeof(*hash_ctrl);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));
        if (unlikely(ret))
                ena_trc_err("Failed to set hash input. error: %d\n", ret);

        return ret;
}

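/* ena_com_set_default_hash_ctrl - select a default set of hash input
 * fields (L3/L4 addresses and ports where applicable) per protocol,
 * verify the device supports them all, and program the result.
 */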
int ena_com_set_default_hash_ctrl(struct ena_com_dev *ena_dev)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl =
                rss->hash_ctrl;
        u16 available_fields = 0;
        int rc, i;

        /* Get the supported hash input from the device */
        rc = ena_com_get_hash_ctrl(ena_dev, 0, NULL);
        if (unlikely(rc))
                return rc;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_TCP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_UDP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA |
                ENA_ADMIN_RSS_L4_DP | ENA_ADMIN_RSS_L4_SP;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP6].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_IP4_FRAG].fields =
                ENA_ADMIN_RSS_L3_SA | ENA_ADMIN_RSS_L3_DA;

        hash_ctrl->selected_fields[ENA_ADMIN_RSS_NOT_IP].fields =
                ENA_ADMIN_RSS_L2_DA | ENA_ADMIN_RSS_L2_SA;

        for (i = 0; i < ENA_ADMIN_RSS_PROTO_NUM; i++) {
                available_fields = hash_ctrl->selected_fields[i].fields &
                                   hash_ctrl->supported_fields[i].fields;
                if (available_fields != hash_ctrl->selected_fields[i].fields) {
                        ena_trc_err("hash control doesn't support all the desired configuration. proto %x supported %x selected %x\n",
                                    i, hash_ctrl->supported_fields[i].fields,
                                    hash_ctrl->selected_fields[i].fields);
                        return ENA_COM_UNSUPPORTED;
                }
        }

        rc = ena_com_set_hash_ctrl(ena_dev);

        /* In case of failure, restore the old hash ctrl */
        if (unlikely(rc))
                ena_com_get_hash_ctrl(ena_dev, 0, NULL);

        return rc;
}

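/* ena_com_fill_hash_ctrl - set the hash input fields for a single protocol
 * and program the updated table; on failure the cached hash ctrl is
 * refreshed from the device.
 */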
int ena_com_fill_hash_ctrl(struct ena_com_dev *ena_dev,
                           enum ena_admin_flow_hash_proto proto,
                           u16 hash_fields)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_feature_rss_hash_control *hash_ctrl = rss->hash_ctrl;
        u16 supported_fields;
        int rc;

        if (proto >= ENA_ADMIN_RSS_PROTO_NUM) {
                ena_trc_err("Invalid proto num (%u)\n", proto);
                return ENA_COM_INVAL;
        }

        /* Get the ctrl table */
        rc = ena_com_get_hash_ctrl(ena_dev, proto, NULL);
        if (unlikely(rc))
                return rc;

        /* Unsupported fields are only reported; the set command proceeds */
        supported_fields = hash_ctrl->supported_fields[proto].fields;
        if ((hash_fields & supported_fields) != hash_fields) {
                ena_trc_err("proto %d doesn't support the required fields %x. supports only: %x\n",
                            proto, hash_fields, supported_fields);
        }

        hash_ctrl->selected_fields[proto].fields = hash_fields;

        rc = ena_com_set_hash_ctrl(ena_dev);

        /* In case of failure, restore the old hash ctrl */
        if (unlikely(rc))
                ena_com_get_hash_ctrl(ena_dev, 0, NULL);

        return rc;
}

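/* Store one host indirection table entry after bounds-checking both the
 * table index and the queue value.
 */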
int ena_com_indirect_table_fill_entry(struct ena_com_dev *ena_dev,
                                      u16 entry_idx, u16 entry_value)
{
        struct ena_rss *rss = &ena_dev->rss;

        if (unlikely(entry_idx >= (1 << rss->tbl_log_size)))
                return ENA_COM_INVAL;

        if (unlikely(entry_value > ENA_TOTAL_NUM_QUEUES))
                return ENA_COM_INVAL;

        rss->host_rss_ind_tbl[entry_idx] = entry_value;

        return 0;
}

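/* ena_com_indirect_table_set - convert the host indirection table to
 * device queue ids and program it via a set-feature admin command.
 */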
int ena_com_indirect_table_set(struct ena_com_dev *ena_dev)
{
        struct ena_com_admin_queue *admin_queue = &ena_dev->admin_queue;
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        if (!ena_com_check_supported_feature_id(ena_dev,
                                                ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG)) {
                ena_trc_dbg("Feature %d isn't supported\n",
                            ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG);
                return ENA_COM_UNSUPPORTED;
        }

        ret = ena_com_ind_tbl_convert_to_device(ena_dev);
        if (ret) {
                ena_trc_err("Failed to convert host indirection table to device table\n");
                return ret;
        }

        memset(&cmd, 0x0, sizeof(cmd));

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.aq_common_descriptor.flags =
                ENA_ADMIN_AQ_COMMON_DESC_CTRL_DATA_INDIRECT_MASK;
        cmd.feat_common.feature_id = ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG;
        cmd.u.ind_table.size = rss->tbl_log_size;
        cmd.u.ind_table.inline_index = 0xFFFFFFFF;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.control_buffer.address,
                                   rss->rss_ind_tbl_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        cmd.control_buffer.length = (1ULL << rss->tbl_log_size) *
                                    sizeof(struct ena_admin_rss_ind_table_entry);

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                ena_trc_err("Failed to set indirect table. error: %d\n", ret);

        return ret;
}

int ena_com_indirect_table_get(struct ena_com_dev *ena_dev, u32 *ind_tbl)
{
        struct ena_rss *rss = &ena_dev->rss;
        struct ena_admin_get_feat_resp get_resp;
        u32 tbl_size;
        int i, rc;

        tbl_size = (1ULL << rss->tbl_log_size) *
                   sizeof(struct ena_admin_rss_ind_table_entry);

        rc = ena_com_get_feature_ex(ena_dev, &get_resp,
                                    ENA_ADMIN_RSS_REDIRECTION_TABLE_CONFIG,
                                    rss->rss_ind_tbl_dma_addr,
                                    tbl_size, 0);
        if (unlikely(rc))
                return rc;

        if (!ind_tbl)
                return 0;

        for (i = 0; i < (1 << rss->tbl_log_size); i++)
                ind_tbl[i] = rss->host_rss_ind_tbl[i];

        return 0;
}

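/* ena_com_rss_init - allocate the RSS resources (indirection table, hash
 * key, hash ctrl), fill the default hash key, and unwind on any failure.
 */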
int ena_com_rss_init(struct ena_com_dev *ena_dev, u16 indr_tbl_log_size)
{
        int rc;

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));

        rc = ena_com_indirect_table_allocate(ena_dev, indr_tbl_log_size);
        if (unlikely(rc))
                goto err_indr_tbl;

        rc = ena_com_hash_key_allocate(ena_dev);
        if (unlikely(rc))
                goto err_hash_key;

        ena_com_hash_key_fill_default_key(ena_dev);

        rc = ena_com_hash_ctrl_init(ena_dev);
        if (unlikely(rc))
                goto err_hash_ctrl;

        return 0;

err_hash_ctrl:
        ena_com_hash_key_destroy(ena_dev);
err_hash_key:
        ena_com_indirect_table_destroy(ena_dev);
err_indr_tbl:
        return rc;
}

void ena_com_rss_destroy(struct ena_com_dev *ena_dev)
{
        ena_com_indirect_table_destroy(ena_dev);
        ena_com_hash_key_destroy(ena_dev);
        ena_com_hash_ctrl_destroy(ena_dev);

        memset(&ena_dev->rss, 0x0, sizeof(ena_dev->rss));
}

int ena_com_allocate_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               SZ_4K,
                               host_attr->host_info,
                               host_attr->host_info_dma_addr,
                               host_attr->host_info_dma_handle);
        if (unlikely(!host_attr->host_info))
                return ENA_COM_NO_MEM;

        host_attr->host_info->ena_spec_version = ((ENA_COMMON_SPEC_VERSION_MAJOR <<
                ENA_REGS_VERSION_MAJOR_VERSION_SHIFT) |
                (ENA_COMMON_SPEC_VERSION_MINOR));

        return 0;
}

int ena_com_allocate_debug_area(struct ena_com_dev *ena_dev,
                                u32 debug_area_size)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        ENA_MEM_ALLOC_COHERENT(ena_dev->dmadev,
                               debug_area_size,
                               host_attr->debug_area_virt_addr,
                               host_attr->debug_area_dma_addr,
                               host_attr->debug_area_dma_handle);
        if (unlikely(!host_attr->debug_area_virt_addr)) {
                host_attr->debug_area_size = 0;
                return ENA_COM_NO_MEM;
        }

        host_attr->debug_area_size = debug_area_size;

        return 0;
}

void ena_com_delete_host_info(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->host_info) {
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      SZ_4K,
                                      host_attr->host_info,
                                      host_attr->host_info_dma_addr,
                                      host_attr->host_info_dma_handle);
                host_attr->host_info = NULL;
        }
}

void ena_com_delete_debug_area(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;

        if (host_attr->debug_area_virt_addr) {
                ENA_MEM_FREE_COHERENT(ena_dev->dmadev,
                                      host_attr->debug_area_size,
                                      host_attr->debug_area_virt_addr,
                                      host_attr->debug_area_dma_addr,
                                      host_attr->debug_area_dma_handle);
                host_attr->debug_area_virt_addr = NULL;
        }
}

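/* ena_com_set_host_attributes - hand the host info page and debug area
 * addresses to the device. Note: this command may be issued before the
 * supported-features bitmap is available, so unlike the other set-feature
 * helpers it does not call ena_com_check_supported_feature_id() first.
 */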
int ena_com_set_host_attributes(struct ena_com_dev *ena_dev)
{
        struct ena_host_attribute *host_attr = &ena_dev->host_attr;
        struct ena_com_admin_queue *admin_queue;
        struct ena_admin_set_feat_cmd cmd;
        struct ena_admin_set_feat_resp resp;
        int ret;

        memset(&cmd, 0x0, sizeof(cmd));
        admin_queue = &ena_dev->admin_queue;

        cmd.aq_common_descriptor.opcode = ENA_ADMIN_SET_FEATURE;
        cmd.feat_common.feature_id = ENA_ADMIN_HOST_ATTR_CONFIG;

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.debug_ba,
                                   host_attr->debug_area_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        ret = ena_com_mem_addr_set(ena_dev,
                                   &cmd.u.host_attr.os_info_ba,
                                   host_attr->host_info_dma_addr);
        if (unlikely(ret)) {
                ena_trc_err("memory address set failed\n");
                return ret;
        }

        cmd.u.host_attr.debug_area_size = host_attr->debug_area_size;

        ret = ena_com_execute_admin_command(admin_queue,
                                            (struct ena_admin_aq_entry *)&cmd,
                                            sizeof(cmd),
                                            (struct ena_admin_acq_entry *)&resp,
                                            sizeof(resp));

        if (unlikely(ret))
                ena_trc_err("Failed to set host attributes: %d\n", ret);

        return ret;
}

/* Interrupt moderation */
bool ena_com_interrupt_moderation_supported(struct ena_com_dev *ena_dev)
{
        return ena_com_check_supported_feature_id(ena_dev,
                                                  ENA_ADMIN_INTERRUPT_MODERATION);
}

static int ena_com_update_nonadaptive_moderation_interval(u32 coalesce_usecs,
                                                          u32 intr_delay_resolution,
                                                          u32 *intr_moder_interval)
{
        if (!intr_delay_resolution) {
                ena_trc_err("Illegal interrupt delay granularity value\n");
                return ENA_COM_FAULT;
        }

        /* Convert microseconds to device interval units */
        *intr_moder_interval = coalesce_usecs / intr_delay_resolution;

        return 0;
}

int ena_com_update_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev,
                                                      u32 tx_coalesce_usecs)
{
        return ena_com_update_nonadaptive_moderation_interval(tx_coalesce_usecs,
                                                              ena_dev->intr_delay_resolution,
                                                              &ena_dev->intr_moder_tx_interval);
}

int ena_com_update_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev,
                                                      u32 rx_coalesce_usecs)
{
        return ena_com_update_nonadaptive_moderation_interval(rx_coalesce_usecs,
                                                              ena_dev->intr_delay_resolution,
                                                              &ena_dev->intr_moder_rx_interval);
}

int ena_com_init_interrupt_moderation(struct ena_com_dev *ena_dev)
{
        struct ena_admin_get_feat_resp get_resp;
        u16 delay_resolution;
        int rc;

        rc = ena_com_get_feature(ena_dev, &get_resp,
                                 ENA_ADMIN_INTERRUPT_MODERATION, 0);

        if (rc) {
                if (rc == ENA_COM_UNSUPPORTED) {
                        ena_trc_dbg("Feature %d isn't supported\n",
                                    ENA_ADMIN_INTERRUPT_MODERATION);
                        rc = 0;
                } else {
                        ena_trc_err("Failed to get interrupt moderation admin cmd. rc: %d\n",
                                    rc);
                }

                /* No moderation supported, disable adaptive support */
                ena_com_disable_adaptive_moderation(ena_dev);
                return rc;
        }

        /* The device supports moderation; store its delay resolution */
        delay_resolution = get_resp.u.intr_moderation.intr_delay_resolution;
        ena_com_update_intr_delay_resolution(ena_dev, delay_resolution);

        /* Disable adaptive moderation by default - can be enabled later */
        ena_com_disable_adaptive_moderation(ena_dev);

        return 0;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_tx(struct ena_com_dev *ena_dev)
{
        return ena_dev->intr_moder_tx_interval;
}

unsigned int ena_com_get_nonadaptive_moderation_interval_rx(struct ena_com_dev *ena_dev)
{
        return ena_dev->intr_moder_rx_interval;
}

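/* ena_com_config_dev_mode - choose between host memory and device memory
 * (LLQ) TX placement based on the device's LLQ capabilities and the
 * driver's default LLQ configuration.
 */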
int ena_com_config_dev_mode(struct ena_com_dev *ena_dev,
                            struct ena_admin_feature_llq_desc *llq_features,
                            struct ena_llq_configurations *llq_default_cfg)
{
        int rc;
        struct ena_com_llq_info *llq_info = &ena_dev->llq_info;

        if (!llq_features->max_llq_num) {
                ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_HOST;
                return 0;
        }

        rc = ena_com_config_llq_info(ena_dev, llq_features, llq_default_cfg);
        if (rc)
                return rc;

        /* The max TX header size is whatever is left of the LLQ entry
         * after the descriptors that precede the header.
         */
        ena_dev->tx_max_header_size = llq_info->desc_list_entry_size -
                (llq_info->descs_num_before_header * sizeof(struct ena_eth_io_tx_desc));

        if (ena_dev->tx_max_header_size == 0) {
                ena_trc_err("the size of the LLQ entry is smaller than needed\n");
                return ENA_COM_INVAL;
        }

        ena_dev->tx_mem_queue_type = ENA_ADMIN_PLACEMENT_POLICY_DEV;

        return 0;
}