1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35#include "i40iw_osdep.h"
36#include "i40iw_register.h"
37#include "i40iw_status.h"
38#include "i40iw_hmc.h"
39#include "i40iw_d.h"
40#include "i40iw_type.h"
41#include "i40iw_p.h"
42#include "i40iw_vf.h"
43#include "i40iw_virtchnl.h"
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58static inline void i40iw_find_sd_index_limit(struct i40iw_hmc_info *hmc_info,
59 u32 type,
60 u32 idx,
61 u32 cnt,
62 u32 *sd_idx,
63 u32 *sd_limit)
64{
65 u64 fpm_addr, fpm_limit;
66
67 fpm_addr = hmc_info->hmc_obj[(type)].base +
68 hmc_info->hmc_obj[type].size * idx;
69 fpm_limit = fpm_addr + hmc_info->hmc_obj[type].size * cnt;
70 *sd_idx = (u32)(fpm_addr / I40IW_HMC_DIRECT_BP_SIZE);
71 *sd_limit = (u32)((fpm_limit - 1) / I40IW_HMC_DIRECT_BP_SIZE);
72 *sd_limit += 1;
73}
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88static inline void i40iw_find_pd_index_limit(struct i40iw_hmc_info *hmc_info,
89 u32 type,
90 u32 idx,
91 u32 cnt,
92 u32 *pd_idx,
93 u32 *pd_limit)
94{
95 u64 fpm_adr, fpm_limit;
96
97 fpm_adr = hmc_info->hmc_obj[type].base +
98 hmc_info->hmc_obj[type].size * idx;
99 fpm_limit = fpm_adr + (hmc_info)->hmc_obj[(type)].size * (cnt);
100 *(pd_idx) = (u32)(fpm_adr / I40IW_HMC_PAGED_BP_SIZE);
101 *(pd_limit) = (u32)((fpm_limit - 1) / I40IW_HMC_PAGED_BP_SIZE);
102 *(pd_limit) += 1;
103}
104
105
106
107
108
109
110
111
112static inline void i40iw_set_sd_entry(u64 pa,
113 u32 idx,
114 enum i40iw_sd_entry_type type,
115 struct update_sd_entry *entry)
116{
117 entry->data = pa | (I40IW_HMC_MAX_BP_COUNT << I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
118 (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
119 I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT) |
120 (1 << I40E_PFHMC_SDDATALOW_PMSDVALID_SHIFT);
121 entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
122}
123
124
125
126
127
128
129
130static inline void i40iw_clr_sd_entry(u32 idx, enum i40iw_sd_entry_type type,
131 struct update_sd_entry *entry)
132{
133 entry->data = (I40IW_HMC_MAX_BP_COUNT <<
134 I40E_PFHMC_SDDATALOW_PMSDBPCOUNT_SHIFT) |
135 (((type == I40IW_SD_TYPE_PAGED) ? 0 : 1) <<
136 I40E_PFHMC_SDDATALOW_PMSDTYPE_SHIFT);
137 entry->cmd = (idx | (1 << I40E_PFHMC_SDCMD_PMSDWR_SHIFT) | (1 << 15));
138}
139
140
141
142
143
144
145
146
147
148
149enum i40iw_status_code i40iw_hmc_sd_one(struct i40iw_sc_dev *dev,
150 u8 hmc_fn_id,
151 u64 pa, u32 sd_idx,
152 enum i40iw_sd_entry_type type,
153 bool setsd)
154{
155 struct i40iw_update_sds_info sdinfo;
156
157 sdinfo.cnt = 1;
158 sdinfo.hmc_fn_id = hmc_fn_id;
159 if (setsd)
160 i40iw_set_sd_entry(pa, sd_idx, type, sdinfo.entry);
161 else
162 i40iw_clr_sd_entry(sd_idx, type, sdinfo.entry);
163
164 return dev->cqp->process_cqp_sds(dev, &sdinfo);
165}
166
167
168
169
170
171
172
173
174
/**
 * i40iw_hmc_sd_grp - program or clear a group of segment descriptors
 * @dev: pointer to the device structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @sd_index: first SD index in the group
 * @sd_cnt: number of consecutive SDs in the group
 * @setsd: true to program the descriptors, false to clear them
 *
 * Batches up to I40IW_MAX_SD_ENTRIES update entries per CQP request,
 * flushing each full batch immediately and any remainder at the end.
 * When setting, only SDs already marked valid are programmed; when
 * clearing, only SDs already marked invalid (by a prior prep_remove
 * step) are cleared.
 */
static enum i40iw_status_code i40iw_hmc_sd_grp(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_info *hmc_info,
					       u32 sd_index,
					       u32 sd_cnt,
					       bool setsd)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_update_sds_info sdinfo;
	u64 pa;
	u32 i;
	enum i40iw_status_code ret_code = 0;

	/* memset also zeroes sdinfo.cnt, the running batch counter */
	memset(&sdinfo, 0, sizeof(sdinfo));
	sdinfo.hmc_fn_id = hmc_info->hmc_fn_id;
	for (i = sd_index; i < sd_index + sd_cnt; i++) {
		sd_entry = &hmc_info->sd_table.sd_entry[i];
		/* NOTE(review): !sd_entry is always false (address of an
		 * array element); the effective filter is the valid/setsd
		 * combination below.
		 */
		if (!sd_entry ||
		    (!sd_entry->valid && setsd) ||
		    (sd_entry->valid && !setsd))
			continue;
		if (setsd) {
			/* paged SDs are backed by the PD page, direct SDs by
			 * their own backing page
			 */
			pa = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			    sd_entry->u.pd_table.pd_page_addr.pa :
			    sd_entry->u.bp.addr.pa;
			i40iw_set_sd_entry(pa, i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		} else {
			i40iw_clr_sd_entry(i, sd_entry->entry_type,
					   &sdinfo.entry[sdinfo.cnt]);
		}
		sdinfo.cnt++;
		if (sdinfo.cnt == I40IW_MAX_SD_ENTRIES) {
			/* batch full: push it to the hardware before
			 * accumulating more entries
			 */
			ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC,
					    "i40iw_hmc_sd_grp: sd_programming failed err=%d\n",
					    ret_code);
				return ret_code;
			}
			sdinfo.cnt = 0;
		}
	}
	/* flush any partial final batch */
	if (sdinfo.cnt)
		ret_code = dev->cqp->process_cqp_sds(dev, &sdinfo);

	return ret_code;
}
222
223
224
225
226
227
228struct i40iw_vfdev *i40iw_vfdev_from_fpm(struct i40iw_sc_dev *dev, u8 hmc_fn_id)
229{
230 struct i40iw_vfdev *vf_dev = NULL;
231 u16 idx;
232
233 for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
234 if (dev->vf_dev[idx] &&
235 ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
236 vf_dev = dev->vf_dev[idx];
237 break;
238 }
239 }
240 return vf_dev;
241}
242
243
244
245
246
247
248struct i40iw_hmc_info *i40iw_vf_hmcinfo_from_fpm(struct i40iw_sc_dev *dev,
249 u8 hmc_fn_id)
250{
251 struct i40iw_hmc_info *hmc_info = NULL;
252 u16 idx;
253
254 for (idx = 0; idx < I40IW_MAX_PE_ENABLED_VF_COUNT; idx++) {
255 if (dev->vf_dev[idx] &&
256 ((u8)dev->vf_dev[idx]->pmf_index == hmc_fn_id)) {
257 hmc_info = &dev->vf_dev[idx]->hmc_info;
258 break;
259 }
260 }
261 return hmc_info;
262}
263
264
265
266
267
268
269static enum i40iw_status_code i40iw_hmc_finish_add_sd_reg(struct i40iw_sc_dev *dev,
270 struct i40iw_hmc_create_obj_info *info)
271{
272 if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
273 return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
274
275 if ((info->start_idx + info->count) >
276 info->hmc_info->hmc_obj[info->rsrc_type].cnt)
277 return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
278
279 if (!info->add_sd_cnt)
280 return 0;
281
282 return i40iw_hmc_sd_grp(dev, info->hmc_info,
283 info->hmc_info->sd_indexes[0],
284 info->add_sd_cnt, true);
285}
286
287
288
289
290
291
292
293
294
/**
 * i40iw_sc_create_hmc_obj - allocate backing store for HMC objects
 * @dev: pointer to the device structure
 * @info: describes the resource type, start index and count to back
 *
 * Validates the requested object range, then (PF path) creates the
 * segment descriptors covering the range and, for paged SDs owned by
 * the PF's own HMC (PBLE excluded), populates the page descriptors as
 * well. Newly added SDs are registered with the hardware at the end.
 * On a VF the request is forwarded over the virtual channel instead.
 * On SD allocation failure, all SDs (and their PDs) created by this
 * call are rolled back.
 */
enum i40iw_status_code i40iw_sc_create_hmc_obj(struct i40iw_sc_dev *dev,
					       struct i40iw_hmc_create_obj_info *info)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, sd_lmt;
	u32 pd_idx = 0, pd_lmt = 0;
	u32 pd_idx1 = 0, pd_lmt1 = 0;
	u32 i, j;
	bool pd_error = false;
	enum i40iw_status_code ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt)
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error type %u, start = %u, req cnt %u, cnt = %u\n",
			    __func__, info->rsrc_type, info->start_idx, info->count,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}

	/* VF: delegate allocation to the PF over the virtual channel.
	 * NOTE(review): start index 0 is passed rather than
	 * info->start_idx - confirm this is intended.
	 */
	if (!dev->is_pf)
		return i40iw_vchnl_vf_add_hmc_objs(dev, info->rsrc_type, 0, info->count);

	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count,
				  &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		return I40IW_ERR_INVALID_SD_INDEX;
	}
	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

	for (j = sd_idx; j < sd_lmt; j++) {
		ret_code = i40iw_add_sd_table_entry(dev->hw, info->hmc_info,
						    j,
						    info->entry_type,
						    I40IW_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			goto exit_sd_error;
		sd_entry = &info->hmc_info->sd_table.sd_entry[j];

		/* Populate PDs now only for paged SDs belonging to the PF's
		 * own HMC info, and never for PBLE objects (presumably their
		 * backing pages are added on demand elsewhere - confirm).
		 */
		if ((sd_entry->entry_type == I40IW_SD_TYPE_PAGED) &&
		    ((dev->hmc_info == info->hmc_info) &&
		     (info->rsrc_type != I40IW_HMC_IW_PBLE))) {
			/* clamp the global PD range to the PDs owned by SD j */
			pd_idx1 = max(pd_idx, (j * I40IW_HMC_MAX_BP_COUNT));
			pd_lmt1 = min(pd_lmt,
				      (j + 1) * I40IW_HMC_MAX_BP_COUNT);
			for (i = pd_idx1; i < pd_lmt1; i++) {
				/* NULL rsrc_pg: allocate a fresh backing page */
				ret_code = i40iw_add_pd_table_entry(dev->hw, info->hmc_info,
								    i, NULL);
				if (ret_code) {
					pd_error = true;
					break;
				}
			}
			if (pd_error) {
				/* unwind the PDs added for this SD before
				 * falling through to the SD rollback below
				 */
				while (i && (i > pd_idx1)) {
					i40iw_remove_pd_bp(dev->hw, info->hmc_info, (i - 1),
							   info->is_pf);
					i--;
				}
			}
		}
		if (sd_entry->valid)
			continue;

		/* record the SD as newly added so finish_add can program it */
		info->hmc_info->sd_indexes[info->add_sd_cnt] = (u16)j;
		info->add_sd_cnt++;
		sd_entry->valid = true;
	}
	return i40iw_hmc_finish_add_sd_reg(dev, info);

exit_sd_error:
	/* roll back every SD created so far, from j-1 down to sd_idx */
	while (j && (j > sd_idx)) {
		sd_entry = &info->hmc_info->sd_table.sd_entry[j - 1];
		switch (sd_entry->entry_type) {
		case I40IW_SD_TYPE_PAGED:
			pd_idx1 = max(pd_idx,
				      (j - 1) * I40IW_HMC_MAX_BP_COUNT);
			pd_lmt1 = min(pd_lmt, (j * I40IW_HMC_MAX_BP_COUNT));
			for (i = pd_idx1; i < pd_lmt1; i++)
				i40iw_prep_remove_pd_page(info->hmc_info, i);
			break;
		case I40IW_SD_TYPE_DIRECT:
			/* NOTE(review): the direct case also calls
			 * i40iw_prep_remove_pd_page(); one might expect
			 * i40iw_prep_remove_sd_bp() here - confirm intended.
			 */
			i40iw_prep_remove_pd_page(info->hmc_info, (j - 1));
			break;
		default:
			ret_code = I40IW_ERR_INVALID_SD_TYPE;
			break;
		}
		j--;
	}

	return ret_code;
}
395
396
397
398
399
400
401
/**
 * i40iw_finish_del_sd_reg - de-register deleted SDs and free their memory
 * @dev: pointer to the device structure
 * @info: delete-object request holding the collected SD indexes
 * @reset: true if called during a reset (skip hardware de-registration)
 *
 * On the PF (and outside reset) clears the collected SDs in hardware
 * first, then frees the DMA backing memory of every collected SD
 * regardless of the hardware result. A de-registration failure is
 * logged and returned, but does not stop the memory being freed.
 */
static enum i40iw_status_code i40iw_finish_del_sd_reg(struct i40iw_sc_dev *dev,
						      struct i40iw_hmc_del_obj_info *info,
						      bool reset)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	enum i40iw_status_code ret_code = 0;
	u32 i, sd_idx;
	struct i40iw_dma_mem *mem;

	if (dev->is_pf && !reset)
		ret_code = i40iw_hmc_sd_grp(dev, info->hmc_info,
					    info->hmc_info->sd_indexes[0],
					    info->del_sd_cnt, false);

	if (ret_code)
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd sd_grp\n", __func__);

	for (i = 0; i < info->del_sd_cnt; i++) {
		sd_idx = info->hmc_info->sd_indexes[i];
		sd_entry = &info->hmc_info->sd_table.sd_entry[sd_idx];
		/* NOTE(review): !sd_entry is always false (address of an
		 * array element) - dead defensive check.
		 */
		if (!sd_entry)
			continue;
		/* paged SDs own the PD page; direct SDs own the backing page */
		mem = (sd_entry->entry_type == I40IW_SD_TYPE_PAGED) ?
			&sd_entry->u.pd_table.pd_page_addr :
			&sd_entry->u.bp.addr;

		/* !mem is likewise always false; the meaningful check is a
		 * missing virtual address
		 */
		if (!mem || !mem->va)
			i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error cqp sd mem\n", __func__);
		else
			i40iw_free_dma_mem(dev->hw, mem);
	}
	return ret_code;
}
435
436
437
438
439
440
441
442
443
444
445
446
/**
 * i40iw_sc_del_hmc_obj - remove backing store for HMC objects
 * @dev: pointer to the device structure
 * @info: describes the resource type, start index and count to remove
 * @reset: true if called during a reset (propagated to finish_del)
 *
 * Validates the object range, then tears down in two phases: first the
 * page descriptors of paged SDs covering the range, then the segment
 * descriptors themselves (collecting their indexes so finish_del can
 * de-register and free them). VFs forward the request over the virtual
 * channel and return early unless the type is PBLE.
 */
enum i40iw_status_code i40iw_sc_del_hmc_obj(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_del_obj_info *info,
					    bool reset)
{
	struct i40iw_hmc_pd_table *pd_table;
	u32 sd_idx, sd_lmt;
	u32 pd_idx, pd_lmt, rel_pd_idx;
	u32 i, j;
	enum i40iw_status_code ret_code = 0;

	if (info->start_idx >= info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_INDEX;
	}

	if ((info->start_idx + info->count) >
	    info->hmc_info->hmc_obj[info->rsrc_type].cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC,
			    "%s: error start_idx[%04d] + count %04d >= [type %04d].cnt[%04d]\n",
			    __func__, info->start_idx, info->count,
			    info->rsrc_type,
			    info->hmc_info->hmc_obj[info->rsrc_type].cnt);
		return I40IW_ERR_INVALID_HMC_OBJ_COUNT;
	}
	/* VF: ask the PF to delete; only PBLE continues with local teardown
	 * (NOTE(review): start index 0 rather than info->start_idx - confirm)
	 */
	if (!dev->is_pf) {
		ret_code = i40iw_vchnl_vf_del_hmc_obj(dev, info->rsrc_type, 0,
						      info->count);
		if (info->rsrc_type != I40IW_HMC_IW_PBLE)
			return ret_code;
	}

	i40iw_find_pd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &pd_idx, &pd_lmt);

	/* phase 1: remove the backing pages behind every valid PD in range */
	for (j = pd_idx; j < pd_lmt; j++) {
		sd_idx = j / I40IW_HMC_PD_CNT_IN_SD;

		/* direct SDs have no PDs to remove */
		if (info->hmc_info->sd_table.sd_entry[sd_idx].entry_type !=
		    I40IW_SD_TYPE_PAGED)
			continue;

		rel_pd_idx = j % I40IW_HMC_PD_CNT_IN_SD;
		pd_table = &info->hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
		if (pd_table->pd_entry[rel_pd_idx].valid) {
			ret_code = i40iw_remove_pd_bp(dev->hw, info->hmc_info, j,
						      info->is_pf);
			if (ret_code) {
				i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error\n", __func__);
				return ret_code;
			}
		}
	}

	i40iw_find_sd_index_limit(info->hmc_info, info->rsrc_type,
				  info->start_idx, info->count, &sd_idx, &sd_lmt);
	if (sd_idx >= info->hmc_info->sd_table.sd_cnt ||
	    sd_lmt > info->hmc_info->sd_table.sd_cnt) {
		i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: error invalid sd_idx\n", __func__);
		return I40IW_ERR_INVALID_SD_INDEX;
	}

	/* phase 2: mark each SD for removal (refcount permitting) and record
	 * its index for finish_del to de-register and free
	 */
	for (i = sd_idx; i < sd_lmt; i++) {
		if (!info->hmc_info->sd_table.sd_entry[i].valid)
			continue;
		switch (info->hmc_info->sd_table.sd_entry[i].entry_type) {
		case I40IW_SD_TYPE_DIRECT:
			ret_code = i40iw_prep_remove_sd_bp(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		case I40IW_SD_TYPE_PAGED:
			ret_code = i40iw_prep_remove_pd_page(info->hmc_info, i);
			if (!ret_code) {
				info->hmc_info->sd_indexes[info->del_sd_cnt] = (u16)i;
				info->del_sd_cnt++;
			}
			break;
		default:
			break;
		}
	}
	return i40iw_finish_del_sd_reg(dev, info, reset);
}
535
536
537
538
539
540
541
542
543
/**
 * i40iw_add_sd_table_entry - add an entry to the SD table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @sd_index: index of the SD entry to add
 * @type: paged or direct segment descriptor
 * @direct_mode_sz: allocation size when @type is direct
 *
 * For a not-yet-valid entry, allocates the DMA backing memory (one
 * paged backing page, or @direct_mode_sz bytes for direct) plus, for
 * paged SDs, the virtual memory holding the PD entry array, and bumps
 * the SD table refcount. Direct entries additionally gain a backing-
 * page refcount on every call. The DMA memory is freed again if a
 * later step fails.
 */
enum i40iw_status_code i40iw_add_sd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 sd_index,
						enum i40iw_sd_entry_type type,
						u64 direct_mode_sz)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_sd_entry *sd_entry;
	bool dma_mem_alloc_done = false;
	struct i40iw_dma_mem mem;
	u64 alloc_len;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_index];
	if (!sd_entry->valid) {
		if (type == I40IW_SD_TYPE_PAGED)
			alloc_len = I40IW_HMC_PAGED_BP_SIZE;
		else
			alloc_len = direct_mode_sz;

		/* allocate a 4K pd page or 2M backing page */
		ret_code = i40iw_allocate_dma_mem(hw, &mem, alloc_len,
						  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
		if (ret_code)
			goto exit;
		dma_mem_alloc_done = true;
		if (type == I40IW_SD_TYPE_PAGED) {
			/* 512 PD entries per SD page - presumably
			 * I40IW_HMC_PD_CNT_IN_SD; confirm against header
			 */
			ret_code = i40iw_allocate_virt_mem(hw,
							   &sd_entry->u.pd_table.pd_entry_virt_mem,
							   sizeof(struct i40iw_hmc_pd_entry) * 512);
			if (ret_code)
				goto exit;
			sd_entry->u.pd_table.pd_entry = (struct i40iw_hmc_pd_entry *)
							sd_entry->u.pd_table.pd_entry_virt_mem.va;

			memcpy(&sd_entry->u.pd_table.pd_page_addr, &mem, sizeof(struct i40iw_dma_mem));
		} else {
			memcpy(&sd_entry->u.bp.addr, &mem, sizeof(struct i40iw_dma_mem));
			sd_entry->u.bp.sd_pd_index = sd_index;
		}

		hmc_info->sd_table.sd_entry[sd_index].entry_type = type;

		I40IW_INC_SD_REFCNT(&hmc_info->sd_table);
	}
	/* direct entries track an extra per-backing-page refcount */
	if (sd_entry->entry_type == I40IW_SD_TYPE_DIRECT)
		I40IW_INC_BP_REFCNT(&sd_entry->u.bp);
exit:
	/* on failure, release the DMA memory if it was already allocated */
	if (ret_code)
		if (dma_mem_alloc_done)
			i40iw_free_dma_mem(hw, &mem);

	return ret_code;
}
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
/**
 * i40iw_add_pd_table_entry - add an entry to a PD table
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @pd_index: global page descriptor index
 * @rsrc_pg: caller-supplied backing page, or NULL to allocate one here
 *
 * For a not-yet-valid PD entry, installs a backing page (caller's or
 * freshly allocated), writes the descriptor word into the SD's PD page,
 * marks the entry valid, bumps the PD table refcount, and invalidates
 * the hardware's cached copy. The entry's backing-page refcount is
 * bumped on every call. No-op (returns 0) if the owning SD is not a
 * paged SD.
 */
enum i40iw_status_code i40iw_add_pd_table_entry(struct i40iw_hw *hw,
						struct i40iw_hmc_info *hmc_info,
						u32 pd_index,
						struct i40iw_dma_mem *rsrc_pg)
{
	enum i40iw_status_code ret_code = 0;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_dma_mem mem;
	struct i40iw_dma_mem *page = &mem;
	u32 sd_idx, rel_pd_idx;
	u64 *pd_addr;
	u64 page_desc;

	if (pd_index / I40IW_HMC_PD_CNT_IN_SD >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	sd_idx = (pd_index / I40IW_HMC_PD_CNT_IN_SD);
	if (hmc_info->sd_table.sd_entry[sd_idx].entry_type != I40IW_SD_TYPE_PAGED)
		return 0;

	rel_pd_idx = (pd_index % I40IW_HMC_PD_CNT_IN_SD);
	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	if (!pd_entry->valid) {
		if (rsrc_pg) {
			/* caller owns the page; remember not to free it on
			 * removal (see i40iw_remove_pd_bp)
			 */
			pd_entry->rsrc_pg = true;
			page = rsrc_pg;
		} else {
			ret_code = i40iw_allocate_dma_mem(hw, page,
							  I40IW_HMC_PAGED_BP_SIZE,
							  I40IW_HMC_PD_BP_BUF_ALIGNMENT);
			if (ret_code)
				return ret_code;
			pd_entry->rsrc_pg = false;
		}

		memcpy(&pd_entry->bp.addr, page, sizeof(struct i40iw_dma_mem));
		pd_entry->bp.sd_pd_index = pd_index;
		pd_entry->bp.entry_type = I40IW_SD_TYPE_PAGED;
		/* bit 0 set in the descriptor word - presumably the PD valid
		 * bit; confirm against the register spec
		 */
		page_desc = page->pa | 0x1;

		/* write the descriptor into its slot in the SD's PD page */
		pd_addr = (u64 *)pd_table->pd_page_addr.va;
		pd_addr += rel_pd_idx;

		memcpy(pd_addr, &page_desc, sizeof(*pd_addr));

		pd_entry->sd_index = sd_idx;
		pd_entry->valid = true;
		I40IW_INC_PD_REFCNT(pd_table);
		/* invalidate the cached PD: PF ids use the PF register,
		 * foreign VF ids the VF register
		 */
		if (hmc_info->hmc_fn_id < I40IW_FIRST_VF_FPM_ID)
			I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, rel_pd_idx);
		else if (hw->hmc.hmc_fn_id != hmc_info->hmc_fn_id)
			I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, rel_pd_idx,
						   hmc_info->hmc_fn_id);
	}
	I40IW_INC_BP_REFCNT(&pd_entry->bp);

	return 0;
}
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
/**
 * i40iw_remove_pd_bp - remove a backing page from a page descriptor
 * @hw: pointer to the HW structure
 * @hmc_info: pointer to the HMC configuration information structure
 * @idx: global page descriptor index
 * @is_pf: true when invalidating through the PF register set
 *
 * Drops the entry's backing-page refcount; when it reaches zero the
 * entry is invalidated (descriptor word zeroed, hardware cache
 * invalidated), its backing page freed unless caller-owned (rsrc_pg),
 * and the PD entry array freed once the table refcount drains.
 */
enum i40iw_status_code i40iw_remove_pd_bp(struct i40iw_hw *hw,
					  struct i40iw_hmc_info *hmc_info,
					  u32 idx,
					  bool is_pf)
{
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_pd_table *pd_table;
	struct i40iw_hmc_sd_entry *sd_entry;
	u32 sd_idx, rel_pd_idx;
	struct i40iw_dma_mem *mem;
	u64 *pd_addr;

	sd_idx = idx / I40IW_HMC_PD_CNT_IN_SD;
	rel_pd_idx = idx % I40IW_HMC_PD_CNT_IN_SD;
	if (sd_idx >= hmc_info->sd_table.sd_cnt)
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;

	sd_entry = &hmc_info->sd_table.sd_entry[sd_idx];
	if (sd_entry->entry_type != I40IW_SD_TYPE_PAGED)
		return I40IW_ERR_INVALID_SD_TYPE;

	pd_table = &hmc_info->sd_table.sd_entry[sd_idx].u.pd_table;
	pd_entry = &pd_table->pd_entry[rel_pd_idx];
	I40IW_DEC_BP_REFCNT(&pd_entry->bp);
	/* other users still hold the page - nothing more to do */
	if (pd_entry->bp.ref_cnt)
		return 0;

	pd_entry->valid = false;
	I40IW_DEC_PD_REFCNT(pd_table);
	/* clear this PD's descriptor word in the SD's PD page */
	pd_addr = (u64 *)pd_table->pd_page_addr.va;
	pd_addr += rel_pd_idx;
	memset(pd_addr, 0, sizeof(u64));
	if (is_pf)
		I40IW_INVALIDATE_PF_HMC_PD(hw, sd_idx, idx);
	else
		I40IW_INVALIDATE_VF_HMC_PD(hw, sd_idx, idx,
					   hmc_info->hmc_fn_id);

	/* only free pages we allocated ourselves, not caller-owned ones */
	if (!pd_entry->rsrc_pg) {
		mem = &pd_entry->bp.addr;
		if (!mem || !mem->va)
			return I40IW_ERR_PARAM;
		i40iw_free_dma_mem(hw, mem);
	}
	/* last PD gone: release the PD entry array as well */
	if (!pd_table->ref_cnt)
		i40iw_free_virt_mem(hw, &pd_table->pd_entry_virt_mem);

	return 0;
}
741
742
743
744
745
746
747enum i40iw_status_code i40iw_prep_remove_sd_bp(struct i40iw_hmc_info *hmc_info, u32 idx)
748{
749 struct i40iw_hmc_sd_entry *sd_entry;
750
751 sd_entry = &hmc_info->sd_table.sd_entry[idx];
752 I40IW_DEC_BP_REFCNT(&sd_entry->u.bp);
753 if (sd_entry->u.bp.ref_cnt)
754 return I40IW_ERR_NOT_READY;
755
756 I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
757 sd_entry->valid = false;
758
759 return 0;
760}
761
762
763
764
765
766
767enum i40iw_status_code i40iw_prep_remove_pd_page(struct i40iw_hmc_info *hmc_info,
768 u32 idx)
769{
770 struct i40iw_hmc_sd_entry *sd_entry;
771
772 sd_entry = &hmc_info->sd_table.sd_entry[idx];
773
774 if (sd_entry->u.pd_table.ref_cnt)
775 return I40IW_ERR_NOT_READY;
776
777 sd_entry->valid = false;
778 I40IW_DEC_SD_REFCNT(&hmc_info->sd_table);
779
780 return 0;
781}
782
783
784
785
786
787
788
789
790
791enum i40iw_status_code i40iw_pf_init_vfhmc(struct i40iw_sc_dev *dev,
792 u8 vf_hmc_fn_id,
793 u32 *vf_cnt_array)
794{
795 struct i40iw_hmc_info *hmc_info;
796 enum i40iw_status_code ret_code = 0;
797 u32 i;
798
799 if ((vf_hmc_fn_id < I40IW_FIRST_VF_FPM_ID) ||
800 (vf_hmc_fn_id >= I40IW_FIRST_VF_FPM_ID +
801 I40IW_MAX_PE_ENABLED_VF_COUNT)) {
802 i40iw_debug(dev, I40IW_DEBUG_HMC, "%s: invalid vf_hmc_fn_id 0x%x\n",
803 __func__, vf_hmc_fn_id);
804 return I40IW_ERR_INVALID_HMCFN_ID;
805 }
806
807 ret_code = i40iw_sc_init_iw_hmc(dev, vf_hmc_fn_id);
808 if (ret_code)
809 return ret_code;
810
811 hmc_info = i40iw_vf_hmcinfo_from_fpm(dev, vf_hmc_fn_id);
812
813 for (i = I40IW_HMC_IW_QP; i < I40IW_HMC_IW_MAX; i++)
814 if (vf_cnt_array)
815 hmc_info->hmc_obj[i].cnt =
816 vf_cnt_array[i - I40IW_HMC_IW_QP];
817 else
818 hmc_info->hmc_obj[i].cnt = hmc_info->hmc_obj[i].max_cnt;
819
820 return 0;
821}
822