#include "i40iw_status.h"
#include "i40iw_osdep.h"
#include "i40iw_register.h"
#include "i40iw_hmc.h"

#include "i40iw_d.h"
#include "i40iw_type.h"
#include "i40iw_p.h"

#include <linux/pci.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#include "i40iw_pble.h"
#include "i40iw.h"

struct i40iw_device;
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc);
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk);
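
/**
 * i40iw_destroy_pble_pool - free all pble chunks and destroy the pool
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 */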
void i40iw_destroy_pble_pool(struct i40iw_sc_dev *dev, struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct list_head *clist;
	struct list_head *tlist;
	struct i40iw_chunk *chunk;
	struct i40iw_pble_pool *pinfo = &pble_rsrc->pinfo;

	if (pinfo->pool) {
		list_for_each_safe(clist, tlist, &pinfo->clist) {
			chunk = list_entry(clist, struct i40iw_chunk, list);
			if (chunk->type == I40IW_VMALLOC)
				i40iw_free_vmalloc_mem(dev->hw, chunk);
			kfree(chunk);
		}
		gen_pool_destroy(pinfo->pool);
	}
}
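
/**
 * i40iw_hmc_init_pble - set up pble resources from the HMC PBLE object
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 */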
enum i40iw_status_code i40iw_hmc_init_pble(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_hmc_info *hmc_info;
	u32 fpm_idx = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].base;
	/* start pble allocations on a 4K boundary within the PBLE object */
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (PAGE_SIZE - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;

	pble_rsrc->unallocated_pble =
	    hmc_info->hmc_obj[I40IW_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);

	pble_rsrc->pinfo.pool_shift = POOL_SHIFT;
	pble_rsrc->pinfo.pool = gen_pool_create(pble_rsrc->pinfo.pool_shift, -1);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (!pble_rsrc->pinfo.pool)
		goto error;

	if (add_pble_pool(dev, pble_rsrc))
		goto error;

	return 0;

error:
	i40iw_destroy_pble_pool(dev, pble_rsrc);
	return I40IW_ERR_NO_MEMORY;
}
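
/**
 * get_sd_pd_idx - compute sd, pd and relative pd index for the next fpm address
 * @pble_rsrc: pble resources
 * @idx: where to return the indexes
 */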
static inline void get_sd_pd_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc,
				 struct sd_pd_idx *idx)
{
	idx->sd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr) / I40IW_HMC_PAGED_BP_SIZE;
	idx->rel_pd_idx = (idx->pd_idx % I40IW_HMC_PD_CNT_IN_SD);
}
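
/**
 * add_sd_direct - add a direct sd entry and carve the chunk from its backing page
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 * @info: page info for sd
 */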
static enum i40iw_status_code add_sd_direct(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_add_page_info *info)
{
	enum i40iw_status_code ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct i40iw_chunk *chunk = info->chunk;
	struct i40iw_hmc_info *hmc_info = info->hmc_info;
	struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		if (dev->is_pf) {
			ret_code = i40iw_add_sd_table_entry(dev->hw, hmc_info,
							    info->idx.sd_idx,
							    I40IW_SD_TYPE_DIRECT,
							    I40IW_HMC_DIRECT_BP_SIZE);
			if (ret_code)
				return ret_code;
			chunk->type = I40IW_DMA_COHERENT;
		}
	}
	offset = idx->rel_pd_idx << I40IW_HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << I40IW_HMC_PAGED_BP_SHIFT;
	chunk->vaddr = ((u8 *)sd_entry->u.bp.addr.va + offset);
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "chunk_size[%d] = 0x%x vaddr=%p fpm_addr = %llx\n",
		    chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);
	return 0;
}
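
/**
 * i40iw_free_vmalloc_mem - dma-unmap and free a vmalloc'ed chunk
 * @hw: hardware structure
 * @chunk: chunk to free
 */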
static void i40iw_free_vmalloc_mem(struct i40iw_hw *hw, struct i40iw_chunk *chunk)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
	int i;

	if (!chunk->pg_cnt)
		goto done;
	for (i = 0; i < chunk->pg_cnt; i++)
		dma_unmap_page(&pcidev->dev, chunk->dmaaddrs[i], PAGE_SIZE, DMA_BIDIRECTIONAL);

done:
	kfree(chunk->dmaaddrs);
	chunk->dmaaddrs = NULL;
	vfree(chunk->vaddr);
	chunk->vaddr = NULL;
	chunk->type = 0;
}
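
/**
 * i40iw_get_vmalloc_mem - vmalloc a chunk of pg_cnt pages and dma-map each page
 * @hw: hardware structure
 * @chunk: chunk to map
 * @pg_cnt: number of pages needed
 */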
static enum i40iw_status_code i40iw_get_vmalloc_mem(struct i40iw_hw *hw,
						    struct i40iw_chunk *chunk,
						    int pg_cnt)
{
	struct pci_dev *pcidev = (struct pci_dev *)hw->dev_context;
	struct page *page;
	u8 *addr;
	u32 size;
	int i;

	chunk->dmaaddrs = kzalloc(pg_cnt << 3, GFP_KERNEL);
	if (!chunk->dmaaddrs)
		return I40IW_ERR_NO_MEMORY;
	size = PAGE_SIZE * pg_cnt;
	chunk->vaddr = vmalloc(size);
	if (!chunk->vaddr) {
		kfree(chunk->dmaaddrs);
		chunk->dmaaddrs = NULL;
		return I40IW_ERR_NO_MEMORY;
	}
	chunk->size = size;
	addr = (u8 *)chunk->vaddr;
	for (i = 0; i < pg_cnt; i++) {
		page = vmalloc_to_page((void *)addr);
		if (!page)
			break;
		chunk->dmaaddrs[i] = dma_map_page(&pcidev->dev, page, 0,
						  PAGE_SIZE, DMA_BIDIRECTIONAL);
		if (dma_mapping_error(&pcidev->dev, chunk->dmaaddrs[i]))
			break;
		addr += PAGE_SIZE;
	}

	chunk->pg_cnt = i;
	chunk->type = I40IW_VMALLOC;
	if (i == pg_cnt)
		return 0;

	i40iw_free_vmalloc_mem(hw, chunk);
	return I40IW_ERR_NO_MEMORY;
}
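
/**
 * fpm_to_idx - given an fpm address, get the pble index
 * @pble_rsrc: pble resources
 * @addr: fpm address for index
 */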
static inline u32 fpm_to_idx(struct i40iw_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	return (addr - (pble_rsrc->fpm_base_addr)) >> 3;
}
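
/**
 * add_bp_pages - add backing pages for a paged sd entry
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 * @info: page info for sd
 */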
static enum i40iw_status_code add_bp_pages(struct i40iw_sc_dev *dev,
					   struct i40iw_hmc_pble_rsrc *pble_rsrc,
					   struct i40iw_add_page_info *info)
{
	u8 *addr;
	struct i40iw_dma_mem mem;
	struct i40iw_hmc_pd_entry *pd_entry;
	struct i40iw_hmc_sd_entry *sd_entry = info->sd_entry;
	struct i40iw_hmc_info *hmc_info = info->hmc_info;
	struct i40iw_chunk *chunk = info->chunk;
	struct i40iw_manage_vf_pble_info vf_pble_info;
	enum i40iw_status_code status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	status = i40iw_get_vmalloc_mem(dev->hw, chunk, info->pages);
	if (status)
		return I40IW_ERR_NO_MEMORY;
	status = i40iw_add_sd_table_entry(dev->hw, hmc_info,
					  info->idx.sd_idx, I40IW_SD_TYPE_PAGED,
					  I40IW_HMC_DIRECT_BP_SIZE);
	if (status) {
		i40iw_free_vmalloc_mem(dev->hw, chunk);
		return status;
	}
	if (!dev->is_pf) {
		status = i40iw_vchnl_vf_add_hmc_objs(dev, I40IW_HMC_IW_PBLE,
						     fpm_to_idx(pble_rsrc,
								pble_rsrc->next_fpm_addr),
						     (info->pages << PBLE_512_SHIFT));
		if (status) {
			i40iw_pr_err("failed to allocate PBLEs in the PF. Error %i\n", status);
			i40iw_free_vmalloc_mem(dev->hw, chunk);
			return status;
		}
	}
	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = chunk->dmaaddrs[i];
		mem.size = PAGE_SIZE;
		mem.va = (void *)(addr);
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = i40iw_add_pd_table_entry(dev->hw, hmc_info, pd_idx++, &mem);
			if (status)
				goto error;
			addr += PAGE_SIZE;
		} else {
			i40iw_pr_err("pd entry is valid, expected invalid\n");
		}
	}
	if (!dev->is_pf) {
		vf_pble_info.first_pd_index = info->idx.rel_pd_idx;
		vf_pble_info.inv_pd_ent = false;
		vf_pble_info.pd_entry_cnt = PBLE_PER_PAGE;
		vf_pble_info.pd_pl_pba = sd_entry->u.pd_table.pd_page_addr.pa;
		vf_pble_info.sd_index = info->idx.sd_idx;
		status = i40iw_hw_manage_vf_pble_bp(dev->back_dev,
						    &vf_pble_info, true);
		if (status) {
			i40iw_pr_err("CQP manage VF PBLE BP failed. %i\n", status);
			goto error;
		}
	}
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;
error:
	i40iw_free_vmalloc_mem(dev->hw, chunk);
	return status;
}
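
/**
 * add_pble_pool - grow the pble pool by adding one sd's worth of pbles
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 */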
static enum i40iw_status_code add_pble_pool(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc)
{
	struct i40iw_hmc_sd_entry *sd_entry;
	struct i40iw_hmc_info *hmc_info;
	struct i40iw_chunk *chunk;
	struct i40iw_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	enum i40iw_status_code ret_code = 0;
	enum i40iw_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return I40IW_ERR_NO_MEMORY;
	if (pble_rsrc->next_fpm_addr & 0xfff) {
		i40iw_pr_err("next fpm_addr %llx not 4K aligned\n", pble_rsrc->next_fpm_addr);
		return I40IW_ERR_INVALID_PAGE_DESC_INDEX;
	}
	chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
	if (!chunk)
		return I40IW_ERR_NO_MEMORY;
	hmc_info = dev->hmc_info;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
	pages = (idx->rel_pd_idx) ? (I40IW_HMC_PD_CNT_IN_SD -
			idx->rel_pd_idx) : I40IW_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	if (!pages) {
		ret_code = I40IW_ERR_NO_PBLCHUNKS_AVAILABLE;
		goto error;
	}
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid) {
		sd_entry_type = (!idx->rel_pd_idx &&
				 (pages == I40IW_HMC_PD_CNT_IN_SD) &&
				 dev->is_pf) ? I40IW_SD_TYPE_DIRECT : I40IW_SD_TYPE_PAGED;
	} else {
		sd_entry_type = sd_entry->entry_type;
	}
	i40iw_debug(dev, I40IW_DEBUG_PBLE,
		    "pages = %d, unallocated_pble[%u] current_fpm_addr = %llx\n",
		    pages, pble_rsrc->unallocated_pble, pble_rsrc->next_fpm_addr);
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "sd_entry_type = %d sd_entry valid = %d\n",
		    sd_entry_type, sd_entry->valid);

	if (sd_entry_type == I40IW_SD_TYPE_DIRECT) {
		ret_code = add_sd_direct(dev, pble_rsrc, &info);
		if (ret_code)
			sd_entry_type = I40IW_SD_TYPE_PAGED;
		else
			pble_rsrc->stats_direct_sds++;
	}

	if (sd_entry_type == I40IW_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(dev, pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	if (gen_pool_add_virt(pble_rsrc->pinfo.pool, (unsigned long)chunk->vaddr,
			      (phys_addr_t)chunk->fpm_addr, chunk->size, -1)) {
		i40iw_pr_err("could not add memory to pool via gen_pool_add_virt()\n");
		ret_code = I40IW_ERR_NO_MEMORY;
		goto error;
	}
	pble_rsrc->next_fpm_addr += chunk->size;
	i40iw_debug(dev, I40IW_DEBUG_PBLE, "next_fpm_addr = %llx chunk_size[%u] = 0x%x\n",
		    pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (chunk->size >> 3);
	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_reg_val = (sd_entry_type == I40IW_SD_TYPE_PAGED) ?
			sd_entry->u.pd_table.pd_page_addr.pa : sd_entry->u.bp.addr.pa;
	if (sd_entry->valid)
		return 0;
	if (dev->is_pf) {
		ret_code = i40iw_hmc_sd_one(dev, hmc_info->hmc_fn_id,
					    sd_reg_val, idx->sd_idx,
					    sd_entry->entry_type, true);
		if (ret_code) {
			i40iw_pr_err("cqp cmd failed for sd (pbles)\n");
			goto error;
		}
	}

	sd_entry->valid = true;
	return 0;
error:
	kfree(chunk);
	return ret_code;
}
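
/**
 * free_lvl2 - free a level 2 pble allocation (root and leaves) back to the pool
 * @pble_rsrc: pble resources
 * @palloc: level 2 pble allocation
 */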
static void free_lvl2(struct i40iw_hmc_pble_rsrc *pble_rsrc,
		      struct i40iw_pble_alloc *palloc)
{
	u32 i;
	struct gen_pool *pool;
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *root = &lvl2->root;
	struct i40iw_pble_info *leaf = lvl2->leaf;

	pool = pble_rsrc->pinfo.pool;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			gen_pool_free(pool, leaf->addr, (leaf->cnt << 3));
		else
			break;
	}

	if (root->addr)
		gen_pool_free(pool, root->addr, (root->cnt << 3));

	kfree(lvl2->leaf);
	lvl2->leaf = NULL;
}
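
/**
 * get_lvl2_pble - allocate a level 2 pble structure (root page of leaf indexes)
 * @pble_rsrc: pble resources
 * @palloc: contains all information about pble (idx + pble addr)
 * @pool: general purpose special memory pool descriptor
 */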
static enum i40iw_status_code get_lvl2_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_pble_alloc *palloc,
					    struct gen_pool *pool)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct i40iw_pble_level2 *lvl2 = &palloc->level2;
	struct i40iw_pble_info *root = &lvl2->root;
	struct i40iw_pble_info *leaf;

	/* number of full 512-pble (4K) leaves and size of the last partial leaf */
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	leaf = kzalloc((sizeof(*leaf) * total), GFP_ATOMIC);
	if (!leaf)
		return I40IW_ERR_NO_MEMORY;
	lvl2->leaf = leaf;
	/* allocate pbles for the root */
	root->addr = gen_pool_alloc(pool, (total << 3));
	if (!root->addr) {
		kfree(lvl2->leaf);
		lvl2->leaf = NULL;
		return I40IW_ERR_NO_MEMORY;
	}
	root->idx = fpm_to_idx(pble_rsrc,
			       (u64)gen_pool_virt_to_phys(pool, root->addr));
	root->cnt = total;
	addr = (u64 *)root->addr;
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ? lflast : PBLE_PER_PAGE;
		leaf->addr = gen_pool_alloc(pool, (pblcnt << 3));
		if (!leaf->addr)
			goto error;
		leaf->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool, leaf->addr));
		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}
	palloc->level = I40IW_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;
error:
	free_lvl2(pble_rsrc, palloc);
	return I40IW_ERR_NO_MEMORY;
}
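
/**
 * get_lvl1_pble - allocate a contiguous level 1 pble range from the pool
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 * @palloc: contains all information about pble (idx + pble addr)
 */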
static enum i40iw_status_code get_lvl1_pble(struct i40iw_sc_dev *dev,
					    struct i40iw_hmc_pble_rsrc *pble_rsrc,
					    struct i40iw_pble_alloc *palloc)
{
	u64 *addr;
	struct gen_pool *pool;
	struct i40iw_pble_info *lvl1 = &palloc->level1;

	pool = pble_rsrc->pinfo.pool;
	addr = (u64 *)gen_pool_alloc(pool, (palloc->total_cnt << 3));

	if (!addr)
		return I40IW_ERR_NO_MEMORY;

	palloc->level = I40IW_LEVEL_1;
	lvl1->addr = (unsigned long)addr;
	lvl1->idx = fpm_to_idx(pble_rsrc, (u64)gen_pool_virt_to_phys(pool,
			       (unsigned long)addr));
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;
	return 0;
}
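
/**
 * get_lvl1_lvl2_pble - try a level 1 allocation, fall back to level 2
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 * @palloc: contains all information about pble (idx + pble addr)
 * @pool: general purpose special memory pool descriptor
 */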
static inline enum i40iw_status_code get_lvl1_lvl2_pble(struct i40iw_sc_dev *dev,
							struct i40iw_hmc_pble_rsrc *pble_rsrc,
							struct i40iw_pble_alloc *palloc,
							struct gen_pool *pool)
{
	enum i40iw_status_code status = 0;

	status = get_lvl1_pble(dev, pble_rsrc, palloc);
	if (status && (palloc->total_cnt > PBLE_PER_PAGE))
		status = get_lvl2_pble(pble_rsrc, palloc, pool);
	return status;
}
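
/**
 * i40iw_get_pble - allocate pbles, growing the pool with new sd entries as needed
 * @dev: hardware control device structure
 * @pble_rsrc: pble resources
 * @palloc: contains all information about pble (idx + pble addr)
 * @pble_cnt: number of pbles to allocate
 */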
enum i40iw_status_code i40iw_get_pble(struct i40iw_sc_dev *dev,
				      struct i40iw_hmc_pble_rsrc *pble_rsrc,
				      struct i40iw_pble_alloc *palloc,
				      u32 pble_cnt)
{
	struct gen_pool *pool;
	enum i40iw_status_code status = 0;
	u32 max_sds = 0;
	int i;

	pool = pble_rsrc->pinfo.pool;
	palloc->total_cnt = pble_cnt;
	palloc->level = I40IW_LEVEL_0;
	/* check first to see if we can get pbles without acquiring additional sds */
	status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
	if (!status)
		goto exit;
	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_pool(dev, pble_rsrc);
		if (status)
			break;
		status = get_lvl1_lvl2_pble(dev, pble_rsrc, palloc, pool);
		if (!status)
			break;
	}
exit:
	if (!status)
		pble_rsrc->stats_alloc_ok++;
	else
		pble_rsrc->stats_alloc_fail++;

	return status;
}
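
/**
 * i40iw_free_pble - return a pble allocation to the pool
 * @pble_rsrc: pble resources
 * @palloc: contains all information about pble resource being freed
 */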
void i40iw_free_pble(struct i40iw_hmc_pble_rsrc *pble_rsrc,
		     struct i40iw_pble_alloc *palloc)
{
	struct gen_pool *pool;

	pool = pble_rsrc->pinfo.pool;
	if (palloc->level == I40IW_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		gen_pool_free(pool, palloc->level1.addr,
			      (palloc->level1.cnt << 3));
	pble_rsrc->stats_alloc_freed++;
}