#include "osdep.h"
#include "hmc.h"
#include "defs.h"
#include "type.h"
#include "protos.h"
#include "pble.h"

static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc);
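/**
 * irdma_destroy_pble_prm - free all chunks owned by the pble resource manager
 * @pble_rsrc: pble resources
 */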
void irdma_destroy_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_chunk *chunk;
	struct irdma_pble_prm *pinfo = &pble_rsrc->pinfo;

	while (!list_empty(&pinfo->clist)) {
		chunk = (struct irdma_chunk *) pinfo->clist.next;
		list_del(&chunk->list);
		if (chunk->type == PBLE_SD_PAGED)
			irdma_pble_free_paged_mem(chunk);
		bitmap_free(chunk->bitmapbuf);
		kfree(chunk->chunkmem.va);
	}
}
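/**
 * irdma_hmc_init_pble - initialize pble resources during module load
 * @dev: irdma_sc_dev struct
 * @pble_rsrc: pble resources
 */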
int irdma_hmc_init_pble(struct irdma_sc_dev *dev,
			struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_hmc_info *hmc_info;
	u32 fpm_idx = 0;
	int status = 0;

	hmc_info = dev->hmc_info;
	pble_rsrc->dev = dev;
	pble_rsrc->fpm_base_addr = hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].base;
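	/* start the first pble on a 4k boundary within the fpm */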
	if (pble_rsrc->fpm_base_addr & 0xfff)
		fpm_idx = (4096 - (pble_rsrc->fpm_base_addr & 0xfff)) >> 3;
	pble_rsrc->unallocated_pble =
		hmc_info->hmc_obj[IRDMA_HMC_IW_PBLE].cnt - fpm_idx;
	pble_rsrc->next_fpm_addr = pble_rsrc->fpm_base_addr + (fpm_idx << 3);
	pble_rsrc->pinfo.pble_shift = PBLE_SHIFT;

	mutex_init(&pble_rsrc->pble_mutex_lock);

	spin_lock_init(&pble_rsrc->pinfo.prm_lock);
	INIT_LIST_HEAD(&pble_rsrc->pinfo.clist);
	if (add_pble_prm(pble_rsrc)) {
		irdma_destroy_pble_prm(pble_rsrc);
		status = -ENOMEM;
	}

	return status;
}
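/**
 * get_sd_pd_idx - compute sd index, pd index and relative pd index
 *                 from the next fpm address
 * @pble_rsrc: structure containing the fpm address
 * @idx: where to return the indexes
 */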
static void get_sd_pd_idx(struct irdma_hmc_pble_rsrc *pble_rsrc,
			  struct sd_pd_idx *idx)
{
	idx->sd_idx = (u32)pble_rsrc->next_fpm_addr / IRDMA_HMC_DIRECT_BP_SIZE;
	idx->pd_idx = (u32)(pble_rsrc->next_fpm_addr / IRDMA_HMC_PAGED_BP_SIZE);
	idx->rel_pd_idx = (idx->pd_idx % IRDMA_HMC_PD_CNT_IN_SD);
}
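/**
 * add_sd_direct - add a direct (contiguous) sd entry for pble
 * @pble_rsrc: pble resource ptr
 * @info: page info for sd
 */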
static int add_sd_direct(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	int ret_code = 0;
	struct sd_pd_idx *idx = &info->idx;
	struct irdma_chunk *chunk = info->chunk;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	u32 offset = 0;

	if (!sd_entry->valid) {
		ret_code = irdma_add_sd_table_entry(dev->hw, hmc_info,
						    info->idx.sd_idx,
						    IRDMA_SD_TYPE_DIRECT,
						    IRDMA_HMC_DIRECT_BP_SIZE);
		if (ret_code)
			return ret_code;

		chunk->type = PBLE_SD_CONTIGOUS;
	}

	offset = idx->rel_pd_idx << HMC_PAGED_BP_SHIFT;
	chunk->size = info->pages << HMC_PAGED_BP_SHIFT;
	chunk->vaddr = sd_entry->u.bp.addr.va + offset;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: chunk_size[%lld] = 0x%llx vaddr=0x%pK fpm_addr = %llx\n",
		  chunk->size, chunk->size, chunk->vaddr, chunk->fpm_addr);

	return 0;
}
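/**
 * fpm_to_idx - convert an fpm address to a pble index
 * @pble_rsrc: pble resource management
 * @addr: fpm address of the pble
 */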
static u32 fpm_to_idx(struct irdma_hmc_pble_rsrc *pble_rsrc, u64 addr)
{
	u64 idx;

	idx = (addr - (pble_rsrc->fpm_base_addr)) >> 3;

	return (u32)idx;
}
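/**
 * add_bp_pages - add backing pages for a paged sd
 * @pble_rsrc: pble resource management
 * @info: page info for sd
 */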
static int add_bp_pages(struct irdma_hmc_pble_rsrc *pble_rsrc,
			struct irdma_add_page_info *info)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	u8 *addr;
	struct irdma_dma_mem mem;
	struct irdma_hmc_pd_entry *pd_entry;
	struct irdma_hmc_sd_entry *sd_entry = info->sd_entry;
	struct irdma_hmc_info *hmc_info = info->hmc_info;
	struct irdma_chunk *chunk = info->chunk;
	int status = 0;
	u32 rel_pd_idx = info->idx.rel_pd_idx;
	u32 pd_idx = info->idx.pd_idx;
	u32 i;

	if (irdma_pble_get_paged_mem(chunk, info->pages))
		return -ENOMEM;

	status = irdma_add_sd_table_entry(dev->hw, hmc_info, info->idx.sd_idx,
					  IRDMA_SD_TYPE_PAGED,
					  IRDMA_HMC_DIRECT_BP_SIZE);
	if (status)
		goto error;

	addr = chunk->vaddr;
	for (i = 0; i < info->pages; i++) {
		mem.pa = (u64)chunk->dmainfo.dmaaddrs[i];
		mem.size = 4096;
		mem.va = addr;
		pd_entry = &sd_entry->u.pd_table.pd_entry[rel_pd_idx++];
		if (!pd_entry->valid) {
			status = irdma_add_pd_table_entry(dev, hmc_info,
							  pd_idx++, &mem);
			if (status)
				goto error;

			addr += 4096;
		}
	}

	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	return 0;

error:
	irdma_pble_free_paged_mem(chunk);

	return status;
}
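/**
 * irdma_get_type - pick direct or paged sd type for a new allocation
 * @dev: sc device struct
 * @idx: sd/pd index info
 * @pages: number of pages backing the sd
 */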
static enum irdma_sd_entry_type irdma_get_type(struct irdma_sc_dev *dev,
					       struct sd_pd_idx *idx, u32 pages)
{
	enum irdma_sd_entry_type sd_entry_type;

	sd_entry_type = !idx->rel_pd_idx && pages == IRDMA_HMC_PD_CNT_IN_SD ?
			IRDMA_SD_TYPE_DIRECT : IRDMA_SD_TYPE_PAGED;
	return sd_entry_type;
}
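/**
 * add_pble_prm - add an sd entry's worth of pbles to the resource manager
 * @pble_rsrc: pble resource management
 */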
static int add_pble_prm(struct irdma_hmc_pble_rsrc *pble_rsrc)
{
	struct irdma_sc_dev *dev = pble_rsrc->dev;
	struct irdma_hmc_sd_entry *sd_entry;
	struct irdma_hmc_info *hmc_info;
	struct irdma_chunk *chunk;
	struct irdma_add_page_info info;
	struct sd_pd_idx *idx = &info.idx;
	int ret_code = 0;
	enum irdma_sd_entry_type sd_entry_type;
	u64 sd_reg_val = 0;
	struct irdma_virt_mem chunkmem;
	u32 pages;

	if (pble_rsrc->unallocated_pble < PBLE_PER_PAGE)
		return -ENOMEM;

	if (pble_rsrc->next_fpm_addr & 0xfff)
		return -EINVAL;

	chunkmem.size = sizeof(*chunk);
	chunkmem.va = kzalloc(chunkmem.size, GFP_KERNEL);
	if (!chunkmem.va)
		return -ENOMEM;

	chunk = chunkmem.va;
	chunk->chunkmem = chunkmem;
	hmc_info = dev->hmc_info;
	chunk->dev = dev;
	chunk->fpm_addr = pble_rsrc->next_fpm_addr;
	get_sd_pd_idx(pble_rsrc, idx);
	sd_entry = &hmc_info->sd_table.sd_entry[idx->sd_idx];
	pages = (idx->rel_pd_idx) ? (IRDMA_HMC_PD_CNT_IN_SD - idx->rel_pd_idx) :
				    IRDMA_HMC_PD_CNT_IN_SD;
	pages = min(pages, pble_rsrc->unallocated_pble >> PBLE_512_SHIFT);
	info.chunk = chunk;
	info.hmc_info = hmc_info;
	info.pages = pages;
	info.sd_entry = sd_entry;
	if (!sd_entry->valid)
		sd_entry_type = irdma_get_type(dev, idx, pages);
	else
		sd_entry_type = sd_entry->entry_type;

	ibdev_dbg(to_ibdev(dev),
		  "PBLE: pages = %d, unallocated_pble[%d] current_fpm_addr = %llx\n",
		  pages, pble_rsrc->unallocated_pble,
		  pble_rsrc->next_fpm_addr);
	ibdev_dbg(to_ibdev(dev), "PBLE: sd_entry_type = %d\n", sd_entry_type);
	if (sd_entry_type == IRDMA_SD_TYPE_DIRECT)
		ret_code = add_sd_direct(pble_rsrc, &info);

	if (ret_code)
		sd_entry_type = IRDMA_SD_TYPE_PAGED;
	else
		pble_rsrc->stats_direct_sds++;

	if (sd_entry_type == IRDMA_SD_TYPE_PAGED) {
		ret_code = add_bp_pages(pble_rsrc, &info);
		if (ret_code)
			goto error;
		else
			pble_rsrc->stats_paged_sds++;
	}

	ret_code = irdma_prm_add_pble_mem(&pble_rsrc->pinfo, chunk);
	if (ret_code)
		goto error;

	pble_rsrc->next_fpm_addr += chunk->size;
	ibdev_dbg(to_ibdev(dev),
		  "PBLE: next_fpm_addr = %llx chunk_size[%llu] = 0x%llx\n",
		  pble_rsrc->next_fpm_addr, chunk->size, chunk->size);
	pble_rsrc->unallocated_pble -= (u32)(chunk->size >> 3);
	sd_reg_val = (sd_entry_type == IRDMA_SD_TYPE_PAGED) ?
		     sd_entry->u.pd_table.pd_page_addr.pa :
		     sd_entry->u.bp.addr.pa;

	if (!sd_entry->valid) {
		ret_code = irdma_hmc_sd_one(dev, hmc_info->hmc_fn_id, sd_reg_val,
					    idx->sd_idx, sd_entry->entry_type, true);
		if (ret_code)
			goto error;
	}

	list_add(&chunk->list, &pble_rsrc->pinfo.clist);
	sd_entry->valid = true;
	return 0;

error:
	bitmap_free(chunk->bitmapbuf);
	kfree(chunk->chunkmem.va);

	return ret_code;
}
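/**
 * free_lvl2 - free a level 2 pble allocation
 * @pble_rsrc: pble resource management
 * @palloc: level 2 pble allocation
 */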
static void free_lvl2(struct irdma_hmc_pble_rsrc *pble_rsrc,
		      struct irdma_pble_alloc *palloc)
{
	u32 i;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf = lvl2->leaf;

	for (i = 0; i < lvl2->leaf_cnt; i++, leaf++) {
		if (leaf->addr)
			irdma_prm_return_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo);
		else
			break;
	}

	if (root->addr)
		irdma_prm_return_pbles(&pble_rsrc->pinfo, &root->chunkinfo);

	kfree(lvl2->leafmem.va);
	lvl2->leaf = NULL;
}
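/**
 * get_lvl2_pble - get a level 2 (root + leaf pages) pble allocation
 * @pble_rsrc: pble resource management
 * @palloc: pble allocation to fill in
 */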
static int get_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	u32 lf4k, lflast, total, i;
	u32 pblcnt = PBLE_PER_PAGE;
	u64 *addr;
	struct irdma_pble_level2 *lvl2 = &palloc->level2;
	struct irdma_pble_info *root = &lvl2->root;
	struct irdma_pble_info *leaf;
	int ret_code;
	u64 fpm_addr;
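	/* number of full 512-pble (4K) leaves; one partial leaf may follow */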
	lf4k = palloc->total_cnt >> 9;
	lflast = palloc->total_cnt % PBLE_PER_PAGE;
	total = (lflast == 0) ? lf4k : lf4k + 1;
	lvl2->leaf_cnt = total;

	lvl2->leafmem.size = (sizeof(*leaf) * total);
	lvl2->leafmem.va = kzalloc(lvl2->leafmem.size, GFP_KERNEL);
	if (!lvl2->leafmem.va)
		return -ENOMEM;

	lvl2->leaf = lvl2->leafmem.va;
	leaf = lvl2->leaf;
	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &root->chunkinfo,
				       total << 3, &root->addr, &fpm_addr);
	if (ret_code) {
		kfree(lvl2->leafmem.va);
		lvl2->leaf = NULL;
		return -ENOMEM;
	}

	root->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	root->cnt = total;
	addr = root->addr;
	for (i = 0; i < total; i++, leaf++) {
		pblcnt = (lflast && ((i + 1) == total)) ?
			 lflast : PBLE_PER_PAGE;
		ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo,
					       &leaf->chunkinfo, pblcnt << 3,
					       &leaf->addr, &fpm_addr);
		if (ret_code)
			goto error;

		leaf->idx = fpm_to_idx(pble_rsrc, fpm_addr);

		leaf->cnt = pblcnt;
		*addr = (u64)leaf->idx;
		addr++;
	}

	palloc->level = PBLE_LEVEL_2;
	pble_rsrc->stats_lvl2++;
	return 0;

error:
	free_lvl2(pble_rsrc, palloc);

	return -ENOMEM;
}
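/**
 * get_lvl1_pble - get a level 1 (single contiguous range) pble allocation
 * @pble_rsrc: pble resource management
 * @palloc: pble allocation to fill in
 */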
static int get_lvl1_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			 struct irdma_pble_alloc *palloc)
{
	int ret_code;
	u64 fpm_addr;
	struct irdma_pble_info *lvl1 = &palloc->level1;

	ret_code = irdma_prm_get_pbles(&pble_rsrc->pinfo, &lvl1->chunkinfo,
				       palloc->total_cnt << 3, &lvl1->addr,
				       &fpm_addr);
	if (ret_code)
		return -ENOMEM;

	palloc->level = PBLE_LEVEL_1;
	lvl1->idx = fpm_to_idx(pble_rsrc, fpm_addr);
	lvl1->cnt = palloc->total_cnt;
	pble_rsrc->stats_lvl1++;

	return 0;
}
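/**
 * get_lvl1_lvl2_pble - try a level 1 allocation, fall back to level 2
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding the pble (idx + pble addr)
 * @level1_only: flag for a level 1 pble
 */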
static int get_lvl1_lvl2_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
			      struct irdma_pble_alloc *palloc, bool level1_only)
{
	int status = 0;

	status = get_lvl1_pble(pble_rsrc, palloc);
	if (!status || level1_only || palloc->total_cnt <= PBLE_PER_PAGE)
		return status;

	status = get_lvl2_pble(pble_rsrc, palloc);

	return status;
}
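/**
 * irdma_get_pble - allocate pbles from the resource manager
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding the pble (idx + pble addr)
 * @pble_cnt: number of pbles requested
 * @level1_only: true if only a level 1 pble is to be acquired
 */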
int irdma_get_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		   struct irdma_pble_alloc *palloc, u32 pble_cnt,
		   bool level1_only)
{
	int status = 0;
	int max_sds = 0;
	int i;

	palloc->total_cnt = pble_cnt;
	palloc->level = PBLE_LEVEL_0;

	mutex_lock(&pble_rsrc->pble_mutex_lock);
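	/* first see if the request can be satisfied without adding another sd */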
	status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
	if (!status)
		goto exit;

	max_sds = (palloc->total_cnt >> 18) + 1;
	for (i = 0; i < max_sds; i++) {
		status = add_pble_prm(pble_rsrc);
		if (status)
			break;

		status = get_lvl1_lvl2_pble(pble_rsrc, palloc, level1_only);
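		/* for level1_only, make only a single attempt */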
		if (!status || level1_only)
			break;
	}

exit:
	if (!status) {
		pble_rsrc->allocdpbles += pble_cnt;
		pble_rsrc->stats_alloc_ok++;
	} else {
		pble_rsrc->stats_alloc_fail++;
	}
	mutex_unlock(&pble_rsrc->pble_mutex_lock);

	return status;
}
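/**
 * irdma_free_pble - return pbles to the resource manager
 * @pble_rsrc: pble resources
 * @palloc: contains all information regarding the pble resource being freed
 */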
void irdma_free_pble(struct irdma_hmc_pble_rsrc *pble_rsrc,
		     struct irdma_pble_alloc *palloc)
{
	pble_rsrc->freedpbles += palloc->total_cnt;

	if (palloc->level == PBLE_LEVEL_2)
		free_lvl2(pble_rsrc, palloc);
	else
		irdma_prm_return_pbles(&pble_rsrc->pinfo,
				       &palloc->level1.chunkinfo);
	pble_rsrc->stats_alloc_freed++;
}