#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"
#include "lib/eq.h"

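/* MANAGE_PAGES op_mod values: the driver either cannot supply the requested
 * pages, gives pages to the firmware, or asks the firmware to return them.
 */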
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

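/* Deferred page request, queued from the PAGE_REQUEST event handler and
 * serviced from the page-allocator workqueue.
 */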
struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

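/* A host page handed to the firmware, tracked in an rb-tree keyed by its DMA
 * address. When PAGE_SIZE is larger than the 4K adapter page size, bitmask
 * and free_count track which 4K chunks of the page are still available.
 */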
struct fw_page {
	struct rb_node	rb_node;
	u64		addr;
	struct page	*page;
	u16		func_id;
	unsigned long	bitmask;
	struct list_head list;
	unsigned	free_count;
};

enum {
	MAX_RECLAIM_TIME_MSECS		= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

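/* Track a newly mapped host page in the rb-tree and put it on the free list
 * with all of its 4K chunks marked available.
 */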
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

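/* Look up the fw_page whose page-aligned DMA address is @addr. */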
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

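/* Query how many pages the firmware needs for the boot or init stage and
 * which function id the request is for.
 */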
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

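/* Hand out one free 4K chunk from the first page on the free list; the page
 * is removed from the free list once its last chunk is taken.
 */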
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

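/* Return a 4K chunk to its owning fw_page. When the whole page becomes free
 * it is unmapped, released and dropped from the rb-tree; when its first chunk
 * frees up, the page goes back on the free list.
 */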
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}

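/* Allocate a host page near the device's NUMA node, map it for DMA and track
 * it in the rb-tree and free list.
 */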
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 zero_addr = 1;
	u64 addr;
	int err;
	int nid = dev_to_node(&dev->pdev->dev);

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
map:
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto err_mapping;
	}

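	/* A DMA address of zero cannot be handed to the firmware; keep the
	 * zero mapping, retry, and unmap it at the end of the function.
	 */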
	if (addr == 0) {
		zero_addr = addr;
		goto map;
	}

	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);
	}

err_mapping:
	if (err)
		__free_page(page);

	if (zero_addr == 0)
		dma_unmap_page(&dev->pdev->dev, zero_addr, PAGE_SIZE,
			       DMA_BIDIRECTIONAL);

	return err;
}

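/* Tell the firmware that the driver cannot supply the pages it asked for. */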
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

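/* Allocate @npages 4K chunks and post them to the firmware with a
 * MANAGE_PAGES(GIVE) command. On failure the chunks are rolled back and, if
 * requested, the firmware is notified that no pages could be given.
 */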
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = kvzalloc(inlen, GFP_KERNEL);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_ARRAY_SET64(manage_pages_in, in, pas, i, addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;

	mlx5_core_dbg(dev, "err %d\n", err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id);
	return err;
}

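/* Execute a MANAGE_PAGES(TAKE) command. If the device is in the internal
 * error state the firmware cannot answer, so the reply is synthesized from
 * the rb-tree: the pages tracked for the function are reported back as
 * reclaimed.
 */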
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	p = rb_first(&dev->priv.page_root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;

		MLX5_ARRAY_SET64(manage_pages_out, out, pas, i, fwp->addr);
		i++;
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

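/* Ask the firmware to return up to @npages pages for @func_id and free every
 * page address it reports back. @nclaimed, if provided, returns how many
 * pages were actually reclaimed.
 */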
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = kvzalloc(outlen, GFP_KERNEL);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

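/* Work item: a negative page count asks the driver to reclaim pages, a
 * positive count asks it to give pages to the firmware.
 */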
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

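/* PAGE_REQUEST event handler: decode the function id and page count from the
 * EQE and queue the request to the page-allocator workqueue.
 */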
static int req_pages_handler(struct notifier_block *nb,
			     unsigned long type, void *data)
{
	struct mlx5_pages_req *req;
	struct mlx5_core_dev *dev;
	struct mlx5_priv *priv;
	struct mlx5_eqe *eqe;
	u16 func_id;
	s32 npages;

	priv = mlx5_nb_cof(nb, struct mlx5_priv, pg_nb);
	dev = container_of(priv, struct mlx5_core_dev, priv);
	eqe = data;

	func_id = be16_to_cpu(eqe->data.req_pages.func_id);
	npages = be32_to_cpu(eqe->data.req_pages.num_pages);
	mlx5_core_dbg(dev, "page request for func 0x%x, npages %d\n",
		      func_id, npages);
	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return NOTIFY_DONE;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
	return NOTIFY_OK;
}

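/* Query how many pages the firmware needs for the boot or init stage and
 * give them to it.
 */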
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

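/* Number of page addresses that fit in one reclaim command reply, given the
 * immediate output area and MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks.
 */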
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

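/* Reclaim all pages from the firmware during teardown. The timeout is
 * restarted whenever progress is made; if the firmware stops returning pages
 * the loop gives up and warns about anything still outstanding.
 */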
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err = 0;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed);

			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);

	return 0;
}

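/* Lifecycle helpers: init/cleanup manage the rb-tree, free list and
 * workqueue; start/stop register and unregister the PAGE_REQUEST notifier
 * and flush pending page work.
 */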
int mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}

void mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	MLX5_NB_INIT(&dev->priv.pg_nb, req_pages_handler, PAGE_REQUEST);
	mlx5_eq_notifier_register(dev, &dev->priv.pg_nb);
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	mlx5_eq_notifier_unregister(dev, &dev->priv.pg_nb);
	flush_workqueue(dev->priv.pg_wq);
}

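/* Wait for all VF pages to be returned to the driver, e.g. after disabling
 * SR-IOV. The timeout is extended whenever the outstanding count drops.
 */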
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_vfs_pages = dev->priv.vfs_pages;

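	/* In the internal error state the pages are freed by the driver
	 * itself, so there is nothing to wait for here.
	 */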
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
		      dev->priv.name);
	while (dev->priv.vfs_pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
			return -ETIMEDOUT;
		}
		if (dev->priv.vfs_pages < prev_vfs_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_vfs_pages = dev->priv.vfs_pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
	return 0;
}