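/*
 * ICM (InfiniHost context memory) management for mem-free mthca
 * devices: chunked ICM allocation and mapping, refcounted ICM tables,
 * and kernel/user doorbell record pages.
 */
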
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>

#include <asm/page.h>

#include "mthca_memfree.h"
#include "mthca_dev.h"
#include "mthca_cmd.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MTHCA_ICM_ALLOC_SIZE   = 1 << 18,
	MTHCA_TABLE_CHUNK_SIZE = 1 << 18
};

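/*
 * Per-process table of userspace doorbell pages: for each page we keep
 * the user virtual address, the scatterlist entry for the pinned page,
 * and a count of doorbell records in use in that page.
 */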
struct mthca_user_db_table {
	struct mutex mutex;
	struct {
		u64                uvirt;
		struct scatterlist mem;
		int                refcount;
	} page[0];
};

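/* Undo the DMA mapping (if any) and free the pages of a non-coherent chunk. */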
static void mthca_free_icm_pages(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

static void mthca_free_icm_coherent(struct mthca_dev *dev, struct mthca_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i) {
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
	}
}

void mthca_free_icm(struct mthca_dev *dev, struct mthca_icm *icm, int coherent)
{
	struct mthca_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mthca_free_icm_coherent(dev, chunk);
		else
			mthca_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

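/* Allocate one zeroed high-order block and record it in a scatterlist entry. */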
static int mthca_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	/*
	 * Use __GFP_ZERO because buggy firmware assumes ICM pages are
	 * cleared, and subtle failures are seen if they aren't.
	 */
	page = alloc_pages(gfp_mask | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

static int mthca_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				    int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order, &sg_dma_address(mem),
				       gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

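/*
 * Allocate npages of ICM as a list of chunks, each holding up to
 * MTHCA_ICM_CHUNK_LEN scatterlist entries.  We try the largest order
 * that still fits and fall back to smaller orders on allocation
 * failure; non-coherent chunks are DMA-mapped once they fill up.
 */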
struct mthca_icm *mthca_alloc_icm(struct mthca_dev *dev, int npages,
				  gfp_t gfp_mask, int coherent)
{
	struct mthca_icm *icm;
	struct mthca_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return icm;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MTHCA_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MTHCA_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mthca_alloc_icm_coherent(&dev->pdev->dev,
						       &chunk->mem[chunk->npages],
						       cur_order, gfp_mask);
		else
			ret = mthca_alloc_icm_pages(&chunk->mem[chunk->npages],
						    cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				++chunk->nsg;
			else if (chunk->npages == MTHCA_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MTHCA_ICM_CHUNK_LEN)
				chunk = NULL;

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mthca_free_icm(dev, icm, coherent);
	return NULL;
}

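/*
 * Make sure the ICM chunk backing table object @obj is present:
 * allocate and map it on first use, otherwise just bump its refcount.
 */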
int mthca_table_get(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;
	int ret = 0;
	u8 status;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mthca_alloc_icm(dev, MTHCA_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
					(table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					__GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mthca_MAP_ICM(dev, table->icm[i], table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
			  &status) || status) {
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

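/* Drop a reference to a table chunk; unmap and free it on the last put. */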
void mthca_table_put(struct mthca_dev *dev, struct mthca_icm_table *table, int obj)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	i = (obj & (table->num_obj - 1)) * table->obj_size / MTHCA_TABLE_CHUNK_SIZE;

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
				MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
				&status);
		mthca_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

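/*
 * Return the kernel virtual address of table object @obj (lowmem tables
 * only), and optionally its DMA address through @dma_handle, by walking
 * the chunk scatterlists.
 */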
void *mthca_table_find(struct mthca_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mthca_icm_chunk *chunk;
	struct mthca_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MTHCA_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MTHCA_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

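/* Take a reference on every chunk covering objects [start, end]. */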
int mthca_table_get_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			  int start, int end)
{
	int inc = MTHCA_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mthca_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mthca_table_put(dev, table, i);
	}

	return err;
}

void mthca_table_put_range(struct mthca_dev *dev, struct mthca_icm_table *table,
			   int start, int end)
{
	int i;

	if (!mthca_is_memfree(dev))
		return;

	for (i = start; i <= end; i += MTHCA_TABLE_CHUNK_SIZE / table->obj_size)
		mthca_table_put(dev, table, i);
}

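/*
 * Create a table of @nobj objects of @obj_size bytes starting at ICM
 * virtual address @virt.  The first @reserved objects are backed and
 * mapped immediately and pinned by an extra reference so that they are
 * never freed.
 */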
struct mthca_icm_table *mthca_alloc_icm_table(struct mthca_dev *dev,
					      u64 virt, int obj_size,
					      int nobj, int reserved,
					      int use_lowmem, int use_coherent)
{
	struct mthca_icm_table *table;
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;
	u8 status;

	obj_per_chunk = MTHCA_TABLE_CHUNK_SIZE / obj_size;
	num_icm = DIV_ROUND_UP(nobj, obj_per_chunk);

	table = kmalloc(sizeof *table + num_icm * sizeof *table->icm, GFP_KERNEL);
	if (!table)
		return NULL;

	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i < num_icm; ++i)
		table->icm[i] = NULL;

	for (i = 0; i * MTHCA_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MTHCA_TABLE_CHUNK_SIZE;
		if ((i + 1) * MTHCA_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = nobj * obj_size - i * MTHCA_TABLE_CHUNK_SIZE;

		table->icm[i] = mthca_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
						(use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
						__GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mthca_MAP_ICM(dev, table->icm[i], virt + i * MTHCA_TABLE_CHUNK_SIZE,
				  &status) || status) {
			mthca_free_icm(dev, table->icm[i], table->coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return table;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);

	return NULL;
}

void mthca_free_icm_table(struct mthca_dev *dev, struct mthca_icm_table *table)
{
	int i;
	u8 status;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mthca_UNMAP_ICM(dev, table->virt + i * MTHCA_TABLE_CHUNK_SIZE,
					MTHCA_TABLE_CHUNK_SIZE / MTHCA_ICM_PAGE_SIZE,
					&status);
			mthca_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table);
}

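/* ICM virtual address of UAR context page @page for UAR @uar. */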
static u64 mthca_uarc_virt(struct mthca_dev *dev, struct mthca_uar *uar, int page)
{
	return dev->uar_table.uarc_base +
		uar->index * dev->uar_table.uarc_size +
		page * MTHCA_ICM_PAGE_SIZE;
}

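/*
 * Pin the userspace page containing doorbell record @index at @uaddr,
 * DMA-map it, and map it into the UAR context.  A page already mapped
 * at the same address is shared via its refcount.
 */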
int mthca_map_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
		      struct mthca_user_db_table *db_tab, int index, u64 uaddr)
{
	struct page *pages[1];
	int ret = 0;
	u8 status;
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	if (index < 0 || index > dev->uar_table.uarc_size / 8)
		return -EINVAL;

	mutex_lock(&db_tab->mutex);

	i = index / MTHCA_DB_REC_PER_PAGE;

	if ((db_tab->page[i].refcount >= MTHCA_DB_REC_PER_PAGE) ||
	    (db_tab->page[i].uvirt && db_tab->page[i].uvirt != uaddr) ||
	    (uaddr & 4095)) {
		ret = -EINVAL;
		goto out;
	}

	if (db_tab->page[i].refcount) {
		++db_tab->page[i].refcount;
		goto out;
	}

	ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK, 1, 1, 0,
			     pages, NULL);
	if (ret < 0)
		goto out;

	sg_set_page(&db_tab->page[i].mem, pages[0], MTHCA_ICM_PAGE_SIZE,
		    uaddr & ~PAGE_MASK);

	ret = pci_map_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
	if (ret < 0) {
		put_page(pages[0]);
		goto out;
	}

	ret = mthca_MAP_ICM_page(dev, sg_dma_address(&db_tab->page[i].mem),
				 mthca_uarc_virt(dev, uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
		put_page(sg_page(&db_tab->page[i].mem));
		goto out;
	}

	db_tab->page[i].uvirt    = uaddr;
	db_tab->page[i].refcount = 1;

out:
	mutex_unlock(&db_tab->mutex);
	return ret;
}

void mthca_unmap_user_db(struct mthca_dev *dev, struct mthca_uar *uar,
			 struct mthca_user_db_table *db_tab, int index)
{
	if (!mthca_is_memfree(dev))
		return;

	/*
	 * To make our bookkeeping simpler, we don't unmap DB
	 * pages until we clean up the whole db table.
	 */

	mutex_lock(&db_tab->mutex);

	--db_tab->page[index / MTHCA_DB_REC_PER_PAGE].refcount;

	mutex_unlock(&db_tab->mutex);
}

struct mthca_user_db_table *mthca_init_user_db_tab(struct mthca_dev *dev)
{
	struct mthca_user_db_table *db_tab;
	int npages;
	int i;

	if (!mthca_is_memfree(dev))
		return NULL;

	npages = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	db_tab = kmalloc(sizeof *db_tab + npages * sizeof *db_tab->page, GFP_KERNEL);
	if (!db_tab)
		return ERR_PTR(-ENOMEM);

	mutex_init(&db_tab->mutex);
	for (i = 0; i < npages; ++i) {
		db_tab->page[i].refcount = 0;
		db_tab->page[i].uvirt    = 0;
		sg_init_table(&db_tab->page[i].mem, 1);
	}

	return db_tab;
}

void mthca_cleanup_user_db_tab(struct mthca_dev *dev, struct mthca_uar *uar,
			       struct mthca_user_db_table *db_tab)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	for (i = 0; i < dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE; ++i) {
		if (db_tab->page[i].uvirt) {
			mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, uar, i), 1, &status);
			pci_unmap_sg(dev->pdev, &db_tab->page[i].mem, 1, PCI_DMA_TODEVICE);
			put_page(sg_page(&db_tab->page[i].mem));
		}
	}

	kfree(db_tab);
}

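/*
 * Allocate a kernel doorbell record.  Group 1 records (CQ arm, SQ)
 * are handed out from the start of the UARC page array growing up,
 * group 2 records (CQ set_ci, RQ, SRQ) from the end growing down;
 * max_group1/min_group2 track the boundary between the two groups.
 */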
int mthca_alloc_db(struct mthca_dev *dev, enum mthca_db_type type,
		   u32 qn, __be32 **db)
{
	int group;
	int start, end, dir;
	int i, j;
	struct mthca_db_page *page;
	int ret = 0;
	u8 status;

	mutex_lock(&dev->db_tab->mutex);

	switch (type) {
	case MTHCA_DB_TYPE_CQ_ARM:
	case MTHCA_DB_TYPE_SQ:
		group = 0;
		start = 0;
		end   = dev->db_tab->max_group1;
		dir   = 1;
		break;

	case MTHCA_DB_TYPE_CQ_SET_CI:
	case MTHCA_DB_TYPE_RQ:
	case MTHCA_DB_TYPE_SRQ:
		group = 1;
		start = dev->db_tab->npages - 1;
		end   = dev->db_tab->min_group2;
		dir   = -1;
		break;

	default:
		ret = -EINVAL;
		goto out;
	}

	/* First try a partially used page in this group... */
	for (i = start; i != end; i += dir)
		if (dev->db_tab->page[i].db_rec &&
		    !bitmap_full(dev->db_tab->page[i].used,
				 MTHCA_DB_REC_PER_PAGE)) {
			page = dev->db_tab->page + i;
			goto found;
		}

	/* ...then an unallocated page slot already inside the group... */
	for (i = start; i != end; i += dir)
		if (!dev->db_tab->page[i].db_rec) {
			page = dev->db_tab->page + i;
			goto alloc;
		}

	/* ...and finally grow the group by one page if there is room. */
	if (dev->db_tab->max_group1 >= dev->db_tab->min_group2 - 1) {
		ret = -ENOMEM;
		goto out;
	}

	if (group == 0)
		++dev->db_tab->max_group1;
	else
		--dev->db_tab->min_group2;

	page = dev->db_tab->page + end;

alloc:
	page->db_rec = dma_alloc_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
					  &page->mapping, GFP_KERNEL);
	if (!page->db_rec) {
		ret = -ENOMEM;
		goto out;
	}
	memset(page->db_rec, 0, MTHCA_ICM_PAGE_SIZE);

	ret = mthca_MAP_ICM_page(dev, page->mapping,
				 mthca_uarc_virt(dev, &dev->driver_uar, i), &status);
	if (!ret && status)
		ret = -EINVAL;
	if (ret) {
		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		goto out;
	}

	bitmap_zero(page->used, MTHCA_DB_REC_PER_PAGE);

found:
	j = find_first_zero_bit(page->used, MTHCA_DB_REC_PER_PAGE);
	set_bit(j, page->used);

	/* Group 2 fills its pages from the top down. */
	if (group == 1)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;

	ret = i * MTHCA_DB_REC_PER_PAGE + j;

	page->db_rec[j] = cpu_to_be64((qn << 8) | (type << 5));

	*db = (__be32 *) &page->db_rec[j];

out:
	mutex_unlock(&dev->db_tab->mutex);

	return ret;
}

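/*
 * Free one kernel doorbell record; its backing page is unmapped and
 * freed only once it is empty and sits at the edge of its group.
 */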
void mthca_free_db(struct mthca_dev *dev, int type, int db_index)
{
	int i, j;
	struct mthca_db_page *page;
	u8 status;

	i = db_index / MTHCA_DB_REC_PER_PAGE;
	j = db_index % MTHCA_DB_REC_PER_PAGE;

	page = dev->db_tab->page + i;

	mutex_lock(&dev->db_tab->mutex);

	page->db_rec[j] = 0;
	if (i >= dev->db_tab->min_group2)
		j = MTHCA_DB_REC_PER_PAGE - 1 - j;
	clear_bit(j, page->used);

	if (bitmap_empty(page->used, MTHCA_DB_REC_PER_PAGE) &&
	    i >= dev->db_tab->max_group1 - 1) {
		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  page->db_rec, page->mapping);
		page->db_rec = NULL;

		if (i == dev->db_tab->max_group1) {
			--dev->db_tab->max_group1;
			/* XXX may be able to unmap more pages now */
		}
		if (i == dev->db_tab->min_group2)
			++dev->db_tab->min_group2;
	}

	mutex_unlock(&dev->db_tab->mutex);
}

int mthca_init_db_tab(struct mthca_dev *dev)
{
	int i;

	if (!mthca_is_memfree(dev))
		return 0;

	dev->db_tab = kmalloc(sizeof *dev->db_tab, GFP_KERNEL);
	if (!dev->db_tab)
		return -ENOMEM;

	mutex_init(&dev->db_tab->mutex);

	dev->db_tab->npages     = dev->uar_table.uarc_size / MTHCA_ICM_PAGE_SIZE;
	dev->db_tab->max_group1 = 0;
	dev->db_tab->min_group2 = dev->db_tab->npages - 1;

	dev->db_tab->page = kmalloc(dev->db_tab->npages *
				    sizeof *dev->db_tab->page,
				    GFP_KERNEL);
	if (!dev->db_tab->page) {
		kfree(dev->db_tab);
		return -ENOMEM;
	}

	for (i = 0; i < dev->db_tab->npages; ++i)
		dev->db_tab->page[i].db_rec = NULL;

	return 0;
}

void mthca_cleanup_db_tab(struct mthca_dev *dev)
{
	int i;
	u8 status;

	if (!mthca_is_memfree(dev))
		return;

	/*
	 * Because we don't always free our UARC pages when they
	 * get allocated in mthca_alloc_db, we need to make sure to
	 * free them here.
	 */
	for (i = 0; i < dev->db_tab->npages; ++i) {
		if (!dev->db_tab->page[i].db_rec)
			continue;

		if (!bitmap_empty(dev->db_tab->page[i].used, MTHCA_DB_REC_PER_PAGE))
			mthca_warn(dev, "Kernel UARC page %d not empty\n", i);

		mthca_UNMAP_ICM(dev, mthca_uarc_virt(dev, &dev->driver_uar, i), 1, &status);

		dma_free_coherent(&dev->pdev->dev, MTHCA_ICM_PAGE_SIZE,
				  dev->db_tab->page[i].db_rec,
				  dev->db_tab->page[i].mapping);
	}

	kfree(dev->db_tab->page);
	kfree(dev->db_tab);
}