/*
 * NOTE(review): the original copyright/license banner occupied these lines but
 * was lost during extraction (only bare line numbers remain). Restore the
 * Mellanox dual BSD/GPL license header from the upstream kernel source
 * (drivers/net/ethernet/mellanox/mlx4/icm.h) before redistribution.
 */
#ifndef MLX4_ICM_H
#define MLX4_ICM_H

#include <linux/list.h>
#include <linux/pci.h>
#include <linux/mutex.h>

/*
 * Number of scatterlist entries per ICM chunk, chosen so that
 * struct mlx4_icm_chunk (list head + npages + nsg + mem[]) fits
 * in roughly 256 bytes.
 */
#define MLX4_ICM_CHUNK_LEN \
	((256 - sizeof (struct list_head) - 2 * sizeof (int)) /	\
	 (sizeof (struct scatterlist)))

enum {
	/* ICM is always mapped in 4K pages, independent of CPU PAGE_SIZE */
	MLX4_ICM_PAGE_SHIFT	= 12,
	MLX4_ICM_PAGE_SIZE	= 1 << MLX4_ICM_PAGE_SHIFT,
};
49
/* One chunk of an ICM allocation: a bounded array of scatterlist entries. */
struct mlx4_icm_chunk {
	struct list_head	list;	/* entry on mlx4_icm::chunk_list */
	int			npages;	/* pages allocated in mem[] */
	int			nsg;	/* entries of mem[] mapped for DMA */
	struct scatterlist	mem[MLX4_ICM_CHUNK_LEN];
};
56
/* An ICM allocation: a refcounted list of chunks. */
struct mlx4_icm {
	struct list_head	chunk_list;	/* list of mlx4_icm_chunk */
	int			refcount;	/* managed by table get/put */
};
61
/*
 * Cursor for walking the DMA-mapped scatterlist entries of an ICM
 * allocation; see mlx4_icm_first()/mlx4_icm_next()/mlx4_icm_last().
 */
struct mlx4_icm_iter {
	struct mlx4_icm		*icm;		/* allocation being iterated */
	struct mlx4_icm_chunk	*chunk;		/* current chunk, NULL at end */
	int			page_idx;	/* index into chunk->mem[] */
};
67
68struct mlx4_dev;
69
70struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
71 gfp_t gfp_mask, int coherent);
72void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent);
73
74int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj,
75 gfp_t gfp);
76void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, u32 obj);
77int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
78 u32 start, u32 end);
79void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
80 u32 start, u32 end);
81int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
82 u64 virt, int obj_size, u32 nobj, int reserved,
83 int use_lowmem, int use_coherent);
84void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table);
85void *mlx4_table_find(struct mlx4_icm_table *table, u32 obj, dma_addr_t *dma_handle);
86
87static inline void mlx4_icm_first(struct mlx4_icm *icm,
88 struct mlx4_icm_iter *iter)
89{
90 iter->icm = icm;
91 iter->chunk = list_empty(&icm->chunk_list) ?
92 NULL : list_entry(icm->chunk_list.next,
93 struct mlx4_icm_chunk, list);
94 iter->page_idx = 0;
95}
96
97static inline int mlx4_icm_last(struct mlx4_icm_iter *iter)
98{
99 return !iter->chunk;
100}
101
102static inline void mlx4_icm_next(struct mlx4_icm_iter *iter)
103{
104 if (++iter->page_idx >= iter->chunk->nsg) {
105 if (iter->chunk->list.next == &iter->icm->chunk_list) {
106 iter->chunk = NULL;
107 return;
108 }
109
110 iter->chunk = list_entry(iter->chunk->list.next,
111 struct mlx4_icm_chunk, list);
112 iter->page_idx = 0;
113 }
114}
115
116static inline dma_addr_t mlx4_icm_addr(struct mlx4_icm_iter *iter)
117{
118 return sg_dma_address(&iter->chunk->mem[iter->page_idx]);
119}
120
121static inline unsigned long mlx4_icm_size(struct mlx4_icm_iter *iter)
122{
123 return sg_dma_len(&iter->chunk->mem[iter->page_idx]);
124}
125
/*
 * Firmware commands mapping/unmapping the auxiliary ICM area.
 * NOTE(review): implemented elsewhere (icm.c); presumed to issue the
 * MAP_ICM_AUX/UNMAP_ICM_AUX mailbox commands — confirm against icm.c.
 */
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm);
int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev);

#endif /* MLX4_ICM_H */
130