#include <linux/prefetch.h>

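/**
 * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O pdir.
 * @ioc: The I/O Controller.
 * @startsg: The scatter/gather list of coalesced chunks.
 * @nents: The number of entries in the scatter/gather list.
 * @hint: The DMA Hint.
 * @iommu_io_pdir_entry: IOMMU-specific callback that writes one pdir entry.
 *
 * Walk the coalesced scatter/gather list and write one I/O pdir entry per
 * IOVP_SIZE page of each chunk, using the supplied callback.  Returns the
 * number of DMA streams that were filled in.
 */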
static inline unsigned int
iommu_fill_pdir(struct ioc *ioc, struct scatterlist *startsg, int nents,
		unsigned long hint,
		void (*iommu_io_pdir_entry)(u64 *, space_t, unsigned long,
					    unsigned long))
{
	struct scatterlist *dma_sg = startsg;
	unsigned int n_mappings = 0;
	unsigned long dma_offset = 0, dma_len = 0;
	u64 *pdirp = NULL;

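	/*
	** For efficiency, dma_sg starts one entry before the true start;
	** it is advanced inside the loop each time a new DMA stream begins.
	*/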
	dma_sg--;

	while (nents-- > 0) {
		unsigned long vaddr;
		long size;

		DBG_RUN_SG(" %d : %08lx/%05x %08lx/%05x\n", nents,
			   (unsigned long)sg_dma_address(startsg),
			   sg_dma_len(startsg),
			   sg_virt_addr(startsg), startsg->length
		);

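		/*
		** A PIDE_FLAG set in the dma_address (by iommu_coalesce_chunks)
		** marks the start of a new DMA stream.
		*/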
		if (sg_dma_address(startsg) & PIDE_FLAG) {
			u32 pide = sg_dma_address(startsg) & ~PIDE_FLAG;

			BUG_ON(pdirp && (dma_len != sg_dma_len(dma_sg)));

			dma_sg++;

			dma_len = sg_dma_len(startsg);
			sg_dma_len(startsg) = 0;
			dma_offset = (unsigned long) pide & ~IOVP_MASK;
			n_mappings++;
#if defined(ZX1_SUPPORT)
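			/* With ZX1_SUPPORT the IO virtual address space is not
			 * zero based, so OR in the IO virtual base address. */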
			sg_dma_address(dma_sg) = pide | ioc->ibase;
#else
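			/* Other IOMMUs (SBA, ccio, dino) use a zero-based IO
			 * virtual address space, so no base needs to be added. */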
			sg_dma_address(dma_sg) = pide;
#endif
			pdirp = &(ioc->pdir_base[pide >> IOVP_SHIFT]);
			prefetchw(pdirp);
		}

		BUG_ON(pdirp == NULL);

		vaddr = sg_virt_addr(startsg);
		sg_dma_len(dma_sg) += startsg->length;
		size = startsg->length + dma_offset;
		dma_offset = 0;
#ifdef IOMMU_MAP_STATS
		ioc->msg_pages += startsg->length >> IOVP_SHIFT;
#endif
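		/*
		** Write one IO pdir entry for each IOVP_SIZE page spanned by
		** this sg element (plus any leading offset into the first page).
		*/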
		do {
			iommu_io_pdir_entry(pdirp, KERNEL_SPACE,
					    vaddr, hint);
			vaddr += IOVP_SIZE;
			size -= IOVP_SIZE;
			pdirp++;
		} while (unlikely(size > 0));
		startsg++;
	}
	return n_mappings;
}

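/*
** iommu_coalesce_chunks() makes a first pass over the scatter/gather list
** and determines where the DMA streams break: it coalesces virtually
** contiguous, page-aligned entries, allocates IO pdir space for each stream
** via the supplied callback, and records the result in the head entry of
** each stream (PIDE_FLAG | pdir index | offset).  The pdir entries
** themselves are filled in later by iommu_fill_pdir().  Returns the number
** of DMA streams (mappings).
*/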
static inline unsigned int
iommu_coalesce_chunks(struct ioc *ioc, struct device *dev,
		struct scatterlist *startsg, int nents,
		int (*iommu_alloc_range)(struct ioc *, struct device *, size_t))
{
	struct scatterlist *contig_sg;
	unsigned long dma_offset, dma_len;
	unsigned int n_mappings = 0;
	unsigned int max_seg_size = dma_get_max_seg_size(dev);

	while (nents > 0) {
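		/*
		** Prepare for the first/next DMA stream.
		*/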
		contig_sg = startsg;
		dma_len = startsg->length;
		dma_offset = sg_virt_addr(startsg) & ~IOVP_MASK;

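		/* Defensively clear the DMA fields; they are recomputed below. */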
		sg_dma_address(startsg) = 0;
		sg_dma_len(startsg) = 0;

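		/*
		** Walk ahead and coalesce as many following entries as
		** possible.  The loop always looks one entry ahead, so it
		** terminates one iteration "early".
		*/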
		while (--nents > 0) {
			unsigned long prevstartsg_end, startsg_end;

			prevstartsg_end = sg_virt_addr(startsg) +
				startsg->length;

			startsg++;
			startsg_end = sg_virt_addr(startsg) +
				startsg->length;

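			/* Defensively clear the DMA fields of the entry we
			 * just advanced to. */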
			sg_dma_address(startsg) = 0;
			sg_dma_len(startsg) = 0;

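			/*
			** First make sure coalescing the next entry won't push
			** the current DMA stream past DMA_CHUNK_SIZE.
			*/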
			if (unlikely(ALIGN(dma_len + dma_offset + startsg->length,
					   IOVP_SIZE) > DMA_CHUNK_SIZE))
				break;

			if (startsg->length + dma_len > max_seg_size)
				break;

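			/*
			** Only coalesce if the previous entry ends on a page
			** boundary and this entry starts on one; otherwise the
			** two cannot be mapped contiguously in IO virtual space.
			*/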
			if (unlikely(((prevstartsg_end | sg_virt_addr(startsg)) & ~PAGE_MASK) != 0))
				break;

			dma_len += startsg->length;
		}

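		/*
		** End of this DMA stream: record the coalesced length and
		** allocate IO pdir space for it.  PIDE_FLAG marks this head
		** entry so iommu_fill_pdir() can recognise stream starts.
		*/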
		sg_dma_len(contig_sg) = dma_len;
		dma_len = ALIGN(dma_len + dma_offset, IOVP_SIZE);
		sg_dma_address(contig_sg) =
			PIDE_FLAG
			| (iommu_alloc_range(ioc, dev, dma_len) << IOVP_SHIFT)
			| dma_offset;
		n_mappings++;
	}

	return n_mappings;
}
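/*
 * Usage sketch (illustrative only, not part of this header): a driver's
 * map_sg path would typically coalesce first and then fill the IO pdir.
 * The names ioc, dev, sglist, nents, hint and the foo_alloc_range()/
 * foo_io_pdir_entry() callbacks below are placeholders for the driver's
 * own objects and implementations.
 *
 *	int n_streams;
 *
 *	n_streams = iommu_coalesce_chunks(ioc, dev, sglist, nents,
 *					  foo_alloc_range);
 *	(void) iommu_fill_pdir(ioc, sglist, nents, hint,
 *			       foo_io_pdir_entry);
 *
 * Afterwards the head entry of each of the n_streams chunks carries the
 * DMA address/length pair; all other entries have sg_dma_len() == 0.
 */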