1
2
3
4
5
6
7
8
9
10
11
/*
 * Indirect data address lists (IDALs) for s390 channel I/O: helpers
 * that translate virtual buffers into lists of block addresses a CCW
 * can reference, plus the idal_buffer abstraction built on top.
 */
#ifndef _S390_IDALS_H
#define _S390_IDALS_H

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <asm/cio.h>
#include <asm/uaccess.h>

/* One IDA word covers 2^IDA_SIZE_LOG bytes: 4K on 64-bit, 2K on 31-bit. */
#ifdef CONFIG_64BIT
#define IDA_SIZE_LOG 12
#else
#define IDA_SIZE_LOG 11
#endif
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
28
29
30
31
/*
 * Test whether an address/length pair needs an IDAL: on a 64-bit
 * kernel any buffer whose last byte lies at or above 2^31 does;
 * a 31-bit kernel never needs one.
 */
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
#ifdef CONFIG_64BIT
	unsigned long last_byte = __pa(vaddr) + length - 1;

	return (last_byte >> 31) != 0;
#else
	return 0;
#endif
}
41
42
43
44
45
46static inline unsigned int idal_nr_words(void *vaddr, unsigned int length)
47{
48 return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
49 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
50}
51
52
53
54
55static inline unsigned long *idal_create_words(unsigned long *idaws,
56 void *vaddr, unsigned int length)
57{
58 unsigned long paddr;
59 unsigned int cidaw;
60
61 paddr = __pa(vaddr);
62 cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
63 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
64 *idaws++ = paddr;
65 paddr &= -IDA_BLOCK_SIZE;
66 while (--cidaw > 0) {
67 paddr += IDA_BLOCK_SIZE;
68 *idaws++ = paddr;
69 }
70 return idaws;
71}
72
73
74
75
76
77static inline int
78set_normalized_cda(struct ccw1 * ccw, void *vaddr)
79{
80#ifdef CONFIG_64BIT
81 unsigned int nridaws;
82 unsigned long *idal;
83
84 if (ccw->flags & CCW_FLAG_IDA)
85 return -EINVAL;
86 nridaws = idal_nr_words(vaddr, ccw->count);
87 if (nridaws > 0) {
88 idal = kmalloc(nridaws * sizeof(unsigned long),
89 GFP_ATOMIC | GFP_DMA );
90 if (idal == NULL)
91 return -ENOMEM;
92 idal_create_words(idal, vaddr, ccw->count);
93 ccw->flags |= CCW_FLAG_IDA;
94 vaddr = idal;
95 }
96#endif
97 ccw->cda = (__u32)(unsigned long) vaddr;
98 return 0;
99}
100
101
102
103
104static inline void
105clear_normalized_cda(struct ccw1 * ccw)
106{
107#ifdef CONFIG_64BIT
108 if (ccw->flags & CCW_FLAG_IDA) {
109 kfree((void *)(unsigned long) ccw->cda);
110 ccw->flags &= ~CCW_FLAG_IDA;
111 }
112#endif
113 ccw->cda = 0;
114}
115
116
117
118
119struct idal_buffer {
120 size_t size;
121 size_t page_order;
122 void *data[0];
123};
124
125
126
127
128static inline struct idal_buffer *
129idal_buffer_alloc(size_t size, int page_order)
130{
131 struct idal_buffer *ib;
132 int nr_chunks, nr_ptrs, i;
133
134 nr_ptrs = (size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
135 nr_chunks = (4096 << page_order) >> IDA_SIZE_LOG;
136 ib = kmalloc(sizeof(struct idal_buffer) + nr_ptrs*sizeof(void *),
137 GFP_DMA | GFP_KERNEL);
138 if (ib == NULL)
139 return ERR_PTR(-ENOMEM);
140 ib->size = size;
141 ib->page_order = page_order;
142 for (i = 0; i < nr_ptrs; i++) {
143 if ((i & (nr_chunks - 1)) != 0) {
144 ib->data[i] = ib->data[i-1] + IDA_BLOCK_SIZE;
145 continue;
146 }
147 ib->data[i] = (void *)
148 __get_free_pages(GFP_KERNEL, page_order);
149 if (ib->data[i] != NULL)
150 continue;
151
152 while (i >= nr_chunks) {
153 i -= nr_chunks;
154 free_pages((unsigned long) ib->data[i],
155 ib->page_order);
156 }
157 kfree(ib);
158 return ERR_PTR(-ENOMEM);
159 }
160 return ib;
161}
162
163
164
165
166static inline void
167idal_buffer_free(struct idal_buffer *ib)
168{
169 int nr_chunks, nr_ptrs, i;
170
171 nr_ptrs = (ib->size + IDA_BLOCK_SIZE - 1) >> IDA_SIZE_LOG;
172 nr_chunks = (4096 << ib->page_order) >> IDA_SIZE_LOG;
173 for (i = 0; i < nr_ptrs; i += nr_chunks)
174 free_pages((unsigned long) ib->data[i], ib->page_order);
175 kfree(ib);
176}
177
178
179
180
181static inline int
182__idal_buffer_is_needed(struct idal_buffer *ib)
183{
184#ifdef CONFIG_64BIT
185 return ib->size > (4096ul << ib->page_order) ||
186 idal_is_needed(ib->data[0], ib->size);
187#else
188 return ib->size > (4096ul << ib->page_order);
189#endif
190}
191
192
193
194
195static inline void
196idal_buffer_set_cda(struct idal_buffer *ib, struct ccw1 *ccw)
197{
198 if (__idal_buffer_is_needed(ib)) {
199
200 ccw->cda = (u32)(addr_t) ib->data;
201 ccw->flags |= CCW_FLAG_IDA;
202 } else
203
204 ccw->cda = (u32)(addr_t) ib->data[0];
205 ccw->count = ib->size;
206}
207
208
209
210
211static inline size_t
212idal_buffer_to_user(struct idal_buffer *ib, void __user *to, size_t count)
213{
214 size_t left;
215 int i;
216
217 BUG_ON(count > ib->size);
218 for (i = 0; count > IDA_BLOCK_SIZE; i++) {
219 left = copy_to_user(to, ib->data[i], IDA_BLOCK_SIZE);
220 if (left)
221 return left + count - IDA_BLOCK_SIZE;
222 to = (void __user *) to + IDA_BLOCK_SIZE;
223 count -= IDA_BLOCK_SIZE;
224 }
225 return copy_to_user(to, ib->data[i], count);
226}
227
228
229
230
231static inline size_t
232idal_buffer_from_user(struct idal_buffer *ib, const void __user *from, size_t count)
233{
234 size_t left;
235 int i;
236
237 BUG_ON(count > ib->size);
238 for (i = 0; count > IDA_BLOCK_SIZE; i++) {
239 left = copy_from_user(ib->data[i], from, IDA_BLOCK_SIZE);
240 if (left)
241 return left + count - IDA_BLOCK_SIZE;
242 from = (void __user *) from + IDA_BLOCK_SIZE;
243 count -= IDA_BLOCK_SIZE;
244 }
245 return copy_from_user(ib->data[i], from, count);
246}
247
248#endif
249