/*
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg@ + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
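
/*
 * Illustrative sketch (not part of the original file): walking a scatterlist
 * with sg_next().  The identifiers "sgl" and "nents" are assumed to be
 * caller-provided.
 *
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	for (sg = sgl, i = 0; sg && i < nents; sg = sg_next(sg), i++)
 *		pr_debug("entry %u: page %p len %u\n",
 *			 i, sg_page(sg), sg->length);
 *
 * The for_each_sg() helper in <linux/scatterlist.h> expands to an
 * equivalent loop and is normally preferred (sg_last() below uses it).
 */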

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Returns the last entry of the list.  When the architecture supports
 *   sg chaining (ARCH_HAS_SG_CHAIN), this has to walk the whole list, so
 *   it should only be used casually.
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
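
/*
 * Illustrative sketch (not part of the original file): describing one
 * kmalloc'ed buffer with a single-entry scatterlist via sg_init_one().
 * The identifiers "buf" and "len" are assumed to be caller-provided; the
 * buffer must be a linearly mapped address, since sg_set_buf() uses
 * virt_to_page() on it.
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, len);
 *
 * Afterwards sg carries the page, offset and length of the buffer, with
 * the end marker set, so it forms a complete one-entry list.
 */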

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * based allocators; callers of __sg_alloc_table()/__sg_free_table() may
 * supply their own.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Full-page allocations done with __get_free_page() are
		 * not tracked by kmemleak automatically, so report this
		 * one to kmemleak explicitly here; sg_kfree() tells
		 * kmemleak when it is released again.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long.  The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end.
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * Only really needed for mempool backed sg allocations (like
		 * SCSI), a possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags.
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table.  If @nents@ is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
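
/*
 * Illustrative sketch (not part of the original file): building and tearing
 * down a table with sg_alloc_table().  The identifiers "pages" and "npages"
 * are assumed to be caller-provided.
 *
 *	struct sg_table table;
 *	struct scatterlist *sg;
 *	unsigned int i;
 *
 *	if (sg_alloc_table(&table, npages, GFP_KERNEL))
 *		return -ENOMEM;
 *	for_each_sg(table.sgl, sg, table.nents, i)
 *		sg_set_page(sg, pages[i], PAGE_SIZE, 0);
 *
 * table.sgl / table.nents then describe the whole buffer; once the caller
 * is done with it, sg_free_table(&table) releases every chained chunk.
 */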

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: SG_MITER_* flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	miter->__sg = sgl;
	miter->__nents = nents;
	miter->__offset = 0;
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter@ to the next mapping.  @miter@ should have been
 *   started using sg_miter_start().  On successful return,
 *   @miter@->page, @miter@->addr and @miter@->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
 *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the current page within the current sg entry */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() again.  This is useful when resources
 *   (kmap) need to be released during iteration.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC is set.  Don't care otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;

		if (miter->__flags & SG_MITER_TO_SG)
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON(!irqs_disabled());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
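
/*
 * Illustrative sketch (not part of the original file): the intended
 * start/next/stop pattern for the mapping iterator, here zeroing every
 * byte covered by a scatterlist.  The identifiers "sgl" and "nents" are
 * assumed to be caller-provided.
 *
 *	struct sg_mapping_iter miter;
 *	unsigned long flags;
 *
 *	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_TO_SG);
 *	local_irq_save(flags);
 *	while (sg_miter_next(&miter))
 *		memset(miter.addr, 0, miter.length);
 *	sg_miter_stop(&miter);
 *	local_irq_restore(flags);
 *
 * With SG_MITER_ATOMIC the pages are mapped with kmap_atomic(), so
 * interrupts must stay disabled between sg_miter_next() and
 * sg_miter_stop(), exactly as sg_copy_buffer() below does.
 */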

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	The linear buffer to copy to/from
 * @buflen:	The number of bytes to copy
 * @to_buffer:	transfer direction (non-zero == copy from the sg list into
 *		@buf, 0 == copy from @buf into the sg list)
 *
 * Returns the number of copied bytes.
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 1);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
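
/*
 * Illustrative sketch (not part of the original file): draining a
 * scatterlist into a linear buffer with sg_copy_to_buffer().  The
 * identifiers "sgl", "nents" and "size" are assumed to be caller-provided.
 *
 *	char *buf = kmalloc(size, GFP_KERNEL);
 *	size_t copied;
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	copied = sg_copy_to_buffer(sgl, nents, buf, size);
 *
 * "copied" may be less than "size" if the scatterlist holds fewer bytes.
 */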