1
2
3
4
5
6
7
8
9#include <linux/export.h>
10#include <linux/slab.h>
11#include <linux/scatterlist.h>
12#include <linux/highmem.h>
13#include <linux/kmemleak.h>
14
15
16
17
18
19
20
21
22
23
24
25struct scatterlist *sg_next(struct scatterlist *sg)
26{
27#ifdef CONFIG_DEBUG_SG
28 BUG_ON(sg->sg_magic != SG_MAGIC);
29#endif
30 if (sg_is_last(sg))
31 return NULL;
32
33 sg++;
34 if (unlikely(sg_is_chain(sg)))
35 sg = sg_chain_ptr(sg);
36
37 return sg;
38}
39EXPORT_SYMBOL(sg_next);
40
41
42
43
44
45
46
47
48
49
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Walks the list via sg_next() (so chained lists are handled) and
 *   counts the entries until the end of the list is reached.
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents = 0;

	while (sg) {
		nents++;
		sg = sg_next(sg);
	}

	return nents;
}
EXPORT_SYMBOL(sg_nents);
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
75{
76#ifndef ARCH_HAS_SG_CHAIN
77 struct scatterlist *ret = &sgl[nents - 1];
78#else
79 struct scatterlist *sg, *ret = NULL;
80 unsigned int i;
81
82 for_each_sg(sgl, sg, nents, i)
83 ret = sg;
84
85#endif
86#ifdef CONFIG_DEBUG_SG
87 BUG_ON(sgl[0].sg_magic != SG_MAGIC);
88 BUG_ON(!sg_is_last(ret));
89#endif
90 return ret;
91}
92EXPORT_SYMBOL(sg_last);
93
94
95
96
97
98
99
100
101
102
103
104void sg_init_table(struct scatterlist *sgl, unsigned int nents)
105{
106 memset(sgl, 0, sizeof(*sgl) * nents);
107#ifdef CONFIG_DEBUG_SG
108 {
109 unsigned int i;
110 for (i = 0; i < nents; i++)
111 sgl[i].sg_magic = SG_MAGIC;
112 }
113#endif
114 sg_mark_end(&sgl[nents - 1]);
115}
116EXPORT_SYMBOL(sg_init_table);
117
118
119
120
121
122
123
124
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
131
132
133
134
135
136static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
137{
138 if (nents == SG_MAX_SINGLE_ALLOC) {
139
140
141
142
143
144
145
146
147
148 void *ptr = (void *) __get_free_page(gfp_mask);
149 kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
150 return ptr;
151 } else
152 return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
153}
154
155static void sg_kfree(struct scatterlist *sg, unsigned int nents)
156{
157 if (nents == SG_MAX_SINGLE_ALLOC) {
158 kmemleak_free(sg);
159 free_page((unsigned long) sg);
160 } else
161 kfree(sg);
162}
163
164
165
166
167
168
169
170
171
172
173
174
175
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @free_fn:	Free function
 *
 *  Description:
 *    Free an sg table previously allocated and setup with
 *    __sg_alloc_table().  The @max_ents value must be identical to
 *    that previously used with __sg_alloc_table().
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 *
		 * NOTE: 'next' must be read from the chain entry BEFORE
		 * free_fn() releases the current chunk.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
212
213
214
215
216
217
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 *
 *  Description:
 *    Counterpart to sg_alloc_table(); uses the default sg_kfree()
 *    free function and SG_MAX_SINGLE_ALLOC chunk size.
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef ARCH_HAS_SG_CHAIN
	/* Without arch chaining support, the request must fit one chunk. */
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		/*
		 * When chaining, the last slot of each chunk is consumed by
		 * the chain pointer, so it holds one fewer usable segment.
		 */
		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
310
311
312
313
314
315
316
317
318
319
320
321
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table. If @nents@ is larger than
 *    SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       gfp_mask, sg_kmalloc);
	/* On failure, release any chunks that were already allocated. */
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:     Offset from start of the first page to the start of a buffer
 * @size:       Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 *  Description:
 *    Allocate and initialize an sg table from a list of pages. Contiguous
 *    ranges of the pages are squashed into a single scatterlist node. A user
 *    may provide an offset at a start and a size of valid data in a buffer
 *    specified by the page array. The returned sg table is released by
 *    sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 **/
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		/* @offset applies only to the first chunk; zeroed below. */
		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
398
399void __sg_page_iter_start(struct sg_page_iter *piter,
400 struct scatterlist *sglist, unsigned int nents,
401 unsigned long pgoffset)
402{
403 piter->__pg_advance = 0;
404 piter->__nents = nents;
405
406 piter->sg = sglist;
407 piter->sg_pgoffset = pgoffset;
408}
409EXPORT_SYMBOL(__sg_page_iter_start);
410
/* Number of pages spanned by one sg entry (offset + length, rounded up). */
static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}
415
/**
 * __sg_page_iter_next - advance the page iterator to the next page
 * @piter:	page iterator previously set up with __sg_page_iter_start()
 *
 * Returns true if a page is available, false when the list is exhausted.
 */
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	/* __pg_advance is 0 on the very first call, 1 afterwards. */
	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	/* Step over sg entries until the offset lands inside one of them. */
	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
434
435
436
437
438
439
440
441
442
443
444
445
446
447void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
448 unsigned int nents, unsigned int flags)
449{
450 memset(miter, 0, sizeof(struct sg_mapping_iter));
451
452 __sg_page_iter_start(&miter->piter, sgl, nents, 0);
453 WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
454 miter->__flags = flags;
455}
456EXPORT_SYMBOL(sg_miter_start);
457
/*
 * Advance @miter to the next page and compute the in-page window
 * (__offset / __remaining) for it.  Returns false when the list is
 * exhausted.  Does not map the page; that is done by sg_miter_next().
 */
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		/* sg->offset only applies within the entry's first page. */
		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		/* Clamp to what is left of this page. */
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to plus the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *
 * Notes:
 *   This function must be called just after sg_miter_start() or
 *   sg_miter_stop().
 *
 * Returns:
 *   true if @miter contains the offset, false if the offset is beyond
 *   the end of the sg list.
 */
static bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	/* Consume whole pages until the requested offset is reached. */
	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   May sleep if !SG_MITER_ATOMIC (kmap() can sleep; kmap_atomic() cannot).
 *
 * Returns:
 *   true if @miter contains the next mapping, false if end of sg list
 *   is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571void sg_miter_stop(struct sg_mapping_iter *miter)
572{
573 WARN_ON(miter->consumed > miter->length);
574
575
576 if (miter->addr) {
577 miter->__offset += miter->consumed;
578 miter->__remaining -= miter->consumed;
579
580 if (miter->__flags & SG_MITER_TO_SG)
581 flush_kernel_dcache_page(miter->page);
582
583 if (miter->__flags & SG_MITER_ATOMIC) {
584 WARN_ON_ONCE(preemptible());
585 kunmap_atomic(miter->addr);
586 } else
587 kunmap(miter->page);
588
589 miter->page = NULL;
590 miter->addr = NULL;
591 miter->length = 0;
592 miter->consumed = 0;
593 }
594}
595EXPORT_SYMBOL(sg_miter_stop);
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
611 void *buf, size_t buflen, off_t skip,
612 bool to_buffer)
613{
614 unsigned int offset = 0;
615 struct sg_mapping_iter miter;
616 unsigned long flags;
617 unsigned int sg_flags = SG_MITER_ATOMIC;
618
619 if (to_buffer)
620 sg_flags |= SG_MITER_FROM_SG;
621 else
622 sg_flags |= SG_MITER_TO_SG;
623
624 sg_miter_start(&miter, sgl, nents, sg_flags);
625
626 if (!sg_miter_skip(&miter, skip))
627 return false;
628
629 local_irq_save(flags);
630
631 while (sg_miter_next(&miter) && offset < buflen) {
632 unsigned int len;
633
634 len = min(miter.length, buflen - offset);
635
636 if (to_buffer)
637 memcpy(buf + offset, miter.addr, len);
638 else
639 memcpy(miter.addr, buf + offset, len);
640
641 offset += len;
642 }
643
644 sg_miter_stop(&miter);
645
646 local_irq_restore(flags);
647 return offset;
648}
649
650
651
652
653
654
655
656
657
658
659
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
666
667
668
669
670
671
672
673
674
675
676
/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
683
684
685
686
687
688
689
690
691
692
693
694
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
701
702
703
704
705
706
707
708
709
710
711
712
/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
719