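/*
 * Scatterlist (sg) handling helpers.
 */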
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
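
/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 **/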
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
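
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Walks the list with sg_next() and counts how many entries it contains,
 *   taking chaining into account.
 **/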
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);
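
/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *		      needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in @sg that are required to meet
 *   the supplied length, taking chaining into account.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 **/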
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
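
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl pointer passed in need not be the first one,
 *   the important bit is that @nents denotes the number of entries that
 *   exist from @sgl.
 **/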
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);
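
/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 **/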
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);
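
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 **/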
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
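
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * based allocation helpers.
 */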
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw form) for kernel data structures.
		 * Register this full-page allocation explicitly so that it
		 * is still scanned for pointers and reported if it leaks.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}
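
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 **/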
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
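
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 **/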
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
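
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL, if preallocated (may be %NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 **/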
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
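
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 **/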
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
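
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to the
 *   maximum size specified in @max_segment. A user may provide an offset at a
 *   start and a size of valid data in a buffer specified by the page array.
 *   The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */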
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);
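
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */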
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

#ifdef CONFIG_SGL_ALLOC
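
/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */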
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			/* free the pages already allocated with the same order */
			sgl_free_order(sgl, order);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);
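
/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */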
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
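
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   that a page would get freed twice.
 * - All pages in a chained scatterlist can be freed at once by setting
 *   nents to a high number.
 */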
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);
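
/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */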
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);
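
/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */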
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

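/*
 * Advance the page iterator to the next page in the scatterlist, moving
 * into subsequent (possibly chained) entries as needed.  Returns false
 * once the list is exhausted.
 */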
bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

static int sg_dma_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

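/*
 * Same as __sg_page_iter_next(), but walks the DMA-mapped length of each
 * entry (sg_dma_len()) instead of the CPU length.
 */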
bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
	struct sg_page_iter *piter = &dma_iter->base;

	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);
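
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags (SG_MITER_*)
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */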
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

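/*
 * Advance to the next page of the current sg entry (or the next entry) and
 * compute the offset and the number of bytes remaining in that page.
 * Returns false once the sg list is exhausted.
 */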
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				     (pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}
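
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to advance past the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */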
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);
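
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */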
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
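
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */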
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
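
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 **/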
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
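
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/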
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
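
/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/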
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
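
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/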
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
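
/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/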
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
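
/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/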
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);