// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>
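
/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 */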
struct scatterlist *sg_next(struct scatterlist *sg)
{
        if (sg_is_last(sg))
                return NULL;

        sg++;
        if (unlikely(sg_is_chain(sg)))
                sg = sg_chain_ptr(sg);

        return sg;
}
EXPORT_SYMBOL(sg_next);
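
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Allows to know how many entries are in sg, taking into account
 *   chaining as well.
 */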
int sg_nents(struct scatterlist *sg)
{
        int nents;

        for (nents = 0; sg; sg = sg_next(sg))
                nents++;
        return nents;
}
EXPORT_SYMBOL(sg_nents);
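
/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *                    needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in sg that are required to meet
 *   the supplied length, taking into account chaining as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 */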
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
        int nents;
        u64 total;

        if (!len)
                return 0;

        for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
                nents++;
                total += sg->length;
                if (total >= len)
                        return nents;
        }

        return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
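
/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl pointer passed in need not be the first one,
 *   the important bit is that @nents denotes the number of entries that
 *   exist from @sgl.
 */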
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
        struct scatterlist *sg, *ret = NULL;
        unsigned int i;

        for_each_sg(sgl, sg, nents, i)
                ret = sg;

        BUG_ON(!sg_is_last(ret));
        return ret;
}
EXPORT_SYMBOL(sg_last);
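
/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 */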
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
        memset(sgl, 0, sizeof(*sgl) * nents);
        sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);
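
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 */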
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
        sg_init_table(sg, 1);
        sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
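
/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */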
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                /*
                 * Kmemleak doesn't track page allocations as they are
                 * not commonly used (in a raw form) for kernel data
                 * structures.  So the whole page used for this chunk
                 * must be reported to kmemleak explicitly, or leaks of
                 * large scatterlists would go undetected.
                 */
                void *ptr = (void *) __get_free_page(gfp_mask);

                kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
                return ptr;
        } else
                return kmalloc_array(nents, sizeof(struct scatterlist),
                                     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
        if (nents == SG_MAX_SINGLE_ALLOC) {
                kmemleak_free(sg);
                free_page((unsigned long) sg);
        } else
                kfree(sg);
}
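
/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated first chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 */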
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
                     unsigned int nents_first_chunk, sg_free_fn *free_fn)
{
        struct scatterlist *sgl, *next;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;

        if (unlikely(!table->sgl))
                return;

        sgl = table->sgl;
        while (table->orig_nents) {
                unsigned int alloc_size = table->orig_nents;
                unsigned int sg_size;

                /*
                 * If we have more than max_ents segments left,
                 * then assign 'next' to the sg table after the current one.
                 * sg_size is then one less than alloc size, since the last
                 * element is the chain pointer.
                 */
                if (alloc_size > curr_max_ents) {
                        next = sg_chain_ptr(&sgl[curr_max_ents - 1]);
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else {
                        sg_size = alloc_size;
                        next = NULL;
                }

                table->orig_nents -= sg_size;
                if (nents_first_chunk)
                        nents_first_chunk = 0;
                else
                        free_fn(sgl, alloc_size);
                sgl = next;
                curr_max_ents = max_ents;
        }

        table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);
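
/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 */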
void sg_free_table(struct sg_table *table)
{
        __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);
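
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL if preallocated (may be %NULL)
 * @nents_first_chunk: Number of entries in the (preallocated) first
 *	scatterlist chunk, 0 means no such preallocated chunk provided by user
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 */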
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
                     unsigned int max_ents, struct scatterlist *first_chunk,
                     unsigned int nents_first_chunk, gfp_t gfp_mask,
                     sg_alloc_fn *alloc_fn)
{
        struct scatterlist *sg, *prv;
        unsigned int left;
        unsigned curr_max_ents = nents_first_chunk ?: max_ents;
        unsigned prv_max_ents;

        memset(table, 0, sizeof(*table));

        if (nents == 0)
                return -EINVAL;
#ifdef CONFIG_ARCH_NO_SG_CHAIN
        if (WARN_ON_ONCE(nents > max_ents))
                return -EINVAL;
#endif

        left = nents;
        prv = NULL;
        do {
                unsigned int sg_size, alloc_size = left;

                if (alloc_size > curr_max_ents) {
                        alloc_size = curr_max_ents;
                        sg_size = alloc_size - 1;
                } else
                        sg_size = alloc_size;

                left -= sg_size;

                if (first_chunk) {
                        sg = first_chunk;
                        first_chunk = NULL;
                } else {
                        sg = alloc_fn(alloc_size, gfp_mask);
                }
                if (unlikely(!sg)) {
                        /*
                         * Adjust entry count to reflect that the last
                         * entry of the previous table won't be used for
                         * linkage.  Without this, sg_kfree() may get
                         * confused.
                         */
                        if (prv)
                                table->nents = ++table->orig_nents;

                        return -ENOMEM;
                }

                sg_init_table(sg, alloc_size);
                table->nents = table->orig_nents += sg_size;

                /*
                 * If this is the first mapping, assign the sg table header.
                 * If this is not the first mapping, chain previous part.
                 */
                if (prv)
                        sg_chain(prv, prv_max_ents, sg);
                else
                        table->sgl = sg;

                /*
                 * If no more entries after this one, mark the end
                 */
                if (!left)
                        sg_mark_end(&sg[sg_size - 1]);

                prv = sg;
                prv_max_ents = curr_max_ents;
                curr_max_ents = max_ents;
        } while (left);

        return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);
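
/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 */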
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
        int ret;

        ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
                               NULL, 0, gfp_mask, sg_kmalloc);
        if (unlikely(ret))
                __sg_free_table(table, SG_MAX_SINGLE_ALLOC, 0, sg_kfree);

        return ret;
}
EXPORT_SYMBOL(sg_alloc_table);

static struct scatterlist *get_next_sg(struct sg_table *table,
                                       struct scatterlist *cur,
                                       unsigned long needed_sges,
                                       gfp_t gfp_mask)
{
        struct scatterlist *new_sg, *next_sg;
        unsigned int alloc_size;

        if (cur) {
                next_sg = sg_next(cur);
                /*
                 * Reuse the next entry, unless it is the last one and
                 * more entries are still needed; in that case it must
                 * be kept as a chain link.
                 */
                if (!sg_is_last(next_sg) || needed_sges == 1)
                        return next_sg;
        }

        alloc_size = min_t(unsigned long, needed_sges, SG_MAX_SINGLE_ALLOC);
        new_sg = sg_kmalloc(alloc_size, gfp_mask);
        if (!new_sg)
                return ERR_PTR(-ENOMEM);
        sg_init_table(new_sg, alloc_size);
        if (cur) {
                __sg_chain(next_sg, new_sg);
                table->orig_nents += alloc_size - 1;
        } else {
                table->sgl = new_sg;
                table->orig_nents = alloc_size;
                table->nents = 0;
        }
        return new_sg;
}
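
/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist element in bytes
 * @prv:	 Last populated sge in sgt
 * @left_pages:	 Left pages caller have to set after this call
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   If @prv is NULL, allocate and initialize an sg table from a list of
 *   pages, else reuse the scatterlist passed in at @prv and append the new
 *   pages to it.  Contiguous ranges of the pages are squashed into a single
 *   scatterlist entry up to the maximum size specified in @max_segment.
 *
 * Returns:
 *   Last SGE in sgt on success, PTR_ERR on otherwise.
 */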
struct scatterlist *__sg_alloc_table_from_pages(struct sg_table *sgt,
                struct page **pages, unsigned int n_pages, unsigned int offset,
                unsigned long size, unsigned int max_segment,
                struct scatterlist *prv, unsigned int left_pages,
                gfp_t gfp_mask)
{
        unsigned int chunks, cur_page, seg_len, i, prv_len = 0;
        unsigned int added_nents = 0;
        struct scatterlist *s = prv;

        /*
         * The algorithm below requires max_segment to be aligned to
         * PAGE_SIZE, otherwise the chunk merging could overshoot it.
         */
        max_segment = ALIGN_DOWN(max_segment, PAGE_SIZE);
        if (WARN_ON(max_segment < PAGE_SIZE))
                return ERR_PTR(-EINVAL);

        if (IS_ENABLED(CONFIG_ARCH_NO_SG_CHAIN) && prv)
                return ERR_PTR(-EOPNOTSUPP);

        if (prv) {
                unsigned long paddr = (page_to_pfn(sg_page(prv)) * PAGE_SIZE +
                                       prv->offset + prv->length) /
                                      PAGE_SIZE;

                if (WARN_ON(offset))
                        return ERR_PTR(-EINVAL);

                /* Merge contiguous pages into the last SG */
                prv_len = prv->length;
                while (n_pages && page_to_pfn(pages[0]) == paddr) {
                        if (prv->length + PAGE_SIZE > max_segment)
                                break;
                        prv->length += PAGE_SIZE;
                        paddr++;
                        pages++;
                        n_pages--;
                }
                if (!n_pages)
                        goto out;
        }

        /* compute number of contiguous chunks */
        chunks = 1;
        seg_len = 0;
        for (i = 1; i < n_pages; i++) {
                seg_len += PAGE_SIZE;
                if (seg_len >= max_segment ||
                    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
                        chunks++;
                        seg_len = 0;
                }
        }

        /* merging chunks and putting them into the scatterlist */
        cur_page = 0;
        for (i = 0; i < chunks; i++) {
                unsigned int j, chunk_size;

                /* look for the end of the current chunk */
                seg_len = 0;
                for (j = cur_page + 1; j < n_pages; j++) {
                        seg_len += PAGE_SIZE;
                        if (seg_len >= max_segment ||
                            page_to_pfn(pages[j]) !=
                            page_to_pfn(pages[j - 1]) + 1)
                                break;
                }

                /* Pass how many chunks might be left */
                s = get_next_sg(sgt, s, chunks - i + left_pages, gfp_mask);
                if (IS_ERR(s)) {
                        /*
                         * Adjust entry length to be as before function was
                         * called.
                         */
                        if (prv)
                                prv->length = prv_len;
                        return s;
                }
                chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
                sg_set_page(s, pages[cur_page],
                            min_t(unsigned long, size, chunk_size), offset);
                added_nents++;
                size -= chunk_size;
                offset = 0;
                cur_page = j;
        }
        sgt->nents += added_nents;
out:
        if (!left_pages)
                sg_mark_end(s);
        return s;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);
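
/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages.  Contiguous
 *   ranges of the pages are squashed into a single scatterlist node.  A user
 *   may provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array.  The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 */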
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
                              unsigned int n_pages, unsigned int offset,
                              unsigned long size, gfp_t gfp_mask)
{
        return PTR_ERR_OR_ZERO(__sg_alloc_table_from_pages(sgt, pages, n_pages,
                        offset, size, UINT_MAX, NULL, 0, gfp_mask));
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

#ifdef CONFIG_SGL_ALLOC
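
/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or NULL upon failure.
 */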
struct scatterlist *sgl_alloc_order(unsigned long long length,
                                    unsigned int order, bool chainable,
                                    gfp_t gfp, unsigned int *nent_p)
{
        struct scatterlist *sgl, *sg;
        struct page *page;
        unsigned int nent, nalloc;
        u32 elem_len;

        nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
        /* Check for integer overflow */
        if (length > (nent << (PAGE_SHIFT + order)))
                return NULL;
        nalloc = nent;
        if (chainable) {
                /* Check for integer overflow */
                if (nalloc + 1 < nalloc)
                        return NULL;
                nalloc++;
        }
        sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
                            gfp & ~GFP_DMA);
        if (!sgl)
                return NULL;

        sg_init_table(sgl, nalloc);
        sg = sgl;
        while (length) {
                elem_len = min_t(u64, length, PAGE_SIZE << order);
                page = alloc_pages(gfp, order);
                if (!page) {
                        sgl_free_order(sgl, order);
                        return NULL;
                }

                sg_set_page(sg, page, elem_len, 0);
                length -= elem_len;
                sg = sg_next(sg);
        }
        WARN_ONCE(length, "length = %llu\n", length);
        if (nent_p)
                *nent_p = nent;
        return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);
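
/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or NULL upon failure.
 */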
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
                              unsigned int *nent_p)
{
        return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
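
/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   that a page would get freed twice.
 * - All pages in a chained scatterlist can be freed at once by setting
 *   @nents to a high number.
 */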
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
        struct scatterlist *sg;
        struct page *page;
        int i;

        for_each_sg(sgl, sg, nents, i) {
                if (!sg)
                        break;
                page = sg_page(sg);
                if (page)
                        __free_pages(page, order);
        }
        kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);
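
/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */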
void sgl_free_order(struct scatterlist *sgl, int order)
{
        sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);
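
/**
 * sgl_free - free a scatterlist
 * @sgl: Scatterlist with one or more elements
 */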
void sgl_free(struct scatterlist *sgl)
{
        sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
                          struct scatterlist *sglist, unsigned int nents,
                          unsigned long pgoffset)
{
        piter->__pg_advance = 0;
        piter->__nents = nents;

        piter->sg = sglist;
        piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

static int sg_dma_page_count(struct scatterlist *sg)
{
        return PAGE_ALIGN(sg->offset + sg_dma_len(sg)) >> PAGE_SHIFT;
}

bool __sg_page_iter_dma_next(struct sg_dma_page_iter *dma_iter)
{
        struct sg_page_iter *piter = &dma_iter->base;

        if (!piter->__nents || !piter->sg)
                return false;

        piter->sg_pgoffset += piter->__pg_advance;
        piter->__pg_advance = 1;

        while (piter->sg_pgoffset >= sg_dma_page_count(piter->sg)) {
                piter->sg_pgoffset -= sg_dma_page_count(piter->sg);
                piter->sg = sg_next(piter->sg);
                if (!--piter->__nents || !piter->sg)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL(__sg_page_iter_dma_next);
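
/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */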
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
                    unsigned int nents, unsigned int flags)
{
        memset(miter, 0, sizeof(struct sg_mapping_iter));

        __sg_page_iter_start(&miter->piter, sgl, nents, 0);
        WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
        miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/*
 * Advance the page iterator if nothing remains in the current page, and
 * compute the offset into, and the number of bytes remaining in, the new
 * page, capped to a single page.
 */
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
        if (!miter->__remaining) {
                struct scatterlist *sg;

                if (!__sg_page_iter_next(&miter->piter))
                        return false;

                sg = miter->piter.sg;

                miter->__offset = miter->piter.sg_pgoffset ? 0 : sg->offset;
                miter->piter.sg_pgoffset += miter->__offset >> PAGE_SHIFT;
                miter->__offset &= PAGE_SIZE - 1;
                miter->__remaining = sg->offset + sg->length -
                                     (miter->piter.sg_pgoffset << PAGE_SHIFT) -
                                     miter->__offset;
                miter->__remaining = min_t(unsigned long, miter->__remaining,
                                           PAGE_SIZE - miter->__offset);
        }

        return true;
}
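
/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to plus the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */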
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
        sg_miter_stop(miter);

        while (offset) {
                off_t consumed;

                if (!sg_miter_get_next_page(miter))
                        return false;

                consumed = min_t(off_t, offset, miter->__remaining);
                miter->__offset += consumed;
                miter->__remaining -= consumed;
                offset -= consumed;
        }

        return true;
}
EXPORT_SYMBOL(sg_miter_skip);
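
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */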
bool sg_miter_next(struct sg_mapping_iter *miter)
{
        sg_miter_stop(miter);

        /*
         * Get the next page: __remaining and __offset were adjusted by
         * sg_miter_stop() for whatever the previous iteration consumed.
         */
        if (!sg_miter_get_next_page(miter))
                return false;

        miter->page = sg_page_iter_page(&miter->piter);
        miter->consumed = miter->length = miter->__remaining;

        if (miter->__flags & SG_MITER_ATOMIC)
                miter->addr = kmap_atomic(miter->page) + miter->__offset;
        else
                miter->addr = kmap(miter->page) + miter->__offset;

        return true;
}
EXPORT_SYMBOL(sg_miter_next);
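
/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */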
void sg_miter_stop(struct sg_mapping_iter *miter)
{
        WARN_ON(miter->consumed > miter->length);

        /* drop resources from the last iteration */
        if (miter->addr) {
                miter->__offset += miter->consumed;
                miter->__remaining -= miter->consumed;

                if ((miter->__flags & SG_MITER_TO_SG) &&
                    !PageSlab(miter->page))
                        flush_kernel_dcache_page(miter->page);

                if (miter->__flags & SG_MITER_ATOMIC) {
                        WARN_ON_ONCE(preemptible());
                        kunmap_atomic(miter->addr);
                } else
                        kunmap(miter->page);

                miter->page = NULL;
                miter->addr = NULL;
                miter->length = 0;
                miter->consumed = 0;
        }
}
EXPORT_SYMBOL(sg_miter_stop);
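
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 * @to_buffer:		 transfer direction (true == from an sg list to a
 *			 buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 */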
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
                      size_t buflen, off_t skip, bool to_buffer)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC;

        if (to_buffer)
                sg_flags |= SG_MITER_FROM_SG;
        else
                sg_flags |= SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while ((offset < buflen) && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);

                if (to_buffer)
                        memcpy(buf + offset, miter.addr, len);
                else
                        memcpy(miter.addr, buf + offset, len);

                offset += len;
        }

        sg_miter_stop(&miter);

        return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);
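
/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 */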
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                           const void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);
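
/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 *
 * Returns the number of copied bytes.
 */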
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                         void *buf, size_t buflen)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
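
/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy from
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 */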
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
                            const void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);
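
/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 Where to copy to
 * @buflen:		 The number of bytes to copy
 * @skip:		 Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 */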
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
                          void *buf, size_t buflen, off_t skip)
{
        return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);
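
/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buflen:		 The number of bytes to zero out
 * @skip:		 Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 */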
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
                      size_t buflen, off_t skip)
{
        unsigned int offset = 0;
        struct sg_mapping_iter miter;
        unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

        sg_miter_start(&miter, sgl, nents, sg_flags);

        if (!sg_miter_skip(&miter, skip))
                return 0;

        while (offset < buflen && sg_miter_next(&miter)) {
                unsigned int len;

                len = min(miter.length, buflen - offset);
                memset(miter.addr, 0, len);

                offset += len;
        }

        sg_miter_stop(&miter);
        return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);