/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);
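
/*
 * Usage sketch (illustrative, not part of the original file): sg_next()
 * is what for_each_sg() is built on, so a whole, possibly chained, list
 * can be walked without caring about chain entries. "sgl" and "nents"
 * stand in for a hypothetical caller's own table.
 *
 *	struct scatterlist *s;
 *	unsigned int i;
 *
 *	for_each_sg(sgl, s, nents, i)
 *		pr_debug("entry %u: %u bytes\n", i, s->length);
 */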

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Allows to know how many entries are in sg, taking into account
 *   chaining as well.
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;

	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_nents_for_len - return total count of entries in scatterlist
 *		      needed to satisfy the supplied length
 * @sg:		The scatterlist
 * @len:	The total required length
 *
 * Description:
 *   Determines the number of entries in sg that are required to meet
 *   the supplied length, taking into account chaining as well.
 *
 * Returns:
 *   the number of sg entries needed, negative error on failure
 **/
int sg_nents_for_len(struct scatterlist *sg, u64 len)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;
		if (total >= len)
			return nents;
	}

	return -EINVAL;
}
EXPORT_SYMBOL(sg_nents_for_len);
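
/*
 * Usage sketch (illustrative, not from the original source): a caller
 * that needs to know how many leading entries cover the first "len"
 * bytes of a request; "sgl" and "len" are hypothetical caller state.
 * A negative return (-EINVAL) means the list is shorter than "len".
 *
 *	int nents = sg_nents_for_len(sgl, len);
 *
 *	if (nents < 0)
 *		return nents;
 */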

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl pointer passed in need not be the first one,
 *   the important bit is that @nents denotes the number of entries that
 *   exist from @sgl.
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

	BUG_ON(!sg_is_last(ret));
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	The SG table
 * @nents:	Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
	sg_init_marker(sgl, nents);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		SG entry
 * @buf:	Virtual address for IO
 * @buflen:	IO length
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
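
/*
 * Usage sketch (illustrative): mapping a single kmalloc'ed buffer as a
 * one-element list, as many block and crypto drivers do. "buf" and
 * "buflen" are hypothetical; buf must not be a stack or vmalloc address,
 * since sg_set_buf() relies on virt_to_page().
 *
 *	struct scatterlist sg;
 *
 *	sg_init_one(&sg, buf, buflen);
 */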

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak doesn't track page allocations as they are not
		 * commonly used (in a raw sense) and thus not instrumented.
		 * Make sure that we also include the buried pointer as a
		 * possibility.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc_array(nents, sizeof(struct scatterlist),
				     gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
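
/*
 * Usage sketch (illustrative): the usual allocate/fill/free life cycle
 * of an sg_table. The entry count 16 is arbitrary; filling the entries
 * with sg_set_page() is left out.
 *
 *	struct sg_table table;
 *	int ret;
 *
 *	ret = sg_alloc_table(&table, 16, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...fill table.sgl, use it...
 *	sg_free_table(&table);
 */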

/**
 * __sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *				 an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @max_segment: Maximum size of a scatterlist node in bytes (page aligned)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node up to the
 *   maximum size specified in @max_segment. A user may provide an offset at a
 *   start and a size of valid data in a buffer specified by the page array.
 *   The returned sg table is released by sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int __sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
				unsigned int n_pages, unsigned int offset,
				unsigned long size, unsigned int max_segment,
				gfp_t gfp_mask)
{
	unsigned int chunks, cur_page, seg_len, i;
	int ret;
	struct scatterlist *s;

	if (WARN_ON(!max_segment || offset_in_page(max_segment)))
		return -EINVAL;

	/* compute number of contiguous chunks */
	chunks = 1;
	seg_len = 0;
	for (i = 1; i < n_pages; i++) {
		seg_len += PAGE_SIZE;
		if (seg_len >= max_segment ||
		    page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1) {
			chunks++;
			seg_len = 0;
		}
	}

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned int j, chunk_size;

		/* look for the end of the current chunk */
		seg_len = 0;
		for (j = cur_page + 1; j < n_pages; j++) {
			seg_len += PAGE_SIZE;
			if (seg_len >= max_segment ||
			    page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;
		}

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page],
			    min_t(unsigned long, size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table_from_pages);

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	 The sg table header to use
 * @pages:	 Pointer to an array of page pointers
 * @n_pages:	 Number of pages in the pages array
 * @offset:	 Offset from start of the first page to the start of a buffer
 * @size:	 Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	 GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table.
 *
 * Returns:
 *   0 on success, negative error on failure
 */
int sg_alloc_table_from_pages(struct sg_table *sgt, struct page **pages,
			      unsigned int n_pages, unsigned int offset,
			      unsigned long size, gfp_t gfp_mask)
{
	return __sg_alloc_table_from_pages(sgt, pages, n_pages, offset, size,
					   SCATTERLIST_MAX_SEGMENT, gfp_mask);
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);
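
/*
 * Usage sketch (illustrative): building a table straight from an array
 * of pinned user pages, e.g. after get_user_pages(). "pages", "n_pages",
 * "offset" and "size" are hypothetical caller state.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = sg_alloc_table_from_pages(&sgt, pages, n_pages, offset,
 *					size, GFP_KERNEL);
 *	if (ret)
 *		return ret;
 *	...DMA-map and use sgt.sgl...
 *	sg_free_table(&sgt);
 */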

#ifdef CONFIG_SGL_ALLOC

/**
 * sgl_alloc_order - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist. Must be at least one
 * @order: Second argument for alloc_pages()
 * @chainable: Whether or not to allocate an extra element in the scatterlist
 *	for scatterlist chaining purposes
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist that have pages
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc_order(unsigned long long length,
				    unsigned int order, bool chainable,
				    gfp_t gfp, unsigned int *nent_p)
{
	struct scatterlist *sgl, *sg;
	struct page *page;
	unsigned int nent, nalloc;
	u32 elem_len;

	nent = round_up(length, PAGE_SIZE << order) >> (PAGE_SHIFT + order);
	/* Check for integer overflow */
	if (length > (nent << (PAGE_SHIFT + order)))
		return NULL;
	nalloc = nent;
	if (chainable) {
		/* Check for integer overflow */
		if (nalloc + 1 < nalloc)
			return NULL;
		nalloc++;
	}
	sgl = kmalloc_array(nalloc, sizeof(struct scatterlist),
			    (gfp & ~GFP_DMA) | __GFP_ZERO);
	if (!sgl)
		return NULL;

	sg_init_table(sgl, nalloc);
	sg = sgl;
	while (length) {
		elem_len = min_t(u64, length, PAGE_SIZE << order);
		page = alloc_pages(gfp, order);
		if (!page) {
			sgl_free(sgl);
			return NULL;
		}

		sg_set_page(sg, page, elem_len, 0);
		length -= elem_len;
		sg = sg_next(sg);
	}
	WARN_ONCE(length, "length = %lld\n", length);
	if (nent_p)
		*nent_p = nent;
	return sgl;
}
EXPORT_SYMBOL(sgl_alloc_order);

/**
 * sgl_alloc - allocate a scatterlist and its pages
 * @length: Length in bytes of the scatterlist
 * @gfp: Memory allocation flags
 * @nent_p: [out] Number of entries in the scatterlist
 *
 * Returns: A pointer to an initialized scatterlist or %NULL upon failure.
 */
struct scatterlist *sgl_alloc(unsigned long long length, gfp_t gfp,
			      unsigned int *nent_p)
{
	return sgl_alloc_order(length, 0, false, gfp, nent_p);
}
EXPORT_SYMBOL(sgl_alloc);
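
/*
 * Usage sketch (illustrative): unlike sg_alloc_table(), sgl_alloc()
 * also allocates the backing pages, so it suits callers that need a
 * scratch buffer rather than a view of existing memory.
 *
 *	unsigned int nents;
 *	struct scatterlist *sgl;
 *
 *	sgl = sgl_alloc(SZ_64K, GFP_KERNEL, &nents);
 *	if (!sgl)
 *		return -ENOMEM;
 *	...use the nents entries...
 *	sgl_free(sgl);
 */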

/**
 * sgl_free_n_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @nents: Maximum number of elements to free
 * @order: Second argument for __free_pages()
 *
 * Notes:
 * - If several scatterlists have been chained and each chain element is
 *   freed separately then it's essential to set nents correctly to avoid
 *   that a page would get freed twice.
 * - All pages in a chained scatterlist can be freed at once by setting
 *   @nents to a high number.
 */
void sgl_free_n_order(struct scatterlist *sgl, int nents, int order)
{
	struct scatterlist *sg;
	struct page *page;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		if (!sg)
			break;
		page = sg_page(sg);
		if (page)
			__free_pages(page, order);
	}
	kfree(sgl);
}
EXPORT_SYMBOL(sgl_free_n_order);

/**
 * sgl_free_order - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 * @order: Second argument for __free_pages()
 */
void sgl_free_order(struct scatterlist *sgl, int order)
{
	sgl_free_n_order(sgl, INT_MAX, order);
}
EXPORT_SYMBOL(sgl_free_order);

/**
 * sgl_free - free a scatterlist and its pages
 * @sgl: Scatterlist with one or more elements
 */
void sgl_free(struct scatterlist *sgl)
{
	sgl_free_order(sgl, 0);
}
EXPORT_SYMBOL(sgl_free);

#endif /* CONFIG_SGL_ALLOC */

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

/* number of pages spanned by an sg entry, its intra-page offset included */
static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);
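
/*
 * Usage sketch (illustrative): the two helpers above back the
 * for_each_sg_page() macro, which visits each page of a list even when
 * one entry spans several pages. "sgl" and "nents" are hypothetical,
 * and do_something() stands in for arbitrary per-page work.
 *
 *	struct sg_page_iter piter;
 *
 *	for_each_sg_page(sgl, &piter, nents, 0)
 *		do_something(sg_page_iter_page(&piter));
 */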

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter: sg mapping iter to be started
 * @sgl: sg list to iterate over
 * @nents: number of sg entries
 * @flags: sg iterator flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

/* advance to the next page of the sg list if the current one is consumed */
static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter: sg mapping iter to be skipped
 * @offset: number of bytes to plus the current location
 *
 * Description:
 *   Sets the offset of @miter to its current location plus @offset bytes.
 *   If mapping iterator @miter has been proceeded by sg_miter_next(), this
 *   stops @miter.
 *
 * Context:
 *   Don't care if @miter is stopped, or not proceeded yet.
 *   Otherwise, preemption disabled if the SG_MITER_ATOMIC is set.
 *
 * Returns:
 *   true if @miter contains the valid mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping. @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get to the next page if necessary.
	 * __remaining, __offset is adjusted by sg_miter_stop
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);
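
/*
 * Usage sketch (illustrative): the canonical start/next/stop loop for
 * reading list contents through kmap. With SG_MITER_ATOMIC the loop
 * body must not sleep between sg_miter_next() and sg_miter_stop().
 *
 *	struct sg_mapping_iter miter;
 *
 *	sg_miter_start(&miter, sgl, nents,
 *		       SG_MITER_ATOMIC | SG_MITER_FROM_SG);
 *	while (sg_miter_next(&miter)) {
 *		...read miter.length bytes at miter.addr...
 *	}
 *	sg_miter_stop(&miter);
 */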

/**
 * sg_miter_stop - stop mapping iteration
 * @miter: sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 * @to_buffer:	transfer direction (true == from an sg list to a
 *		buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents, void *buf,
		      size_t buflen, off_t skip, bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	while ((offset < buflen) && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	return offset;
}
EXPORT_SYMBOL(sg_copy_buffer);

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   const void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
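
/*
 * Usage sketch (illustrative): bouncing a request's data into a linear
 * scratch buffer and back; "sgl", "nents", "buf" and "buflen" are
 * hypothetical. The return value is the number of bytes actually
 * copied, which may be less than buflen for a short list.
 *
 *	size_t copied;
 *
 *	copied = sg_copy_to_buffer(sgl, nents, buf, buflen);
 *	...operate on buf...
 *	sg_copy_from_buffer(sgl, nents, buf, copied);
 */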

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    const void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, (void *)buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);

/**
 * sg_zero_buffer - Zero-out a part of a SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buflen:	The number of bytes to zero out
 * @skip:	Number of bytes to skip before zeroing
 *
 * Returns the number of bytes zeroed.
 **/
size_t sg_zero_buffer(struct scatterlist *sgl, unsigned int nents,
		      size_t buflen, off_t skip)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned int sg_flags = SG_MITER_ATOMIC | SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return false;

	while (offset < buflen && sg_miter_next(&miter)) {
		unsigned int len;

		len = min(miter.length, buflen - offset);
		memset(miter.addr, 0, len);

		offset += len;
	}

	sg_miter_stop(&miter);
	return offset;
}
EXPORT_SYMBOL(sg_zero_buffer);