/*
 * Copyright (C) 2007 Jens Axboe <jens.axboe@oracle.com>
 *
 * Scatterlist handling helpers.
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/kmemleak.h>

/**
 * sg_next - return the next scatterlist entry in a list
 * @sg:		The current sg entry
 *
 * Description:
 *   Usually the next entry will be @sg + 1, but if this sg element is part
 *   of a chained scatterlist, it could jump to the start of a new
 *   scatterlist array.
 **/
struct scatterlist *sg_next(struct scatterlist *sg)
{
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	if (sg_is_last(sg))
		return NULL;

	sg++;
	if (unlikely(sg_is_chain(sg)))
		sg = sg_chain_ptr(sg);

	return sg;
}
EXPORT_SYMBOL(sg_next);

/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 *   Walks the list with sg_next(), so chained scatterlists are counted
 *   correctly as well.
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
EXPORT_SYMBOL(sg_nents);

/**
 * sg_last - return the last scatterlist entry in a list
 * @sgl:	First entry in the scatterlist
 * @nents:	Number of entries in the scatterlist
 *
 * Description:
 *   Should only be used casually, it (currently) scans the entire list
 *   to get the last entry.
 *
 *   Note that the @sgl pointer passed in need not be the first one,
 *   you can traverse the list as if it were the first.
 **/
struct scatterlist *sg_last(struct scatterlist *sgl, unsigned int nents)
{
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	struct scatterlist *ret = &sgl[nents - 1];
#else
	struct scatterlist *sg, *ret = NULL;
	unsigned int i;

	for_each_sg(sgl, sg, nents, i)
		ret = sg;

#endif
#ifdef CONFIG_DEBUG_SG
	BUG_ON(sgl[0].sg_magic != SG_MAGIC);
	BUG_ON(!sg_is_last(ret));
#endif
	return ret;
}
EXPORT_SYMBOL(sg_last);

/**
 * sg_init_table - Initialize SG table
 * @sgl:	   The SG table
 * @nents:	   Number of entries in table
 *
 * Notes:
 *   If this is part of a chained sg table, sg_mark_end() should be
 *   used only on the last table part.
 **/
void sg_init_table(struct scatterlist *sgl, unsigned int nents)
{
	memset(sgl, 0, sizeof(*sgl) * nents);
#ifdef CONFIG_DEBUG_SG
	{
		unsigned int i;
		for (i = 0; i < nents; i++)
			sgl[i].sg_magic = SG_MAGIC;
	}
#endif
	sg_mark_end(&sgl[nents - 1]);
}
EXPORT_SYMBOL(sg_init_table);

/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
EXPORT_SYMBOL(sg_init_one);
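
/*
 * Usage sketch (added for illustration, not part of the original file):
 * how a caller might describe a single contiguous buffer with
 * sg_init_one().  The function name below is hypothetical.
 */
static int __maybe_unused sg_example_single_buf(void *buf, unsigned int len)
{
	struct scatterlist sg;

	/* One-entry list pointing at buf; the entry is also marked last. */
	sg_init_one(&sg, buf, len);

	/* The entry could now be handed to dma_map_sg() or an sg_miter. */
	return sg_nents(&sg);	/* always 1 for a single-entry list */
}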

/*
 * The default behaviour of sg_alloc_table() is to use these kmalloc/kfree
 * helpers.
 */
static struct scatterlist *sg_kmalloc(unsigned int nents, gfp_t gfp_mask)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		/*
		 * Kmemleak does not track raw page allocations, so report
		 * this one explicitly.  Otherwise objects referenced only
		 * from this chunk would not be scanned and could show up
		 * as false-positive leaks.
		 */
		void *ptr = (void *) __get_free_page(gfp_mask);
		kmemleak_alloc(ptr, PAGE_SIZE, 1, gfp_mask);
		return ptr;
	} else
		return kmalloc(nents * sizeof(struct scatterlist), gfp_mask);
}

static void sg_kfree(struct scatterlist *sg, unsigned int nents)
{
	if (nents == SG_MAX_SINGLE_ALLOC) {
		kmemleak_free(sg);
		free_page((unsigned long) sg);
	} else
		kfree(sg);
}

/**
 * __sg_free_table - Free a previously mapped sg table
 * @table:	The sg table header to use
 * @max_ents:	The maximum number of entries per single scatterlist
 * @skip_first_chunk: don't free the (preallocated) first scatterlist chunk
 * @free_fn:	Free function
 *
 * Description:
 *   Free an sg table previously allocated and setup with
 *   __sg_alloc_table().  The @max_ents value must be identical to
 *   that previously used with __sg_alloc_table().
 **/
void __sg_free_table(struct sg_table *table, unsigned int max_ents,
		     bool skip_first_chunk, sg_free_fn *free_fn)
{
	struct scatterlist *sgl, *next;

	if (unlikely(!table->sgl))
		return;

	sgl = table->sgl;
	while (table->orig_nents) {
		unsigned int alloc_size = table->orig_nents;
		unsigned int sg_size;

		/*
		 * If we have more than max_ents segments left,
		 * then assign 'next' to the sg table after the current one.
		 * sg_size is then one less than alloc size, since the last
		 * element is the chain pointer.
		 */
		if (alloc_size > max_ents) {
			next = sg_chain_ptr(&sgl[max_ents - 1]);
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else {
			sg_size = alloc_size;
			next = NULL;
		}

		table->orig_nents -= sg_size;
		if (skip_first_chunk)
			skip_first_chunk = false;
		else
			free_fn(sgl, alloc_size);
		sgl = next;
	}

	table->sgl = NULL;
}
EXPORT_SYMBOL(__sg_free_table);

/**
 * sg_free_table - Free a previously allocated sg table
 * @table:	The mapped sg table header
 **/
void sg_free_table(struct sg_table *table)
{
	__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);
}
EXPORT_SYMBOL(sg_free_table);

/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @first_chunk: first SGL, if preallocated (may be NULL)
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, struct scatterlist *first_chunk,
		     gfp_t gfp_mask, sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;

	memset(table, 0, sizeof(*table));

	if (nents == 0)
		return -EINVAL;
#ifndef CONFIG_ARCH_HAS_SG_CHAIN
	if (WARN_ON_ONCE(nents > max_ents))
		return -EINVAL;
#endif

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		if (first_chunk) {
			sg = first_chunk;
			first_chunk = NULL;
		} else {
			sg = alloc_fn(alloc_size, gfp_mask);
		}
		if (unlikely(!sg)) {
			/*
			 * Adjust entry count to reflect that the last
			 * entry of the previous table won't be used for
			 * linkage.  Without this, sg_kfree() may get
			 * confused.
			 */
			if (prv)
				table->nents = ++table->orig_nents;

			return -ENOMEM;
		}

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		prv = sg;
	} while (left);

	return 0;
}
EXPORT_SYMBOL(__sg_alloc_table);

/**
 * sg_alloc_table - Allocate and initialize an sg table
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table. If @nents is larger than
 *   SG_MAX_SINGLE_ALLOC a chained sg table will be setup.
 **/
int sg_alloc_table(struct sg_table *table, unsigned int nents, gfp_t gfp_mask)
{
	int ret;

	ret = __sg_alloc_table(table, nents, SG_MAX_SINGLE_ALLOC,
			       NULL, gfp_mask, sg_kmalloc);
	if (unlikely(ret))
		__sg_free_table(table, SG_MAX_SINGLE_ALLOC, false, sg_kfree);

	return ret;
}
EXPORT_SYMBOL(sg_alloc_table);
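
/*
 * Usage sketch (added for illustration, not part of the original file):
 * a typical sg_alloc_table() call sequence for an array of pages, one page
 * per entry.  The function name is hypothetical and error handling is kept
 * to the minimum.
 */
static int __maybe_unused sg_example_build_table(struct page **pages,
						 unsigned int n_pages)
{
	struct sg_table table;
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	ret = sg_alloc_table(&table, n_pages, GFP_KERNEL);
	if (ret)
		return ret;

	/* for_each_sg() transparently follows any chaining. */
	for_each_sg(table.sgl, sg, table.nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	/* ... dma_map_sg(), perform I/O, dma_unmap_sg() ... */

	sg_free_table(&table);
	return 0;
}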

/**
 * sg_alloc_table_from_pages - Allocate and initialize an sg table from
 *			       an array of pages
 * @sgt:	The sg table header to use
 * @pages:	Pointer to an array of page pointers
 * @n_pages:	Number of pages in the pages array
 * @offset:	Offset from start of the first page to the start of a buffer
 * @size:	Number of valid bytes in the buffer (after offset)
 * @gfp_mask:	GFP allocation mask
 *
 * Description:
 *   Allocate and initialize an sg table from a list of pages. Contiguous
 *   ranges of the pages are squashed into a single scatterlist node. A user
 *   may provide an offset at a start and a size of valid data in a buffer
 *   specified by the page array. The returned sg table is released by
 *   sg_free_table().
 *
 * Returns:
 *   0 on success, negative error on failure
 **/
int sg_alloc_table_from_pages(struct sg_table *sgt,
	struct page **pages, unsigned int n_pages,
	unsigned long offset, unsigned long size,
	gfp_t gfp_mask)
{
	unsigned int chunks;
	unsigned int i;
	unsigned int cur_page;
	int ret;
	struct scatterlist *s;

	/* compute number of contiguous chunks */
	chunks = 1;
	for (i = 1; i < n_pages; ++i)
		if (page_to_pfn(pages[i]) != page_to_pfn(pages[i - 1]) + 1)
			++chunks;

	ret = sg_alloc_table(sgt, chunks, gfp_mask);
	if (unlikely(ret))
		return ret;

	/* merging chunks and putting them into the scatterlist */
	cur_page = 0;
	for_each_sg(sgt->sgl, s, sgt->orig_nents, i) {
		unsigned long chunk_size;
		unsigned int j;

		/* look for the end of the current chunk */
		for (j = cur_page + 1; j < n_pages; ++j)
			if (page_to_pfn(pages[j]) !=
			    page_to_pfn(pages[j - 1]) + 1)
				break;

		chunk_size = ((j - cur_page) << PAGE_SHIFT) - offset;
		sg_set_page(s, pages[cur_page], min(size, chunk_size), offset);
		size -= chunk_size;
		offset = 0;
		cur_page = j;
	}

	return 0;
}
EXPORT_SYMBOL(sg_alloc_table_from_pages);

void __sg_page_iter_start(struct sg_page_iter *piter,
			  struct scatterlist *sglist, unsigned int nents,
			  unsigned long pgoffset)
{
	piter->__pg_advance = 0;
	piter->__nents = nents;

	piter->sg = sglist;
	piter->sg_pgoffset = pgoffset;
}
EXPORT_SYMBOL(__sg_page_iter_start);

static int sg_page_count(struct scatterlist *sg)
{
	return PAGE_ALIGN(sg->offset + sg->length) >> PAGE_SHIFT;
}

bool __sg_page_iter_next(struct sg_page_iter *piter)
{
	if (!piter->__nents || !piter->sg)
		return false;

	piter->sg_pgoffset += piter->__pg_advance;
	piter->__pg_advance = 1;

	while (piter->sg_pgoffset >= sg_page_count(piter->sg)) {
		piter->sg_pgoffset -= sg_page_count(piter->sg);
		piter->sg = sg_next(piter->sg);
		if (!--piter->__nents || !piter->sg)
			return false;
	}

	return true;
}
EXPORT_SYMBOL(__sg_page_iter_next);

/**
 * sg_miter_start - start mapping iteration over a sg list
 * @miter:	sg mapping iter to be started
 * @sgl:	sg list to iterate over
 * @nents:	number of sg entries
 * @flags:	SG_MITER_* flags
 *
 * Description:
 *   Starts mapping iterator @miter.
 *
 * Context:
 *   Don't care.
 */
void sg_miter_start(struct sg_mapping_iter *miter, struct scatterlist *sgl,
		    unsigned int nents, unsigned int flags)
{
	memset(miter, 0, sizeof(struct sg_mapping_iter));

	__sg_page_iter_start(&miter->piter, sgl, nents, 0);
	WARN_ON(!(flags & (SG_MITER_TO_SG | SG_MITER_FROM_SG)));
	miter->__flags = flags;
}
EXPORT_SYMBOL(sg_miter_start);

static bool sg_miter_get_next_page(struct sg_mapping_iter *miter)
{
	if (!miter->__remaining) {
		struct scatterlist *sg;
		unsigned long pgoffset;

		if (!__sg_page_iter_next(&miter->piter))
			return false;

		sg = miter->piter.sg;
		pgoffset = miter->piter.sg_pgoffset;

		miter->__offset = pgoffset ? 0 : sg->offset;
		miter->__remaining = sg->offset + sg->length -
				(pgoffset << PAGE_SHIFT) - miter->__offset;
		miter->__remaining = min_t(unsigned long, miter->__remaining,
					   PAGE_SIZE - miter->__offset);
	}

	return true;
}

/**
 * sg_miter_skip - reposition mapping iterator
 * @miter:	sg mapping iter to be skipped
 * @offset:	number of bytes to advance past the current position
 *
 * Description:
 *   Sets the position of @miter to its current location plus @offset bytes.
 *   If @miter had been advanced with sg_miter_next(), it is stopped first.
 *
 * Returns:
 *   true if @miter contains the offset, false if the sg list is too short.
 */
bool sg_miter_skip(struct sg_mapping_iter *miter, off_t offset)
{
	sg_miter_stop(miter);

	while (offset) {
		off_t consumed;

		if (!sg_miter_get_next_page(miter))
			return false;

		consumed = min_t(off_t, offset, miter->__remaining);
		miter->__offset += consumed;
		miter->__remaining -= consumed;
		offset -= consumed;
	}

	return true;
}
EXPORT_SYMBOL(sg_miter_skip);

/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter:	sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter to the next mapping.  @miter should have been started
 *   using sg_miter_start().  On successful return, @miter->page,
 *   @miter->addr and @miter->length point to the current mapping.
 *
 * Context:
 *   Preemption disabled if SG_MITER_ATOMIC.  Preemption must stay disabled
 *   till @miter is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	sg_miter_stop(miter);

	/*
	 * Get the next page.  sg_miter_get_next_page() only advances the
	 * page iterator when nothing is left of the current page
	 * (__remaining == 0), so a partially consumed page is resumed.
	 */
	if (!sg_miter_get_next_page(miter))
		return false;

	miter->page = sg_page_iter_page(&miter->piter);
	miter->consumed = miter->length = miter->__remaining;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + miter->__offset;
	else
		miter->addr = kmap(miter->page) + miter->__offset;

	return true;
}
EXPORT_SYMBOL(sg_miter_next);

/**
 * sg_miter_stop - stop mapping iteration
 * @miter:	sg mapping iter to be stopped
 *
 * Description:
 *   Stops mapping iterator @miter.  @miter should have been started
 *   using sg_miter_start().  A stopped iteration can be resumed by
 *   calling sg_miter_next() on it.  This is useful when resources (kmap)
 *   need to be released during iteration.
 *
 * Context:
 *   Preemption disabled if the SG_MITER_ATOMIC is set.  Don't care
 *   otherwise.
 */
void sg_miter_stop(struct sg_mapping_iter *miter)
{
	WARN_ON(miter->consumed > miter->length);

	/* drop resources from the last iteration */
	if (miter->addr) {
		miter->__offset += miter->consumed;
		miter->__remaining -= miter->consumed;

		if ((miter->__flags & SG_MITER_TO_SG) &&
		    !PageSlab(miter->page))
			flush_kernel_dcache_page(miter->page);

		if (miter->__flags & SG_MITER_ATOMIC) {
			WARN_ON_ONCE(preemptible());
			kunmap_atomic(miter->addr);
		} else
			kunmap(miter->page);

		miter->page = NULL;
		miter->addr = NULL;
		miter->length = 0;
		miter->consumed = 0;
	}
}
EXPORT_SYMBOL(sg_miter_stop);
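
/*
 * Usage sketch (added for illustration, not part of the original file):
 * clearing the data described by a scatterlist with the mapping iterator.
 * The function name is hypothetical.
 */
static void __maybe_unused sg_example_zero(struct scatterlist *sgl,
					   unsigned int nents)
{
	struct sg_mapping_iter miter;

	sg_miter_start(&miter, sgl, nents, SG_MITER_TO_SG);

	/* Each successful sg_miter_next() maps at most one page. */
	while (sg_miter_next(&miter))
		memset(miter.addr, 0, miter.length);

	/* Unmaps the last page and flushes it, since SG_MITER_TO_SG is set. */
	sg_miter_stop(&miter);
}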

/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	The linear buffer
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 * @to_buffer:	transfer direction (true == from an sg list to a
 *		buffer, false == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, off_t skip,
			     bool to_buffer)
{
	unsigned int offset = 0;
	struct sg_mapping_iter miter;
	unsigned long flags;
	unsigned int sg_flags = SG_MITER_ATOMIC;

	if (to_buffer)
		sg_flags |= SG_MITER_FROM_SG;
	else
		sg_flags |= SG_MITER_TO_SG;

	sg_miter_start(&miter, sgl, nents, sg_flags);

	if (!sg_miter_skip(&miter, skip))
		return 0;

	local_irq_save(flags);

	while (sg_miter_next(&miter) && offset < buflen) {
		unsigned int len;

		len = min(miter.length, buflen - offset);

		if (to_buffer)
			memcpy(buf + offset, miter.addr, len);
		else
			memcpy(miter.addr, buf + offset, len);

		offset += len;
	}

	sg_miter_stop(&miter);

	local_irq_restore(flags);
	return offset;
}

/**
 * sg_copy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			   void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, false);
}
EXPORT_SYMBOL(sg_copy_from_buffer);

/**
 * sg_copy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 *
 * Returns the number of copied bytes.
 **/
size_t sg_copy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			 void *buf, size_t buflen)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, 0, true);
}
EXPORT_SYMBOL(sg_copy_to_buffer);
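
/*
 * Usage sketch (added for illustration, not part of the original file):
 * copying the first @buflen bytes described by a scatterlist into a linear
 * buffer and writing them back again.  The function name is hypothetical.
 */
static size_t __maybe_unused sg_example_roundtrip(struct scatterlist *sgl,
						  unsigned int nents,
						  void *buf, size_t buflen)
{
	size_t copied;

	/* gather: sg list -> buf */
	copied = sg_copy_to_buffer(sgl, nents, buf, buflen);

	/* scatter: buf -> sg list */
	return sg_copy_from_buffer(sgl, nents, buf, copied);
}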

/**
 * sg_pcopy_from_buffer - Copy from a linear buffer to an SG list
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy from
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/
size_t sg_pcopy_from_buffer(struct scatterlist *sgl, unsigned int nents,
			    void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, false);
}
EXPORT_SYMBOL(sg_pcopy_from_buffer);

/**
 * sg_pcopy_to_buffer - Copy from an SG list to a linear buffer
 * @sgl:	The SG list
 * @nents:	Number of SG entries
 * @buf:	Where to copy to
 * @buflen:	The number of bytes to copy
 * @skip:	Number of bytes to skip before copying
 *
 * Returns the number of copied bytes.
 **/
size_t sg_pcopy_to_buffer(struct scatterlist *sgl, unsigned int nents,
			  void *buf, size_t buflen, off_t skip)
{
	return sg_copy_buffer(sgl, nents, buf, buflen, skip, true);
}
EXPORT_SYMBOL(sg_pcopy_to_buffer);