/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h>

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, params, sizeof(pool->p));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL is for allowing the page to be used for DMA
	 * sending, which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
		return -ENOMEM;

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* A driver that calls page_pool_create() must also call
	 * page_pool_destroy().
	 */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0) {
		pr_warn("%s() gave up with errno %d\n", __func__, err);
		kfree(pool);
		return ERR_PTR(err);
	}

	return pool;
}
EXPORT_SYMBOL(page_pool_create);

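/* Illustrative sketch of typical driver setup; the field values below are
 * hypothetical driver choices (queue size, headroom, device pointer), not
 * requirements of the API:
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 256,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= priv->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.offset		= XDP_PACKET_HEADROOM,
 *		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
 *	};
 *	struct page_pool *pool = page_pool_create(&pp_params);
 *
 *	if (IS_ERR(pool))
 *		return PTR_ERR(pool);
 */
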
static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r))
		return NULL;

	/* Softirq context guarantees the CPU, and thus the NUMA node,
	 * stays stable.  This assumes the CPU refilling the driver
	 * RX-ring also runs RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Slower-path: Get pages from locked ring queue */
	spin_lock(&r->consumer_lock);

	/* Refill alloc array, but only if NUMA match */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch:
			 * (1) release 1 page to the page allocator and
			 * (2) break out to fall through to the slow path.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0))
		page = pool->alloc.cache[--pool->alloc.count];

	spin_unlock(&r->consumer_lock);
	return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, page->dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t _gfp)
{
	struct page *page;
	gfp_t gfp = _gfp;
	dma_addr_t dma;

	/* We could always set __GFP_COMP, and avoid this branch, as
	 * prep_new_page() can handle order-0 with __GFP_COMP.
	 */
	if (pool->p.order)
		gfp |= __GFP_COMP;

	/* The slow path currently falls back to single page allocations,
	 * which does not improve performance; it would benefit from bulk
	 * allocation support in the page allocator.
	 */

	/* Cache was empty, do real allocation */
#ifdef CONFIG_NUMA
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
#else
	page = alloc_pages(gfp, pool->p.order);
#endif
	if (!page)
		return NULL;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		goto skip_dma_map;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always
	 * fit into page private data (i.e. 32bit cpu with 64bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it
	 * leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(pool->p.dev, dma)) {
		put_page(page);
		return NULL;
	}
	page->dma_addr = dma;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;

	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

	/* A freshly allocated page should/must have refcnt 1 */
	return page;
}

/* page_pool_alloc_pages() replaces alloc_pages() API calls, but provides
 * a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);

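/* Illustrative sketch of the allocation call in a driver's RX-refill loop;
 * page_pool_dev_alloc_pages() is the GFP_ATOMIC convenience wrapper from
 * include/net/page_pool.h, and "rxq->page_pool" is a hypothetical driver
 * field:
 *
 *	page = page_pool_dev_alloc_pages(rxq->page_pool);
 *	if (unlikely(!page))
 *		break;
 *	dma_addr = page_pool_get_dma_addr(page);
 */
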
/* Calculate distance between two u32 values, valid if distance is below
 * 2^(31), i.e. serial number arithmetic.
 */
#define _distance(a, b)	(s32)((a) - (b))

static s32 page_pool_inflight(struct page_pool *pool)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
	WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

	return inflight;
}

/* Disconnect a page from a page_pool.  API users can have a need to
 * disconnect a driver page (release its DMA mapping), which is useful
 * e.g. when a page is leaving the page_pool or is being migrated to the
 * skb frag technique.
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;
	int count;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		goto skip_dma_unmap;

	dma = page->dma_addr;

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC);
	page->dma_addr = 0;
skip_dma_unmap:
	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	page_pool_release_page(pool, page);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing the page is not part of the page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;

	/* BH protection not needed if current is serving softirq */
	if (in_serving_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
		return false;

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	return true;
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->p.max_len).
 * If the page refcnt != 1, the page will be returned to the memory
 * subsystem.
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	/* This allocator is optimized for the XDP mode that uses
	 * one frame per page, but it has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 *
	 * A page is NOT reusable when it was allocated while the system
	 * was under memory pressure (page_is_pfmemalloc()).
	 */
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_serving_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}

	/* Fallback/non-XDP mode: API user has an elevated refcnt.
	 *
	 * Many drivers split the page into fragments, and some want to
	 * keep doing this to save memory and do refcnt based recycling.
	 * Support this use case too, to ease drivers switching between
	 * XDP and non-XDP modes.
	 *
	 * In case the page_pool maintains the DMA mapping, the API user
	 * must call page_pool_put_page() once; here the DMA mapping is
	 * released, as the driver is likely doing refcnt based recycling.
	 * Another use case is XDP_REDIRECT, where the page can be sent to
	 * a remote CPU, which will free it differently.
	 */
	page_pool_release_page(pool, page);
	put_page(page);

	return NULL;
}


void page_pool_put_page(struct page_pool *pool, struct page *page,
			unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_page);

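/* Illustrative sketch of the recycle helpers a driver normally calls instead
 * of page_pool_put_page() directly; both are wrappers from
 * include/net/page_pool.h, and "pool"/"page" are hypothetical locals:
 *
 *	// XDP_DROP inside NAPI: recycle directly into the alloc-side cache
 *	page_pool_recycle_direct(pool, page);
 *
 *	// generic release: sync up to pool->p.max_len, recycle via ptr_ring
 *	page_pool_put_full_page(pool, page, false);
 */
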
/* Caller must not use data area after call, as this function overwrites it */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	page_pool_ring_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i]))
			break; /* ring full */
	}
	page_pool_ring_unlock(pool);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);

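/* Note: the bulk API above exists so that callers freeing many frames at
 * once (e.g. the XDP frame-return path) can take the ptr_ring producer lock
 * once per batch instead of once per page.
 */
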
static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}

static void page_pool_free(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

	kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no-longer in use, and nobody will run concurrently with
	 * this function.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}

static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool);
	if (!inflight)
		page_pool_free(pool);

	return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning */
	if (time_after_eq(jiffies, pool->defer_warn)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
			__func__, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	if (!page_pool_release(pool))
		return;

	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);

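/* Shutdown note: page_pool_destroy() frees the pool immediately only when no
 * pages are still in-flight; otherwise the delayed work above keeps retrying
 * (and warning every DEFER_WARN_INTERVAL) until every outstanding page has
 * been returned via page_pool_release_page()/page_pool_return_page().
 */
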
/* Caller must provide safe context, such as NAPI, for page_pool_update_nid() */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);