// SPDX-License-Identifier: GPL-2.0
/* XDP user-space packet buffer
 * Copyright(c) 2018 Intel Corporation.
 */

#include <linux/init.h>
#include <linux/sched/mm.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/mm.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/idr.h>

#include "xdp_umem.h"
#include "xsk_queue.h"

#define XDP_UMEM_MIN_CHUNK_SIZE 2048

static DEFINE_IDA(umem_ida);

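/* Add an AF_XDP socket to the umem's socket list. Writers serialize on
 * the IRQ-safe xsk_list_lock; list_add_rcu() keeps the list walkable by
 * lockless RCU readers in the data path.
 */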
void xdp_add_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_add_rcu(&xs->list, &umem->xsk_list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

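/* Remove an AF_XDP socket from the umem's socket list. list_del_rcu()
 * lets concurrent RCU readers finish walking the list safely.
 */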
void xdp_del_sk_umem(struct xdp_umem *umem, struct xdp_sock *xs)
{
	unsigned long flags;

	spin_lock_irqsave(&umem->xsk_list_lock, flags);
	list_del_rcu(&xs->list);
	spin_unlock_irqrestore(&umem->xsk_list_lock, flags);
}

/* The umem is stored both in the _rx struct and the _tx struct as we do
 * not know if the device has more tx queues than rx, or the opposite.
 * This might also change during run time.
 */
static int xdp_reg_umem_at_qid(struct net_device *dev, struct xdp_umem *umem,
			       u16 queue_id)
{
	if (queue_id >= max_t(unsigned int,
			      dev->real_num_rx_queues,
			      dev->real_num_tx_queues))
		return -EINVAL;

	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = umem;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = umem;

	return 0;
}

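/* Look up the umem bound to @queue_id on @dev, or NULL if none is
 * registered there. Exported so drivers can retrieve the umem for a
 * given queue.
 */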
struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
				       u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		return dev->_rx[queue_id].umem;
	if (queue_id < dev->real_num_tx_queues)
		return dev->_tx[queue_id].umem;

	return NULL;
}
EXPORT_SYMBOL(xdp_get_umem_from_qid);

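/* Undo xdp_reg_umem_at_qid(): clear the umem pointer from both the rx
 * and tx queue structs for @queue_id.
 */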
static void xdp_clear_umem_at_qid(struct net_device *dev, u16 queue_id)
{
	if (queue_id < dev->real_num_rx_queues)
		dev->_rx[queue_id].umem = NULL;
	if (queue_id < dev->real_num_tx_queues)
		dev->_tx[queue_id].umem = NULL;
}

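/* Bind a umem to a device queue. With XDP_COPY we are done once the
 * umem is registered at the queue id. Otherwise we try to enable
 * zero-copy through the driver's ndo_bpf()/ndo_xsk_async_xmit() hooks,
 * and fall back to copy mode unless XDP_ZEROCOPY was explicitly
 * requested. Must be called with the rtnl lock held.
 */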
int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
			u16 queue_id, u16 flags)
{
	bool force_zc, force_copy;
	struct netdev_bpf bpf;
	int err = 0;

	ASSERT_RTNL();

	force_zc = flags & XDP_ZEROCOPY;
	force_copy = flags & XDP_COPY;

	if (force_zc && force_copy)
		return -EINVAL;

	if (xdp_get_umem_from_qid(dev, queue_id))
		return -EBUSY;

	err = xdp_reg_umem_at_qid(dev, umem, queue_id);
	if (err)
		return err;

	umem->dev = dev;
	umem->queue_id = queue_id;

	dev_hold(dev);

	if (force_copy)
		/* For copy-mode, we are done. */
		return 0;

	if (!dev->netdev_ops->ndo_bpf ||
	    !dev->netdev_ops->ndo_xsk_async_xmit) {
		err = -EOPNOTSUPP;
		goto err_unreg_umem;
	}

	bpf.command = XDP_SETUP_XSK_UMEM;
	bpf.xsk.umem = umem;
	bpf.xsk.queue_id = queue_id;

	err = dev->netdev_ops->ndo_bpf(dev, &bpf);
	if (err)
		goto err_unreg_umem;

	umem->zc = true;
	return 0;

err_unreg_umem:
	if (!force_zc)
		err = 0; /* fallback to copy mode */
	if (err)
		xdp_clear_umem_at_qid(dev, queue_id);
	return err;
}

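/* Undo xdp_umem_assign_dev(): tell the driver to tear down its
 * zero-copy state (if any), unregister the umem from the queue and
 * drop the device reference. Must be called with the rtnl lock held.
 */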
void xdp_umem_clear_dev(struct xdp_umem *umem)
{
	struct netdev_bpf bpf;
	int err;

	ASSERT_RTNL();

	if (!umem->dev)
		return;

	if (umem->zc) {
		bpf.command = XDP_SETUP_XSK_UMEM;
		bpf.xsk.umem = NULL;
		bpf.xsk.queue_id = umem->queue_id;

		err = umem->dev->netdev_ops->ndo_bpf(umem->dev, &bpf);

		if (err)
			WARN(1, "failed to disable umem!\n");
	}

	xdp_clear_umem_at_qid(umem->dev, umem->queue_id);

	dev_put(umem->dev);
	umem->dev = NULL;
	umem->zc = false;
}

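/* Release the pinned user pages. The kernel may have written packet
 * data into them, so mark each page dirty before dropping its
 * reference.
 */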
static void xdp_umem_unpin_pages(struct xdp_umem *umem)
{
	unsigned int i;

	for (i = 0; i < umem->npgs; i++) {
		struct page *page = umem->pgs[i];

		set_page_dirty_lock(page);
		put_page(page);
	}

	kfree(umem->pgs);
	umem->pgs = NULL;
}

static void xdp_umem_unaccount_pages(struct xdp_umem *umem)
{
	if (umem->user) {
		atomic_long_sub(umem->npgs, &umem->user->locked_vm);
		free_uid(umem->user);
	}
}

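/* Final teardown: detach from the device, free the fill and completion
 * rings, unpin and unaccount the user pages, and free the umem itself.
 */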
static void xdp_umem_release(struct xdp_umem *umem)
{
	rtnl_lock();
	xdp_umem_clear_dev(umem);
	rtnl_unlock();

	ida_simple_remove(&umem_ida, umem->id);

	if (umem->fq) {
		xskq_destroy(umem->fq);
		umem->fq = NULL;
	}

	if (umem->cq) {
		xskq_destroy(umem->cq);
		umem->cq = NULL;
	}

	xsk_reuseq_destroy(umem);

	xdp_umem_unpin_pages(umem);

	kfree(umem->pages);
	umem->pages = NULL;

	xdp_umem_unaccount_pages(umem);
	kfree(umem);
}

static void xdp_umem_release_deferred(struct work_struct *work)
{
	struct xdp_umem *umem = container_of(work, struct xdp_umem, work);

	xdp_umem_release(umem);
}

void xdp_get_umem(struct xdp_umem *umem)
{
	refcount_inc(&umem->users);
}

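/* Drop a reference. On the final put, the actual release is deferred to
 * the system workqueue, since xdp_umem_release() takes the rtnl lock
 * and may sleep.
 */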
void xdp_put_umem(struct xdp_umem *umem)
{
	if (!umem)
		return;

	if (refcount_dec_and_test(&umem->users)) {
		INIT_WORK(&umem->work, xdp_umem_release_deferred);
		schedule_work(&umem->work);
	}
}

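/* Pin the user memory backing the umem. FOLL_LONGTERM is used because
 * the pages stay pinned for the lifetime of the umem, FOLL_WRITE
 * because the kernel writes received packet data into them.
 */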
static int xdp_umem_pin_pages(struct xdp_umem *umem)
{
	unsigned int gup_flags = FOLL_WRITE;
	long npgs;
	int err;

	umem->pgs = kcalloc(umem->npgs, sizeof(*umem->pgs),
			    GFP_KERNEL | __GFP_NOWARN);
	if (!umem->pgs)
		return -ENOMEM;

	down_read(&current->mm->mmap_sem);
	npgs = get_user_pages(umem->address, umem->npgs,
			      gup_flags | FOLL_LONGTERM, &umem->pgs[0], NULL);
	up_read(&current->mm->mmap_sem);

	if (npgs != umem->npgs) {
		if (npgs >= 0) {
			umem->npgs = npgs;
			err = -ENOMEM;
			goto out_pin;
		}
		err = npgs;
		goto out_pgs;
	}
	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_pgs:
	kfree(umem->pgs);
	umem->pgs = NULL;
	return err;
}

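/* Charge the pinned pages against the user's RLIMIT_MEMLOCK. The
 * cmpxchg loop updates locked_vm without taking a lock and retries if
 * another task raced with us. CAP_IPC_LOCK bypasses the limit entirely.
 */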
static int xdp_umem_account_pages(struct xdp_umem *umem)
{
	unsigned long lock_limit, new_npgs, old_npgs;

	if (capable(CAP_IPC_LOCK))
		return 0;

	lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	umem->user = get_uid(current_user());

	do {
		old_npgs = atomic_long_read(&umem->user->locked_vm);
		new_npgs = old_npgs + umem->npgs;
		if (new_npgs > lock_limit) {
			free_uid(umem->user);
			umem->user = NULL;
			return -ENOBUFS;
		}
	} while (atomic_long_cmpxchg(&umem->user->locked_vm, old_npgs,
				     new_npgs) != old_npgs);
	return 0;
}

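/* Validate a registration request and set up the umem: check chunk
 * size, alignment and headroom, account and pin the user pages, and
 * build the kernel-side page address table.
 */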
static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr)
{
	u32 chunk_size = mr->chunk_size, headroom = mr->headroom;
	unsigned int chunks, chunks_per_page;
	u64 addr = mr->addr, size = mr->len;
	int size_chk, err, i;

	if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) {
		/* Strictly speaking we could support this, if:
		 * - huge pages, or
		 * - using an IOMMU, or
		 * - making sure the memory area is consecutive
		 * but for now, we simply say "computer says no".
		 */
		return -EINVAL;
	}

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	if (!PAGE_ALIGNED(addr)) {
		/* Memory area has to be page size aligned. For
		 * simplicity, this might change.
		 */
		return -EINVAL;
	}

	if ((addr + size) < addr)
		return -EINVAL;

	chunks = (unsigned int)div_u64(size, chunk_size);
	if (chunks == 0)
		return -EINVAL;

	chunks_per_page = PAGE_SIZE / chunk_size;
	if (chunks < chunks_per_page || chunks % chunks_per_page)
		return -EINVAL;

	headroom = ALIGN(headroom, 64);

	size_chk = chunk_size - headroom - XDP_PACKET_HEADROOM;
	if (size_chk < 0)
		return -EINVAL;

	umem->address = (unsigned long)addr;
	umem->chunk_mask = ~((u64)chunk_size - 1);
	umem->size = size;
	umem->headroom = headroom;
	umem->chunk_size_nohr = chunk_size - headroom;
	umem->npgs = size / PAGE_SIZE;
	umem->pgs = NULL;
	umem->user = NULL;
	INIT_LIST_HEAD(&umem->xsk_list);
	spin_lock_init(&umem->xsk_list_lock);

	refcount_set(&umem->users, 1);

	err = xdp_umem_account_pages(umem);
	if (err)
		return err;

	err = xdp_umem_pin_pages(umem);
	if (err)
		goto out_account;

	umem->pages = kcalloc(umem->npgs, sizeof(*umem->pages), GFP_KERNEL);
	if (!umem->pages) {
		err = -ENOMEM;
		goto out_pin;
	}

	for (i = 0; i < umem->npgs; i++)
		umem->pages[i].addr = page_address(umem->pgs[i]);

	return 0;

out_pin:
	xdp_umem_unpin_pages(umem);
out_account:
	xdp_umem_unaccount_pages(umem);
	return err;
}

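/* Allocate a umem, assign it an id from the IDA and register the user
 * memory described by @mr. Returns the umem or an ERR_PTR() on failure.
 */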
struct xdp_umem *xdp_umem_create(struct xdp_umem_reg *mr)
{
	struct xdp_umem *umem;
	int err;

	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
	if (!umem)
		return ERR_PTR(-ENOMEM);

	err = ida_simple_get(&umem_ida, 0, 0, GFP_KERNEL);
	if (err < 0) {
		kfree(umem);
		return ERR_PTR(err);
	}
	umem->id = err;

	err = xdp_umem_reg(umem, mr);
	if (err) {
		ida_simple_remove(&umem_ida, umem->id);
		kfree(umem);
		return ERR_PTR(err);
	}

	return umem;
}

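/* A umem is only usable once both the fill ring (fq) and the
 * completion ring (cq) have been created.
 */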
bool xdp_umem_validate_queues(struct xdp_umem *umem)
{
	return umem->fq && umem->cq;
}