linux/net/xdp/xsk_buff_pool.c
// SPDX-License-Identifier: GPL-2.0

#include <net/xsk_buff_pool.h>
#include <net/xdp_sock.h>
#include <net/xdp_sock_drv.h>

#include "xsk_queue.h"
#include "xdp_umem.h"
#include "xsk.h"

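/* Add a Tx-bound socket to the pool's Tx list so the transmit path can walk
 * all sockets sharing this pool. Rx-only sockets are skipped.
 */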
void xp_add_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
        unsigned long flags;

        if (!xs->tx)
                return;

        spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
        list_add_rcu(&xs->tx_list, &pool->xsk_tx_list);
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

void xp_del_xsk(struct xsk_buff_pool *pool, struct xdp_sock *xs)
{
        unsigned long flags;

        if (!xs->tx)
                return;

        spin_lock_irqsave(&pool->xsk_tx_list_lock, flags);
        list_del_rcu(&xs->tx_list);
        spin_unlock_irqrestore(&pool->xsk_tx_list_lock, flags);
}

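/* Free the pool and its buffer metadata (heads). Safe to call with NULL. */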
void xp_destroy(struct xsk_buff_pool *pool)
{
        if (!pool)
                return;

        kvfree(pool->heads);
        kvfree(pool);
}

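/* Allocate a buffer pool for @xs and derive its parameters from the already
 * registered @umem: chunk size, headroom, number of chunks and whether the
 * unaligned-chunk mode is in use. The fill and completion rings created on
 * the socket are taken over by the pool, and every xdp_buff_xsk head is
 * pre-initialized and pushed onto the free_heads stack.
 */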
struct xsk_buff_pool *xp_create_and_assign_umem(struct xdp_sock *xs,
                                                struct xdp_umem *umem)
{
        struct xsk_buff_pool *pool;
        struct xdp_buff_xsk *xskb;
        u32 i;

        pool = kvzalloc(struct_size(pool, free_heads, umem->chunks),
                        GFP_KERNEL);
        if (!pool)
                goto out;

        pool->heads = kvcalloc(umem->chunks, sizeof(*pool->heads), GFP_KERNEL);
        if (!pool->heads)
                goto out;

        pool->chunk_mask = ~((u64)umem->chunk_size - 1);
        pool->addrs_cnt = umem->size;
        pool->heads_cnt = umem->chunks;
        pool->free_heads_cnt = umem->chunks;
        pool->headroom = umem->headroom;
        pool->chunk_size = umem->chunk_size;
        pool->unaligned = umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG;
        pool->frame_len = umem->chunk_size - umem->headroom -
                XDP_PACKET_HEADROOM;
        pool->umem = umem;
        pool->addrs = umem->addrs;
        INIT_LIST_HEAD(&pool->free_list);
        INIT_LIST_HEAD(&pool->xsk_tx_list);
        spin_lock_init(&pool->xsk_tx_list_lock);
        spin_lock_init(&pool->cq_lock);
        refcount_set(&pool->users, 1);

        pool->fq = xs->fq_tmp;
        pool->cq = xs->cq_tmp;

        for (i = 0; i < pool->free_heads_cnt; i++) {
                xskb = &pool->heads[i];
                xskb->pool = pool;
                xskb->xdp.frame_sz = umem->chunk_size - umem->headroom;
                pool->free_heads[i] = xskb;
        }

        return pool;

out:
        xp_destroy(pool);
        return NULL;
}

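/* Point every buffer's xdp_buff at the driver's Rx queue info. */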
void xp_set_rxq_info(struct xsk_buff_pool *pool, struct xdp_rxq_info *rxq)
{
        u32 i;

        for (i = 0; i < pool->heads_cnt; i++)
                pool->heads[i].xdp.rxq = rxq;
}
EXPORT_SYMBOL(xp_set_rxq_info);

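/* Ask the driver to tear down its zero-copy state for this queue by
 * installing a NULL pool. Only needed if zero-copy was ever enabled.
 */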
static void xp_disable_drv_zc(struct xsk_buff_pool *pool)
{
        struct netdev_bpf bpf;
        int err;

        ASSERT_RTNL();

        if (pool->umem->zc) {
                bpf.command = XDP_SETUP_XSK_POOL;
                bpf.xsk.pool = NULL;
                bpf.xsk.queue_id = pool->queue_id;

                err = pool->netdev->netdev_ops->ndo_bpf(pool->netdev, &bpf);

                if (err)
                        WARN(1, "Failed to disable zero-copy!\n");
        }
}

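/* Bind the pool to a netdev/queue pair. Copy mode is always available;
 * zero-copy additionally requires ndo_bpf and ndo_xsk_wakeup support and a
 * successful XDP_SETUP_XSK_POOL command. Unless XDP_ZEROCOPY was explicitly
 * requested, a failed zero-copy setup falls back to copy mode.
 */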
int xp_assign_dev(struct xsk_buff_pool *pool,
                  struct net_device *netdev, u16 queue_id, u16 flags)
{
        bool force_zc, force_copy;
        struct netdev_bpf bpf;
        int err = 0;

        ASSERT_RTNL();

        force_zc = flags & XDP_ZEROCOPY;
        force_copy = flags & XDP_COPY;

        if (force_zc && force_copy)
                return -EINVAL;

        if (xsk_get_pool_from_qid(netdev, queue_id))
                return -EBUSY;

        pool->netdev = netdev;
        pool->queue_id = queue_id;
        err = xsk_reg_pool_at_qid(netdev, pool, queue_id);
        if (err)
                return err;

        if (flags & XDP_USE_NEED_WAKEUP)
                pool->uses_need_wakeup = true;
        /* Tx needs to be explicitly woken up the first time.  Also
         * for supporting drivers that do not implement this
         * feature. They will always have to call sendto() or poll().
         */
        pool->cached_need_wakeup = XDP_WAKEUP_TX;

        dev_hold(netdev);

        if (force_copy)
                /* For copy-mode, we are done. */
                return 0;

        if (!netdev->netdev_ops->ndo_bpf ||
            !netdev->netdev_ops->ndo_xsk_wakeup) {
                err = -EOPNOTSUPP;
                goto err_unreg_pool;
        }

        bpf.command = XDP_SETUP_XSK_POOL;
        bpf.xsk.pool = pool;
        bpf.xsk.queue_id = queue_id;

        err = netdev->netdev_ops->ndo_bpf(netdev, &bpf);
        if (err)
                goto err_unreg_pool;

        if (!pool->dma_pages) {
                WARN(1, "Driver did not DMA map zero-copy buffers");
                err = -EINVAL;
                goto err_unreg_xsk;
        }
        pool->umem->zc = true;
        return 0;

err_unreg_xsk:
        xp_disable_drv_zc(pool);
err_unreg_pool:
        if (!force_zc)
                err = 0; /* fallback to copy mode */
        if (err) {
                xsk_clear_pool_at_qid(netdev, queue_id);
                dev_put(netdev);
        }
        return err;
}

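/* Bind path for a socket that shares an already bound umem but supplies its
 * own fill and completion rings for another netdev/queue pair. The new bind
 * inherits the zero-copy and need_wakeup choices of the original one.
 */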
int xp_assign_dev_shared(struct xsk_buff_pool *pool, struct xdp_umem *umem,
                         struct net_device *dev, u16 queue_id)
{
        u16 flags;

        /* One fill and completion ring required for each queue id. */
        if (!pool->fq || !pool->cq)
                return -EINVAL;

        flags = umem->zc ? XDP_ZEROCOPY : XDP_COPY;
        if (pool->uses_need_wakeup)
                flags |= XDP_USE_NEED_WAKEUP;

        return xp_assign_dev(pool, dev, queue_id, flags);
}

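/* Undo xp_assign_dev(): disable zero-copy in the driver, unregister the pool
 * from the queue id and drop the netdev reference. Called under the rtnl lock.
 */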
void xp_clear_dev(struct xsk_buff_pool *pool)
{
        if (!pool->netdev)
                return;

        xp_disable_drv_zc(pool);
        xsk_clear_pool_at_qid(pool->netdev, pool->queue_id);
        dev_put(pool->netdev);
        pool->netdev = NULL;
}

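/* Deferred teardown, run from a workqueue once the last pool reference is
 * gone: unbind from the device, destroy the fill and completion rings, drop
 * the umem reference and free the pool itself.
 */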
static void xp_release_deferred(struct work_struct *work)
{
        struct xsk_buff_pool *pool = container_of(work, struct xsk_buff_pool,
                                                  work);

        rtnl_lock();
        xp_clear_dev(pool);
        rtnl_unlock();

        if (pool->fq) {
                xskq_destroy(pool->fq);
                pool->fq = NULL;
        }

        if (pool->cq) {
                xskq_destroy(pool->cq);
                pool->cq = NULL;
        }

        xdp_put_umem(pool->umem, false);
        xp_destroy(pool);
}

void xp_get_pool(struct xsk_buff_pool *pool)
{
        refcount_inc(&pool->users);
}

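/* Drop a pool reference. The final put schedules the teardown on a workqueue
 * since it needs to take the rtnl lock. Returns true if this was the last
 * reference.
 */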
bool xp_put_pool(struct xsk_buff_pool *pool)
{
        if (!pool)
                return false;

        if (refcount_dec_and_test(&pool->users)) {
                INIT_WORK(&pool->work, xp_release_deferred);
                schedule_work(&pool->work);
                return true;
        }

        return false;
}

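/* DMA mappings are kept on the umem, keyed by netdev, so that pools created
 * by sockets sharing the same umem and device can reuse an existing mapping
 * instead of mapping the pages again.
 */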
static struct xsk_dma_map *xp_find_dma_map(struct xsk_buff_pool *pool)
{
        struct xsk_dma_map *dma_map;

        list_for_each_entry(dma_map, &pool->umem->xsk_dma_list, list) {
                if (dma_map->netdev == pool->netdev)
                        return dma_map;
        }

        return NULL;
}

static struct xsk_dma_map *xp_create_dma_map(struct device *dev, struct net_device *netdev,
                                             u32 nr_pages, struct xdp_umem *umem)
{
        struct xsk_dma_map *dma_map;

        dma_map = kzalloc(sizeof(*dma_map), GFP_KERNEL);
        if (!dma_map)
                return NULL;

        dma_map->dma_pages = kvcalloc(nr_pages, sizeof(*dma_map->dma_pages), GFP_KERNEL);
        if (!dma_map->dma_pages) {
                kfree(dma_map);
                return NULL;
        }

        dma_map->netdev = netdev;
        dma_map->dev = dev;
        dma_map->dma_need_sync = false;
        dma_map->dma_pages_cnt = nr_pages;
        refcount_set(&dma_map->users, 1);
        list_add(&dma_map->list, &umem->xsk_dma_list);
        return dma_map;
}

static void xp_destroy_dma_map(struct xsk_dma_map *dma_map)
{
        list_del(&dma_map->list);
        kvfree(dma_map->dma_pages);
        kfree(dma_map);
}

static void __xp_dma_unmap(struct xsk_dma_map *dma_map, unsigned long attrs)
{
        dma_addr_t *dma;
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = &dma_map->dma_pages[i];
                if (*dma) {
                        dma_unmap_page_attrs(dma_map->dev, *dma, PAGE_SIZE,
                                             DMA_BIDIRECTIONAL, attrs);
                        *dma = 0;
                }
        }

        xp_destroy_dma_map(dma_map);
}

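/* Drop this pool's use of the shared DMA mapping. The pages are only unmapped
 * once the last pool using the mapping is gone; the pool's private copy of
 * the address table is freed either way.
 */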
void xp_dma_unmap(struct xsk_buff_pool *pool, unsigned long attrs)
{
        struct xsk_dma_map *dma_map;

        if (pool->dma_pages_cnt == 0)
                return;

        dma_map = xp_find_dma_map(pool);
        if (!dma_map) {
                WARN(1, "Could not find dma_map for device");
                return;
        }

        if (!refcount_dec_and_test(&dma_map->users))
                return;

        __xp_dma_unmap(dma_map, attrs);
        kvfree(pool->dma_pages);
        pool->dma_pages_cnt = 0;
        pool->dev = NULL;
}
EXPORT_SYMBOL(xp_dma_unmap);

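/* Mark pages whose successor is contiguous in DMA address space. The flag is
 * stored in an otherwise unused bit of the DMA address and allows unaligned
 * chunks that straddle such a page boundary to be accepted.
 */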
static void xp_check_dma_contiguity(struct xsk_dma_map *dma_map)
{
        u32 i;

        for (i = 0; i < dma_map->dma_pages_cnt - 1; i++) {
                if (dma_map->dma_pages[i] + PAGE_SIZE == dma_map->dma_pages[i + 1])
                        dma_map->dma_pages[i] |= XSK_NEXT_PG_CONTIG_MASK;
                else
                        dma_map->dma_pages[i] &= ~XSK_NEXT_PG_CONTIG_MASK;
        }
}

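/* Give the pool its own copy of the DMA address table so the fast path does
 * not have to go through the shared xsk_dma_map.
 */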
static int xp_init_dma_info(struct xsk_buff_pool *pool, struct xsk_dma_map *dma_map)
{
        pool->dma_pages = kvcalloc(dma_map->dma_pages_cnt, sizeof(*pool->dma_pages), GFP_KERNEL);
        if (!pool->dma_pages)
                return -ENOMEM;

        pool->dev = dma_map->dev;
        pool->dma_pages_cnt = dma_map->dma_pages_cnt;
        pool->dma_need_sync = dma_map->dma_need_sync;
        memcpy(pool->dma_pages, dma_map->dma_pages,
               pool->dma_pages_cnt * sizeof(*pool->dma_pages));

        return 0;
}

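/* DMA map the umem pages for @dev on behalf of a zero-copy driver. If another
 * pool has already mapped this umem for the same netdev, that mapping is
 * reused; otherwise every page is mapped, per-page contiguity is recorded for
 * unaligned mode and the resulting address table is copied into the pool.
 */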
int xp_dma_map(struct xsk_buff_pool *pool, struct device *dev,
               unsigned long attrs, struct page **pages, u32 nr_pages)
{
        struct xsk_dma_map *dma_map;
        dma_addr_t dma;
        int err;
        u32 i;

        dma_map = xp_find_dma_map(pool);
        if (dma_map) {
                err = xp_init_dma_info(pool, dma_map);
                if (err)
                        return err;

                refcount_inc(&dma_map->users);
                return 0;
        }

        dma_map = xp_create_dma_map(dev, pool->netdev, nr_pages, pool->umem);
        if (!dma_map)
                return -ENOMEM;

        for (i = 0; i < dma_map->dma_pages_cnt; i++) {
                dma = dma_map_page_attrs(dev, pages[i], 0, PAGE_SIZE,
                                         DMA_BIDIRECTIONAL, attrs);
                if (dma_mapping_error(dev, dma)) {
                        __xp_dma_unmap(dma_map, attrs);
                        return -ENOMEM;
                }
                if (dma_need_sync(dev, dma))
                        dma_map->dma_need_sync = true;
                dma_map->dma_pages[i] = dma;
        }

        if (pool->unaligned)
                xp_check_dma_contiguity(dma_map);

        err = xp_init_dma_info(pool, dma_map);
        if (err) {
                __xp_dma_unmap(dma_map, attrs);
                return err;
        }

        return 0;
}
EXPORT_SYMBOL(xp_dma_map);

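/* Validation helpers for addresses read from the fill ring: the chunk must
 * lie entirely inside the umem and, in unaligned mode, must not span two
 * pages that are not DMA contiguous.
 */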
static bool xp_addr_crosses_non_contig_pg(struct xsk_buff_pool *pool,
                                          u64 addr)
{
        return xp_desc_crosses_non_contig_pg(pool, addr, pool->chunk_size);
}

static bool xp_check_unaligned(struct xsk_buff_pool *pool, u64 *addr)
{
        *addr = xp_unaligned_extract_addr(*addr);
        if (*addr >= pool->addrs_cnt ||
            *addr + pool->chunk_size > pool->addrs_cnt ||
            xp_addr_crosses_non_contig_pg(pool, *addr))
                return false;
        return true;
}

static bool xp_check_aligned(struct xsk_buff_pool *pool, u64 *addr)
{
        *addr = xp_aligned_extract_addr(pool, *addr);
        return *addr < pool->addrs_cnt;
}

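/* Slow path of buffer allocation: pop a free head and pair it with the next
 * valid address from the fill ring, skipping and counting invalid
 * descriptors. The buffer's DMA addresses are precomputed if the pool is
 * DMA mapped.
 */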
static struct xdp_buff_xsk *__xp_alloc(struct xsk_buff_pool *pool)
{
        struct xdp_buff_xsk *xskb;
        u64 addr;
        bool ok;

        if (pool->free_heads_cnt == 0)
                return NULL;

        xskb = pool->free_heads[--pool->free_heads_cnt];

        for (;;) {
                if (!xskq_cons_peek_addr_unchecked(pool->fq, &addr)) {
                        pool->fq->queue_empty_descs++;
                        xp_release(xskb);
                        return NULL;
                }

                ok = pool->unaligned ? xp_check_unaligned(pool, &addr) :
                     xp_check_aligned(pool, &addr);
                if (!ok) {
                        pool->fq->invalid_descs++;
                        xskq_cons_release(pool->fq);
                        continue;
                }
                break;
        }
        xskq_cons_release(pool->fq);

        xskb->orig_addr = addr;
        xskb->xdp.data_hard_start = pool->addrs + addr + pool->headroom;
        if (pool->dma_pages_cnt) {
                xskb->frame_dma = (pool->dma_pages[addr >> PAGE_SHIFT] &
                                   ~XSK_NEXT_PG_CONTIG_MASK) +
                                  (addr & ~PAGE_MASK);
                xskb->dma = xskb->frame_dma + pool->headroom +
                            XDP_PACKET_HEADROOM;
        }
        return xskb;
}

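/* Allocate one buffer for the driver's Rx path: recycle from the free list if
 * possible, otherwise fall back to __xp_alloc(). The data pointers are reset
 * and, if the device requires it, the buffer is synced for device DMA.
 */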
struct xdp_buff *xp_alloc(struct xsk_buff_pool *pool)
{
        struct xdp_buff_xsk *xskb;

        if (!pool->free_list_cnt) {
                xskb = __xp_alloc(pool);
                if (!xskb)
                        return NULL;
        } else {
                pool->free_list_cnt--;
                xskb = list_first_entry(&pool->free_list, struct xdp_buff_xsk,
                                        free_list_node);
                list_del(&xskb->free_list_node);
        }

        xskb->xdp.data = xskb->xdp.data_hard_start + XDP_PACKET_HEADROOM;
        xskb->xdp.data_meta = xskb->xdp.data;

        if (pool->dma_need_sync) {
                dma_sync_single_range_for_device(pool->dev, xskb->dma, 0,
                                                 pool->frame_len,
                                                 DMA_BIDIRECTIONAL);
        }
        return &xskb->xdp;
}
EXPORT_SYMBOL(xp_alloc);

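/* Cheap check used by drivers to see whether @count buffers could be
 * allocated right now, from the free list and the fill ring combined.
 */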
bool xp_can_alloc(struct xsk_buff_pool *pool, u32 count)
{
        if (pool->free_list_cnt >= count)
                return true;
        return xskq_cons_has_entries(pool->fq, count - pool->free_list_cnt);
}
EXPORT_SYMBOL(xp_can_alloc);

void xp_free(struct xdp_buff_xsk *xskb)
{
        xskb->pool->free_list_cnt++;
        list_add(&xskb->free_list_node, &xskb->pool->free_list);
}
EXPORT_SYMBOL(xp_free);

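/* Translate a raw umem address, as carried in a descriptor, into a kernel
 * virtual address or a DMA address. In unaligned mode the offset encoded in
 * the upper bits is folded into the address first.
 */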
void *xp_raw_get_data(struct xsk_buff_pool *pool, u64 addr)
{
        addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return pool->addrs + addr;
}
EXPORT_SYMBOL(xp_raw_get_data);

dma_addr_t xp_raw_get_dma(struct xsk_buff_pool *pool, u64 addr)
{
        addr = pool->unaligned ? xp_unaligned_add_offset_to_addr(addr) : addr;
        return (pool->dma_pages[addr >> PAGE_SHIFT] &
                ~XSK_NEXT_PG_CONTIG_MASK) +
                (addr & ~PAGE_MASK);
}
EXPORT_SYMBOL(xp_raw_get_dma);

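/* Slow-path DMA sync helpers; the inline wrappers skip them when the device
 * does not need syncing. They sync a buffer for the CPU after Rx and for the
 * device before Tx.
 */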
void xp_dma_sync_for_cpu_slow(struct xdp_buff_xsk *xskb)
{
        dma_sync_single_range_for_cpu(xskb->pool->dev, xskb->dma, 0,
                                      xskb->pool->frame_len, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_cpu_slow);

void xp_dma_sync_for_device_slow(struct xsk_buff_pool *pool, dma_addr_t dma,
                                 size_t size)
{
        dma_sync_single_range_for_device(pool->dev, dma, 0,
                                         size, DMA_BIDIRECTIONAL);
}
EXPORT_SYMBOL(xp_dma_sync_for_device_slow);