linux/drivers/net/ethernet/hisilicon/hns/hnae.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail_rcu(node, head);
        spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_del_rcu(node);
        spin_unlock_irqrestore(lock, flags);
}

static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        unsigned int order = hnae_page_order(ring);
        struct page *p = dev_alloc_pages(order);

        if (!p)
                return -ENOMEM;

        cb->priv = p;
        cb->page_offset = 0;
        cb->reuse_flag = 0;
        cb->buf = page_address(p);
        cb->length = hnae_page_size(ring);
        cb->type = DESC_TYPE_PAGE;

        return 0;
}

static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (unlikely(!cb->priv))
                return;

        if (cb->type == DESC_TYPE_SKB)
                dev_kfree_skb_any((struct sk_buff *)cb->priv);
        else if (unlikely(is_rx_ring(ring)))
                put_page((struct page *)cb->priv);

        cb->priv = NULL;
}

static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
                               cb->length, ring_to_dma_dir(ring));

        if (dma_mapping_error(ring_to_dev(ring), cb->dma))
                return -EIO;

        return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
        else if (cb->length)
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
        .alloc_buffer = hnae_alloc_buffer,
        .free_buffer = hnae_free_buffer,
        .map_buffer = hnae_map_buffer,
        .unmap_buffer = hnae_unmap_buffer,
};

static int __ae_match(struct device *dev, const void *data)
{
        struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

        if (dev_of_node(hdev->dev))
                return (data == &hdev->dev->of_node->fwnode);
        else if (is_acpi_node(hdev->dev->fwnode))
                return (data == hdev->dev->fwnode);

        dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
        return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
        struct device *dev;

        WARN_ON(!fwnode);

        dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

        return dev ? cls_to_ae_dev(dev) : NULL;
}

static void hnae_free_buffers(struct hnae_ring *ring)
{
        int i;

        for (i = 0; i < ring->desc_num; i++)
                hnae_free_buffer_detach(ring, i);
}

/* Allocate buffers for the raw packets and map them for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
        int i, j, ret;

        for (i = 0; i < ring->desc_num; i++) {
                ret = hnae_alloc_buffer_attach(ring, i);
                if (ret)
                        goto out_buffer_fail;
        }

        return 0;

out_buffer_fail:
        for (j = i - 1; j >= 0; j--)
                hnae_free_buffer_detach(ring, j);
        return ret;
}

/* unmap and free the descriptor array of the ring */
static void hnae_free_desc(struct hnae_ring *ring)
{
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
        ring->desc_dma_addr = 0;
        kfree(ring->desc);
        ring->desc = NULL;
}

/* allocate the descriptor array, without buffers attached */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
        int size = ring->desc_num * sizeof(ring->desc[0]);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
                ring->desc, size, ring_to_dma_dir(ring));
        if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* finish the ring; for an rx ring, also free its attached buffers */
static void hnae_fini_ring(struct hnae_ring *ring)
{
        if (is_rx_ring(ring))
                hnae_free_buffers(ring);

        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
}

/* init the ring; for an rx ring, also allocate and attach buffers */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
        int ret;

        if (ring->desc_num <= 0 || ring->buf_size <= 0)
                return -EINVAL;

        ring->q = q;
        ring->flags = flags;
        spin_lock_init(&ring->lock);
        ring->coal_param = q->handle->coal_param;
        assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

        /* no matter tx or rx ring, the ntc and ntu start from 0 */
        assert(ring->next_to_use == 0);
        assert(ring->next_to_clean == 0);

        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
                        GFP_KERNEL);
        if (!ring->desc_cb) {
                ret = -ENOMEM;
                goto out;
        }

        ret = hnae_alloc_desc(ring);
        if (ret)
                goto out_with_desc_cb;

        if (is_rx_ring(ring)) {
                ret = hnae_alloc_buffers(ring);
                if (ret)
                        goto out_with_desc;
        }

        return 0;

out_with_desc:
        hnae_free_desc(ring);
out_with_desc_cb:
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
out:
        return ret;
}

static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
                           struct hnae_ae_dev *dev)
{
        int ret;

        q->dev = dev;
        q->handle = h;

        ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
        if (ret)
                goto out;

        ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
        if (ret)
                goto out_with_tx_ring;

        if (dev->ops->init_queue)
                dev->ops->init_queue(q);

        return 0;

out_with_tx_ring:
        hnae_fini_ring(&q->tx_ring);
out:
        return ret;
}

static void hnae_fini_queue(struct hnae_queue *q)
{
        if (q->dev->ops->fini_queue)
                q->dev->ops->fini_queue(q);

        hnae_fini_ring(&q->tx_ring);
        hnae_fini_ring(&q->rx_ring);
}

/**
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

int hnae_register_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
        if (raw_notifier_chain_unregister(&ae_chain, nb))
                dev_err(NULL, "notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);

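/*
 * Illustrative sketch (not part of the original file): a consumer of this
 * framework could hook the ae_chain above to learn when an AE engine is
 * registered.  The callback and variable names below are assumptions made
 * purely for illustration.
 *
 *      static int my_ae_event(struct notifier_block *nb,
 *                             unsigned long action, void *data)
 *      {
 *              if (action == HNAE_AE_REGISTER)
 *                      pr_info("an hnae AE engine was registered\n");
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_ae_nb = {
 *              .notifier_call = my_ae_event,
 *      };
 *
 *      hnae_register_notifier(&my_ae_nb);
 *      ...
 *      hnae_unregister_notifier(&my_ae_nb);
 */
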
/* tear down all queues of a handle, reset the AE, then rebuild the queues */
int hnae_reinit_handle(struct hnae_handle *handle)
{
        int i, j;
        int ret;

        for (i = 0; i < handle->q_num; i++) /* free ring */
                hnae_fini_queue(handle->qs[i]);

        if (handle->dev->ops->reset)
                handle->dev->ops->reset(handle);

        for (i = 0; i < handle->q_num; i++) { /* reinit ring */
                ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
                if (ret)
                        goto out_when_init_queue;
        }
        return 0;

out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);
        return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that will use this handle
 * @fwnode: the fwnode of the AE device providing the handle
 * @port_id: the port id of the handle in the AE device
 * @bops: the callbacks for buffer management, NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
                                    const struct fwnode_handle *fwnode,
                                    u32 port_id,
                                    struct hnae_buf_ops *bops)
{
        struct hnae_ae_dev *dev;
        struct hnae_handle *handle;
        int i, j;
        int ret;

        dev = find_ae(fwnode);
        if (!dev)
                return ERR_PTR(-ENODEV);

        handle = dev->ops->get_handle(dev, port_id);
        if (IS_ERR(handle)) {
                put_device(&dev->cls_dev);
                return handle;
        }

        handle->dev = dev;
        handle->owner_dev = owner_dev;
        handle->bops = bops ? bops : &hnae_bops;
        handle->eport_id = port_id;

        for (i = 0; i < handle->q_num; i++) {
                ret = hnae_init_queue(handle, handle->qs[i], dev);
                if (ret)
                        goto out_when_init_queue;
        }

        __module_get(dev->owner);

        hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

        return handle;

out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);

        put_device(&dev->cls_dev);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);

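/*
 * Illustrative sketch (not part of the original file): a NIC driver built on
 * this framework would typically obtain its handle at probe time and release
 * it again on removal.  The variable names and the source of the fwnode are
 * assumptions made purely for illustration; passing NULL as @bops selects
 * the default hnae_bops defined above.
 *
 *      struct hnae_handle *h;
 *
 *      h = hnae_get_handle(&pdev->dev, ae_fwnode, port_id, NULL);
 *      if (IS_ERR(h))
 *              return PTR_ERR(h);
 *      ...
 *      hnae_put_handle(h);
 */
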
/* hnae_put_handle - release a handle obtained from hnae_get_handle */
void hnae_put_handle(struct hnae_handle *h)
{
        struct hnae_ae_dev *dev = h->dev;
        int i;

        for (i = 0; i < h->q_num; i++)
                hnae_fini_queue(h->qs[i]);

        if (h->dev->ops->reset)
                h->dev->ops->reset(h);

        hnae_list_del(&dev->lock, &h->node);

        if (dev->ops->put_handle)
                dev->ops->put_handle(h);

        module_put(dev->owner);

        put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

/* nothing to free here; the class device is embedded in the hnae_ae_dev */
static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine with the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this dev
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
        static atomic_t id = ATOMIC_INIT(-1);
        int ret;

        if (!hdev->dev)
                return -ENODEV;

        if (!hdev->ops || !hdev->ops->get_handle ||
            !hdev->ops->toggle_ring_irq ||
            !hdev->ops->get_status || !hdev->ops->adjust_link)
                return -EINVAL;

        hdev->owner = owner;
        hdev->id = (int)atomic_inc_return(&id);
        hdev->cls_dev.parent = hdev->dev;
        hdev->cls_dev.class = hnae_class;
        hdev->cls_dev.release = hnae_release;
        (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
        ret = device_register(&hdev->cls_dev);
        if (ret)
                return ret;

        __module_get(THIS_MODULE);

        INIT_LIST_HEAD(&hdev->handle_list);
        spin_lock_init(&hdev->lock);

        ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
        if (ret)
                dev_dbg(hdev->dev,
                        "has no notifier for AE: %s\n", hdev->name);

        return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

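/*
 * Illustrative sketch (not part of the original file): an AE engine provider
 * fills in a struct hnae_ae_dev and at least the mandatory ops checked above
 * before registering.  The names my_ae_ops/my_ae_dev and the probe context
 * are assumptions made purely for illustration.
 *
 *      static struct hnae_ae_ops my_ae_ops = {
 *              .get_handle      = ...,    (mandatory)
 *              .toggle_ring_irq = ...,    (mandatory)
 *              .get_status      = ...,    (mandatory)
 *              .adjust_link     = ...,    (mandatory)
 *      };
 *
 *      static struct hnae_ae_dev my_ae_dev = {
 *              .ops = &my_ae_ops,
 *      };
 *
 *      my_ae_dev.dev = &pdev->dev;
 *      ret = hnae_ae_register(&my_ae_dev, THIS_MODULE);
 *      ...
 *      hnae_ae_unregister(&my_ae_dev);
 */
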
/**
 * hnae_ae_unregister - unregisters an HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
        device_unregister(&hdev->cls_dev);
        module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
        hnae_class = class_create(THIS_MODULE, "hnae");
        return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
        class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */