linux/drivers/net/ethernet/hisilicon/hns/hnae.c
/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/of.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "hnae.h"

#define cls_to_ae_dev(dev) container_of(dev, struct hnae_ae_dev, cls_dev)

static struct class *hnae_class;

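/* add/remove a handle on an AE device's handle list under the device's
 * lock; readers may walk the list concurrently under RCU
 */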
static void
hnae_list_add(spinlock_t *lock, struct list_head *node, struct list_head *head)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_add_tail_rcu(node, head);
        spin_unlock_irqrestore(lock, flags);
}

static void hnae_list_del(spinlock_t *lock, struct list_head *node)
{
        unsigned long flags;

        spin_lock_irqsave(lock, flags);
        list_del_rcu(node);
        spin_unlock_irqrestore(lock, flags);
}

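/* allocate a page-based buffer and fill in the ring's descriptor control
 * block with it
 */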
static int hnae_alloc_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        unsigned int order = hnae_page_order(ring);
        struct page *p = dev_alloc_pages(order);

        if (!p)
                return -ENOMEM;

        cb->priv = p;
        cb->page_offset = 0;
        cb->reuse_flag = 0;
        cb->buf  = page_address(p);
        cb->length = hnae_page_size(ring);
        cb->type = DESC_TYPE_PAGE;

        return 0;
}

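/* release the buffer held by a control block: skbs are freed, rx pages get
 * their reference dropped
 */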
static void hnae_free_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (unlikely(!cb->priv))
                return;

        if (cb->type == DESC_TYPE_SKB)
                dev_kfree_skb_any((struct sk_buff *)cb->priv);
        else if (unlikely(is_rx_ring(ring)))
                put_page((struct page *)cb->priv);

        cb->priv = NULL;
}

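/* map/unmap the buffer described by a control block for DMA, in the
 * direction that matches the ring
 */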
static int hnae_map_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
                               cb->length, ring_to_dma_dir(ring));

        if (dma_mapping_error(ring_to_dev(ring), cb->dma))
                return -EIO;

        return 0;
}

static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
{
        if (cb->type == DESC_TYPE_SKB)
                dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
                                 ring_to_dma_dir(ring));
        else
                dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
                               ring_to_dma_dir(ring));
}

static struct hnae_buf_ops hnae_bops = {
        .alloc_buffer = hnae_alloc_buffer,
        .free_buffer = hnae_free_buffer,
        .map_buffer = hnae_map_buffer,
        .unmap_buffer = hnae_unmap_buffer,
};

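/* class_find_device() match callback: compare an AE device in hnae_class
 * against the fwnode it was probed from, for both OF and ACPI devices
 */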
static int __ae_match(struct device *dev, const void *data)
{
        struct hnae_ae_dev *hdev = cls_to_ae_dev(dev);

        if (dev_of_node(hdev->dev))
                return (data == &hdev->dev->of_node->fwnode);
        else if (is_acpi_node(hdev->dev->fwnode))
                return (data == hdev->dev->fwnode);

        dev_err(dev, "__ae_match cannot read cfg data from OF or acpi\n");
        return 0;
}

static struct hnae_ae_dev *find_ae(const struct fwnode_handle *fwnode)
{
        struct device *dev;

        WARN_ON(!fwnode);

        dev = class_find_device(hnae_class, NULL, fwnode, __ae_match);

        return dev ? cls_to_ae_dev(dev) : NULL;
}

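/* detach and free every buffer attached to the ring's descriptors */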
static void hnae_free_buffers(struct hnae_ring *ring)
{
        int i;

        for (i = 0; i < ring->desc_num; i++)
                hnae_free_buffer_detach(ring, i);
}

/* Allocate memory for raw packets, and map the buffers for DMA */
static int hnae_alloc_buffers(struct hnae_ring *ring)
{
        int i, j, ret;

        for (i = 0; i < ring->desc_num; i++) {
                ret = hnae_alloc_buffer_attach(ring, i);
                if (ret)
                        goto out_buffer_fail;
        }

        return 0;

out_buffer_fail:
        for (j = i - 1; j >= 0; j--)
                hnae_free_buffer_detach(ring, j);
        return ret;
}

/* free the descriptors, along with the buffers attached to them */
static void hnae_free_desc(struct hnae_ring *ring)
{
        hnae_free_buffers(ring);
        dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
                         ring->desc_num * sizeof(ring->desc[0]),
                         ring_to_dma_dir(ring));
        ring->desc_dma_addr = 0;
        kfree(ring->desc);
        ring->desc = NULL;
}

/* allocate the descriptors, without attaching buffers to them */
static int hnae_alloc_desc(struct hnae_ring *ring)
{
        int size = ring->desc_num * sizeof(ring->desc[0]);

        ring->desc = kzalloc(size, GFP_KERNEL);
        if (!ring->desc)
                return -ENOMEM;

        ring->desc_dma_addr = dma_map_single(ring_to_dev(ring),
                ring->desc, size, ring_to_dma_dir(ring));
        if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
                ring->desc_dma_addr = 0;
                kfree(ring->desc);
                ring->desc = NULL;
                return -ENOMEM;
        }

        return 0;
}

/* finalize the ring, freeing its descriptors and the buffers attached to it */
static void hnae_fini_ring(struct hnae_ring *ring)
{
        hnae_free_desc(ring);
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
        ring->next_to_clean = 0;
        ring->next_to_use = 0;
}

/* initialize the ring; for an rx ring, also attach buffers to it */
static int
hnae_init_ring(struct hnae_queue *q, struct hnae_ring *ring, int flags)
{
        int ret;

        if (ring->desc_num <= 0 || ring->buf_size <= 0)
                return -EINVAL;

        ring->q = q;
        ring->flags = flags;
        spin_lock_init(&ring->lock);
        ring->coal_param = q->handle->coal_param;
        assert(!ring->desc && !ring->desc_cb && !ring->desc_dma_addr);

        /* no matter whether it is a tx or rx ring, ntc and ntu start from 0 */
        assert(ring->next_to_use == 0);
        assert(ring->next_to_clean == 0);

        ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
                        GFP_KERNEL);
        if (!ring->desc_cb) {
                ret = -ENOMEM;
                goto out;
        }

        ret = hnae_alloc_desc(ring);
        if (ret)
                goto out_with_desc_cb;

        if (is_rx_ring(ring)) {
                ret = hnae_alloc_buffers(ring);
                if (ret)
                        goto out_with_desc;
        }

        return 0;

out_with_desc:
        hnae_free_desc(ring);
out_with_desc_cb:
        kfree(ring->desc_cb);
        ring->desc_cb = NULL;
out:
        return ret;
}

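/* set up a queue: initialize its tx and rx rings, then let the AE driver do
 * any hardware specific queue initialization
 */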
static int hnae_init_queue(struct hnae_handle *h, struct hnae_queue *q,
                           struct hnae_ae_dev *dev)
{
        int ret;

        q->dev = dev;
        q->handle = h;

        ret = hnae_init_ring(q, &q->tx_ring, q->tx_ring.flags | RINGF_DIR);
        if (ret)
                goto out;

        ret = hnae_init_ring(q, &q->rx_ring, q->rx_ring.flags & ~RINGF_DIR);
        if (ret)
                goto out_with_tx_ring;

        if (dev->ops->init_queue)
                dev->ops->init_queue(q);

        return 0;

out_with_tx_ring:
        hnae_fini_ring(&q->tx_ring);
out:
        return ret;
}

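/* undo hnae_init_queue(): hardware specific teardown first, then free the
 * tx and rx rings
 */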
static void hnae_fini_queue(struct hnae_queue *q)
{
        if (q->dev->ops->fini_queue)
                q->dev->ops->fini_queue(q);

        hnae_fini_ring(&q->tx_ring);
        hnae_fini_ring(&q->rx_ring);
}

/**
 * ae_chain - define ae chain head
 */
static RAW_NOTIFIER_HEAD(ae_chain);

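/* register/unregister a callback that is invoked (with HNAE_AE_REGISTER)
 * whenever a new AE device is registered with the framework
 */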
int hnae_register_notifier(struct notifier_block *nb)
{
        return raw_notifier_chain_register(&ae_chain, nb);
}
EXPORT_SYMBOL(hnae_register_notifier);

void hnae_unregister_notifier(struct notifier_block *nb)
{
        if (raw_notifier_chain_unregister(&ae_chain, nb))
                dev_err(NULL, "notifier chain unregister fail\n");
}
EXPORT_SYMBOL(hnae_unregister_notifier);

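/* hnae_reinit_handle - tear down all queues of a handle, reset the hardware
 * through the AE driver, then bring the queues back up
 */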
int hnae_reinit_handle(struct hnae_handle *handle)
{
        int i, j;
        int ret;

        for (i = 0; i < handle->q_num; i++) /* free ring */
                hnae_fini_queue(handle->qs[i]);

        if (handle->dev->ops->reset)
                handle->dev->ops->reset(handle);

        for (i = 0; i < handle->q_num; i++) { /* reinit ring */
                ret = hnae_init_queue(handle, handle->qs[i], handle->dev);
                if (ret)
                        goto out_when_init_queue;
        }
        return 0;
out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);
        return ret;
}
EXPORT_SYMBOL(hnae_reinit_handle);

/* hnae_get_handle - get a handle from the AE
 * @owner_dev: the dev that will use this handle
 * @fwnode: the fwnode of the AE device providing the handle
 * @port_id: the id of the port the handle is bound to
 * @bops: the callbacks for buffer management, or NULL for the defaults
 *
 * return handle ptr or ERR_PTR
 */
struct hnae_handle *hnae_get_handle(struct device *owner_dev,
                                    const struct fwnode_handle *fwnode,
                                    u32 port_id,
                                    struct hnae_buf_ops *bops)
{
        struct hnae_ae_dev *dev;
        struct hnae_handle *handle;
        int i, j;
        int ret;

        dev = find_ae(fwnode);
        if (!dev)
                return ERR_PTR(-ENODEV);

        handle = dev->ops->get_handle(dev, port_id);
        if (IS_ERR(handle)) {
                put_device(&dev->cls_dev);
                return handle;
        }

        handle->dev = dev;
        handle->owner_dev = owner_dev;
        handle->bops = bops ? bops : &hnae_bops;
        handle->eport_id = port_id;

        for (i = 0; i < handle->q_num; i++) {
                ret = hnae_init_queue(handle, handle->qs[i], dev);
                if (ret)
                        goto out_when_init_queue;
        }

        __module_get(dev->owner);

        hnae_list_add(&dev->lock, &handle->node, &dev->handle_list);

        return handle;

out_when_init_queue:
        for (j = i - 1; j >= 0; j--)
                hnae_fini_queue(handle->qs[j]);

        put_device(&dev->cls_dev);

        return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(hnae_get_handle);

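/* hnae_put_handle - release a handle obtained from hnae_get_handle(), along
 * with the references taken on the AE device and its module
 */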
void hnae_put_handle(struct hnae_handle *h)
{
        struct hnae_ae_dev *dev = h->dev;
        int i;

        for (i = 0; i < h->q_num; i++)
                hnae_fini_queue(h->qs[i]);

        if (h->dev->ops->reset)
                h->dev->ops->reset(h);

        hnae_list_del(&dev->lock, &h->node);

        if (dev->ops->put_handle)
                dev->ops->put_handle(h);

        module_put(dev->owner);

        put_device(&dev->cls_dev);
}
EXPORT_SYMBOL(hnae_put_handle);

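/* cls_dev is embedded in the hnae_ae_dev, so there is nothing to free here */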
static void hnae_release(struct device *dev)
{
}

/**
 * hnae_ae_register - register an AE engine with the hnae framework
 * @hdev: the hnae ae engine device
 * @owner: the module that provides this device
 * NOTE: duplicated names are not checked
 */
int hnae_ae_register(struct hnae_ae_dev *hdev, struct module *owner)
{
        static atomic_t id = ATOMIC_INIT(-1);
        int ret;

        if (!hdev->dev)
                return -ENODEV;

        if (!hdev->ops || !hdev->ops->get_handle ||
            !hdev->ops->toggle_ring_irq ||
            !hdev->ops->get_status || !hdev->ops->adjust_link)
                return -EINVAL;

        hdev->owner = owner;
        hdev->id = (int)atomic_inc_return(&id);
        hdev->cls_dev.parent = hdev->dev;
        hdev->cls_dev.class = hnae_class;
        hdev->cls_dev.release = hnae_release;
        (void)dev_set_name(&hdev->cls_dev, "hnae%d", hdev->id);
        ret = device_register(&hdev->cls_dev);
        if (ret)
                return ret;

        __module_get(THIS_MODULE);

        INIT_LIST_HEAD(&hdev->handle_list);
        spin_lock_init(&hdev->lock);

        ret = raw_notifier_call_chain(&ae_chain, HNAE_AE_REGISTER, NULL);
        if (ret)
                dev_dbg(hdev->dev,
                        "has not notifier for AE: %s\n", hdev->name);

        return 0;
}
EXPORT_SYMBOL(hnae_ae_register);

/**
 * hnae_ae_unregister - unregisters a HNAE AE engine
 * @hdev: the device to unregister
 */
void hnae_ae_unregister(struct hnae_ae_dev *hdev)
{
        device_unregister(&hdev->cls_dev);
        module_put(THIS_MODULE);
}
EXPORT_SYMBOL(hnae_ae_unregister);

static int __init hnae_init(void)
{
        hnae_class = class_create(THIS_MODULE, "hnae");
        return PTR_ERR_OR_ZERO(hnae_class);
}

static void __exit hnae_exit(void)
{
        class_destroy(hnae_class);
}

subsys_initcall(hnae_init);
module_exit(hnae_exit);

MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Hisilicon Network Acceleration Engine Framework");

/* vi: set tw=78 noet: */