linux/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
   1/*
   2 * Copyright (C) 2015-2017 Netronome Systems, Inc.
   3 *
   4 * This software is dual licensed under the GNU General Public License Version 2,
   5 * June 1991 as shown in the file COPYING in the top-level directory of this
   6 * source tree or the BSD 2-Clause License provided below.  You have the
   7 * option to license this software under the complete terms of either license.
   8 *
   9 * The BSD 2-Clause License:
  10 *
  11 *     Redistribution and use in source and binary forms, with or
  12 *     without modification, are permitted provided that the following
  13 *     conditions are met:
  14 *
  15 *      1. Redistributions of source code must retain the above
  16 *         copyright notice, this list of conditions and the following
  17 *         disclaimer.
  18 *
  19 *      2. Redistributions in binary form must reproduce the above
  20 *         copyright notice, this list of conditions and the following
  21 *         disclaimer in the documentation and/or other materials
  22 *         provided with the distribution.
  23 *
  24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
  25 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
  26 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
  27 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
  28 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
  29 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
  30 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
  31 * SOFTWARE.
  32 */
  33
  34/*
  35 * nfp_cppcore.c
  36 * Provides low-level access to the NFP's internal CPP bus
  37 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
  38 *          Jason McMullan <jason.mcmullan@netronome.com>
  39 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
  40 */
  41
  42#include <asm/unaligned.h>
  43#include <linux/delay.h>
  44#include <linux/device.h>
  45#include <linux/ioport.h>
  46#include <linux/kernel.h>
  47#include <linux/module.h>
  48#include <linux/mutex.h>
  49#include <linux/sched.h>
  50#include <linux/slab.h>
  51#include <linux/wait.h>
  52
  53#include "nfp_arm.h"
  54#include "nfp_cpp.h"
  55#include "nfp6000/nfp6000.h"
  56
  57#define NFP_ARM_GCSR_SOFTMODEL2                              0x0000014c
  58#define NFP_ARM_GCSR_SOFTMODEL3                              0x00000150
  59
  60struct nfp_cpp_resource {
  61        struct list_head list;
  62        const char *name;
  63        u32 cpp_id;
  64        u64 start;
  65        u64 end;
  66};
  67
  68/**
  69 * struct nfp_cpp - main nfpcore device structure
   70 * The following fields are read-only after probe() exits or netdevs are spawned.
  71 * @dev:                embedded device structure
  72 * @op:                 low-level implementation ops
  73 * @priv:               private data of the low-level implementation
  74 * @model:              chip model
  75 * @interface:          chip interface id we are using to reach it
  76 * @serial:             chip serial number
  77 * @imb_cat_table:      CPP Mapping Table
  78 *
   79 * The following fields use explicit locking:
  80 * @resource_list:      NFP CPP resource list
  81 * @resource_lock:      protects @resource_list
  82 *
  83 * @area_cache_list:    cached areas for cpp/xpb read/write speed up
  84 * @area_cache_mutex:   protects @area_cache_list
  85 *
  86 * @waitq:              area wait queue
  87 */
  88struct nfp_cpp {
  89        struct device dev;
  90
  91        void *priv;
  92
  93        u32 model;
  94        u16 interface;
  95        u8 serial[NFP_SERIAL_LEN];
  96
  97        const struct nfp_cpp_operations *op;
  98        struct list_head resource_list;
  99        rwlock_t resource_lock;
 100        wait_queue_head_t waitq;
 101
 102        u32 imb_cat_table[16];
 103
 104        struct mutex area_cache_mutex;
 105        struct list_head area_cache_list;
 106};
 107
 108/* Element of the area_cache_list */
 109struct nfp_cpp_area_cache {
 110        struct list_head entry;
 111        u32 id;
 112        u64 addr;
 113        u32 size;
 114        struct nfp_cpp_area *area;
 115};
 116
 117struct nfp_cpp_area {
 118        struct nfp_cpp *cpp;
 119        struct kref kref;
 120        atomic_t refcount;
 121        struct mutex mutex;     /* Lock for the area's refcount */
 122        unsigned long long offset;
 123        unsigned long size;
 124        struct nfp_cpp_resource resource;
 125        void __iomem *iomem;
 126        /* Here follows the 'priv' part of nfp_cpp_area. */
 127};
 128
 129struct nfp_cpp_explicit {
 130        struct nfp_cpp *cpp;
 131        struct nfp_cpp_explicit_command cmd;
  132        /* Here follows the 'priv' part of nfp_cpp_explicit. */
 133};
 134
 135static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res)
 136{
 137        struct nfp_cpp_resource *tmp;
 138        struct list_head *pos;
 139
 140        list_for_each(pos, head) {
 141                tmp = container_of(pos, struct nfp_cpp_resource, list);
 142
 143                if (tmp->cpp_id > res->cpp_id)
 144                        break;
 145
 146                if (tmp->cpp_id == res->cpp_id && tmp->start > res->start)
 147                        break;
 148        }
 149
 150        list_add_tail(&res->list, pos);
 151}
 152
 153static void __resource_del(struct nfp_cpp_resource *res)
 154{
 155        list_del_init(&res->list);
 156}
 157
 158static void __release_cpp_area(struct kref *kref)
 159{
 160        struct nfp_cpp_area *area =
 161                container_of(kref, struct nfp_cpp_area, kref);
 162        struct nfp_cpp *cpp = nfp_cpp_area_cpp(area);
 163
 164        if (area->cpp->op->area_cleanup)
 165                area->cpp->op->area_cleanup(area);
 166
 167        write_lock(&cpp->resource_lock);
 168        __resource_del(&area->resource);
 169        write_unlock(&cpp->resource_lock);
 170        kfree(area);
 171}
 172
 173static void nfp_cpp_area_put(struct nfp_cpp_area *area)
 174{
 175        kref_put(&area->kref, __release_cpp_area);
 176}
 177
 178static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area)
 179{
 180        kref_get(&area->kref);
 181
 182        return area;
 183}
 184
 185/**
 186 * nfp_cpp_free() - free the CPP handle
 187 * @cpp:        CPP handle
 188 */
 189void nfp_cpp_free(struct nfp_cpp *cpp)
 190{
 191        struct nfp_cpp_area_cache *cache, *ctmp;
 192        struct nfp_cpp_resource *res, *rtmp;
 193
 194        /* Remove all caches */
 195        list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
 196                list_del(&cache->entry);
 197                if (cache->id)
 198                        nfp_cpp_area_release(cache->area);
 199                nfp_cpp_area_free(cache->area);
 200                kfree(cache);
 201        }
 202
 203        /* There should be no dangling areas at this point */
 204        WARN_ON(!list_empty(&cpp->resource_list));
 205
 206        /* .. but if they weren't, try to clean up. */
 207        list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) {
 208                struct nfp_cpp_area *area = container_of(res,
 209                                                         struct nfp_cpp_area,
 210                                                         resource);
 211
 212                dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n",
 213                        NFP_CPP_ID_TARGET_of(res->cpp_id),
 214                        NFP_CPP_ID_ACTION_of(res->cpp_id),
 215                        NFP_CPP_ID_TOKEN_of(res->cpp_id),
 216                        res->start, res->end,
 217                        res->name ? " " : "",
 218                        res->name ? res->name : "");
 219
 220                if (area->cpp->op->area_release)
 221                        area->cpp->op->area_release(area);
 222
 223                __release_cpp_area(&area->kref);
 224        }
 225
 226        if (cpp->op->free)
 227                cpp->op->free(cpp);
 228
 229        device_unregister(&cpp->dev);
 230
 231        kfree(cpp);
 232}
 233
 234/**
 235 * nfp_cpp_model() - Retrieve the Model ID of the NFP
 236 * @cpp:        NFP CPP handle
 237 *
 238 * Return: NFP CPP Model ID
 239 */
 240u32 nfp_cpp_model(struct nfp_cpp *cpp)
 241{
 242        return cpp->model;
 243}
 244
 245/**
 246 * nfp_cpp_interface() - Retrieve the Interface ID of the NFP
 247 * @cpp:        NFP CPP handle
 248 *
 249 * Return: NFP CPP Interface ID
 250 */
 251u16 nfp_cpp_interface(struct nfp_cpp *cpp)
 252{
 253        return cpp->interface;
 254}
 255
 256/**
 257 * nfp_cpp_serial() - Retrieve the Serial ID of the NFP
 258 * @cpp:        NFP CPP handle
 259 * @serial:     Pointer to NFP serial number
 260 *
 261 * Return:  Length of NFP serial number
 262 */
 263int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
 264{
 265        *serial = &cpp->serial[0];
 266        return sizeof(cpp->serial);
 267}
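
/* Usage sketch (editor's illustration, not part of the driver): querying the
 * chip identity through the accessors above, given a struct nfp_cpp handle
 * obtained from the transport layer.  The pr_info() output format is an
 * arbitrary choice for the example.
 *
 *	static void example_print_ids(struct nfp_cpp *cpp)
 *	{
 *		const u8 *serial;
 *		int serial_len;
 *
 *		serial_len = nfp_cpp_serial(cpp, &serial);
 *		pr_info("model 0x%08x ifc 0x%04x serial %*phN\n",
 *			nfp_cpp_model(cpp), nfp_cpp_interface(cpp),
 *			serial_len, serial);
 *	}
 */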
 268
 269/**
 270 * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
 271 * @cpp:        CPP device handle
 272 * @dest:       NFP CPP ID
 273 * @name:       Name of region
 274 * @address:    Address of region
 275 * @size:       Size of region
 276 *
 277 * Allocate and initialize a CPP area structure.  The area must later
 278 * be locked down with an 'acquire' before it can be safely accessed.
 279 *
 280 * NOTE: @address and @size must be 32-bit aligned values.
 281 *
 282 * Return: NFP CPP area handle, or NULL
 283 */
 284struct nfp_cpp_area *
 285nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name,
 286                             unsigned long long address, unsigned long size)
 287{
 288        struct nfp_cpp_area *area;
 289        u64 tmp64 = address;
 290        int err, name_len;
 291
 292        /* Remap from cpp_island to cpp_target */
 293        err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
 294        if (err < 0)
 295                return NULL;
 296
 297        address = tmp64;
 298
 299        if (!name)
 300                name = "(reserved)";
 301
 302        name_len = strlen(name) + 1;
 303        area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len,
 304                       GFP_KERNEL);
 305        if (!area)
 306                return NULL;
 307
 308        area->cpp = cpp;
 309        area->resource.name = (void *)area + sizeof(*area) +
 310                cpp->op->area_priv_size;
 311        memcpy((char *)area->resource.name, name, name_len);
 312
 313        area->resource.cpp_id = dest;
 314        area->resource.start = address;
 315        area->resource.end = area->resource.start + size - 1;
 316        INIT_LIST_HEAD(&area->resource.list);
 317
 318        atomic_set(&area->refcount, 0);
 319        kref_init(&area->kref);
 320        mutex_init(&area->mutex);
 321
 322        if (cpp->op->area_init) {
 323                int err;
 324
 325                err = cpp->op->area_init(area, dest, address, size);
 326                if (err < 0) {
 327                        kfree(area);
 328                        return NULL;
 329                }
 330        }
 331
 332        write_lock(&cpp->resource_lock);
 333        __resource_add(&cpp->resource_list, &area->resource);
 334        write_unlock(&cpp->resource_lock);
 335
 336        area->offset = address;
 337        area->size = size;
 338
 339        return area;
 340}
 341
 342/**
 343 * nfp_cpp_area_alloc() - allocate a new CPP area
 344 * @cpp:        CPP handle
 345 * @dest:       CPP id
 346 * @address:    Start address on CPP target
 347 * @size:       Size of area in bytes
 348 *
 349 * Allocate and initialize a CPP area structure.  The area must later
 350 * be locked down with an 'acquire' before it can be safely accessed.
 351 *
 352 * NOTE: @address and @size must be 32-bit aligned values.
 353 *
 354 * Return: NFP CPP Area handle, or NULL
 355 */
 356struct nfp_cpp_area *
 357nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
 358                   unsigned long long address, unsigned long size)
 359{
 360        return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
 361}
 362
 363/**
 364 * nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down
 365 * @cpp:        CPP handle
 366 * @name:       Name of region
 367 * @dest:       CPP id
 368 * @address:    Start address on CPP target
 369 * @size:       Size of area
 370 *
 371 * Allocate and initialize a CPP area structure, and lock it down so
 372 * that it can be accessed directly.
 373 *
 374 * NOTE: @address and @size must be 32-bit aligned values.
 375 * The area must also be 'released' when the structure is freed.
 376 *
 377 * Return: NFP CPP Area handle, or NULL
 378 */
 379struct nfp_cpp_area *
 380nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest,
 381                           unsigned long long address, unsigned long size)
 382{
 383        struct nfp_cpp_area *area;
 384
 385        area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size);
 386        if (!area)
 387                return NULL;
 388
 389        if (nfp_cpp_area_acquire(area)) {
 390                nfp_cpp_area_free(area);
 391                return NULL;
 392        }
 393
 394        return area;
 395}
 396
 397/**
 398 * nfp_cpp_area_free() - free up the CPP area
 399 * @area:       CPP area handle
 400 *
 401 * Frees up memory resources held by the CPP area.
 402 */
 403void nfp_cpp_area_free(struct nfp_cpp_area *area)
 404{
 405        if (atomic_read(&area->refcount))
 406                nfp_warn(area->cpp, "Warning: freeing busy area\n");
 407        nfp_cpp_area_put(area);
 408}
 409
 410static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
 411{
 412        *status = area->cpp->op->area_acquire(area);
 413
 414        return *status != -EAGAIN;
 415}
 416
 417static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
 418{
 419        int err, status;
 420
 421        if (atomic_inc_return(&area->refcount) > 1)
 422                return 0;
 423
 424        if (!area->cpp->op->area_acquire)
 425                return 0;
 426
 427        err = wait_event_interruptible(area->cpp->waitq,
 428                                       nfp_cpp_area_acquire_try(area, &status));
 429        if (!err)
 430                err = status;
 431        if (err) {
 432                nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
 433                atomic_dec(&area->refcount);
 434                return err;
 435        }
 436
 437        nfp_cpp_area_get(area);
 438
 439        return 0;
 440}
 441
 442/**
 443 * nfp_cpp_area_acquire() - lock down a CPP area for access
 444 * @area:       CPP area handle
 445 *
 446 * Locks down the CPP area for a potential long term activity.  Area
 447 * must always be locked down before being accessed.
 448 *
 449 * Return: 0, or -ERRNO
 450 */
 451int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
 452{
 453        int ret;
 454
 455        mutex_lock(&area->mutex);
 456        ret = __nfp_cpp_area_acquire(area);
 457        mutex_unlock(&area->mutex);
 458
 459        return ret;
 460}
 461
 462/**
 463 * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access
 464 * @area:       CPP area handle
 465 *
 466 * Locks down the CPP area for a potential long term activity.  Area
 467 * must always be locked down before being accessed.
 468 *
  469 * NOTE: Returns -EAGAIN if no area is available
 470 *
 471 * Return: 0, or -ERRNO
 472 */
 473int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
 474{
 475        mutex_lock(&area->mutex);
 476        if (atomic_inc_return(&area->refcount) == 1) {
 477                if (area->cpp->op->area_acquire) {
 478                        int err;
 479
 480                        err = area->cpp->op->area_acquire(area);
 481                        if (err < 0) {
 482                                atomic_dec(&area->refcount);
 483                                mutex_unlock(&area->mutex);
 484                                return err;
 485                        }
 486                }
 487        }
 488        mutex_unlock(&area->mutex);
 489
 490        nfp_cpp_area_get(area);
 491        return 0;
 492}
 493
 494/**
 495 * nfp_cpp_area_release() - release a locked down CPP area
 496 * @area:       CPP area handle
 497 *
 498 * Releases a previously locked down CPP area.
 499 */
 500void nfp_cpp_area_release(struct nfp_cpp_area *area)
 501{
 502        mutex_lock(&area->mutex);
 503        /* Only call the release on refcount == 0 */
 504        if (atomic_dec_and_test(&area->refcount)) {
 505                if (area->cpp->op->area_release) {
 506                        area->cpp->op->area_release(area);
 507                        /* Let anyone waiting for a BAR try to get one.. */
 508                        wake_up_interruptible_all(&area->cpp->waitq);
 509                }
 510        }
 511        mutex_unlock(&area->mutex);
 512
 513        nfp_cpp_area_put(area);
 514}
 515
 516/**
 517 * nfp_cpp_area_release_free() - release CPP area and free it
 518 * @area:       CPP area handle
 519 *
  520 * Releases CPP area and frees up memory resources held by it.
 521 */
 522void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
 523{
 524        nfp_cpp_area_release(area);
 525        nfp_cpp_area_free(area);
 526}
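
/* Usage sketch (editor's illustration, not part of the driver): the typical
 * lifecycle of a CPP area.  The target (7 is the MU target, as also used by
 * the area cache below), the address and the size are placeholder values;
 * real callers derive them from firmware symbol or resource tables.
 *
 *	static int example_read_word(struct nfp_cpp *cpp, u32 *val)
 *	{
 *		struct nfp_cpp_area *area;
 *		int err;
 *
 *		area = nfp_cpp_area_alloc_acquire(cpp, "example",
 *						  NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
 *						  0x10000, 4096);
 *		if (!area)
 *			return -ENOMEM;
 *
 *		err = nfp_cpp_area_readl(area, 0, val);
 *
 *		nfp_cpp_area_release_free(area);
 *		return err;
 *	}
 */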
 527
 528/**
 529 * nfp_cpp_area_read() - read data from CPP area
 530 * @area:         CPP area handle
 531 * @offset:       offset into CPP area
 532 * @kernel_vaddr: kernel address to put data into
 533 * @length:       number of bytes to read
 534 *
 535 * Read data from indicated CPP region.
 536 *
 537 * NOTE: @offset and @length must be 32-bit aligned values.
 538 * Area must have been locked down with an 'acquire'.
 539 *
 540 * Return: length of io, or -ERRNO
 541 */
 542int nfp_cpp_area_read(struct nfp_cpp_area *area,
 543                      unsigned long offset, void *kernel_vaddr,
 544                      size_t length)
 545{
 546        return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
 547}
 548
 549/**
 550 * nfp_cpp_area_write() - write data to CPP area
 551 * @area:       CPP area handle
 552 * @offset:     offset into CPP area
 553 * @kernel_vaddr: kernel address to read data from
 554 * @length:     number of bytes to write
 555 *
 556 * Write data to indicated CPP region.
 557 *
 558 * NOTE: @offset and @length must be 32-bit aligned values.
 559 * Area must have been locked down with an 'acquire'.
 560 *
 561 * Return: length of io, or -ERRNO
 562 */
 563int nfp_cpp_area_write(struct nfp_cpp_area *area,
 564                       unsigned long offset, const void *kernel_vaddr,
 565                       size_t length)
 566{
 567        return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
 568}
 569
 570/**
 571 * nfp_cpp_area_size() - return size of a CPP area
 572 * @cpp_area:   CPP area handle
 573 *
 574 * Return: Size of the area
 575 */
 576size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
 577{
 578        return cpp_area->size;
 579}
 580
 581/**
 582 * nfp_cpp_area_name() - return name of a CPP area
 583 * @cpp_area:   CPP area handle
 584 *
 585 * Return: Name of the area, or NULL
 586 */
 587const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
 588{
 589        return cpp_area->resource.name;
 590}
 591
 592/**
 593 * nfp_cpp_area_priv() - return private struct for CPP area
 594 * @cpp_area:   CPP area handle
 595 *
 596 * Return: Private data for the CPP area
 597 */
 598void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
 599{
 600        return &cpp_area[1];
 601}
 602
 603/**
 604 * nfp_cpp_area_cpp() - return CPP handle for CPP area
 605 * @cpp_area:   CPP area handle
 606 *
 607 * Return: NFP CPP handle
 608 */
 609struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
 610{
 611        return cpp_area->cpp;
 612}
 613
 614/**
 615 * nfp_cpp_area_resource() - get resource
 616 * @area:       CPP area handle
 617 *
 618 * NOTE: Area must have been locked down with an 'acquire'.
 619 *
 620 * Return: struct resource pointer, or NULL
 621 */
 622struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area)
 623{
 624        struct resource *res = NULL;
 625
 626        if (area->cpp->op->area_resource)
 627                res = area->cpp->op->area_resource(area);
 628
 629        return res;
 630}
 631
 632/**
 633 * nfp_cpp_area_phys() - get physical address of CPP area
 634 * @area:       CPP area handle
 635 *
 636 * NOTE: Area must have been locked down with an 'acquire'.
 637 *
  638 * Return: phys_addr_t of the area, or ~0 on error
 639 */
 640phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area)
 641{
 642        phys_addr_t addr = ~0;
 643
 644        if (area->cpp->op->area_phys)
 645                addr = area->cpp->op->area_phys(area);
 646
 647        return addr;
 648}
 649
 650/**
 651 * nfp_cpp_area_iomem() - get IOMEM region for CPP area
 652 * @area:       CPP area handle
 653 *
 654 * Returns an iomem pointer for use with readl()/writel() style
 655 * operations.
 656 *
 657 * NOTE: Area must have been locked down with an 'acquire'.
 658 *
 659 * Return: __iomem pointer to the area, or NULL
 660 */
 661void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
 662{
 663        void __iomem *iomem = NULL;
 664
 665        if (area->cpp->op->area_iomem)
 666                iomem = area->cpp->op->area_iomem(area);
 667
 668        return iomem;
 669}
 670
 671/**
 672 * nfp_cpp_area_readl() - Read a u32 word from an area
 673 * @area:       CPP Area handle
 674 * @offset:     Offset into area
 675 * @value:      Pointer to read buffer
 676 *
 677 * Return: 0 on success, or -ERRNO
 678 */
 679int nfp_cpp_area_readl(struct nfp_cpp_area *area,
 680                       unsigned long offset, u32 *value)
 681{
 682        u8 tmp[4];
 683        int n;
 684
 685        n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
 686        if (n != sizeof(tmp))
 687                return n < 0 ? n : -EIO;
 688
 689        *value = get_unaligned_le32(tmp);
 690        return 0;
 691}
 692
 693/**
 694 * nfp_cpp_area_writel() - Write a u32 word to an area
 695 * @area:       CPP Area handle
 696 * @offset:     Offset into area
 697 * @value:      Value to write
 698 *
 699 * Return: 0 on success, or -ERRNO
 700 */
 701int nfp_cpp_area_writel(struct nfp_cpp_area *area,
 702                        unsigned long offset, u32 value)
 703{
 704        u8 tmp[4];
 705        int n;
 706
 707        put_unaligned_le32(value, tmp);
 708        n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
 709
 710        return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 711}
 712
 713/**
 714 * nfp_cpp_area_readq() - Read a u64 word from an area
 715 * @area:       CPP Area handle
 716 * @offset:     Offset into area
 717 * @value:      Pointer to read buffer
 718 *
 719 * Return: 0 on success, or -ERRNO
 720 */
 721int nfp_cpp_area_readq(struct nfp_cpp_area *area,
 722                       unsigned long offset, u64 *value)
 723{
 724        u8 tmp[8];
 725        int n;
 726
 727        n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
 728        if (n != sizeof(tmp))
 729                return n < 0 ? n : -EIO;
 730
 731        *value = get_unaligned_le64(tmp);
 732        return 0;
 733}
 734
 735/**
 736 * nfp_cpp_area_writeq() - Write a u64 word to an area
 737 * @area:       CPP Area handle
 738 * @offset:     Offset into area
 739 * @value:      Value to write
 740 *
 741 * Return: 0 on success, or -ERRNO
 742 */
 743int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
 744                        unsigned long offset, u64 value)
 745{
 746        u8 tmp[8];
 747        int n;
 748
 749        put_unaligned_le64(value, tmp);
 750        n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
 751
 752        return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 753}
 754
 755/**
 756 * nfp_cpp_area_fill() - fill a CPP area with a value
 757 * @area:       CPP area
 758 * @offset:     offset into CPP area
 759 * @value:      value to fill with
 760 * @length:     length of area to fill
 761 *
 762 * Fill indicated area with given value.
 763 *
 764 * Return: length of io, or -ERRNO
 765 */
 766int nfp_cpp_area_fill(struct nfp_cpp_area *area,
 767                      unsigned long offset, u32 value, size_t length)
 768{
 769        u8 tmp[4];
 770        size_t i;
 771        int k;
 772
 773        put_unaligned_le32(value, tmp);
 774
 775        if (offset % sizeof(tmp) || length % sizeof(tmp))
 776                return -EINVAL;
 777
 778        for (i = 0; i < length; i += sizeof(tmp)) {
 779                k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp));
 780                if (k < 0)
 781                        return k;
 782        }
 783
 784        return i;
 785}
 786
 787/**
  788 * nfp_cpp_area_cache_add() - Permanently reserve an area for the hot cache
 789 * @cpp:        NFP CPP handle
 790 * @size:       Size of the area - MUST BE A POWER OF 2.
 791 */
 792int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
 793{
 794        struct nfp_cpp_area_cache *cache;
 795        struct nfp_cpp_area *area;
 796
 797        /* Allocate an area - we use the MU target's base as a placeholder,
 798         * as all supported chips have a MU.
 799         */
 800        area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
 801                                  0, size);
 802        if (!area)
 803                return -ENOMEM;
 804
 805        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
  806        if (!cache) {
                     nfp_cpp_area_free(area);
  807                return -ENOMEM;
             }
 808
 809        cache->id = 0;
 810        cache->addr = 0;
 811        cache->size = size;
 812        cache->area = area;
 813        mutex_lock(&cpp->area_cache_mutex);
 814        list_add_tail(&cache->entry, &cpp->area_cache_list);
 815        mutex_unlock(&cpp->area_cache_mutex);
 816
 817        return 0;
 818}
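
/* Usage sketch (editor's illustration, not part of the driver): the transport
 * layer may call nfp_cpp_area_cache_add() once at probe time so that later
 * nfp_cpp_read()/nfp_cpp_write() calls can be served from a pre-acquired
 * window instead of allocating and acquiring a fresh area each time.  The
 * 256 KiB size is an example value, not a requirement of this API (it only
 * has to be a power of two).
 *
 *	err = nfp_cpp_area_cache_add(cpp, 256 * 1024);
 *	if (err)
 *		nfp_warn(cpp, "could not reserve CPP area cache: %d\n", err);
 */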
 819
 820static struct nfp_cpp_area_cache *
 821area_cache_get(struct nfp_cpp *cpp, u32 id,
 822               u64 addr, unsigned long *offset, size_t length)
 823{
 824        struct nfp_cpp_area_cache *cache;
 825        int err;
 826
 827        /* Early exit when length == 0, which prevents
 828         * the need for special case code below when
 829         * checking against available cache size.
 830         */
 831        if (length == 0 || id == 0)
 832                return NULL;
 833
 834        /* Remap from cpp_island to cpp_target */
 835        err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table);
 836        if (err < 0)
 837                return NULL;
 838
 839        mutex_lock(&cpp->area_cache_mutex);
 840
 841        if (list_empty(&cpp->area_cache_list)) {
 842                mutex_unlock(&cpp->area_cache_mutex);
 843                return NULL;
 844        }
 845
 846        addr += *offset;
 847
 848        /* See if we have a match */
 849        list_for_each_entry(cache, &cpp->area_cache_list, entry) {
 850                if (id == cache->id &&
 851                    addr >= cache->addr &&
 852                    addr + length <= cache->addr + cache->size)
 853                        goto exit;
 854        }
 855
 856        /* No matches - inspect the tail of the LRU */
 857        cache = list_entry(cpp->area_cache_list.prev,
 858                           struct nfp_cpp_area_cache, entry);
 859
 860        /* Can we fit in the cache entry? */
 861        if (round_down(addr + length - 1, cache->size) !=
 862            round_down(addr, cache->size)) {
 863                mutex_unlock(&cpp->area_cache_mutex);
 864                return NULL;
 865        }
 866
 867        /* If id != 0, we will need to release it */
 868        if (cache->id) {
 869                nfp_cpp_area_release(cache->area);
 870                cache->id = 0;
 871                cache->addr = 0;
 872        }
 873
 874        /* Adjust the start address to be cache size aligned */
 875        cache->id = id;
 876        cache->addr = addr & ~(u64)(cache->size - 1);
 877
 878        /* Re-init to the new ID and address */
 879        if (cpp->op->area_init) {
 880                err = cpp->op->area_init(cache->area,
 881                                         id, cache->addr, cache->size);
 882                if (err < 0) {
 883                        mutex_unlock(&cpp->area_cache_mutex);
 884                        return NULL;
 885                }
 886        }
 887
 888        /* Attempt to acquire */
 889        err = nfp_cpp_area_acquire(cache->area);
 890        if (err < 0) {
 891                mutex_unlock(&cpp->area_cache_mutex);
 892                return NULL;
 893        }
 894
 895exit:
 896        /* Adjust offset */
 897        *offset = addr - cache->addr;
 898        return cache;
 899}
 900
 901static void
 902area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
 903{
 904        if (!cache)
 905                return;
 906
 907        /* Move to front of LRU */
 908        list_del(&cache->entry);
 909        list_add(&cache->entry, &cpp->area_cache_list);
 910
 911        mutex_unlock(&cpp->area_cache_mutex);
 912}
 913
 914static int __nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
 915                          unsigned long long address, void *kernel_vaddr,
 916                          size_t length)
 917{
 918        struct nfp_cpp_area_cache *cache;
 919        struct nfp_cpp_area *area;
 920        unsigned long offset = 0;
 921        int err;
 922
 923        cache = area_cache_get(cpp, destination, address, &offset, length);
 924        if (cache) {
 925                area = cache->area;
 926        } else {
 927                area = nfp_cpp_area_alloc(cpp, destination, address, length);
 928                if (!area)
 929                        return -ENOMEM;
 930
 931                err = nfp_cpp_area_acquire(area);
 932                if (err) {
 933                        nfp_cpp_area_free(area);
 934                        return err;
 935                }
 936        }
 937
 938        err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
 939
 940        if (cache)
 941                area_cache_put(cpp, cache);
 942        else
 943                nfp_cpp_area_release_free(area);
 944
 945        return err;
 946}
 947
 948/**
 949 * nfp_cpp_read() - read from CPP target
 950 * @cpp:                CPP handle
 951 * @destination:        CPP id
 952 * @address:            offset into CPP target
 953 * @kernel_vaddr:       kernel buffer for result
 954 * @length:             number of bytes to read
 955 *
 956 * Return: length of io, or -ERRNO
 957 */
 958int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
 959                 unsigned long long address, void *kernel_vaddr,
 960                 size_t length)
 961{
 962        size_t n, offset;
 963        int ret;
 964
 965        for (offset = 0; offset < length; offset += n) {
 966                unsigned long long r_addr = address + offset;
 967
 968                /* make first read smaller to align to safe window */
 969                n = min_t(size_t, length - offset,
 970                          ALIGN(r_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - r_addr);
 971
 972                ret = __nfp_cpp_read(cpp, destination, address + offset,
 973                                     kernel_vaddr + offset, n);
 974                if (ret < 0)
 975                        return ret;
 976                if (ret != n)
 977                        return offset + n;
 978        }
 979
 980        return length;
 981}
 982
 983static int __nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
 984                           unsigned long long address,
 985                           const void *kernel_vaddr, size_t length)
 986{
 987        struct nfp_cpp_area_cache *cache;
 988        struct nfp_cpp_area *area;
 989        unsigned long offset = 0;
 990        int err;
 991
 992        cache = area_cache_get(cpp, destination, address, &offset, length);
 993        if (cache) {
 994                area = cache->area;
 995        } else {
 996                area = nfp_cpp_area_alloc(cpp, destination, address, length);
 997                if (!area)
 998                        return -ENOMEM;
 999
1000                err = nfp_cpp_area_acquire(area);
1001                if (err) {
1002                        nfp_cpp_area_free(area);
1003                        return err;
1004                }
1005        }
1006
1007        err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
1008
1009        if (cache)
1010                area_cache_put(cpp, cache);
1011        else
1012                nfp_cpp_area_release_free(area);
1013
1014        return err;
1015}
1016
1017/**
1018 * nfp_cpp_write() - write to CPP target
1019 * @cpp:                CPP handle
1020 * @destination:        CPP id
1021 * @address:            offset into CPP target
1022 * @kernel_vaddr:       kernel buffer to read from
1023 * @length:             number of bytes to write
1024 *
1025 * Return: length of io, or -ERRNO
1026 */
1027int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
1028                  unsigned long long address,
1029                  const void *kernel_vaddr, size_t length)
1030{
1031        size_t n, offset;
1032        int ret;
1033
1034        for (offset = 0; offset < length; offset += n) {
1035                unsigned long long w_addr = address + offset;
1036
1037                /* make first write smaller to align to safe window */
1038                n = min_t(size_t, length - offset,
1039                          ALIGN(w_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - w_addr);
1040
1041                ret = __nfp_cpp_write(cpp, destination, address + offset,
1042                                      kernel_vaddr + offset, n);
1043                if (ret < 0)
1044                        return ret;
1045                if (ret != n)
1046                        return offset + n;
1047        }
1048
1049        return length;
1050}
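
/* Usage sketch (editor's illustration, not part of the driver): a bulk
 * transfer against a CPP target.  nfp_cpp_read()/nfp_cpp_write() split the
 * request on NFP_CPP_SAFE_AREA_SIZE boundaries internally, so callers only
 * supply the CPP ID, address and buffer.  Target 7 (MU) and the address are
 * placeholder values for the example.
 *
 *	static int example_copy_from_mu(struct nfp_cpp *cpp, void *buf,
 *					size_t len)
 *	{
 *		const u32 mu = NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0);
 *		int ret;
 *
 *		ret = nfp_cpp_read(cpp, mu, 0x400000, buf, len);
 *		if (ret < 0)
 *			return ret;
 *		return (size_t)ret == len ? 0 : -EIO;
 *	}
 */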
1051
1052/* Return the correct CPP address, and fixup xpb_addr as needed. */
1053static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
1054{
1055        int island;
1056        u32 xpb;
1057
1058        xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0);
1059        /* Ensure that non-local XPB accesses go
1060         * out through the global XPBM bus.
1061         */
1062        island = (*xpb_addr >> 24) & 0x3f;
1063        if (!island)
1064                return xpb;
1065
1066        if (island != 1) {
1067                *xpb_addr |= 1 << 30;
1068                return xpb;
1069        }
1070
 1071        /* Accesses to the ARM Island overlay use Island 0 / Global Bit */
1072        *xpb_addr &= ~0x7f000000;
1073        if (*xpb_addr < 0x60000) {
1074                *xpb_addr |= 1 << 30;
1075        } else {
1076                /* And only non-ARM interfaces use the island id = 1 */
1077                if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp))
1078                    != NFP_CPP_INTERFACE_TYPE_ARM)
1079                        *xpb_addr |= 1 << 24;
1080        }
1081
1082        return xpb;
1083}
1084
1085/**
1086 * nfp_xpb_readl() - Read a u32 word from a XPB location
1087 * @cpp:        CPP device handle
1088 * @xpb_addr:   Address for operation
1089 * @value:      Pointer to read buffer
1090 *
1091 * Return: 0 on success, or -ERRNO
1092 */
1093int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
1094{
1095        u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1096
1097        return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
1098}
1099
1100/**
1101 * nfp_xpb_writel() - Write a u32 word to a XPB location
1102 * @cpp:        CPP device handle
1103 * @xpb_addr:   Address for operation
1104 * @value:      Value to write
1105 *
1106 * Return: 0 on success, or -ERRNO
1107 */
1108int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
1109{
1110        u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1111
1112        return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
1113}
1114
1115/**
1116 * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus
1117 * @cpp:        NFP CPP device handle
1118 * @xpb_tgt:    XPB target and address
1119 * @mask:       mask of bits to alter
1120 * @value:      value to modify
1121 *
 1122 * NOTE: This operation may sleep; do not call it from interrupt or softirq context.
1123 *
1124 * Return: 0 on success, or -ERRNO
1125 */
1126int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
1127                    u32 mask, u32 value)
1128{
1129        int err;
1130        u32 tmp;
1131
1132        err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
1133        if (err < 0)
1134                return err;
1135
1136        tmp &= ~mask;
1137        tmp |= mask & value;
1138        return nfp_xpb_writel(cpp, xpb_tgt, tmp);
1139}
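
/* Usage sketch (editor's illustration, not part of the driver): a
 * read-modify-write of a single bit in an XPB CSR, using the helper above.
 * The CSR address (xpb_csr_addr) is a placeholder.
 *
 *	// set bit 0, then clear it again, leaving all other bits untouched
 *	err = nfp_xpb_writelm(cpp, xpb_csr_addr, BIT(0), BIT(0));
 *	if (!err)
 *		err = nfp_xpb_writelm(cpp, xpb_csr_addr, BIT(0), 0);
 */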
1140
1141/* Lockdep markers */
1142static struct lock_class_key nfp_cpp_resource_lock_key;
1143
1144static void nfp_cpp_dev_release(struct device *dev)
1145{
1146        /* Nothing to do here - it just makes the kernel happy */
1147}
1148
1149/**
1150 * nfp_cpp_from_operations() - Create a NFP CPP handle
1151 *                             from an operations structure
1152 * @ops:        NFP CPP operations structure
1153 * @parent:     Parent device
1154 * @priv:       Private data of low-level implementation
1155 *
 1156 * NOTE: On failure, @ops->free will be called!
1157 *
1158 * Return: NFP CPP handle on success, ERR_PTR on failure
1159 */
1160struct nfp_cpp *
1161nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
1162                        struct device *parent, void *priv)
1163{
1164        const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
1165        struct nfp_cpp *cpp;
1166        u32 mask[2];
1167        u32 xpbaddr;
1168        size_t tgt;
1169        int err;
1170
1171        cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
1172        if (!cpp) {
1173                err = -ENOMEM;
1174                goto err_malloc;
1175        }
1176
1177        cpp->op = ops;
1178        cpp->priv = priv;
1179        cpp->interface = ops->get_interface(parent);
1180        if (ops->read_serial)
1181                ops->read_serial(parent, cpp->serial);
1182        rwlock_init(&cpp->resource_lock);
1183        init_waitqueue_head(&cpp->waitq);
1184        lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
1185        INIT_LIST_HEAD(&cpp->resource_list);
1186        INIT_LIST_HEAD(&cpp->area_cache_list);
1187        mutex_init(&cpp->area_cache_mutex);
1188        cpp->dev.init_name = "cpp";
1189        cpp->dev.parent = parent;
1190        cpp->dev.release = nfp_cpp_dev_release;
1191        err = device_register(&cpp->dev);
1192        if (err < 0) {
1193                put_device(&cpp->dev);
1194                goto err_dev;
1195        }
1196
1197        dev_set_drvdata(&cpp->dev, cpp);
1198
1199        /* NOTE: cpp_lock is NOT locked for op->init,
1200         * since it may call NFP CPP API operations
1201         */
1202        if (cpp->op->init) {
1203                err = cpp->op->init(cpp);
1204                if (err < 0) {
1205                        dev_err(parent,
1206                                "NFP interface initialization failed\n");
1207                        goto err_out;
1208                }
1209        }
1210
1211        err = nfp_cpp_model_autodetect(cpp, &cpp->model);
1212        if (err < 0) {
1213                dev_err(parent, "NFP model detection failed\n");
1214                goto err_out;
1215        }
1216
1217        for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
 1218                /* Hardcoded XPB IMB Base, island 0 */
1219                xpbaddr = 0x000a0000 + (tgt * 4);
1220                err = nfp_xpb_readl(cpp, xpbaddr,
1221                                    &cpp->imb_cat_table[tgt]);
1222                if (err < 0) {
1223                        dev_err(parent,
1224                                "Can't read CPP mapping from device\n");
1225                        goto err_out;
1226                }
1227        }
1228
1229        nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2,
1230                      &mask[0]);
1231        nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
1232                      &mask[1]);
1233
1234        dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
1235                 nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));
1236
1237        return cpp;
1238
1239err_out:
1240        device_unregister(&cpp->dev);
1241err_dev:
1242        kfree(cpp);
1243err_malloc:
1244        return ERR_PTR(err);
1245}
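
/* Usage sketch (editor's illustration, not part of the driver): a transport
 * back end (the PCIe code in nfp6000_pcie.c is the in-tree user) fills in a
 * struct nfp_cpp_operations and hands it to nfp_cpp_from_operations().  Only
 * callbacks referenced in this file are shown; the nfp6000_pcie_* names and
 * the priv structure are hypothetical placeholders.
 *
 *	static const struct nfp_cpp_operations nfp6000_pcie_ops = {
 *		.get_interface	= nfp6000_pcie_get_interface,
 *		.read_serial	= nfp6000_pcie_read_serial,
 *		.area_priv_size	= sizeof(struct nfp6000_area_priv),
 *		.area_init	= nfp6000_area_init,
 *		.area_acquire	= nfp6000_area_acquire,
 *		.area_release	= nfp6000_area_release,
 *		.area_read	= nfp6000_area_read,
 *		.area_write	= nfp6000_area_write,
 *	};
 *
 *	cpp = nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, priv);
 *	if (IS_ERR(cpp))
 *		return PTR_ERR(cpp);
 */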
1246
1247/**
1248 * nfp_cpp_priv() - Get the operations private data of a CPP handle
1249 * @cpp:        CPP handle
1250 *
1251 * Return: Private data for the NFP CPP handle
1252 */
1253void *nfp_cpp_priv(struct nfp_cpp *cpp)
1254{
1255        return cpp->priv;
1256}
1257
1258/**
1259 * nfp_cpp_device() - Get the Linux device handle of a CPP handle
1260 * @cpp:        CPP handle
1261 *
1262 * Return: Device for the NFP CPP bus
1263 */
1264struct device *nfp_cpp_device(struct nfp_cpp *cpp)
1265{
1266        return &cpp->dev;
1267}
1268
1269#define NFP_EXPL_OP(func, expl, args...)                          \
1270        ({                                                        \
1271                struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
1272                int err = -ENODEV;                                \
1273                                                                  \
1274                if (cpp->op->func)                                \
1275                        err = cpp->op->func(expl, ##args);        \
1276                err;                                              \
1277        })
1278
1279#define NFP_EXPL_OP_NR(func, expl, args...)                       \
1280        ({                                                        \
1281                struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
1282                                                                  \
1283                if (cpp->op->func)                                \
1284                        cpp->op->func(expl, ##args);              \
1285                                                                  \
1286        })
1287
1288/**
1289 * nfp_cpp_explicit_acquire() - Acquire explicit access handle
1290 * @cpp:        NFP CPP handle
1291 *
1292 * The 'data_ref' and 'signal_ref' values are useful when
1293 * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values.
1294 *
1295 * Return: NFP CPP explicit handle
1296 */
1297struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp)
1298{
1299        struct nfp_cpp_explicit *expl;
1300        int err;
1301
1302        expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL);
1303        if (!expl)
1304                return NULL;
1305
1306        expl->cpp = cpp;
1307        err = NFP_EXPL_OP(explicit_acquire, expl);
1308        if (err < 0) {
1309                kfree(expl);
1310                return NULL;
1311        }
1312
1313        return expl;
1314}
1315
1316/**
1317 * nfp_cpp_explicit_set_target() - Set target fields for explicit
1318 * @expl:       Explicit handle
1319 * @cpp_id:     CPP ID field
1320 * @len:        CPP Length field
1321 * @mask:       CPP Mask field
1322 *
1323 * Return: 0, or -ERRNO
1324 */
1325int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl,
1326                                u32 cpp_id, u8 len, u8 mask)
1327{
1328        expl->cmd.cpp_id = cpp_id;
1329        expl->cmd.len = len;
1330        expl->cmd.byte_mask = mask;
1331
1332        return 0;
1333}
1334
1335/**
1336 * nfp_cpp_explicit_set_data() - Set data fields for explicit
1337 * @expl:       Explicit handle
1338 * @data_master: CPP Data Master field
1339 * @data_ref:   CPP Data Ref field
1340 *
1341 * Return: 0, or -ERRNO
1342 */
1343int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
1344                              u8 data_master, u16 data_ref)
1345{
1346        expl->cmd.data_master = data_master;
1347        expl->cmd.data_ref = data_ref;
1348
1349        return 0;
1350}
1351
1352/**
1353 * nfp_cpp_explicit_set_signal() - Set signal fields for explicit
1354 * @expl:       Explicit handle
1355 * @signal_master: CPP Signal Master field
1356 * @signal_ref: CPP Signal Ref field
1357 *
1358 * Return: 0, or -ERRNO
1359 */
1360int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
1361                                u8 signal_master, u8 signal_ref)
1362{
1363        expl->cmd.signal_master = signal_master;
1364        expl->cmd.signal_ref = signal_ref;
1365
1366        return 0;
1367}
1368
1369/**
1370 * nfp_cpp_explicit_set_posted() - Set completion fields for explicit
1371 * @expl:       Explicit handle
1372 * @posted:     True for signaled completion, false otherwise
1373 * @siga:       CPP Signal A field
1374 * @siga_mode:  CPP Signal A Mode field
1375 * @sigb:       CPP Signal B field
1376 * @sigb_mode:  CPP Signal B Mode field
1377 *
1378 * Return: 0, or -ERRNO
1379 */
1380int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
1381                                u8 siga,
1382                                enum nfp_cpp_explicit_signal_mode siga_mode,
1383                                u8 sigb,
1384                                enum nfp_cpp_explicit_signal_mode sigb_mode)
1385{
1386        expl->cmd.posted = posted;
1387        expl->cmd.siga = siga;
1388        expl->cmd.sigb = sigb;
1389        expl->cmd.siga_mode = siga_mode;
1390        expl->cmd.sigb_mode = sigb_mode;
1391
1392        return 0;
1393}
1394
1395/**
1396 * nfp_cpp_explicit_put() - Set up the write (pull) data for a explicit access
1397 * @expl:       NFP CPP Explicit handle
1398 * @buff:       Data to have the target pull in the transaction
1399 * @len:        Length of data, in bytes
1400 *
1401 * The 'len' parameter must be less than or equal to 128 bytes.
1402 *
1403 * If this function is called before the configuration
1404 * registers are set, it will return -EINVAL.
1405 *
1406 * Return: 0, or -ERRNO
1407 */
1408int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
1409                         const void *buff, size_t len)
1410{
1411        return NFP_EXPL_OP(explicit_put, expl, buff, len);
1412}
1413
1414/**
1415 * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete
1416 * @expl:       NFP CPP Explicit handle
1417 * @address:    Address to send in the explicit transaction
1418 *
1419 * If this function is called before the configuration
 1420 * registers are set, it will return -EINVAL.
1421 *
1422 * Return: 0, or -ERRNO
1423 */
1424int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address)
1425{
1426        return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address);
1427}
1428
1429/**
1430 * nfp_cpp_explicit_get() - Get the 'push' (read) data from a explicit access
1431 * @expl:       NFP CPP Explicit handle
1432 * @buff:       Data that the target pushed in the transaction
1433 * @len:        Length of data, in bytes
1434 *
1435 * The 'len' parameter must be less than or equal to 128 bytes.
1436 *
1437 * If this function is called before all three configuration
 1438 * registers are set, it will return -EINVAL.
1439 *
1440 * If this function is called before nfp_cpp_explicit_do()
 1441 * has completed, it will return -EBUSY.
1442 *
1443 * Return: 0, or -ERRNO
1444 */
1445int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len)
1446{
1447        return NFP_EXPL_OP(explicit_get, expl, buff, len);
1448}
1449
1450/**
1451 * nfp_cpp_explicit_release() - Release explicit access handle
1452 * @expl:       NFP CPP Explicit handle
1453 *
1454 */
1455void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl)
1456{
1457        NFP_EXPL_OP_NR(explicit_release, expl);
1458        kfree(expl);
1459}
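
/* Usage sketch (editor's illustration, not part of the driver): the typical
 * explicit transaction flow using the helpers above.  The cpp_id, length
 * encoding, byte mask and address are placeholders; real users also program
 * the data/signal/posted fields to match the transaction they need.
 *
 *	static int example_explicit_read32(struct nfp_cpp *cpp, u32 cpp_id,
 *					   u64 addr, u32 *val)
 *	{
 *		struct nfp_cpp_explicit *expl;
 *		int err;
 *
 *		expl = nfp_cpp_explicit_acquire(cpp);
 *		if (!expl)
 *			return -EBUSY;
 *
 *		// assumed encoding: one 32-bit word with all bytes enabled
 *		err = nfp_cpp_explicit_set_target(expl, cpp_id, 0, 0xf);
 *		if (!err)
 *			err = nfp_cpp_explicit_do(expl, addr);
 *		if (err >= 0)
 *			err = nfp_cpp_explicit_get(expl, val, sizeof(*val));
 *
 *		nfp_cpp_explicit_release(expl);
 *		return err < 0 ? err : 0;
 *	}
 */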
1460
1461/**
1462 * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit
1463 * @cpp_explicit:       CPP explicit handle
1464 *
1465 * Return: NFP CPP handle of the explicit
1466 */
1467struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit)
1468{
1469        return cpp_explicit->cpp;
1470}
1471
1472/**
1473 * nfp_cpp_explicit_priv() - return private struct for CPP explicit
1474 * @cpp_explicit:       CPP explicit handle
1475 *
1476 * Return: private data of the explicit, or NULL
1477 */
1478void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
1479{
1480        return &cpp_explicit[1];
1481}
1482