linux/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c
   1// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
   2/* Copyright (C) 2015-2018 Netronome Systems, Inc. */
   3
   4/*
   5 * nfp_cppcore.c
   6 * Provides low-level access to the NFP's internal CPP bus
   7 * Authors: Jakub Kicinski <jakub.kicinski@netronome.com>
   8 *          Jason McMullan <jason.mcmullan@netronome.com>
   9 *          Rolf Neugebauer <rolf.neugebauer@netronome.com>
  10 */
  11
  12#include <asm/unaligned.h>
  13#include <linux/delay.h>
  14#include <linux/device.h>
  15#include <linux/ioport.h>
  16#include <linux/kernel.h>
  17#include <linux/module.h>
  18#include <linux/mutex.h>
  19#include <linux/sched.h>
  20#include <linux/slab.h>
  21#include <linux/wait.h>
  22
  23#include "nfp_arm.h"
  24#include "nfp_cpp.h"
  25#include "nfp6000/nfp6000.h"
  26
  27#define NFP_ARM_GCSR_SOFTMODEL2                              0x0000014c
  28#define NFP_ARM_GCSR_SOFTMODEL3                              0x00000150
  29
  30struct nfp_cpp_resource {
  31        struct list_head list;
  32        const char *name;
  33        u32 cpp_id;
  34        u64 start;
  35        u64 end;
  36};
  37
  38/**
  39 * struct nfp_cpp - main nfpcore device structure
  40 * Following fields are read-only after probe() exits or netdevs are spawned.
  41 * @dev:                embedded device structure
  42 * @op:                 low-level implementation ops
  43 * @priv:               private data of the low-level implementation
  44 * @model:              chip model
  45 * @interface:          chip interface id we are using to reach it
  46 * @serial:             chip serial number
  47 * @imb_cat_table:      CPP Mapping Table
  48 * @mu_locality_lsb:    MU access type bit offset
  49 *
  50 * Following fields use explicit locking:
  51 * @resource_list:      NFP CPP resource list
  52 * @resource_lock:      protects @resource_list
  53 *
  54 * @area_cache_list:    cached areas for cpp/xpb read/write speed up
  55 * @area_cache_mutex:   protects @area_cache_list
  56 *
  57 * @waitq:              area wait queue
  58 */
  59struct nfp_cpp {
  60        struct device dev;
  61
  62        void *priv;
  63
  64        u32 model;
  65        u16 interface;
  66        u8 serial[NFP_SERIAL_LEN];
  67
  68        const struct nfp_cpp_operations *op;
  69        struct list_head resource_list;
  70        rwlock_t resource_lock;
  71        wait_queue_head_t waitq;
  72
  73        u32 imb_cat_table[16];
  74        unsigned int mu_locality_lsb;
  75
  76        struct mutex area_cache_mutex;
  77        struct list_head area_cache_list;
  78};
  79
  80/* Element of the area_cache_list */
  81struct nfp_cpp_area_cache {
  82        struct list_head entry;
  83        u32 id;
  84        u64 addr;
  85        u32 size;
  86        struct nfp_cpp_area *area;
  87};
  88
  89struct nfp_cpp_area {
  90        struct nfp_cpp *cpp;
  91        struct kref kref;
  92        atomic_t refcount;
  93        struct mutex mutex;     /* Lock for the area's refcount */
  94        unsigned long long offset;
  95        unsigned long size;
  96        struct nfp_cpp_resource resource;
  97        void __iomem *iomem;
  98        /* Here follows the 'priv' part of nfp_cpp_area. */
  99};
 100
 101struct nfp_cpp_explicit {
 102        struct nfp_cpp *cpp;
 103        struct nfp_cpp_explicit_command cmd;
  104        /* Here follows the 'priv' part of nfp_cpp_explicit. */
 105};
 106
 107static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res)
 108{
 109        struct nfp_cpp_resource *tmp;
 110        struct list_head *pos;
 111
 112        list_for_each(pos, head) {
 113                tmp = container_of(pos, struct nfp_cpp_resource, list);
 114
 115                if (tmp->cpp_id > res->cpp_id)
 116                        break;
 117
 118                if (tmp->cpp_id == res->cpp_id && tmp->start > res->start)
 119                        break;
 120        }
 121
 122        list_add_tail(&res->list, pos);
 123}
 124
 125static void __resource_del(struct nfp_cpp_resource *res)
 126{
 127        list_del_init(&res->list);
 128}
 129
 130static void __release_cpp_area(struct kref *kref)
 131{
 132        struct nfp_cpp_area *area =
 133                container_of(kref, struct nfp_cpp_area, kref);
 134        struct nfp_cpp *cpp = nfp_cpp_area_cpp(area);
 135
 136        if (area->cpp->op->area_cleanup)
 137                area->cpp->op->area_cleanup(area);
 138
 139        write_lock(&cpp->resource_lock);
 140        __resource_del(&area->resource);
 141        write_unlock(&cpp->resource_lock);
 142        kfree(area);
 143}
 144
 145static void nfp_cpp_area_put(struct nfp_cpp_area *area)
 146{
 147        kref_put(&area->kref, __release_cpp_area);
 148}
 149
 150static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area)
 151{
 152        kref_get(&area->kref);
 153
 154        return area;
 155}
 156
 157/**
 158 * nfp_cpp_free() - free the CPP handle
 159 * @cpp:        CPP handle
 160 */
 161void nfp_cpp_free(struct nfp_cpp *cpp)
 162{
 163        struct nfp_cpp_area_cache *cache, *ctmp;
 164        struct nfp_cpp_resource *res, *rtmp;
 165
 166        /* Remove all caches */
 167        list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) {
 168                list_del(&cache->entry);
 169                if (cache->id)
 170                        nfp_cpp_area_release(cache->area);
 171                nfp_cpp_area_free(cache->area);
 172                kfree(cache);
 173        }
 174
 175        /* There should be no dangling areas at this point */
 176        WARN_ON(!list_empty(&cpp->resource_list));
 177
 178        /* .. but if they weren't, try to clean up. */
 179        list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) {
 180                struct nfp_cpp_area *area = container_of(res,
 181                                                         struct nfp_cpp_area,
 182                                                         resource);
 183
 184                dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n",
 185                        NFP_CPP_ID_TARGET_of(res->cpp_id),
 186                        NFP_CPP_ID_ACTION_of(res->cpp_id),
 187                        NFP_CPP_ID_TOKEN_of(res->cpp_id),
 188                        res->start, res->end,
 189                        res->name ? " " : "",
 190                        res->name ? res->name : "");
 191
 192                if (area->cpp->op->area_release)
 193                        area->cpp->op->area_release(area);
 194
 195                __release_cpp_area(&area->kref);
 196        }
 197
 198        if (cpp->op->free)
 199                cpp->op->free(cpp);
 200
 201        device_unregister(&cpp->dev);
 202
 203        kfree(cpp);
 204}
 205
 206/**
 207 * nfp_cpp_model() - Retrieve the Model ID of the NFP
 208 * @cpp:        NFP CPP handle
 209 *
 210 * Return: NFP CPP Model ID
 211 */
 212u32 nfp_cpp_model(struct nfp_cpp *cpp)
 213{
 214        return cpp->model;
 215}
 216
 217/**
 218 * nfp_cpp_interface() - Retrieve the Interface ID of the NFP
 219 * @cpp:        NFP CPP handle
 220 *
 221 * Return: NFP CPP Interface ID
 222 */
 223u16 nfp_cpp_interface(struct nfp_cpp *cpp)
 224{
 225        return cpp->interface;
 226}
 227
 228/**
 229 * nfp_cpp_serial() - Retrieve the Serial ID of the NFP
 230 * @cpp:        NFP CPP handle
 231 * @serial:     Pointer to NFP serial number
 232 *
 233 * Return:  Length of NFP serial number
 234 */
 235int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial)
 236{
 237        *serial = &cpp->serial[0];
 238        return sizeof(cpp->serial);
 239}
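
/* Example (editor's illustrative sketch, not part of the original file): the
 * three identification accessors above are typically combined when logging a
 * probed device; "cpp" and "dev" are whatever handles the caller owns.
 *
 *	const u8 *serial;
 *	int serial_len;
 *
 *	serial_len = nfp_cpp_serial(cpp, &serial);
 *	dev_info(dev, "model 0x%08x ifc 0x%04x serial %*phN\n",
 *		 nfp_cpp_model(cpp), nfp_cpp_interface(cpp),
 *		 serial_len, serial);
 */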
 240
 241#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x)           (((_x) >> 13) & 0x7)
 242#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE              BIT(12)
 243#define   NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT     0
 244#define   NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT     BIT(12)
 245
 246static int nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp)
 247{
 248        unsigned int mode, addr40;
 249        u32 imbcppat;
 250        int res;
 251
 252        imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU];
 253        mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat);
 254        addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE);
 255
 256        res = nfp_cppat_mu_locality_lsb(mode, addr40);
 257        if (res < 0)
 258                return res;
 259        cpp->mu_locality_lsb = res;
 260
 261        return 0;
 262}
 263
 264unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp)
 265{
 266        return cpp->mu_locality_lsb;
 267}
 268
 269/**
 270 * nfp_cpp_area_alloc_with_name() - allocate a new CPP area
 271 * @cpp:        CPP device handle
 272 * @dest:       NFP CPP ID
 273 * @name:       Name of region
 274 * @address:    Address of region
 275 * @size:       Size of region
 276 *
 277 * Allocate and initialize a CPP area structure.  The area must later
 278 * be locked down with an 'acquire' before it can be safely accessed.
 279 *
 280 * NOTE: @address and @size must be 32-bit aligned values.
 281 *
 282 * Return: NFP CPP area handle, or NULL
 283 */
 284struct nfp_cpp_area *
 285nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name,
 286                             unsigned long long address, unsigned long size)
 287{
 288        struct nfp_cpp_area *area;
 289        u64 tmp64 = address;
 290        int err, name_len;
 291
 292        /* Remap from cpp_island to cpp_target */
 293        err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table);
 294        if (err < 0)
 295                return NULL;
 296
 297        address = tmp64;
 298
 299        if (!name)
 300                name = "(reserved)";
 301
 302        name_len = strlen(name) + 1;
 303        area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len,
 304                       GFP_KERNEL);
 305        if (!area)
 306                return NULL;
 307
 308        area->cpp = cpp;
 309        area->resource.name = (void *)area + sizeof(*area) +
 310                cpp->op->area_priv_size;
 311        memcpy((char *)area->resource.name, name, name_len);
 312
 313        area->resource.cpp_id = dest;
 314        area->resource.start = address;
 315        area->resource.end = area->resource.start + size - 1;
 316        INIT_LIST_HEAD(&area->resource.list);
 317
 318        atomic_set(&area->refcount, 0);
 319        kref_init(&area->kref);
 320        mutex_init(&area->mutex);
 321
 322        if (cpp->op->area_init) {
 323                int err;
 324
 325                err = cpp->op->area_init(area, dest, address, size);
 326                if (err < 0) {
 327                        kfree(area);
 328                        return NULL;
 329                }
 330        }
 331
 332        write_lock(&cpp->resource_lock);
 333        __resource_add(&cpp->resource_list, &area->resource);
 334        write_unlock(&cpp->resource_lock);
 335
 336        area->offset = address;
 337        area->size = size;
 338
 339        return area;
 340}
 341
 342/**
 343 * nfp_cpp_area_alloc() - allocate a new CPP area
 344 * @cpp:        CPP handle
 345 * @dest:       CPP id
 346 * @address:    Start address on CPP target
 347 * @size:       Size of area in bytes
 348 *
 349 * Allocate and initialize a CPP area structure.  The area must later
 350 * be locked down with an 'acquire' before it can be safely accessed.
 351 *
 352 * NOTE: @address and @size must be 32-bit aligned values.
 353 *
 354 * Return: NFP CPP Area handle, or NULL
 355 */
 356struct nfp_cpp_area *
 357nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest,
 358                   unsigned long long address, unsigned long size)
 359{
 360        return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size);
 361}
 362
 363/**
 364 * nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down
 365 * @cpp:        CPP handle
 366 * @name:       Name of region
 367 * @dest:       CPP id
 368 * @address:    Start address on CPP target
 369 * @size:       Size of area
 370 *
 371 * Allocate and initialize a CPP area structure, and lock it down so
 372 * that it can be accessed directly.
 373 *
 374 * NOTE: @address and @size must be 32-bit aligned values.
 375 * The area must also be 'released' when the structure is freed.
 376 *
 377 * Return: NFP CPP Area handle, or NULL
 378 */
 379struct nfp_cpp_area *
 380nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest,
 381                           unsigned long long address, unsigned long size)
 382{
 383        struct nfp_cpp_area *area;
 384
 385        area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size);
 386        if (!area)
 387                return NULL;
 388
 389        if (nfp_cpp_area_acquire(area)) {
 390                nfp_cpp_area_free(area);
 391                return NULL;
 392        }
 393
 394        return area;
 395}
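
/* Example (editor's illustrative sketch): a one-off read through a temporary
 * area handle.  The target, base address and window size below are
 * placeholders; any 32-bit aligned window is handled the same way.
 *
 *	struct nfp_cpp_area *area;
 *	u32 val;
 *	int err;
 *
 *	area = nfp_cpp_area_alloc_acquire(cpp, "example",
 *					  NFP_CPP_ID(NFP_CPP_TARGET_ARM,
 *						     NFP_CPP_ACTION_RW, 0),
 *					  NFP_ARM_GCSR, 4096);
 *	if (!area)
 *		return -ENOMEM;
 *
 *	err = nfp_cpp_area_readl(area, NFP_ARM_GCSR_SOFTMODEL2, &val);
 *	nfp_cpp_area_release_free(area);
 *	return err;
 */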
 396
 397/**
 398 * nfp_cpp_area_free() - free up the CPP area
 399 * @area:       CPP area handle
 400 *
 401 * Frees up memory resources held by the CPP area.
 402 */
 403void nfp_cpp_area_free(struct nfp_cpp_area *area)
 404{
 405        if (atomic_read(&area->refcount))
 406                nfp_warn(area->cpp, "Warning: freeing busy area\n");
 407        nfp_cpp_area_put(area);
 408}
 409
 410static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status)
 411{
 412        *status = area->cpp->op->area_acquire(area);
 413
 414        return *status != -EAGAIN;
 415}
 416
 417static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area)
 418{
 419        int err, status;
 420
 421        if (atomic_inc_return(&area->refcount) > 1)
 422                return 0;
 423
 424        if (!area->cpp->op->area_acquire)
 425                return 0;
 426
 427        err = wait_event_interruptible(area->cpp->waitq,
 428                                       nfp_cpp_area_acquire_try(area, &status));
 429        if (!err)
 430                err = status;
 431        if (err) {
 432                nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err);
 433                atomic_dec(&area->refcount);
 434                return err;
 435        }
 436
 437        nfp_cpp_area_get(area);
 438
 439        return 0;
 440}
 441
 442/**
 443 * nfp_cpp_area_acquire() - lock down a CPP area for access
 444 * @area:       CPP area handle
 445 *
 446 * Locks down the CPP area for a potential long term activity.  Area
 447 * must always be locked down before being accessed.
 448 *
 449 * Return: 0, or -ERRNO
 450 */
 451int nfp_cpp_area_acquire(struct nfp_cpp_area *area)
 452{
 453        int ret;
 454
 455        mutex_lock(&area->mutex);
 456        ret = __nfp_cpp_area_acquire(area);
 457        mutex_unlock(&area->mutex);
 458
 459        return ret;
 460}
 461
 462/**
 463 * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access
 464 * @area:       CPP area handle
 465 *
 466 * Locks down the CPP area for a potential long term activity.  Area
 467 * must always be locked down before being accessed.
 468 *
  469 * NOTE: Returns -EAGAIN if no area is available
 470 *
 471 * Return: 0, or -ERRNO
 472 */
 473int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area)
 474{
 475        mutex_lock(&area->mutex);
 476        if (atomic_inc_return(&area->refcount) == 1) {
 477                if (area->cpp->op->area_acquire) {
 478                        int err;
 479
 480                        err = area->cpp->op->area_acquire(area);
 481                        if (err < 0) {
 482                                atomic_dec(&area->refcount);
 483                                mutex_unlock(&area->mutex);
 484                                return err;
 485                        }
 486                }
 487        }
 488        mutex_unlock(&area->mutex);
 489
 490        nfp_cpp_area_get(area);
 491        return 0;
 492}
 493
 494/**
 495 * nfp_cpp_area_release() - release a locked down CPP area
 496 * @area:       CPP area handle
 497 *
 498 * Releases a previously locked down CPP area.
 499 */
 500void nfp_cpp_area_release(struct nfp_cpp_area *area)
 501{
 502        mutex_lock(&area->mutex);
 503        /* Only call the release on refcount == 0 */
 504        if (atomic_dec_and_test(&area->refcount)) {
 505                if (area->cpp->op->area_release) {
 506                        area->cpp->op->area_release(area);
 507                        /* Let anyone waiting for a BAR try to get one.. */
 508                        wake_up_interruptible_all(&area->cpp->waitq);
 509                }
 510        }
 511        mutex_unlock(&area->mutex);
 512
 513        nfp_cpp_area_put(area);
 514}
 515
 516/**
 517 * nfp_cpp_area_release_free() - release CPP area and free it
 518 * @area:       CPP area handle
 519 *
  520 * Releases the CPP area and frees up memory resources held by it.
 521 */
 522void nfp_cpp_area_release_free(struct nfp_cpp_area *area)
 523{
 524        nfp_cpp_area_release(area);
 525        nfp_cpp_area_free(area);
 526}
 527
 528/**
 529 * nfp_cpp_area_read() - read data from CPP area
 530 * @area:         CPP area handle
 531 * @offset:       offset into CPP area
 532 * @kernel_vaddr: kernel address to put data into
 533 * @length:       number of bytes to read
 534 *
 535 * Read data from indicated CPP region.
 536 *
 537 * NOTE: @offset and @length must be 32-bit aligned values.
 538 * Area must have been locked down with an 'acquire'.
 539 *
 540 * Return: length of io, or -ERRNO
 541 */
 542int nfp_cpp_area_read(struct nfp_cpp_area *area,
 543                      unsigned long offset, void *kernel_vaddr,
 544                      size_t length)
 545{
 546        return area->cpp->op->area_read(area, kernel_vaddr, offset, length);
 547}
 548
 549/**
 550 * nfp_cpp_area_write() - write data to CPP area
 551 * @area:       CPP area handle
 552 * @offset:     offset into CPP area
 553 * @kernel_vaddr: kernel address to read data from
 554 * @length:     number of bytes to write
 555 *
 556 * Write data to indicated CPP region.
 557 *
 558 * NOTE: @offset and @length must be 32-bit aligned values.
 559 * Area must have been locked down with an 'acquire'.
 560 *
 561 * Return: length of io, or -ERRNO
 562 */
 563int nfp_cpp_area_write(struct nfp_cpp_area *area,
 564                       unsigned long offset, const void *kernel_vaddr,
 565                       size_t length)
 566{
 567        return area->cpp->op->area_write(area, kernel_vaddr, offset, length);
 568}
 569
 570/**
 571 * nfp_cpp_area_size() - return size of a CPP area
 572 * @cpp_area:   CPP area handle
 573 *
 574 * Return: Size of the area
 575 */
 576size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area)
 577{
 578        return cpp_area->size;
 579}
 580
 581/**
 582 * nfp_cpp_area_name() - return name of a CPP area
 583 * @cpp_area:   CPP area handle
 584 *
 585 * Return: Name of the area, or NULL
 586 */
 587const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area)
 588{
 589        return cpp_area->resource.name;
 590}
 591
 592/**
 593 * nfp_cpp_area_priv() - return private struct for CPP area
 594 * @cpp_area:   CPP area handle
 595 *
 596 * Return: Private data for the CPP area
 597 */
 598void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area)
 599{
 600        return &cpp_area[1];
 601}
 602
 603/**
 604 * nfp_cpp_area_cpp() - return CPP handle for CPP area
 605 * @cpp_area:   CPP area handle
 606 *
 607 * Return: NFP CPP handle
 608 */
 609struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area)
 610{
 611        return cpp_area->cpp;
 612}
 613
 614/**
 615 * nfp_cpp_area_resource() - get resource
 616 * @area:       CPP area handle
 617 *
 618 * NOTE: Area must have been locked down with an 'acquire'.
 619 *
 620 * Return: struct resource pointer, or NULL
 621 */
 622struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area)
 623{
 624        struct resource *res = NULL;
 625
 626        if (area->cpp->op->area_resource)
 627                res = area->cpp->op->area_resource(area);
 628
 629        return res;
 630}
 631
 632/**
 633 * nfp_cpp_area_phys() - get physical address of CPP area
 634 * @area:       CPP area handle
 635 *
 636 * NOTE: Area must have been locked down with an 'acquire'.
 637 *
  638 * Return: physical address of the area, or ~0 if not supported
 639 */
 640phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area)
 641{
 642        phys_addr_t addr = ~0;
 643
 644        if (area->cpp->op->area_phys)
 645                addr = area->cpp->op->area_phys(area);
 646
 647        return addr;
 648}
 649
 650/**
 651 * nfp_cpp_area_iomem() - get IOMEM region for CPP area
 652 * @area:       CPP area handle
 653 *
 654 * Returns an iomem pointer for use with readl()/writel() style
 655 * operations.
 656 *
 657 * NOTE: Area must have been locked down with an 'acquire'.
 658 *
 659 * Return: __iomem pointer to the area, or NULL
 660 */
 661void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area)
 662{
 663        void __iomem *iomem = NULL;
 664
 665        if (area->cpp->op->area_iomem)
 666                iomem = area->cpp->op->area_iomem(area);
 667
 668        return iomem;
 669}
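
/* Example (editor's illustrative sketch): with the area acquired, the mapping
 * returned above can be used with the normal MMIO accessors instead of
 * nfp_cpp_area_read()/nfp_cpp_area_write(); the offset is a placeholder.
 *
 *	void __iomem *io = nfp_cpp_area_iomem(area);
 *
 *	if (io)
 *		writel(0, io + 0x10);
 */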
 670
 671/**
 672 * nfp_cpp_area_readl() - Read a u32 word from an area
 673 * @area:       CPP Area handle
 674 * @offset:     Offset into area
 675 * @value:      Pointer to read buffer
 676 *
 677 * Return: 0 on success, or -ERRNO
 678 */
 679int nfp_cpp_area_readl(struct nfp_cpp_area *area,
 680                       unsigned long offset, u32 *value)
 681{
 682        u8 tmp[4];
 683        int n;
 684
 685        n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
 686        if (n != sizeof(tmp))
 687                return n < 0 ? n : -EIO;
 688
 689        *value = get_unaligned_le32(tmp);
 690        return 0;
 691}
 692
 693/**
 694 * nfp_cpp_area_writel() - Write a u32 word to an area
 695 * @area:       CPP Area handle
 696 * @offset:     Offset into area
 697 * @value:      Value to write
 698 *
 699 * Return: 0 on success, or -ERRNO
 700 */
 701int nfp_cpp_area_writel(struct nfp_cpp_area *area,
 702                        unsigned long offset, u32 value)
 703{
 704        u8 tmp[4];
 705        int n;
 706
 707        put_unaligned_le32(value, tmp);
 708        n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
 709
 710        return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 711}
 712
 713/**
 714 * nfp_cpp_area_readq() - Read a u64 word from an area
 715 * @area:       CPP Area handle
 716 * @offset:     Offset into area
 717 * @value:      Pointer to read buffer
 718 *
 719 * Return: 0 on success, or -ERRNO
 720 */
 721int nfp_cpp_area_readq(struct nfp_cpp_area *area,
 722                       unsigned long offset, u64 *value)
 723{
 724        u8 tmp[8];
 725        int n;
 726
 727        n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp));
 728        if (n != sizeof(tmp))
 729                return n < 0 ? n : -EIO;
 730
 731        *value = get_unaligned_le64(tmp);
 732        return 0;
 733}
 734
 735/**
 736 * nfp_cpp_area_writeq() - Write a u64 word to an area
 737 * @area:       CPP Area handle
 738 * @offset:     Offset into area
 739 * @value:      Value to write
 740 *
 741 * Return: 0 on success, or -ERRNO
 742 */
 743int nfp_cpp_area_writeq(struct nfp_cpp_area *area,
 744                        unsigned long offset, u64 value)
 745{
 746        u8 tmp[8];
 747        int n;
 748
 749        put_unaligned_le64(value, tmp);
 750        n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp));
 751
 752        return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO;
 753}
 754
 755/**
 756 * nfp_cpp_area_fill() - fill a CPP area with a value
 757 * @area:       CPP area
 758 * @offset:     offset into CPP area
 759 * @value:      value to fill with
 760 * @length:     length of area to fill
 761 *
 762 * Fill indicated area with given value.
 763 *
 764 * Return: length of io, or -ERRNO
 765 */
 766int nfp_cpp_area_fill(struct nfp_cpp_area *area,
 767                      unsigned long offset, u32 value, size_t length)
 768{
 769        u8 tmp[4];
 770        size_t i;
 771        int k;
 772
 773        put_unaligned_le32(value, tmp);
 774
 775        if (offset % sizeof(tmp) || length % sizeof(tmp))
 776                return -EINVAL;
 777
 778        for (i = 0; i < length; i += sizeof(tmp)) {
 779                k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp));
 780                if (k < 0)
 781                        return k;
 782        }
 783
 784        return i;
 785}
 786
 787/**
  788 * nfp_cpp_area_cache_add() - Permanently reserve an area for the hot cache
 789 * @cpp:        NFP CPP handle
 790 * @size:       Size of the area - MUST BE A POWER OF 2.
 791 */
 792int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size)
 793{
 794        struct nfp_cpp_area_cache *cache;
 795        struct nfp_cpp_area *area;
 796
 797        /* Allocate an area - we use the MU target's base as a placeholder,
 798         * as all supported chips have a MU.
 799         */
 800        area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0),
 801                                  0, size);
 802        if (!area)
 803                return -ENOMEM;
 804
 805        cache = kzalloc(sizeof(*cache), GFP_KERNEL);
  806        if (!cache) {
                     nfp_cpp_area_free(area); /* don't leak the area allocated above */
  807                return -ENOMEM;
             }
 808
 809        cache->id = 0;
 810        cache->addr = 0;
 811        cache->size = size;
 812        cache->area = area;
 813        mutex_lock(&cpp->area_cache_mutex);
 814        list_add_tail(&cache->entry, &cpp->area_cache_list);
 815        mutex_unlock(&cpp->area_cache_mutex);
 816
 817        return 0;
 818}
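
/* Example (editor's illustrative sketch): a transport driver would typically
 * reserve a few cache entries right after creating the CPP handle so that
 * nfp_cpp_read()/nfp_cpp_write() below can reuse already-acquired areas.
 * The size must be a power of two; 64 kB is only an example value.
 *
 *	err = nfp_cpp_area_cache_add(cpp, SZ_64K);
 *	if (err)
 *		return err;
 */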
 819
 820static struct nfp_cpp_area_cache *
 821area_cache_get(struct nfp_cpp *cpp, u32 id,
 822               u64 addr, unsigned long *offset, size_t length)
 823{
 824        struct nfp_cpp_area_cache *cache;
 825        int err;
 826
 827        /* Early exit when length == 0, which prevents
 828         * the need for special case code below when
 829         * checking against available cache size.
 830         */
 831        if (length == 0 || id == 0)
 832                return NULL;
 833
 834        /* Remap from cpp_island to cpp_target */
 835        err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table);
 836        if (err < 0)
 837                return NULL;
 838
 839        mutex_lock(&cpp->area_cache_mutex);
 840
 841        if (list_empty(&cpp->area_cache_list)) {
 842                mutex_unlock(&cpp->area_cache_mutex);
 843                return NULL;
 844        }
 845
 846        addr += *offset;
 847
 848        /* See if we have a match */
 849        list_for_each_entry(cache, &cpp->area_cache_list, entry) {
 850                if (id == cache->id &&
 851                    addr >= cache->addr &&
 852                    addr + length <= cache->addr + cache->size)
 853                        goto exit;
 854        }
 855
 856        /* No matches - inspect the tail of the LRU */
 857        cache = list_entry(cpp->area_cache_list.prev,
 858                           struct nfp_cpp_area_cache, entry);
 859
 860        /* Can we fit in the cache entry? */
 861        if (round_down(addr + length - 1, cache->size) !=
 862            round_down(addr, cache->size)) {
 863                mutex_unlock(&cpp->area_cache_mutex);
 864                return NULL;
 865        }
 866
 867        /* If id != 0, we will need to release it */
 868        if (cache->id) {
 869                nfp_cpp_area_release(cache->area);
 870                cache->id = 0;
 871                cache->addr = 0;
 872        }
 873
 874        /* Adjust the start address to be cache size aligned */
 875        cache->id = id;
 876        cache->addr = addr & ~(u64)(cache->size - 1);
 877
 878        /* Re-init to the new ID and address */
 879        if (cpp->op->area_init) {
 880                err = cpp->op->area_init(cache->area,
 881                                         id, cache->addr, cache->size);
 882                if (err < 0) {
 883                        mutex_unlock(&cpp->area_cache_mutex);
 884                        return NULL;
 885                }
 886        }
 887
 888        /* Attempt to acquire */
 889        err = nfp_cpp_area_acquire(cache->area);
 890        if (err < 0) {
 891                mutex_unlock(&cpp->area_cache_mutex);
 892                return NULL;
 893        }
 894
 895exit:
 896        /* Adjust offset */
 897        *offset = addr - cache->addr;
 898        return cache;
 899}
 900
 901static void
 902area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache)
 903{
 904        if (!cache)
 905                return;
 906
 907        /* Move to front of LRU */
 908        list_move(&cache->entry, &cpp->area_cache_list);
 909
 910        mutex_unlock(&cpp->area_cache_mutex);
 911}
 912
 913static int __nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
 914                          unsigned long long address, void *kernel_vaddr,
 915                          size_t length)
 916{
 917        struct nfp_cpp_area_cache *cache;
 918        struct nfp_cpp_area *area;
 919        unsigned long offset = 0;
 920        int err;
 921
 922        cache = area_cache_get(cpp, destination, address, &offset, length);
 923        if (cache) {
 924                area = cache->area;
 925        } else {
 926                area = nfp_cpp_area_alloc(cpp, destination, address, length);
 927                if (!area)
 928                        return -ENOMEM;
 929
 930                err = nfp_cpp_area_acquire(area);
 931                if (err) {
 932                        nfp_cpp_area_free(area);
 933                        return err;
 934                }
 935        }
 936
 937        err = nfp_cpp_area_read(area, offset, kernel_vaddr, length);
 938
 939        if (cache)
 940                area_cache_put(cpp, cache);
 941        else
 942                nfp_cpp_area_release_free(area);
 943
 944        return err;
 945}
 946
 947/**
 948 * nfp_cpp_read() - read from CPP target
 949 * @cpp:                CPP handle
 950 * @destination:        CPP id
 951 * @address:            offset into CPP target
 952 * @kernel_vaddr:       kernel buffer for result
 953 * @length:             number of bytes to read
 954 *
 955 * Return: length of io, or -ERRNO
 956 */
 957int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination,
 958                 unsigned long long address, void *kernel_vaddr,
 959                 size_t length)
 960{
 961        size_t n, offset;
 962        int ret;
 963
 964        for (offset = 0; offset < length; offset += n) {
 965                unsigned long long r_addr = address + offset;
 966
 967                /* make first read smaller to align to safe window */
 968                n = min_t(size_t, length - offset,
 969                          ALIGN(r_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - r_addr);
 970
 971                ret = __nfp_cpp_read(cpp, destination, address + offset,
 972                                     kernel_vaddr + offset, n);
 973                if (ret < 0)
 974                        return ret;
 975                if (ret != n)
 976                        return offset + n;
 977        }
 978
 979        return length;
 980}
 981
 982static int __nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
 983                           unsigned long long address,
 984                           const void *kernel_vaddr, size_t length)
 985{
 986        struct nfp_cpp_area_cache *cache;
 987        struct nfp_cpp_area *area;
 988        unsigned long offset = 0;
 989        int err;
 990
 991        cache = area_cache_get(cpp, destination, address, &offset, length);
 992        if (cache) {
 993                area = cache->area;
 994        } else {
 995                area = nfp_cpp_area_alloc(cpp, destination, address, length);
 996                if (!area)
 997                        return -ENOMEM;
 998
 999                err = nfp_cpp_area_acquire(area);
1000                if (err) {
1001                        nfp_cpp_area_free(area);
1002                        return err;
1003                }
1004        }
1005
1006        err = nfp_cpp_area_write(area, offset, kernel_vaddr, length);
1007
1008        if (cache)
1009                area_cache_put(cpp, cache);
1010        else
1011                nfp_cpp_area_release_free(area);
1012
1013        return err;
1014}
1015
1016/**
1017 * nfp_cpp_write() - write to CPP target
1018 * @cpp:                CPP handle
1019 * @destination:        CPP id
1020 * @address:            offset into CPP target
1021 * @kernel_vaddr:       kernel buffer to read from
1022 * @length:             number of bytes to write
1023 *
1024 * Return: length of io, or -ERRNO
1025 */
1026int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination,
1027                  unsigned long long address,
1028                  const void *kernel_vaddr, size_t length)
1029{
1030        size_t n, offset;
1031        int ret;
1032
1033        for (offset = 0; offset < length; offset += n) {
1034                unsigned long long w_addr = address + offset;
1035
1036                /* make first write smaller to align to safe window */
1037                n = min_t(size_t, length - offset,
1038                          ALIGN(w_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - w_addr);
1039
1040                ret = __nfp_cpp_write(cpp, destination, address + offset,
1041                                      kernel_vaddr + offset, n);
1042                if (ret < 0)
1043                        return ret;
1044                if (ret != n)
1045                        return offset + n;
1046        }
1047
1048        return length;
1049}
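
/* Example (editor's illustrative sketch): the two buffer helpers above hide
 * all area management, so a bulk transfer reduces to a single call.  The CPP
 * ID, address and buffer size here are placeholders.
 *
 *	u8 buf[64];
 *	int n;
 *
 *	n = nfp_cpp_read(cpp, NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0),
 *			 0x10000, buf, sizeof(buf));
 *	if (n != sizeof(buf))
 *		return n < 0 ? n : -EIO;
 */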
1050
1051/* Return the correct CPP address, and fixup xpb_addr as needed. */
1052static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr)
1053{
1054        int island;
1055        u32 xpb;
1056
 1057        xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); /* 14 = island XPB target */
1058        /* Ensure that non-local XPB accesses go
1059         * out through the global XPBM bus.
1060         */
1061        island = (*xpb_addr >> 24) & 0x3f;
1062        if (!island)
1063                return xpb;
1064
1065        if (island != 1) {
1066                *xpb_addr |= 1 << 30;
1067                return xpb;
1068        }
1069
 1070        /* Accesses to the ARM Island overlay use Island 0 / Global Bit */
1071        *xpb_addr &= ~0x7f000000;
1072        if (*xpb_addr < 0x60000) {
1073                *xpb_addr |= 1 << 30;
1074        } else {
1075                /* And only non-ARM interfaces use the island id = 1 */
1076                if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp))
1077                    != NFP_CPP_INTERFACE_TYPE_ARM)
1078                        *xpb_addr |= 1 << 24;
1079        }
1080
1081        return xpb;
1082}
1083
1084/**
1085 * nfp_xpb_readl() - Read a u32 word from a XPB location
1086 * @cpp:        CPP device handle
1087 * @xpb_addr:   Address for operation
1088 * @value:      Pointer to read buffer
1089 *
1090 * Return: 0 on success, or -ERRNO
1091 */
1092int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value)
1093{
1094        u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1095
1096        return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value);
1097}
1098
1099/**
1100 * nfp_xpb_writel() - Write a u32 word to a XPB location
1101 * @cpp:        CPP device handle
1102 * @xpb_addr:   Address for operation
1103 * @value:      Value to write
1104 *
1105 * Return: 0 on success, or -ERRNO
1106 */
1107int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value)
1108{
1109        u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr);
1110
1111        return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value);
1112}
1113
1114/**
1115 * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus
1116 * @cpp:        NFP CPP device handle
1117 * @xpb_tgt:    XPB target and address
1118 * @mask:       mask of bits to alter
1119 * @value:      value to modify
1120 *
1121 * KERNEL: This operation is safe to call in interrupt or softirq context.
1122 *
1123 * Return: 0 on success, or -ERRNO
1124 */
1125int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt,
1126                    u32 mask, u32 value)
1127{
1128        int err;
1129        u32 tmp;
1130
1131        err = nfp_xpb_readl(cpp, xpb_tgt, &tmp);
1132        if (err < 0)
1133                return err;
1134
1135        tmp &= ~mask;
1136        tmp |= mask & value;
1137        return nfp_xpb_writel(cpp, xpb_tgt, tmp);
1138}
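
/* Example (editor's illustrative sketch): set a single bit in an XPB CSR
 * without disturbing the rest of the register; "xpb_csr" is a placeholder
 * address owned by the caller.
 *
 *	err = nfp_xpb_writelm(cpp, xpb_csr, BIT(0), BIT(0));
 */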
1139
1140/* Lockdep markers */
1141static struct lock_class_key nfp_cpp_resource_lock_key;
1142
1143static void nfp_cpp_dev_release(struct device *dev)
1144{
1145        /* Nothing to do here - it just makes the kernel happy */
1146}
1147
1148/**
1149 * nfp_cpp_from_operations() - Create a NFP CPP handle
1150 *                             from an operations structure
1151 * @ops:        NFP CPP operations structure
1152 * @parent:     Parent device
1153 * @priv:       Private data of low-level implementation
1154 *
 1155 * NOTE: On failure, cpp_ops->free is not called; the caller still owns @priv.
1156 *
1157 * Return: NFP CPP handle on success, ERR_PTR on failure
1158 */
1159struct nfp_cpp *
1160nfp_cpp_from_operations(const struct nfp_cpp_operations *ops,
1161                        struct device *parent, void *priv)
1162{
1163        const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0);
1164        struct nfp_cpp *cpp;
1165        int ifc, err;
1166        u32 mask[2];
1167        u32 xpbaddr;
1168        size_t tgt;
1169
1170        cpp = kzalloc(sizeof(*cpp), GFP_KERNEL);
1171        if (!cpp) {
1172                err = -ENOMEM;
1173                goto err_malloc;
1174        }
1175
1176        cpp->op = ops;
1177        cpp->priv = priv;
1178
1179        ifc = ops->get_interface(parent);
1180        if (ifc < 0) {
1181                err = ifc;
1182                goto err_free_cpp;
1183        }
1184        cpp->interface = ifc;
1185        if (ops->read_serial) {
1186                err = ops->read_serial(parent, cpp->serial);
1187                if (err)
1188                        goto err_free_cpp;
1189        }
1190
1191        rwlock_init(&cpp->resource_lock);
1192        init_waitqueue_head(&cpp->waitq);
1193        lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key);
1194        INIT_LIST_HEAD(&cpp->resource_list);
1195        INIT_LIST_HEAD(&cpp->area_cache_list);
1196        mutex_init(&cpp->area_cache_mutex);
1197        cpp->dev.init_name = "cpp";
1198        cpp->dev.parent = parent;
1199        cpp->dev.release = nfp_cpp_dev_release;
1200        err = device_register(&cpp->dev);
1201        if (err < 0) {
1202                put_device(&cpp->dev);
1203                goto err_free_cpp;
1204        }
1205
1206        dev_set_drvdata(&cpp->dev, cpp);
1207
 1208        /* NOTE: no locks are held while calling op->init,
 1209         * since it may call NFP CPP API operations
 1210         */
1211        if (cpp->op->init) {
1212                err = cpp->op->init(cpp);
1213                if (err < 0) {
1214                        dev_err(parent,
1215                                "NFP interface initialization failed\n");
1216                        goto err_out;
1217                }
1218        }
1219
1220        err = nfp_cpp_model_autodetect(cpp, &cpp->model);
1221        if (err < 0) {
1222                dev_err(parent, "NFP model detection failed\n");
1223                goto err_out;
1224        }
1225
1226        for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) {
 1227                /* Hardcoded XPB IMB Base, island 0 */
1228                xpbaddr = 0x000a0000 + (tgt * 4);
1229                err = nfp_xpb_readl(cpp, xpbaddr,
1230                                    &cpp->imb_cat_table[tgt]);
1231                if (err < 0) {
1232                        dev_err(parent,
1233                                "Can't read CPP mapping from device\n");
1234                        goto err_out;
1235                }
1236        }
1237
1238        nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2,
1239                      &mask[0]);
1240        nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3,
1241                      &mask[1]);
1242
1243        err = nfp_cpp_set_mu_locality_lsb(cpp);
1244        if (err < 0) {
1245                dev_err(parent, "Can't calculate MU locality bit offset\n");
1246                goto err_out;
1247        }
1248
1249        dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n",
1250                 nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp));
1251
1252        return cpp;
1253
1254err_out:
1255        device_unregister(&cpp->dev);
1256err_free_cpp:
1257        kfree(cpp);
1258err_malloc:
1259        return ERR_PTR(err);
1260}
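
/* Example (editor's illustrative sketch): a transport back end fills in an
 * nfp_cpp_operations structure and hands it to the constructor above.  Only
 * callbacks referenced in this file are shown, and the my_bus_*() helpers are
 * hypothetical names.
 *
 *	static const struct nfp_cpp_operations my_bus_ops = {
 *		.get_interface		= my_bus_get_interface,
 *		.read_serial		= my_bus_read_serial,
 *		.area_priv_size		= sizeof(struct my_bus_area_priv),
 *		.area_init		= my_bus_area_init,
 *		.area_acquire		= my_bus_area_acquire,
 *		.area_release		= my_bus_area_release,
 *		.area_read		= my_bus_area_read,
 *		.area_write		= my_bus_area_write,
 *	};
 *
 *	cpp = nfp_cpp_from_operations(&my_bus_ops, &pdev->dev, priv);
 *	if (IS_ERR(cpp))
 *		return PTR_ERR(cpp);
 */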
1261
1262/**
1263 * nfp_cpp_priv() - Get the operations private data of a CPP handle
1264 * @cpp:        CPP handle
1265 *
1266 * Return: Private data for the NFP CPP handle
1267 */
1268void *nfp_cpp_priv(struct nfp_cpp *cpp)
1269{
1270        return cpp->priv;
1271}
1272
1273/**
1274 * nfp_cpp_device() - Get the Linux device handle of a CPP handle
1275 * @cpp:        CPP handle
1276 *
1277 * Return: Device for the NFP CPP bus
1278 */
1279struct device *nfp_cpp_device(struct nfp_cpp *cpp)
1280{
1281        return &cpp->dev;
1282}
1283
1284#define NFP_EXPL_OP(func, expl, args...)                          \
1285        ({                                                        \
1286                struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
1287                int err = -ENODEV;                                \
1288                                                                  \
1289                if (cpp->op->func)                                \
1290                        err = cpp->op->func(expl, ##args);        \
1291                err;                                              \
1292        })
1293
1294#define NFP_EXPL_OP_NR(func, expl, args...)                       \
1295        ({                                                        \
1296                struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \
1297                                                                  \
1298                if (cpp->op->func)                                \
1299                        cpp->op->func(expl, ##args);              \
1300                                                                  \
1301        })
1302
1303/**
1304 * nfp_cpp_explicit_acquire() - Acquire explicit access handle
1305 * @cpp:        NFP CPP handle
1306 *
1307 * The 'data_ref' and 'signal_ref' values are useful when
1308 * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values.
1309 *
1310 * Return: NFP CPP explicit handle
1311 */
1312struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp)
1313{
1314        struct nfp_cpp_explicit *expl;
1315        int err;
1316
1317        expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL);
1318        if (!expl)
1319                return NULL;
1320
1321        expl->cpp = cpp;
1322        err = NFP_EXPL_OP(explicit_acquire, expl);
1323        if (err < 0) {
1324                kfree(expl);
1325                return NULL;
1326        }
1327
1328        return expl;
1329}
1330
1331/**
1332 * nfp_cpp_explicit_set_target() - Set target fields for explicit
1333 * @expl:       Explicit handle
1334 * @cpp_id:     CPP ID field
1335 * @len:        CPP Length field
1336 * @mask:       CPP Mask field
1337 *
1338 * Return: 0, or -ERRNO
1339 */
1340int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl,
1341                                u32 cpp_id, u8 len, u8 mask)
1342{
1343        expl->cmd.cpp_id = cpp_id;
1344        expl->cmd.len = len;
1345        expl->cmd.byte_mask = mask;
1346
1347        return 0;
1348}
1349
1350/**
1351 * nfp_cpp_explicit_set_data() - Set data fields for explicit
1352 * @expl:       Explicit handle
1353 * @data_master: CPP Data Master field
1354 * @data_ref:   CPP Data Ref field
1355 *
1356 * Return: 0, or -ERRNO
1357 */
1358int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl,
1359                              u8 data_master, u16 data_ref)
1360{
1361        expl->cmd.data_master = data_master;
1362        expl->cmd.data_ref = data_ref;
1363
1364        return 0;
1365}
1366
1367/**
1368 * nfp_cpp_explicit_set_signal() - Set signal fields for explicit
1369 * @expl:       Explicit handle
1370 * @signal_master: CPP Signal Master field
1371 * @signal_ref: CPP Signal Ref field
1372 *
1373 * Return: 0, or -ERRNO
1374 */
1375int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl,
1376                                u8 signal_master, u8 signal_ref)
1377{
1378        expl->cmd.signal_master = signal_master;
1379        expl->cmd.signal_ref = signal_ref;
1380
1381        return 0;
1382}
1383
1384/**
1385 * nfp_cpp_explicit_set_posted() - Set completion fields for explicit
1386 * @expl:       Explicit handle
1387 * @posted:     True for signaled completion, false otherwise
1388 * @siga:       CPP Signal A field
1389 * @siga_mode:  CPP Signal A Mode field
1390 * @sigb:       CPP Signal B field
1391 * @sigb_mode:  CPP Signal B Mode field
1392 *
1393 * Return: 0, or -ERRNO
1394 */
1395int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted,
1396                                u8 siga,
1397                                enum nfp_cpp_explicit_signal_mode siga_mode,
1398                                u8 sigb,
1399                                enum nfp_cpp_explicit_signal_mode sigb_mode)
1400{
1401        expl->cmd.posted = posted;
1402        expl->cmd.siga = siga;
1403        expl->cmd.sigb = sigb;
1404        expl->cmd.siga_mode = siga_mode;
1405        expl->cmd.sigb_mode = sigb_mode;
1406
1407        return 0;
1408}
1409
1410/**
 1411 * nfp_cpp_explicit_put() - Set up the write (pull) data for an explicit access
1412 * @expl:       NFP CPP Explicit handle
1413 * @buff:       Data to have the target pull in the transaction
1414 * @len:        Length of data, in bytes
1415 *
1416 * The 'len' parameter must be less than or equal to 128 bytes.
1417 *
1418 * If this function is called before the configuration
1419 * registers are set, it will return -EINVAL.
1420 *
1421 * Return: 0, or -ERRNO
1422 */
1423int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl,
1424                         const void *buff, size_t len)
1425{
1426        return NFP_EXPL_OP(explicit_put, expl, buff, len);
1427}
1428
1429/**
1430 * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete
1431 * @expl:       NFP CPP Explicit handle
1432 * @address:    Address to send in the explicit transaction
1433 *
 1434 * If this function is called before the configuration
 1435 * registers are set, it will return -EINVAL.
1436 *
1437 * Return: 0, or -ERRNO
1438 */
1439int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address)
1440{
1441        return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address);
1442}
1443
1444/**
 1445 * nfp_cpp_explicit_get() - Get the 'push' (read) data from an explicit access
1446 * @expl:       NFP CPP Explicit handle
1447 * @buff:       Data that the target pushed in the transaction
1448 * @len:        Length of data, in bytes
1449 *
1450 * The 'len' parameter must be less than or equal to 128 bytes.
1451 *
 1452 * If this function is called before all three configuration
 1453 * registers are set, it will return -EINVAL.
 1454 *
 1455 * If this function is called before nfp_cpp_explicit_do()
 1456 * has completed, it will return -EBUSY.
1457 *
1458 * Return: 0, or -ERRNO
1459 */
1460int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len)
1461{
1462        return NFP_EXPL_OP(explicit_get, expl, buff, len);
1463}
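
/* Example (editor's illustrative sketch): a complete explicit transaction as
 * driven by the helpers above.  The CPP ID, address, length and byte-mask
 * values are placeholders; their encodings are target specific.
 *
 *	struct nfp_cpp_explicit *expl;
 *	u64 data;
 *	int err;
 *
 *	expl = nfp_cpp_explicit_acquire(cpp);
 *	if (!expl)
 *		return -EBUSY;
 *
 *	nfp_cpp_explicit_set_target(expl, cpp_id, len, 0xff);
 *	err = nfp_cpp_explicit_do(expl, address);
 *	if (err >= 0)
 *		err = nfp_cpp_explicit_get(expl, &data, sizeof(data));
 *	nfp_cpp_explicit_release(expl);
 *	return err;
 */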
1464
1465/**
1466 * nfp_cpp_explicit_release() - Release explicit access handle
1467 * @expl:       NFP CPP Explicit handle
1468 *
1469 */
1470void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl)
1471{
1472        NFP_EXPL_OP_NR(explicit_release, expl);
1473        kfree(expl);
1474}
1475
1476/**
1477 * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit
1478 * @cpp_explicit:       CPP explicit handle
1479 *
1480 * Return: NFP CPP handle of the explicit
1481 */
1482struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit)
1483{
1484        return cpp_explicit->cpp;
1485}
1486
1487/**
1488 * nfp_cpp_explicit_priv() - return private struct for CPP explicit
1489 * @cpp_explicit:       CPP explicit handle
1490 *
1491 * Return: private data of the explicit, or NULL
1492 */
1493void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit)
1494{
1495        return &cpp_explicit[1];
1496}
1497