linux/drivers/net/ethernet/netronome/nfp/ccm_mbox.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2019 Netronome Systems, Inc. */

#include <linux/bitfield.h>
#include <linux/io.h>
#include <linux/skbuff.h>

#include "ccm.h"
#include "nfp_net.h"

/* CCM messages via the mailbox.  CMSGs get wrapped into simple TLVs
 * and copied into the mailbox.  Multiple messages can be copied to
 * form a batch.  Threads come in with a CMSG formed in an skb, then
 * enqueue that skb onto the request queue.  If a thread's skb is first
 * in the queue, that thread will handle the mailbox operation.  It
 * copies up to 64 messages into the mailbox (making sure that both
 * requests and replies will fit).  After the FW is done processing the
 * batch it copies the data out and wakes the waiting threads.
 * If a thread is waiting it either gets its message completed
 * (the response is copied into the same skb as the request, overwriting
 * it), or becomes the first in the queue.
 * Completions and next-to-run are signaled via the control buffer
 * to limit potential cache line bounces.
 */
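
/* Minimal usage sketch (hypothetical caller; struct foo_req,
 * struct foo_reply and NFP_CCM_TYPE_FOO are illustrative only, not
 * part of this file).  The request must start with struct nfp_ccm_hdr,
 * which nfp_ccm_mbox_msg_enqueue() fills in:
 *
 *	skb = nfp_ccm_mbox_msg_alloc(nn, sizeof(struct foo_req),
 *				     sizeof(struct foo_reply), GFP_KERNEL);
 *	if (!skb)
 *		return -ENOMEM;
 *	req = (struct foo_req *)skb->data;
 *	// fill in everything past the CCM header ...
 *	err = nfp_ccm_mbox_communicate(nn, skb, NFP_CCM_TYPE_FOO,
 *				       sizeof(struct foo_reply),
 *				       sizeof(struct foo_reply));
 *	if (err)
 *		return err;	// skb was already freed on error
 *	reply = (struct foo_reply *)skb->data; // reply overwrote request
 *	// ... consume reply, then free the skb
 *	dev_consume_skb_any(skb);
 */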

#define NFP_CCM_MBOX_BATCH_LIMIT        64
#define NFP_CCM_TIMEOUT                 (NFP_NET_POLL_TIMEOUT * 1000)
#define NFP_CCM_MAX_QLEN                1024

enum nfp_net_mbox_cmsg_state {
        NFP_NET_MBOX_CMSG_STATE_QUEUED,
        NFP_NET_MBOX_CMSG_STATE_NEXT,
        NFP_NET_MBOX_CMSG_STATE_BUSY,
        NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND,
        NFP_NET_MBOX_CMSG_STATE_DONE,
};

/**
 * struct nfp_ccm_mbox_cmsg_cb - CCM mailbox specific info
 * @state:      processing state (/stage) of the message
 * @err:        error encountered during processing if any
 * @max_len:    max(request_len, reply_len)
 * @exp_reply:  expected reply length (0 means don't validate)
 * @posted:     the message was posted and nobody waits for the reply
 */
struct nfp_ccm_mbox_cmsg_cb {
        enum nfp_net_mbox_cmsg_state state;
        int err;
        unsigned int max_len;
        unsigned int exp_reply;
        bool posted;
};

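/* Largest cmsg payload the mailbox can carry: the TLV area rounded
 * down to a full word, minus the common command header and the Msg
 * plus End TLV headers.
 */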
static u32 nfp_ccm_mbox_max_msg(struct nfp_net *nn)
{
        return round_down(nn->tlv_caps.mbox_len, 4) -
                NFP_NET_CFG_MBOX_SIMPLE_VAL - /* common mbox command header */
                4 * 2; /* Msg TLV plus End TLV headers */
}

static void
nfp_ccm_mbox_msg_init(struct sk_buff *skb, unsigned int exp_reply, int max_len)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        cb->state = NFP_NET_MBOX_CMSG_STATE_QUEUED;
        cb->err = 0;
        cb->max_len = max_len;
        cb->exp_reply = exp_reply;
        cb->posted = false;
}

static int nfp_ccm_mbox_maxlen(const struct sk_buff *skb)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        return cb->max_len;
}

static bool nfp_ccm_mbox_done(struct sk_buff *skb)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        return cb->state == NFP_NET_MBOX_CMSG_STATE_DONE;
}

static bool nfp_ccm_mbox_in_progress(struct sk_buff *skb)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        return cb->state != NFP_NET_MBOX_CMSG_STATE_QUEUED &&
               cb->state != NFP_NET_MBOX_CMSG_STATE_NEXT;
}

static void nfp_ccm_mbox_set_busy(struct sk_buff *skb)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        cb->state = NFP_NET_MBOX_CMSG_STATE_BUSY;
}

static bool nfp_ccm_mbox_is_posted(struct sk_buff *skb)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        return cb->posted;
}

static void nfp_ccm_mbox_mark_posted(struct sk_buff *skb)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        cb->posted = true;
}

static bool nfp_ccm_mbox_is_first(struct nfp_net *nn, struct sk_buff *skb)
{
        return skb_queue_is_first(&nn->mbox_cmsg.queue, skb);
}

static bool nfp_ccm_mbox_should_run(struct nfp_net *nn, struct sk_buff *skb)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        return cb->state == NFP_NET_MBOX_CMSG_STATE_NEXT;
}

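/* Make the skb at the head of the queue the next runner.  Posted
 * messages have no thread waiting on them, so kick the workqueue to
 * run the batch on their behalf.  Called with the queue lock held.
 */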
static void nfp_ccm_mbox_mark_next_runner(struct nfp_net *nn)
{
        struct nfp_ccm_mbox_cmsg_cb *cb;
        struct sk_buff *skb;

        skb = skb_peek(&nn->mbox_cmsg.queue);
        if (!skb)
                return;

        cb = (void *)skb->cb;
        cb->state = NFP_NET_MBOX_CMSG_STATE_NEXT;
        if (cb->posted)
                queue_work(nn->mbox_cmsg.workq, &nn->mbox_cmsg.runq_work);
}

static void
nfp_ccm_mbox_write_tlv(struct nfp_net *nn, u32 off, u32 type, u32 len)
{
        nn_writel(nn, off,
                  FIELD_PREP(NFP_NET_MBOX_TLV_TYPE, type) |
                  FIELD_PREP(NFP_NET_MBOX_TLV_LEN, len));
}

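/* Copy requests from the head of the queue, up to and including
 * @last, into the mailbox as Msg TLVs.  A Resv TLV reserves extra
 * room when the reply may be longer than the request, and an End TLV
 * terminates the batch.  Caller holds the control BAR lock.
 */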
static void nfp_ccm_mbox_copy_in(struct nfp_net *nn, struct sk_buff *last)
{
        struct sk_buff *skb;
        int reserve, i, cnt;
        __be32 *data;
        u32 off, len;

        off = nn->tlv_caps.mbox_off + NFP_NET_CFG_MBOX_SIMPLE_VAL;
        skb = __skb_peek(&nn->mbox_cmsg.queue);
        while (true) {
                nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_MSG,
                                       skb->len);
                off += 4;

                /* Write data word by word, skb->data should be aligned */
                data = (__be32 *)skb->data;
                cnt = skb->len / 4;
                for (i = 0; i < cnt; i++) {
                        nn_writel(nn, off, be32_to_cpu(data[i]));
                        off += 4;
                }
                if (skb->len & 3) {
                        __be32 tmp = 0;

                        memcpy(&tmp, &data[i], skb->len & 3);
                        nn_writel(nn, off, be32_to_cpu(tmp));
                        off += 4;
                }

                /* Reserve space if reply is bigger */
                len = round_up(skb->len, 4);
                reserve = nfp_ccm_mbox_maxlen(skb) - len;
                if (reserve > 0) {
                        nfp_ccm_mbox_write_tlv(nn, off,
                                               NFP_NET_MBOX_TLV_TYPE_RESV,
                                               reserve);
                        off += 4 + reserve;
                }

                if (skb == last)
                        break;
                skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
        }

        nfp_ccm_mbox_write_tlv(nn, off, NFP_NET_MBOX_TLV_TYPE_END, 0);
}

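/* Find the queued request whose CCM tag matches @tag, searching from
 * the head of the queue up to and including @last.
 */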
static struct sk_buff *
nfp_ccm_mbox_find_req(struct nfp_net *nn, __be16 tag, struct sk_buff *last)
{
        struct sk_buff *skb;

        skb = __skb_peek(&nn->mbox_cmsg.queue);
        while (true) {
                if (__nfp_ccm_get_tag(skb) == tag)
                        return skb;

                if (skb == last)
                        return NULL;
                skb = skb_queue_next(&nn->mbox_cmsg.queue, skb);
        }
}

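/* Walk the reply TLVs in the mailbox, match each reply to a queued
 * request by tag, validate it and copy it back into the request skb
 * (posted messages are not copied).  Then dequeue every message up to
 * and including @last, marking it done, and pick the next runner.
 * Caller holds the control BAR lock.
 */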
static void nfp_ccm_mbox_copy_out(struct nfp_net *nn, struct sk_buff *last)
{
        struct nfp_ccm_mbox_cmsg_cb *cb;
        u8 __iomem *data, *end;
        struct sk_buff *skb;

        data = nn->dp.ctrl_bar + nn->tlv_caps.mbox_off +
                NFP_NET_CFG_MBOX_SIMPLE_VAL;
        end = data + nn->tlv_caps.mbox_len;

        while (true) {
                unsigned int length, offset, type;
                struct nfp_ccm_hdr hdr;
                u32 tlv_hdr;

                tlv_hdr = readl(data);
                type = FIELD_GET(NFP_NET_MBOX_TLV_TYPE, tlv_hdr);
                length = FIELD_GET(NFP_NET_MBOX_TLV_LEN, tlv_hdr);
                offset = data - nn->dp.ctrl_bar;

                /* Advance past the header */
                data += 4;

                if (data + length > end) {
                        nn_dp_warn(&nn->dp, "mailbox oversized TLV type:%d offset:%u len:%u\n",
                                   type, offset, length);
                        break;
                }

                if (type == NFP_NET_MBOX_TLV_TYPE_END)
                        break;
                if (type == NFP_NET_MBOX_TLV_TYPE_RESV)
                        goto next_tlv;
                if (type != NFP_NET_MBOX_TLV_TYPE_MSG &&
                    type != NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
                        nn_dp_warn(&nn->dp, "mailbox unknown TLV type:%d offset:%u len:%u\n",
                                   type, offset, length);
                        break;
                }

                if (length < 4) {
                        nn_dp_warn(&nn->dp, "mailbox msg too short to contain header TLV type:%d offset:%u len:%u\n",
                                   type, offset, length);
                        break;
                }

                hdr.raw = cpu_to_be32(readl(data));

                skb = nfp_ccm_mbox_find_req(nn, hdr.tag, last);
                if (!skb) {
                        nn_dp_warn(&nn->dp, "mailbox request not found:%u\n",
                                   be16_to_cpu(hdr.tag));
                        break;
                }
                cb = (void *)skb->cb;

                if (type == NFP_NET_MBOX_TLV_TYPE_MSG_NOSUP) {
                        nn_dp_warn(&nn->dp,
                                   "mailbox msg not supported type:%d\n",
                                   nfp_ccm_get_type(skb));
                        cb->err = -EIO;
                        goto next_tlv;
                }

                if (hdr.type != __NFP_CCM_REPLY(nfp_ccm_get_type(skb))) {
                        nn_dp_warn(&nn->dp, "mailbox msg reply wrong type:%u expected:%lu\n",
                                   hdr.type,
                                   __NFP_CCM_REPLY(nfp_ccm_get_type(skb)));
                        cb->err = -EIO;
                        goto next_tlv;
                }
                if (cb->exp_reply && length != cb->exp_reply) {
                        nn_dp_warn(&nn->dp, "mailbox msg reply wrong size type:%u expected:%u have:%u\n",
                                   hdr.type, cb->exp_reply, length);
                        cb->err = -EIO;
                        goto next_tlv;
                }
                if (length > cb->max_len) {
                        nn_dp_warn(&nn->dp, "mailbox msg oversized reply type:%u max:%u have:%u\n",
                                   hdr.type, cb->max_len, length);
                        cb->err = -EIO;
                        goto next_tlv;
                }

                if (!cb->posted) {
                        __be32 *skb_data;
                        int i, cnt;

                        if (length <= skb->len)
                                __skb_trim(skb, length);
                        else
                                skb_put(skb, length - skb->len);

                        /* We overcopy here slightly, but that's okay,
                         * the skb is large enough, and the garbage will
                         * be ignored (beyond skb->len).
                         */
                        skb_data = (__be32 *)skb->data;
                        memcpy(skb_data, &hdr, 4);

                        cnt = DIV_ROUND_UP(length, 4);
                        for (i = 1; i < cnt; i++)
                                skb_data[i] = cpu_to_be32(readl(data + i * 4));
                }

                cb->state = NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND;
next_tlv:
                data += round_up(length, 4);
                if (data + 4 > end) {
                        nn_dp_warn(&nn->dp,
                                   "reached end of MBOX without END TLV\n");
                        break;
                }
        }

        smp_wmb(); /* order the skb->data vs. cb->state */
        spin_lock_bh(&nn->mbox_cmsg.queue.lock);
        do {
                skb = __skb_dequeue(&nn->mbox_cmsg.queue);
                cb = (void *)skb->cb;

                if (cb->state != NFP_NET_MBOX_CMSG_STATE_REPLY_FOUND) {
                        cb->err = -ENOENT;
                        smp_wmb(); /* order the cb->err vs. cb->state */
                }
                cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;

                if (cb->posted) {
                        if (cb->err)
                                nn_dp_warn(&nn->dp,
                                           "mailbox posted msg failed type:%u err:%d\n",
                                           nfp_ccm_get_type(skb), cb->err);
                        dev_consume_skb_any(skb);
                }
        } while (skb != last);

        nfp_ccm_mbox_mark_next_runner(nn);
        spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
}

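/* Dequeue and complete every message up to and including @last with
 * @err, used when the mailbox reconfig itself failed.
 */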
static void
nfp_ccm_mbox_mark_all_err(struct nfp_net *nn, struct sk_buff *last, int err)
{
        struct nfp_ccm_mbox_cmsg_cb *cb;
        struct sk_buff *skb;

        spin_lock_bh(&nn->mbox_cmsg.queue.lock);
        do {
                skb = __skb_dequeue(&nn->mbox_cmsg.queue);
                cb = (void *)skb->cb;

                cb->err = err;
                smp_wmb(); /* order the cb->err vs. cb->state */
                cb->state = NFP_NET_MBOX_CMSG_STATE_DONE;
        } while (skb != last);

        nfp_ccm_mbox_mark_next_runner(nn);
        spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
}

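/* Called with the queue lock held by the thread whose skb is first in
 * the queue: select as many queued messages as fit into the mailbox
 * (up to the batch limit), mark them busy, drop the queue lock and
 * run the batch against the FW.
 */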
static void nfp_ccm_mbox_run_queue_unlock(struct nfp_net *nn)
        __releases(&nn->mbox_cmsg.queue.lock)
{
        int space = nn->tlv_caps.mbox_len - NFP_NET_CFG_MBOX_SIMPLE_VAL;
        struct sk_buff *skb, *last;
        int cnt, err;

        space -= 4; /* for End TLV */

        /* First skb must fit, because it's ours and we checked it fits */
        cnt = 1;
        last = skb = __skb_peek(&nn->mbox_cmsg.queue);
        space -= 4 + nfp_ccm_mbox_maxlen(skb);

        while (!skb_queue_is_last(&nn->mbox_cmsg.queue, last)) {
                skb = skb_queue_next(&nn->mbox_cmsg.queue, last);
                space -= 4 + nfp_ccm_mbox_maxlen(skb);
                if (space < 0)
                        break;
                last = skb;
                nfp_ccm_mbox_set_busy(skb);
                cnt++;
                if (cnt == NFP_CCM_MBOX_BATCH_LIMIT)
                        break;
        }
        spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

        /* Now we own all skb's marked in progress, new requests may arrive
         * at the end of the queue.
         */

        nn_ctrl_bar_lock(nn);

        nfp_ccm_mbox_copy_in(nn, last);

        err = nfp_net_mbox_reconfig(nn, NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
        if (!err)
                nfp_ccm_mbox_copy_out(nn, last);
        else
                nfp_ccm_mbox_mark_all_err(nn, last, -EIO);

        nn_ctrl_bar_unlock(nn);

        wake_up_all(&nn->mbox_cmsg.wq);
}

static int nfp_ccm_mbox_skb_return(struct sk_buff *skb)
{
        struct nfp_ccm_mbox_cmsg_cb *cb = (void *)skb->cb;

        if (cb->err)
                dev_kfree_skb_any(skb);
        return cb->err;
}

/* If the wait timed out but the command is already in progress we have
 * to wait until it finishes.  The runner has ownership of the skbs
 * marked as busy.
 */
static int
nfp_ccm_mbox_unlink_unlock(struct nfp_net *nn, struct sk_buff *skb,
                           enum nfp_ccm_type type)
        __releases(&nn->mbox_cmsg.queue.lock)
{
        bool was_first;

        if (nfp_ccm_mbox_in_progress(skb)) {
                spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

                wait_event(nn->mbox_cmsg.wq, nfp_ccm_mbox_done(skb));
                smp_rmb(); /* pairs with smp_wmb() after data is written */
                return nfp_ccm_mbox_skb_return(skb);
        }

        was_first = nfp_ccm_mbox_should_run(nn, skb);
        __skb_unlink(skb, &nn->mbox_cmsg.queue);
        if (was_first)
                nfp_ccm_mbox_mark_next_runner(nn);

        spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

        if (was_first)
                wake_up_all(&nn->mbox_cmsg.wq);

        nn_dp_warn(&nn->dp, "time out waiting for mbox response to 0x%02x\n",
                   type);
        return -ETIMEDOUT;
}

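/* Validate that the FW supports @type, make sure the skb can hold the
 * largest possible reply without reallocation, and check that both
 * the request and the reply fit into the mailbox.  Initializes the
 * control buffer state on success.
 */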
static int
nfp_ccm_mbox_msg_prepare(struct nfp_net *nn, struct sk_buff *skb,
                         enum nfp_ccm_type type,
                         unsigned int reply_size, unsigned int max_reply_size,
                         gfp_t flags)
{
        const unsigned int mbox_max = nfp_ccm_mbox_max_msg(nn);
        unsigned int max_len;
        ssize_t undersize;
        int err;

        if (unlikely(!(nn->tlv_caps.mbox_cmsg_types & BIT(type)))) {
                nn_dp_warn(&nn->dp,
                           "message type %d not supported by mailbox\n", type);
                return -EINVAL;
        }

        /* If the reply size is unknown assume it will take the entire
         * mailbox, the callers should do their best for this to never
         * happen.
         */
        if (!max_reply_size)
                max_reply_size = mbox_max;
        max_reply_size = round_up(max_reply_size, 4);

        /* Make sure we can fit the entire reply into the skb,
         * and that we don't have to slow down the mbox handler
         * with allocations.
         */
        undersize = max_reply_size - (skb_end_pointer(skb) - skb->data);
        if (undersize > 0) {
                err = pskb_expand_head(skb, 0, undersize, flags);
                if (err) {
                        nn_dp_warn(&nn->dp,
                                   "can't allocate reply buffer for mailbox\n");
                        return err;
                }
        }

        /* Make sure that request and response both fit into the mailbox */
        max_len = max(max_reply_size, round_up(skb->len, 4));
        if (max_len > mbox_max) {
                nn_dp_warn(&nn->dp,
                           "message too big for the mailbox: %u/%u vs %u\n",
                           skb->len, max_reply_size, mbox_max);
                return -EMSGSIZE;
        }

        nfp_ccm_mbox_msg_init(skb, reply_size, max_len);

        return 0;
}

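/* Fill in the CCM header (ABI version, type and a fresh tag) and
 * append the skb to the request queue.  Queue length is bounded by
 * NFP_CCM_MAX_QLEN unless @critical.  Caller holds the queue lock.
 */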
static int
nfp_ccm_mbox_msg_enqueue(struct nfp_net *nn, struct sk_buff *skb,
                         enum nfp_ccm_type type, bool critical)
{
        struct nfp_ccm_hdr *hdr;

        assert_spin_locked(&nn->mbox_cmsg.queue.lock);

        if (!critical && nn->mbox_cmsg.queue.qlen >= NFP_CCM_MAX_QLEN) {
                nn_dp_warn(&nn->dp, "mailbox request queue too long\n");
                return -EBUSY;
        }

        hdr = (void *)skb->data;
        hdr->ver = NFP_CCM_ABI_VERSION;
        hdr->type = type;
        hdr->tag = cpu_to_be16(nn->mbox_cmsg.tag++);

        __skb_queue_tail(&nn->mbox_cmsg.queue, skb);

        return 0;
}

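/* Send a request and wait for the reply.  On success the reply
 * overwrites the request in @skb; on error the skb is freed.  The
 * thread whose skb is first in the queue runs the mailbox batch on
 * behalf of everyone queued behind it.
 */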
int __nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
                               enum nfp_ccm_type type,
                               unsigned int reply_size,
                               unsigned int max_reply_size, bool critical)
{
        int err;

        err = nfp_ccm_mbox_msg_prepare(nn, skb, type, reply_size,
                                       max_reply_size, GFP_KERNEL);
        if (err)
                goto err_free_skb;

        spin_lock_bh(&nn->mbox_cmsg.queue.lock);

        err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, critical);
        if (err)
                goto err_unlock;

        /* First in queue takes the mailbox lock and processes the batch */
        if (!nfp_ccm_mbox_is_first(nn, skb)) {
                bool to;

                spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

                to = !wait_event_timeout(nn->mbox_cmsg.wq,
                                         nfp_ccm_mbox_done(skb) ||
                                         nfp_ccm_mbox_should_run(nn, skb),
                                         msecs_to_jiffies(NFP_CCM_TIMEOUT));

                /* fast path for those completed by another thread */
                if (nfp_ccm_mbox_done(skb)) {
                        smp_rmb(); /* pairs with wmb after data is written */
                        return nfp_ccm_mbox_skb_return(skb);
                }

                spin_lock_bh(&nn->mbox_cmsg.queue.lock);

                if (!nfp_ccm_mbox_is_first(nn, skb)) {
                        WARN_ON(!to);

                        err = nfp_ccm_mbox_unlink_unlock(nn, skb, type);
                        if (err)
                                goto err_free_skb;
                        return 0;
                }
        }

        /* run queue expects the lock held */
        nfp_ccm_mbox_run_queue_unlock(nn);
        return nfp_ccm_mbox_skb_return(skb);

err_unlock:
        spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
err_free_skb:
        dev_kfree_skb_any(skb);
        return err;
}

int nfp_ccm_mbox_communicate(struct nfp_net *nn, struct sk_buff *skb,
                             enum nfp_ccm_type type,
                             unsigned int reply_size,
                             unsigned int max_reply_size)
{
        return __nfp_ccm_mbox_communicate(nn, skb, type, reply_size,
                                          max_reply_size, false);
}

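/* Work item: run the queue on behalf of a posted message which became
 * the next runner, since no thread is waiting on it.
 */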
static void nfp_ccm_mbox_post_runq_work(struct work_struct *work)
{
        struct sk_buff *skb;
        struct nfp_net *nn;

        nn = container_of(work, struct nfp_net, mbox_cmsg.runq_work);

        spin_lock_bh(&nn->mbox_cmsg.queue.lock);

        skb = __skb_peek(&nn->mbox_cmsg.queue);
        if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb) ||
                    !nfp_ccm_mbox_should_run(nn, skb))) {
                spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
                return;
        }

        nfp_ccm_mbox_run_queue_unlock(nn);
}

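/* Work item: wait for the FW to complete a posted batch, then copy
 * out the replies (or fail all messages) and release the control BAR
 * lock taken in nfp_ccm_mbox_post().
 */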
static void nfp_ccm_mbox_post_wait_work(struct work_struct *work)
{
        struct sk_buff *skb;
        struct nfp_net *nn;
        int err;

        nn = container_of(work, struct nfp_net, mbox_cmsg.wait_work);

        skb = skb_peek(&nn->mbox_cmsg.queue);
        if (WARN_ON(!skb || !nfp_ccm_mbox_is_posted(skb)))
                /* Should never happen so it's unclear what to do here. */
                goto exit_unlock_wake;

        err = nfp_net_mbox_reconfig_wait_posted(nn);
        if (!err)
                nfp_ccm_mbox_copy_out(nn, skb);
        else
                nfp_ccm_mbox_mark_all_err(nn, skb, -EIO);
exit_unlock_wake:
        nn_ctrl_bar_unlock(nn);
        wake_up_all(&nn->mbox_cmsg.wq);
}

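/* Fire-and-forget version of communicate - enqueue the message and
 * return without waiting for the reply.  Uses GFP_ATOMIC and only
 * trylocks the control BAR, so it does not sleep; the skb is consumed
 * once the FW response has been processed.
 */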
int nfp_ccm_mbox_post(struct nfp_net *nn, struct sk_buff *skb,
                      enum nfp_ccm_type type, unsigned int max_reply_size)
{
        int err;

        err = nfp_ccm_mbox_msg_prepare(nn, skb, type, 0, max_reply_size,
                                       GFP_ATOMIC);
        if (err)
                goto err_free_skb;

        nfp_ccm_mbox_mark_posted(skb);

        spin_lock_bh(&nn->mbox_cmsg.queue.lock);

        err = nfp_ccm_mbox_msg_enqueue(nn, skb, type, false);
        if (err)
                goto err_unlock;

        if (nfp_ccm_mbox_is_first(nn, skb)) {
                if (nn_ctrl_bar_trylock(nn)) {
                        nfp_ccm_mbox_copy_in(nn, skb);
                        nfp_net_mbox_reconfig_post(nn,
                                                   NFP_NET_CFG_MBOX_CMD_TLV_CMSG);
                        queue_work(nn->mbox_cmsg.workq,
                                   &nn->mbox_cmsg.wait_work);
                } else {
                        nfp_ccm_mbox_mark_next_runner(nn);
                }
        }

        spin_unlock_bh(&nn->mbox_cmsg.queue.lock);

        return 0;

err_unlock:
        spin_unlock_bh(&nn->mbox_cmsg.queue.lock);
err_free_skb:
        dev_kfree_skb_any(skb);
        return err;
}

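/* Allocate a request skb with room for the reply as well (or for the
 * largest possible mailbox message if @reply_size is unknown).
 */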
struct sk_buff *
nfp_ccm_mbox_msg_alloc(struct nfp_net *nn, unsigned int req_size,
                       unsigned int reply_size, gfp_t flags)
{
        unsigned int max_size;
        struct sk_buff *skb;

        if (!reply_size)
                max_size = nfp_ccm_mbox_max_msg(nn);
        else
                max_size = max(req_size, reply_size);
        max_size = round_up(max_size, 4);

        skb = alloc_skb(max_size, flags);
        if (!skb)
                return NULL;

        skb_put(skb, req_size);

        return skb;
}

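/* True if a message of @size fits into the mailbox. */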
bool nfp_ccm_mbox_fits(struct nfp_net *nn, unsigned int size)
{
        return nfp_ccm_mbox_max_msg(nn) >= size;
}

int nfp_ccm_mbox_init(struct nfp_net *nn)
{
        return 0;
}

void nfp_ccm_mbox_clean(struct nfp_net *nn)
{
        drain_workqueue(nn->mbox_cmsg.workq);
}

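/* Set up the per-vNIC mailbox state: the request queue, the waiters'
 * wait queue, and the workqueue used for posted messages.
 */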
int nfp_ccm_mbox_alloc(struct nfp_net *nn)
{
        skb_queue_head_init(&nn->mbox_cmsg.queue);
        init_waitqueue_head(&nn->mbox_cmsg.wq);
        INIT_WORK(&nn->mbox_cmsg.wait_work, nfp_ccm_mbox_post_wait_work);
        INIT_WORK(&nn->mbox_cmsg.runq_work, nfp_ccm_mbox_post_runq_work);

        nn->mbox_cmsg.workq = alloc_workqueue("nfp-ccm-mbox", WQ_UNBOUND, 0);
        if (!nn->mbox_cmsg.workq)
                return -ENOMEM;
        return 0;
}

void nfp_ccm_mbox_free(struct nfp_net *nn)
{
        destroy_workqueue(nn->mbox_cmsg.workq);
        WARN_ON(!skb_queue_empty(&nn->mbox_cmsg.queue));
}