linux/drivers/net/ipa/gsi.c
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2018-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/mutex.h>
#include <linux/completion.h>
#include <linux/io.h>
#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/netdevice.h>

#include "gsi.h"
#include "gsi_reg.h"
#include "gsi_private.h"
#include "gsi_trans.h"
#include "ipa_gsi.h"
#include "ipa_data.h"
#include "ipa_version.h"

/**
 * DOC: The IPA Generic Software Interface
 *
 * The generic software interface (GSI) is an integral component of the IPA,
 * providing a well-defined communication layer between the AP subsystem
 * and the IPA core.  The modem uses the GSI layer as well.
 *
 *      --------             ---------
 *      |      |             |       |
 *      |  AP  +<---.   .----+ Modem |
 *      |      +--. |   | .->+       |
 *      |      |  | |   | |  |       |
 *      --------  | |   | |  ---------
 *                v |   v |
 *              --+-+---+-+--
 *              |    GSI    |
 *              |-----------|
 *              |           |
 *              |    IPA    |
 *              |           |
 *              -------------
 *
 * In the above diagram, the AP and Modem represent "execution environments"
 * (EEs), which are independent operating environments that use the IPA for
 * data transfer.
 *
 * Each EE uses a set of unidirectional GSI "channels," which allow transfer
 * of data to or from the IPA.  A channel is implemented as a ring buffer,
 * with a DRAM-resident array of "transfer elements" (TREs) available to
 * describe transfers to or from other EEs through the IPA.  A transfer
 * element can also contain an immediate command, requesting the IPA perform
 * actions other than data transfer.
 *
 * Each TRE refers to a block of data--also located in DRAM.  After writing one
 * or more TREs to a channel, the writer (either the IPA or an EE) writes a
 * doorbell register to inform the receiving side how many elements have
 * been written.
 *
 * Each channel has a GSI "event ring" associated with it.  An event ring
 * is implemented very much like a channel ring, but is always directed from
 * the IPA to an EE.  The IPA notifies an EE (such as the AP) about channel
 * events by adding an entry to the event ring associated with the channel.
 * The GSI then writes its doorbell for the event ring, causing the target
 * EE to be interrupted.  Each entry in an event ring contains a pointer
 * to the channel TRE whose completion the event represents.
 *
 * Each TRE in a channel ring has a set of flags.  One flag indicates whether
 * the completion of the transfer operation generates an entry (and possibly
 * an interrupt) in the channel's event ring.  Other flags allow transfer
 * elements to be chained together, forming a single logical transaction.
 * TRE flags are used to control whether and when interrupts are generated
 * to signal completion of channel transfers.
 *
 * Elements in channel and event rings are completed (or consumed) strictly
 * in order.  Completion of one entry implies the completion of all preceding
 * entries.  A single completion interrupt can therefore communicate the
 * completion of many transfers.
 *
 * Note that all GSI registers are little-endian, which is the assumed
 * endianness of I/O space accesses.  The accessor functions perform byte
 * swapping if needed (i.e., for a big endian CPU).
 */
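
/* A rough usage sketch (illustrative only, not part of this driver): the
 * IPA endpoint code is the expected caller of the channel API implemented
 * below.  For a channel that has already been allocated and programmed,
 * use looks roughly like this:
 *
 *      ret = gsi_channel_start(gsi, channel_id);
 *      if (ret)
 *              return ret;
 *      ...     (queue transactions; ring the channel doorbell)
 *      ret = gsi_channel_stop(gsi, channel_id);
 */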

/* Delay period for interrupt moderation (in 32KHz IPA internal timer ticks) */
#define GSI_EVT_RING_INT_MODT           (32 * 1) /* 1ms under 32KHz clock */

#define GSI_CMD_TIMEOUT                 50      /* milliseconds */

#define GSI_CHANNEL_STOP_RETRIES        10
#define GSI_CHANNEL_MODEM_HALT_RETRIES  10
#define GSI_CHANNEL_MODEM_FLOW_RETRIES  5       /* disable flow control only */

#define GSI_MHI_EVENT_ID_START          10      /* 1st reserved event id */
#define GSI_MHI_EVENT_ID_END            16      /* Last reserved event id */

#define GSI_ISR_MAX_ITER                50      /* Detect interrupt storms */

/* An entry in an event ring */
struct gsi_event {
        __le64 xfer_ptr;
        __le16 len;
        u8 reserved1;
        u8 code;
        __le16 reserved2;
        u8 type;
        u8 chid;
};
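
/* Within this file, an event's xfer_ptr field is used to map a completion
 * event back to its channel TRE (see gsi_event_trans()), and its len field
 * supplies the actual byte count received for an RX transfer (see
 * gsi_evt_ring_rx_update()).
 */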

/** struct gsi_channel_scratch_gpi - GPI protocol scratch register
 * @max_outstanding_tre:
 *      Defines the maximum number of TREs allowed in a single transaction
 *      on a channel (in bytes).  This determines the amount of prefetch
 *      performed by the hardware.  We configure this to equal the size of
 *      the TLV FIFO for the channel.
 * @outstanding_threshold:
 *      Defines the threshold (in bytes) determining when the sequencer
 *      should update the channel doorbell.  We configure this to equal
 *      the size of two TREs.
 */
struct gsi_channel_scratch_gpi {
        u64 reserved1;
        u16 reserved2;
        u16 max_outstanding_tre;
        u16 reserved3;
        u16 outstanding_threshold;
};

/** union gsi_channel_scratch - channel scratch configuration area
 *
 * The exact interpretation of this register is protocol-specific.
 * We only use GPI channels; see struct gsi_channel_scratch_gpi, above.
 */
union gsi_channel_scratch {
        struct gsi_channel_scratch_gpi gpi;
        struct {
                u32 word1;
                u32 word2;
                u32 word3;
                u32 word4;
        } data;
};
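
/* On a little-endian CPU, the data words above overlay the GPI fields as
 * follows: word1 and word2 cover the 8-byte reserved1 area, word3 carries
 * max_outstanding_tre in its upper 16 bits, and word4 carries
 * outstanding_threshold in its upper 16 bits.  gsi_channel_program()
 * below writes these words into the channel's scratch registers.
 */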

/* Check things that can be validated at build time. */
static void gsi_validate_build(void)
{
        /* This is used as a divisor */
        BUILD_BUG_ON(!GSI_RING_ELEMENT_SIZE);

        /* Code assumes the sizes of channel and event ring elements are
         * the same (and fixed).  Make sure the size of an event ring
         * element is what's expected.
         */
        BUILD_BUG_ON(sizeof(struct gsi_event) != GSI_RING_ELEMENT_SIZE);

        /* Hardware requires a 2^n ring size.  We ensure the number of
         * elements in an event ring is a power of 2 elsewhere; this
         * ensures the elements themselves meet the requirement.
         */
        BUILD_BUG_ON(!is_power_of_2(GSI_RING_ELEMENT_SIZE));

        /* The channel element size must fit in this field */
        BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(ELEMENT_SIZE_FMASK));

        /* The event ring element size must fit in this field */
        BUILD_BUG_ON(GSI_RING_ELEMENT_SIZE > field_max(EV_ELEMENT_SIZE_FMASK));
}

/* Return the channel id associated with a given channel */
static u32 gsi_channel_id(struct gsi_channel *channel)
{
        return channel - &channel->gsi->channel[0];
}

/* An initialized channel has a non-null GSI pointer */
static bool gsi_channel_initialized(struct gsi_channel *channel)
{
        return !!channel->gsi;
}

/* Update the GSI IRQ type register with the cached value */
static void gsi_irq_type_update(struct gsi *gsi, u32 val)
{
        gsi->type_enabled_bitmap = val;
        iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
}

static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
        gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
}

static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
{
        gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
}

/* Event ring commands are performed one at a time.  Their completion
 * is signaled by the event ring control GSI interrupt type, which is
 * only enabled when we issue an event ring command.  Only the event
 * ring being operated on has this interrupt enabled.
 */
static void gsi_irq_ev_ctrl_enable(struct gsi *gsi, u32 evt_ring_id)
{
        u32 val = BIT(evt_ring_id);

        /* There's a small chance that a previous command completed
         * after the interrupt was disabled, so make sure we have no
         * pending interrupts before we enable them.
         */
        iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
        gsi_irq_type_enable(gsi, GSI_EV_CTRL);
}

/* Disable event ring control interrupts */
static void gsi_irq_ev_ctrl_disable(struct gsi *gsi)
{
        gsi_irq_type_disable(gsi, GSI_EV_CTRL);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
}

/* Channel commands are performed one at a time.  Their completion is
 * signaled by the channel control GSI interrupt type, which is only
 * enabled when we issue a channel command.  Only the channel being
 * operated on has this interrupt enabled.
 */
static void gsi_irq_ch_ctrl_enable(struct gsi *gsi, u32 channel_id)
{
        u32 val = BIT(channel_id);

        /* There's a small chance that a previous command completed
         * after the interrupt was disabled, so make sure we have no
         * pending interrupts before we enable them.
         */
        iowrite32(~0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
        gsi_irq_type_enable(gsi, GSI_CH_CTRL);
}

/* Disable channel control interrupts */
static void gsi_irq_ch_ctrl_disable(struct gsi *gsi)
{
        gsi_irq_type_disable(gsi, GSI_CH_CTRL);
        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_enable_one(struct gsi *gsi, u32 evt_ring_id)
{
        bool enable_ieob = !gsi->ieob_enabled_bitmap;
        u32 val;

        gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
        val = gsi->ieob_enabled_bitmap;
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);

        /* Enable the interrupt type if this is the first channel enabled */
        if (enable_ieob)
                gsi_irq_type_enable(gsi, GSI_IEOB);
}

static void gsi_irq_ieob_disable(struct gsi *gsi, u32 event_mask)
{
        u32 val;

        gsi->ieob_enabled_bitmap &= ~event_mask;

        /* Disable the interrupt type if this was the last enabled channel */
        if (!gsi->ieob_enabled_bitmap)
                gsi_irq_type_disable(gsi, GSI_IEOB);

        val = gsi->ieob_enabled_bitmap;
        iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
}

static void gsi_irq_ieob_disable_one(struct gsi *gsi, u32 evt_ring_id)
{
        gsi_irq_ieob_disable(gsi, BIT(evt_ring_id));
}

/* Enable all GSI interrupt types */
static void gsi_irq_enable(struct gsi *gsi)
{
        u32 val;

        /* Global interrupts include hardware error reports.  Enable
         * that so we can at least report the error should it occur.
         */
        iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
        gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

        /* General GSI interrupts are reported to all EEs; if they occur
         * they are unrecoverable (without reset).  A breakpoint interrupt
         * also exists, but we don't support that.  We want to be notified
         * of errors so we can report them, even if they can't be handled.
         */
        val = BIT(BUS_ERROR);
        val |= BIT(CMD_FIFO_OVRFLOW);
        val |= BIT(MCS_STACK_OVRFLOW);
        iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
        gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
}

/* Disable all GSI interrupt types */
static void gsi_irq_disable(struct gsi *gsi)
{
        gsi_irq_type_update(gsi, 0);

        /* Clear the type-specific interrupt masks set by gsi_irq_enable() */
        iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
        iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
}

/* Return the virtual address associated with a ring index */
void *gsi_ring_virt(struct gsi_ring *ring, u32 index)
{
        /* Note: index *must* be used modulo the ring count here */
        return ring->virt + (index % ring->count) * GSI_RING_ELEMENT_SIZE;
}

/* Return the 32-bit DMA address associated with a ring index */
static u32 gsi_ring_addr(struct gsi_ring *ring, u32 index)
{
        return lower_32_bits(ring->addr) + index * GSI_RING_ELEMENT_SIZE;
}

/* Return the ring index of a 32-bit ring offset */
static u32 gsi_ring_index(struct gsi_ring *ring, u32 offset)
{
        return (offset - gsi_ring_addr(ring, 0)) / GSI_RING_ELEMENT_SIZE;
}
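
/* As a worked example of the ring arithmetic (a 256-element ring is
 * hypothetical): with 16-byte elements, gsi_ring_virt(ring, 257) yields
 * ring->virt + 16, since 257 % 256 == 1.  gsi_ring_index() is the inverse
 * of gsi_ring_addr() for any 32-bit offset that lies within the ring.
 */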

/* Issue a GSI command by writing a value to a register, then wait for
 * completion to be signaled.  Returns true if the command completes
 * or false if it times out.
 */
static bool gsi_command(struct gsi *gsi, u32 reg, u32 val)
{
        unsigned long timeout = msecs_to_jiffies(GSI_CMD_TIMEOUT);
        struct completion *completion = &gsi->completion;

        reinit_completion(completion);

        iowrite32(val, gsi->virt + reg);

        return !!wait_for_completion_timeout(completion, timeout);
}
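
/* A single completion structure is shared by all channel and event ring
 * commands, so commands must be serialized; the channel start, stop and
 * reset paths below do this by holding gsi->mutex around each command.
 */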

/* Return the hardware's notion of the current state of an event ring */
static enum gsi_evt_ring_state
gsi_evt_ring_state(struct gsi *gsi, u32 evt_ring_id)
{
        u32 val;

        val = ioread32(gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

        return u32_get_bits(val, EV_CHSTATE_FMASK);
}

/* Issue an event ring command and wait for it to complete */
static void gsi_evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
                                 enum gsi_evt_cmd_opcode opcode)
{
        struct device *dev = gsi->dev;
        bool timeout;
        u32 val;

        /* Enable the completion interrupt for the command */
        gsi_irq_ev_ctrl_enable(gsi, evt_ring_id);

        val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
        val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

        timeout = !gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val);

        gsi_irq_ev_ctrl_disable(gsi);

        if (!timeout)
                return;

        dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
                opcode, evt_ring_id, gsi_evt_ring_state(gsi, evt_ring_id));
}

/* Allocate an event ring in NOT_ALLOCATED state */
static int gsi_evt_ring_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
        enum gsi_evt_ring_state state;

        /* Get initial event ring state */
        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state != GSI_EVT_RING_STATE_NOT_ALLOCATED) {
                dev_err(gsi->dev, "event ring %u bad state %u before alloc\n",
                        evt_ring_id, state);
                return -EINVAL;
        }

        gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_ALLOCATE);

        /* If successful the event ring state will have changed */
        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state == GSI_EVT_RING_STATE_ALLOCATED)
                return 0;

        dev_err(gsi->dev, "event ring %u bad state %u after alloc\n",
                evt_ring_id, state);

        return -EIO;
}

/* Reset a GSI event ring in ALLOCATED or ERROR state. */
static void gsi_evt_ring_reset_command(struct gsi *gsi, u32 evt_ring_id)
{
        enum gsi_evt_ring_state state;

        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state != GSI_EVT_RING_STATE_ALLOCATED &&
            state != GSI_EVT_RING_STATE_ERROR) {
                dev_err(gsi->dev, "event ring %u bad state %u before reset\n",
                        evt_ring_id, state);
                return;
        }

        gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_RESET);

        /* If successful the event ring state will have changed */
        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state == GSI_EVT_RING_STATE_ALLOCATED)
                return;

        dev_err(gsi->dev, "event ring %u bad state %u after reset\n",
                evt_ring_id, state);
}

/* Issue a hardware de-allocation request for an allocated event ring */
static void gsi_evt_ring_de_alloc_command(struct gsi *gsi, u32 evt_ring_id)
{
        enum gsi_evt_ring_state state;

        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state != GSI_EVT_RING_STATE_ALLOCATED) {
                dev_err(gsi->dev, "event ring %u state %u before dealloc\n",
                        evt_ring_id, state);
                return;
        }

        gsi_evt_ring_command(gsi, evt_ring_id, GSI_EVT_DE_ALLOC);

        /* If successful the event ring state will have changed */
        state = gsi_evt_ring_state(gsi, evt_ring_id);
        if (state == GSI_EVT_RING_STATE_NOT_ALLOCATED)
                return;

        dev_err(gsi->dev, "event ring %u bad state %u after dealloc\n",
                evt_ring_id, state);
}

/* Fetch the current state of a channel from hardware */
static enum gsi_channel_state gsi_channel_state(struct gsi_channel *channel)
{
        u32 channel_id = gsi_channel_id(channel);
        void __iomem *virt = channel->gsi->virt;
        u32 val;

        val = ioread32(virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

        return u32_get_bits(val, CHSTATE_FMASK);
}

/* Issue a channel command and wait for it to complete */
static void
gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
{
        u32 channel_id = gsi_channel_id(channel);
        struct gsi *gsi = channel->gsi;
        struct device *dev = gsi->dev;
        bool timeout;
        u32 val;

        /* Enable the completion interrupt for the command */
        gsi_irq_ch_ctrl_enable(gsi, channel_id);

        val = u32_encode_bits(channel_id, CH_CHID_FMASK);
        val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
        timeout = !gsi_command(gsi, GSI_CH_CMD_OFFSET, val);

        gsi_irq_ch_ctrl_disable(gsi);

        if (!timeout)
                return;

        dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
                opcode, channel_id, gsi_channel_state(channel));
}

/* Allocate GSI channel in NOT_ALLOCATED state */
static int gsi_channel_alloc_command(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct device *dev = gsi->dev;
        enum gsi_channel_state state;

        /* Get initial channel state */
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED) {
                dev_err(dev, "channel %u bad state %u before alloc\n",
                        channel_id, state);
                return -EINVAL;
        }

        gsi_channel_command(channel, GSI_CH_ALLOCATE);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
        if (state == GSI_CHANNEL_STATE_ALLOCATED)
                return 0;

        dev_err(dev, "channel %u bad state %u after alloc\n",
                channel_id, state);

        return -EIO;
}

/* Start an ALLOCATED channel */
static int gsi_channel_start_command(struct gsi_channel *channel)
{
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;

        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED &&
            state != GSI_CHANNEL_STATE_STOPPED) {
                dev_err(dev, "channel %u bad state %u before start\n",
                        gsi_channel_id(channel), state);
                return -EINVAL;
        }

        gsi_channel_command(channel, GSI_CH_START);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
        if (state == GSI_CHANNEL_STATE_STARTED)
                return 0;

        dev_err(dev, "channel %u bad state %u after start\n",
                gsi_channel_id(channel), state);

        return -EIO;
}

/* Stop a GSI channel in STARTED state */
static int gsi_channel_stop_command(struct gsi_channel *channel)
{
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;

        state = gsi_channel_state(channel);

        /* Channel could have entered STOPPED state since last call
         * if it timed out.  If so, we're done.
         */
        if (state == GSI_CHANNEL_STATE_STOPPED)
                return 0;

        if (state != GSI_CHANNEL_STATE_STARTED &&
            state != GSI_CHANNEL_STATE_STOP_IN_PROC) {
                dev_err(dev, "channel %u bad state %u before stop\n",
                        gsi_channel_id(channel), state);
                return -EINVAL;
        }

        gsi_channel_command(channel, GSI_CH_STOP);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
        if (state == GSI_CHANNEL_STATE_STOPPED)
                return 0;

        /* We may have to try again if stop is in progress */
        if (state == GSI_CHANNEL_STATE_STOP_IN_PROC)
                return -EAGAIN;

        dev_err(dev, "channel %u bad state %u after stop\n",
                gsi_channel_id(channel), state);

        return -EIO;
}

/* Reset a GSI channel in ALLOCATED or ERROR state. */
static void gsi_channel_reset_command(struct gsi_channel *channel)
{
        struct device *dev = channel->gsi->dev;
        enum gsi_channel_state state;

        /* A short delay is required before a RESET command */
        usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);

        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_STOPPED &&
            state != GSI_CHANNEL_STATE_ERROR) {
                /* No need to reset a channel already in ALLOCATED state */
                if (state != GSI_CHANNEL_STATE_ALLOCATED)
                        dev_err(dev, "channel %u bad state %u before reset\n",
                                gsi_channel_id(channel), state);
                return;
        }

        gsi_channel_command(channel, GSI_CH_RESET);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED)
                dev_err(dev, "channel %u bad state %u after reset\n",
                        gsi_channel_id(channel), state);
}

/* Deallocate an ALLOCATED GSI channel */
static void gsi_channel_de_alloc_command(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        struct device *dev = gsi->dev;
        enum gsi_channel_state state;

        state = gsi_channel_state(channel);
        if (state != GSI_CHANNEL_STATE_ALLOCATED) {
                dev_err(dev, "channel %u bad state %u before dealloc\n",
                        channel_id, state);
                return;
        }

        gsi_channel_command(channel, GSI_CH_DE_ALLOC);

        /* If successful the channel state will have changed */
        state = gsi_channel_state(channel);

        if (state != GSI_CHANNEL_STATE_NOT_ALLOCATED)
                dev_err(dev, "channel %u bad state %u after dealloc\n",
                        channel_id, state);
}

/* Ring an event ring doorbell, reporting the last entry processed by the AP.
 * The index argument (modulo the ring count) is the first unfilled entry, so
 * we supply one less than that with the doorbell.  Update the event ring
 * index field with the value provided.
 */
static void gsi_evt_ring_doorbell(struct gsi *gsi, u32 evt_ring_id, u32 index)
{
        struct gsi_ring *ring = &gsi->evt_ring[evt_ring_id].ring;
        u32 val;

        ring->index = index;    /* Next unused entry */

        /* Note: index *must* be used modulo the ring count here */
        val = gsi_ring_addr(ring, (index - 1) % ring->count);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_DOORBELL_0_OFFSET(evt_ring_id));
}
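
/* Note the wraparound above: ringing the doorbell with index 0 (as
 * gsi_evt_ring_program() does) reports the address of the final ring
 * element, because (u32)(0 - 1) modulo a power-of-2 ring count is
 * count - 1.
 */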

/* Program an event ring for use */
static void gsi_evt_ring_program(struct gsi *gsi, u32 evt_ring_id)
{
        struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
        size_t size = evt_ring->ring.count * GSI_RING_ELEMENT_SIZE;
        u32 val;

        /* We program all event rings as GPI type/protocol */
        val = u32_encode_bits(GSI_CHANNEL_TYPE_GPI, EV_CHTYPE_FMASK);
        val |= EV_INTYPE_FMASK;
        val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, EV_ELEMENT_SIZE_FMASK);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_0_OFFSET(evt_ring_id));

        val = ev_r_length_encoded(gsi->version, size);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_1_OFFSET(evt_ring_id));

        /* The context 2 and 3 registers store the low-order and
         * high-order 32 bits of the address of the event ring,
         * respectively.
         */
        val = lower_32_bits(evt_ring->ring.addr);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_2_OFFSET(evt_ring_id));
        val = upper_32_bits(evt_ring->ring.addr);
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_3_OFFSET(evt_ring_id));

        /* Enable interrupt moderation by setting the moderation delay */
        val = u32_encode_bits(GSI_EVT_RING_INT_MODT, MODT_FMASK);
        val |= u32_encode_bits(1, MODC_FMASK);  /* comes from channel */
        iowrite32(val, gsi->virt + GSI_EV_CH_E_CNTXT_8_OFFSET(evt_ring_id));

        /* No MSI write data; the MSI address high and low registers are 0 */
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_9_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_10_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_11_OFFSET(evt_ring_id));

        /* We don't need to get event read pointer updates */
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_12_OFFSET(evt_ring_id));
        iowrite32(0, gsi->virt + GSI_EV_CH_E_CNTXT_13_OFFSET(evt_ring_id));

        /* Finally, tell the hardware we've completed event 0 (arbitrary) */
        gsi_evt_ring_doorbell(gsi, evt_ring_id, 0);
}

/* Find the transaction whose completion indicates a channel is quiesced */
static struct gsi_trans *gsi_channel_trans_last(struct gsi_channel *channel)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;
        const struct list_head *list;
        struct gsi_trans *trans;

        spin_lock_bh(&trans_info->spinlock);

        /* There is a small chance a TX transaction got allocated just
         * before we disabled transmits, so check for that.
         */
        if (channel->toward_ipa) {
                list = &trans_info->alloc;
                if (!list_empty(list))
                        goto done;
                list = &trans_info->pending;
                if (!list_empty(list))
                        goto done;
        }

        /* Otherwise (TX or RX) we want to wait for anything that
         * has completed, or has been polled but not released yet.
         */
        list = &trans_info->complete;
        if (!list_empty(list))
                goto done;
        list = &trans_info->polled;
        if (list_empty(list))
                list = NULL;
done:
        trans = list ? list_last_entry(list, struct gsi_trans, links) : NULL;

        /* Caller will wait for this, so take a reference */
        if (trans)
                refcount_inc(&trans->refcount);

        spin_unlock_bh(&trans_info->spinlock);

        return trans;
}

/* Wait for transaction activity on a channel to complete */
static void gsi_channel_trans_quiesce(struct gsi_channel *channel)
{
        struct gsi_trans *trans;

        /* Get the last transaction, and wait for it to complete */
        trans = gsi_channel_trans_last(channel);
        if (trans) {
                wait_for_completion(&trans->completion);
                gsi_trans_free(trans);
        }
}

/* Program a channel for use; there is no gsi_channel_deprogram() */
static void gsi_channel_program(struct gsi_channel *channel, bool doorbell)
{
        size_t size = channel->tre_ring.count * GSI_RING_ELEMENT_SIZE;
        u32 channel_id = gsi_channel_id(channel);
        union gsi_channel_scratch scr = { };
        struct gsi_channel_scratch_gpi *gpi;
        struct gsi *gsi = channel->gsi;
        u32 wrr_weight = 0;
        u32 val;

        /* Arbitrarily pick TRE 0 as the first channel element to use */
        channel->tre_ring.index = 0;

        /* We program all channels as GPI type/protocol */
        val = chtype_protocol_encoded(gsi->version, GSI_CHANNEL_TYPE_GPI);
        if (channel->toward_ipa)
                val |= CHTYPE_DIR_FMASK;
        val |= u32_encode_bits(channel->evt_ring_id, ERINDEX_FMASK);
        val |= u32_encode_bits(GSI_RING_ELEMENT_SIZE, ELEMENT_SIZE_FMASK);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_0_OFFSET(channel_id));

        val = r_length_encoded(gsi->version, size);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_1_OFFSET(channel_id));

        /* The context 2 and 3 registers store the low-order and
         * high-order 32 bits of the address of the channel ring,
         * respectively.
         */
        val = lower_32_bits(channel->tre_ring.addr);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_2_OFFSET(channel_id));
        val = upper_32_bits(channel->tre_ring.addr);
        iowrite32(val, gsi->virt + GSI_CH_C_CNTXT_3_OFFSET(channel_id));

        /* Command channel gets low weighted round-robin priority */
        if (channel->command)
                wrr_weight = field_max(WRR_WEIGHT_FMASK);
        val = u32_encode_bits(wrr_weight, WRR_WEIGHT_FMASK);

        /* Max prefetch is 1 segment (do not set MAX_PREFETCH_FMASK) */

        /* No need to use the doorbell engine starting at IPA v4.0 */
        if (gsi->version < IPA_VERSION_4_0 && doorbell)
                val |= USE_DB_ENG_FMASK;

        /* v4.0 introduces an escape buffer for prefetch.  We use it
         * on all but the AP command channel.
         */
        if (gsi->version >= IPA_VERSION_4_0 && !channel->command) {
                /* If not otherwise set, prefetch buffers are used */
                if (gsi->version < IPA_VERSION_4_5)
                        val |= USE_ESCAPE_BUF_ONLY_FMASK;
                else
                        val |= u32_encode_bits(GSI_ESCAPE_BUF_ONLY,
                                               PREFETCH_MODE_FMASK);
        }
        /* All channels set DB_IN_BYTES */
        if (gsi->version >= IPA_VERSION_4_9)
                val |= DB_IN_BYTES;

        iowrite32(val, gsi->virt + GSI_CH_C_QOS_OFFSET(channel_id));

        /* Now update the scratch registers for GPI protocol */
        gpi = &scr.gpi;
        gpi->max_outstanding_tre = gsi_channel_trans_tre_max(gsi, channel_id) *
                                        GSI_RING_ELEMENT_SIZE;
        gpi->outstanding_threshold = 2 * GSI_RING_ELEMENT_SIZE;

        val = scr.data.word1;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_0_OFFSET(channel_id));

        val = scr.data.word2;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_1_OFFSET(channel_id));

        val = scr.data.word3;
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_2_OFFSET(channel_id));

        /* We must preserve the lower 16 bits of the last scratch register;
         * the sequence below assumes those bits remain unchanged between
         * the read and the write.
         */
        val = ioread32(gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));
        val = (scr.data.word4 & GENMASK(31, 16)) | (val & GENMASK(15, 0));
        iowrite32(val, gsi->virt + GSI_CH_C_SCRATCH_3_OFFSET(channel_id));

        /* All done! */
}
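
/* To make the scratch arithmetic concrete: if gsi_channel_trans_tre_max()
 * returned (a hypothetical) 8 TREs, max_outstanding_tre would be
 * 8 * 16 = 128 bytes, and outstanding_threshold would be 32 bytes
 * (two 16-byte ring elements).
 */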

static int __gsi_channel_start(struct gsi_channel *channel, bool resume)
{
        struct gsi *gsi = channel->gsi;
        int ret;

        /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
        if (resume && gsi->version < IPA_VERSION_4_0)
                return 0;

        mutex_lock(&gsi->mutex);

        ret = gsi_channel_start_command(channel);

        mutex_unlock(&gsi->mutex);

        return ret;
}

/* Start an allocated GSI channel */
int gsi_channel_start(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        int ret;

        /* Enable NAPI and the completion interrupt */
        napi_enable(&channel->napi);
        gsi_irq_ieob_enable_one(gsi, channel->evt_ring_id);

        ret = __gsi_channel_start(channel, false);
        if (ret) {
                gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
                napi_disable(&channel->napi);
        }

        return ret;
}

static int gsi_channel_stop_retry(struct gsi_channel *channel)
{
        u32 retries = GSI_CHANNEL_STOP_RETRIES;
        int ret;

        do {
                ret = gsi_channel_stop_command(channel);
                if (ret != -EAGAIN)
                        break;
                usleep_range(3 * USEC_PER_MSEC, 5 * USEC_PER_MSEC);
        } while (retries--);

        return ret;
}
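
/* In the worst case a stop can therefore take roughly half a second:
 * up to GSI_CHANNEL_STOP_RETRIES iterations, each a 3-5 millisecond
 * sleep plus a command that may wait up to GSI_CMD_TIMEOUT (50 ms).
 */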

static int __gsi_channel_stop(struct gsi_channel *channel, bool suspend)
{
        struct gsi *gsi = channel->gsi;
        int ret;

        /* Wait for any underway transactions to complete before stopping. */
        gsi_channel_trans_quiesce(channel);

        /* Prior to IPA v4.0 suspend/resume is not implemented by GSI */
        if (suspend && gsi->version < IPA_VERSION_4_0)
                return 0;

        mutex_lock(&gsi->mutex);

        ret = gsi_channel_stop_retry(channel);

        mutex_unlock(&gsi->mutex);

        return ret;
}

/* Stop a started channel */
int gsi_channel_stop(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        int ret;

        ret = __gsi_channel_stop(channel, false);
        if (ret)
                return ret;

        /* Disable the completion interrupt and NAPI if successful */
        gsi_irq_ieob_disable_one(gsi, channel->evt_ring_id);
        napi_disable(&channel->napi);

        return 0;
}

/* Reset and reconfigure a channel, (possibly) enabling the doorbell engine */
void gsi_channel_reset(struct gsi *gsi, u32 channel_id, bool doorbell)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        mutex_lock(&gsi->mutex);

        gsi_channel_reset_command(channel);
        /* Due to a hardware quirk we may need to reset RX channels twice. */
        if (gsi->version < IPA_VERSION_4_0 && !channel->toward_ipa)
                gsi_channel_reset_command(channel);

        gsi_channel_program(channel, doorbell);
        gsi_channel_trans_cancel_pending(channel);

        mutex_unlock(&gsi->mutex);
}

/* Stop a started channel for suspend */
int gsi_channel_suspend(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];
        int ret;

        ret = __gsi_channel_stop(channel, true);
        if (ret)
                return ret;

        /* Ensure NAPI polling has finished. */
        napi_synchronize(&channel->napi);

        return 0;
}

/* Resume a suspended channel (starting if stopped) */
int gsi_channel_resume(struct gsi *gsi, u32 channel_id)
{
        struct gsi_channel *channel = &gsi->channel[channel_id];

        return __gsi_channel_start(channel, true);
}

/* Prevent all GSI interrupts while suspended */
void gsi_suspend(struct gsi *gsi)
{
        disable_irq(gsi->irq);
}

/* Allow all GSI interrupts again when resuming */
void gsi_resume(struct gsi *gsi)
{
        enable_irq(gsi->irq);
}

/**
 * gsi_channel_tx_queued() - Report queued TX transfers for a channel
 * @channel:    Channel for which to report
 *
 * Report to the network stack the number of bytes and transactions that
 * have been queued to hardware since last call.  This and the next function
 * supply information used by the network stack for throttling.
 *
 * For each channel we track the number of transactions used and bytes of
 * data those transactions represent.  We also track what those values are
 * each time this function is called.  Subtracting the two tells us
 * the number of bytes and transactions that have been added between
 * successive calls.
 *
 * Calling this each time we ring the channel doorbell allows us to
 * provide accurate information to the network stack about how much
 * work we've given the hardware at any point in time.
 */
void gsi_channel_tx_queued(struct gsi_channel *channel)
{
        u32 trans_count;
        u32 byte_count;

        byte_count = channel->byte_count - channel->queued_byte_count;
        trans_count = channel->trans_count - channel->queued_trans_count;
        channel->queued_byte_count = channel->byte_count;
        channel->queued_trans_count = channel->trans_count;

        ipa_gsi_channel_tx_queued(channel->gsi, gsi_channel_id(channel),
                                  trans_count, byte_count);
}

/**
 * gsi_channel_tx_update() - Report completed TX transfers
 * @channel:    Channel that has completed transmitting packets
 * @trans:      Last transaction known to be complete
 *
 * Compute the number of transactions and bytes that have been transferred
 * over a TX channel since the given transaction was committed.  Report this
 * information to the network stack.
 *
 * At the time a transaction is committed, we record its channel's
 * committed transaction and byte counts *in the transaction*.
 * Completions are signaled by the hardware with an interrupt, and
 * we can determine the latest completed transaction at that time.
 *
 * The difference between the byte/transaction count recorded in
 * the transaction and the count last time we recorded a completion
 * tells us exactly how much data has been transferred between
 * completions.
 *
 * Calling this each time we learn of a newly-completed transaction
 * allows us to provide accurate information to the network stack
 * about how much work has been completed by the hardware at a given
 * point in time.
 */
static void
gsi_channel_tx_update(struct gsi_channel *channel, struct gsi_trans *trans)
{
        u64 byte_count = trans->byte_count + trans->len;
        u64 trans_count = trans->trans_count + 1;

        byte_count -= channel->compl_byte_count;
        channel->compl_byte_count += byte_count;
        trans_count -= channel->compl_trans_count;
        channel->compl_trans_count += trans_count;

        ipa_gsi_channel_tx_completed(channel->gsi, gsi_channel_id(channel),
                                     trans_count, byte_count);
}

/* Channel control interrupt handler */
static void gsi_isr_chan_ctrl(struct gsi *gsi)
{
        u32 channel_mask;

        channel_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_CH_IRQ_OFFSET);
        iowrite32(channel_mask, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_CLR_OFFSET);

        while (channel_mask) {
                u32 channel_id = __ffs(channel_mask);

                channel_mask ^= BIT(channel_id);

                complete(&gsi->completion);
        }
}

/* Event ring control interrupt handler */
static void gsi_isr_evt_ctrl(struct gsi *gsi)
{
        u32 event_mask;

        event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_OFFSET);
        iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_CLR_OFFSET);

        while (event_mask) {
                u32 evt_ring_id = __ffs(event_mask);

                event_mask ^= BIT(evt_ring_id);

                complete(&gsi->completion);
        }
}

/* Global channel error interrupt handler */
static void
gsi_isr_glob_chan_err(struct gsi *gsi, u32 err_ee, u32 channel_id, u32 code)
{
        if (code == GSI_OUT_OF_RESOURCES) {
                dev_err(gsi->dev, "channel %u out of resources\n", channel_id);
                complete(&gsi->completion);
                return;
        }

        /* Report, but otherwise ignore all other error codes */
        dev_err(gsi->dev, "channel %u global error ee 0x%08x code 0x%08x\n",
                channel_id, err_ee, code);
}

/* Global event error interrupt handler */
static void
gsi_isr_glob_evt_err(struct gsi *gsi, u32 err_ee, u32 evt_ring_id, u32 code)
{
        if (code == GSI_OUT_OF_RESOURCES) {
                struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
                u32 channel_id = gsi_channel_id(evt_ring->channel);

                complete(&gsi->completion);
                dev_err(gsi->dev, "evt_ring for channel %u out of resources\n",
                        channel_id);
                return;
        }

        /* Report, but otherwise ignore all other error codes */
        dev_err(gsi->dev, "event ring %u global error ee %u code 0x%08x\n",
                evt_ring_id, err_ee, code);
}

/* Global error interrupt handler */
static void gsi_isr_glob_err(struct gsi *gsi)
{
        enum gsi_err_type type;
        enum gsi_err_code code;
        u32 which;
        u32 val;
        u32 ee;

        /* Get the logged error, then reinitialize the log */
        val = ioread32(gsi->virt + GSI_ERROR_LOG_OFFSET);
        iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
        iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

        ee = u32_get_bits(val, ERR_EE_FMASK);
        type = u32_get_bits(val, ERR_TYPE_FMASK);
        which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
        code = u32_get_bits(val, ERR_CODE_FMASK);

        if (type == GSI_ERR_TYPE_CHAN)
                gsi_isr_glob_chan_err(gsi, ee, which, code);
        else if (type == GSI_ERR_TYPE_EVT)
                gsi_isr_glob_evt_err(gsi, ee, which, code);
        else    /* type GSI_ERR_TYPE_GLOB should be fatal */
                dev_err(gsi->dev, "unexpected global error 0x%08x\n", type);
}

/* Generic EE interrupt handler */
static void gsi_isr_gp_int1(struct gsi *gsi)
{
        u32 result;
        u32 val;

        /* This interrupt is used to handle completions of GENERIC GSI
         * commands.  We use these to allocate and halt channels on the
         * modem's behalf due to a hardware quirk on IPA v4.2.  The modem
         * "owns" channels even when the AP allocates them, and we have no
         * way of knowing whether a modem channel's state has been changed.
         *
         * We also use GENERIC commands to enable/disable channel flow
         * control for IPA v4.2+.
         *
         * It is recommended that we halt the modem channels we allocated
         * when shutting down, but it's possible the channel isn't running
         * at the time we issue the HALT command.  We'll get an error in
         * that case, but it's harmless (the channel is already halted).
         * Similarly, we could get an error back when updating flow control
         * on a channel because it's not in the proper state.
         *
         * In either case, we silently ignore a CHANNEL_NOT_RUNNING error
         * if we receive it.
         */
        val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
        result = u32_get_bits(val, GENERIC_EE_RESULT_FMASK);

        switch (result) {
        case GENERIC_EE_SUCCESS:
        case GENERIC_EE_CHANNEL_NOT_RUNNING:
                gsi->result = 0;
                break;

        case GENERIC_EE_RETRY:
                gsi->result = -EAGAIN;
                break;

        default:
                dev_err(gsi->dev, "global INT1 generic result %u\n", result);
                gsi->result = -EIO;
                break;
        }

        complete(&gsi->completion);
}

/* Inter-EE interrupt handler */
static void gsi_isr_glob_ee(struct gsi *gsi)
{
        u32 val;

        val = ioread32(gsi->virt + GSI_CNTXT_GLOB_IRQ_STTS_OFFSET);

        if (val & BIT(ERROR_INT))
                gsi_isr_glob_err(gsi);

        iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_CLR_OFFSET);

        val &= ~BIT(ERROR_INT);

        if (val & BIT(GP_INT1)) {
                val ^= BIT(GP_INT1);
                gsi_isr_gp_int1(gsi);
        }

        if (val)
                dev_err(gsi->dev, "unexpected global interrupt 0x%08x\n", val);
}

/* I/O completion interrupt event */
static void gsi_isr_ieob(struct gsi *gsi)
{
        u32 event_mask;

        event_mask = ioread32(gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_OFFSET);
        gsi_irq_ieob_disable(gsi, event_mask);
        iowrite32(event_mask, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_CLR_OFFSET);

        while (event_mask) {
                u32 evt_ring_id = __ffs(event_mask);

                event_mask ^= BIT(evt_ring_id);

                napi_schedule(&gsi->evt_ring[evt_ring_id].channel->napi);
        }
}
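
/* Note that the IEOB interrupt is masked above rather than just cleared;
 * NAPI polling for the affected channel is expected to re-enable it once
 * the event ring has been drained, the usual interrupt mitigation scheme.
 */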

/* General event interrupts represent serious problems, so report them */
static void gsi_isr_general(struct gsi *gsi)
{
        struct device *dev = gsi->dev;
        u32 val;

        val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
        iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

        dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
}

/**
 * gsi_isr() - Top level GSI interrupt service routine
 * @irq:        Interrupt number (ignored)
 * @dev_id:     GSI pointer supplied to request_irq()
 *
 * This is the main handler function registered for the GSI IRQ. Each type
 * of interrupt has a separate handler function that is called from here.
 */
static irqreturn_t gsi_isr(int irq, void *dev_id)
{
        struct gsi *gsi = dev_id;
        u32 intr_mask;
        u32 cnt = 0;

        /* enum gsi_irq_type_id defines GSI interrupt types */
        while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
                /* intr_mask contains bitmask of pending GSI interrupts */
                do {
                        u32 gsi_intr = BIT(__ffs(intr_mask));

                        intr_mask ^= gsi_intr;

                        switch (gsi_intr) {
                        case BIT(GSI_CH_CTRL):
                                gsi_isr_chan_ctrl(gsi);
                                break;
                        case BIT(GSI_EV_CTRL):
                                gsi_isr_evt_ctrl(gsi);
                                break;
                        case BIT(GSI_GLOB_EE):
                                gsi_isr_glob_ee(gsi);
                                break;
                        case BIT(GSI_IEOB):
                                gsi_isr_ieob(gsi);
                                break;
                        case BIT(GSI_GENERAL):
                                gsi_isr_general(gsi);
                                break;
                        default:
                                dev_err(gsi->dev,
                                        "unrecognized interrupt type 0x%08x\n",
                                        gsi_intr);
                                break;
                        }
                } while (intr_mask);

                if (++cnt > GSI_ISR_MAX_ITER) {
                        dev_err(gsi->dev, "interrupt flood\n");
                        break;
                }
        }

        return IRQ_HANDLED;
}

/* Init function for GSI IRQ lookup; there is no gsi_irq_exit() */
static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
{
        int ret;

        ret = platform_get_irq_byname(pdev, "gsi");
        if (ret <= 0)
                return ret ? : -EINVAL;

        gsi->irq = ret;

        return 0;
}

/* Return the transaction associated with a transfer completion event */
static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
                                         struct gsi_event *event)
{
        u32 tre_offset;
        u32 tre_index;

        /* Event xfer_ptr records the TRE it's associated with */
        tre_offset = lower_32_bits(le64_to_cpu(event->xfer_ptr));
        tre_index = gsi_ring_index(&channel->tre_ring, tre_offset);

        return gsi_channel_trans_mapped(channel, tre_index);
}

/**
 * gsi_evt_ring_rx_update() - Record lengths of received data
 * @evt_ring:   Event ring associated with channel that received packets
 * @index:      Event index in ring reported by hardware
 *
 * Events for RX channels contain the actual number of bytes received into
 * the buffer.  Every event has a transaction associated with it, and here
 * we update transactions to record their actual received lengths.
 *
 * This function is called whenever we learn that the GSI hardware has filled
 * new events since the last time we checked.  The ring's index field tells
 * us the first entry in need of processing.  The index provided is the
 * first *unfilled* event in the ring (following the last filled one).
 *
 * Events are sequential within the event ring, and transactions are
 * sequential within the transaction pool.
 *
 * Note that @index always refers to an element *within* the event ring.
 */
static void gsi_evt_ring_rx_update(struct gsi_evt_ring *evt_ring, u32 index)
{
        struct gsi_channel *channel = evt_ring->channel;
        struct gsi_ring *ring = &evt_ring->ring;
        struct gsi_trans_info *trans_info;
        struct gsi_event *event_done;
        struct gsi_event *event;
        struct gsi_trans *trans;
        u32 trans_count = 0;
        u32 byte_count = 0;
        u32 event_avail;
        u32 old_index;

        trans_info = &channel->trans_info;

        /* We'll start with the oldest un-processed event.  RX channels
         * replenish receive buffers in single-TRE transactions, so we
         * can just map that event to its transaction.  Transactions
         * associated with completion events are consecutive.
         */
        old_index = ring->index;
        event = gsi_ring_virt(ring, old_index);
        trans = gsi_event_trans(channel, event);

        /* Compute the number of events to process before we wrap,
         * and determine when we'll be done processing events.
         */
        event_avail = ring->count - old_index % ring->count;
        event_done = gsi_ring_virt(ring, index);
        do {
                trans->len = __le16_to_cpu(event->len);
                byte_count += trans->len;
                trans_count++;

                /* Move on to the next event and transaction */
                if (--event_avail)
                        event++;
                else
                        event = gsi_ring_virt(ring, 0);
                trans = gsi_trans_pool_next(&trans_info->pool, trans);
        } while (event != event_done);

        /* We record RX bytes when they are received */
        channel->byte_count += byte_count;
        channel->trans_count += trans_count;
}
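
/* As an example of the wrap handling above (a 256-element ring is
 * hypothetical): with old_index 254, event_avail starts at 2, so the
 * loop consumes the events at ring elements 254 and 255 and then
 * restarts at element 0 via gsi_ring_virt(ring, 0).
 */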

/* Initialize a ring, including allocating DMA memory for its entries */
static int gsi_ring_alloc(struct gsi *gsi, struct gsi_ring *ring, u32 count)
{
        u32 size = count * GSI_RING_ELEMENT_SIZE;
        struct device *dev = gsi->dev;
        dma_addr_t addr;

        /* Hardware requires a 2^n ring size, with alignment equal to size.
         * The DMA address returned by dma_alloc_coherent() is guaranteed to
         * be a power-of-2 number of pages, which satisfies the requirement.
         */
        ring->virt = dma_alloc_coherent(dev, size, &addr, GFP_KERNEL);
        if (!ring->virt)
                return -ENOMEM;

        ring->addr = addr;
        ring->count = count;

        return 0;
}

/* Free a previously-allocated ring */
static void gsi_ring_free(struct gsi *gsi, struct gsi_ring *ring)
{
        size_t size = ring->count * GSI_RING_ELEMENT_SIZE;

        dma_free_coherent(gsi->dev, size, ring->virt, ring->addr);
}

/* Allocate an available event ring id */
static int gsi_evt_ring_id_alloc(struct gsi *gsi)
{
        u32 evt_ring_id;

        if (gsi->event_bitmap == ~0U) {
                dev_err(gsi->dev, "event rings exhausted\n");
                return -ENOSPC;
        }

1448        evt_ring_id = ffz(gsi->event_bitmap);
1449        gsi->event_bitmap |= BIT(evt_ring_id);
1450
1451        return (int)evt_ring_id;
1452}
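/* Illustration: if event_bitmap is 0xfff00007, IDs 0-2 are allocated and
 * IDs 20-31 were marked reserved/unsupported at init time; ffz() locates
 * bit 3, so ID 3 is handed out and its bit is set to mark it in use.
 */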
1453
1454/* Free a previously-allocated event ring id */
1455static void gsi_evt_ring_id_free(struct gsi *gsi, u32 evt_ring_id)
1456{
1457        gsi->event_bitmap &= ~BIT(evt_ring_id);
1458}
1459
1460/* Ring a channel doorbell, reporting the first un-filled entry */
1461void gsi_channel_doorbell(struct gsi_channel *channel)
1462{
1463        struct gsi_ring *tre_ring = &channel->tre_ring;
1464        u32 channel_id = gsi_channel_id(channel);
1465        struct gsi *gsi = channel->gsi;
1466        u32 val;
1467
1468        /* Note: index *must* be used modulo the ring count here */
1469        val = gsi_ring_addr(tre_ring, tre_ring->index % tre_ring->count);
1470        iowrite32(val, gsi->virt + GSI_CH_C_DOORBELL_0_OFFSET(channel_id));
1471}
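/* Sketch of the doorbell math (16-byte ring elements assumed): with a
 * 256-element TRE ring and tre_ring->index = 259, the modulo maps the
 * free-running index to slot 3, and the value written is the (32-bit)
 * address of element 3--the first ring slot not yet filled:
 *
 *	val = gsi_ring_addr(tre_ring, 259 % 256);  // ring base + 3 * 16
 */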
1472
1473/* Consult hardware, move any newly completed transactions to completed list */
1474static struct gsi_trans *gsi_channel_update(struct gsi_channel *channel)
1475{
1476        u32 evt_ring_id = channel->evt_ring_id;
1477        struct gsi *gsi = channel->gsi;
1478        struct gsi_evt_ring *evt_ring;
1479        struct gsi_trans *trans;
1480        struct gsi_ring *ring;
1481        u32 offset;
1482        u32 index;
1483
1484        evt_ring = &gsi->evt_ring[evt_ring_id];
1485        ring = &evt_ring->ring;
1486
1487        /* See if there's anything new to process; if not, we're done.  Note
1488         * that index always refers to an entry *within* the event ring.
1489         */
1490        offset = GSI_EV_CH_E_CNTXT_4_OFFSET(evt_ring_id);
1491        index = gsi_ring_index(ring, ioread32(gsi->virt + offset));
1492        if (index == ring->index % ring->count)
1493                return NULL;
1494
1495        /* Get the transaction for the latest completed event.  Take a
1496         * reference to keep it from completing before we give the events
1497         * for this and previous transactions back to the hardware.
1498         */
1499        trans = gsi_event_trans(channel, gsi_ring_virt(ring, index - 1));
1500        refcount_inc(&trans->refcount);
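        /* Note on index - 1: @index is the first *unprocessed* event, so
         * the latest completed one sits just before it.  If index is 0 the
         * u32 subtraction wraps to 0xffffffff, and gsi_ring_virt()'s modulo
         * by the power-of-2 ring count maps that back to the last slot.
         */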
1501
1502        /* For RX channels, update each completed transaction with the number
1503         * of bytes that were actually received.  For TX channels, report
1504         * the number of transactions and bytes this completion represents
1505         * up the network stack.
1506         */
1507        if (channel->toward_ipa)
1508                gsi_channel_tx_update(channel, trans);
1509        else
1510                gsi_evt_ring_rx_update(evt_ring, index);
1511
1512        gsi_trans_move_complete(trans);
1513
1514        /* Tell the hardware we've handled these events */
1515        gsi_evt_ring_doorbell(gsi, evt_ring_id, index);
1516
1517        gsi_trans_free(trans);
1518
1519        return gsi_channel_trans_complete(channel);
1520}
1521
1522/**
1523 * gsi_channel_poll_one() - Return a single completed transaction on a channel
1524 * @channel:    Channel to be polled
1525 *
1526 * Return:      Transaction pointer, or null if none are available
1527 *
1528 * This function returns the first entry on a channel's completed transaction
1529 * list.  If that list is empty, the hardware is consulted to determine
1530 * whether any new transactions have completed.  If so, they're moved to the
1531 * completed list and the new first entry is returned.  If there are no more
1532 * completed transactions, a null pointer is returned.
1533 */
1534static struct gsi_trans *gsi_channel_poll_one(struct gsi_channel *channel)
1535{
1536        struct gsi_trans *trans;
1537
1538        /* Get the first transaction from the completed list */
1539        trans = gsi_channel_trans_complete(channel);
1540        if (!trans)     /* List is empty; see if there's more to do */
1541                trans = gsi_channel_update(channel);
1542
1543        if (trans)
1544                gsi_trans_move_polled(trans);
1545
1546        return trans;
1547}
1548
1549/**
1550 * gsi_channel_poll() - NAPI poll function for a channel
1551 * @napi:       NAPI structure for the channel
1552 * @budget:     Budget supplied by NAPI core
1553 *
1554 * Return:      Number of items polled (<= budget)
1555 *
1556 * Single transactions completed by hardware are polled until either
1557 * the budget is exhausted, or there are no more.  Each transaction
1558 * polled is passed to gsi_trans_complete(), to perform remaining
1559 * completion processing and retire/free the transaction.
1560 */
1561static int gsi_channel_poll(struct napi_struct *napi, int budget)
1562{
1563        struct gsi_channel *channel;
1564        int count;
1565
1566        channel = container_of(napi, struct gsi_channel, napi);
1567        for (count = 0; count < budget; count++) {
1568                struct gsi_trans *trans;
1569
1570                trans = gsi_channel_poll_one(channel);
1571                if (!trans)
1572                        break;
1573                gsi_trans_complete(trans);
1574        }
1575
1576        if (count < budget && napi_complete(napi))
1577                gsi_irq_ieob_enable_one(channel->gsi, channel->evt_ring_id);
1578
1579        return count;
1580}
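/* Behavior example: with budget = 64 and only 10 completed transactions
 * pending, the loop retires all 10, count (10) is below the budget,
 * napi_complete() succeeds, and the channel's IEOB interrupt is
 * re-enabled so the next completion raises an IRQ instead of waiting
 * for another poll.
 */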
1581
1582/* The event bitmap represents which event ids are available for allocation.
1583 * Set bits are not available; clear bits can be used.  This function
1584 * initializes the map so all events supported by the hardware are available,
1585 * then precludes any reserved events from being allocated.
1586 */
1587static u32 gsi_event_bitmap_init(u32 evt_ring_max)
1588{
1589        u32 event_bitmap = GENMASK(BITS_PER_LONG - 1, evt_ring_max);
1590
1591        event_bitmap |= GENMASK(GSI_MHI_EVENT_ID_END, GSI_MHI_EVENT_ID_START);
1592
1593        return event_bitmap;
1594}
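/* Example: with evt_ring_max = 20 on a 64-bit build, GENMASK() sets bits
 * 20-63; truncation to the u32 bitmap leaves bits 20-31 set, so rings 20
 * and above can never be allocated.  The MHI range (a small block of
 * reserved IDs defined earlier in this file) is then marked in-use too,
 * leaving only the remaining clear bits for gsi_evt_ring_id_alloc().
 */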
1595
1596/* Setup function for a single channel */
1597static int gsi_channel_setup_one(struct gsi *gsi, u32 channel_id)
1598{
1599        struct gsi_channel *channel = &gsi->channel[channel_id];
1600        u32 evt_ring_id = channel->evt_ring_id;
1601        int ret;
1602
1603        if (!gsi_channel_initialized(channel))
1604                return 0;
1605
1606        ret = gsi_evt_ring_alloc_command(gsi, evt_ring_id);
1607        if (ret)
1608                return ret;
1609
1610        gsi_evt_ring_program(gsi, evt_ring_id);
1611
1612        ret = gsi_channel_alloc_command(gsi, channel_id);
1613        if (ret)
1614                goto err_evt_ring_de_alloc;
1615
1616        gsi_channel_program(channel, true);
1617
1618        if (channel->toward_ipa)
1619                netif_tx_napi_add(&gsi->dummy_dev, &channel->napi,
1620                                  gsi_channel_poll, NAPI_POLL_WEIGHT);
1621        else
1622                netif_napi_add(&gsi->dummy_dev, &channel->napi,
1623                               gsi_channel_poll, NAPI_POLL_WEIGHT);
1624
1625        return 0;
1626
1627err_evt_ring_de_alloc:
1628        /* We've done nothing with the event ring yet so don't reset */
1629        gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1630
1631        return ret;
1632}
1633
1634/* Inverse of gsi_channel_setup_one() */
1635static void gsi_channel_teardown_one(struct gsi *gsi, u32 channel_id)
1636{
1637        struct gsi_channel *channel = &gsi->channel[channel_id];
1638        u32 evt_ring_id = channel->evt_ring_id;
1639
1640        if (!gsi_channel_initialized(channel))
1641                return;
1642
1643        netif_napi_del(&channel->napi);
1644
1645        gsi_channel_de_alloc_command(gsi, channel_id);
1646        gsi_evt_ring_reset_command(gsi, evt_ring_id);
1647        gsi_evt_ring_de_alloc_command(gsi, evt_ring_id);
1648}
1649
1650/* We use generic commands only to operate on modem channels.  We don't have
1651 * the ability to determine channel state for a modem channel, so we simply
1652 * issue the command and wait for it to complete.
1653 */
1654static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
1655                               enum gsi_generic_cmd_opcode opcode,
1656                               u8 params)
1657{
1658        bool timeout;
1659        u32 val;
1660
1661        /* The error global interrupt type is always enabled (until we tear
1662         * down), so we will keep it enabled.
1663         *
1664         * A generic EE command completes with a GSI global interrupt of
1665         * type GP_INT1.  We only perform one generic command at a time
1666         * (to allocate, halt, or enable/disable flow control on a modem
1667         * channel), and only from this function.  So we enable the GP_INT1
1668         * IRQ type here, and disable it again after the command completes.
1669         */
1670        val = BIT(ERROR_INT) | BIT(GP_INT1);
1671        iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1672
1673        /* First zero the result code field */
1674        val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1675        val &= ~GENERIC_EE_RESULT_FMASK;
1676        iowrite32(val, gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
1677
1678        /* Now issue the command */
1679        val = u32_encode_bits(opcode, GENERIC_OPCODE_FMASK);
1680        val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
1681        val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);
1682        val |= u32_encode_bits(params, GENERIC_PARAMS_FMASK);
1683
1684        timeout = !gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val);
1685
1686        /* Disable the GP_INT1 IRQ type again */
1687        iowrite32(BIT(ERROR_INT), gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1688
1689        if (!timeout)
1690                return gsi->result;
1691
1692        dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
1693                opcode, channel_id);
1694
1695        return -ETIMEDOUT;
1696}
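/* In short, the handshake above is: enable GP_INT1, clear the stale
 * result field in the scratch register, write the opcode/channel/EE/
 * params word, then wait.  On completion gsi->result holds the result
 * code (recorded by the GP_INT1 handler elsewhere in this file); if
 * gsi_command() times out instead, we report -ETIMEDOUT.
 */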
1697
1698static int gsi_modem_channel_alloc(struct gsi *gsi, u32 channel_id)
1699{
1700        return gsi_generic_command(gsi, channel_id,
1701                                   GSI_GENERIC_ALLOCATE_CHANNEL, 0);
1702}
1703
1704static void gsi_modem_channel_halt(struct gsi *gsi, u32 channel_id)
1705{
1706        u32 retries = GSI_CHANNEL_MODEM_HALT_RETRIES;
1707        int ret;
1708
1709        do
1710                ret = gsi_generic_command(gsi, channel_id,
1711                                          GSI_GENERIC_HALT_CHANNEL, 0);
1712        while (ret == -EAGAIN && retries--);
1713
1714        if (ret)
1715                dev_err(gsi->dev, "error %d halting modem channel %u\n",
1716                        ret, channel_id);
1717}
1718
1719/* Enable or disable flow control for a modem GSI TX channel (IPA v4.2+) */
1720void
1721gsi_modem_channel_flow_control(struct gsi *gsi, u32 channel_id, bool enable)
1722{
1723        u32 retries = 0;
1724        u32 command;
1725        int ret;
1726
1727        command = enable ? GSI_GENERIC_ENABLE_FLOW_CONTROL
1728                         : GSI_GENERIC_DISABLE_FLOW_CONTROL;
1729        /* Disabling flow control on IPA v4.11+ can return -EAGAIN if the
1730         * enable is still underway; in that case the command must be retried.
1731         */
1732        if (!enable && gsi->version >= IPA_VERSION_4_11)
1733                retries = GSI_CHANNEL_MODEM_FLOW_RETRIES;
1734
1735        do
1736                ret = gsi_generic_command(gsi, channel_id, command, 0);
1737        while (ret == -EAGAIN && retries--);
1738
1739        if (ret)
1740                dev_err(gsi->dev,
1741                        "error %d %sabling modem channel %u flow control\n",
1742                        ret, enable ? "en" : "dis", channel_id);
1743}
1744
1745/* Setup function for channels */
1746static int gsi_channel_setup(struct gsi *gsi)
1747{
1748        u32 channel_id = 0;
1749        u32 mask;
1750        int ret;
1751
1752        gsi_irq_enable(gsi);
1753
1754        mutex_lock(&gsi->mutex);
1755
1756        do {
1757                ret = gsi_channel_setup_one(gsi, channel_id);
1758                if (ret)
1759                        goto err_unwind;
1760        } while (++channel_id < gsi->channel_count);
1761
1762        /* Make sure no channels were defined that hardware does not support */
1763        while (channel_id < GSI_CHANNEL_COUNT_MAX) {
1764                struct gsi_channel *channel = &gsi->channel[channel_id++];
1765
1766                if (!gsi_channel_initialized(channel))
1767                        continue;
1768
1769                ret = -EINVAL;
1770                dev_err(gsi->dev, "channel %u not supported by hardware\n",
1771                        channel_id - 1);
1772                channel_id = gsi->channel_count;
1773                goto err_unwind;
1774        }
1775
1776        /* Allocate modem channels if necessary */
1777        mask = gsi->modem_channel_bitmap;
1778        while (mask) {
1779                u32 modem_channel_id = __ffs(mask);
1780
1781                ret = gsi_modem_channel_alloc(gsi, modem_channel_id);
1782                if (ret)
1783                        goto err_unwind_modem;
1784
1785                /* Clear bit from mask only after success (for unwind) */
1786                mask ^= BIT(modem_channel_id);
1787        }
1788
1789        mutex_unlock(&gsi->mutex);
1790
1791        return 0;
1792
1793err_unwind_modem:
1794        /* Compute which modem channels need to be deallocated */
1795        mask ^= gsi->modem_channel_bitmap;
1796        while (mask) {
1797                channel_id = __fls(mask);
1798
1799                mask ^= BIT(channel_id);
1800
1801                gsi_modem_channel_halt(gsi, channel_id);
1802        }
1803
1804err_unwind:
1805        while (channel_id--)
1806                gsi_channel_teardown_one(gsi, channel_id);
1807
1808        mutex_unlock(&gsi->mutex);
1809
1810        gsi_irq_disable(gsi);
1811
1812        return ret;
1813}
1814
1815/* Inverse of gsi_channel_setup() */
1816static void gsi_channel_teardown(struct gsi *gsi)
1817{
1818        u32 mask = gsi->modem_channel_bitmap;
1819        u32 channel_id;
1820
1821        mutex_lock(&gsi->mutex);
1822
1823        while (mask) {
1824                channel_id = __fls(mask);
1825
1826                mask ^= BIT(channel_id);
1827
1828                gsi_modem_channel_halt(gsi, channel_id);
1829        }
1830
1831        channel_id = gsi->channel_count - 1;
1832        do
1833                gsi_channel_teardown_one(gsi, channel_id);
1834        while (channel_id--);
1835
1836        mutex_unlock(&gsi->mutex);
1837
1838        gsi_irq_disable(gsi);
1839}
1840
1841/* Turn off all GSI interrupts initially */
1842static int gsi_irq_setup(struct gsi *gsi)
1843{
1844        int ret;
1845
1846        /* Writing 1 indicates IRQ interrupts; 0 would be MSI */
1847        iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);
1848
1849        /* Disable all interrupt types */
1850        gsi_irq_type_update(gsi, 0);
1851
1852        /* Clear all type-specific interrupt masks */
1853        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
1854        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
1855        iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
1856        iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
1857
1858        /* The inter-EE interrupts are not supported for IPA v3.0-v3.1 */
1859        if (gsi->version > IPA_VERSION_3_1) {
1860                u32 offset;
1861
1862                /* These registers are in the non-adjusted address range */
1863                offset = GSI_INTER_EE_SRC_CH_IRQ_MSK_OFFSET;
1864                iowrite32(0, gsi->virt_raw + offset);
1865                offset = GSI_INTER_EE_SRC_EV_CH_IRQ_MSK_OFFSET;
1866                iowrite32(0, gsi->virt_raw + offset);
1867        }
1868
1869        iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
1870
1871        ret = request_irq(gsi->irq, gsi_isr, 0, "gsi", gsi);
1872        if (ret)
1873                dev_err(gsi->dev, "error %d requesting \"gsi\" IRQ\n", ret);
1874
1875        return ret;
1876}
1877
1878static void gsi_irq_teardown(struct gsi *gsi)
1879{
1880        free_irq(gsi->irq, gsi);
1881}
1882
1883/* Get count of channels and event rings supported (no gsi_ring_teardown()) */
1884static int gsi_ring_setup(struct gsi *gsi)
1885{
1886        struct device *dev = gsi->dev;
1887        u32 count;
1888        u32 val;
1889
1890        if (gsi->version < IPA_VERSION_3_5_1) {
1891                /* No HW_PARAM_2 register prior to IPA v3.5.1; assume the max */
1892                gsi->channel_count = GSI_CHANNEL_COUNT_MAX;
1893                gsi->evt_ring_count = GSI_EVT_RING_COUNT_MAX;
1894
1895                return 0;
1896        }
1897
1898        val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);
1899
1900        count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
1901        if (!count) {
1902                dev_err(dev, "GSI reports zero channels supported\n");
1903                return -EINVAL;
1904        }
1905        if (count > GSI_CHANNEL_COUNT_MAX) {
1906                dev_warn(dev, "limiting to %u channels; hardware supports %u\n",
1907                         GSI_CHANNEL_COUNT_MAX, count);
1908                count = GSI_CHANNEL_COUNT_MAX;
1909        }
1910        gsi->channel_count = count;
1911
1912        count = u32_get_bits(val, NUM_EV_PER_EE_FMASK);
1913        if (!count) {
1914                dev_err(dev, "GSI reports zero event rings supported\n");
1915                return -EINVAL;
1916        }
1917        if (count > GSI_EVT_RING_COUNT_MAX) {
1918                dev_warn(dev,
1919                         "limiting to %u event rings; hardware supports %u\n",
1920                         GSI_EVT_RING_COUNT_MAX, count);
1921                count = GSI_EVT_RING_COUNT_MAX;
1922        }
1923        gsi->evt_ring_count = count;
1924
1925        return 0;
1926}
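/* Worked example of the clamping above: if HW_PARAM_2 reports 32
 * channels and 31 event rings but the driver maxima are smaller, both
 * counts are clamped with a warning; a report of zero for either field
 * is treated as a probe failure (-EINVAL).
 */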
1927
1928/* Setup function for GSI.  GSI firmware must be loaded and initialized */
1929int gsi_setup(struct gsi *gsi)
1930{
1931        u32 val;
1932        int ret;
1933
1934        /* Here is where we first touch the GSI hardware */
1935        val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
1936        if (!(val & ENABLED_FMASK)) {
1937                dev_err(gsi->dev, "GSI has not been enabled\n");
1938                return -EIO;
1939        }
1940
1941        ret = gsi_irq_setup(gsi);
1942        if (ret)
1943                return ret;
1944
1945        ret = gsi_ring_setup(gsi);      /* No matching teardown required */
1946        if (ret)
1947                goto err_irq_teardown;
1948
1949        /* Initialize the error log */
1950        iowrite32(0, gsi->virt + GSI_ERROR_LOG_OFFSET);
1951
1952        ret = gsi_channel_setup(gsi);
1953        if (ret)
1954                goto err_irq_teardown;
1955
1956        return 0;
1957
1958err_irq_teardown:
1959        gsi_irq_teardown(gsi);
1960
1961        return ret;
1962}
1963
1964/* Inverse of gsi_setup() */
1965void gsi_teardown(struct gsi *gsi)
1966{
1967        gsi_channel_teardown(gsi);
1968        gsi_irq_teardown(gsi);
1969}
1970
1971/* Initialize a channel's event ring */
1972static int gsi_channel_evt_ring_init(struct gsi_channel *channel)
1973{
1974        struct gsi *gsi = channel->gsi;
1975        struct gsi_evt_ring *evt_ring;
1976        int ret;
1977
1978        ret = gsi_evt_ring_id_alloc(gsi);
1979        if (ret < 0)
1980                return ret;
1981        channel->evt_ring_id = ret;
1982
1983        evt_ring = &gsi->evt_ring[channel->evt_ring_id];
1984        evt_ring->channel = channel;
1985
1986        ret = gsi_ring_alloc(gsi, &evt_ring->ring, channel->event_count);
1987        if (!ret)
1988                return 0;       /* Success! */
1989
1990        dev_err(gsi->dev, "error %d allocating channel %u event ring\n",
1991                ret, gsi_channel_id(channel));
1992
1993        gsi_evt_ring_id_free(gsi, channel->evt_ring_id);
1994
1995        return ret;
1996}
1997
1998/* Inverse of gsi_channel_evt_ring_init() */
1999static void gsi_channel_evt_ring_exit(struct gsi_channel *channel)
2000{
2001        u32 evt_ring_id = channel->evt_ring_id;
2002        struct gsi *gsi = channel->gsi;
2003        struct gsi_evt_ring *evt_ring;
2004
2005        evt_ring = &gsi->evt_ring[evt_ring_id];
2006        gsi_ring_free(gsi, &evt_ring->ring);
2007        gsi_evt_ring_id_free(gsi, evt_ring_id);
2008}
2009
2010static bool gsi_channel_data_valid(struct gsi *gsi,
2011                                   const struct ipa_gsi_endpoint_data *data)
2012{
2013        u32 channel_id = data->channel_id;
2014        struct device *dev = gsi->dev;
2015
2016        /* Make sure channel ids are in the range driver supports */
2017        if (channel_id >= GSI_CHANNEL_COUNT_MAX) {
2018                dev_err(dev, "bad channel id %u; must be less than %u\n",
2019                        channel_id, GSI_CHANNEL_COUNT_MAX);
2020                return false;
2021        }
2022
2023        if (data->ee_id != GSI_EE_AP && data->ee_id != GSI_EE_MODEM) {
2024                dev_err(dev, "bad EE id %u; not AP or modem\n", data->ee_id);
2025                return false;
2026        }
2027
2028        if (!data->channel.tlv_count ||
2029            data->channel.tlv_count > GSI_TLV_MAX) {
2030                dev_err(dev, "channel %u bad tlv_count %u; must be 1..%u\n",
2031                        channel_id, data->channel.tlv_count, GSI_TLV_MAX);
2032                return false;
2033        }
2034
2035        /* We have to allow at least one maximally-sized transaction to
2036         * be outstanding (which would use tlv_count TREs).  Given how
2037         * gsi_channel_tre_max() is computed, tre_count has to be almost
2038         * twice the TLV FIFO size to satisfy this requirement.
2039         */
2040        if (data->channel.tre_count < 2 * data->channel.tlv_count - 1) {
2041                dev_err(dev, "channel %u TRE count %u too small; must be at least %u\n",
2042                        channel_id, data->channel.tre_count,
2043                        2 * data->channel.tlv_count - 1);
2044                return false;
2045        }
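        /* E.g. tlv_count = 16 requires tre_count >= 31: one maximally-sized
         * transaction occupies 16 TREs, and gsi_channel_tre_max() reserves
         * tlv_count - 1 = 15 beyond that, so (given the power-of-2 rule
         * below) a 32-entry ring is the practical minimum.
         */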
2046
2047        if (!is_power_of_2(data->channel.tre_count)) {
2048                dev_err(dev, "channel %u bad tre_count %u; not power of 2\n",
2049                        channel_id, data->channel.tre_count);
2050                return false;
2051        }
2052
2053        if (!is_power_of_2(data->channel.event_count)) {
2054                dev_err(dev, "channel %u bad event_count %u; not power of 2\n",
2055                        channel_id, data->channel.event_count);
2056                return false;
2057        }
2058
2059        return true;
2060}
2061
2062/* Init function for a single channel */
2063static int gsi_channel_init_one(struct gsi *gsi,
2064                                const struct ipa_gsi_endpoint_data *data,
2065                                bool command)
2066{
2067        struct gsi_channel *channel;
2068        u32 tre_count;
2069        int ret;
2070
2071        if (!gsi_channel_data_valid(gsi, data))
2072                return -EINVAL;
2073
2074        /* Worst case we need an event for every outstanding TRE */
2075        if (data->channel.tre_count > data->channel.event_count) {
2076                tre_count = data->channel.event_count;
2077                dev_warn(gsi->dev, "channel %u limited to %u TREs\n",
2078                         data->channel_id, tre_count);
2079        } else {
2080                tre_count = data->channel.tre_count;
2081        }
2082
2083        channel = &gsi->channel[data->channel_id];
2084        memset(channel, 0, sizeof(*channel));
2085
2086        channel->gsi = gsi;
2087        channel->toward_ipa = data->toward_ipa;
2088        channel->command = command;
2089        channel->tlv_count = data->channel.tlv_count;
2090        channel->tre_count = tre_count;
2091        channel->event_count = data->channel.event_count;
2092
2093        ret = gsi_channel_evt_ring_init(channel);
2094        if (ret)
2095                goto err_clear_gsi;
2096
2097        ret = gsi_ring_alloc(gsi, &channel->tre_ring, data->channel.tre_count);
2098        if (ret) {
2099                dev_err(gsi->dev, "error %d allocating channel %u ring\n",
2100                        ret, data->channel_id);
2101                goto err_channel_evt_ring_exit;
2102        }
2103
2104        ret = gsi_channel_trans_init(gsi, data->channel_id);
2105        if (ret)
2106                goto err_ring_free;
2107
2108        if (command) {
2109                u32 tre_max = gsi_channel_tre_max(gsi, data->channel_id);
2110
2111                ret = ipa_cmd_pool_init(channel, tre_max);
2112        }
2113        if (!ret)
2114                return 0;       /* Success! */
2115
2116        gsi_channel_trans_exit(channel);
2117err_ring_free:
2118        gsi_ring_free(gsi, &channel->tre_ring);
2119err_channel_evt_ring_exit:
2120        gsi_channel_evt_ring_exit(channel);
2121err_clear_gsi:
2122        channel->gsi = NULL;    /* Mark it not (fully) initialized */
2123
2124        return ret;
2125}
2126
2127/* Inverse of gsi_channel_init_one() */
2128static void gsi_channel_exit_one(struct gsi_channel *channel)
2129{
2130        if (!gsi_channel_initialized(channel))
2131                return;
2132
2133        if (channel->command)
2134                ipa_cmd_pool_exit(channel);
2135        gsi_channel_trans_exit(channel);
2136        gsi_ring_free(channel->gsi, &channel->tre_ring);
2137        gsi_channel_evt_ring_exit(channel);
2138}
2139
2140/* Init function for channels */
2141static int gsi_channel_init(struct gsi *gsi, u32 count,
2142                            const struct ipa_gsi_endpoint_data *data)
2143{
2144        bool modem_alloc;
2145        int ret = 0;
2146        u32 i;
2147
2148        /* IPA v4.2 requires the AP to allocate channels for the modem */
2149        modem_alloc = gsi->version == IPA_VERSION_4_2;
2150
2151        gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
2152        gsi->ieob_enabled_bitmap = 0;
2153
2154        /* The endpoint data array is indexed by endpoint name */
2155        for (i = 0; i < count; i++) {
2156                bool command = i == IPA_ENDPOINT_AP_COMMAND_TX;
2157
2158                if (ipa_gsi_endpoint_data_empty(&data[i]))
2159                        continue;       /* Skip over empty slots */
2160
2161                /* Mark modem channels to be allocated (hardware workaround) */
2162                if (data[i].ee_id == GSI_EE_MODEM) {
2163                        if (modem_alloc)
2164                                gsi->modem_channel_bitmap |=
2165                                                BIT(data[i].channel_id);
2166                        continue;
2167                }
2168
2169                ret = gsi_channel_init_one(gsi, &data[i], command);
2170                if (ret)
2171                        goto err_unwind;
2172        }
2173
2174        return ret;
2175
2176err_unwind:
2177        while (i--) {
2178                if (ipa_gsi_endpoint_data_empty(&data[i]))
2179                        continue;
2180                if (modem_alloc && data[i].ee_id == GSI_EE_MODEM) {
2181                        gsi->modem_channel_bitmap &= ~BIT(data[i].channel_id);
2182                        continue;
2183                }
2184                gsi_channel_exit_one(&gsi->channel[data[i].channel_id]);
2185        }
2186
2187        return ret;
2188}
2189
2190/* Inverse of gsi_channel_init() */
2191static void gsi_channel_exit(struct gsi *gsi)
2192{
2193        u32 channel_id = GSI_CHANNEL_COUNT_MAX - 1;
2194
2195        do
2196                gsi_channel_exit_one(&gsi->channel[channel_id]);
2197        while (channel_id--);
2198        gsi->modem_channel_bitmap = 0;
2199}
2200
2201/* Init function for GSI.  GSI hardware does not need to be "ready" */
2202int gsi_init(struct gsi *gsi, struct platform_device *pdev,
2203             enum ipa_version version, u32 count,
2204             const struct ipa_gsi_endpoint_data *data)
2205{
2206        struct device *dev = &pdev->dev;
2207        struct resource *res;
2208        resource_size_t size;
2209        u32 adjust;
2210        int ret;
2211
2212        gsi_validate_build();
2213
2214        gsi->dev = dev;
2215        gsi->version = version;
2216
2217        /* GSI uses NAPI on all channels.  Create a dummy network device
2218         * for the channel NAPI contexts to be associated with.
2219         */
2220        init_dummy_netdev(&gsi->dummy_dev);
2221
2222        /* Get GSI memory range and map it */
2223        res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
2224        if (!res) {
2225                dev_err(dev, "DT error getting \"gsi\" memory property\n");
2226                return -ENODEV;
2227        }
2228
2229        size = resource_size(res);
2230        if (res->start > U32_MAX || size > U32_MAX - res->start) {
2231                dev_err(dev, "DT memory resource \"gsi\" out of range\n");
2232                return -EINVAL;
2233        }
2234
2235        /* Make sure we can make our pointer adjustment if necessary */
2236        adjust = gsi->version < IPA_VERSION_4_5 ? 0 : GSI_EE_REG_ADJUST;
2237        if (res->start < adjust) {
2238                dev_err(dev, "DT memory resource \"gsi\" too low (< %u)\n",
2239                        adjust);
2240                return -EINVAL;
2241        }
2242
2243        gsi->virt_raw = ioremap(res->start, size);
2244        if (!gsi->virt_raw) {
2245                dev_err(dev, "unable to remap \"gsi\" memory\n");
2246                return -ENOMEM;
2247        }
2248        /* Most registers are accessed using an adjusted register range */
2249        gsi->virt = gsi->virt_raw - adjust;
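        /* Example: on IPA v4.5+ (assuming the GSI_EE_REG_ADJUST value of
         * 0xd000 from gsi_reg.h), a register at adjusted offset 0xd020 is
         * read via gsi->virt + 0xd020, which resolves to virt_raw + 0x20
         * within the remapped "gsi" region.
         */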
2250
2251        init_completion(&gsi->completion);
2252
2253        ret = gsi_irq_init(gsi, pdev);  /* No matching exit required */
2254        if (ret)
2255                goto err_iounmap;
2256
2257        ret = gsi_channel_init(gsi, count, data);
2258        if (ret)
2259                goto err_iounmap;
2260
2261        mutex_init(&gsi->mutex);
2262
2263        return 0;
2264
2265err_iounmap:
2266        iounmap(gsi->virt_raw);
2267
2268        return ret;
2269}
2270
2271/* Inverse of gsi_init() */
2272void gsi_exit(struct gsi *gsi)
2273{
2274        mutex_destroy(&gsi->mutex);
2275        gsi_channel_exit(gsi);
2276        iounmap(gsi->virt_raw);
2277}
2278
2279/* The maximum number of outstanding TREs on a channel.  This limits
2280 * a channel's maximum number of transactions outstanding (worst case
2281 * is one TRE per transaction).
2282 *
2283 * The absolute limit is the number of TREs in the channel's TRE ring,
2284 * and in theory we should be able to use all of them.  But in practice,
2285 * doing that led to the hardware reporting exhaustion of event ring
2286 * slots for writing completion information.  So the hardware limit
2287 * would be (tre_count - 1).
2288 *
2289 * We reduce it a bit further though.  Transaction resource pools are
2290 * sized to be a little larger than this maximum, to allow resource
2291 * allocations to always be contiguous.  The number of entries in a
2292 * TRE ring buffer is a power of 2, and the extra resources in a pool
2293 * tend to nearly double the memory allocated for it.  Reducing the
2294 * maximum number of outstanding TREs allows the number of entries in
2295 * a pool to avoid crossing that power-of-2 boundary, and this can
2296 * substantially reduce pool memory requirements.  The number we
2297 * reduce it by matches the number added in gsi_trans_pool_init().
2298 */
2299u32 gsi_channel_tre_max(struct gsi *gsi, u32 channel_id)
2300{
2301        struct gsi_channel *channel = &gsi->channel[channel_id];
2302
2303        /* Hardware limit is channel->tre_count - 1 */
2304        return channel->tre_count - (channel->tlv_count - 1);
2305}
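/* Worked example: tre_count = 256 and tlv_count = 20 give a tre_max of
 * 256 - 19 = 237.  The transaction pool then adds tlv_count - 1 entries
 * back (see gsi_trans_pool_init()) so allocations stay contiguous:
 * 237 + 19 = 256 remains within the power-of-2 size, while basing the
 * pool on the 255-TRE hardware limit would cross that boundary and
 * nearly double the allocation.
 */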
2306
2307/* Returns the maximum number of TREs in a single transaction for a channel */
2308u32 gsi_channel_trans_tre_max(struct gsi *gsi, u32 channel_id)
2309{
2310        struct gsi_channel *channel = &gsi->channel[channel_id];
2311
2312        return channel->tlv_count;
2313}
2314