linux/drivers/net/ipa/ipa_endpoint.c
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/if_rmnet.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_data.h"
#include "ipa_endpoint.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"
#include "ipa_modem.h"
#include "ipa_table.h"
#include "ipa_gsi.h"
#include "ipa_power.h"

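/* Decrement @v unless it is already zero; returns true if it decremented */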
#define atomic_dec_not_zero(v)  atomic_add_unless((v), -1, 0)

#define IPA_REPLENISH_BATCH     16

/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
#define IPA_RX_BUFFER_SIZE      8192    /* PAGE_SIZE > 4096 wastes a LOT */

/* The amount of RX buffer space consumed by standard skb overhead */
#define IPA_RX_BUFFER_OVERHEAD  (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
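/* (This works out to NET_SKB_PAD plus the cache-aligned size of struct
 * skb_shared_info -- typically a few hundred bytes on a 4 KiB-page system.)
 */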

/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
#define IPA_ENDPOINT_QMAP_METADATA_MASK         0x000000ff /* host byte order */
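/* (The mask selects the metadata's low-order byte, which holds the mux_id;
 * it is converted to big endian before being written to the hardware.)
 */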

#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX       3
#define IPA_AGGR_TIME_LIMIT                     500     /* microseconds */

/** enum ipa_status_opcode - status element opcode hardware values */
enum ipa_status_opcode {
        IPA_STATUS_OPCODE_PACKET                = 0x01,
        IPA_STATUS_OPCODE_DROPPED_PACKET        = 0x04,
        IPA_STATUS_OPCODE_SUSPENDED_PACKET      = 0x08,
        IPA_STATUS_OPCODE_PACKET_2ND_PASS       = 0x40,
};

/** enum ipa_status_exception - status element exception type */
enum ipa_status_exception {
        /* 0 means no exception */
        IPA_STATUS_EXCEPTION_DEAGGR             = 0x01,
};

/* Status element provided by hardware */
struct ipa_status {
        u8 opcode;              /* enum ipa_status_opcode */
        u8 exception;           /* enum ipa_status_exception */
        __le16 mask;
        __le16 pkt_len;
        u8 endp_src_idx;
        u8 endp_dst_idx;
        __le32 metadata;
        __le32 flags1;
        __le64 flags2;
        __le32 flags3;
        __le32 flags4;
};
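/* (As laid out above, a status element occupies 32 bytes; receive buffers
 * are stepped through in units of this size in ipa_endpoint_status_parse().)
 */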

/* Field masks for struct ipa_status structure fields */
#define IPA_STATUS_MASK_TAG_VALID_FMASK         GENMASK(4, 4)
#define IPA_STATUS_SRC_IDX_FMASK                GENMASK(4, 0)
#define IPA_STATUS_DST_IDX_FMASK                GENMASK(4, 0)
#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK      GENMASK(31, 22)
#define IPA_STATUS_FLAGS2_TAG_FMASK             GENMASK_ULL(63, 16)

static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
                            const struct ipa_gsi_endpoint_data *all_data,
                            const struct ipa_gsi_endpoint_data *data)
{
        const struct ipa_gsi_endpoint_data *other_data;
        struct device *dev = &ipa->pdev->dev;
        enum ipa_endpoint_name other_name;

        if (ipa_gsi_endpoint_data_empty(data))
                return true;

        if (!data->toward_ipa) {
                if (data->endpoint.filter_support) {
                        dev_err(dev, "filtering not supported for RX endpoint %u\n",
                                data->endpoint_id);
                        return false;
                }

                return true;    /* Nothing more to check for RX */
        }

        if (data->endpoint.config.status_enable) {
                other_name = data->endpoint.config.tx.status_endpoint;
                if (other_name >= count) {
                        dev_err(dev, "status endpoint name %u out of range for endpoint %u\n",
                                other_name, data->endpoint_id);
                        return false;
                }

                /* Status endpoint must be defined... */
                other_data = &all_data[other_name];
                if (ipa_gsi_endpoint_data_empty(other_data)) {
                        dev_err(dev, "status endpoint name %u undefined for endpoint %u\n",
                                other_name, data->endpoint_id);
                        return false;
                }

                /* ...and has to be an RX endpoint... */
                if (other_data->toward_ipa) {
                        dev_err(dev,
                                "status endpoint for endpoint %u not RX\n",
                                data->endpoint_id);
                        return false;
                }

                /* ...and if it's to be an AP endpoint... */
                if (other_data->ee_id == GSI_EE_AP) {
                        /* ...make sure it has status enabled. */
                        if (!other_data->endpoint.config.status_enable) {
                                dev_err(dev,
                                        "status not enabled for endpoint %u\n",
                                        other_data->endpoint_id);
                                return false;
                        }
                }
        }

        if (data->endpoint.config.dma_mode) {
                other_name = data->endpoint.config.dma_endpoint;
                if (other_name >= count) {
                        dev_err(dev, "DMA endpoint name %u out of range for endpoint %u\n",
                                other_name, data->endpoint_id);
                        return false;
                }

                other_data = &all_data[other_name];
                if (ipa_gsi_endpoint_data_empty(other_data)) {
                        dev_err(dev, "DMA endpoint name %u undefined for endpoint %u\n",
                                other_name, data->endpoint_id);
                        return false;
                }
        }

        return true;
}

static u32 aggr_byte_limit_max(enum ipa_version version)
{
        if (version < IPA_VERSION_4_5)
                return field_max(aggr_byte_limit_fmask(true));

        return field_max(aggr_byte_limit_fmask(false));
}

static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
                                    const struct ipa_gsi_endpoint_data *data)
{
        const struct ipa_gsi_endpoint_data *dp = data;
        struct device *dev = &ipa->pdev->dev;
        enum ipa_endpoint_name name;
        u32 limit;

        if (count > IPA_ENDPOINT_COUNT) {
                dev_err(dev, "too many endpoints specified (%u > %u)\n",
                        count, IPA_ENDPOINT_COUNT);
                return false;
        }

        /* The aggregation byte limit defines the point at which an
         * aggregation window will close.  It is programmed into the
         * IPA hardware as a number of KB.  We don't use "hard byte
         * limit" aggregation, which means that we need to supply
         * enough space in a receive buffer to hold a complete MTU
         * plus normal skb overhead *after* that aggregation byte
         * limit has been crossed.
         *
         * This check ensures we don't define a receive buffer size
         * that would exceed what we can represent in the field that
         * is used to program its size.
         */
        limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
        limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
        if (limit < IPA_RX_BUFFER_SIZE) {
                dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
                        IPA_RX_BUFFER_SIZE, limit);
                return false;
        }

        /* Make sure needed endpoints have defined data */
        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
                dev_err(dev, "command TX endpoint not defined\n");
                return false;
        }
        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
                dev_err(dev, "LAN RX endpoint not defined\n");
                return false;
        }
        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
                dev_err(dev, "AP->modem TX endpoint not defined\n");
                return false;
        }
        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
                dev_err(dev, "AP<-modem RX endpoint not defined\n");
                return false;
        }

        for (name = 0; name < count; name++, dp++)
                if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
                        return false;

        return true;
}

/* Allocate a transaction to use on a non-command endpoint */
static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
                                                  u32 tre_count)
{
        struct gsi *gsi = &endpoint->ipa->gsi;
        u32 channel_id = endpoint->channel_id;
        enum dma_data_direction direction;

        direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

        return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
}

/* suspend_delay represents suspend for RX, delay for TX endpoints.
 * Note that suspend is not supported starting with IPA v4.0.
 */
static bool
ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
{
        u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        bool state;
        u32 mask;
        u32 val;

        /* Suspend is not supported for IPA v4.0+.  Delay doesn't work
         * correctly on IPA v4.2.
         */
        if (endpoint->toward_ipa)
                WARN_ON(ipa->version == IPA_VERSION_4_2);
        else
                WARN_ON(ipa->version >= IPA_VERSION_4_0);

        mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;

        val = ioread32(ipa->reg_virt + offset);
        state = !!(val & mask);

        /* Don't bother if it's already in the requested state */
        if (suspend_delay != state) {
                val ^= mask;
                iowrite32(val, ipa->reg_virt + offset);
        }

        return state;
}

/* We currently don't care what the previous state was for delay mode */
static void
ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
{
        WARN_ON(!endpoint->toward_ipa);

        /* Delay mode doesn't work properly for IPA v4.2 */
        if (endpoint->ipa->version != IPA_VERSION_4_2)
                (void)ipa_endpoint_init_ctrl(endpoint, enable);
}

static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
{
        u32 mask = BIT(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        u32 offset;
        u32 val;

        WARN_ON(!(mask & ipa->available));

        offset = ipa_reg_state_aggr_active_offset(ipa->version);
        val = ioread32(ipa->reg_virt + offset);

        return !!(val & mask);
}

static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
{
        u32 mask = BIT(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;

        WARN_ON(!(mask & ipa->available));

        iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
}

/**
 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 * @endpoint:   Endpoint on which to emulate a suspend
 *
 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 *  with an open aggregation frame.  This is to work around a hardware
 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 *  generated when it should be.
 */
static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
{
        struct ipa *ipa = endpoint->ipa;

        if (!endpoint->data->aggregation)
                return;

        /* Nothing to do if the endpoint doesn't have aggregation open */
        if (!ipa_endpoint_aggr_active(endpoint))
                return;

        /* Force close aggregation */
        ipa_endpoint_force_close(endpoint);

        ipa_interrupt_simulate_suspend(ipa->interrupt);
}

/* Returns previous suspend state (true means suspend was enabled) */
static bool
ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
{
        bool suspended;

        if (endpoint->ipa->version >= IPA_VERSION_4_0)
                return enable;  /* For IPA v4.0+, no change made */

        WARN_ON(endpoint->toward_ipa);

        suspended = ipa_endpoint_init_ctrl(endpoint, enable);

        /* A client suspended with an open aggregation frame will not
         * generate a SUSPEND IPA interrupt.  If enabling suspend, have
         * ipa_endpoint_suspend_aggr() handle this.
         */
        if (enable && !suspended)
                ipa_endpoint_suspend_aggr(endpoint);

        return suspended;
}

/* Enable or disable delay or suspend mode on all modem endpoints */
void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
{
        u32 endpoint_id;

        /* DELAY mode doesn't work correctly on IPA v4.2 */
        if (ipa->version == IPA_VERSION_4_2)
                return;

        for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];

                if (endpoint->ee_id != GSI_EE_MODEM)
                        continue;

                /* Set TX delay mode or RX suspend mode */
                if (endpoint->toward_ipa)
                        ipa_endpoint_program_delay(endpoint, enable);
                else
                        (void)ipa_endpoint_program_suspend(endpoint, enable);
        }
}

/* Reset all modem endpoints to use the default exception endpoint */
int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
{
        u32 initialized = ipa->initialized;
        struct gsi_trans *trans;
        u32 count;

        /* We need one command per modem TX endpoint.  We can get an upper
         * bound on that by assuming all initialized endpoints are modem->IPA.
         * That won't happen, and we could be more precise, but this is fine
         * for now.  End the transaction with commands to clear the pipeline.
         */
        count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
        trans = ipa_cmd_trans_alloc(ipa, count);
        if (!trans) {
                dev_err(&ipa->pdev->dev,
                        "no transaction to reset modem exception endpoints\n");
                return -EBUSY;
        }

        while (initialized) {
                u32 endpoint_id = __ffs(initialized);
                struct ipa_endpoint *endpoint;
                u32 offset;

                initialized ^= BIT(endpoint_id);

                /* We only reset modem TX endpoints */
                endpoint = &ipa->endpoint[endpoint_id];
                if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
                        continue;

                offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

                /* Value written is 0, and all bits are updated.  That
                 * means status is disabled on the endpoint, and as a
                 * result all other fields in the register are ignored.
                 */
                ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
        }

        ipa_cmd_pipeline_clear_add(trans);

        /* XXX This should have a 1 second timeout */
        gsi_trans_commit_wait(trans);

        ipa_cmd_pipeline_clear_wait(ipa);

        return 0;
}

static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
        enum ipa_cs_offload_en enabled;
        u32 val = 0;

        /* FRAG_OFFLOAD_EN is 0 */
        if (endpoint->data->checksum) {
                enum ipa_version version = endpoint->ipa->version;

                if (endpoint->toward_ipa) {
                        u32 checksum_offset;

                        /* Checksum header offset is in 4-byte units */
                        checksum_offset = sizeof(struct rmnet_map_header);
                        checksum_offset /= sizeof(u32);
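                        /* (E.g. a 4-byte MAP header encodes as offset 1) */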
                        val |= u32_encode_bits(checksum_offset,
                                               CS_METADATA_HDR_OFFSET_FMASK);

                        enabled = version < IPA_VERSION_4_5
                                        ? IPA_CS_OFFLOAD_UL
                                        : IPA_CS_OFFLOAD_INLINE;
                } else {
                        enabled = version < IPA_VERSION_4_5
                                        ? IPA_CS_OFFLOAD_DL
                                        : IPA_CS_OFFLOAD_INLINE;
                }
        } else {
                enabled = IPA_CS_OFFLOAD_NONE;
        }
        val |= u32_encode_bits(enabled, CS_OFFLOAD_EN_FMASK);
        /* CS_GEN_QMB_MASTER_SEL is 0 */

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
{
        u32 offset;
        u32 val;

        if (!endpoint->toward_ipa)
                return;

        offset = IPA_REG_ENDP_INIT_NAT_N_OFFSET(endpoint->endpoint_id);
        val = u32_encode_bits(IPA_NAT_BYPASS, NAT_EN_FMASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static u32
ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
{
        u32 header_size = sizeof(struct rmnet_map_header);

        /* Without checksum offload, we just have the MAP header */
        if (!endpoint->data->checksum)
                return header_size;

        if (version < IPA_VERSION_4_5) {
                /* Checksum header inserted for AP TX endpoints only */
                if (endpoint->toward_ipa)
                        header_size += sizeof(struct rmnet_map_ul_csum_header);
        } else {
                /* Checksum header is used in both directions */
                header_size += sizeof(struct rmnet_map_v5_csum_header);
        }

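        /* (Assuming 4-byte rmnet_map headers: 4 bytes without checksum
         * offload, 8 bytes with it.)
         */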
        return header_size;
}

/**
 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 * @endpoint:   Endpoint pointer
 *
 * We program QMAP endpoints so each packet received is preceded by a QMAP
 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 * packet size field, and we have the IPA hardware populate both for each
 * received packet.  The header is configured (in the HDR_EXT register)
 * to use big endian format.
 *
 * The packet size is written into the QMAP header's pkt_len field.  That
 * location is defined here using the HDR_OFST_PKT_SIZE field.
 *
 * The mux_id comes from a 4-byte metadata value supplied with each packet
 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 * value that we want, in its low-order byte.  A bitmask defined in the
 * endpoint's METADATA_MASK register defines which byte within the modem
 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        u32 val = 0;

        if (endpoint->data->qmap) {
                enum ipa_version version = ipa->version;
                size_t header_size;

                header_size = ipa_qmap_header_size(version, endpoint);
                val = ipa_header_size_encoded(version, header_size);

                /* Define how to fill fields in a received QMAP header */
                if (!endpoint->toward_ipa) {
                        u32 offset;     /* Field offset within header */

                        /* Where IPA will write the metadata value */
                        offset = offsetof(struct rmnet_map_header, mux_id);
                        val |= ipa_metadata_offset_encoded(version, offset);

                        /* Where IPA will write the length */
                        offset = offsetof(struct rmnet_map_header, pkt_len);
                        /* Upper bits are stored in HDR_EXT with IPA v4.5 */
                        if (version >= IPA_VERSION_4_5)
                                offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);

                        val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
                        val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
                }
                /* For QMAP TX, metadata offset is 0 (modem assumes this) */
                val |= HDR_OFST_METADATA_VALID_FMASK;

                /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
                /* HDR_A5_MUX is 0 */
                /* HDR_LEN_INC_DEAGG_HDR is 0 */
                /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
        }

        iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
        u32 pad_align = endpoint->data->rx.pad_align;
        struct ipa *ipa = endpoint->ipa;
        u32 val = 0;

        val |= HDR_ENDIANNESS_FMASK;            /* big endian */

        /* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
         * driver assumes this field is meaningful in packets it receives,
         * and assumes the header's payload length includes that padding.
         * The RMNet driver does *not* pad packets it sends, however, so
         * the pad field (although 0) should be ignored.
         */
        if (endpoint->data->qmap && !endpoint->toward_ipa) {
                val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
                /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
                val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
                /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
        }

        /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
        if (!endpoint->toward_ipa)
                val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);

        /* IPA v4.5 adds some most-significant bits to a few fields,
         * two of which are defined in the HDR (not HDR_EXT) register.
         */
        if (ipa->version >= IPA_VERSION_4_5) {
                /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
                if (endpoint->data->qmap && !endpoint->toward_ipa) {
                        u32 offset;

                        offset = offsetof(struct rmnet_map_header, pkt_len);
                        offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
                        val |= u32_encode_bits(offset,
                                               HDR_OFST_PKT_SIZE_MSB_FMASK);
                        /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
                }
        }
        iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
{
        u32 endpoint_id = endpoint->endpoint_id;
        u32 val = 0;
        u32 offset;

        if (endpoint->toward_ipa)
                return;         /* Register not valid for TX endpoints */

        offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);

        /* Note that HDR_ENDIANNESS indicates big endian header fields */
        if (endpoint->data->qmap)
                val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
        u32 val;

        if (!endpoint->toward_ipa)
                return;         /* Register not valid for RX endpoints */

        if (endpoint->data->dma_mode) {
                enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
                u32 dma_endpoint_id;

                dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

                val = u32_encode_bits(IPA_DMA, MODE_FMASK);
                val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
        } else {
                val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
        }
        /* All other bits unspecified (and 0) */

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Compute the aggregation size value to use for a given buffer size */
static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
{
        /* We don't use "hard byte limit" aggregation, so we define the
         * aggregation limit such that our buffer has enough space *after*
         * that limit to receive a full MTU of data, plus overhead.
         */
        rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
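        /* (E.g. an 8192-byte buffer with a 1500-byte MTU and typical
         * overhead yields a limit of about 6 KB.)
         */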

        return rx_buffer_size / SZ_1K;
}

/* Encoded values for AGGR endpoint register fields */
static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
{
        if (version < IPA_VERSION_4_5)
                return u32_encode_bits(limit, aggr_byte_limit_fmask(true));

        return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
}

/* Encode the aggregation timer limit (microseconds) based on IPA version */
static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
{
        u32 gran_sel;
        u32 fmask;
        u32 val;

        if (version < IPA_VERSION_4_5) {
                /* We set aggregation granularity in ipa_hardware_config() */
                limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);

                return u32_encode_bits(limit, aggr_time_limit_fmask(true));
        }

        /* IPA v4.5 expresses the time limit using Qtime.  The AP has
         * pulse generators 0 and 1 available, which were configured
         * in ipa_qtime_config() to have granularity 100 usec and
         * 1 msec, respectively.  Use pulse generator 0 if possible,
         * otherwise fall back to pulse generator 1.
         */
        fmask = aggr_time_limit_fmask(false);
        val = DIV_ROUND_CLOSEST(limit, 100);
        if (val > field_max(fmask)) {
                /* Have to use pulse generator 1 (millisecond granularity) */
                gran_sel = AGGR_GRAN_SEL_FMASK;
                val = DIV_ROUND_CLOSEST(limit, 1000);
        } else {
                /* We can use pulse generator 0 (100 usec granularity) */
                gran_sel = 0;
        }

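        /* (E.g. the 500 microsecond IPA_AGGR_TIME_LIMIT used below
         * encodes as value 5 using pulse generator 0.)
         */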
        return gran_sel | u32_encode_bits(val, fmask);
}

static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
{
        u32 val = enabled ? 1 : 0;

        if (version < IPA_VERSION_4_5)
                return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));

        return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
}

static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
        enum ipa_version version = endpoint->ipa->version;
        u32 val = 0;

        if (endpoint->data->aggregation) {
                if (!endpoint->toward_ipa) {
                        bool close_eof;
                        u32 limit;

                        val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
                        val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);

                        limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
                        val |= aggr_byte_limit_encoded(version, limit);

                        limit = IPA_AGGR_TIME_LIMIT;
                        val |= aggr_time_limit_encoded(version, limit);

                        /* AGGR_PKT_LIMIT is 0 (unlimited) */

                        close_eof = endpoint->data->rx.aggr_close_eof;
                        val |= aggr_sw_eof_active_encoded(version, close_eof);

                        /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
                } else {
                        val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
                                               AGGR_EN_FMASK);
                        val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
                        /* other fields ignored */
                }
                /* AGGR_FORCE_CLOSE is 0 */
                /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
        } else {
                val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
                /* other fields ignored */
        }

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/* Return the Qtime-based head-of-line blocking timer value that
 * represents the given number of microseconds.  The result
 * includes both the timer value and the selected timer granularity.
 */
static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
{
        u32 gran_sel;
        u32 val;

        /* IPA v4.5 expresses time limits using Qtime.  The AP has
         * pulse generators 0 and 1 available, which were configured
         * in ipa_qtime_config() to have granularity 100 usec and
         * 1 msec, respectively.  Use pulse generator 0 if possible,
         * otherwise fall back to pulse generator 1.
         */
        val = DIV_ROUND_CLOSEST(microseconds, 100);
        if (val > field_max(TIME_LIMIT_FMASK)) {
                /* Have to use pulse generator 1 (millisecond granularity) */
                gran_sel = GRAN_SEL_FMASK;
                val = DIV_ROUND_CLOSEST(microseconds, 1000);
        } else {
                /* We can use pulse generator 0 (100 usec granularity) */
                gran_sel = 0;
        }

        return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
}

/* The head-of-line blocking timer is defined as a tick count.  For
 * IPA version 4.5 the tick count is based on the Qtimer, which is
 * derived from the 19.2 MHz SoC XO clock.  For older IPA versions
 * each tick represents 128 cycles of the IPA core clock.
 *
 * Return the encoded value that should be written to that register
 * that represents the timeout period provided.  For IPA v4.2 this
 * encodes a base and scale value, while for earlier versions the
 * value is a simple tick count.
 */
static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
{
        u32 width;
        u32 scale;
        u64 ticks;
        u64 rate;
        u32 high;
        u32 val;

        if (!microseconds)
                return 0;       /* Nothing to compute if timer period is 0 */

        if (ipa->version >= IPA_VERSION_4_5)
                return hol_block_timer_qtime_val(ipa, microseconds);

        /* Use 64 bit arithmetic to avoid overflow... */
        rate = ipa_core_clock_rate(ipa);
        ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
        /* ...but we still need to fit into a 32-bit register */
        WARN_ON(ticks > U32_MAX);

        /* IPA v3.5.1 through v4.1 just record the tick count */
        if (ipa->version < IPA_VERSION_4_2)
                return (u32)ticks;

        /* For IPA v4.2, the tick count is represented by base and
         * scale fields within the 32-bit timer register, where:
         *     ticks = base << scale;
         * The best precision is achieved when the base value is as
         * large as possible.  Find the highest set bit in the tick
         * count, and extract the number of bits in the base field
         * such that high bit is included.
         */
        high = fls(ticks);              /* 1..32 */
        width = HWEIGHT32(BASE_VALUE_FMASK);
        scale = high > width ? high - width : 0;
        if (scale) {
                /* If we're scaling, round up to get a closer result */
                ticks += 1 << (scale - 1);
                /* High bit was set, so rounding might have affected it */
                if (fls(ticks) != high)
                        scale++;
        }
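        /* Worked example (assuming a 5-bit base field): ticks = 150 has
         * fls() == 8, so scale = 3; rounding adds 1 << 2 = 4 (-> 154),
         * and base = 154 >> 3 = 19, representing 19 << 3 = 152 ticks.
         */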

        val = u32_encode_bits(scale, SCALE_FMASK);
        val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);

        return val;
}

/* If microseconds is 0, timeout is immediate */
static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
                                              u32 microseconds)
{
        u32 endpoint_id = endpoint->endpoint_id;
        struct ipa *ipa = endpoint->ipa;
        u32 offset;
        u32 val;

        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
        val = hol_block_timer_val(ipa, microseconds);
        iowrite32(val, ipa->reg_virt + offset);
}

static void
ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
{
        u32 endpoint_id = endpoint->endpoint_id;
        u32 offset;
        u32 val;

        val = enable ? HOL_BLOCK_EN_FMASK : 0;
        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
{
        u32 i;

        for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
                struct ipa_endpoint *endpoint = &ipa->endpoint[i];

                if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
                        continue;

                ipa_endpoint_init_hol_block_timer(endpoint, 0);
                ipa_endpoint_init_hol_block_enable(endpoint, true);
        }
}

static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
        u32 val = 0;

        if (!endpoint->toward_ipa)
                return;         /* Register not valid for RX endpoints */

        /* DEAGGR_HDR_LEN is 0 */
        /* PACKET_OFFSET_VALID is 0 */
        /* PACKET_OFFSET_LOCATION is ignored (not valid) */
        /* MAX_PACKET_LEN is 0 (not enforced) */

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
        struct ipa *ipa = endpoint->ipa;
        u32 val;

        val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
        iowrite32(val, ipa->reg_virt + offset);
}

static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
{
        u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
        u32 val = 0;

        if (!endpoint->toward_ipa)
                return;         /* Register not valid for RX endpoints */

        /* Low-order byte configures primary packet processing */
        val |= u32_encode_bits(endpoint->data->tx.seq_type, SEQ_TYPE_FMASK);

        /* Second byte configures replicated packet processing */
        val |= u32_encode_bits(endpoint->data->tx.seq_rep_type,
                               SEQ_REP_TYPE_FMASK);

        iowrite32(val, endpoint->ipa->reg_virt + offset);
}

/**
 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 * @endpoint:   Endpoint pointer
 * @skb:        Socket buffer to send
 *
 * Return:      0 if successful, or a negative error code
 */
int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
{
        struct gsi_trans *trans;
        u32 nr_frags;
        int ret;

        /* Make sure source endpoint's TLV FIFO has enough entries to
         * hold the linear portion of the skb and all its fragments.
         * If not, see if we can linearize it before giving up.
         */
        nr_frags = skb_shinfo(skb)->nr_frags;
        if (1 + nr_frags > endpoint->trans_tre_max) {
                if (skb_linearize(skb))
                        return -E2BIG;
                nr_frags = 0;
        }

        trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
        if (!trans)
                return -EBUSY;

        ret = gsi_trans_skb_add(trans, skb);
        if (ret)
                goto err_trans_free;
        trans->data = skb;      /* transaction owns skb now */

        gsi_trans_commit(trans, !netdev_xmit_more());

        return 0;

err_trans_free:
        gsi_trans_free(trans);

        return -ENOMEM;
}

static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
{
        u32 endpoint_id = endpoint->endpoint_id;
        struct ipa *ipa = endpoint->ipa;
        u32 val = 0;
        u32 offset;

        offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);

        if (endpoint->data->status_enable) {
                val |= STATUS_EN_FMASK;
                if (endpoint->toward_ipa) {
                        enum ipa_endpoint_name name;
                        u32 status_endpoint_id;

                        name = endpoint->data->tx.status_endpoint;
                        status_endpoint_id = ipa->name_map[name]->endpoint_id;

                        val |= u32_encode_bits(status_endpoint_id,
                                               STATUS_ENDP_FMASK);
                }
                /* STATUS_LOCATION is 0, meaning status element precedes
                 * packet (not present for IPA v4.5)
                 */
                /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
        }

        iowrite32(val, ipa->reg_virt + offset);
}

static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
{
        struct gsi_trans *trans;
        bool doorbell = false;
        struct page *page;
        u32 offset;
        u32 len;
        int ret;

        page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
        if (!page)
                return -ENOMEM;

        trans = ipa_endpoint_trans_alloc(endpoint, 1);
        if (!trans)
                goto err_free_pages;

        /* Offset the buffer to make space for skb headroom */
        offset = NET_SKB_PAD;
        len = IPA_RX_BUFFER_SIZE - offset;

        ret = gsi_trans_page_add(trans, page, len, offset);
        if (ret)
                goto err_trans_free;
        trans->data = page;     /* transaction owns page now */

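        /* (Ring the doorbell only once per IPA_REPLENISH_BATCH buffers
         * queued, to amortize the cost of the doorbell write.)
         */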
        if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
                doorbell = true;
                endpoint->replenish_ready = 0;
        }

        gsi_trans_commit(trans, doorbell);

        return 0;

err_trans_free:
        gsi_trans_free(trans);
err_free_pages:
        __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));

        return -ENOMEM;
}

/**
 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
 * @endpoint:   Endpoint to be replenished
 * @add_one:    Whether this is replacing a just-consumed buffer
 *
 * The IPA hardware can hold a fixed number of receive buffers for an RX
 * endpoint, based on the number of entries in the underlying channel ring
 * buffer.  If an endpoint's "backlog" is non-zero, it indicates how many
 * more receive buffers can be supplied to the hardware.  Replenishing for
 * an endpoint can be disabled, in which case requests to replenish a
 * buffer are "saved", and transferred to the backlog once it is re-enabled
 * again.
 */
static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, bool add_one)
{
        struct gsi *gsi;
        u32 backlog;

        if (!endpoint->replenish_enabled) {
                if (add_one)
                        atomic_inc(&endpoint->replenish_saved);
                return;
        }

        while (atomic_dec_not_zero(&endpoint->replenish_backlog))
                if (ipa_endpoint_replenish_one(endpoint))
                        goto try_again_later;
        if (add_one)
                atomic_inc(&endpoint->replenish_backlog);

        return;

try_again_later:
        /* The last one didn't succeed, so fix the backlog */
        backlog = atomic_inc_return(&endpoint->replenish_backlog);

        if (add_one)
                atomic_inc(&endpoint->replenish_backlog);

        /* Whenever a receive buffer transaction completes we'll try to
         * replenish again.  It's unlikely, but if we fail to supply even
         * one buffer, nothing will trigger another replenish attempt.
         * Receive buffer transactions use one TRE, so schedule work to
         * try replenishing again if our backlog is *all* available TREs.
         */
        gsi = &endpoint->ipa->gsi;
        if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
                schedule_delayed_work(&endpoint->replenish_work,
                                      msecs_to_jiffies(1));
}

static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
{
        struct gsi *gsi = &endpoint->ipa->gsi;
        u32 max_backlog;
        u32 saved;

        endpoint->replenish_enabled = true;
        while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
                atomic_add(saved, &endpoint->replenish_backlog);

        /* Start replenishing if hardware currently has no buffers */
        max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
        if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
                ipa_endpoint_replenish(endpoint, false);
}

static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
{
        u32 backlog;

        endpoint->replenish_enabled = false;
        while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
                atomic_add(backlog, &endpoint->replenish_saved);
}

static void ipa_endpoint_replenish_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct ipa_endpoint *endpoint;

        endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);

        ipa_endpoint_replenish(endpoint, false);
}

static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
                                  void *data, u32 len, u32 extra)
{
        struct sk_buff *skb;

        skb = __dev_alloc_skb(len, GFP_ATOMIC);
        if (skb) {
                skb_put(skb, len);
                memcpy(skb->data, data, len);
                skb->truesize += extra;
        }

        /* Now receive it, or drop it if there's no netdev */
        if (endpoint->netdev)
                ipa_modem_skb_rx(endpoint->netdev, skb);
        else if (skb)
                dev_kfree_skb_any(skb);
}

static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
                                   struct page *page, u32 len)
{
        struct sk_buff *skb;

        /* Nothing to do if there's no netdev */
        if (!endpoint->netdev)
                return false;

        WARN_ON(len > SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE - NET_SKB_PAD));

        skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
        if (skb) {
                /* Reserve the headroom and account for the data */
                skb_reserve(skb, NET_SKB_PAD);
                skb_put(skb, len);
        }

        /* Receive the buffer (or record drop if unable to build it) */
        ipa_modem_skb_rx(endpoint->netdev, skb);

        return skb != NULL;
}

/* The format of a packet status element is the same for several status
 * types (opcodes).  Other types aren't currently supported.
 */
static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
{
        switch (opcode) {
        case IPA_STATUS_OPCODE_PACKET:
        case IPA_STATUS_OPCODE_DROPPED_PACKET:
        case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
        case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
                return true;
        default:
                return false;
        }
}

static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
                                     const struct ipa_status *status)
{
        u32 endpoint_id;

        if (!ipa_status_format_packet(status->opcode))
                return true;
        if (!status->pkt_len)
                return true;
        endpoint_id = u8_get_bits(status->endp_dst_idx,
                                  IPA_STATUS_DST_IDX_FMASK);
        if (endpoint_id != endpoint->endpoint_id)
                return true;

        return false;   /* Don't skip this packet, process it */
}

static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
                                    const struct ipa_status *status)
{
        struct ipa_endpoint *command_endpoint;
        struct ipa *ipa = endpoint->ipa;
        u32 endpoint_id;

        if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
                return false;   /* No valid tag */

        /* The status contains a valid tag.  We know the packet was sent to
         * this endpoint (already verified by ipa_endpoint_status_skip()).
         * If the packet came from the AP->command TX endpoint we know
         * this packet was sent as part of the pipeline clear process.
         */
        endpoint_id = u8_get_bits(status->endp_src_idx,
                                  IPA_STATUS_SRC_IDX_FMASK);
        command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
        if (endpoint_id == command_endpoint->endpoint_id) {
                complete(&ipa->completion);
        } else {
                dev_err(&ipa->pdev->dev,
                        "unexpected tagged packet from endpoint %u\n",
                        endpoint_id);
        }

        return true;
}

/* Return whether the status indicates the packet should be dropped */
static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
                                     const struct ipa_status *status)
{
        u32 val;

        /* If the status indicates a tagged transfer, we'll drop the packet */
        if (ipa_endpoint_status_tag(endpoint, status))
                return true;

        /* Deaggregation exceptions we drop; all other types we consume */
        if (status->exception)
                return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;

        /* Drop the packet if it fails to match a routing rule; otherwise no */
        val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);

        return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
}

static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
                                      struct page *page, u32 total_len)
{
        void *data = page_address(page) + NET_SKB_PAD;
        u32 unused = IPA_RX_BUFFER_SIZE - total_len;
        u32 resid = total_len;

        while (resid) {
                const struct ipa_status *status = data;
                u32 align;
                u32 len;

                if (resid < sizeof(*status)) {
                        dev_err(&endpoint->ipa->pdev->dev,
                                "short message (%u bytes < %zu byte status)\n",
                                resid, sizeof(*status));
                        break;
                }

                /* Skip over status packets that lack packet data */
                if (ipa_endpoint_status_skip(endpoint, status)) {
                        data += sizeof(*status);
                        resid -= sizeof(*status);
                        continue;
                }

                /* Compute the amount of buffer space consumed by the packet,
                 * including the status element.  If the hardware is configured
                 * to pad packet data to an aligned boundary, account for that.
                 * And if checksum offload is enabled a trailer containing
                 * computed checksum information will be appended.
                 */
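                /* (Each entry in the buffer is thus laid out as:
                 *   [ status | packet data, padded | optional csum trailer ])
                 */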
1291                align = endpoint->data->rx.pad_align ? : 1;
1292                len = le16_to_cpu(status->pkt_len);
1293                len = sizeof(*status) + ALIGN(len, align);
1294                if (endpoint->data->checksum)
1295                        len += sizeof(struct rmnet_map_dl_csum_trailer);
1296
1297                if (!ipa_endpoint_status_drop(endpoint, status)) {
1298                        void *data2;
1299                        u32 extra;
1300                        u32 len2;
1301
1302                        /* Client receives only packet data (no status) */
1303                        data2 = data + sizeof(*status);
1304                        len2 = le16_to_cpu(status->pkt_len);
1305
1306                        /* Have the true size reflect the extra unused space in
1307                         * the original receive buffer.  Distribute the "cost"
1308                         * proportionately across all aggregated packets in the
1309                         * buffer.
1310                         */
1311                        extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1312                        ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1313                }
1314
1315                /* Consume status and the full packet it describes */
1316                data += len;
1317                resid -= len;
1318        }
1319}
1320
1321/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
1322static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1323                                     struct gsi_trans *trans)
1324{
1325}
1326
1327/* Complete transaction initiated in ipa_endpoint_replenish_one() */
1328static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1329                                     struct gsi_trans *trans)
1330{
1331        struct page *page;
1332
1333        ipa_endpoint_replenish(endpoint, true);
1334
1335        if (trans->cancelled)
1336                return;
1337
1338        /* Parse or build a socket buffer using the actual received length */
1339        page = trans->data;
1340        if (endpoint->data->status_enable)
1341                ipa_endpoint_status_parse(endpoint, page, trans->len);
1342        else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1343                trans->data = NULL;     /* Pages have been consumed */
1344}
1345
1346void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1347                                 struct gsi_trans *trans)
1348{
1349        if (endpoint->toward_ipa)
1350                ipa_endpoint_tx_complete(endpoint, trans);
1351        else
1352                ipa_endpoint_rx_complete(endpoint, trans);
1353}
1354
1355void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1356                                struct gsi_trans *trans)
1357{
1358        if (endpoint->toward_ipa) {
1359                struct ipa *ipa = endpoint->ipa;
1360
1361                /* Nothing to do for command transactions */
1362                if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1363                        struct sk_buff *skb = trans->data;
1364
1365                        if (skb)
1366                                dev_kfree_skb_any(skb);
1367                }
1368        } else {
1369                struct page *page = trans->data;
1370
1371                if (page)
1372                        __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1373        }
1374}
1375
1376void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1377{
1378        u32 val;
1379
1380        /* ROUTE_DIS is 0 */
1381        val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1382        val |= ROUTE_DEF_HDR_TABLE_FMASK;
1383        val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1384        val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1385        val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1386
1387        iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1388}
1389
1390void ipa_endpoint_default_route_clear(struct ipa *ipa)
1391{
1392        ipa_endpoint_default_route_set(ipa, 0);
1393}
1394
1395/**
1396 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1397 * @endpoint:   Endpoint to be reset
1398 *
1399 * If aggregation is active on an RX endpoint when a reset is performed
1400 * on its underlying GSI channel, a special sequence of actions must be
1401 * taken to ensure the IPA pipeline is properly cleared.
1402 *
1403 * Return:      0 if successful, or a negative error code
1404 */
1405static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1406{
1407        struct device *dev = &endpoint->ipa->pdev->dev;
1408        struct ipa *ipa = endpoint->ipa;
1409        struct gsi *gsi = &ipa->gsi;
1410        bool suspended = false;
1411        dma_addr_t addr;
1412        u32 retries;
1413        u32 len = 1;
1414        void *virt;
1415        int ret;
1416
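            /* Allocate and DMA-map a minimal buffer to land the single
             * byte read used to flush the aggregation pipeline.
             */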
1417        virt = kzalloc(len, GFP_KERNEL);
1418        if (!virt)
1419                return -ENOMEM;
1420
1421        addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1422        if (dma_mapping_error(dev, addr)) {
1423                ret = -ENOMEM;
1424                goto out_kfree;
1425        }
1426
1427        /* Force close aggregation before issuing the reset */
1428        ipa_endpoint_force_close(endpoint);
1429
1430        /* Reset and reconfigure the channel with the doorbell engine
1431         * disabled.  Then poll until we know aggregation is no longer
1432         * active.  We'll re-enable the doorbell (if appropriate) when
1433         * we reset again below.
1434         */
1435        gsi_channel_reset(gsi, endpoint->channel_id, false);
1436
1437        /* Make sure the channel isn't suspended */
1438        suspended = ipa_endpoint_program_suspend(endpoint, false);
1439
1440        /* Start the channel and do a 1-byte read */
1441        ret = gsi_channel_start(gsi, endpoint->channel_id);
1442        if (ret)
1443                goto out_suspend_again;
1444
1445        ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1446        if (ret)
1447                goto err_endpoint_stop;
1448
1449        /* Wait for aggregation to be closed on the channel */
1450        retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1451        do {
1452                if (!ipa_endpoint_aggr_active(endpoint))
1453                        break;
1454                usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1455        } while (retries--);
1456
1457        /* Check one last time */
1458        if (ipa_endpoint_aggr_active(endpoint))
1459                dev_err(dev, "endpoint %u still active during reset\n",
1460                        endpoint->endpoint_id);
1461
1462        gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1463
1464        ret = gsi_channel_stop(gsi, endpoint->channel_id);
1465        if (ret)
1466                goto out_suspend_again;
1467
1468        /* Finally, reset and reconfigure the channel again (re-enabling
1469         * the doorbell engine if appropriate).  Sleep for 1 millisecond to
1470         * complete the channel reset sequence.  Finish by suspending the
1471         * channel again (if necessary).
1472         */
1473        gsi_channel_reset(gsi, endpoint->channel_id, true);
1474
1475        usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1476
1477        goto out_suspend_again;
1478
1479err_endpoint_stop:
1480        (void)gsi_channel_stop(gsi, endpoint->channel_id);
1481out_suspend_again:
1482        if (suspended)
1483                (void)ipa_endpoint_program_suspend(endpoint, true);
1484        dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1485out_kfree:
1486        kfree(virt);
1487
1488        return ret;
1489}
1490
1491static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1492{
1493        u32 channel_id = endpoint->channel_id;
1494        struct ipa *ipa = endpoint->ipa;
1495        bool special;
1496        int ret = 0;
1497
1498        /* Prior to IPA v4.0 (i.e., on IPA v3.5.1), if an RX endpoint is
1499         * reset while aggregation is active, a special recovery sequence
1500         * is needed.  All other cases just reset the underlying GSI channel.
1501         */
1502        special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1503                        endpoint->data->aggregation;
1504        if (special && ipa_endpoint_aggr_active(endpoint))
1505                ret = ipa_endpoint_reset_rx_aggr(endpoint);
1506        else
1507                gsi_channel_reset(&ipa->gsi, channel_id, true);
1508
1509        if (ret)
1510                dev_err(&ipa->pdev->dev,
1511                        "error %d resetting channel %u for endpoint %u\n",
1512                        ret, endpoint->channel_id, endpoint->endpoint_id);
1513}
1514
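    /* Program all of an endpoint's configuration registers, first taking
     * TX endpoints out of DELAY mode (and clearing SUSPEND for RX).
     */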
1515static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1516{
1517        if (endpoint->toward_ipa)
1518                ipa_endpoint_program_delay(endpoint, false);
1519        else
1520                (void)ipa_endpoint_program_suspend(endpoint, false);
1521        ipa_endpoint_init_cfg(endpoint);
1522        ipa_endpoint_init_nat(endpoint);
1523        ipa_endpoint_init_hdr(endpoint);
1524        ipa_endpoint_init_hdr_ext(endpoint);
1525        ipa_endpoint_init_hdr_metadata_mask(endpoint);
1526        ipa_endpoint_init_mode(endpoint);
1527        ipa_endpoint_init_aggr(endpoint);
1528        ipa_endpoint_init_deaggr(endpoint);
1529        ipa_endpoint_init_rsrc_grp(endpoint);
1530        ipa_endpoint_init_seq(endpoint);
1531        ipa_endpoint_status(endpoint);
1532}
1533
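    /* Start an endpoint's channel; for RX, also enable the SUSPEND
     * interrupt and buffer replenishing.  Returns 0 or a negative errno.
     */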
1534int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1535{
1536        struct ipa *ipa = endpoint->ipa;
1537        struct gsi *gsi = &ipa->gsi;
1538        int ret;
1539
1540        ret = gsi_channel_start(gsi, endpoint->channel_id);
1541        if (ret) {
1542                dev_err(&ipa->pdev->dev,
1543                        "error %d starting %cX channel %u for endpoint %u\n",
1544                        ret, endpoint->toward_ipa ? 'T' : 'R',
1545                        endpoint->channel_id, endpoint->endpoint_id);
1546                return ret;
1547        }
1548
1549        if (!endpoint->toward_ipa) {
1550                ipa_interrupt_suspend_enable(ipa->interrupt,
1551                                             endpoint->endpoint_id);
1552                ipa_endpoint_replenish_enable(endpoint);
1553        }
1554
1555        ipa->enabled |= BIT(endpoint->endpoint_id);
1556
1557        return 0;
1558}
1559
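    /* Disable an endpoint: quiesce RX handling, then stop its channel */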
1560void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1561{
1562        u32 mask = BIT(endpoint->endpoint_id);
1563        struct ipa *ipa = endpoint->ipa;
1564        struct gsi *gsi = &ipa->gsi;
1565        int ret;
1566
1567        if (!(ipa->enabled & mask))
1568                return;
1569
1570        ipa->enabled ^= mask;
1571
1572        if (!endpoint->toward_ipa) {
1573                ipa_endpoint_replenish_disable(endpoint);
1574                ipa_interrupt_suspend_disable(ipa->interrupt,
1575                                              endpoint->endpoint_id);
1576        }
1577
1578        /* Note that if stop fails, the channel's state is not well-defined */
1579        ret = gsi_channel_stop(gsi, endpoint->channel_id);
1580        if (ret)
1581                dev_err(&ipa->pdev->dev,
1582                        "error %d attempting to stop endpoint %u\n", ret,
1583                        endpoint->endpoint_id);
1584}
1585
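    /* Suspend an enabled endpoint, quiescing RX traffic first */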
1586void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1587{
1588        struct device *dev = &endpoint->ipa->pdev->dev;
1589        struct gsi *gsi = &endpoint->ipa->gsi;
1590        int ret;
1591
1592        if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1593                return;
1594
1595        if (!endpoint->toward_ipa) {
1596                ipa_endpoint_replenish_disable(endpoint);
1597                (void)ipa_endpoint_program_suspend(endpoint, true);
1598        }
1599
1600        ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1601        if (ret)
1602                dev_err(dev, "error %d suspending channel %u\n", ret,
1603                        endpoint->channel_id);
1604}
1605
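    /* Resume an enabled endpoint; the inverse of ipa_endpoint_suspend_one() */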
1606void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1607{
1608        struct device *dev = &endpoint->ipa->pdev->dev;
1609        struct gsi *gsi = &endpoint->ipa->gsi;
1610        int ret;
1611
1612        if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1613                return;
1614
1615        if (!endpoint->toward_ipa)
1616                (void)ipa_endpoint_program_suspend(endpoint, false);
1617
1618        ret = gsi_channel_resume(gsi, endpoint->channel_id);
1619        if (ret)
1620                dev_err(dev, "error %d resuming channel %u\n", ret,
1621                        endpoint->channel_id);
1622        else if (!endpoint->toward_ipa)
1623                ipa_endpoint_replenish_enable(endpoint);
1624}
1625
1626void ipa_endpoint_suspend(struct ipa *ipa)
1627{
1628        if (!ipa->setup_complete)
1629                return;
1630
1631        if (ipa->modem_netdev)
1632                ipa_modem_suspend(ipa->modem_netdev);
1633
1634        ipa_cmd_pipeline_clear(ipa);
1635
1636        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1637        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1638}
1639
1640void ipa_endpoint_resume(struct ipa *ipa)
1641{
1642        if (!ipa->setup_complete)
1643                return;
1644
1645        ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1646        ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1647
1648        if (ipa->modem_netdev)
1649                ipa_modem_resume(ipa->modem_netdev);
1650}
1651
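    /* Set up an AP-owned endpoint: initialize RX replenish state and
     * program the endpoint's registers.
     */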
1652static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1653{
1654        struct gsi *gsi = &endpoint->ipa->gsi;
1655        u32 channel_id = endpoint->channel_id;
1656
1657        /* Only AP endpoints get set up */
1658        if (endpoint->ee_id != GSI_EE_AP)
1659                return;
1660
1661        endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
1662        if (!endpoint->toward_ipa) {
1663                /* RX transactions require a single TRE, so the maximum
1664                 * backlog is the same as the maximum outstanding TREs.
1665                 */
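                /* Replenishing starts disabled; stash the entire initial
                 * backlog in replenish_saved, which gets transferred to
                 * replenish_backlog when replenishing is enabled.
                 */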
1666                endpoint->replenish_enabled = false;
1667                atomic_set(&endpoint->replenish_saved,
1668                           gsi_channel_tre_max(gsi, endpoint->channel_id));
1669                atomic_set(&endpoint->replenish_backlog, 0);
1670                INIT_DELAYED_WORK(&endpoint->replenish_work,
1671                                  ipa_endpoint_replenish_work);
1672        }
1673
1674        ipa_endpoint_program(endpoint);
1675
1676        endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1677}
1678
1679static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1680{
1681        endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1682
1683        if (!endpoint->toward_ipa)
1684                cancel_delayed_work_sync(&endpoint->replenish_work);
1685
1686        ipa_endpoint_reset(endpoint);
1687}
1688
1689void ipa_endpoint_setup(struct ipa *ipa)
1690{
1691        u32 initialized = ipa->initialized;
1692
1693        ipa->set_up = 0;
1694        while (initialized) {
1695                u32 endpoint_id = __ffs(initialized);
1696
1697                initialized ^= BIT(endpoint_id);
1698
1699                ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1700        }
1701}
1702
1703void ipa_endpoint_teardown(struct ipa *ipa)
1704{
1705        u32 set_up = ipa->set_up;
1706
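        /* Tear down endpoints in the reverse of their setup order */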
1707        while (set_up) {
1708                u32 endpoint_id = __fls(set_up);
1709
1710                set_up ^= BIT(endpoint_id);
1711
1712                ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1713        }
1714        ipa->set_up = 0;
1715}
1716
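    /* Learn which endpoints the hardware supplies and verify all
     * initialized endpoints are valid.  Returns 0 or a negative errno.
     */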
1717int ipa_endpoint_config(struct ipa *ipa)
1718{
1719        struct device *dev = &ipa->pdev->dev;
1720        u32 initialized;
1721        u32 rx_base;
1722        u32 rx_mask;
1723        u32 tx_mask;
1724        int ret = 0;
1725        u32 max;
1726        u32 val;
1727
1728        /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1729         * Furthermore, the endpoints were not grouped such that TX
1730         * endpoint numbers started with 0 and RX endpoints had numbers
1731         * higher than all TX endpoints, so we can't do the simple
1732         * direction check used for newer hardware below.
1733         *
1734         * For hardware that doesn't support the FLAVOR_0 register,
1735         * just set the available mask to support any endpoint, and
1736         * assume the configuration is valid.
1737         */
1738        if (ipa->version < IPA_VERSION_3_5) {
1739                ipa->available = ~0;
1740                return 0;
1741        }
1742
1743        /* Find out about the endpoints supplied by the hardware, and ensure
1744         * the highest one doesn't exceed the number we support.
1745         */
1746        val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1747
1748        /* Our RX is an IPA producer */
1749        rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
1750        max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
1751        if (max > IPA_ENDPOINT_MAX) {
1752                dev_err(dev, "too many endpoints (%u > %u)\n",
1753                        max, IPA_ENDPOINT_MAX);
1754                return -EINVAL;
1755        }
1756        rx_mask = GENMASK(max - 1, rx_base);
1757
1758        /* Our TX is an IPA consumer */
1759        max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
1760        tx_mask = GENMASK(max - 1, 0);
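        /* Example (hypothetical counts): rx_base = 13 with 10 producer
         * pipes gives max = 23, so rx_mask = GENMASK(22, 13) = 0x007fe000;
         * 13 consumer pipes gives tx_mask = GENMASK(12, 0) = 0x00001fff.
         */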
1761
1762        ipa->available = rx_mask | tx_mask;
1763
1764        /* Check for initialized endpoints not supported by the hardware */
1765        if (ipa->initialized & ~ipa->available) {
1766                dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1767                        ipa->initialized & ~ipa->available);
1768                ret = -EINVAL;          /* Keep checking; report any other errors too */
1769        }
1770
1771        initialized = ipa->initialized;
1772        while (initialized) {
1773                u32 endpoint_id = __ffs(initialized);
1774                struct ipa_endpoint *endpoint;
1775
1776                initialized ^= BIT(endpoint_id);
1777
1778                /* Make sure it's pointing in the right direction */
1779                endpoint = &ipa->endpoint[endpoint_id];
1780                if ((endpoint_id < rx_base) != endpoint->toward_ipa) {
1781                        dev_err(dev, "endpoint id %u wrong direction\n",
1782                                endpoint_id);
1783                        ret = -EINVAL;
1784                }
1785        }
1786
1787        return ret;
1788}
1789
1790void ipa_endpoint_deconfig(struct ipa *ipa)
1791{
1792        ipa->available = 0;     /* Nothing more to do */
1793}
1794
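    /* Record one endpoint's configuration and mark it initialized */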
1795static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1796                                  const struct ipa_gsi_endpoint_data *data)
1797{
1798        struct ipa_endpoint *endpoint;
1799
1800        endpoint = &ipa->endpoint[data->endpoint_id];
1801
1802        if (data->ee_id == GSI_EE_AP)
1803                ipa->channel_map[data->channel_id] = endpoint;
1804        ipa->name_map[name] = endpoint;
1805
1806        endpoint->ipa = ipa;
1807        endpoint->ee_id = data->ee_id;
1808        endpoint->channel_id = data->channel_id;
1809        endpoint->endpoint_id = data->endpoint_id;
1810        endpoint->toward_ipa = data->toward_ipa;
1811        endpoint->data = &data->endpoint.config;
1812
1813        ipa->initialized |= BIT(endpoint->endpoint_id);
1814}
1815
1816static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1817{
1818        endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1819
1820        memset(endpoint, 0, sizeof(*endpoint));
1821}
1822
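    /* Inverse of ipa_endpoint_init(); release all initialized endpoints */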
1823void ipa_endpoint_exit(struct ipa *ipa)
1824{
1825        u32 initialized = ipa->initialized;
1826
1827        while (initialized) {
1828                u32 endpoint_id = __fls(initialized);
1829
1830                initialized ^= BIT(endpoint_id);
1831
1832                ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1833        }
1834        memset(ipa->name_map, 0, sizeof(ipa->name_map));
1835        memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1836}
1837
1838/* Returns a bitmask of endpoints that support filtering, or 0 on error */
1839u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1840                      const struct ipa_gsi_endpoint_data *data)
1841{
1842        enum ipa_endpoint_name name;
1843        u32 filter_map;
1844
1845        if (!ipa_endpoint_data_valid(ipa, count, data))
1846                return 0;       /* Error */
1847
1848        ipa->initialized = 0;
1849
1850        filter_map = 0;
1851        for (name = 0; name < count; name++, data++) {
1852                if (ipa_gsi_endpoint_data_empty(data))
1853                        continue;       /* Skip over empty slots */
1854
1855                ipa_endpoint_init_one(ipa, name, data);
1856
1857                if (data->endpoint.filter_support)
1858                        filter_map |= BIT(data->endpoint_id);
1859        }
1860
1861        if (!ipa_filter_map_valid(ipa, filter_map))
1862                goto err_endpoint_exit;
1863
1864        return filter_map;      /* Non-zero bitmask */
1865
1866err_endpoint_exit:
1867        ipa_endpoint_exit(ipa);
1868
1869        return 0;       /* Error */
1870}
1871