linux/drivers/net/ipa/ipa_endpoint.c
   1// SPDX-License-Identifier: GPL-2.0
   2
   3/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
   4 * Copyright (C) 2019-2020 Linaro Ltd.
   5 */
   6
   7#include <linux/types.h>
   8#include <linux/device.h>
   9#include <linux/slab.h>
  10#include <linux/bitfield.h>
  11#include <linux/if_rmnet.h>
  12#include <linux/dma-direction.h>
  13
  14#include "gsi.h"
  15#include "gsi_trans.h"
  16#include "ipa.h"
  17#include "ipa_data.h"
  18#include "ipa_endpoint.h"
  19#include "ipa_cmd.h"
  20#include "ipa_mem.h"
  21#include "ipa_modem.h"
  22#include "ipa_table.h"
  23#include "ipa_gsi.h"
  24#include "ipa_clock.h"
  25
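     /* Decrement an atomic_t, unless its value is already zero */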
  26#define atomic_dec_not_zero(v)  atomic_add_unless((v), -1, 0)
  27
  28#define IPA_REPLENISH_BATCH     16
  29
  30/* RX buffer is 1 page (or a power-of-2 contiguous pages) */
  31#define IPA_RX_BUFFER_SIZE      8192    /* PAGE_SIZE > 4096 wastes a LOT */
  32
  33/* The amount of RX buffer space consumed by standard skb overhead */
  34#define IPA_RX_BUFFER_OVERHEAD  (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
  35
  36/* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
  37#define IPA_ENDPOINT_QMAP_METADATA_MASK         0x000000ff /* host byte order */
  38
  39#define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX       3
  40#define IPA_AGGR_TIME_LIMIT_DEFAULT             500     /* microseconds */
  41
  42/** enum ipa_status_opcode - status element opcode hardware values */
  43enum ipa_status_opcode {
  44        IPA_STATUS_OPCODE_PACKET                = 0x01,
  45        IPA_STATUS_OPCODE_DROPPED_PACKET        = 0x04,
  46        IPA_STATUS_OPCODE_SUSPENDED_PACKET      = 0x08,
  47        IPA_STATUS_OPCODE_PACKET_2ND_PASS       = 0x40,
  48};
  49
  50/** enum ipa_status_exception - status element exception type */
  51enum ipa_status_exception {
  52        /* 0 means no exception */
  53        IPA_STATUS_EXCEPTION_DEAGGR             = 0x01,
  54};
  55
  56/* Status element provided by hardware */
  57struct ipa_status {
  58        u8 opcode;              /* enum ipa_status_opcode */
  59        u8 exception;           /* enum ipa_status_exception */
  60        __le16 mask;
  61        __le16 pkt_len;
  62        u8 endp_src_idx;
  63        u8 endp_dst_idx;
  64        __le32 metadata;
  65        __le32 flags1;
  66        __le64 flags2;
  67        __le32 flags3;
  68        __le32 flags4;
  69};
  70
  71/* Field masks for struct ipa_status structure fields */
  72#define IPA_STATUS_DST_IDX_FMASK                GENMASK(4, 0)
  73#define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK      GENMASK(31, 22)
  74
  75#ifdef IPA_VALIDATE
  76
  77static void ipa_endpoint_validate_build(void)
  78{
  79        /* The aggregation byte limit defines the point at which an
  80         * aggregation window will close.  It is programmed into the
  81         * IPA hardware as a number of KB.  We don't use "hard byte
  82         * limit" aggregation, which means that we need to supply
  83         * enough space in a receive buffer to hold a complete MTU
  84         * plus normal skb overhead *after* that aggregation byte
  85         * limit has been crossed.
  86         *
  87         * This check just ensures we don't define a receive buffer
  88         * size that would exceed what we can represent in the field
  89         * that is used to program its size.
  90         */
  91        BUILD_BUG_ON(IPA_RX_BUFFER_SIZE >
  92                     field_max(AGGR_BYTE_LIMIT_FMASK) * SZ_1K +
  93                     IPA_MTU + IPA_RX_BUFFER_OVERHEAD);
  94
  95        /* I honestly don't know where this requirement comes from.  But
  96         * it holds, and if we someday need to loosen the constraint we
  97         * can try to track it down.
  98         */
  99        BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
 100}
 101
 102static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
 103                            const struct ipa_gsi_endpoint_data *all_data,
 104                            const struct ipa_gsi_endpoint_data *data)
 105{
 106        const struct ipa_gsi_endpoint_data *other_data;
 107        struct device *dev = &ipa->pdev->dev;
 108        enum ipa_endpoint_name other_name;
 109
 110        if (ipa_gsi_endpoint_data_empty(data))
 111                return true;
 112
 113        if (!data->toward_ipa) {
 114                if (data->endpoint.filter_support) {
 115                        dev_err(dev, "filtering not supported for "
 116                                        "RX endpoint %u\n",
 117                                data->endpoint_id);
 118                        return false;
 119                }
 120
 121                return true;    /* Nothing more to check for RX */
 122        }
 123
 124        if (data->endpoint.config.status_enable) {
 125                other_name = data->endpoint.config.tx.status_endpoint;
 126                if (other_name >= count) {
 127                        dev_err(dev, "status endpoint name %u out of range "
 128                                        "for endpoint %u\n",
 129                                other_name, data->endpoint_id);
 130                        return false;
 131                }
 132
 133                /* Status endpoint must be defined... */
 134                other_data = &all_data[other_name];
 135                if (ipa_gsi_endpoint_data_empty(other_data)) {
 136                        dev_err(dev, "DMA endpoint name %u undefined "
 137                                        "for endpoint %u\n",
 138                                other_name, data->endpoint_id);
 139                        return false;
 140                }
 141
 142                /* ...and has to be an RX endpoint... */
 143                if (other_data->toward_ipa) {
 144                        dev_err(dev,
 145                                "status endpoint for endpoint %u not RX\n",
 146                                data->endpoint_id);
 147                        return false;
 148                }
 149
 150                /* ...and if it's to be an AP endpoint... */
 151                if (other_data->ee_id == GSI_EE_AP) {
 152                        /* ...make sure it has status enabled. */
 153                        if (!other_data->endpoint.config.status_enable) {
 154                                dev_err(dev,
 155                                        "status not enabled for endpoint %u\n",
 156                                        other_data->endpoint_id);
 157                                return false;
 158                        }
 159                }
 160        }
 161
 162        if (data->endpoint.config.dma_mode) {
 163                other_name = data->endpoint.config.dma_endpoint;
 164                if (other_name >= count) {
 165                        dev_err(dev, "DMA endpoint name %u out of range "
 166                                        "for endpoint %u\n",
 167                                other_name, data->endpoint_id);
 168                        return false;
 169                }
 170
 171                other_data = &all_data[other_name];
 172                if (ipa_gsi_endpoint_data_empty(other_data)) {
 173                        dev_err(dev, "DMA endpoint name %u undefined "
 174                                        "for endpoint %u\n",
 175                                other_name, data->endpoint_id);
 176                        return false;
 177                }
 178        }
 179
 180        return true;
 181}
 182
 183static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 184                                    const struct ipa_gsi_endpoint_data *data)
 185{
 186        const struct ipa_gsi_endpoint_data *dp = data;
 187        struct device *dev = &ipa->pdev->dev;
 188        enum ipa_endpoint_name name;
 189
 190        ipa_endpoint_validate_build();
 191
 192        if (count > IPA_ENDPOINT_COUNT) {
 193                dev_err(dev, "too many endpoints specified (%u > %u)\n",
 194                        count, IPA_ENDPOINT_COUNT);
 195                return false;
 196        }
 197
 198        /* Make sure needed endpoints have defined data */
 199        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
 200                dev_err(dev, "command TX endpoint not defined\n");
 201                return false;
 202        }
 203        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
 204                dev_err(dev, "LAN RX endpoint not defined\n");
 205                return false;
 206        }
 207        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
 208                dev_err(dev, "AP->modem TX endpoint not defined\n");
 209                return false;
 210        }
 211        if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
 212                dev_err(dev, "AP<-modem RX endpoint not defined\n");
 213                return false;
 214        }
 215
 216        for (name = 0; name < count; name++, dp++)
 217                if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
 218                        return false;
 219
 220        return true;
 221}
 222
 223#else /* !IPA_VALIDATE */
 224
 225static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
 226                                    const struct ipa_gsi_endpoint_data *data)
 227{
 228        return true;
 229}
 230
 231#endif /* !IPA_VALIDATE */
 232
 233/* Allocate a transaction to use on a non-command endpoint */
 234static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
 235                                                  u32 tre_count)
 236{
 237        struct gsi *gsi = &endpoint->ipa->gsi;
 238        u32 channel_id = endpoint->channel_id;
 239        enum dma_data_direction direction;
 240
 241        direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
 242
 243        return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
 244}
 245
 246/* suspend_delay represents suspend for RX, delay for TX endpoints.
 247 * Note that suspend is not supported starting with IPA v4.0.
 248 */
 249static bool
 250ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
 251{
 252        u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
 253        struct ipa *ipa = endpoint->ipa;
 254        bool state;
 255        u32 mask;
 256        u32 val;
 257
 258        /* Suspend is not supported for IPA v4.0+.  Delay doesn't work
 259         * correctly on IPA v4.2.
 260         *
 261         * if (endpoint->toward_ipa)
  262         *      assert(ipa->version != IPA_VERSION_4_2);
 263         * else
 264         *      assert(ipa->version == IPA_VERSION_3_5_1);
 265         */
 266        mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
 267
 268        val = ioread32(ipa->reg_virt + offset);
 269        /* Don't bother if it's already in the requested state */
 270        state = !!(val & mask);
 271        if (suspend_delay != state) {
 272                val ^= mask;
 273                iowrite32(val, ipa->reg_virt + offset);
 274        }
 275
 276        return state;
 277}
 278
 279/* We currently don't care what the previous state was for delay mode */
 280static void
 281ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
 282{
 283        /* assert(endpoint->toward_ipa); */
 284
 285        /* Delay mode doesn't work properly for IPA v4.2 */
 286        if (endpoint->ipa->version != IPA_VERSION_4_2)
 287                (void)ipa_endpoint_init_ctrl(endpoint, enable);
 288}
 289
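     /* Return whether the hardware reports aggregation active on the endpoint */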
 290static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
 291{
 292        u32 mask = BIT(endpoint->endpoint_id);
 293        struct ipa *ipa = endpoint->ipa;
 294        u32 offset;
 295        u32 val;
 296
 297        /* assert(mask & ipa->available); */
 298        offset = ipa_reg_state_aggr_active_offset(ipa->version);
 299        val = ioread32(ipa->reg_virt + offset);
 300
 301        return !!(val & mask);
 302}
 303
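     /* Force the hardware to close any open aggregation frame on the endpoint */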
 304static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
 305{
 306        u32 mask = BIT(endpoint->endpoint_id);
 307        struct ipa *ipa = endpoint->ipa;
 308
 309        /* assert(mask & ipa->available); */
 310        iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
 311}
 312
 313/**
 314 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
 315 * @endpoint:   Endpoint on which to emulate a suspend
 316 *
 317 *  Emulate suspend IPA interrupt to unsuspend an endpoint suspended
 318 *  with an open aggregation frame.  This is to work around a hardware
 319 *  issue in IPA version 3.5.1 where the suspend interrupt will not be
 320 *  generated when it should be.
 321 */
 322static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
 323{
 324        struct ipa *ipa = endpoint->ipa;
 325
 326        if (!endpoint->data->aggregation)
 327                return;
 328
 329        /* Nothing to do if the endpoint doesn't have aggregation open */
 330        if (!ipa_endpoint_aggr_active(endpoint))
 331                return;
 332
 333        /* Force close aggregation */
 334        ipa_endpoint_force_close(endpoint);
 335
 336        ipa_interrupt_simulate_suspend(ipa->interrupt);
 337}
 338
 339/* Returns previous suspend state (true means suspend was enabled) */
 340static bool
 341ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
 342{
 343        bool suspended;
 344
 345        if (endpoint->ipa->version != IPA_VERSION_3_5_1)
 346                return enable;  /* For IPA v4.0+, no change made */
 347
 348        /* assert(!endpoint->toward_ipa); */
 349
 350        suspended = ipa_endpoint_init_ctrl(endpoint, enable);
 351
 352        /* A client suspended with an open aggregation frame will not
 353         * generate a SUSPEND IPA interrupt.  If enabling suspend, have
 354         * ipa_endpoint_suspend_aggr() handle this.
 355         */
 356        if (enable && !suspended)
 357                ipa_endpoint_suspend_aggr(endpoint);
 358
 359        return suspended;
 360}
 361
 362/* Enable or disable delay or suspend mode on all modem endpoints */
 363void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
 364{
 365        u32 endpoint_id;
 366
 367        /* DELAY mode doesn't work correctly on IPA v4.2 */
 368        if (ipa->version == IPA_VERSION_4_2)
 369                return;
 370
 371        for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
 372                struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
 373
 374                if (endpoint->ee_id != GSI_EE_MODEM)
 375                        continue;
 376
 377                /* Set TX delay mode or RX suspend mode */
 378                if (endpoint->toward_ipa)
 379                        ipa_endpoint_program_delay(endpoint, enable);
 380                else
 381                        (void)ipa_endpoint_program_suspend(endpoint, enable);
 382        }
 383}
 384
 385/* Reset all modem endpoints to use the default exception endpoint */
 386int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
 387{
 388        u32 initialized = ipa->initialized;
 389        struct gsi_trans *trans;
 390        u32 count;
 391
 392        /* We need one command per modem TX endpoint.  We can get an upper
 393         * bound on that by assuming all initialized endpoints are modem->IPA.
 394         * That won't happen, and we could be more precise, but this is fine
 395         * for now.  We need to end the transaction with a "tag process."
 396         */
 397        count = hweight32(initialized) + ipa_cmd_tag_process_count();
 398        trans = ipa_cmd_trans_alloc(ipa, count);
 399        if (!trans) {
 400                dev_err(&ipa->pdev->dev,
 401                        "no transaction to reset modem exception endpoints\n");
 402                return -EBUSY;
 403        }
 404
 405        while (initialized) {
 406                u32 endpoint_id = __ffs(initialized);
 407                struct ipa_endpoint *endpoint;
 408                u32 offset;
 409
 410                initialized ^= BIT(endpoint_id);
 411
 412                /* We only reset modem TX endpoints */
 413                endpoint = &ipa->endpoint[endpoint_id];
 414                if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
 415                        continue;
 416
 417                offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
 418
 419                /* Value written is 0, and all bits are updated.  That
 420                 * means status is disabled on the endpoint, and as a
 421                 * result all other fields in the register are ignored.
 422                 */
 423                ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
 424        }
 425
 426        ipa_cmd_tag_process_add(trans);
 427
 428        /* XXX This should have a 1 second timeout */
 429        gsi_trans_commit_wait(trans);
 430
 431        return 0;
 432}
 433
 434static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
 435{
 436        u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
 437        u32 val = 0;
 438
 439        /* FRAG_OFFLOAD_EN is 0 */
 440        if (endpoint->data->checksum) {
 441                if (endpoint->toward_ipa) {
 442                        u32 checksum_offset;
 443
 444                        val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
 445                                               CS_OFFLOAD_EN_FMASK);
 446                        /* Checksum header offset is in 4-byte units */
 447                        checksum_offset = sizeof(struct rmnet_map_header);
 448                        checksum_offset /= sizeof(u32);
 449                        val |= u32_encode_bits(checksum_offset,
 450                                               CS_METADATA_HDR_OFFSET_FMASK);
 451                } else {
 452                        val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
 453                                               CS_OFFLOAD_EN_FMASK);
 454                }
 455        } else {
 456                val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
 457                                       CS_OFFLOAD_EN_FMASK);
 458        }
 459        /* CS_GEN_QMB_MASTER_SEL is 0 */
 460
 461        iowrite32(val, endpoint->ipa->reg_virt + offset);
 462}
 463
 464/**
 465 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
 466 * @endpoint:   Endpoint pointer
 467 *
 468 * We program QMAP endpoints so each packet received is preceded by a QMAP
 469 * header structure.  The QMAP header contains a 1-byte mux_id and 2-byte
 470 * packet size field, and we have the IPA hardware populate both for each
 471 * received packet.  The header is configured (in the HDR_EXT register)
 472 * to use big endian format.
 473 *
 474 * The packet size is written into the QMAP header's pkt_len field.  That
 475 * location is defined here using the HDR_OFST_PKT_SIZE field.
 476 *
 477 * The mux_id comes from a 4-byte metadata value supplied with each packet
 478 * by the modem.  It is *not* a QMAP header, but it does contain the mux_id
 479 * value that we want, in its low-order byte.  A bitmask defined in the
 480 * endpoint's METADATA_MASK register defines which byte within the modem
 481 * metadata contains the mux_id.  And the OFST_METADATA field programmed
 482 * here indicates where the extracted byte should be placed within the QMAP
 483 * header.
 484 */
 485static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
 486{
 487        u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
 488        u32 val = 0;
 489
 490        if (endpoint->data->qmap) {
 491                size_t header_size = sizeof(struct rmnet_map_header);
 492
 493                /* We might supply a checksum header after the QMAP header */
 494                if (endpoint->toward_ipa && endpoint->data->checksum)
 495                        header_size += sizeof(struct rmnet_map_ul_csum_header);
 496                val |= u32_encode_bits(header_size, HDR_LEN_FMASK);
 497
 498                /* Define how to fill fields in a received QMAP header */
 499                if (!endpoint->toward_ipa) {
 500                        u32 off;        /* Field offset within header */
 501
 502                        /* Where IPA will write the metadata value */
 503                        off = offsetof(struct rmnet_map_header, mux_id);
 504                        val |= u32_encode_bits(off, HDR_OFST_METADATA_FMASK);
 505
 506                        /* Where IPA will write the length */
 507                        off = offsetof(struct rmnet_map_header, pkt_len);
 508                        val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
 509                        val |= u32_encode_bits(off, HDR_OFST_PKT_SIZE_FMASK);
 510                }
 511                /* For QMAP TX, metadata offset is 0 (modem assumes this) */
 512                val |= HDR_OFST_METADATA_VALID_FMASK;
 513
 514                /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
 515                /* HDR_A5_MUX is 0 */
 516                /* HDR_LEN_INC_DEAGG_HDR is 0 */
 517                /* HDR_METADATA_REG_VALID is 0 (TX only) */
 518        }
 519
 520        iowrite32(val, endpoint->ipa->reg_virt + offset);
 521}
 522
 523static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
 524{
 525        u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
 526        u32 pad_align = endpoint->data->rx.pad_align;
 527        u32 val = 0;
 528
 529        val |= HDR_ENDIANNESS_FMASK;            /* big endian */
 530
 531        /* A QMAP header contains a 6 bit pad field at offset 0.  The RMNet
 532         * driver assumes this field is meaningful in packets it receives,
 533         * and assumes the header's payload length includes that padding.
 534         * The RMNet driver does *not* pad packets it sends, however, so
 535         * the pad field (although 0) should be ignored.
 536         */
 537        if (endpoint->data->qmap && !endpoint->toward_ipa) {
 538                val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
 539                /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
 540                val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
 541                /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
 542        }
 543
 544        /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
 545        if (!endpoint->toward_ipa)
 546                val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
 547
 548        iowrite32(val, endpoint->ipa->reg_virt + offset);
 549}
 550
 551
 552static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
 553{
 554        u32 endpoint_id = endpoint->endpoint_id;
 555        u32 val = 0;
 556        u32 offset;
 557
 558        if (endpoint->toward_ipa)
 559                return;         /* Register not valid for TX endpoints */
 560
 561        offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
 562
 563        /* Note that HDR_ENDIANNESS indicates big endian header fields */
 564        if (endpoint->data->qmap)
 565                val = cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
 566
 567        iowrite32(val, endpoint->ipa->reg_virt + offset);
 568}
 569
 570static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
 571{
 572        u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
 573        u32 val;
 574
 575        if (!endpoint->toward_ipa)
 576                return;         /* Register not valid for RX endpoints */
 577
 578        if (endpoint->data->dma_mode) {
 579                enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
 580                u32 dma_endpoint_id;
 581
 582                dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;
 583
 584                val = u32_encode_bits(IPA_DMA, MODE_FMASK);
 585                val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
 586        } else {
 587                val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
 588        }
 589        /* All other bits unspecified (and 0) */
 590
 591        iowrite32(val, endpoint->ipa->reg_virt + offset);
 592}
 593
 594/* Compute the aggregation size value to use for a given buffer size */
 595static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
 596{
 597        /* We don't use "hard byte limit" aggregation, so we define the
 598         * aggregation limit such that our buffer has enough space *after*
 599         * that limit to receive a full MTU of data, plus overhead.
 600         */
 601        rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
 602
 603        return rx_buffer_size / SZ_1K;
 604}
 605
 606static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
 607{
 608        u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
 609        u32 val = 0;
 610
 611        if (endpoint->data->aggregation) {
 612                if (!endpoint->toward_ipa) {
 613                        u32 limit;
 614
 615                        val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
 616                        val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
 617
 618                        limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
 619                        val |= u32_encode_bits(limit, AGGR_BYTE_LIMIT_FMASK);
 620
 621                        limit = IPA_AGGR_TIME_LIMIT_DEFAULT;
 622                        limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
 623                        val |= u32_encode_bits(limit, AGGR_TIME_LIMIT_FMASK);
 624
 625                        /* AGGR_PKT_LIMIT is 0 (unlimited) */
 626
 627                        if (endpoint->data->rx.aggr_close_eof)
 628                                val |= AGGR_SW_EOF_ACTIVE_FMASK;
 629                        /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
 630                } else {
 631                        val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
 632                                               AGGR_EN_FMASK);
 633                        val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
 634                        /* other fields ignored */
 635                }
 636                /* AGGR_FORCE_CLOSE is 0 */
 637        } else {
 638                val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
 639                /* other fields ignored */
 640        }
 641
 642        iowrite32(val, endpoint->ipa->reg_virt + offset);
 643}
 644
 645/* The head-of-line blocking timer is defined as a tick count, where each
 646 * tick represents 128 cycles of the IPA core clock.  Return the value
  647 * that should be written to that register to represent the timeout
 648 * period provided.
 649 */
 650static u32 ipa_reg_init_hol_block_timer_val(struct ipa *ipa, u32 microseconds)
 651{
 652        u32 width;
 653        u32 scale;
 654        u64 ticks;
 655        u64 rate;
 656        u32 high;
 657        u32 val;
 658
 659        if (!microseconds)
 660                return 0;       /* Nothing to compute if timer period is 0 */
 661
 662        /* Use 64 bit arithmetic to avoid overflow... */
 663        rate = ipa_clock_rate(ipa);
 664        ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
 665        /* ...but we still need to fit into a 32-bit register */
 666        WARN_ON(ticks > U32_MAX);
 667
 668        /* IPA v3.5.1 just records the tick count */
 669        if (ipa->version == IPA_VERSION_3_5_1)
 670                return (u32)ticks;
 671
 672        /* For IPA v4.2, the tick count is represented by base and
 673         * scale fields within the 32-bit timer register, where:
 674         *     ticks = base << scale;
 675         * The best precision is achieved when the base value is as
 676         * large as possible.  Find the highest set bit in the tick
 677         * count, and extract the number of bits in the base field
 678         * such that that high bit is included.
 679         */
 680        high = fls(ticks);              /* 1..32 */
 681        width = HWEIGHT32(BASE_VALUE_FMASK);
 682        scale = high > width ? high - width : 0;
 683        if (scale) {
 684                /* If we're scaling, round up to get a closer result */
 685                ticks += 1 << (scale - 1);
 686                /* High bit was set, so rounding might have affected it */
 687                if (fls(ticks) != high)
 688                        scale++;
 689        }
 690
 691        val = u32_encode_bits(scale, SCALE_FMASK);
 692        val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
 693
 694        return val;
 695}
 696
 697/* If microseconds is 0, timeout is immediate */
 698static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
 699                                              u32 microseconds)
 700{
 701        u32 endpoint_id = endpoint->endpoint_id;
 702        struct ipa *ipa = endpoint->ipa;
 703        u32 offset;
 704        u32 val;
 705
 706        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
 707        val = ipa_reg_init_hol_block_timer_val(ipa, microseconds);
 708        iowrite32(val, ipa->reg_virt + offset);
 709}
 710
 711static void
 712ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
 713{
 714        u32 endpoint_id = endpoint->endpoint_id;
 715        u32 offset;
 716        u32 val;
 717
 718        val = enable ? HOL_BLOCK_EN_FMASK : 0;
 719        offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
 720        iowrite32(val, endpoint->ipa->reg_virt + offset);
 721}
 722
 723void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
 724{
 725        u32 i;
 726
 727        for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
 728                struct ipa_endpoint *endpoint = &ipa->endpoint[i];
 729
 730                if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
 731                        continue;
 732
 733                ipa_endpoint_init_hol_block_timer(endpoint, 0);
 734                ipa_endpoint_init_hol_block_enable(endpoint, true);
 735        }
 736}
 737
 738static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
 739{
 740        u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
 741        u32 val = 0;
 742
 743        if (!endpoint->toward_ipa)
 744                return;         /* Register not valid for RX endpoints */
 745
 746        /* DEAGGR_HDR_LEN is 0 */
 747        /* PACKET_OFFSET_VALID is 0 */
 748        /* PACKET_OFFSET_LOCATION is ignored (not valid) */
 749        /* MAX_PACKET_LEN is 0 (not enforced) */
 750
 751        iowrite32(val, endpoint->ipa->reg_virt + offset);
 752}
 753
 754static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
 755{
 756        u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
 757        u32 seq_type = endpoint->seq_type;
 758        u32 val = 0;
 759
 760        if (!endpoint->toward_ipa)
 761                return;         /* Register not valid for RX endpoints */
 762
 763        /* Sequencer type is made up of four nibbles */
 764        val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
 765        val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
 766        /* The second two apply to replicated packets */
 767        val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
 768        val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);
 769
 770        iowrite32(val, endpoint->ipa->reg_virt + offset);
 771}
 772
 773/**
 774 * ipa_endpoint_skb_tx() - Transmit a socket buffer
 775 * @endpoint:   Endpoint pointer
 776 * @skb:        Socket buffer to send
 777 *
 778 * Returns:     0 if successful, or a negative error code
 779 */
 780int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
 781{
 782        struct gsi_trans *trans;
 783        u32 nr_frags;
 784        int ret;
 785
 786        /* Make sure source endpoint's TLV FIFO has enough entries to
 787         * hold the linear portion of the skb and all its fragments.
 788         * If not, see if we can linearize it before giving up.
 789         */
 790        nr_frags = skb_shinfo(skb)->nr_frags;
 791        if (1 + nr_frags > endpoint->trans_tre_max) {
 792                if (skb_linearize(skb))
 793                        return -E2BIG;
 794                nr_frags = 0;
 795        }
 796
 797        trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
 798        if (!trans)
 799                return -EBUSY;
 800
 801        ret = gsi_trans_skb_add(trans, skb);
 802        if (ret)
 803                goto err_trans_free;
 804        trans->data = skb;      /* transaction owns skb now */
 805
 806        gsi_trans_commit(trans, !netdev_xmit_more());
 807
 808        return 0;
 809
 810err_trans_free:
 811        gsi_trans_free(trans);
 812
 813        return -ENOMEM;
 814}
 815
 816static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
 817{
 818        u32 endpoint_id = endpoint->endpoint_id;
 819        struct ipa *ipa = endpoint->ipa;
 820        u32 val = 0;
 821        u32 offset;
 822
 823        offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
 824
 825        if (endpoint->data->status_enable) {
 826                val |= STATUS_EN_FMASK;
 827                if (endpoint->toward_ipa) {
 828                        enum ipa_endpoint_name name;
 829                        u32 status_endpoint_id;
 830
 831                        name = endpoint->data->tx.status_endpoint;
 832                        status_endpoint_id = ipa->name_map[name]->endpoint_id;
 833
 834                        val |= u32_encode_bits(status_endpoint_id,
 835                                               STATUS_ENDP_FMASK);
 836                }
 837                /* STATUS_LOCATION is 0 (status element precedes packet) */
 838                /* The next field is present for IPA v4.0 and above */
 839                /* STATUS_PKT_SUPPRESS_FMASK is 0 */
 840        }
 841
 842        iowrite32(val, ipa->reg_virt + offset);
 843}
 844
 845static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
 846{
 847        struct gsi_trans *trans;
 848        bool doorbell = false;
 849        struct page *page;
 850        u32 offset;
 851        u32 len;
 852        int ret;
 853
 854        page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
 855        if (!page)
 856                return -ENOMEM;
 857
 858        trans = ipa_endpoint_trans_alloc(endpoint, 1);
 859        if (!trans)
 860                goto err_free_pages;
 861
 862        /* Offset the buffer to make space for skb headroom */
 863        offset = NET_SKB_PAD;
 864        len = IPA_RX_BUFFER_SIZE - offset;
 865
 866        ret = gsi_trans_page_add(trans, page, len, offset);
 867        if (ret)
 868                goto err_trans_free;
 869        trans->data = page;     /* transaction owns page now */
 870
 871        if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
 872                doorbell = true;
 873                endpoint->replenish_ready = 0;
 874        }
 875
 876        gsi_trans_commit(trans, doorbell);
 877
 878        return 0;
 879
 880err_trans_free:
 881        gsi_trans_free(trans);
 882err_free_pages:
 883        __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
 884
 885        return -ENOMEM;
 886}
 887
 888/**
 889 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
 890 * @endpoint:   Endpoint to be replenished
 891 * @count:      Number of buffers to send to hardware
 892 *
 893 * Allocate RX packet wrapper structures with maximal socket buffers
 894 * for an endpoint.  These are supplied to the hardware, which fills
 895 * them with incoming data.
 896 */
 897static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
 898{
 899        struct gsi *gsi;
 900        u32 backlog;
 901
 902        if (!endpoint->replenish_enabled) {
 903                if (count)
 904                        atomic_add(count, &endpoint->replenish_saved);
 905                return;
 906        }
 907
 908
 909        while (atomic_dec_not_zero(&endpoint->replenish_backlog))
 910                if (ipa_endpoint_replenish_one(endpoint))
 911                        goto try_again_later;
 912        if (count)
 913                atomic_add(count, &endpoint->replenish_backlog);
 914
 915        return;
 916
 917try_again_later:
 918        /* The last one didn't succeed, so fix the backlog */
 919        backlog = atomic_inc_return(&endpoint->replenish_backlog);
 920
 921        if (count)
 922                atomic_add(count, &endpoint->replenish_backlog);
 923
 924        /* Whenever a receive buffer transaction completes we'll try to
 925         * replenish again.  It's unlikely, but if we fail to supply even
 926         * one buffer, nothing will trigger another replenish attempt.
 927         * Receive buffer transactions use one TRE, so schedule work to
 928         * try replenishing again if our backlog is *all* available TREs.
 929         */
 930        gsi = &endpoint->ipa->gsi;
 931        if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
 932                schedule_delayed_work(&endpoint->replenish_work,
 933                                      msecs_to_jiffies(1));
 934}
 935
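     /* Re-enable replenishing, restoring any count saved while it was disabled */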
 936static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
 937{
 938        struct gsi *gsi = &endpoint->ipa->gsi;
 939        u32 max_backlog;
 940        u32 saved;
 941
 942        endpoint->replenish_enabled = true;
 943        while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
 944                atomic_add(saved, &endpoint->replenish_backlog);
 945
 946        /* Start replenishing if hardware currently has no buffers */
 947        max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
 948        if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
 949                ipa_endpoint_replenish(endpoint, 0);
 950}
 951
 952static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
 953{
 954        u32 backlog;
 955
 956        endpoint->replenish_enabled = false;
 957        while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
 958                atomic_add(backlog, &endpoint->replenish_saved);
 959}
 960
 961static void ipa_endpoint_replenish_work(struct work_struct *work)
 962{
 963        struct delayed_work *dwork = to_delayed_work(work);
 964        struct ipa_endpoint *endpoint;
 965
 966        endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
 967
 968        ipa_endpoint_replenish(endpoint, 0);
 969}
 970
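     /* Copy packet data into a new socket buffer and pass it up the
      * network stack (the data is dropped if there is no netdev).
      */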
 971static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
 972                                  void *data, u32 len, u32 extra)
 973{
 974        struct sk_buff *skb;
 975
 976        skb = __dev_alloc_skb(len, GFP_ATOMIC);
 977        if (skb) {
 978                skb_put(skb, len);
 979                memcpy(skb->data, data, len);
 980                skb->truesize += extra;
 981        }
 982
 983        /* Now receive it, or drop it if there's no netdev */
 984        if (endpoint->netdev)
 985                ipa_modem_skb_rx(endpoint->netdev, skb);
 986        else if (skb)
 987                dev_kfree_skb_any(skb);
 988}
 989
 990static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
 991                                   struct page *page, u32 len)
 992{
 993        struct sk_buff *skb;
 994
 995        /* Nothing to do if there's no netdev */
 996        if (!endpoint->netdev)
 997                return false;
 998
 999        /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
1000        skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
1001        if (skb) {
1002                /* Reserve the headroom and account for the data */
1003                skb_reserve(skb, NET_SKB_PAD);
1004                skb_put(skb, len);
1005        }
1006
1007        /* Receive the buffer (or record drop if unable to build it) */
1008        ipa_modem_skb_rx(endpoint->netdev, skb);
1009
1010        return skb != NULL;
1011}
1012
1013/* The format of a packet status element is the same for several status
1014 * types (opcodes).  Other types aren't currently supported.
1015 */
1016static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1017{
1018        switch (opcode) {
1019        case IPA_STATUS_OPCODE_PACKET:
1020        case IPA_STATUS_OPCODE_DROPPED_PACKET:
1021        case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1022        case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1023                return true;
1024        default:
1025                return false;
1026        }
1027}
1028
1029static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1030                                     const struct ipa_status *status)
1031{
1032        u32 endpoint_id;
1033
1034        if (!ipa_status_format_packet(status->opcode))
1035                return true;
1036        if (!status->pkt_len)
1037                return true;
1038        endpoint_id = u32_get_bits(status->endp_dst_idx,
1039                                   IPA_STATUS_DST_IDX_FMASK);
1040        if (endpoint_id != endpoint->endpoint_id)
1041                return true;
1042
1043        return false;   /* Don't skip this packet, process it */
1044}
1045
1046/* Return whether the status indicates the packet should be dropped */
1047static bool ipa_status_drop_packet(const struct ipa_status *status)
1048{
1049        u32 val;
1050
1051        /* Deaggregation exceptions we drop; all other types we consume */
1052        if (status->exception)
1053                return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1054
1055        /* Drop the packet if it fails to match a routing rule; otherwise no */
1056        val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1057
1058        return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1059}
1060
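     /* Process a receive buffer holding status elements, each optionally
      * followed by the packet data it describes.
      */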
1061static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1062                                      struct page *page, u32 total_len)
1063{
1064        void *data = page_address(page) + NET_SKB_PAD;
1065        u32 unused = IPA_RX_BUFFER_SIZE - total_len;
1066        u32 resid = total_len;
1067
1068        while (resid) {
1069                const struct ipa_status *status = data;
1070                u32 align;
1071                u32 len;
1072
1073                if (resid < sizeof(*status)) {
1074                        dev_err(&endpoint->ipa->pdev->dev,
1075                                "short message (%u bytes < %zu byte status)\n",
1076                                resid, sizeof(*status));
1077                        break;
1078                }
1079
1080                /* Skip over status packets that lack packet data */
1081                if (ipa_endpoint_status_skip(endpoint, status)) {
1082                        data += sizeof(*status);
1083                        resid -= sizeof(*status);
1084                        continue;
1085                }
1086
1087                /* Compute the amount of buffer space consumed by the
1088                 * packet, including the status element.  If the hardware
1089                 * is configured to pad packet data to an aligned boundary,
 1090                 * account for that.  And if checksum offload is enabled
1091                 * a trailer containing computed checksum information will
1092                 * be appended.
1093                 */
1094                align = endpoint->data->rx.pad_align ? : 1;
1095                len = le16_to_cpu(status->pkt_len);
1096                len = sizeof(*status) + ALIGN(len, align);
1097                if (endpoint->data->checksum)
1098                        len += sizeof(struct rmnet_map_dl_csum_trailer);
1099
1100                /* Charge the new packet with a proportional fraction of
1101                 * the unused space in the original receive buffer.
1102                 * XXX Charge a proportion of the *whole* receive buffer?
1103                 */
1104                if (!ipa_status_drop_packet(status)) {
1105                        u32 extra = unused * len / total_len;
1106                        void *data2 = data + sizeof(*status);
1107                        u32 len2 = le16_to_cpu(status->pkt_len);
1108
1109                        /* Client receives only packet data (no status) */
1110                        ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
1111                }
1112
1113                /* Consume status and the full packet it describes */
1114                data += len;
1115                resid -= len;
1116        }
1117}
1118
1119/* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
1120static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1121                                     struct gsi_trans *trans)
1122{
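             /* Nothing to do here; any socket buffer attached to the
              * transaction is freed in ipa_endpoint_trans_release().
              */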
1123}
1124
1125/* Complete transaction initiated in ipa_endpoint_replenish_one() */
1126static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1127                                     struct gsi_trans *trans)
1128{
1129        struct page *page;
1130
1131        ipa_endpoint_replenish(endpoint, 1);
1132
1133        if (trans->cancelled)
1134                return;
1135
1136        /* Parse or build a socket buffer using the actual received length */
1137        page = trans->data;
1138        if (endpoint->data->status_enable)
1139                ipa_endpoint_status_parse(endpoint, page, trans->len);
1140        else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1141                trans->data = NULL;     /* Pages have been consumed */
1142}
1143
1144void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1145                                 struct gsi_trans *trans)
1146{
1147        if (endpoint->toward_ipa)
1148                ipa_endpoint_tx_complete(endpoint, trans);
1149        else
1150                ipa_endpoint_rx_complete(endpoint, trans);
1151}
1152
1153void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1154                                struct gsi_trans *trans)
1155{
1156        if (endpoint->toward_ipa) {
1157                struct ipa *ipa = endpoint->ipa;
1158
1159                /* Nothing to do for command transactions */
1160                if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1161                        struct sk_buff *skb = trans->data;
1162
1163                        if (skb)
1164                                dev_kfree_skb_any(skb);
1165                }
1166        } else {
1167                struct page *page = trans->data;
1168
1169                if (page)
1170                        __free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1171        }
1172}
1173
1174void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1175{
1176        u32 val;
1177
1178        /* ROUTE_DIS is 0 */
1179        val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1180        val |= ROUTE_DEF_HDR_TABLE_FMASK;
1181        val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1182        val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1183        val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1184
1185        iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1186}
1187
1188void ipa_endpoint_default_route_clear(struct ipa *ipa)
1189{
1190        ipa_endpoint_default_route_set(ipa, 0);
1191}
1192
1193/**
1194 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1195 * @endpoint:   Endpoint to be reset
1196 *
1197 * If aggregation is active on an RX endpoint when a reset is performed
1198 * on its underlying GSI channel, a special sequence of actions must be
1199 * taken to ensure the IPA pipeline is properly cleared.
1200 *
1201 * Return:      0 if successful, or a negative error code
1202 */
1203static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1204{
1205        struct device *dev = &endpoint->ipa->pdev->dev;
1206        struct ipa *ipa = endpoint->ipa;
1207        struct gsi *gsi = &ipa->gsi;
1208        bool suspended = false;
1209        dma_addr_t addr;
1210        bool legacy;
1211        u32 retries;
1212        u32 len = 1;
1213        void *virt;
1214        int ret;
1215
1216        virt = kzalloc(len, GFP_KERNEL);
1217        if (!virt)
1218                return -ENOMEM;
1219
1220        addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1221        if (dma_mapping_error(dev, addr)) {
1222                ret = -ENOMEM;
1223                goto out_kfree;
1224        }
1225
1226        /* Force close aggregation before issuing the reset */
1227        ipa_endpoint_force_close(endpoint);
1228
1229        /* Reset and reconfigure the channel with the doorbell engine
1230         * disabled.  Then poll until we know aggregation is no longer
1231         * active.  We'll re-enable the doorbell (if appropriate) when
1232         * we reset again below.
1233         */
1234        gsi_channel_reset(gsi, endpoint->channel_id, false);
1235
1236        /* Make sure the channel isn't suspended */
1237        suspended = ipa_endpoint_program_suspend(endpoint, false);
1238
1239        /* Start channel and do a 1 byte read */
1240        ret = gsi_channel_start(gsi, endpoint->channel_id);
1241        if (ret)
1242                goto out_suspend_again;
1243
1244        ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1245        if (ret)
1246                goto err_endpoint_stop;
1247
1248        /* Wait for aggregation to be closed on the channel */
1249        retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1250        do {
1251                if (!ipa_endpoint_aggr_active(endpoint))
1252                        break;
1253                msleep(1);
1254        } while (retries--);
1255
1256        /* Check one last time */
1257        if (ipa_endpoint_aggr_active(endpoint))
1258                dev_err(dev, "endpoint %u still active during reset\n",
1259                        endpoint->endpoint_id);
1260
1261        gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1262
1263        ret = gsi_channel_stop(gsi, endpoint->channel_id);
1264        if (ret)
1265                goto out_suspend_again;
1266
1267        /* Finally, reset and reconfigure the channel again (re-enabling the
 1268         * doorbell engine if appropriate).  Sleep for 1 millisecond to
1269         * complete the channel reset sequence.  Finish by suspending the
1270         * channel again (if necessary).
1271         */
1272        legacy = ipa->version == IPA_VERSION_3_5_1;
1273        gsi_channel_reset(gsi, endpoint->channel_id, legacy);
1274
1275        msleep(1);
1276
1277        goto out_suspend_again;
1278
1279err_endpoint_stop:
1280        (void)gsi_channel_stop(gsi, endpoint->channel_id);
1281out_suspend_again:
1282        if (suspended)
1283                (void)ipa_endpoint_program_suspend(endpoint, true);
1284        dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1285out_kfree:
1286        kfree(virt);
1287
1288        return ret;
1289}
1290
1291static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1292{
1293        u32 channel_id = endpoint->channel_id;
1294        struct ipa *ipa = endpoint->ipa;
1295        bool special;
1296        bool legacy;
1297        int ret = 0;
1298
1299        /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1300         * is active, we need to handle things specially to recover.
1301         * All other cases just need to reset the underlying GSI channel.
1302         *
1303         * IPA v3.5.1 enables the doorbell engine.  Newer versions do not.
1304         */
1305        legacy = ipa->version == IPA_VERSION_3_5_1;
1306        special = !endpoint->toward_ipa && endpoint->data->aggregation;
1307        if (special && ipa_endpoint_aggr_active(endpoint))
1308                ret = ipa_endpoint_reset_rx_aggr(endpoint);
1309        else
1310                gsi_channel_reset(&ipa->gsi, channel_id, legacy);
1311
1312        if (ret)
1313                dev_err(&ipa->pdev->dev,
1314                        "error %d resetting channel %u for endpoint %u\n",
1315                        ret, endpoint->channel_id, endpoint->endpoint_id);
1316}
1317
1318static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1319{
1320        if (endpoint->toward_ipa)
1321                ipa_endpoint_program_delay(endpoint, false);
1322        else
1323                (void)ipa_endpoint_program_suspend(endpoint, false);
1324        ipa_endpoint_init_cfg(endpoint);
1325        ipa_endpoint_init_hdr(endpoint);
1326        ipa_endpoint_init_hdr_ext(endpoint);
1327        ipa_endpoint_init_hdr_metadata_mask(endpoint);
1328        ipa_endpoint_init_mode(endpoint);
1329        ipa_endpoint_init_aggr(endpoint);
1330        ipa_endpoint_init_deaggr(endpoint);
1331        ipa_endpoint_init_seq(endpoint);
1332        ipa_endpoint_status(endpoint);
1333}
1334
1335int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1336{
1337        struct ipa *ipa = endpoint->ipa;
1338        struct gsi *gsi = &ipa->gsi;
1339        int ret;
1340
1341        ret = gsi_channel_start(gsi, endpoint->channel_id);
1342        if (ret) {
1343                dev_err(&ipa->pdev->dev,
1344                        "error %d starting %cX channel %u for endpoint %u\n",
1345                        ret, endpoint->toward_ipa ? 'T' : 'R',
1346                        endpoint->channel_id, endpoint->endpoint_id);
1347                return ret;
1348        }
1349
1350        if (!endpoint->toward_ipa) {
1351                ipa_interrupt_suspend_enable(ipa->interrupt,
1352                                             endpoint->endpoint_id);
1353                ipa_endpoint_replenish_enable(endpoint);
1354        }
1355
1356        ipa->enabled |= BIT(endpoint->endpoint_id);
1357
1358        return 0;
1359}
1360
1361void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1362{
1363        u32 mask = BIT(endpoint->endpoint_id);
1364        struct ipa *ipa = endpoint->ipa;
1365        struct gsi *gsi = &ipa->gsi;
1366        int ret;
1367
1368        if (!(ipa->enabled & mask))
1369                return;
1370
1371        ipa->enabled ^= mask;
1372
1373        if (!endpoint->toward_ipa) {
1374                ipa_endpoint_replenish_disable(endpoint);
1375                ipa_interrupt_suspend_disable(ipa->interrupt,
1376                                              endpoint->endpoint_id);
1377        }
1378
1379        /* Note that if stop fails, the channel's state is not well-defined */
1380        ret = gsi_channel_stop(gsi, endpoint->channel_id);
1381        if (ret)
1382                dev_err(&ipa->pdev->dev,
1383                        "error %d attempting to stop endpoint %u\n", ret,
1384                        endpoint->endpoint_id);
1385}
1386
1387void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1388{
1389        struct device *dev = &endpoint->ipa->pdev->dev;
1390        struct gsi *gsi = &endpoint->ipa->gsi;
1391        bool stop_channel;
1392        int ret;
1393
1394        if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1395                return;
1396
1397        if (!endpoint->toward_ipa) {
1398                ipa_endpoint_replenish_disable(endpoint);
1399                (void)ipa_endpoint_program_suspend(endpoint, true);
1400        }
1401
1402        /* IPA v3.5.1 doesn't use channel stop for suspend */
1403        stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
1404        ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
1405        if (ret)
1406                dev_err(dev, "error %d suspending channel %u\n", ret,
1407                        endpoint->channel_id);
1408}
1409
1410void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1411{
1412        struct device *dev = &endpoint->ipa->pdev->dev;
1413        struct gsi *gsi = &endpoint->ipa->gsi;
1414        bool start_channel;
1415        int ret;
1416
1417        if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
1418                return;
1419
1420        if (!endpoint->toward_ipa)
1421                (void)ipa_endpoint_program_suspend(endpoint, false);
1422
1423        /* IPA v3.5.1 doesn't use channel start for resume */
1424        start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
1425        ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
1426        if (ret)
1427                dev_err(dev, "error %d resuming channel %u\n", ret,
1428                        endpoint->channel_id);
1429        else if (!endpoint->toward_ipa)
1430                ipa_endpoint_replenish_enable(endpoint);
1431}
1432
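/* Suspend all endpoints used directly by the AP.  The modem network
 * device is suspended first, followed by the AP's LAN RX and command
 * TX endpoints.
 */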
1433void ipa_endpoint_suspend(struct ipa *ipa)
1434{
1435        if (!ipa->setup_complete)
1436                return;
1437
1438        if (ipa->modem_netdev)
1439                ipa_modem_suspend(ipa->modem_netdev);
1440
1441        ipa_cmd_tag_process(ipa);
1442
1443        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1444        ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1445}
1446
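/* Resume all endpoints used directly by the AP, in the reverse of the
 * order used for suspend: command TX and LAN RX first, then the modem
 * network device.
 */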
1447void ipa_endpoint_resume(struct ipa *ipa)
1448{
1449        if (!ipa->setup_complete)
1450                return;
1451
1452        ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1453        ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1454
1455        if (ipa->modem_netdev)
1456                ipa_modem_resume(ipa->modem_netdev);
1457}
1458
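/* Set up one AP-owned endpoint: record the channel's TRE limit, set up
 * replenish state for an RX endpoint, and program the endpoint's
 * registers.  Endpoints owned by other execution environments are
 * left untouched.
 */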
1459static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1460{
1461        struct gsi *gsi = &endpoint->ipa->gsi;
1462        u32 channel_id = endpoint->channel_id;
1463
1464        /* Only AP endpoints get set up */
1465        if (endpoint->ee_id != GSI_EE_AP)
1466                return;
1467
1468        endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
1469        if (!endpoint->toward_ipa) {
1470                /* RX transactions require a single TRE, so the maximum
1471                 * backlog is the same as the maximum outstanding TREs.
1472                 */
1473                endpoint->replenish_enabled = false;
1474                atomic_set(&endpoint->replenish_saved,
1475                           gsi_channel_tre_max(gsi, endpoint->channel_id));
1476                atomic_set(&endpoint->replenish_backlog, 0);
1477                INIT_DELAYED_WORK(&endpoint->replenish_work,
1478                                  ipa_endpoint_replenish_work);
1479        }
1480
1481        ipa_endpoint_program(endpoint);
1482
1483        endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1484}
1485
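/* Inverse of ipa_endpoint_setup_one(): cancel any outstanding RX
 * replenish work and reset the endpoint.
 */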
1486static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1487{
1488        endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1489
1490        if (!endpoint->toward_ipa)
1491                cancel_delayed_work_sync(&endpoint->replenish_work);
1492
1493        ipa_endpoint_reset(endpoint);
1494}
1495
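/* Set up every initialized endpoint, lowest endpoint ID first.
 * Teardown below walks the set_up bitmap in the opposite order.
 */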
1496void ipa_endpoint_setup(struct ipa *ipa)
1497{
1498        u32 initialized = ipa->initialized;
1499
1500        ipa->set_up = 0;
1501        while (initialized) {
1502                u32 endpoint_id = __ffs(initialized);
1503
1504                initialized ^= BIT(endpoint_id);
1505
1506                ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1507        }
1508}
1509
1510void ipa_endpoint_teardown(struct ipa *ipa)
1511{
1512        u32 set_up = ipa->set_up;
1513
1514        while (set_up) {
1515                u32 endpoint_id = __fls(set_up);
1516
1517                set_up ^= BIT(endpoint_id);
1518
1519                ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1520        }
1521        ipa->set_up = 0;
1522}
1523
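/* Learn from the FLAVOR_0 register which endpoints the hardware
 * actually provides, record them in the available bitmap, and verify
 * that every initialized endpoint exists and has the right direction:
 * TX (consumer) endpoint IDs lie below the first RX (producer) ID.
 */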
1524int ipa_endpoint_config(struct ipa *ipa)
1525{
1526        struct device *dev = &ipa->pdev->dev;
1527        u32 initialized;
1528        u32 rx_base;
1529        u32 rx_mask;
1530        u32 tx_mask;
1531        int ret = 0;
1532        u32 max;
1533        u32 val;
1534
1535        /* Find out about the endpoints supplied by the hardware, and ensure
1536         * the highest one doesn't exceed the number we support.
1537         */
1538        val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1539
1540        /* Our RX is an IPA producer */
1541        rx_base = u32_get_bits(val, BAM_PROD_LOWEST_FMASK);
1542        max = rx_base + u32_get_bits(val, BAM_MAX_PROD_PIPES_FMASK);
1543        if (max > IPA_ENDPOINT_MAX) {
1544                dev_err(dev, "too many endpoints (%u > %u)\n",
1545                        max, IPA_ENDPOINT_MAX);
1546                return -EINVAL;
1547        }
1548        rx_mask = GENMASK(max - 1, rx_base);
1549
1550        /* Our TX is an IPA consumer */
1551        max = u32_get_bits(val, BAM_MAX_CONS_PIPES_FMASK);
1552        tx_mask = GENMASK(max - 1, 0);
1553
1554        ipa->available = rx_mask | tx_mask;
1555
1556        /* Check for initialized endpoints not supported by the hardware */
1557        if (ipa->initialized & ~ipa->available) {
1558                dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1559                        ipa->initialized & ~ipa->available);
1560                ret = -EINVAL;          /* Keep going and report other errors too */
1561        }
1562
1563        initialized = ipa->initialized;
1564        while (initialized) {
1565                u32 endpoint_id = __ffs(initialized);
1566                struct ipa_endpoint *endpoint;
1567
1568                initialized ^= BIT(endpoint_id);
1569
1570                /* Make sure the endpoint is pointing in the right direction */
1571                endpoint = &ipa->endpoint[endpoint_id];
1572                if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
1573                        dev_err(dev, "endpoint id %u wrong direction\n",
1574                                endpoint_id);
1575                        ret = -EINVAL;
1576                }
1577        }
1578
1579        return ret;
1580}
1581
1582void ipa_endpoint_deconfig(struct ipa *ipa)
1583{
1584        ipa->available = 0;     /* Nothing more to do */
1585}
1586
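/* Record one endpoint's configuration and mark it initialized.  The
 * endpoint is entered in the name map, and an AP-owned endpoint is
 * also entered in the channel map for its GSI channel.
 */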
1587static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1588                                  const struct ipa_gsi_endpoint_data *data)
1589{
1590        struct ipa_endpoint *endpoint;
1591
1592        endpoint = &ipa->endpoint[data->endpoint_id];
1593
1594        if (data->ee_id == GSI_EE_AP)
1595                ipa->channel_map[data->channel_id] = endpoint;
1596        ipa->name_map[name] = endpoint;
1597
1598        endpoint->ipa = ipa;
1599        endpoint->ee_id = data->ee_id;
1600        endpoint->seq_type = data->endpoint.seq_type;
1601        endpoint->channel_id = data->channel_id;
1602        endpoint->endpoint_id = data->endpoint_id;
1603        endpoint->toward_ipa = data->toward_ipa;
1604        endpoint->data = &data->endpoint.config;
1605
1606        ipa->initialized |= BIT(endpoint->endpoint_id);
1607}
1608
1609void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1610{
1611        endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1612
1613        memset(endpoint, 0, sizeof(*endpoint));
1614}
1615
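/* Inverse of ipa_endpoint_init(): release all initialized endpoints,
 * highest endpoint ID first, then clear the name and channel maps.
 */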
1616void ipa_endpoint_exit(struct ipa *ipa)
1617{
1618        u32 initialized = ipa->initialized;
1619
1620        while (initialized) {
1621                u32 endpoint_id = __fls(initialized);
1622
1623                initialized ^= BIT(endpoint_id);
1624
1625                ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1626        }
1627        memset(ipa->name_map, 0, sizeof(ipa->name_map));
1628        memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1629}
1630
1631/* Returns a bitmask of endpoints that support filtering, or 0 on error */
1632u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1633                      const struct ipa_gsi_endpoint_data *data)
1634{
1635        enum ipa_endpoint_name name;
1636        u32 filter_map;
1637
1638        if (!ipa_endpoint_data_valid(ipa, count, data))
1639                return 0;       /* Error */
1640
1641        ipa->initialized = 0;
1642
1643        filter_map = 0;
1644        for (name = 0; name < count; name++, data++) {
1645                if (ipa_gsi_endpoint_data_empty(data))
1646                        continue;       /* Skip over empty slots */
1647
1648                ipa_endpoint_init_one(ipa, name, data);
1649
1650                if (data->endpoint.filter_support)
1651                        filter_map |= BIT(data->endpoint_id);
1652        }
1653
1654        if (!ipa_filter_map_valid(ipa, filter_map))
1655                goto err_endpoint_exit;
1656
1657        return filter_map;      /* Non-zero bitmask */
1658
1659err_endpoint_exit:
1660        ipa_endpoint_exit(ipa);
1661
1662        return 0;       /* Error */
1663}
1664
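/* A rough sketch of how the entry points above fit together over the
 * life of the driver.  The callers, their error handling, and the
 * origin of "count", "data" and the endpoint pointers live outside
 * this file; the sequence below is illustrative only:
 *
 *        filter_map = ipa_endpoint_init(ipa, count, data);
 *        ...
 *        ret = ipa_endpoint_config(ipa);
 *        ipa_endpoint_setup(ipa);
 *        ret = ipa_endpoint_enable_one(endpoint);
 *        ...
 *        ipa_endpoint_disable_one(endpoint);
 *        ipa_endpoint_teardown(ipa);
 *        ipa_endpoint_deconfig(ipa);
 *        ipa_endpoint_exit(ipa);
 */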