linux/drivers/net/ipa/ipa_cmd.c
// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2021 Linaro Ltd.
 */

#include <linux/types.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/bitfield.h>
#include <linux/dma-direction.h>

#include "gsi.h"
#include "gsi_trans.h"
#include "ipa.h"
#include "ipa_endpoint.h"
#include "ipa_table.h"
#include "ipa_cmd.h"
#include "ipa_mem.h"

/**
 * DOC:  IPA Immediate Commands
 *
 * The AP command TX endpoint is used to issue immediate commands to the IPA.
 * An immediate command is generally used to request that the IPA do
 * something other than transfer data to another endpoint.
 *
 * Immediate commands are represented by GSI transactions just like other
 * transfer requests, and each is described by a single GSI TRE.  Every
 * immediate command has a well-defined format, with a payload of a known
 * length.  This allows the transfer element's length field to be used to
 * hold an immediate command's opcode.  The payload for a command resides
 * in DRAM and is described by a single scatterlist entry in its
 * transaction.  Commands do not require a transaction completion callback.
 * To commit an immediate command transaction, either
 * gsi_trans_commit_wait() or gsi_trans_commit_wait_timeout() is used.
 */

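/* A minimal usage sketch (illustrative only; "offset", "value" and "mask"
 * are placeholders): allocate a one-TRE command transaction, add a single
 * immediate command to it, then commit it and wait for completion.
 *
 *	struct gsi_trans *trans;
 *
 *	trans = ipa_cmd_trans_alloc(ipa, 1);
 *	if (!trans)
 *		return;
 *	ipa_cmd_register_write_add(trans, offset, value, mask, false);
 *	gsi_trans_commit_wait(trans);
 */
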
/* Some commands can wait until indicated pipeline stages are clear */
enum pipeline_clear_options {
        pipeline_clear_hps              = 0x0,
        pipeline_clear_src_grp          = 0x1,
        pipeline_clear_full             = 0x2,
};

/* IPA_CMD_IP_V{4,6}_{FILTER,ROUTING}_INIT */

struct ipa_cmd_hw_ip_fltrt_init {
        __le64 hash_rules_addr;
        __le64 flags;
        __le64 nhash_rules_addr;
};

/* Field masks for ipa_cmd_hw_ip_fltrt_init structure fields */
#define IP_FLTRT_FLAGS_HASH_SIZE_FMASK                  GENMASK_ULL(11, 0)
#define IP_FLTRT_FLAGS_HASH_ADDR_FMASK                  GENMASK_ULL(27, 12)
#define IP_FLTRT_FLAGS_NHASH_SIZE_FMASK                 GENMASK_ULL(39, 28)
#define IP_FLTRT_FLAGS_NHASH_ADDR_FMASK                 GENMASK_ULL(55, 40)
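
/* For example, ipa_cmd_table_init_add() below packs a non-hashed table's
 * (memory-adjusted) offset and size into the 64-bit flags word with:
 *
 *	val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
 *	val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
 */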

/* IPA_CMD_HDR_INIT_LOCAL */

struct ipa_cmd_hw_hdr_init_local {
        __le64 hdr_table_addr;
        __le32 flags;
        __le32 reserved;
};

/* Field masks for ipa_cmd_hw_hdr_init_local structure fields */
#define HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK           GENMASK(11, 0)
#define HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK             GENMASK(27, 12)

/* IPA_CMD_REGISTER_WRITE */

/* For IPA v4.0+, the pipeline clear options are encoded in the opcode */
#define REGISTER_WRITE_OPCODE_SKIP_CLEAR_FMASK          GENMASK(8, 8)
#define REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK        GENMASK(10, 9)

struct ipa_cmd_register_write {
        __le16 flags;           /* Unused/reserved prior to IPA v4.0 */
        __le16 offset;
        __le32 value;
        __le32 value_mask;
        __le32 clear_options;   /* Unused/reserved for IPA v4.0+ */
};

/* Field masks for ipa_cmd_register_write structure fields */
/* The next field is present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK          GENMASK(14, 11)
/* The next field is not present for IPA v4.0+ */
#define REGISTER_WRITE_FLAGS_SKIP_CLEAR_FMASK           GENMASK(15, 15)

/* The next field and its values are not present for IPA v4.0+ */
#define REGISTER_WRITE_CLEAR_OPTIONS_FMASK              GENMASK(1, 0)

/* IPA_CMD_IP_PACKET_INIT */

struct ipa_cmd_ip_packet_init {
        u8 dest_endpoint;
        u8 reserved[7];
};

/* Field masks for ipa_cmd_ip_packet_init dest_endpoint field */
#define IPA_PACKET_INIT_DEST_ENDPOINT_FMASK             GENMASK(4, 0)

/* IPA_CMD_DMA_SHARED_MEM */

/* For IPA v4.0+, this opcode gets modified with pipeline clear options */

#define DMA_SHARED_MEM_OPCODE_SKIP_CLEAR_FMASK          GENMASK(8, 8)
#define DMA_SHARED_MEM_OPCODE_CLEAR_OPTION_FMASK        GENMASK(10, 9)

struct ipa_cmd_hw_dma_mem_mem {
        __le16 clear_after_read; /* 0 or DMA_SHARED_MEM_CLEAR_AFTER_READ */
        __le16 size;
        __le16 local_addr;
        __le16 flags;
        __le64 system_addr;
};

/* Flag allowing atomic clear of target region after reading data (v4.0+) */
#define DMA_SHARED_MEM_CLEAR_AFTER_READ                 GENMASK(15, 15)

/* Field masks for ipa_cmd_hw_dma_mem_mem structure fields */
#define DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK            GENMASK(0, 0)
/* The next two fields are not present for IPA v4.0+ */
#define DMA_SHARED_MEM_FLAGS_SKIP_CLEAR_FMASK           GENMASK(1, 1)
#define DMA_SHARED_MEM_FLAGS_CLEAR_OPTIONS_FMASK        GENMASK(3, 2)

/* IPA_CMD_IP_PACKET_TAG_STATUS */

struct ipa_cmd_ip_packet_tag_status {
        __le64 tag;
};

#define IP_PACKET_TAG_STATUS_TAG_FMASK                  GENMASK_ULL(63, 16)

/* Immediate command payload */
union ipa_cmd_payload {
        struct ipa_cmd_hw_ip_fltrt_init table_init;
        struct ipa_cmd_hw_hdr_init_local hdr_init_local;
        struct ipa_cmd_register_write register_write;
        struct ipa_cmd_ip_packet_init ip_packet_init;
        struct ipa_cmd_hw_dma_mem_mem dma_shared_mem;
        struct ipa_cmd_ip_packet_tag_status ip_packet_tag_status;
};

static void ipa_cmd_validate_build(void)
{
        /* The sizes of the filter and route tables need to fit into
         * fields in the ipa_cmd_hw_ip_fltrt_init structure.  Although
         * hashed tables might not be used, non-hashed and hashed tables
         * have the same maximum size.  IPv4 and IPv6 filter tables have
         * the same number of entries, as do the IPv4 and IPv6 route
         * tables.
         */
#define TABLE_SIZE      (TABLE_COUNT_MAX * sizeof(__le64))
#define TABLE_COUNT_MAX max_t(u32, IPA_ROUTE_COUNT_MAX, IPA_FILTER_COUNT_MAX)
        BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK));
        BUILD_BUG_ON(TABLE_SIZE > field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
#undef TABLE_COUNT_MAX
#undef TABLE_SIZE

        /* Hashed and non-hashed fields are assumed to be the same size */
        BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_SIZE_FMASK) !=
                     field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK));
        BUILD_BUG_ON(field_max(IP_FLTRT_FLAGS_HASH_ADDR_FMASK) !=
                     field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK));

        /* Valid endpoint numbers must fit in the IP packet init command */
        BUILD_BUG_ON(field_max(IPA_PACKET_INIT_DEST_ENDPOINT_FMASK) <
                     IPA_ENDPOINT_MAX - 1);
}

/* Validate a memory region holding a table */
bool ipa_cmd_table_valid(struct ipa *ipa, const struct ipa_mem *mem, bool route)
{
        u32 offset_max = field_max(IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
        u32 size_max = field_max(IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);
        const char *table = route ? "route" : "filter";
        struct device *dev = &ipa->pdev->dev;

        /* Size must fit in the immediate command field that holds it */
        if (mem->size > size_max) {
                dev_err(dev, "%s table region size too large\n", table);
                dev_err(dev, "    (0x%04x > 0x%04x)\n",
                        mem->size, size_max);

                return false;
        }

        /* Offset must fit in the immediate command field that holds it */
        if (mem->offset > offset_max ||
            ipa->mem_offset > offset_max - mem->offset) {
                dev_err(dev, "%s table region offset too large\n", table);
                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
                        ipa->mem_offset, mem->offset, offset_max);

                return false;
        }

        /* Entire memory range must fit within IPA-local memory */
        if (mem->offset > ipa->mem_size ||
            mem->size > ipa->mem_size - mem->offset) {
                dev_err(dev, "%s table region out of range\n", table);
                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
                        mem->offset, mem->size, ipa->mem_size);

                return false;
        }

        return true;
}

/* Validate the memory region that holds headers */
static bool ipa_cmd_header_valid(struct ipa *ipa)
{
        struct device *dev = &ipa->pdev->dev;
        const struct ipa_mem *mem;
        u32 offset_max;
        u32 size_max;
        u32 offset;
        u32 size;

        /* In ipa_cmd_hdr_init_local_add() we record the offset and size of
         * the header table memory area in an immediate command.  Make sure
         * the offset and size fit in the fields that need to hold them, and
         * that the entire range is within the overall IPA memory range.
         */
        offset_max = field_max(HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
        size_max = field_max(HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);

        /* The header memory area contains both the modem and AP header
         * regions.  The modem portion defines the address of the region.
         */
        mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
        offset = mem->offset;
        size = mem->size;

        /* Make sure the offset fits in the IPA command */
        if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
                dev_err(dev, "header table region offset too large\n");
                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
                        ipa->mem_offset, offset, offset_max);

                return false;
        }

        /* Add the size of the AP portion (if defined) to the combined size */
        mem = ipa_mem_find(ipa, IPA_MEM_AP_HEADER);
        if (mem)
                size += mem->size;

        /* Make sure the combined size fits in the IPA command */
        if (size > size_max) {
                dev_err(dev, "header table region size too large\n");
                dev_err(dev, "    (0x%04x > 0x%08x)\n", size, size_max);

                return false;
        }

        /* Make sure the entire combined area fits in IPA memory */
        if (size > ipa->mem_size || offset > ipa->mem_size - size) {
                dev_err(dev, "header table region out of range\n");
                dev_err(dev, "    (0x%04x + 0x%04x > 0x%04x)\n",
                        offset, size, ipa->mem_size);

                return false;
        }

        return true;
}

/* Indicate whether an offset can be used with a register_write command */
static bool ipa_cmd_register_write_offset_valid(struct ipa *ipa,
                                                const char *name, u32 offset)
{
        struct ipa_cmd_register_write *payload;
        struct device *dev = &ipa->pdev->dev;
        u32 offset_max;
        u32 bit_count;

        /* The maximum offset in a register_write immediate command depends
         * on the version of IPA.  A 16 bit offset is always supported,
         * but starting with IPA v4.0 some additional high-order bits are
         * allowed.
         */
        bit_count = BITS_PER_BYTE * sizeof(payload->offset);
        if (ipa->version >= IPA_VERSION_4_0)
                bit_count += hweight32(REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
        BUILD_BUG_ON(bit_count > 32);
        offset_max = ~0U >> (32 - bit_count);

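        /* For example: prior to IPA v4.0 only the 16-bit offset field
         * is available, so offset_max is 0xffff.  For IPA v4.0+ the
         * four OFFSET_HIGH flag bits extend this to 20 bits, making
         * offset_max 0xfffff.
         */
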
        /* Make sure the offset can be represented by the field(s)
         * that holds it.  Also make sure the offset is not outside
         * the overall IPA memory range.
         */
        if (offset > offset_max || ipa->mem_offset > offset_max - offset) {
                dev_err(dev, "%s offset too large (0x%04x + 0x%04x > 0x%04x)\n",
                        name, ipa->mem_offset, offset, offset_max);
                return false;
        }

        return true;
}

/* Check whether offsets passed to register_write are valid */
static bool ipa_cmd_register_write_valid(struct ipa *ipa)
{
        const char *name;
        u32 offset;

        /* If hashed tables are supported, ensure the hash flush register
         * offset will fit in a register write IPA immediate command.
         */
        if (ipa_table_hash_support(ipa)) {
                offset = ipa_reg_filt_rout_hash_flush_offset(ipa->version);
                name = "filter/route hash flush";
                if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
                        return false;
        }

        /* Each endpoint can have a status endpoint associated with it,
         * and this is recorded in an endpoint register.  If the modem
         * crashes, we reset the status endpoint for all modem endpoints
         * using a register write IPA immediate command.  Make sure the
         * worst case (highest endpoint number) offset of that endpoint
         * fits in the register write command field(s) that must hold it.
         */
        offset = IPA_REG_ENDP_STATUS_N_OFFSET(IPA_ENDPOINT_COUNT - 1);
        name = "maximal endpoint status";
        if (!ipa_cmd_register_write_offset_valid(ipa, name, offset))
                return false;

        return true;
}

bool ipa_cmd_data_valid(struct ipa *ipa)
{
        if (!ipa_cmd_header_valid(ipa))
                return false;

        if (!ipa_cmd_register_write_valid(ipa))
                return false;

        return true;
}

int ipa_cmd_pool_init(struct gsi_channel *channel, u32 tre_max)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;
        struct device *dev = channel->gsi->dev;
        int ret;

        /* This is as good a place as any to validate build constants */
        ipa_cmd_validate_build();

        /* Even though command payloads are allocated one at a time,
         * a single transaction can require up to tlv_count of them,
         * so we treat them as if that many can be allocated at once.
         */
        ret = gsi_trans_pool_init_dma(dev, &trans_info->cmd_pool,
                                      sizeof(union ipa_cmd_payload),
                                      tre_max, channel->tlv_count);
        if (ret)
                return ret;

        /* Each TRE needs a command info structure */
        ret = gsi_trans_pool_init(&trans_info->info_pool,
                                  sizeof(struct ipa_cmd_info),
                                  tre_max, channel->tlv_count);
        if (ret)
                gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);

        return ret;
}

void ipa_cmd_pool_exit(struct gsi_channel *channel)
{
        struct gsi_trans_info *trans_info = &channel->trans_info;
        struct device *dev = channel->gsi->dev;

        gsi_trans_pool_exit(&trans_info->info_pool);
        gsi_trans_pool_exit_dma(dev, &trans_info->cmd_pool);
}

static union ipa_cmd_payload *
ipa_cmd_payload_alloc(struct ipa *ipa, dma_addr_t *addr)
{
        struct gsi_trans_info *trans_info;
        struct ipa_endpoint *endpoint;

        endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
        trans_info = &ipa->gsi.channel[endpoint->channel_id].trans_info;

        return gsi_trans_pool_alloc_dma(&trans_info->cmd_pool, addr);
}

/* If hash_size is 0, hash_offset and hash_addr are ignored */
void ipa_cmd_table_init_add(struct gsi_trans *trans,
                            enum ipa_cmd_opcode opcode, u16 size, u32 offset,
                            dma_addr_t addr, u16 hash_size, u32 hash_offset,
                            dma_addr_t hash_addr)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum dma_data_direction direction = DMA_TO_DEVICE;
        struct ipa_cmd_hw_ip_fltrt_init *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;
        u64 val;

        /* Record the non-hash table offset and size */
        offset += ipa->mem_offset;
        val = u64_encode_bits(offset, IP_FLTRT_FLAGS_NHASH_ADDR_FMASK);
        val |= u64_encode_bits(size, IP_FLTRT_FLAGS_NHASH_SIZE_FMASK);

        /* The hash table offset and address are zero if its size is 0 */
        if (hash_size) {
                /* Record the hash table offset and size */
                hash_offset += ipa->mem_offset;
                val |= u64_encode_bits(hash_offset,
                                       IP_FLTRT_FLAGS_HASH_ADDR_FMASK);
                val |= u64_encode_bits(hash_size,
                                       IP_FLTRT_FLAGS_HASH_SIZE_FMASK);
        }

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->table_init;

        /* Fill in all offsets and sizes and the non-hash table address */
        if (hash_size)
                payload->hash_rules_addr = cpu_to_le64(hash_addr);
        payload->flags = cpu_to_le64(val);
        payload->nhash_rules_addr = cpu_to_le64(addr);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          direction, opcode);
}
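
/* Example (hypothetical "trans", "size", "offset" and "addr"): adding a
 * command that initializes an IPv4 route table with no hashed table:
 *
 *	ipa_cmd_table_init_add(trans, IPA_CMD_IP_V4_ROUTING_INIT,
 *			       size, offset, addr, 0, 0, 0);
 */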

/* Initialize header space in IPA-local memory */
void ipa_cmd_hdr_init_local_add(struct gsi_trans *trans, u32 offset, u16 size,
                                dma_addr_t addr)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_HDR_INIT_LOCAL;
        enum dma_data_direction direction = DMA_TO_DEVICE;
        struct ipa_cmd_hw_hdr_init_local *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;
        u32 flags;

        offset += ipa->mem_offset;

        /* With this command we tell the IPA where in its local memory the
         * header tables reside.  The content of the buffer provided is
         * also written via DMA into that space.  The IPA hardware owns
         * the table, but the AP must initialize it.
         */
        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->hdr_init_local;

        payload->hdr_table_addr = cpu_to_le64(addr);
        flags = u32_encode_bits(size, HDR_INIT_LOCAL_FLAGS_TABLE_SIZE_FMASK);
        flags |= u32_encode_bits(offset, HDR_INIT_LOCAL_FLAGS_HDR_ADDR_FMASK);
        payload->flags = cpu_to_le32(flags);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          direction, opcode);
}
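
/* Example (a sketch; "trans" and the DMA address "addr" of a buffer
 * holding the initial header table content are assumed): using the modem
 * header region to locate the table:
 *
 *	const struct ipa_mem *mem = ipa_mem_find(ipa, IPA_MEM_MODEM_HEADER);
 *
 *	ipa_cmd_hdr_init_local_add(trans, mem->offset, mem->size, addr);
 */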

void ipa_cmd_register_write_add(struct gsi_trans *trans, u32 offset, u32 value,
                                u32 mask, bool clear_full)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        struct ipa_cmd_register_write *payload;
        union ipa_cmd_payload *cmd_payload;
        u32 opcode = IPA_CMD_REGISTER_WRITE;
        dma_addr_t payload_addr;
        u32 clear_option;
        u32 options;
        u16 flags;

        /* pipeline_clear_src_grp is not used */
        clear_option = clear_full ? pipeline_clear_full : pipeline_clear_hps;

        /* IPA v4.0+ represents the pipeline clear options in the opcode.  It
         * also supports a larger offset by encoding additional high-order
         * bits in the payload flags field.
         */
        if (ipa->version >= IPA_VERSION_4_0) {
                u16 offset_high;
                u32 val;

                /* Opcode encodes pipeline clear options */
                /* SKIP_CLEAR is always 0 (don't skip pipeline clear) */
                val = u16_encode_bits(clear_option,
                                      REGISTER_WRITE_OPCODE_CLEAR_OPTION_FMASK);
                opcode |= val;

                /* Extract the high 4 bits from the offset */
                offset_high = (u16)u32_get_bits(offset, GENMASK(19, 16));
                offset &= (1 << 16) - 1;

                /* Encode the high-order offset bits into the flags field */
                flags = u16_encode_bits(offset_high,
                                        REGISTER_WRITE_FLAGS_OFFSET_HIGH_FMASK);
                options = 0;    /* reserved */
        } else {
                flags = 0;      /* SKIP_CLEAR flag is always 0 */
                options = u16_encode_bits(clear_option,
                                          REGISTER_WRITE_CLEAR_OPTIONS_FMASK);
        }

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->register_write;

        payload->flags = cpu_to_le16(flags);
        payload->offset = cpu_to_le16((u16)offset);
        payload->value = cpu_to_le32(value);
        payload->value_mask = cpu_to_le32(mask);
        payload->clear_options = cpu_to_le32(options);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          DMA_NONE, opcode);
}
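
/* For example, ipa_cmd_pipeline_clear_add() below uses this to issue a
 * no-op register write (a zero mask means nothing is written) that waits
 * for the full pipeline to clear:
 *
 *	ipa_cmd_register_write_add(trans, 0, 0, 0, true);
 */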

/* Skip IP packet processing on the next data transfer on a TX channel */
static void ipa_cmd_ip_packet_init_add(struct gsi_trans *trans, u8 endpoint_id)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_INIT;
        enum dma_data_direction direction = DMA_TO_DEVICE;
        struct ipa_cmd_ip_packet_init *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->ip_packet_init;

        payload->dest_endpoint = u8_encode_bits(endpoint_id,
                                        IPA_PACKET_INIT_DEST_ENDPOINT_FMASK);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          direction, opcode);
}

/* Use a DMA command to read or write a block of IPA-resident memory */
void ipa_cmd_dma_shared_mem_add(struct gsi_trans *trans, u32 offset, u16 size,
                                dma_addr_t addr, bool toward_ipa)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_DMA_SHARED_MEM;
        struct ipa_cmd_hw_dma_mem_mem *payload;
        union ipa_cmd_payload *cmd_payload;
        enum dma_data_direction direction;
        dma_addr_t payload_addr;
        u16 flags;

        /* size and offset must fit in 16 bit fields */
        WARN_ON(!size);
        WARN_ON(size > U16_MAX);
        WARN_ON(offset > U16_MAX || ipa->mem_offset > U16_MAX - offset);

        offset += ipa->mem_offset;

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->dma_shared_mem;

        /* payload->clear_after_read was reserved prior to IPA v4.0.  It's
         * never needed for current code, so it's 0 regardless of version.
         */
        payload->size = cpu_to_le16(size);
        payload->local_addr = cpu_to_le16(offset);
        /* payload->flags:
         *   direction:         0 = write to IPA, 1 = read from IPA
         * Starting with IPA v4.0 the next two are reserved; either way,
         * they are all zero:
         *   pipeline clear:    0 = wait for pipeline clear (don't skip)
         *   clear_options:     0 = pipeline_clear_hps
         * For v4.0+ these are instead encoded in the opcode, but since
         * both values are 0 we don't bother OR'ing them in.
         */
        flags = toward_ipa ? 0 : DMA_SHARED_MEM_FLAGS_DIRECTION_FMASK;
        payload->flags = cpu_to_le16(flags);
        payload->system_addr = cpu_to_le64(addr);

        direction = toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          direction, opcode);
}
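
/* Example (a sketch; "trans", "offset", "size" and the DMA-able buffer
 * address "addr" are assumed): reading a block of IPA-local memory into
 * the buffer, i.e. a transfer *from* the IPA:
 *
 *	ipa_cmd_dma_shared_mem_add(trans, offset, size, addr, false);
 */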

static void ipa_cmd_ip_tag_status_add(struct gsi_trans *trans)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum ipa_cmd_opcode opcode = IPA_CMD_IP_PACKET_TAG_STATUS;
        enum dma_data_direction direction = DMA_TO_DEVICE;
        struct ipa_cmd_ip_packet_tag_status *payload;
        union ipa_cmd_payload *cmd_payload;
        dma_addr_t payload_addr;

        cmd_payload = ipa_cmd_payload_alloc(ipa, &payload_addr);
        payload = &cmd_payload->ip_packet_tag_status;

        payload->tag = le64_encode_bits(0, IP_PACKET_TAG_STATUS_TAG_FMASK);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          direction, opcode);
}

/* Issue a small command TX data transfer */
static void ipa_cmd_transfer_add(struct gsi_trans *trans)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        enum dma_data_direction direction = DMA_TO_DEVICE;
        enum ipa_cmd_opcode opcode = IPA_CMD_NONE;
        union ipa_cmd_payload *payload;
        dma_addr_t payload_addr;

        /* Just transfer a zero-filled payload structure */
        payload = ipa_cmd_payload_alloc(ipa, &payload_addr);

        gsi_trans_cmd_add(trans, payload, sizeof(*payload), payload_addr,
                          direction, opcode);
}

/* Add immediate commands to a transaction to clear the hardware pipeline */
void ipa_cmd_pipeline_clear_add(struct gsi_trans *trans)
{
        struct ipa *ipa = container_of(trans->gsi, struct ipa, gsi);
        struct ipa_endpoint *endpoint;

        /* This will complete when the transfer is received */
        reinit_completion(&ipa->completion);

        /* Issue a no-op register write command (mask 0 means no write) */
        ipa_cmd_register_write_add(trans, 0, 0, 0, true);

        /* Send a data packet through the IPA pipeline.  The packet_init
         * command says to send the next packet directly to the exception
         * endpoint without any other IPA processing.  The tag_status
         * command requests that status be generated on completion of
         * that transfer, tagged with the command's tag value (zero here).
         * Finally, the transfer command sends a small packet of data
         * (instead of a command) using the command endpoint.
         */
        endpoint = ipa->name_map[IPA_ENDPOINT_AP_LAN_RX];
        ipa_cmd_ip_packet_init_add(trans, endpoint->endpoint_id);
        ipa_cmd_ip_tag_status_add(trans);
        ipa_cmd_transfer_add(trans);
}

/* Return the number of commands required to clear the pipeline: a no-op
 * register write, a packet init, a tag status, and a small transfer
 * (see ipa_cmd_pipeline_clear_add() above)
 */
u32 ipa_cmd_pipeline_clear_count(void)
{
        return 4;
}

void ipa_cmd_pipeline_clear_wait(struct ipa *ipa)
{
        wait_for_completion(&ipa->completion);
}

void ipa_cmd_pipeline_clear(struct ipa *ipa)
{
        u32 count = ipa_cmd_pipeline_clear_count();
        struct gsi_trans *trans;

        trans = ipa_cmd_trans_alloc(ipa, count);
        if (trans) {
                ipa_cmd_pipeline_clear_add(trans);
                gsi_trans_commit_wait(trans);
                ipa_cmd_pipeline_clear_wait(ipa);
        } else {
                dev_err(&ipa->pdev->dev,
                        "error allocating %u entry tag transaction\n", count);
        }
}

static struct ipa_cmd_info *
ipa_cmd_info_alloc(struct ipa_endpoint *endpoint, u32 tre_count)
{
        struct gsi_channel *channel;

        channel = &endpoint->ipa->gsi.channel[endpoint->channel_id];

        return gsi_trans_pool_alloc(&channel->trans_info.info_pool, tre_count);
}

/* Allocate a transaction for the command TX endpoint */
struct gsi_trans *ipa_cmd_trans_alloc(struct ipa *ipa, u32 tre_count)
{
        struct ipa_endpoint *endpoint;
        struct gsi_trans *trans;

        endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];

        trans = gsi_channel_trans_alloc(&ipa->gsi, endpoint->channel_id,
                                        tre_count, DMA_NONE);
        if (trans)
                trans->info = ipa_cmd_info_alloc(endpoint, tre_count);

        return trans;
}