/* linux/drivers/thunderbolt/dma_port.c */
/*
 * Thunderbolt DMA configuration based mailbox support
 *
 * Copyright (C) 2017, Intel Corporation
 * Authors: Michael Jamet <michael.jamet@intel.com>
 *          Mika Westerberg <mika.westerberg@linux.intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
  12
  13#include <linux/delay.h>
  14#include <linux/slab.h>
  15
  16#include "dma_port.h"
  17#include "tb_regs.h"
  18
/* Config space offset of the DMA port capability */
#define DMA_PORT_CAP			0x3e

/* Data registers: one mailbox transfer moves at most 16 dwords */
#define MAIL_DATA			1
#define MAIL_DATA_DWORDS		16

/* MAIL_IN: command register written by the host to start an operation */
#define MAIL_IN				17
#define MAIL_IN_CMD_SHIFT		28
#define MAIL_IN_CMD_MASK		GENMASK(31, 28)
#define MAIL_IN_CMD_FLASH_WRITE		0x0
#define MAIL_IN_CMD_FLASH_UPDATE_AUTH	0x1
#define MAIL_IN_CMD_FLASH_READ		0x2
#define MAIL_IN_CMD_POWER_CYCLE		0x4
#define MAIL_IN_DWORDS_SHIFT		24
#define MAIL_IN_DWORDS_MASK		GENMASK(27, 24)
#define MAIL_IN_ADDRESS_SHIFT		2
#define MAIL_IN_ADDRESS_MASK		GENMASK(23, 2)
#define MAIL_IN_CSS			BIT(1)
#define MAIL_IN_OP_REQUEST		BIT(0)

/* MAIL_OUT: status register read back once the operation completes */
#define MAIL_OUT			18
#define MAIL_OUT_STATUS_RESPONSE	BIT(29)
#define MAIL_OUT_STATUS_CMD_SHIFT	4
#define MAIL_OUT_STATUS_CMD_MASK	GENMASK(7, 4)
#define MAIL_OUT_STATUS_MASK		GENMASK(3, 0)
#define MAIL_OUT_STATUS_COMPLETED	0
#define MAIL_OUT_STATUS_ERR_AUTH	1
#define MAIL_OUT_STATUS_ERR_ACCESS	2

#define DMA_PORT_TIMEOUT		5000 /* ms */
#define DMA_PORT_RETRIES		3
  49
/**
 * struct tb_dma_port - DMA control port
 * @sw: Switch the DMA port belongs to
 * @port: Switch port number where DMA capability is found
 * @base: Start offset of the mailbox registers
 * @buf: Temporary buffer to store a single block (%MAIL_DATA_DWORDS dwords)
 */
struct tb_dma_port {
	struct tb_switch *sw;
	u8 port;
	u32 base;
	u8 *buf;
};
  63
  64/*
  65 * When the switch is in safe mode it supports very little functionality
  66 * so we don't validate that much here.
  67 */
  68static bool dma_port_match(const struct tb_cfg_request *req,
  69                           const struct ctl_pkg *pkg)
  70{
  71        u64 route = tb_cfg_get_route(pkg->buffer) & ~BIT_ULL(63);
  72
  73        if (pkg->frame.eof == TB_CFG_PKG_ERROR)
  74                return true;
  75        if (pkg->frame.eof != req->response_type)
  76                return false;
  77        if (route != tb_cfg_get_route(req->request))
  78                return false;
  79        if (pkg->frame.size != req->response_size)
  80                return false;
  81
  82        return true;
  83}
  84
  85static bool dma_port_copy(struct tb_cfg_request *req, const struct ctl_pkg *pkg)
  86{
  87        memcpy(req->response, pkg->buffer, req->response_size);
  88        return true;
  89}
  90
  91static int dma_port_read(struct tb_ctl *ctl, void *buffer, u64 route,
  92                         u32 port, u32 offset, u32 length, int timeout_msec)
  93{
  94        struct cfg_read_pkg request = {
  95                .header = tb_cfg_make_header(route),
  96                .addr = {
  97                        .seq = 1,
  98                        .port = port,
  99                        .space = TB_CFG_PORT,
 100                        .offset = offset,
 101                        .length = length,
 102                },
 103        };
 104        struct tb_cfg_request *req;
 105        struct cfg_write_pkg reply;
 106        struct tb_cfg_result res;
 107
 108        req = tb_cfg_request_alloc();
 109        if (!req)
 110                return -ENOMEM;
 111
 112        req->match = dma_port_match;
 113        req->copy = dma_port_copy;
 114        req->request = &request;
 115        req->request_size = sizeof(request);
 116        req->request_type = TB_CFG_PKG_READ;
 117        req->response = &reply;
 118        req->response_size = 12 + 4 * length;
 119        req->response_type = TB_CFG_PKG_READ;
 120
 121        res = tb_cfg_request_sync(ctl, req, timeout_msec);
 122
 123        tb_cfg_request_put(req);
 124
 125        if (res.err)
 126                return res.err;
 127
 128        memcpy(buffer, &reply.data, 4 * length);
 129        return 0;
 130}
 131
 132static int dma_port_write(struct tb_ctl *ctl, const void *buffer, u64 route,
 133                          u32 port, u32 offset, u32 length, int timeout_msec)
 134{
 135        struct cfg_write_pkg request = {
 136                .header = tb_cfg_make_header(route),
 137                .addr = {
 138                        .seq = 1,
 139                        .port = port,
 140                        .space = TB_CFG_PORT,
 141                        .offset = offset,
 142                        .length = length,
 143                },
 144        };
 145        struct tb_cfg_request *req;
 146        struct cfg_read_pkg reply;
 147        struct tb_cfg_result res;
 148
 149        memcpy(&request.data, buffer, length * 4);
 150
 151        req = tb_cfg_request_alloc();
 152        if (!req)
 153                return -ENOMEM;
 154
 155        req->match = dma_port_match;
 156        req->copy = dma_port_copy;
 157        req->request = &request;
 158        req->request_size = 12 + 4 * length;
 159        req->request_type = TB_CFG_PKG_WRITE;
 160        req->response = &reply;
 161        req->response_size = sizeof(reply);
 162        req->response_type = TB_CFG_PKG_WRITE;
 163
 164        res = tb_cfg_request_sync(ctl, req, timeout_msec);
 165
 166        tb_cfg_request_put(req);
 167
 168        return res.err;
 169}
 170
 171static int dma_find_port(struct tb_switch *sw)
 172{
 173        int port, ret;
 174        u32 type;
 175
 176        /*
 177         * The DMA (NHI) port is either 3 or 5 depending on the
 178         * controller. Try both starting from 5 which is more common.
 179         */
 180        port = 5;
 181        ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
 182                            DMA_PORT_TIMEOUT);
 183        if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
 184                return port;
 185
 186        port = 3;
 187        ret = dma_port_read(sw->tb->ctl, &type, tb_route(sw), port, 2, 1,
 188                            DMA_PORT_TIMEOUT);
 189        if (!ret && (type & 0xffffff) == TB_TYPE_NHI)
 190                return port;
 191
 192        return -ENODEV;
 193}
 194
 195/**
 196 * dma_port_alloc() - Finds DMA control port from a switch pointed by route
 197 * @sw: Switch from where find the DMA port
 198 *
 199 * Function checks if the switch NHI port supports DMA configuration
 200 * based mailbox capability and if it does, allocates and initializes
 201 * DMA port structure. Returns %NULL if the capabity was not found.
 202 *
 203 * The DMA control port is functional also when the switch is in safe
 204 * mode.
 205 */
 206struct tb_dma_port *dma_port_alloc(struct tb_switch *sw)
 207{
 208        struct tb_dma_port *dma;
 209        int port;
 210
 211        port = dma_find_port(sw);
 212        if (port < 0)
 213                return NULL;
 214
 215        dma = kzalloc(sizeof(*dma), GFP_KERNEL);
 216        if (!dma)
 217                return NULL;
 218
 219        dma->buf = kmalloc_array(MAIL_DATA_DWORDS, sizeof(u32), GFP_KERNEL);
 220        if (!dma->buf) {
 221                kfree(dma);
 222                return NULL;
 223        }
 224
 225        dma->sw = sw;
 226        dma->port = port;
 227        dma->base = DMA_PORT_CAP;
 228
 229        return dma;
 230}
 231
 232/**
 233 * dma_port_free() - Release DMA control port structure
 234 * @dma: DMA control port
 235 */
 236void dma_port_free(struct tb_dma_port *dma)
 237{
 238        if (dma) {
 239                kfree(dma->buf);
 240                kfree(dma);
 241        }
 242}
 243
 244static int dma_port_wait_for_completion(struct tb_dma_port *dma,
 245                                        unsigned int timeout)
 246{
 247        unsigned long end = jiffies + msecs_to_jiffies(timeout);
 248        struct tb_switch *sw = dma->sw;
 249
 250        do {
 251                int ret;
 252                u32 in;
 253
 254                ret = dma_port_read(sw->tb->ctl, &in, tb_route(sw), dma->port,
 255                                    dma->base + MAIL_IN, 1, 50);
 256                if (ret) {
 257                        if (ret != -ETIMEDOUT)
 258                                return ret;
 259                } else if (!(in & MAIL_IN_OP_REQUEST)) {
 260                        return 0;
 261                }
 262
 263                usleep_range(50, 100);
 264        } while (time_before(jiffies, end));
 265
 266        return -ETIMEDOUT;
 267}
 268
 269static int status_to_errno(u32 status)
 270{
 271        switch (status & MAIL_OUT_STATUS_MASK) {
 272        case MAIL_OUT_STATUS_COMPLETED:
 273                return 0;
 274        case MAIL_OUT_STATUS_ERR_AUTH:
 275                return -EINVAL;
 276        case MAIL_OUT_STATUS_ERR_ACCESS:
 277                return -EACCES;
 278        }
 279
 280        return -EIO;
 281}
 282
/*
 * Writes the command @in to the MAIL_IN register, waits until the
 * switch clears the request bit (up to @timeout ms) and translates the
 * resulting MAIL_OUT status into an errno.
 */
static int dma_port_request(struct tb_dma_port *dma, u32 in,
                            unsigned int timeout)
{
        struct tb_switch *sw = dma->sw;
        u32 out;
        int ret;

        /* Issue the command to the switch */
        ret = dma_port_write(sw->tb->ctl, &in, tb_route(sw), dma->port,
                             dma->base + MAIL_IN, 1, DMA_PORT_TIMEOUT);
        if (ret)
                return ret;

        /* Wait until MAIL_IN_OP_REQUEST is cleared by the switch */
        ret = dma_port_wait_for_completion(dma, timeout);
        if (ret)
                return ret;

        /* Read back the completion status */
        ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
                            dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
        if (ret)
                return ret;

        return status_to_errno(out);
}
 306
/*
 * Reads one block (at most %MAIL_DATA_DWORDS dwords) from the active
 * flash region into @buf. @address and @size are in bytes and are
 * expected to be dword aligned.
 */
static int dma_port_flash_read_block(struct tb_dma_port *dma, u32 address,
                                     void *buf, u32 size)
{
        struct tb_switch *sw = dma->sw;
        u32 in, dwaddress, dwords;
        int ret;

        dwaddress = address / 4;
        dwords = size / 4;

        in = MAIL_IN_CMD_FLASH_READ << MAIL_IN_CMD_SHIFT;
        /*
         * The dwords field is only 4 bits wide, so a full block of 16
         * dwords is requested by leaving the field clear — presumably
         * the hardware treats 0 as "all 16" (TODO confirm against the
         * mailbox spec).
         */
        if (dwords < MAIL_DATA_DWORDS)
                in |= (dwords << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
        in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
        in |= MAIL_IN_OP_REQUEST;

        /* Trigger the read and wait for it to complete */
        ret = dma_port_request(dma, in, DMA_PORT_TIMEOUT);
        if (ret)
                return ret;

        /* The data is now available in the MAIL_DATA registers */
        return dma_port_read(sw->tb->ctl, buf, tb_route(sw), dma->port,
                             dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
}
 330
 331static int dma_port_flash_write_block(struct tb_dma_port *dma, u32 address,
 332                                      const void *buf, u32 size)
 333{
 334        struct tb_switch *sw = dma->sw;
 335        u32 in, dwaddress, dwords;
 336        int ret;
 337
 338        dwords = size / 4;
 339
 340        /* Write the block to MAIL_DATA registers */
 341        ret = dma_port_write(sw->tb->ctl, buf, tb_route(sw), dma->port,
 342                            dma->base + MAIL_DATA, dwords, DMA_PORT_TIMEOUT);
 343
 344        in = MAIL_IN_CMD_FLASH_WRITE << MAIL_IN_CMD_SHIFT;
 345
 346        /* CSS header write is always done to the same magic address */
 347        if (address >= DMA_PORT_CSS_ADDRESS) {
 348                dwaddress = DMA_PORT_CSS_ADDRESS;
 349                in |= MAIL_IN_CSS;
 350        } else {
 351                dwaddress = address / 4;
 352        }
 353
 354        in |= ((dwords - 1) << MAIL_IN_DWORDS_SHIFT) & MAIL_IN_DWORDS_MASK;
 355        in |= (dwaddress << MAIL_IN_ADDRESS_SHIFT) & MAIL_IN_ADDRESS_MASK;
 356        in |= MAIL_IN_OP_REQUEST;
 357
 358        return dma_port_request(dma, in, DMA_PORT_TIMEOUT);
 359}
 360
 361/**
 362 * dma_port_flash_read() - Read from active flash region
 363 * @dma: DMA control port
 364 * @address: Address relative to the start of active region
 365 * @buf: Buffer where the data is read
 366 * @size: Size of the buffer
 367 */
 368int dma_port_flash_read(struct tb_dma_port *dma, unsigned int address,
 369                        void *buf, size_t size)
 370{
 371        unsigned int retries = DMA_PORT_RETRIES;
 372        unsigned int offset;
 373
 374        offset = address & 3;
 375        address = address & ~3;
 376
 377        do {
 378                u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
 379                int ret;
 380
 381                ret = dma_port_flash_read_block(dma, address, dma->buf,
 382                                                ALIGN(nbytes, 4));
 383                if (ret) {
 384                        if (ret == -ETIMEDOUT) {
 385                                if (retries--)
 386                                        continue;
 387                                ret = -EIO;
 388                        }
 389                        return ret;
 390                }
 391
 392                memcpy(buf, dma->buf + offset, nbytes);
 393
 394                size -= nbytes;
 395                address += nbytes;
 396                buf += nbytes;
 397        } while (size > 0);
 398
 399        return 0;
 400}
 401
 402/**
 403 * dma_port_flash_write() - Write to non-active flash region
 404 * @dma: DMA control port
 405 * @address: Address relative to the start of non-active region
 406 * @buf: Data to write
 407 * @size: Size of the buffer
 408 *
 409 * Writes block of data to the non-active flash region of the switch. If
 410 * the address is given as %DMA_PORT_CSS_ADDRESS the block is written
 411 * using CSS command.
 412 */
 413int dma_port_flash_write(struct tb_dma_port *dma, unsigned int address,
 414                         const void *buf, size_t size)
 415{
 416        unsigned int retries = DMA_PORT_RETRIES;
 417        unsigned int offset;
 418
 419        if (address >= DMA_PORT_CSS_ADDRESS) {
 420                offset = 0;
 421                if (size > DMA_PORT_CSS_MAX_SIZE)
 422                        return -E2BIG;
 423        } else {
 424                offset = address & 3;
 425                address = address & ~3;
 426        }
 427
 428        do {
 429                u32 nbytes = min_t(u32, size, MAIL_DATA_DWORDS * 4);
 430                int ret;
 431
 432                memcpy(dma->buf + offset, buf, nbytes);
 433
 434                ret = dma_port_flash_write_block(dma, address, buf, nbytes);
 435                if (ret) {
 436                        if (ret == -ETIMEDOUT) {
 437                                if (retries--)
 438                                        continue;
 439                                ret = -EIO;
 440                        }
 441                        return ret;
 442                }
 443
 444                size -= nbytes;
 445                address += nbytes;
 446                buf += nbytes;
 447        } while (size > 0);
 448
 449        return 0;
 450}
 451
 452/**
 453 * dma_port_flash_update_auth() - Starts flash authenticate cycle
 454 * @dma: DMA control port
 455 *
 456 * Starts the flash update authentication cycle. If the image in the
 457 * non-active area was valid, the switch starts upgrade process where
 458 * active and non-active area get swapped in the end. Caller should call
 459 * dma_port_flash_update_auth_status() to get status of this command.
 460 * This is because if the switch in question is root switch the
 461 * thunderbolt host controller gets reset as well.
 462 */
 463int dma_port_flash_update_auth(struct tb_dma_port *dma)
 464{
 465        u32 in;
 466
 467        in = MAIL_IN_CMD_FLASH_UPDATE_AUTH << MAIL_IN_CMD_SHIFT;
 468        in |= MAIL_IN_OP_REQUEST;
 469
 470        return dma_port_request(dma, in, 150);
 471}
 472
 473/**
 474 * dma_port_flash_update_auth_status() - Reads status of update auth command
 475 * @dma: DMA control port
 476 * @status: Status code of the operation
 477 *
 478 * The function checks if there is status available from the last update
 479 * auth command. Returns %0 if there is no status and no further
 480 * action is required. If there is status, %1 is returned instead and
 481 * @status holds the failure code.
 482 *
 483 * Negative return means there was an error reading status from the
 484 * switch.
 485 */
 486int dma_port_flash_update_auth_status(struct tb_dma_port *dma, u32 *status)
 487{
 488        struct tb_switch *sw = dma->sw;
 489        u32 out, cmd;
 490        int ret;
 491
 492        ret = dma_port_read(sw->tb->ctl, &out, tb_route(sw), dma->port,
 493                            dma->base + MAIL_OUT, 1, DMA_PORT_TIMEOUT);
 494        if (ret)
 495                return ret;
 496
 497        /* Check if the status relates to flash update auth */
 498        cmd = (out & MAIL_OUT_STATUS_CMD_MASK) >> MAIL_OUT_STATUS_CMD_SHIFT;
 499        if (cmd == MAIL_IN_CMD_FLASH_UPDATE_AUTH) {
 500                if (status)
 501                        *status = out & MAIL_OUT_STATUS_MASK;
 502
 503                /* Reset is needed in any case */
 504                return 1;
 505        }
 506
 507        return 0;
 508}
 509
 510/**
 511 * dma_port_power_cycle() - Power cycles the switch
 512 * @dma: DMA control port
 513 *
 514 * Triggers power cycle to the switch.
 515 */
 516int dma_port_power_cycle(struct tb_dma_port *dma)
 517{
 518        u32 in;
 519
 520        in = MAIL_IN_CMD_POWER_CYCLE << MAIL_IN_CMD_SHIFT;
 521        in |= MAIL_IN_OP_REQUEST;
 522
 523        return dma_port_request(dma, in, 150);
 524}
 525