linux/drivers/gpu/drm/i915/intel_sideband.c
/*
 * Copyright © 2013 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 */

#include <asm/iosf_mbi.h>

#include "i915_drv.h"
#include "intel_sideband.h"

/*
 * IOSF sideband, see VLV2_SidebandMsg_HAS.docx and
 * VLV_VLV2_PUNIT_HAS_0.8.docx
 */

/* Standard MMIO read, non-posted */
#define SB_MRD_NP	0x00
/* Standard MMIO write, non-posted */
#define SB_MWR_NP	0x01
/* Private register read, double-word addressing, non-posted */
#define SB_CRRDDA_NP	0x06
/* Private register write, double-word addressing, non-posted */
#define SB_CRWRDA_NP	0x07

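/*
 * A sideband transaction is a doorbell handshake: the target register
 * offset goes into VLV_IOSF_ADDR, the payload (for writes) into
 * VLV_IOSF_DATA, and a command word into VLV_IOSF_DOORBELL_REQ packing
 * the PCI devfn of the initiator, one of the SB_* opcodes above, the
 * destination port and the byte enables, with IOSF_SB_BUSY set to kick
 * off the transfer. For example (an illustrative sketch, not a quote of
 * the code below), a punit register read is encoded as:
 *
 *	(PCI_DEVFN(0, 0) << IOSF_DEVFN_SHIFT) |
 *	(SB_CRRDDA_NP << IOSF_OPCODE_SHIFT) |
 *	(IOSF_PORT_PUNIT << IOSF_PORT_SHIFT) |
 *	(0xf << IOSF_BYTE_ENABLES_SHIFT) |
 *	IOSF_SB_BUSY
 *
 * Hardware clears IOSF_SB_BUSY once the message completes, at which
 * point the reply (if any) can be read back from VLV_IOSF_DATA. See
 * vlv_sideband_rw() below.
 */
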
/* An empty IPI callback, used only to kick each CPU out of its sleep state. */
static void ping(void *info)
{
}

static void __vlv_punit_get(struct drm_i915_private *i915)
{
	iosf_mbi_punit_acquire();

	/*
	 * Prevent the cpu from sleeping while we use this sideband, otherwise
	 * the punit may cause a machine hang. The issue appears to be isolated
	 * with changing the power state of the CPU package while changing
	 * the power state via the punit, and we have only observed it
	 * reliably on 4-core Baytrail systems suggesting the issue is in the
	 * power delivery mechanism and likely to be board/function specific.
	 * Hence we presume the workaround needs only be applied to the
	 * Valleyview P-unit and not all sideband communications.
	 */
	if (IS_VALLEYVIEW(i915)) {
		pm_qos_update_request(&i915->sb_qos, 0);
		on_each_cpu(ping, NULL, 1);
	}
}

static void __vlv_punit_put(struct drm_i915_private *i915)
{
	if (IS_VALLEYVIEW(i915))
		pm_qos_update_request(&i915->sb_qos, PM_QOS_DEFAULT_VALUE);

	iosf_mbi_punit_release();
}

void vlv_iosf_sb_get(struct drm_i915_private *i915, unsigned long ports)
{
	if (ports & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_get(i915);

	mutex_lock(&i915->sb_lock);
}

void vlv_iosf_sb_put(struct drm_i915_private *i915, unsigned long ports)
{
	mutex_unlock(&i915->sb_lock);

	if (ports & BIT(VLV_IOSF_SB_PUNIT))
		__vlv_punit_put(i915);
}

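/*
 * Callers bracket a sequence of sideband accesses with a get/put pair,
 * and may batch several ports into a single critical section. A sketch
 * (assuming the VLV_IOSF_SB_* port ids from intel_sideband.h):
 *
 *	vlv_iosf_sb_get(i915, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
 *	... vlv_cck_read()/vlv_punit_write() etc ...
 *	vlv_iosf_sb_put(i915, BIT(VLV_IOSF_SB_CCK) | BIT(VLV_IOSF_SB_PUNIT));
 *
 * Acquiring the punit port is the expensive case (see above), so only
 * include it when punit access is actually required.
 */
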
static int vlv_sideband_rw(struct drm_i915_private *i915,
			   u32 devfn, u32 port, u32 opcode,
			   u32 addr, u32 *val)
{
	struct intel_uncore *uncore = &i915->uncore;
	const bool is_read = (opcode == SB_MRD_NP || opcode == SB_CRRDDA_NP);
	int err;

	lockdep_assert_held(&i915->sb_lock);
	if (port == IOSF_PORT_PUNIT)
		iosf_mbi_assert_punit_acquired();

	/* Flush the previous comms, just in case it failed last time. */
	if (intel_wait_for_register(uncore,
				    VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
				    5)) {
		drm_dbg(&i915->drm, "IOSF sideband idle wait (%s) timed out\n",
			is_read ? "read" : "write");
		return -EAGAIN;
	}

	preempt_disable();

	intel_uncore_write_fw(uncore, VLV_IOSF_ADDR, addr);
	intel_uncore_write_fw(uncore, VLV_IOSF_DATA, is_read ? 0 : *val);
	intel_uncore_write_fw(uncore, VLV_IOSF_DOORBELL_REQ,
			      (devfn << IOSF_DEVFN_SHIFT) |
			      (opcode << IOSF_OPCODE_SHIFT) |
			      (port << IOSF_PORT_SHIFT) |
			      (0xf << IOSF_BYTE_ENABLES_SHIFT) |
			      (0 << IOSF_BAR_SHIFT) |
			      IOSF_SB_BUSY);

	if (__intel_wait_for_register_fw(uncore,
					 VLV_IOSF_DOORBELL_REQ, IOSF_SB_BUSY, 0,
					 10000, 0, NULL) == 0) {
		if (is_read)
			*val = intel_uncore_read_fw(uncore, VLV_IOSF_DATA);
		err = 0;
	} else {
		drm_dbg(&i915->drm, "IOSF sideband finish wait (%s) timed out\n",
			is_read ? "read" : "write");
		err = -ETIMEDOUT;
	}

	preempt_enable();

	return err;
}

u32 vlv_punit_read(struct drm_i915_private *i915, u32 addr)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
			SB_CRRDDA_NP, addr, &val);

	return val;
}

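/*
 * Note that the read helpers swallow vlv_sideband_rw() errors and simply
 * return 0 if the transaction failed. A typical punit access, sketched
 * with the vlv_punit_get/put() wrappers from intel_sideband.h (register
 * name illustrative):
 *
 *	vlv_punit_get(i915);
 *	val = vlv_punit_read(i915, PUNIT_REG_GPU_FREQ_STS);
 *	vlv_punit_put(i915);
 */
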
int vlv_punit_write(struct drm_i915_private *i915, u32 addr, u32 val)
{
	return vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_PUNIT,
			       SB_CRWRDA_NP, addr, &val);
}

u32 vlv_bunit_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_bunit_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_BUNIT,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_nc_read(struct drm_i915_private *i915, u8 addr)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_NC,
			SB_CRRDDA_NP, addr, &val);

	return val;
}

u32 vlv_iosf_sb_read(struct drm_i915_private *i915, u8 port, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_iosf_sb_write(struct drm_i915_private *i915,
		       u8 port, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), port,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_cck_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_cck_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCK,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_ccu_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
			SB_CRRDDA_NP, reg, &val);

	return val;
}

void vlv_ccu_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, PCI_DEVFN(0, 0), IOSF_PORT_CCU,
			SB_CRWRDA_NP, reg, &val);
}

u32 vlv_dpio_read(struct drm_i915_private *i915, enum pipe pipe, int reg)
{
	int port = i915->dpio_phy_iosf_port[DPIO_PHY(pipe)];
	u32 val = 0;

	vlv_sideband_rw(i915, DPIO_DEVFN, port, SB_MRD_NP, reg, &val);

	/*
	 * FIXME: There might be some registers where all 1's is a valid value,
	 * so ideally we should check the register offset instead...
	 */
	WARN(val == 0xffffffff, "DPIO read pipe %c reg 0x%x == 0x%x\n",
	     pipe_name(pipe), reg, val);

	return val;
}

void vlv_dpio_write(struct drm_i915_private *i915,
		    enum pipe pipe, int reg, u32 val)
{
	vlv_sideband_rw(i915, DPIO_DEVFN,
			i915->dpio_phy_iosf_port[DPIO_PHY(pipe)],
			SB_MWR_NP, reg, &val);
}

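/*
 * DPIO accesses go out as MMIO cycles (SB_MRD_NP/SB_MWR_NP) rather than
 * private register cycles, and must hold the DPIO sideband port. A sketch
 * (assuming the vlv_dpio_get/put() wrappers from intel_sideband.h; the
 * register macro is illustrative):
 *
 *	vlv_dpio_get(i915);
 *	val = vlv_dpio_read(i915, pipe, VLV_PLL_DW3(ch));
 *	vlv_dpio_write(i915, pipe, VLV_PLL_DW3(ch), val);
 *	vlv_dpio_put(i915);
 */
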
u32 vlv_flisdsi_read(struct drm_i915_private *i915, u32 reg)
{
	u32 val = 0;

	vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRRDDA_NP,
			reg, &val);
	return val;
}

void vlv_flisdsi_write(struct drm_i915_private *i915, u32 reg, u32 val)
{
	vlv_sideband_rw(i915, DPIO_DEVFN, IOSF_PORT_FLISDSI, SB_CRWRDA_NP,
			reg, &val);
}

/* SBI access */
static int intel_sbi_rw(struct drm_i915_private *i915, u16 reg,
			enum intel_sbi_destination destination,
			u32 *val, bool is_read)
{
	struct intel_uncore *uncore = &i915->uncore;
	u32 cmd;

	lockdep_assert_held(&i915->sb_lock);

	if (intel_wait_for_register_fw(uncore,
				       SBI_CTL_STAT, SBI_BUSY, 0,
				       100)) {
		drm_err(&i915->drm,
			"timeout waiting for SBI to become ready\n");
		return -EBUSY;
	}

	intel_uncore_write_fw(uncore, SBI_ADDR, (u32)reg << 16);
	intel_uncore_write_fw(uncore, SBI_DATA, is_read ? 0 : *val);

	if (destination == SBI_ICLK)
		cmd = SBI_CTL_DEST_ICLK | SBI_CTL_OP_CRRD;
	else
		cmd = SBI_CTL_DEST_MPHY | SBI_CTL_OP_IORD;
	if (!is_read)
		cmd |= BIT(8); /* the write opcode is the read opcode with bit 8 set */
	intel_uncore_write_fw(uncore, SBI_CTL_STAT, cmd | SBI_BUSY);

	if (__intel_wait_for_register_fw(uncore,
					 SBI_CTL_STAT, SBI_BUSY, 0,
					 100, 100, &cmd)) {
		drm_err(&i915->drm,
			"timeout waiting for SBI to complete %s\n",
			is_read ? "read" : "write");
		return -ETIMEDOUT;
	}

	if (cmd & SBI_RESPONSE_FAIL) {
		drm_err(&i915->drm, "error during SBI %s of reg %x\n",
			is_read ? "read" : "write", reg);
		return -ENXIO;
	}

	if (is_read)
		*val = intel_uncore_read_fw(uncore, SBI_DATA);

	return 0;
}

u32 intel_sbi_read(struct drm_i915_private *i915, u16 reg,
		   enum intel_sbi_destination destination)
{
	u32 result = 0;

	intel_sbi_rw(i915, reg, destination, &result, true);

	return result;
}

void intel_sbi_write(struct drm_i915_private *i915, u16 reg, u32 value,
		     enum intel_sbi_destination destination)
{
	intel_sbi_rw(i915, reg, destination, &value, false);
}

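/*
 * SBI is the PCH (LPT/WPT) counterpart of the VLV sideband, used e.g. to
 * program the iCLK PLL. A read-modify-write sketch under i915->sb_lock
 * (register and bit names illustrative):
 *
 *	mutex_lock(&i915->sb_lock);
 *	tmp = intel_sbi_read(i915, SBI_SSCCTL6, SBI_ICLK);
 *	tmp |= SBI_SSCCTL_DISABLE;
 *	intel_sbi_write(i915, SBI_SSCCTL6, tmp, SBI_ICLK);
 *	mutex_unlock(&i915->sb_lock);
 */
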
static inline int gen6_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_UNIMPLEMENTED_CMD:
		return -ENODEV;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN6_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	case GEN6_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static inline int gen7_check_mailbox_status(u32 mbox)
{
	switch (mbox & GEN6_PCODE_ERROR_MASK) {
	case GEN6_PCODE_SUCCESS:
		return 0;
	case GEN6_PCODE_ILLEGAL_CMD:
		return -ENXIO;
	case GEN7_PCODE_TIMEOUT:
		return -ETIMEDOUT;
	case GEN7_PCODE_ILLEGAL_DATA:
		return -EINVAL;
	case GEN7_PCODE_MIN_FREQ_TABLE_GT_RATIO_OUT_OF_RANGE:
		return -EOVERFLOW;
	default:
		MISSING_CASE(mbox & GEN6_PCODE_ERROR_MASK);
		return 0;
	}
}

static int __sandybridge_pcode_rw(struct drm_i915_private *i915,
				  u32 mbox, u32 *val, u32 *val1,
				  int fast_timeout_us,
				  int slow_timeout_ms,
				  bool is_read)
{
	struct intel_uncore *uncore = &i915->uncore;

	lockdep_assert_held(&i915->sb_lock);

	/*
	 * GEN6_PCODE_* are outside of the forcewake domain, we can
	 * use the fw I915_READ variants to reduce the amount of work
	 * required when reading/writing.
	 */

	if (intel_uncore_read_fw(uncore, GEN6_PCODE_MAILBOX) & GEN6_PCODE_READY)
		return -EAGAIN;

	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA, *val);
	intel_uncore_write_fw(uncore, GEN6_PCODE_DATA1, val1 ? *val1 : 0);
	intel_uncore_write_fw(uncore,
			      GEN6_PCODE_MAILBOX, GEN6_PCODE_READY | mbox);

	if (__intel_wait_for_register_fw(uncore,
					 GEN6_PCODE_MAILBOX,
					 GEN6_PCODE_READY, 0,
					 fast_timeout_us,
					 slow_timeout_ms,
					 &mbox))
		return -ETIMEDOUT;

	if (is_read)
		*val = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA);
	if (is_read && val1)
		*val1 = intel_uncore_read_fw(uncore, GEN6_PCODE_DATA1);

	if (INTEL_GEN(i915) > 6)
		return gen7_check_mailbox_status(mbox);
	else
		return gen6_check_mailbox_status(mbox);
}

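/*
 * The pcode mailbox protocol: write the payload to GEN6_PCODE_DATA(1),
 * write the command with GEN6_PCODE_READY set to GEN6_PCODE_MAILBOX,
 * then poll for GEN6_PCODE_READY to clear; the error field of the final
 * mailbox value is translated to an errno by the checkers above. An
 * example caller (mailbox name illustrative):
 *
 *	u32 params = 0;
 *
 *	err = sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS,
 *				     &params, NULL);
 */
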
int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox,
			   u32 *val, u32 *val1)
{
	int err;

	mutex_lock(&i915->sb_lock);
	err = __sandybridge_pcode_rw(i915, mbox, val, val1,
				     500, 0,
				     true);
	mutex_unlock(&i915->sb_lock);

	if (err) {
		drm_dbg(&i915->drm,
			"warning: pcode (read from mbox %x) mailbox access failed for %ps: %d\n",
			mbox, __builtin_return_address(0), err);
	}

	return err;
}

int sandybridge_pcode_write_timeout(struct drm_i915_private *i915,
				    u32 mbox, u32 val,
				    int fast_timeout_us,
				    int slow_timeout_ms)
{
	int err;

	mutex_lock(&i915->sb_lock);
	err = __sandybridge_pcode_rw(i915, mbox, &val, NULL,
				     fast_timeout_us, slow_timeout_ms,
				     false);
	mutex_unlock(&i915->sb_lock);

	if (err) {
		drm_dbg(&i915->drm,
			"warning: pcode (write of 0x%08x to mbox %x) mailbox access failed for %ps: %d\n",
			val, mbox, __builtin_return_address(0), err);
	}

	return err;
}

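/*
 * Most writers don't care about the fast/slow timeout split and use the
 * sandybridge_pcode_write() convenience wrapper from intel_sideband.h,
 * which supplies the default timeouts (500us busy-wait, no sleeping wait):
 *
 *	err = sandybridge_pcode_write(i915, mbox, val);
 */
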
static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox,
				  u32 request, u32 reply_mask, u32 reply,
				  u32 *status)
{
	*status = __sandybridge_pcode_rw(i915, mbox, &request, NULL,
					 500, 0,
					 true);

	return *status || ((request & reply_mask) == reply);
}

/**
 * skl_pcode_request - send PCODE request until acknowledgment
 * @i915: device private
 * @mbox: PCODE mailbox ID the request is targeted for
 * @request: request ID
 * @reply_mask: mask used to check for request acknowledgment
 * @reply: value used to check for request acknowledgment
 * @timeout_base_ms: timeout for polling with preemption enabled
 *
 * Keep resending the @request to @mbox until PCODE acknowledges it, PCODE
 * reports an error or an overall timeout of @timeout_base_ms+50 ms expires.
 * The request is acknowledged once the PCODE reply dword equals @reply after
 * applying @reply_mask. Polling is first attempted with preemption enabled
 * for @timeout_base_ms; if this times out, polling continues for another
 * 50 ms with preemption disabled.
 *
 * Returns 0 on success, %-ETIMEDOUT in case of a timeout, <0 in case of some
 * other error as reported by PCODE.
 */
int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request,
		      u32 reply_mask, u32 reply, int timeout_base_ms)
{
	u32 status;
	int ret;

	mutex_lock(&i915->sb_lock);

#define COND \
	skl_pcode_try_request(i915, mbox, request, reply_mask, reply, &status)

	/*
	 * Prime the PCODE by doing a request first. Normally it guarantees
	 * that a subsequent request, at most @timeout_base_ms later, succeeds.
	 * _wait_for() doesn't guarantee when its passed condition is evaluated
	 * first, so send the first request explicitly.
	 */
	if (COND) {
		ret = 0;
		goto out;
	}
	ret = _wait_for(COND, timeout_base_ms * 1000, 10, 10);
	if (!ret)
		goto out;

	/*
	 * The above can time out if the number of requests was low (2 in the
	 * worst case) _and_ PCODE was busy for some reason even after a
	 * (queued) request and @timeout_base_ms delay. As a workaround retry
	 * the poll with preemption disabled to maximize the number of
	 * requests. Increase the timeout from @timeout_base_ms to 50ms to
	 * account for interrupts that could reduce the number of these
	 * requests, and for any quirks of the PCODE firmware that delays
	 * the request completion.
	 */
	drm_dbg_kms(&i915->drm,
		    "PCODE timeout, retrying with preemption disabled\n");
	WARN_ON_ONCE(timeout_base_ms > 3);
	preempt_disable();
	ret = wait_for_atomic(COND, 50);
	preempt_enable();

out:
	mutex_unlock(&i915->sb_lock);
	return ret ? ret : status;
#undef COND
}

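/*
 * A representative use, modelled on the SKL cdclk programming sequence
 * (mailbox/flag names from i915_reg.h; the 3 ms timeout is illustrative):
 *
 *	ret = skl_pcode_request(i915, SKL_PCODE_CDCLK_CONTROL,
 *				SKL_CDCLK_PREPARE_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE,
 *				SKL_CDCLK_READY_FOR_CHANGE, 3);
 *	if (ret)
 *		drm_err(&i915->drm,
 *			"failed to inform PCU about cdclk change (%d)\n", ret);
 */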