linux/drivers/gpu/drm/amd/amdgpu/mxgpu_nv.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

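/*
 * The mailbox control register is accessed one byte at a time: the RCV
 * byte tracks messages coming from the host, the TRN byte tracks messages
 * sent to the host. Judging from the accessors below, bit 0 of each byte
 * is the VALID flag and bit 1 the ACK flag.
 */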
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
        WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
        WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 only holds a valid message while
 * RCV_MSG_VALID is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
        return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
                                   enum idh_event event)
{
        u32 reg;

        reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
        if (reg != event)
                return -ENOENT;

        xgpu_nv_mailbox_send_ack(adev);

        return 0;
}

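/*
 * Bit 1 of the TRN control byte is the host's ack of our transmitted
 * message; hardware clears it again once TRN_MSG_VALID is dropped (see
 * the note in xgpu_nv_mailbox_trans_msg() below).
 */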
static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
        return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
        int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
        u8 reg;

        do {
                reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
                if (reg & 2)
                        return 0;

                mdelay(5);
                timeout -= 5;
        } while (timeout > 1);

        pr_err("Didn't get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

        return -ETIME;
}

static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
        int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;

        do {
                r = xgpu_nv_mailbox_rcv_msg(adev, event);
                if (!r)
                        return 0;

                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

        return -ETIME;
}

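/*
 * Send a 4-dword message to the host and wait for an ack. The handshake
 * below is: drop TRN_MSG_VALID until any stale ack clears, write the
 * request and payload into TRN_DW0..DW3, raise TRN_MSG_VALID, then poll
 * for TRN_MSG_ACK from the host.
 */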
static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
                                      enum idh_request req, u32 data1,
                                      u32 data2, u32 data3)
{
        int r;
        uint8_t trn;

        /* IMPORTANT:
         * Clear TRN_MSG_VALID to clear the host's RCV_MSG_VALID; with the
         * host's RCV_MSG_VALID cleared, hardware automatically clears the
         * host's RCV_MSG_ACK, which in turn clears the VF's TRN_MSG_ACK.
         * Otherwise the xgpu_nv_poll_ack() below would return immediately.
         */
        do {
                xgpu_nv_mailbox_set_valid(adev, false);
                trn = xgpu_nv_peek_ack(adev);
                if (trn) {
                        pr_err("trn=%x ACK should not assert! wait again!\n", trn);
                        msleep(1);
                }
        } while (trn);

        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
        WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
        xgpu_nv_mailbox_set_valid(adev, true);

        /* start to poll ack */
        r = xgpu_nv_poll_ack(adev);
        if (r)
                pr_err("Didn't get ack from pf, continuing anyway\n");

        xgpu_nv_mailbox_set_valid(adev, false);
}

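/*
 * Send an access request to the host and, for requests that expect a
 * reply, poll for the matching event: the INIT/FINI/RESET access requests
 * expect IDH_READY_TO_ACCESS_GPU, while IDH_REQ_GPU_INIT_DATA expects
 * IDH_REQ_GPU_INIT_DATA_READY.
 */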
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
{
        int r;
        enum idh_event event = -1;

        xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

        switch (req) {
        case IDH_REQ_GPU_INIT_ACCESS:
        case IDH_REQ_GPU_FINI_ACCESS:
        case IDH_REQ_GPU_RESET_ACCESS:
                event = IDH_READY_TO_ACCESS_GPU;
                break;
        case IDH_REQ_GPU_INIT_DATA:
                event = IDH_REQ_GPU_INIT_DATA_READY;
                break;
        default:
                break;
        }

        if (event != -1) {
                r = xgpu_nv_poll_msg(adev, event);
                if (r) {
                        if (req != IDH_REQ_GPU_INIT_DATA) {
                                pr_err("Didn't get msg:%d from pf, error=%d\n", event, r);
                                return r;
                        }

                        /* host doesn't support REQ_GPU_INIT_DATA handshake */
                        adev->virt.req_init_data_ver = 0;
                } else if (req == IDH_REQ_GPU_INIT_DATA) {
                        adev->virt.req_init_data_ver =
                                RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

                        /* assume V1 in case host doesn't set version number */
                        if (adev->virt.req_init_data_ver < 1)
                                adev->virt.req_init_data_ver = 1;
                }

                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
                                RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
                }
        }

        return 0;
}

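/*
 * Request a GPU reset from the host, retrying up to
 * NV_MAILBOX_POLL_MSG_REP_MAX times (presumably because a single attempt
 * can time out while the host is busy).
 */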
static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
        int ret, i = 0;

        while (i < NV_MAILBOX_POLL_MSG_REP_MAX) {
                ret = xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
                if (!ret)
                        break;
                i++;
        }

        return ret;
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
        return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
        return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
        return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("got ack interrupt, nothing to do.\n");
        return 0;
}

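/*
 * MAILBOX_INT_CNTL gates the two mailbox interrupts towards the VF.
 * Judging from the two setters below, bit 0 enables the message-valid
 * (receive) interrupt and bit 1 enables the ack interrupt.
 */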
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

        if (state == AMDGPU_IRQ_STATE_ENABLE)
                tmp |= 2;
        else
                tmp &= ~2;

        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

        return 0;
}

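/*
 * Worker for a host-initiated function-level reset (FLR): stop VF<->PF
 * data exchange, wait for the host to signal IDH_FLR_NOTIFICATION_CMPL,
 * then trigger GPU recovery if needed.
 */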
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
        int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

        /* Block amdgpu_gpu_recover till the FLR COMPLETE message is
         * received; otherwise the mailbox msg will be ruined/reset by
         * the VF FLR.
         */
        if (!down_read_trylock(&adev->reset_sem))
                return;

        amdgpu_virt_fini_data_exchange(adev);
        atomic_set(&adev->in_gpu_reset, 1);

        do {
                if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        goto flr_done;

                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

flr_done:
        atomic_set(&adev->in_gpu_reset, 0);
        up_read(&adev->reset_sem);

        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev) &&
            (!amdgpu_device_has_job_running(adev) ||
             adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
             adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
                amdgpu_device_gpu_recover(adev, NULL);
}

 303
 304static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
 305                                       struct amdgpu_irq_src *src,
 306                                       unsigned type,
 307                                       enum amdgpu_interrupt_state state)
 308{
 309        u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);
 310
 311        if (state == AMDGPU_IRQ_STATE_ENABLE)
 312                tmp |= 1;
 313        else
 314                tmp &= ~1;
 315
 316        WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);
 317
 318        return 0;
 319}
 320
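/*
 * Handler for incoming mailbox messages. Only the FLR notification needs
 * action here; it is deferred to flr_work because the completion wait
 * sleeps and so cannot run in interrupt context.
 */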
static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

        switch (event) {
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev))
                        schedule_work(&adev->virt.flr_work);
                break;
                /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the
                 * IRQ can safely ignore it here since the polling thread
                 * will handle it. Other messages, such as FLR complete,
                 * are not handled here either.
                 */
        case IDH_CLR_MSG_BUF:
        case IDH_FLR_NOTIFICATION_CMPL:
        case IDH_READY_TO_ACCESS_GPU:
        default:
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
        .set = xgpu_nv_set_mailbox_ack_irq,
        .process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
        .set = xgpu_nv_set_mailbox_rcv_irq,
        .process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->virt.ack_irq.num_types = 1;
        adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
        adev->virt.rcv_irq.num_types = 1;
        adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

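/*
 * Register the two BIF mailbox interrupt sources: as wired up below,
 * source ID 135 feeds rcv_irq and source ID 138 feeds ack_irq.
 */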
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
        if (r)
                return r;
        r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

        return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
        amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

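/*
 * Virtualization ops handed to the amdgpu virt layer; wait_reset is not
 * implemented for this ASIC.
 */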
const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
        .req_full_gpu   = xgpu_nv_request_full_gpu_access,
        .rel_full_gpu   = xgpu_nv_release_full_gpu_access,
        .req_init_data  = xgpu_nv_request_init_data,
        .reset_gpu      = xgpu_nv_request_reset,
        .wait_reset     = NULL,
        .trans_msg      = xgpu_nv_mailbox_trans_msg,
};