linux/drivers/gpu/drm/amd/amdgpu/mxgpu_ai.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_6_1_offset.h"
#include "nbio/nbio_6_1_sh_mask.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "mp/mp_9_0_offset.h"
#include "soc15.h"
#include "vega10_ih.h"
#include "soc15_common.h"
#include "mxgpu_ai.h"

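/* Acknowledge the currently received host message by writing the
 * RCV_MSG_ACK bit of the VF mailbox control register.
 */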
static void xgpu_ai_mailbox_send_ack(struct amdgpu_device *adev)
{
        WREG8(AI_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

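/* Assert or de-assert TRN_MSG_VALID to tell the host whether the transmit
 * message buffer currently holds a valid request.
 */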
static void xgpu_ai_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
        WREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the IRQ
 * routine the RCV_MSG_VALID field of BIF_BX_PF0_MAILBOX_CONTROL has already
 * been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg cannot be guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID is
 * set by the host.
 */
static enum idh_event xgpu_ai_mailbox_peek_msg(struct amdgpu_device *adev)
{
        return RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                                mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
}

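/* Consume the pending host message if it matches @event: return 0 and send
 * the ack, or -ENOENT if a different (or no) message is in the buffer.
 */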
static int xgpu_ai_mailbox_rcv_msg(struct amdgpu_device *adev,
                                   enum idh_event event)
{
        u32 reg;

        reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                                             mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW0));
        if (reg != event)
                return -ENOENT;

        xgpu_ai_mailbox_send_ack(adev);

        return 0;
}

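/* Non-blocking check of the TRN_MSG_ACK bit in the VF mailbox control byte. */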
static uint8_t xgpu_ai_peek_ack(struct amdgpu_device *adev)
{
        return RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

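/* Busy-wait (in 5 ms steps) for the host to acknowledge the message we just
 * transmitted; return 0 on ack or -ETIME once AI_MAILBOX_POLL_ACK_TIMEDOUT
 * milliseconds have elapsed.
 */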
static int xgpu_ai_poll_ack(struct amdgpu_device *adev)
{
        int timeout = AI_MAILBOX_POLL_ACK_TIMEDOUT;
        u8 reg;

        do {
                reg = RREG8(AI_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
                if (reg & 2)
                        return 0;

                mdelay(5);
                timeout -= 5;
        } while (timeout > 1);

        pr_err("Doesn't get TRN_MSG_ACK from pf in %d msec\n", AI_MAILBOX_POLL_ACK_TIMEDOUT);

        return -ETIME;
}

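/* Poll (in 10 ms steps) until the host posts @event in the receive buffer;
 * return 0 on success or -ETIME once AI_MAILBOX_POLL_MSG_TIMEDOUT
 * milliseconds have elapsed.
 */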
static int xgpu_ai_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
        int r, timeout = AI_MAILBOX_POLL_MSG_TIMEDOUT;

        do {
                r = xgpu_ai_mailbox_rcv_msg(adev, event);
                if (!r)
                        return 0;

                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

        pr_err("Doesn't get msg:%d from pf, error=%d\n", event, r);

        return -ETIME;
}

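/* Send a request to the host (PF): wait for any stale ack to clear, write the
 * request and up to three data words into the TRN message buffer, assert
 * TRN_MSG_VALID and then poll for the host's ack.
 */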
static void xgpu_ai_mailbox_trans_msg(struct amdgpu_device *adev,
                                      enum idh_request req,
                                      u32 data1, u32 data2, u32 data3)
{
        u32 reg;
        int r;
        uint8_t trn;

        /* IMPORTANT:
         * Clear TRN_MSG_VALID first to clear the host's RCV_MSG_VALID; with
         * that cleared, hardware automatically clears the host's RCV_MSG_ACK,
         * which in turn clears the VF's TRN_MSG_ACK. Otherwise the
         * xgpu_ai_poll_ack() below would return immediately.
         */
        do {
                xgpu_ai_mailbox_set_valid(adev, false);
                trn = xgpu_ai_peek_ack(adev);
                if (trn) {
                        pr_err("trn=%x ACK should not assert! wait again !\n", trn);
                        msleep(1);
                }
        } while (trn);

        reg = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                                             mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0));
        reg = REG_SET_FIELD(reg, BIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0,
                            MSGBUF_DATA, req);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW0),
                      reg);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW1),
                      data1);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW2),
                      data2);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_MSGBUF_TRN_DW3),
                      data3);

        xgpu_ai_mailbox_set_valid(adev, true);

        /* start to poll ack */
        r = xgpu_ai_poll_ack(adev);
        if (r)
                pr_err("Doesn't get ack from pf, continue\n");

        xgpu_ai_mailbox_set_valid(adev, false);
}

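/* Query the host for the SCLK or MCLK DPM table. On success the host places a
 * string offset (relative to the pf2vf shared buffer) in RCV_DW1; the string
 * is copied into @buf and its length is returned. A negative errno is
 * returned on a bad request or mailbox timeout.
 */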
static int xgpu_ai_get_pp_clk(struct amdgpu_device *adev, u32 type, char *buf)
{
        int r = 0;
        u32 req, val, size;

        if (!amdgim_is_hwperf(adev) || buf == NULL)
                return -EBADRQC;

        switch (type) {
        case PP_SCLK:
                req = IDH_IRQ_GET_PP_SCLK;
                break;
        case PP_MCLK:
                req = IDH_IRQ_GET_PP_MCLK;
                break;
        default:
                return -EBADRQC;
        }

        mutex_lock(&adev->virt.dpm_mutex);

        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
        if (!r && adev->fw_vram_usage.va != NULL) {
                val = RREG32_NO_KIQ(
                        SOC15_REG_OFFSET(NBIO, 0,
                                         mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW1));
                size = strnlen((((char *)adev->virt.fw_reserve.p_pf2vf) +
                                val), PAGE_SIZE);

                if (size < PAGE_SIZE)
                        strcpy(buf, ((char *)adev->virt.fw_reserve.p_pf2vf + val));
                else
                        size = 0;

                r = size;
                goto out;
        }

        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
        if (r)
                pr_info("%s DPM request failed",
                        (type == PP_SCLK) ? "SCLK" : "MCLK");

out:
        mutex_unlock(&adev->virt.dpm_mutex);
        return r;
}

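/* Ask the host to force a specific DPM level on behalf of this VF. Returns 0
 * once the host replies (whether it honoured the request is only logged), or
 * a negative errno if no reply arrives.
 */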
static int xgpu_ai_force_dpm_level(struct amdgpu_device *adev, u32 level)
{
        int r = 0;
        u32 req = IDH_IRQ_FORCE_DPM_LEVEL;

        if (!amdgim_is_hwperf(adev))
                return -EBADRQC;

        mutex_lock(&adev->virt.dpm_mutex);
        xgpu_ai_mailbox_trans_msg(adev, req, level, 0, 0);

        r = xgpu_ai_poll_msg(adev, IDH_SUCCESS);
        if (!r)
                goto out;

        r = xgpu_ai_poll_msg(adev, IDH_FAIL);
        if (!r)
                pr_info("DPM request failed");
        else
                pr_info("Mailbox is broken");

out:
        mutex_unlock(&adev->virt.dpm_mutex);
        return r;
}

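/* Send an access request to the host. For GPU init/fini/reset access requests
 * wait for READY_TO_ACCESS_GPU; for init and reset also retrieve the pf2vf
 * checksum key from mailbox DW2.
 */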
static int xgpu_ai_send_access_requests(struct amdgpu_device *adev,
                                        enum idh_request req)
{
        int r;

        xgpu_ai_mailbox_trans_msg(adev, req, 0, 0, 0);

        /* wait for the host's answer if this is an init/fini/reset access request */
        if (req == IDH_REQ_GPU_INIT_ACCESS ||
                req == IDH_REQ_GPU_FINI_ACCESS ||
                req == IDH_REQ_GPU_RESET_ACCESS) {
                r = xgpu_ai_poll_msg(adev, IDH_READY_TO_ACCESS_GPU);
                if (r) {
                        pr_err("Doesn't get READY_TO_ACCESS_GPU from pf, give up\n");
                        return r;
                }
                /* Retrieve checksum from mailbox2 */
                if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
                        adev->virt.fw_reserve.checksum_key =
                                RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0,
                                        mmBIF_BX_PF0_MAILBOX_MSGBUF_RCV_DW2));
                }
        }

        return 0;
}

static int xgpu_ai_request_reset(struct amdgpu_device *adev)
{
        return xgpu_ai_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_ai_request_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;

        req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
        return xgpu_ai_send_access_requests(adev, req);
}

static int xgpu_ai_release_full_gpu_access(struct amdgpu_device *adev,
                                           bool init)
{
        enum idh_request req;
        int r = 0;

        req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
        r = xgpu_ai_send_access_requests(adev, req);

        return r;
}

static int xgpu_ai_mailbox_ack_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("get ack intr and do nothing.\n");
        return 0;
}

static int xgpu_ai_set_mailbox_ack_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *source,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

        tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, ACK_INT_EN,
                            (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

        return 0;
}

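/* Deferred handler for a host-initiated FLR (function level reset)
 * notification: hold off GPU recovery until the host signals
 * FLR_NOTIFICATION_CMPL, then trigger recovery if appropriate.
 */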
static void xgpu_ai_mailbox_flr_work(struct work_struct *work)
{
        struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
        struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
        int timeout = AI_MAILBOX_POLL_FLR_TIMEDOUT;
        int locked;

        /* Block amdgpu_gpu_recover till msg FLR COMPLETE is received,
         * otherwise the mailbox msg will be ruined/reset by
         * the VF FLR.
         *
         * We can unlock lock_reset to allow "amdgpu_job_timedout"
         * to run gpu_recover() after FLR_NOTIFICATION_CMPL is received,
         * which means the host side has finished this VF's FLR.
         */
        locked = mutex_trylock(&adev->lock_reset);
        if (locked)
                adev->in_gpu_reset = 1;

        do {
                if (xgpu_ai_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
                        goto flr_done;

                msleep(10);
                timeout -= 10;
        } while (timeout > 1);

flr_done:
        if (locked) {
                adev->in_gpu_reset = 0;
                mutex_unlock(&adev->lock_reset);
        }

        /* Trigger recovery for world switch failure if no TDR */
        if (amdgpu_device_should_recover_gpu(adev)
            && adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT)
                amdgpu_device_gpu_recover(adev, NULL);
}

static int xgpu_ai_set_mailbox_rcv_irq(struct amdgpu_device *adev,
                                       struct amdgpu_irq_src *src,
                                       unsigned type,
                                       enum amdgpu_interrupt_state state)
{
        u32 tmp = RREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL));

        tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_MAILBOX_INT_CNTL, VALID_INT_EN,
                            (state == AMDGPU_IRQ_STATE_ENABLE) ? 1 : 0);
        WREG32_NO_KIQ(SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_MAILBOX_INT_CNTL), tmp);

        return 0;
}

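/* Interrupt handler for incoming host messages: an FLR notification kicks off
 * the deferred FLR work, QUERY_ALIVE is acked immediately, everything else is
 * left to the polling paths.
 */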
static int xgpu_ai_mailbox_rcv_irq(struct amdgpu_device *adev,
                                   struct amdgpu_irq_src *source,
                                   struct amdgpu_iv_entry *entry)
{
        enum idh_event event = xgpu_ai_mailbox_peek_msg(adev);

        switch (event) {
        case IDH_FLR_NOTIFICATION:
                if (amdgpu_sriov_runtime(adev))
                        schedule_work(&adev->virt.flr_work);
                break;
        case IDH_QUERY_ALIVE:
                xgpu_ai_mailbox_send_ack(adev);
                break;
        /* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
         * ignore it here since the polling thread will handle it; other msgs
         * like FLR complete are not handled here either.
         */
        case IDH_CLR_MSG_BUF:
        case IDH_FLR_NOTIFICATION_CMPL:
        case IDH_READY_TO_ACCESS_GPU:
        default:
                break;
        }

        return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_ack_irq_funcs = {
        .set = xgpu_ai_set_mailbox_ack_irq,
        .process = xgpu_ai_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_ai_mailbox_rcv_irq_funcs = {
        .set = xgpu_ai_set_mailbox_rcv_irq,
        .process = xgpu_ai_mailbox_rcv_irq,
};

void xgpu_ai_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->virt.ack_irq.num_types = 1;
        adev->virt.ack_irq.funcs = &xgpu_ai_mailbox_ack_irq_funcs;
        adev->virt.rcv_irq.num_types = 1;
        adev->virt.rcv_irq.funcs = &xgpu_ai_mailbox_rcv_irq_funcs;
}

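/* Register the BIF mailbox receive (src id 135) and ack (src id 138)
 * interrupt sources.
 */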
int xgpu_ai_mailbox_add_irq_id(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        return 0;
}

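/* Enable the mailbox receive and ack interrupts and set up the FLR work item. */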
int xgpu_ai_mailbox_get_irq(struct amdgpu_device *adev)
{
        int r;

        r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
        if (r)
                return r;
        r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
        if (r) {
                amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
                return r;
        }

        INIT_WORK(&adev->virt.flr_work, xgpu_ai_mailbox_flr_work);

        return 0;
}

void xgpu_ai_mailbox_put_irq(struct amdgpu_device *adev)
{
        amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
        amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

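/* Choose which register access paths this VF may use on top of the legacy
 * access mode.
 */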
static void xgpu_ai_init_reg_access_mode(struct amdgpu_device *adev)
{
        adev->virt.reg_access_mode = AMDGPU_VIRT_REG_ACCESS_LEGACY;

        /* Enable L1 security reg access mode by default, as non-security VF
         * will no longer be supported.
         */
        adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_RLC;

        adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_ACCESS_PSP_PRG_IH;

        adev->virt.reg_access_mode |= AMDGPU_VIRT_REG_SKIP_SEETING;
}

const struct amdgpu_virt_ops xgpu_ai_virt_ops = {
        .req_full_gpu   = xgpu_ai_request_full_gpu_access,
        .rel_full_gpu   = xgpu_ai_release_full_gpu_access,
        .reset_gpu = xgpu_ai_request_reset,
        .wait_reset = NULL,
        .trans_msg = xgpu_ai_mailbox_trans_msg,
        .get_pp_clk = xgpu_ai_get_pp_clk,
        .force_dpm_level = xgpu_ai_force_dpm_level,
        .init_reg_access_mode = xgpu_ai_init_reg_access_mode,
};