/* linux/drivers/gpu/drm/i915/intel_guc_ct.c */
   1/*
   2 * Copyright © 2016-2017 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 */
  23
  24#include "i915_drv.h"
  25#include "intel_guc_ct.h"
  26
/*
 * CT_DEBUG_DRIVER - CT-specific debug logging; compiles to a no-op unless
 * the CONFIG_DRM_I915_DEBUG_GUC Kconfig option is enabled.
 */
#ifdef CONFIG_DRM_I915_DEBUG_GUC
#define CT_DEBUG_DRIVER(...)    DRM_DEBUG_DRIVER(__VA_ARGS__)
#else
#define CT_DEBUG_DRIVER(...)    do { } while (0)
#endif
  32
/*
 * Tracks one in-flight Host-to-GuC request awaiting completion.
 * Linked into intel_guc_ct.pending_requests under ct->lock so that the
 * response handler can match it by fence.
 */
struct ct_request {
        struct list_head link;
        u32 fence;              /* fence echoed back by GuC to identify us */
        u32 status;             /* response status, written by irq handler */
        u32 response_len;       /* in: response_buf size, out: actual payload
                                 * length, both in dwords */
        u32 *response_buf;      /* optional buffer for response payload */
};
  40
/*
 * A GuC-to-Host request copied out of the RECV buffer for deferred
 * processing; queued on intel_guc_ct.incoming_requests for the worker.
 */
struct ct_incoming_request {
        struct list_head link;
        u32 msg[];      /* raw CT message: header dword + payload */
};
  45
/* Indices of the per-channel CT buffers, one per transfer direction. */
enum { CTB_SEND = 0, CTB_RECV = 1 };

/* The only buffer owner id in use (host-owned channel). */
enum { CTB_OWNER_HOST = 0 };

static void ct_incoming_request_worker_func(struct work_struct *w);
  51
  52/**
  53 * intel_guc_ct_init_early - Initialize CT state without requiring device access
  54 * @ct: pointer to CT struct
  55 */
  56void intel_guc_ct_init_early(struct intel_guc_ct *ct)
  57{
  58        /* we're using static channel owners */
  59        ct->host_channel.owner = CTB_OWNER_HOST;
  60
  61        spin_lock_init(&ct->lock);
  62        INIT_LIST_HEAD(&ct->pending_requests);
  63        INIT_LIST_HEAD(&ct->incoming_requests);
  64        INIT_WORK(&ct->worker, ct_incoming_request_worker_func);
  65}
  66
/* Resolve a CT struct back to the intel_guc that embeds it. */
static inline struct intel_guc *ct_to_guc(struct intel_guc_ct *ct)
{
        return container_of(ct, struct intel_guc, ct);
}
  71
  72static inline const char *guc_ct_buffer_type_to_str(u32 type)
  73{
  74        switch (type) {
  75        case INTEL_GUC_CT_BUFFER_TYPE_SEND:
  76                return "SEND";
  77        case INTEL_GUC_CT_BUFFER_TYPE_RECV:
  78                return "RECV";
  79        default:
  80                return "<invalid>";
  81        }
  82}
  83
  84static void guc_ct_buffer_desc_init(struct guc_ct_buffer_desc *desc,
  85                                    u32 cmds_addr, u32 size, u32 owner)
  86{
  87        CT_DEBUG_DRIVER("CT: desc %p init addr=%#x size=%u owner=%u\n",
  88                        desc, cmds_addr, size, owner);
  89        memset(desc, 0, sizeof(*desc));
  90        desc->addr = cmds_addr;
  91        desc->size = size;
  92        desc->owner = owner;
  93}
  94
/* Rewind ring pointers and clear the error flag, re-arming the buffer. */
static void guc_ct_buffer_desc_reset(struct guc_ct_buffer_desc *desc)
{
        CT_DEBUG_DRIVER("CT: desc %p reset head=%u tail=%u\n",
                        desc, desc->head, desc->tail);
        desc->head = 0;
        desc->tail = 0;
        desc->is_in_error = 0;
}
 103
 104static int guc_action_register_ct_buffer(struct intel_guc *guc,
 105                                         u32 desc_addr,
 106                                         u32 type)
 107{
 108        u32 action[] = {
 109                INTEL_GUC_ACTION_REGISTER_COMMAND_TRANSPORT_BUFFER,
 110                desc_addr,
 111                sizeof(struct guc_ct_buffer_desc),
 112                type
 113        };
 114        int err;
 115
 116        /* Can't use generic send(), CT registration must go over MMIO */
 117        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
 118        if (err)
 119                DRM_ERROR("CT: register %s buffer failed; err=%d\n",
 120                          guc_ct_buffer_type_to_str(type), err);
 121        return err;
 122}
 123
 124static int guc_action_deregister_ct_buffer(struct intel_guc *guc,
 125                                           u32 owner,
 126                                           u32 type)
 127{
 128        u32 action[] = {
 129                INTEL_GUC_ACTION_DEREGISTER_COMMAND_TRANSPORT_BUFFER,
 130                owner,
 131                type
 132        };
 133        int err;
 134
 135        /* Can't use generic send(), CT deregistration must go over MMIO */
 136        err = intel_guc_send_mmio(guc, action, ARRAY_SIZE(action), NULL, 0);
 137        if (err)
 138                DRM_ERROR("CT: deregister %s buffer failed; owner=%d err=%d\n",
 139                          guc_ct_buffer_type_to_str(type), owner, err);
 140        return err;
 141}
 142
/*
 * ctch_init - allocate and kernel-map the single backing page of a channel.
 *
 * Descriptors live in the first half of the page, command buffers in the
 * second half; see the layout diagram below.
 *
 * Return: 0 on success, negative errno on allocation or mapping failure.
 */
static int ctch_init(struct intel_guc *guc,
                     struct intel_guc_ct_channel *ctch)
{
        struct i915_vma *vma;
        void *blob;
        int err;
        int i;

        GEM_BUG_ON(ctch->vma);

        /* We allocate 1 page to hold both descriptors and both buffers.
         *       ___________.....................
         *      |desc (SEND)|                   :
         *      |___________|                   PAGE/4
         *      :___________....................:
         *      |desc (RECV)|                   :
         *      |___________|                   PAGE/4
         *      :_______________________________:
         *      |cmds (SEND)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *      |cmds (RECV)                    |
         *      |                               PAGE/4
         *      |_______________________________|
         *
         * Each message can use a maximum of 32 dwords and we don't expect to
         * have more than 1 in flight at any time, so we have enough space.
         * Some logic further ahead will rely on the fact that there is only 1
         * page and that it is always mapped, so if the size is changed the
         * other code will need updating as well.
         */

        /* allocate vma */
        vma = intel_guc_allocate_vma(guc, PAGE_SIZE);
        if (IS_ERR(vma)) {
                err = PTR_ERR(vma);
                goto err_out;
        }
        ctch->vma = vma;

        /* map first page */
        blob = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
        if (IS_ERR(blob)) {
                err = PTR_ERR(blob);
                goto err_vma;
        }
        CT_DEBUG_DRIVER("CT: vma base=%#x\n",
                        intel_guc_ggtt_offset(guc, ctch->vma));

        /* store pointers to desc (first half) and cmds (second half) */
        for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                ctch->ctbs[i].desc = blob + PAGE_SIZE/4 * i;
                ctch->ctbs[i].cmds = blob + PAGE_SIZE/4 * i + PAGE_SIZE/2;
        }

        return 0;

err_vma:
        i915_vma_unpin_and_release(&ctch->vma, 0);
err_out:
        CT_DEBUG_DRIVER("CT: channel %d initialization failed; err=%d\n",
                        ctch->owner, err);
        return err;
}
 208
/*
 * ctch_fini - release the channel vma together with its kernel mapping.
 * The channel must already be disabled.
 */
static void ctch_fini(struct intel_guc *guc,
                      struct intel_guc_ct_channel *ctch)
{
        GEM_BUG_ON(ctch->enabled);

        i915_vma_unpin_and_release(&ctch->vma, I915_VMA_RELEASE_MAP);
}
 216
/*
 * ctch_enable - (re)initialize the channel descriptors and register both
 * CT buffers with the GuC firmware.
 *
 * Return: 0 on success, negative errno if a buffer registration failed
 * (the already-registered RECV buffer is deregistered on the error path).
 */
static int ctch_enable(struct intel_guc *guc,
                       struct intel_guc_ct_channel *ctch)
{
        u32 base;
        int err;
        int i;

        GEM_BUG_ON(!ctch->vma);

        GEM_BUG_ON(ctch->enabled);

        /* vma should be already allocated and mapped */
        base = intel_guc_ggtt_offset(guc, ctch->vma);

        /* (re)initialize descriptors
         * cmds buffers are in the second half of the blob page
         */
        for (i = 0; i < ARRAY_SIZE(ctch->ctbs); i++) {
                GEM_BUG_ON((i != CTB_SEND) && (i != CTB_RECV));
                guc_ct_buffer_desc_init(ctch->ctbs[i].desc,
                                        base + PAGE_SIZE/4 * i + PAGE_SIZE/2,
                                        PAGE_SIZE/4,
                                        ctch->owner);
        }

        /* register buffers, starting with RECV buffer
         * descriptors are in first half of the blob
         */
        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_RECV,
                                            INTEL_GUC_CT_BUFFER_TYPE_RECV);
        if (unlikely(err))
                goto err_out;

        err = guc_action_register_ct_buffer(guc,
                                            base + PAGE_SIZE/4 * CTB_SEND,
                                            INTEL_GUC_CT_BUFFER_TYPE_SEND);
        if (unlikely(err))
                goto err_deregister;

        ctch->enabled = true;

        return 0;

err_deregister:
        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
err_out:
        DRM_ERROR("CT: can't open channel %d; err=%d\n", ctch->owner, err);
        return err;
}
 269
/*
 * ctch_disable - mark the channel disabled, then deregister both CT
 * buffers from the GuC firmware. Mirror of ctch_enable().
 */
static void ctch_disable(struct intel_guc *guc,
                         struct intel_guc_ct_channel *ctch)
{
        GEM_BUG_ON(!ctch->enabled);

        /* clear the flag before telling the firmware to drop the buffers */
        ctch->enabled = false;

        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_SEND);
        guc_action_deregister_ct_buffer(guc,
                                        ctch->owner,
                                        INTEL_GUC_CT_BUFFER_TYPE_RECV);
}
 284
 285static u32 ctch_get_next_fence(struct intel_guc_ct_channel *ctch)
 286{
 287        /* For now it's trivial */
 288        return ++ctch->next_fence;
 289}
 290
 291/**
 292 * DOC: CTB Host to GuC request
 293 *
 294 * Format of the CTB Host to GuC request message is as follows::
 295 *
 296 *      +------------+---------+---------+---------+---------+
 297 *      |   msg[0]   |   [1]   |   [2]   |   ...   |  [n-1]  |
 298 *      +------------+---------+---------+---------+---------+
 299 *      |   MESSAGE  |       MESSAGE PAYLOAD                 |
 300 *      +   HEADER   +---------+---------+---------+---------+
 301 *      |            |    0    |    1    |   ...   |    n    |
 302 *      +============+=========+=========+=========+=========+
 303 *      |  len >= 1  |  FENCE  |     request specific data   |
 304 *      +------+-----+---------+---------+---------+---------+
 305 *
 306 *                   ^-----------------len-------------------^
 307 */
 308
/*
 * ctb_write - append one request message to the SEND circular buffer.
 * @ctb:        the SEND CT buffer
 * @action:     action dwords; action[0] is the action code (goes into header)
 * @len:        length of @action in dwords (>= 1)
 * @fence:      fence dword, written immediately after the header
 * @want_response: request a full response message from GuC
 *
 * The written message occupies @len + 1 dwords: header, fence, payload.
 *
 * Return: 0 on success, -ENOSPC when the ring has no room for the message.
 */
static int ctb_write(struct intel_guc_ct_buffer *ctb,
                     const u32 *action,
                     u32 len /* in dwords */,
                     u32 fence,
                     bool want_response)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 used;                       /* in dwords */
        u32 header;
        u32 *cmds = ctb->cmds;
        unsigned int i;

        /* descriptor fields are in bytes and must stay dword aligned */
        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);

        /*
         * tail == head condition indicates empty. GuC FW does not support
         * using up the entire buffer to get tail == head meaning full.
         */
        if (tail < head)
                used = (size - head) + tail;
        else
                used = tail - head;

        /* make sure there is a space including extra dw for the fence */
        if (unlikely(used + len + 1 >= size))
                return -ENOSPC;

        /*
         * Write the message. The format is the following:
         * DW0: header (including action code)
         * DW1: fence
         * DW2+: action data
         */
        header = (len << GUC_CT_MSG_LEN_SHIFT) |
                 (GUC_CT_MSG_WRITE_FENCE_TO_DESC) |
                 (want_response ? GUC_CT_MSG_SEND_STATUS : 0) |
                 (action[0] << GUC_CT_MSG_ACTION_SHIFT);

        CT_DEBUG_DRIVER("CT: writing %*ph %*ph %*ph\n",
                        4, &header, 4, &fence,
                        4 * (len - 1), &action[1]);

        cmds[tail] = header;
        tail = (tail + 1) % size;

        cmds[tail] = fence;
        tail = (tail + 1) % size;

        /* action[0] went into the header, so copy only the remaining dwords */
        for (i = 1; i < len; i++) {
                cmds[tail] = action[i];
                tail = (tail + 1) % size;
        }

        /* now update desc tail (back in bytes) */
        desc->tail = tail * 4;
        GEM_BUG_ON(desc->tail > desc->size);

        return 0;
}
 374
/**
 * wait_for_ctb_desc_update - Wait for the CT buffer descriptor update.
 * @desc:       buffer descriptor
 * @fence:      response fence
 * @status:     placeholder for status
 *
 * Guc will update CT buffer descriptor with new fence and status
 * after processing the command identified by the fence. Wait for
 * specified fence and then read from the descriptor status of the
 * command.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 * *    -EPROTO no response, CT buffer is in error
 */
static int wait_for_ctb_desc_update(struct guc_ct_buffer_desc *desc,
                                    u32 fence,
                                    u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done (READ_ONCE(desc->fence) == fence)
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err)) {
                DRM_ERROR("CT: fence %u failed; reported fence=%u\n",
                          fence, desc->fence);

                if (WARN_ON(desc->is_in_error)) {
                        /* Something went wrong with the messaging, try to reset
                         * the buffer and hope for the best
                         */
                        guc_ct_buffer_desc_reset(desc);
                        err = -EPROTO;
                }
        }

        /* status is only meaningful to the caller when we return 0 */
        *status = desc->status;
        return err;
}
 424
/**
 * wait_for_ct_request_update - Wait for CT request state update.
 * @req:        pointer to pending request
 * @status:     placeholder for status
 *
 * For each sent request, Guc shall send back CT response message.
 * Our message handler will update status of tracked request once
 * response message with given fence is received. Wait here and
 * check for valid response status value.
 *
 * Return:
 * *    0 response received (status is valid)
 * *    -ETIMEDOUT no response within hardcoded timeout
 */
static int wait_for_ct_request_update(struct ct_request *req, u32 *status)
{
        int err;

        /*
         * Fast commands should complete in less than 10us, so sample quickly
         * up to that length of time, then switch to a slower sleep-wait loop.
         * No GuC command should ever take longer than 10ms.
         */
#define done INTEL_GUC_MSG_IS_RESPONSE(READ_ONCE(req->status))
        err = wait_for_us(done, 10);
        if (err)
                err = wait_for(done, 10);
#undef done

        if (unlikely(err))
                DRM_ERROR("CT: fence %u err %d\n", req->fence, err);

        /* status is only meaningful to the caller when we return 0 */
        *status = req->status;
        return err;
}
 460
/*
 * ctch_send - submit one request over the SEND buffer and wait for the reply.
 *
 * The request is tracked on ct->pending_requests so that the response
 * handler (ct_handle_response) can match it by fence and fill in
 * status/response data before we are woken.
 *
 * Return: decoded response data (>= 0) or response length on success,
 * negative errno on failure; *status holds the raw GuC status dword.
 */
static int ctch_send(struct intel_guc_ct *ct,
                     struct intel_guc_ct_channel *ctch,
                     const u32 *action,
                     u32 len,
                     u32 *response_buf,
                     u32 response_buf_size,
                     u32 *status)
{
        struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_SEND];
        struct guc_ct_buffer_desc *desc = ctb->desc;
        struct ct_request request;
        unsigned long flags;
        u32 fence;
        int err;

        GEM_BUG_ON(!ctch->enabled);
        GEM_BUG_ON(!len);
        GEM_BUG_ON(len & ~GUC_CT_MSG_LEN_MASK);
        GEM_BUG_ON(!response_buf && response_buf_size);

        fence = ctch_get_next_fence(ctch);
        request.fence = fence;
        request.status = 0;
        request.response_len = response_buf_size;
        request.response_buf = response_buf;

        /* make the request visible to the response handler before sending */
        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&request.link, &ct->pending_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        err = ctb_write(ctb, action, len, fence, !!response_buf);
        if (unlikely(err))
                goto unlink;

        /* let GuC know there is a new message to process */
        intel_guc_notify(ct_to_guc(ct));

        if (response_buf)
                err = wait_for_ct_request_update(&request, status);
        else
                err = wait_for_ctb_desc_update(desc, fence, status);
        if (unlikely(err))
                goto unlink;

        if (!INTEL_GUC_MSG_IS_RESPONSE_SUCCESS(*status)) {
                err = -EIO;
                goto unlink;
        }

        if (response_buf) {
                /* There shall be no data in the status */
                WARN_ON(INTEL_GUC_MSG_TO_DATA(request.status));
                /* Return actual response len */
                err = request.response_len;
        } else {
                /* There shall be no response payload */
                WARN_ON(request.response_len);
                /* Return data decoded from the status dword */
                err = INTEL_GUC_MSG_TO_DATA(*status);
        }

unlink:
        spin_lock_irqsave(&ct->lock, flags);
        list_del(&request.link);
        spin_unlock_irqrestore(&ct->lock, flags);

        return err;
}
 528
/*
 * Command Transport (CT) buffer based GuC send function.
 *
 * Serialized against other senders by guc->send_mutex; installed as
 * guc->send by intel_guc_ct_enable().
 */
static int intel_guc_send_ct(struct intel_guc *guc, const u32 *action, u32 len,
                             u32 *response_buf, u32 response_buf_size)
{
        struct intel_guc_ct *ct = &guc->ct;
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        u32 status = ~0; /* undefined */
        int ret;

        mutex_lock(&guc->send_mutex);

        ret = ctch_send(ct, ctch, action, len, response_buf, response_buf_size,
                        &status);
        if (unlikely(ret < 0)) {
                DRM_ERROR("CT: send action %#X failed; err=%d status=%#X\n",
                          action[0], ret, status);
        } else if (unlikely(ret)) {
                /* positive return carries response data/length, log it */
                CT_DEBUG_DRIVER("CT: send action %#x returned %d (%#x)\n",
                                action[0], ret, ret);
        }

        mutex_unlock(&guc->send_mutex);
        return ret;
}
 555
/* Extract the payload length (in dwords, header excluded) from a CT header. */
static inline unsigned int ct_header_get_len(u32 header)
{
        return (header >> GUC_CT_MSG_LEN_SHIFT) & GUC_CT_MSG_LEN_MASK;
}
 560
/* Extract the action code from a CT message header. */
static inline unsigned int ct_header_get_action(u32 header)
{
        return (header >> GUC_CT_MSG_ACTION_SHIFT) & GUC_CT_MSG_ACTION_MASK;
}
 565
 566static inline bool ct_header_is_response(u32 header)
 567{
 568        return !!(header & GUC_CT_MSG_IS_RESPONSE);
 569}
 570
/*
 * ctb_read - read one complete message from the RECV circular buffer.
 * @ctb:        the RECV CT buffer
 * @data:       output array, must hold at least GUC_CT_MSG_LEN_MASK + 1 dwords
 *
 * Return:
 * *    0 one message copied into @data (header included)
 * *    -ENODATA the buffer is empty
 * *    -EPROTO the header announces more dwords than are available
 */
static int ctb_read(struct intel_guc_ct_buffer *ctb, u32 *data)
{
        struct guc_ct_buffer_desc *desc = ctb->desc;
        u32 head = desc->head / 4;      /* in dwords */
        u32 tail = desc->tail / 4;      /* in dwords */
        u32 size = desc->size / 4;      /* in dwords */
        u32 *cmds = ctb->cmds;
        s32 available;                  /* in dwords */
        unsigned int len;
        unsigned int i;

        /* descriptor fields are in bytes and must stay dword aligned */
        GEM_BUG_ON(desc->size % 4);
        GEM_BUG_ON(desc->head % 4);
        GEM_BUG_ON(desc->tail % 4);
        GEM_BUG_ON(tail >= size);
        GEM_BUG_ON(head >= size);

        /* tail == head condition indicates empty */
        available = tail - head;
        if (unlikely(available == 0))
                return -ENODATA;

        /* beware of buffer wrap case */
        if (unlikely(available < 0))
                available += size;
        CT_DEBUG_DRIVER("CT: available %d (%u:%u)\n", available, head, tail);
        GEM_BUG_ON(available < 0);

        data[0] = cmds[head];
        head = (head + 1) % size;

        /* message len with header */
        len = ct_header_get_len(data[0]) + 1;
        if (unlikely(len > (u32)available)) {
                /* dump what we have, split across the wrap point if needed */
                DRM_ERROR("CT: incomplete message %*ph %*ph %*ph\n",
                          4, data,
                          4 * (head + available - 1 > size ?
                               size - head : available - 1), &cmds[head],
                          4 * (head + available - 1 > size ?
                               available - 1 - size + head : 0), &cmds[0]);
                return -EPROTO;
        }

        for (i = 1; i < len; i++) {
                data[i] = cmds[head];
                head = (head + 1) % size;
        }
        CT_DEBUG_DRIVER("CT: received %*ph\n", 4 * len, data);

        /* publish the new head (back in bytes) */
        desc->head = head * 4;
        return 0;
}
 623
 624/**
 625 * DOC: CTB GuC to Host response
 626 *
 627 * Format of the CTB GuC to Host response message is as follows::
 628 *
 629 *      +------------+---------+---------+---------+---------+---------+
 630 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 631 *      +------------+---------+---------+---------+---------+---------+
 632 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 633 *      +   HEADER   +---------+---------+---------+---------+---------+
 634 *      |            |    0    |    1    |    2    |   ...   |    n    |
 635 *      +============+=========+=========+=========+=========+=========+
 636 *      |  len >= 2  |  FENCE  |  STATUS |   response specific data    |
 637 *      +------+-----+---------+---------+---------+---------+---------+
 638 *
 639 *                   ^-----------------------len-----------------------^
 640 */
 641
/*
 * ct_handle_response - complete the pending request matching this response.
 *
 * Matches the response fence against ct->pending_requests, copies any
 * response payload into the request's buffer and publishes the status.
 * Runs in interrupt context (see the GEM_BUG_ON below).
 *
 * Return: 0 (unsolicited responses are only logged), -EPROTO if the
 * message itself is malformed.
 */
static int ct_handle_response(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        u32 fence;
        u32 status;
        u32 datalen;
        struct ct_request *req;
        bool found = false;

        GEM_BUG_ON(!ct_header_is_response(header));
        GEM_BUG_ON(!in_irq());

        /* Response payload shall at least include fence and status */
        if (unlikely(len < 2)) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        fence = msg[1];
        status = msg[2];
        datalen = len - 2;

        /* Format of the status follows RESPONSE message */
        if (unlikely(!INTEL_GUC_MSG_IS_RESPONSE(status))) {
                DRM_ERROR("CT: corrupted response %*ph\n", 4 * msglen, msg);
                return -EPROTO;
        }

        CT_DEBUG_DRIVER("CT: response fence %u status %#x\n", fence, status);

        spin_lock(&ct->lock);
        list_for_each_entry(req, &ct->pending_requests, link) {
                if (unlikely(fence != req->fence)) {
                        CT_DEBUG_DRIVER("CT: request %u awaits response\n",
                                        req->fence);
                        continue;
                }
                /* drop oversized payloads rather than overrun the buffer */
                if (unlikely(datalen > req->response_len)) {
                        DRM_ERROR("CT: response %u too long %*ph\n",
                                  req->fence, 4 * msglen, msg);
                        datalen = 0;
                }
                if (datalen)
                        memcpy(req->response_buf, msg + 3, 4 * datalen);
                req->response_len = datalen;
                /* publishing status is what wakes the waiter; do it last */
                WRITE_ONCE(req->status, status);
                found = true;
                break;
        }
        spin_unlock(&ct->lock);

        if (!found)
                DRM_ERROR("CT: unsolicited response %*ph\n", 4 * msglen, msg);
        return 0;
}
 699
/*
 * ct_process_request - dispatch one GuC-to-Host request to its handler.
 * Unknown actions (and handler failures) are logged and dropped.
 */
static void ct_process_request(struct intel_guc_ct *ct,
                               u32 action, u32 len, const u32 *payload)
{
        struct intel_guc *guc = ct_to_guc(ct);
        int ret;

        CT_DEBUG_DRIVER("CT: request %x %*ph\n", action, 4 * len, payload);

        switch (action) {
        case INTEL_GUC_ACTION_DEFAULT:
                ret = intel_guc_to_host_process_recv_msg(guc, payload, len);
                if (unlikely(ret))
                        goto fail_unexpected;
                break;

        default:
                /* also the landing pad for handler failures above */
fail_unexpected:
                DRM_ERROR("CT: unexpected request %x %*ph\n",
                          action, 4 * len, payload);
                break;
        }
}
 722
/*
 * ct_process_incoming_requests - pop and process one queued GuC request.
 *
 * Return: true when the incoming queue is drained (nothing left after this
 * pop, or nothing to pop at all), false when more requests remain.
 */
static bool ct_process_incoming_requests(struct intel_guc_ct *ct)
{
        unsigned long flags;
        struct ct_incoming_request *request;
        u32 header;
        u32 *payload;
        bool done;

        spin_lock_irqsave(&ct->lock, flags);
        request = list_first_entry_or_null(&ct->incoming_requests,
                                           struct ct_incoming_request, link);
        if (request)
                list_del(&request->link);
        /* check for remaining work while still holding the lock */
        done = !!list_empty(&ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        if (!request)
                return true;

        /* process outside the lock; msg[0] is the header, the rest payload */
        header = request->msg[0];
        payload = &request->msg[1];
        ct_process_request(ct,
                           ct_header_get_action(header),
                           ct_header_get_len(header),
                           payload);

        kfree(request);
        return done;
}
 752
 753static void ct_incoming_request_worker_func(struct work_struct *w)
 754{
 755        struct intel_guc_ct *ct = container_of(w, struct intel_guc_ct, worker);
 756        bool done;
 757
 758        done = ct_process_incoming_requests(ct);
 759        if (!done)
 760                queue_work(system_unbound_wq, &ct->worker);
 761}
 762
 763/**
 764 * DOC: CTB GuC to Host request
 765 *
 766 * Format of the CTB GuC to Host request message is as follows::
 767 *
 768 *      +------------+---------+---------+---------+---------+---------+
 769 *      |   msg[0]   |   [1]   |   [2]   |   [3]   |   ...   |  [n-1]  |
 770 *      +------------+---------+---------+---------+---------+---------+
 771 *      |   MESSAGE  |       MESSAGE PAYLOAD                           |
 772 *      +   HEADER   +---------+---------+---------+---------+---------+
 773 *      |            |    0    |    1    |    2    |   ...   |    n    |
 774 *      +============+=========+=========+=========+=========+=========+
 775 *      |     len    |            request specific data                |
 776 *      +------+-----+---------+---------+---------+---------+---------+
 777 *
 778 *                   ^-----------------------len-----------------------^
 779 */
 780
/*
 * ct_handle_request - copy a GuC-to-Host request out of the RECV buffer
 * and queue it for deferred processing by the worker.
 *
 * Called from the message-drain loop in irq context, hence GFP_ATOMIC.
 *
 * Return: 0, even when allocation fails (the request is dropped with an
 * error message; see the XXX below).
 */
static int ct_handle_request(struct intel_guc_ct *ct, const u32 *msg)
{
        u32 header = msg[0];
        u32 len = ct_header_get_len(header);
        u32 msglen = len + 1; /* total message length including header */
        struct ct_incoming_request *request;
        unsigned long flags;

        GEM_BUG_ON(ct_header_is_response(header));

        request = kmalloc(sizeof(*request) + 4 * msglen, GFP_ATOMIC);
        if (unlikely(!request)) {
                DRM_ERROR("CT: dropping request %*ph\n", 4 * msglen, msg);
                return 0; /* XXX: -ENOMEM ? */
        }
        memcpy(request->msg, msg, 4 * msglen);

        spin_lock_irqsave(&ct->lock, flags);
        list_add_tail(&request->link, &ct->incoming_requests);
        spin_unlock_irqrestore(&ct->lock, flags);

        queue_work(system_unbound_wq, &ct->worker);
        return 0;
}
 805
/*
 * ct_process_host_channel - drain every message currently in the RECV
 * buffer, dispatching each to the response or request handler; a protocol
 * error marks the buffer as being in error.
 */
static void ct_process_host_channel(struct intel_guc_ct *ct)
{
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        struct intel_guc_ct_buffer *ctb = &ctch->ctbs[CTB_RECV];
        u32 msg[GUC_CT_MSG_LEN_MASK + 1]; /* one extra dw for the header */
        int err = 0;

        if (!ctch->enabled)
                return;

        /* loop until the buffer is empty (-ENODATA) or a handler fails */
        do {
                err = ctb_read(ctb, msg);
                if (err)
                        break;

                if (ct_header_is_response(msg[0]))
                        err = ct_handle_response(ct, msg);
                else
                        err = ct_handle_request(ct, msg);
        } while (!err);

        if (GEM_WARN_ON(err == -EPROTO)) {
                DRM_ERROR("CT: corrupted message detected!\n");
                ctb->desc->is_in_error = 1;
        }
}
 832
 833/*
 834 * When we're communicating with the GuC over CT, GuC uses events
 835 * to notify us about new messages being posted on the RECV buffer.
 836 */
 837static void intel_guc_to_host_event_handler_ct(struct intel_guc *guc)
 838{
 839        struct intel_guc_ct *ct = &guc->ct;
 840
 841        ct_process_host_channel(ct);
 842}
 843
/**
 * intel_guc_ct_init - Init CT communication
 * @ct: pointer to CT struct
 *
 * Allocate memory required for communication via
 * the CT channel.
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_init(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        int err;

        err = ctch_init(guc, ctch);
        if (unlikely(err)) {
                DRM_ERROR("CT: can't open channel %d; err=%d\n",
                          ctch->owner, err);
                return err;
        }

        /* ctch_init() must have left us with a mapped vma */
        GEM_BUG_ON(!ctch->vma);
        return 0;
}
 869
/**
 * intel_guc_ct_fini - Fini CT communication
 * @ct: pointer to CT struct
 *
 * Deallocate memory required for communication via
 * the CT channel.
 */
void intel_guc_ct_fini(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;

        ctch_fini(guc, ctch);
}
 884
/**
 * intel_guc_ct_enable - Enable buffer based command transport.
 * @ct: pointer to CT struct
 *
 * Return: 0 on success, a negative errno code on failure.
 */
int intel_guc_ct_enable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;
        int err;

        /* already enabled is not an error, just nothing to do */
        if (ctch->enabled)
                return 0;

        err = ctch_enable(guc, ctch);
        if (unlikely(err))
                return err;

        /* Switch into cmd transport buffer based send() */
        guc->send = intel_guc_send_ct;
        guc->handler = intel_guc_to_host_event_handler_ct;
        DRM_INFO("CT: %s\n", enableddisabled(true));
        return 0;
}
 910
/**
 * intel_guc_ct_disable - Disable buffer based command transport.
 * @ct: pointer to CT struct
 */
void intel_guc_ct_disable(struct intel_guc_ct *ct)
{
        struct intel_guc *guc = ct_to_guc(ct);
        struct intel_guc_ct_channel *ctch = &ct->host_channel;

        /* already disabled is not an error, just nothing to do */
        if (!ctch->enabled)
                return;

        ctch_disable(guc, ctch);

        /* Disable send */
        guc->send = intel_guc_send_nop;
        guc->handler = intel_guc_to_host_event_handler_nop;
        DRM_INFO("CT: %s\n", enableddisabled(false));
}
 930