linux/drivers/gpu/drm/i915/intel_guc_submission.c
   1/*
   2 * Copyright © 2014 Intel Corporation
   3 *
   4 * Permission is hereby granted, free of charge, to any person obtaining a
   5 * copy of this software and associated documentation files (the "Software"),
   6 * to deal in the Software without restriction, including without limitation
   7 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
   8 * and/or sell copies of the Software, and to permit persons to whom the
   9 * Software is furnished to do so, subject to the following conditions:
  10 *
  11 * The above copyright notice and this permission notice (including the next
  12 * paragraph) shall be included in all copies or substantial portions of the
  13 * Software.
  14 *
  15 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  16 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  17 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
  18 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  19 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
  20 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
  21 * IN THE SOFTWARE.
  22 *
  23 */
  24
  25#include <linux/circ_buf.h>
  26#include <trace/events/dma_fence.h>
  27
  28#include "intel_guc_submission.h"
  29#include "intel_lrc_reg.h"
  30#include "i915_drv.h"
  31
  32#define GUC_PREEMPT_FINISHED            0x1
  33#define GUC_PREEMPT_BREADCRUMB_DWORDS   0x8
  34#define GUC_PREEMPT_BREADCRUMB_BYTES    \
  35        (sizeof(u32) * GUC_PREEMPT_BREADCRUMB_DWORDS)
  36
  37/**
  38 * DOC: GuC-based command submission
  39 *
  40 * GuC client:
   41 * An intel_guc_client refers to a submission path through GuC. Currently, there
   42 * are two clients. One of them (the execbuf_client) is charged with all
   43 * submissions to the GuC, while the other (preempt_client) is responsible for
  44 * preempting the execbuf_client. This struct is the owner of a doorbell, a
  45 * process descriptor and a workqueue (all of them inside a single gem object
  46 * that contains all required pages for these elements).
  47 *
  48 * GuC stage descriptor:
  49 * During initialization, the driver allocates a static pool of 1024 such
  50 * descriptors, and shares them with the GuC.
   51 * Currently, there exists a 1:1 mapping between an intel_guc_client and a
  52 * guc_stage_desc (via the client's stage_id), so effectively only one
  53 * gets used. This stage descriptor lets the GuC know about the doorbell,
  54 * workqueue and process descriptor. Theoretically, it also lets the GuC
  55 * know about our HW contexts (context ID, etc...), but we actually
  56 * employ a kind of submission where the GuC uses the LRCA sent via the work
   57 * item instead (the single guc_stage_desc associated with the execbuf client
  58 * contains information about the default kernel context only, but this is
  59 * essentially unused). This is called a "proxy" submission.
  60 *
  61 * The Scratch registers:
   62 * There are 16 MMIO-based registers starting from 0xC180. The kernel driver writes
  63 * a value to the action register (SOFT_SCRATCH_0) along with any data. It then
  64 * triggers an interrupt on the GuC via another register write (0xC4C8).
  65 * Firmware writes a success/fail code back to the action register after
   66 * processing the request. The kernel driver polls waiting for this update and
  67 * then proceeds.
  68 * See intel_guc_send()
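 *
 * A minimal sketch of issuing an action this way (it simply mirrors
 * __guc_allocate_doorbell() below; the action code and stage_id here are
 * only examples):
 *
 *	u32 action[] = { INTEL_GUC_ACTION_ALLOCATE_DOORBELL, stage_id };
 *	int err = intel_guc_send(guc, action, ARRAY_SIZE(action));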
  69 *
  70 * Doorbells:
   71 * Doorbells are interrupts to the GuC uKernel. A doorbell is a single cache line (QW)
  72 * mapped into process space.
  73 *
  74 * Work Items:
  75 * There are several types of work items that the host may place into a
  76 * workqueue, each with its own requirements and limitations. Currently only
  77 * WQ_TYPE_INORDER is needed to support legacy submission via GuC, which
   78 * represents an in-order queue. The kernel driver packs the ring tail pointer
   79 * and an ELSP context descriptor dword into a Work Item.
  80 * See guc_add_request()
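 *
 * Submission flow (roughly, as implemented below):
 * guc_submission_tasklet() retires completed requests from the execlist
 * ports and calls guc_dequeue() to pull new requests from the engine's
 * priority tree; guc_submit() then builds a work item for each request via
 * guc_wq_item_append() and rings the client's doorbell with
 * guc_ring_doorbell(), prompting the GuC to process the workqueue and
 * submit the context to the hardware.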
  81 *
  82 */
  83
  84static inline struct i915_priolist *to_priolist(struct rb_node *rb)
  85{
  86        return rb_entry(rb, struct i915_priolist, node);
  87}
  88
  89static inline bool is_high_priority(struct intel_guc_client *client)
  90{
  91        return (client->priority == GUC_CLIENT_PRIORITY_KMD_HIGH ||
  92                client->priority == GUC_CLIENT_PRIORITY_HIGH);
  93}
  94
  95static int reserve_doorbell(struct intel_guc_client *client)
  96{
  97        unsigned long offset;
  98        unsigned long end;
  99        u16 id;
 100
 101        GEM_BUG_ON(client->doorbell_id != GUC_DOORBELL_INVALID);
 102
 103        /*
 104         * The bitmap tracks which doorbell registers are currently in use.
 105         * It is split into two halves; the first half is used for normal
 106         * priority contexts, the second half for high-priority ones.
 107         */
 108        offset = 0;
 109        end = GUC_NUM_DOORBELLS / 2;
 110        if (is_high_priority(client)) {
 111                offset = end;
 112                end += offset;
 113        }
 114
 115        id = find_next_zero_bit(client->guc->doorbell_bitmap, end, offset);
 116        if (id == end)
 117                return -ENOSPC;
 118
 119        __set_bit(id, client->guc->doorbell_bitmap);
 120        client->doorbell_id = id;
 121        DRM_DEBUG_DRIVER("client %u (high prio=%s) reserved doorbell: %d\n",
 122                         client->stage_id, yesno(is_high_priority(client)),
 123                         id);
 124        return 0;
 125}
 126
 127static void unreserve_doorbell(struct intel_guc_client *client)
 128{
 129        GEM_BUG_ON(client->doorbell_id == GUC_DOORBELL_INVALID);
 130
 131        __clear_bit(client->doorbell_id, client->guc->doorbell_bitmap);
 132        client->doorbell_id = GUC_DOORBELL_INVALID;
 133}
 134
 135/*
 136 * Tell the GuC to allocate or deallocate a specific doorbell
 137 */
 138
 139static int __guc_allocate_doorbell(struct intel_guc *guc, u32 stage_id)
 140{
 141        u32 action[] = {
 142                INTEL_GUC_ACTION_ALLOCATE_DOORBELL,
 143                stage_id
 144        };
 145
 146        return intel_guc_send(guc, action, ARRAY_SIZE(action));
 147}
 148
 149static int __guc_deallocate_doorbell(struct intel_guc *guc, u32 stage_id)
 150{
 151        u32 action[] = {
 152                INTEL_GUC_ACTION_DEALLOCATE_DOORBELL,
 153                stage_id
 154        };
 155
 156        return intel_guc_send(guc, action, ARRAY_SIZE(action));
 157}
 158
 159static struct guc_stage_desc *__get_stage_desc(struct intel_guc_client *client)
 160{
 161        struct guc_stage_desc *base = client->guc->stage_desc_pool_vaddr;
 162
 163        return &base[client->stage_id];
 164}
 165
 166/*
 167 * Initialise, update, or clear doorbell data shared with the GuC
 168 *
 169 * These functions modify shared data and so need access to the mapped
 170 * client object which contains the page being used for the doorbell
 171 */
 172
 173static void __update_doorbell_desc(struct intel_guc_client *client, u16 new_id)
 174{
 175        struct guc_stage_desc *desc;
 176
 177        /* Update the GuC's idea of the doorbell ID */
 178        desc = __get_stage_desc(client);
 179        desc->db_id = new_id;
 180}
 181
 182static struct guc_doorbell_info *__get_doorbell(struct intel_guc_client *client)
 183{
 184        return client->vaddr + client->doorbell_offset;
 185}
 186
 187static bool has_doorbell(struct intel_guc_client *client)
 188{
 189        if (client->doorbell_id == GUC_DOORBELL_INVALID)
 190                return false;
 191
 192        return test_bit(client->doorbell_id, client->guc->doorbell_bitmap);
 193}
 194
 195static void __create_doorbell(struct intel_guc_client *client)
 196{
 197        struct guc_doorbell_info *doorbell;
 198
 199        doorbell = __get_doorbell(client);
 200        doorbell->db_status = GUC_DOORBELL_ENABLED;
 201        doorbell->cookie = 0;
 202}
 203
 204static void __destroy_doorbell(struct intel_guc_client *client)
 205{
 206        struct drm_i915_private *dev_priv = guc_to_i915(client->guc);
 207        struct guc_doorbell_info *doorbell;
 208        u16 db_id = client->doorbell_id;
 209
 210
 211        doorbell = __get_doorbell(client);
 212        doorbell->db_status = GUC_DOORBELL_DISABLED;
 213        doorbell->cookie = 0;
 214
  215        /* Doorbell release flow requires that we wait for the GEN8_DRB_VALID bit
  216         * to go to zero after updating db_status, before we call the GuC to
  217         * release the doorbell.
  218         */
 219        if (wait_for_us(!(I915_READ(GEN8_DRBREGL(db_id)) & GEN8_DRB_VALID), 10))
 220                WARN_ONCE(true, "Doorbell never became invalid after disable\n");
 221}
 222
 223static int create_doorbell(struct intel_guc_client *client)
 224{
 225        int ret;
 226
 227        __update_doorbell_desc(client, client->doorbell_id);
 228        __create_doorbell(client);
 229
 230        ret = __guc_allocate_doorbell(client->guc, client->stage_id);
 231        if (ret) {
 232                __destroy_doorbell(client);
 233                __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
 234                DRM_ERROR("Couldn't create client %u doorbell: %d\n",
 235                          client->stage_id, ret);
 236                return ret;
 237        }
 238
 239        return 0;
 240}
 241
 242static int destroy_doorbell(struct intel_guc_client *client)
 243{
 244        int ret;
 245
 246        GEM_BUG_ON(!has_doorbell(client));
 247
 248        __destroy_doorbell(client);
 249        ret = __guc_deallocate_doorbell(client->guc, client->stage_id);
 250        if (ret)
 251                DRM_ERROR("Couldn't destroy client %u doorbell: %d\n",
 252                          client->stage_id, ret);
 253
 254        __update_doorbell_desc(client, GUC_DOORBELL_INVALID);
 255
 256        return ret;
 257}
 258
 259static unsigned long __select_cacheline(struct intel_guc *guc)
 260{
 261        unsigned long offset;
 262
 263        /* Doorbell uses a single cache line within a page */
 264        offset = offset_in_page(guc->db_cacheline);
 265
  266        /* Move to the next cache line to reduce contention */
 267        guc->db_cacheline += cache_line_size();
 268
 269        DRM_DEBUG_DRIVER("reserved cacheline 0x%lx, next 0x%x, linesize %u\n",
 270                         offset, guc->db_cacheline, cache_line_size());
 271        return offset;
 272}
 273
 274static inline struct guc_process_desc *
 275__get_process_desc(struct intel_guc_client *client)
 276{
 277        return client->vaddr + client->proc_desc_offset;
 278}
 279
 280/*
 281 * Initialise the process descriptor shared with the GuC firmware.
 282 */
 283static void guc_proc_desc_init(struct intel_guc *guc,
 284                               struct intel_guc_client *client)
 285{
 286        struct guc_process_desc *desc;
 287
 288        desc = memset(__get_process_desc(client), 0, sizeof(*desc));
 289
 290        /*
 291         * XXX: pDoorbell and WQVBaseAddress are pointers in process address
 292         * space for ring3 clients (set them as in mmap_ioctl) or kernel
 293         * space for kernel clients (map on demand instead? May make debug
 294         * easier to have it mapped).
 295         */
 296        desc->wq_base_addr = 0;
 297        desc->db_base_addr = 0;
 298
 299        desc->stage_id = client->stage_id;
 300        desc->wq_size_bytes = GUC_WQ_SIZE;
 301        desc->wq_status = WQ_STATUS_ACTIVE;
 302        desc->priority = client->priority;
 303}
 304
 305static int guc_stage_desc_pool_create(struct intel_guc *guc)
 306{
 307        struct i915_vma *vma;
 308        void *vaddr;
 309
 310        vma = intel_guc_allocate_vma(guc,
 311                                     PAGE_ALIGN(sizeof(struct guc_stage_desc) *
 312                                     GUC_MAX_STAGE_DESCRIPTORS));
 313        if (IS_ERR(vma))
 314                return PTR_ERR(vma);
 315
 316        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 317        if (IS_ERR(vaddr)) {
 318                i915_vma_unpin_and_release(&vma);
 319                return PTR_ERR(vaddr);
 320        }
 321
 322        guc->stage_desc_pool = vma;
 323        guc->stage_desc_pool_vaddr = vaddr;
 324        ida_init(&guc->stage_ids);
 325
 326        return 0;
 327}
 328
 329static void guc_stage_desc_pool_destroy(struct intel_guc *guc)
 330{
 331        ida_destroy(&guc->stage_ids);
 332        i915_gem_object_unpin_map(guc->stage_desc_pool->obj);
 333        i915_vma_unpin_and_release(&guc->stage_desc_pool);
 334}
 335
 336/*
 337 * Initialise/clear the stage descriptor shared with the GuC firmware.
 338 *
 339 * This descriptor tells the GuC where (in GGTT space) to find the important
 340 * data structures relating to this client (doorbell, process descriptor,
 341 * write queue, etc).
 342 */
 343static void guc_stage_desc_init(struct intel_guc *guc,
 344                                struct intel_guc_client *client)
 345{
 346        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 347        struct intel_engine_cs *engine;
 348        struct i915_gem_context *ctx = client->owner;
 349        struct guc_stage_desc *desc;
 350        unsigned int tmp;
 351        u32 gfx_addr;
 352
 353        desc = __get_stage_desc(client);
 354        memset(desc, 0, sizeof(*desc));
 355
 356        desc->attribute = GUC_STAGE_DESC_ATTR_ACTIVE |
 357                          GUC_STAGE_DESC_ATTR_KERNEL;
 358        if (is_high_priority(client))
 359                desc->attribute |= GUC_STAGE_DESC_ATTR_PREEMPT;
 360        desc->stage_id = client->stage_id;
 361        desc->priority = client->priority;
 362        desc->db_id = client->doorbell_id;
 363
 364        for_each_engine_masked(engine, dev_priv, client->engines, tmp) {
 365                struct intel_context *ce = &ctx->engine[engine->id];
 366                u32 guc_engine_id = engine->guc_id;
 367                struct guc_execlist_context *lrc = &desc->lrc[guc_engine_id];
 368
  369                /* TODO: We have a design issue to be solved here. We only know
  370                 * which engine the user will use when we receive the first batch,
  371                 * but the GuC expects the lrc and ring to be pinned here. This is
  372                 * not an issue for the default context, which is the only one that
  373                 * owns a GuC client for now, but any future owner of a GuC client
  374                 * will need to make sure the lrc is pinned prior to entering here.
  375                 */
 376                if (!ce->state)
 377                        break;  /* XXX: continue? */
 378
 379                /*
 380                 * XXX: When this is a GUC_STAGE_DESC_ATTR_KERNEL client (proxy
 381                 * submission or, in other words, not using a direct submission
 382                 * model) the KMD's LRCA is not used for any work submission.
 383                 * Instead, the GuC uses the LRCA of the user mode context (see
 384                 * guc_add_request below).
 385                 */
 386                lrc->context_desc = lower_32_bits(ce->lrc_desc);
 387
 388                /* The state page is after PPHWSP */
 389                lrc->ring_lrca =
 390                        guc_ggtt_offset(ce->state) + LRC_STATE_PN * PAGE_SIZE;
 391
 392                /* XXX: In direct submission, the GuC wants the HW context id
 393                 * here. In proxy submission, it wants the stage id
 394                 */
 395                lrc->context_id = (client->stage_id << GUC_ELC_CTXID_OFFSET) |
 396                                (guc_engine_id << GUC_ELC_ENGINE_OFFSET);
 397
 398                lrc->ring_begin = guc_ggtt_offset(ce->ring->vma);
 399                lrc->ring_end = lrc->ring_begin + ce->ring->size - 1;
 400                lrc->ring_next_free_location = lrc->ring_begin;
 401                lrc->ring_current_tail_pointer_value = 0;
 402
 403                desc->engines_used |= (1 << guc_engine_id);
 404        }
 405
 406        DRM_DEBUG_DRIVER("Host engines 0x%x => GuC engines used 0x%x\n",
 407                         client->engines, desc->engines_used);
 408        WARN_ON(desc->engines_used == 0);
 409
 410        /*
 411         * The doorbell, process descriptor, and workqueue are all parts
 412         * of the client object, which the GuC will reference via the GGTT
 413         */
 414        gfx_addr = guc_ggtt_offset(client->vma);
 415        desc->db_trigger_phy = sg_dma_address(client->vma->pages->sgl) +
 416                                client->doorbell_offset;
 417        desc->db_trigger_cpu = ptr_to_u64(__get_doorbell(client));
 418        desc->db_trigger_uk = gfx_addr + client->doorbell_offset;
 419        desc->process_desc = gfx_addr + client->proc_desc_offset;
 420        desc->wq_addr = gfx_addr + GUC_DB_SIZE;
 421        desc->wq_size = GUC_WQ_SIZE;
 422
 423        desc->desc_private = ptr_to_u64(client);
 424}
 425
 426static void guc_stage_desc_fini(struct intel_guc *guc,
 427                                struct intel_guc_client *client)
 428{
 429        struct guc_stage_desc *desc;
 430
 431        desc = __get_stage_desc(client);
 432        memset(desc, 0, sizeof(*desc));
 433}
 434
 435/* Construct a Work Item and append it to the GuC's Work Queue */
 436static void guc_wq_item_append(struct intel_guc_client *client,
 437                               u32 target_engine, u32 context_desc,
 438                               u32 ring_tail, u32 fence_id)
 439{
 440        /* wqi_len is in DWords, and does not include the one-word header */
 441        const size_t wqi_size = sizeof(struct guc_wq_item);
 442        const u32 wqi_len = wqi_size / sizeof(u32) - 1;
 443        struct guc_process_desc *desc = __get_process_desc(client);
 444        struct guc_wq_item *wqi;
 445        u32 wq_off;
 446
 447        lockdep_assert_held(&client->wq_lock);
 448
  449        /* For now a workqueue item is 4 DWs and the workqueue buffer is 2 pages,
  450         * so a wqi can neither straddle a page boundary nor wrap around to the
  451         * beginning of the buffer. This simplifies the implementation below.
  452         *
  453         * XXX: if that ever changes, we would need to build the data in a
  454         * temporary wqi and copy it into the workqueue buffer dword by dword.
  455         */
 456        BUILD_BUG_ON(wqi_size != 16);
 457
 458        /* Free space is guaranteed. */
 459        wq_off = READ_ONCE(desc->tail);
 460        GEM_BUG_ON(CIRC_SPACE(wq_off, READ_ONCE(desc->head),
 461                              GUC_WQ_SIZE) < wqi_size);
 462        GEM_BUG_ON(wq_off & (wqi_size - 1));
 463
 464        /* WQ starts from the page after doorbell / process_desc */
 465        wqi = client->vaddr + wq_off + GUC_DB_SIZE;
 466
 467        /* Now fill in the 4-word work queue item */
 468        wqi->header = WQ_TYPE_INORDER |
 469                      (wqi_len << WQ_LEN_SHIFT) |
 470                      (target_engine << WQ_TARGET_SHIFT) |
 471                      WQ_NO_WCFLUSH_WAIT;
 472        wqi->context_desc = context_desc;
 473        wqi->submit_element_info = ring_tail << WQ_RING_TAIL_SHIFT;
 474        GEM_BUG_ON(ring_tail > WQ_RING_TAIL_MAX);
 475        wqi->fence_id = fence_id;
 476
 477        /* Make the update visible to GuC */
 478        WRITE_ONCE(desc->tail, (wq_off + wqi_size) & (GUC_WQ_SIZE - 1));
 479}
 480
 481static void guc_reset_wq(struct intel_guc_client *client)
 482{
 483        struct guc_process_desc *desc = __get_process_desc(client);
 484
 485        desc->head = 0;
 486        desc->tail = 0;
 487}
 488
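/*
 * Ring the client's doorbell by bumping the cookie in the doorbell
 * cacheline; the doorbell is how the host interrupts the GuC to tell it
 * there is new work in the client's workqueue. Zero is treated as a
 * reserved cookie value, hence the cookie + 1 ?: cookie + 2 below.
 */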
 489static void guc_ring_doorbell(struct intel_guc_client *client)
 490{
 491        struct guc_doorbell_info *db;
 492        u32 cookie;
 493
 494        lockdep_assert_held(&client->wq_lock);
 495
  496        /* pointer to the current doorbell cacheline */
 497        db = __get_doorbell(client);
 498
 499        /*
  500         * We're not expecting the doorbell cookie to change behind our back;
  501         * we also need to treat 0 as a reserved value.
 502         */
 503        cookie = READ_ONCE(db->cookie);
 504        WARN_ON_ONCE(xchg(&db->cookie, cookie + 1 ?: cookie + 2) != cookie);
 505
  506        /* XXX: doorbell was lost and we need to acquire it again */
 507        GEM_BUG_ON(db->db_status != GUC_DOORBELL_ENABLED);
 508}
 509
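/*
 * Append a work item for a request to the execbuf client's workqueue and
 * ring the doorbell so that the GuC picks it up. The work item carries the
 * engine's GuC id, the lower 32 bits of the context descriptor, the new
 * ring tail (in qwords) and the request's global seqno as the fence id.
 */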
 510static void guc_add_request(struct intel_guc *guc, struct i915_request *rq)
 511{
 512        struct intel_guc_client *client = guc->execbuf_client;
 513        struct intel_engine_cs *engine = rq->engine;
 514        u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(rq->ctx,
 515                                                                 engine));
 516        u32 ring_tail = intel_ring_set_tail(rq->ring, rq->tail) / sizeof(u64);
 517
 518        spin_lock(&client->wq_lock);
 519
 520        guc_wq_item_append(client, engine->guc_id, ctx_desc,
 521                           ring_tail, rq->global_seqno);
 522        guc_ring_doorbell(client);
 523
 524        client->submissions[engine->id] += 1;
 525
 526        spin_unlock(&client->wq_lock);
 527}
 528
  529/*
  530 * When we're doing submissions using the regular execlists backend, writing to
  531 * ELSP from the CPU side is enough to make sure that writes to ringbuffer pages
  532 * pinned in the mappable aperture portion of the GGTT are visible to the
  533 * command streamer. Writes done by the GuC on our behalf do not guarantee such
  534 * ordering; therefore, to ensure the flush, we issue a POSTING_READ.
  535 */
 536static void flush_ggtt_writes(struct i915_vma *vma)
 537{
 538        struct drm_i915_private *dev_priv = to_i915(vma->obj->base.dev);
 539
 540        if (i915_vma_is_map_and_fenceable(vma))
 541                POSTING_READ_FW(GUC_STATUS);
 542}
 543
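/*
 * Worker that asks the GuC to preempt an engine to idle: it queues a work
 * item for the preempt client, whose ring writes GUC_PREEMPT_FINISHED into
 * the HWSP (see guc_fill_preempt_context()), and then sends the
 * INTEL_GUC_ACTION_REQUEST_PREEMPTION action. If the send fails, the
 * preempt flag is cleared and the submission tasklet is kicked again.
 */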
 544static void inject_preempt_context(struct work_struct *work)
 545{
 546        struct guc_preempt_work *preempt_work =
 547                container_of(work, typeof(*preempt_work), work);
 548        struct intel_engine_cs *engine = preempt_work->engine;
 549        struct intel_guc *guc = container_of(preempt_work, typeof(*guc),
 550                                             preempt_work[engine->id]);
 551        struct intel_guc_client *client = guc->preempt_client;
 552        struct guc_stage_desc *stage_desc = __get_stage_desc(client);
 553        u32 ctx_desc = lower_32_bits(intel_lr_context_descriptor(client->owner,
 554                                                                 engine));
 555        u32 data[7];
 556
 557        /*
 558         * The ring contains commands to write GUC_PREEMPT_FINISHED into HWSP.
 559         * See guc_fill_preempt_context().
 560         */
 561        spin_lock_irq(&client->wq_lock);
 562        guc_wq_item_append(client, engine->guc_id, ctx_desc,
 563                           GUC_PREEMPT_BREADCRUMB_BYTES / sizeof(u64), 0);
 564        spin_unlock_irq(&client->wq_lock);
 565
 566        /*
 567         * If GuC firmware performs an engine reset while that engine had
 568         * a preemption pending, it will set the terminated attribute bit
 569         * on our preemption stage descriptor. GuC firmware retains all
 570         * pending work items for a high-priority GuC client, unlike the
 571         * normal-priority GuC client where work items are dropped. It
 572         * wants to make sure the preempt-to-idle work doesn't run when
 573         * scheduling resumes, and uses this bit to inform its scheduler
 574         * and presumably us as well. Our job is to clear it for the next
 575         * preemption after reset, otherwise that and future preemptions
 576         * will never complete. We'll just clear it every time.
 577         */
 578        stage_desc->attribute &= ~GUC_STAGE_DESC_ATTR_TERMINATED;
 579
 580        data[0] = INTEL_GUC_ACTION_REQUEST_PREEMPTION;
 581        data[1] = client->stage_id;
 582        data[2] = INTEL_GUC_PREEMPT_OPTION_DROP_WORK_Q |
 583                  INTEL_GUC_PREEMPT_OPTION_DROP_SUBMIT_Q;
 584        data[3] = engine->guc_id;
 585        data[4] = guc->execbuf_client->priority;
 586        data[5] = guc->execbuf_client->stage_id;
 587        data[6] = guc_ggtt_offset(guc->shared_data);
 588
 589        if (WARN_ON(intel_guc_send(guc, data, ARRAY_SIZE(data)))) {
 590                execlists_clear_active(&engine->execlists,
 591                                       EXECLISTS_ACTIVE_PREEMPT);
 592                tasklet_schedule(&engine->execlists.tasklet);
 593        }
 594}
 595
  596/*
  597 * We're using a user interrupt and a HWSP value to mark that preemption has
  598 * finished and the GPU is idle. Normally, we could unwind and continue much
  599 * as the execlists submission path does. Unfortunately, with GuC we also need
  600 * to wait for it to finish its own postprocessing before attempting to submit;
  601 * otherwise the GuC may silently ignore our submissions, and we risk losing a
  602 * request at best, or executing out of order and causing a kernel panic at worst.
  603 */
 604#define GUC_PREEMPT_POSTPROCESS_DELAY_MS 10
 605static void wait_for_guc_preempt_report(struct intel_engine_cs *engine)
 606{
 607        struct intel_guc *guc = &engine->i915->guc;
 608        struct guc_shared_ctx_data *data = guc->shared_data_vaddr;
 609        struct guc_ctx_report *report =
 610                &data->preempt_ctx_report[engine->guc_id];
 611
 612        WARN_ON(wait_for_atomic(report->report_return_status ==
 613                                INTEL_GUC_REPORT_STATUS_COMPLETE,
 614                                GUC_PREEMPT_POSTPROCESS_DELAY_MS));
 615        /*
  616         * The GuC expects us to also clear the affected context counter;
  617         * let's also reset the return status so that we don't depend on the
  618         * GuC resetting it after receiving another preempt action.
 619         */
 620        report->affected_count = 0;
 621        report->report_return_status = INTEL_GUC_REPORT_STATUS_UNKNOWN;
 622}
 623
 624/**
 625 * guc_submit() - Submit commands through GuC
 626 * @engine: engine associated with the commands
 627 *
 628 * The only error here arises if the doorbell hardware isn't functioning
  629 * as expected, which really shouldn't happen.
 630 */
 631static void guc_submit(struct intel_engine_cs *engine)
 632{
 633        struct intel_guc *guc = &engine->i915->guc;
 634        struct intel_engine_execlists * const execlists = &engine->execlists;
 635        struct execlist_port *port = execlists->port;
 636        unsigned int n;
 637
 638        for (n = 0; n < execlists_num_ports(execlists); n++) {
 639                struct i915_request *rq;
 640                unsigned int count;
 641
 642                rq = port_unpack(&port[n], &count);
 643                if (rq && count == 0) {
 644                        port_set(&port[n], port_pack(rq, ++count));
 645
 646                        flush_ggtt_writes(rq->ring->vma);
 647
 648                        guc_add_request(guc, rq);
 649                }
 650        }
 651}
 652
 653static void port_assign(struct execlist_port *port, struct i915_request *rq)
 654{
 655        GEM_BUG_ON(port_isset(port));
 656
 657        port_set(port, i915_request_get(rq));
 658}
 659
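/*
 * Move requests from the engine's priority tree onto the execlist ports,
 * coalescing consecutive requests from the same context into a single port.
 * If the first port is still busy and a higher-priority request is waiting
 * (and a preempt context is available), kick the preemption worker instead
 * of submitting more work.
 */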
 660static void guc_dequeue(struct intel_engine_cs *engine)
 661{
 662        struct intel_engine_execlists * const execlists = &engine->execlists;
 663        struct execlist_port *port = execlists->port;
 664        struct i915_request *last = NULL;
 665        const struct execlist_port * const last_port =
 666                &execlists->port[execlists->port_mask];
 667        bool submit = false;
 668        struct rb_node *rb;
 669
 670        spin_lock_irq(&engine->timeline->lock);
 671        rb = execlists->first;
 672        GEM_BUG_ON(rb_first(&execlists->queue) != rb);
 673
 674        if (port_isset(port)) {
 675                if (engine->i915->preempt_context) {
 676                        struct guc_preempt_work *preempt_work =
 677                                &engine->i915->guc.preempt_work[engine->id];
 678
 679                        if (execlists->queue_priority >
 680                            max(port_request(port)->priotree.priority, 0)) {
 681                                execlists_set_active(execlists,
 682                                                     EXECLISTS_ACTIVE_PREEMPT);
 683                                queue_work(engine->i915->guc.preempt_wq,
 684                                           &preempt_work->work);
 685                                goto unlock;
 686                        }
 687                }
 688
 689                port++;
 690                if (port_isset(port))
 691                        goto unlock;
 692        }
 693        GEM_BUG_ON(port_isset(port));
 694
 695        while (rb) {
 696                struct i915_priolist *p = to_priolist(rb);
 697                struct i915_request *rq, *rn;
 698
 699                list_for_each_entry_safe(rq, rn, &p->requests, priotree.link) {
 700                        if (last && rq->ctx != last->ctx) {
 701                                if (port == last_port) {
 702                                        __list_del_many(&p->requests,
 703                                                        &rq->priotree.link);
 704                                        goto done;
 705                                }
 706
 707                                if (submit)
 708                                        port_assign(port, last);
 709                                port++;
 710                        }
 711
 712                        INIT_LIST_HEAD(&rq->priotree.link);
 713
 714                        __i915_request_submit(rq);
 715                        trace_i915_request_in(rq, port_index(port, execlists));
 716                        last = rq;
 717                        submit = true;
 718                }
 719
 720                rb = rb_next(rb);
 721                rb_erase(&p->node, &execlists->queue);
 722                INIT_LIST_HEAD(&p->requests);
 723                if (p->priority != I915_PRIORITY_NORMAL)
 724                        kmem_cache_free(engine->i915->priorities, p);
 725        }
 726done:
 727        execlists->queue_priority = rb ? to_priolist(rb)->priority : INT_MIN;
 728        execlists->first = rb;
 729        if (submit) {
 730                port_assign(port, last);
 731                execlists_set_active(execlists, EXECLISTS_ACTIVE_USER);
 732                guc_submit(engine);
 733        }
 734
 735        /* We must always keep the beast fed if we have work piled up */
 736        GEM_BUG_ON(port_isset(execlists->port) &&
 737                   !execlists_is_active(execlists, EXECLISTS_ACTIVE_USER));
 738        GEM_BUG_ON(execlists->first && !port_isset(execlists->port));
 739
 740unlock:
 741        spin_unlock_irq(&engine->timeline->lock);
 742}
 743
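/*
 * Submission tasklet: retire completed requests from the head of the
 * execlist ports, finish any pending preempt-to-idle once the GuC has
 * written GUC_PREEMPT_FINISHED into the HWSP, and then feed more requests
 * to the GuC via guc_dequeue().
 */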
 744static void guc_submission_tasklet(unsigned long data)
 745{
 746        struct intel_engine_cs * const engine = (struct intel_engine_cs *)data;
 747        struct intel_engine_execlists * const execlists = &engine->execlists;
 748        struct execlist_port *port = execlists->port;
 749        struct i915_request *rq;
 750
 751        rq = port_request(&port[0]);
 752        while (rq && i915_request_completed(rq)) {
 753                trace_i915_request_out(rq);
 754                i915_request_put(rq);
 755
 756                execlists_port_complete(execlists, port);
 757
 758                rq = port_request(&port[0]);
 759        }
 760        if (!rq)
 761                execlists_clear_active(execlists, EXECLISTS_ACTIVE_USER);
 762
 763        if (execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT) &&
 764            intel_read_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX) ==
 765            GUC_PREEMPT_FINISHED) {
 766                execlists_cancel_port_requests(&engine->execlists);
 767                execlists_unwind_incomplete_requests(execlists);
 768
 769                wait_for_guc_preempt_report(engine);
 770
 771                execlists_clear_active(execlists, EXECLISTS_ACTIVE_PREEMPT);
 772                intel_write_status_page(engine, I915_GEM_HWS_PREEMPT_INDEX, 0);
 773        }
 774
 775        if (!execlists_is_active(execlists, EXECLISTS_ACTIVE_PREEMPT))
 776                guc_dequeue(engine);
 777}
 778
 779/*
 780 * Everything below here is concerned with setup & teardown, and is
 781 * therefore not part of the somewhat time-critical batch-submission
 782 * path of guc_submit() above.
 783 */
 784
 785/* Check that a doorbell register is in the expected state */
 786static bool doorbell_ok(struct intel_guc *guc, u16 db_id)
 787{
 788        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 789        u32 drbregl;
 790        bool valid;
 791
 792        GEM_BUG_ON(db_id >= GUC_DOORBELL_INVALID);
 793
 794        drbregl = I915_READ(GEN8_DRBREGL(db_id));
 795        valid = drbregl & GEN8_DRB_VALID;
 796
 797        if (test_bit(db_id, guc->doorbell_bitmap) == valid)
 798                return true;
 799
 800        DRM_DEBUG_DRIVER("Doorbell %d has unexpected state (0x%x): valid=%s\n",
 801                         db_id, drbregl, yesno(valid));
 802
 803        return false;
 804}
 805
 806static bool guc_verify_doorbells(struct intel_guc *guc)
 807{
 808        u16 db_id;
 809
 810        for (db_id = 0; db_id < GUC_NUM_DOORBELLS; ++db_id)
 811                if (!doorbell_ok(guc, db_id))
 812                        return false;
 813
 814        return true;
 815}
 816
 817static int guc_clients_doorbell_init(struct intel_guc *guc)
 818{
 819        int ret;
 820
 821        ret = create_doorbell(guc->execbuf_client);
 822        if (ret)
 823                return ret;
 824
 825        if (guc->preempt_client) {
 826                ret = create_doorbell(guc->preempt_client);
 827                if (ret) {
 828                        destroy_doorbell(guc->execbuf_client);
 829                        return ret;
 830                }
 831        }
 832
 833        return 0;
 834}
 835
 836static void guc_clients_doorbell_fini(struct intel_guc *guc)
 837{
 838        /*
 839         * By the time we're here, GuC has already been reset.
 840         * Instead of trying (in vain) to communicate with it, let's just
 841         * cleanup the doorbell HW and our internal state.
 842         */
 843        if (guc->preempt_client) {
 844                __destroy_doorbell(guc->preempt_client);
 845                __update_doorbell_desc(guc->preempt_client,
 846                                       GUC_DOORBELL_INVALID);
 847        }
 848        __destroy_doorbell(guc->execbuf_client);
 849        __update_doorbell_desc(guc->execbuf_client, GUC_DOORBELL_INVALID);
 850}
 851
 852/**
 853 * guc_client_alloc() - Allocate an intel_guc_client
 854 * @dev_priv:   driver private data structure
 855 * @engines:    The set of engines to enable for this client
  856 * @priority:   one of the four priority levels: _CRITICAL, _HIGH, _NORMAL or
  857 *              _LOW. The kernel client that replaces ExecList submission is
  858 *              created with NORMAL priority. The priority of a scheduler client
  859 *              can be HIGH, while a preemption context can use CRITICAL.
 860 * @ctx:        the context that owns the client (we use the default render
 861 *              context)
 862 *
  863 * Return:      An intel_guc_client object on success, else an error pointer.
 864 */
 865static struct intel_guc_client *
 866guc_client_alloc(struct drm_i915_private *dev_priv,
 867                 u32 engines,
 868                 u32 priority,
 869                 struct i915_gem_context *ctx)
 870{
 871        struct intel_guc_client *client;
 872        struct intel_guc *guc = &dev_priv->guc;
 873        struct i915_vma *vma;
 874        void *vaddr;
 875        int ret;
 876
 877        client = kzalloc(sizeof(*client), GFP_KERNEL);
 878        if (!client)
 879                return ERR_PTR(-ENOMEM);
 880
 881        client->guc = guc;
 882        client->owner = ctx;
 883        client->engines = engines;
 884        client->priority = priority;
 885        client->doorbell_id = GUC_DOORBELL_INVALID;
 886        spin_lock_init(&client->wq_lock);
 887
 888        ret = ida_simple_get(&guc->stage_ids, 0, GUC_MAX_STAGE_DESCRIPTORS,
 889                             GFP_KERNEL);
 890        if (ret < 0)
 891                goto err_client;
 892
 893        client->stage_id = ret;
 894
  895        /* The first page is doorbell/proc_desc. The two following pages are the wq. */
 896        vma = intel_guc_allocate_vma(guc, GUC_DB_SIZE + GUC_WQ_SIZE);
 897        if (IS_ERR(vma)) {
 898                ret = PTR_ERR(vma);
 899                goto err_id;
 900        }
 901
 902        /* We'll keep just the first (doorbell/proc) page permanently kmap'd. */
 903        client->vma = vma;
 904
 905        vaddr = i915_gem_object_pin_map(vma->obj, I915_MAP_WB);
 906        if (IS_ERR(vaddr)) {
 907                ret = PTR_ERR(vaddr);
 908                goto err_vma;
 909        }
 910        client->vaddr = vaddr;
 911
 912        client->doorbell_offset = __select_cacheline(guc);
 913
 914        /*
 915         * Since the doorbell only requires a single cacheline, we can save
 916         * space by putting the application process descriptor in the same
 917         * page. Use the half of the page that doesn't include the doorbell.
 918         */
 919        if (client->doorbell_offset >= (GUC_DB_SIZE / 2))
 920                client->proc_desc_offset = 0;
 921        else
 922                client->proc_desc_offset = (GUC_DB_SIZE / 2);
 923
 924        guc_proc_desc_init(guc, client);
 925        guc_stage_desc_init(guc, client);
 926
 927        ret = reserve_doorbell(client);
 928        if (ret)
 929                goto err_vaddr;
 930
 931        DRM_DEBUG_DRIVER("new priority %u client %p for engine(s) 0x%x: stage_id %u\n",
 932                         priority, client, client->engines, client->stage_id);
 933        DRM_DEBUG_DRIVER("doorbell id %u, cacheline offset 0x%lx\n",
 934                         client->doorbell_id, client->doorbell_offset);
 935
 936        return client;
 937
 938err_vaddr:
 939        i915_gem_object_unpin_map(client->vma->obj);
 940err_vma:
 941        i915_vma_unpin_and_release(&client->vma);
 942err_id:
 943        ida_simple_remove(&guc->stage_ids, client->stage_id);
 944err_client:
 945        kfree(client);
 946        return ERR_PTR(ret);
 947}
 948
 949static void guc_client_free(struct intel_guc_client *client)
 950{
 951        unreserve_doorbell(client);
 952        guc_stage_desc_fini(client->guc, client);
 953        i915_gem_object_unpin_map(client->vma->obj);
 954        i915_vma_unpin_and_release(&client->vma);
 955        ida_simple_remove(&client->guc->stage_ids, client->stage_id);
 956        kfree(client);
 957}
 958
 959static inline bool ctx_save_restore_disabled(struct intel_context *ce)
 960{
 961        u32 sr = ce->lrc_reg_state[CTX_CONTEXT_CONTROL + 1];
 962
 963#define SR_DISABLED \
 964        _MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT | \
 965                           CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT)
 966
 967        return (sr & SR_DISABLED) == SR_DISABLED;
 968
 969#undef SR_DISABLED
 970}
 971
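/*
 * Pre-fill the preempt context's ring on each engine with a fixed breadcrumb
 * that writes GUC_PREEMPT_FINISHED into the HWSP and raises a user interrupt.
 * Since context save/restore is inhibited for this context, RING_HEAD and
 * RING_TAIL never move and the same breadcrumb is replayed on every
 * preemption request.
 */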
 972static void guc_fill_preempt_context(struct intel_guc *guc)
 973{
 974        struct drm_i915_private *dev_priv = guc_to_i915(guc);
 975        struct intel_guc_client *client = guc->preempt_client;
 976        struct intel_engine_cs *engine;
 977        enum intel_engine_id id;
 978
 979        for_each_engine(engine, dev_priv, id) {
 980                struct intel_context *ce = &client->owner->engine[id];
 981                u32 addr = intel_hws_preempt_done_address(engine);
 982                u32 *cs;
 983
 984                GEM_BUG_ON(!ce->pin_count);
 985
 986                /*
 987                 * We rely on this context image *not* being saved after
 988                 * preemption. This ensures that the RING_HEAD / RING_TAIL
 989                 * remain pointing at initial values forever.
 990                 */
 991                GEM_BUG_ON(!ctx_save_restore_disabled(ce));
 992
 993                cs = ce->ring->vaddr;
 994                if (id == RCS) {
 995                        cs = gen8_emit_ggtt_write_rcs(cs,
 996                                                      GUC_PREEMPT_FINISHED,
 997                                                      addr);
 998                } else {
 999                        cs = gen8_emit_ggtt_write(cs,
1000                                                  GUC_PREEMPT_FINISHED,
1001                                                  addr);
1002                        *cs++ = MI_NOOP;
1003                        *cs++ = MI_NOOP;
1004                }
1005                *cs++ = MI_USER_INTERRUPT;
1006                *cs++ = MI_NOOP;
1007
1008                GEM_BUG_ON((void *)cs - ce->ring->vaddr !=
1009                           GUC_PREEMPT_BREADCRUMB_BYTES);
1010
1011                flush_ggtt_writes(ce->ring->vma);
1012        }
1013}
1014
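/*
 * Create the execbuf client (normal priority, all engines, owned by the
 * kernel context) and, if a preempt context exists, the high-priority
 * preempt client used for preempt-to-idle.
 */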
1015static int guc_clients_create(struct intel_guc *guc)
1016{
1017        struct drm_i915_private *dev_priv = guc_to_i915(guc);
1018        struct intel_guc_client *client;
1019
1020        GEM_BUG_ON(guc->execbuf_client);
1021        GEM_BUG_ON(guc->preempt_client);
1022
1023        client = guc_client_alloc(dev_priv,
1024                                  INTEL_INFO(dev_priv)->ring_mask,
1025                                  GUC_CLIENT_PRIORITY_KMD_NORMAL,
1026                                  dev_priv->kernel_context);
1027        if (IS_ERR(client)) {
1028                DRM_ERROR("Failed to create GuC client for submission!\n");
1029                return PTR_ERR(client);
1030        }
1031        guc->execbuf_client = client;
1032
1033        if (dev_priv->preempt_context) {
1034                client = guc_client_alloc(dev_priv,
1035                                          INTEL_INFO(dev_priv)->ring_mask,
1036                                          GUC_CLIENT_PRIORITY_KMD_HIGH,
1037                                          dev_priv->preempt_context);
1038                if (IS_ERR(client)) {
1039                        DRM_ERROR("Failed to create GuC client for preemption!\n");
1040                        guc_client_free(guc->execbuf_client);
1041                        guc->execbuf_client = NULL;
1042                        return PTR_ERR(client);
1043                }
1044                guc->preempt_client = client;
1045
1046                guc_fill_preempt_context(guc);
1047        }
1048
1049        return 0;
1050}
1051
1052static void guc_clients_destroy(struct intel_guc *guc)
1053{
1054        struct intel_guc_client *client;
1055
1056        client = fetch_and_zero(&guc->preempt_client);
1057        if (client)
1058                guc_client_free(client);
1059
1060        client = fetch_and_zero(&guc->execbuf_client);
1061        guc_client_free(client);
1062}
1063
1064/*
1065 * Set up the memory resources to be shared with the GuC (via the GGTT)
1066 * at firmware loading time.
1067 */
1068int intel_guc_submission_init(struct intel_guc *guc)
1069{
1070        struct drm_i915_private *dev_priv = guc_to_i915(guc);
1071        struct intel_engine_cs *engine;
1072        enum intel_engine_id id;
1073        int ret;
1074
1075        if (guc->stage_desc_pool)
1076                return 0;
1077
1078        ret = guc_stage_desc_pool_create(guc);
1079        if (ret)
1080                return ret;
1081        /*
1082         * Keep static analysers happy, let them know that we allocated the
1083         * vma after testing that it didn't exist earlier.
1084         */
1085        GEM_BUG_ON(!guc->stage_desc_pool);
1086
1087        WARN_ON(!guc_verify_doorbells(guc));
1088        ret = guc_clients_create(guc);
1089        if (ret)
1090                return ret;
1091
1092        for_each_engine(engine, dev_priv, id) {
1093                guc->preempt_work[id].engine = engine;
1094                INIT_WORK(&guc->preempt_work[id].work, inject_preempt_context);
1095        }
1096
1097        return 0;
1098
1099}
1100
1101void intel_guc_submission_fini(struct intel_guc *guc)
1102{
1103        struct drm_i915_private *dev_priv = guc_to_i915(guc);
1104        struct intel_engine_cs *engine;
1105        enum intel_engine_id id;
1106
1107        for_each_engine(engine, dev_priv, id)
1108                cancel_work_sync(&guc->preempt_work[id].work);
1109
1110        guc_clients_destroy(guc);
1111        WARN_ON(!guc_verify_doorbells(guc));
1112
1113        guc_stage_desc_pool_destroy(guc);
1114}
1115
1116static void guc_interrupts_capture(struct drm_i915_private *dev_priv)
1117{
1118        struct intel_rps *rps = &dev_priv->gt_pm.rps;
1119        struct intel_engine_cs *engine;
1120        enum intel_engine_id id;
1121        int irqs;
1122
1123        /* tell all command streamers to forward interrupts (but not vblank)
1124         * to GuC
1125         */
1126        irqs = _MASKED_BIT_ENABLE(GFX_INTERRUPT_STEERING);
1127        for_each_engine(engine, dev_priv, id)
1128                I915_WRITE(RING_MODE_GEN7(engine), irqs);
1129
1130        /* route USER_INTERRUPT to Host, all others are sent to GuC. */
1131        irqs = GT_RENDER_USER_INTERRUPT << GEN8_RCS_IRQ_SHIFT |
1132               GT_RENDER_USER_INTERRUPT << GEN8_BCS_IRQ_SHIFT;
1133        /* These three registers have the same bit definitions */
1134        I915_WRITE(GUC_BCS_RCS_IER, ~irqs);
1135        I915_WRITE(GUC_VCS2_VCS1_IER, ~irqs);
1136        I915_WRITE(GUC_WD_VECS_IER, ~irqs);
1137
1138        /*
1139         * The REDIRECT_TO_GUC bit of the PMINTRMSK register directs all
1140         * (unmasked) PM interrupts to the GuC. All other bits of this
1141         * register *disable* generation of a specific interrupt.
1142         *
1143         * 'pm_intrmsk_mbz' indicates bits that are NOT to be set when
1144         * writing to the PM interrupt mask register, i.e. interrupts
1145         * that must not be disabled.
1146         *
1147         * If the GuC is handling these interrupts, then we must not let
1148         * the PM code disable ANY interrupt that the GuC is expecting.
1149         * So for each ENABLED (0) bit in this register, we must SET the
1150         * bit in pm_intrmsk_mbz so that it's left enabled for the GuC.
1151         * GuC needs ARAT expired interrupt unmasked hence it is set in
1152         * pm_intrmsk_mbz.
1153         *
1154         * Here we CLEAR REDIRECT_TO_GUC bit in pm_intrmsk_mbz, which will
1155         * result in the register bit being left SET!
1156         */
1157        rps->pm_intrmsk_mbz |= ARAT_EXPIRED_INTRMSK;
1158        rps->pm_intrmsk_mbz &= ~GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1159}
1160
1161static void guc_interrupts_release(struct drm_i915_private *dev_priv)
1162{
1163        struct intel_rps *rps = &dev_priv->gt_pm.rps;
1164        struct intel_engine_cs *engine;
1165        enum intel_engine_id id;
1166        int irqs;
1167
1168        /*
1169         * tell all command streamers NOT to forward interrupts or vblank
1170         * to GuC.
1171         */
1172        irqs = _MASKED_FIELD(GFX_FORWARD_VBLANK_MASK, GFX_FORWARD_VBLANK_NEVER);
1173        irqs |= _MASKED_BIT_DISABLE(GFX_INTERRUPT_STEERING);
1174        for_each_engine(engine, dev_priv, id)
1175                I915_WRITE(RING_MODE_GEN7(engine), irqs);
1176
1177        /* route all GT interrupts to the host */
1178        I915_WRITE(GUC_BCS_RCS_IER, 0);
1179        I915_WRITE(GUC_VCS2_VCS1_IER, 0);
1180        I915_WRITE(GUC_WD_VECS_IER, 0);
1181
1182        rps->pm_intrmsk_mbz |= GEN8_PMINTR_DISABLE_REDIRECT_TO_GUC;
1183        rps->pm_intrmsk_mbz &= ~ARAT_EXPIRED_INTRMSK;
1184}
1185
1186static void guc_submission_park(struct intel_engine_cs *engine)
1187{
1188        intel_engine_unpin_breadcrumbs_irq(engine);
1189}
1190
1191static void guc_submission_unpark(struct intel_engine_cs *engine)
1192{
1193        intel_engine_pin_breadcrumbs_irq(engine);
1194}
1195
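/*
 * Take over request submission from the execlists backend: reset the client
 * workqueues, create the doorbells, redirect engine interrupts to the GuC
 * and repoint the per-engine tasklets and park/unpark hooks at the GuC
 * submission paths.
 */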
1196int intel_guc_submission_enable(struct intel_guc *guc)
1197{
1198        struct drm_i915_private *dev_priv = guc_to_i915(guc);
1199        struct intel_engine_cs *engine;
1200        enum intel_engine_id id;
1201        int err;
1202
1203        /*
1204         * We're using GuC work items for submitting work through GuC. Since
1205         * we're coalescing multiple requests from a single context into a
1206         * single work item prior to assigning it to execlist_port, we can
1207         * never have more work items than the total number of ports (for all
 1208         * engines). The GuC firmware controls the HEAD of the work queue,
1209         * and it is guaranteed that it will remove the work item from the
1210         * queue before our request is completed.
1211         */
1212        BUILD_BUG_ON(ARRAY_SIZE(engine->execlists.port) *
1213                     sizeof(struct guc_wq_item) *
1214                     I915_NUM_ENGINES > GUC_WQ_SIZE);
1215
1216        GEM_BUG_ON(!guc->execbuf_client);
1217
1218        guc_reset_wq(guc->execbuf_client);
1219        if (guc->preempt_client)
1220                guc_reset_wq(guc->preempt_client);
1221
1222        err = intel_guc_sample_forcewake(guc);
1223        if (err)
1224                return err;
1225
1226        err = guc_clients_doorbell_init(guc);
1227        if (err)
1228                return err;
1229
1230        /* Take over from manual control of ELSP (execlists) */
1231        guc_interrupts_capture(dev_priv);
1232
1233        for_each_engine(engine, dev_priv, id) {
1234                struct intel_engine_execlists * const execlists =
1235                        &engine->execlists;
1236
1237                execlists->tasklet.func = guc_submission_tasklet;
1238                engine->park = guc_submission_park;
1239                engine->unpark = guc_submission_unpark;
1240
1241                engine->flags &= ~I915_ENGINE_SUPPORTS_STATS;
1242        }
1243
1244        return 0;
1245}
1246
1247void intel_guc_submission_disable(struct intel_guc *guc)
1248{
1249        struct drm_i915_private *dev_priv = guc_to_i915(guc);
1250
1251        GEM_BUG_ON(dev_priv->gt.awake); /* GT should be parked first */
1252
1253        guc_interrupts_release(dev_priv);
1254        guc_clients_doorbell_fini(guc);
1255
1256        /* Revert back to manual ELSP submission */
1257        intel_engines_reset_default_submission(dev_priv);
1258}
1259
1260#if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
1261#include "selftests/intel_guc.c"
1262#endif
1263