linux/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/sched.h>
#include <drm/drmP.h>
#include "gpu_scheduler.h"

#define CREATE_TRACE_POINTS
#include "gpu_sched_trace.h"

static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity);
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);

struct kmem_cache *sched_fence_slab;
atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);

/* Initialize a given run queue struct */
static void amd_sched_rq_init(struct amd_sched_rq *rq)
{
        spin_lock_init(&rq->lock);
        INIT_LIST_HEAD(&rq->entities);
        rq->current_entity = NULL;
}

static void amd_sched_rq_add_entity(struct amd_sched_rq *rq,
                                    struct amd_sched_entity *entity)
{
        if (!list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_add_tail(&entity->list, &rq->entities);
        spin_unlock(&rq->lock);
}

static void amd_sched_rq_remove_entity(struct amd_sched_rq *rq,
                                       struct amd_sched_entity *entity)
{
        if (list_empty(&entity->list))
                return;
        spin_lock(&rq->lock);
        list_del_init(&entity->list);
        if (rq->current_entity == entity)
                rq->current_entity = NULL;
        spin_unlock(&rq->lock);
}

/**
 * Select an entity which could provide a job to run
 *
 * @rq          The run queue to check.
 *
 * Try to find a ready entity, returns NULL if none found.
 */
static struct amd_sched_entity *
amd_sched_rq_select_entity(struct amd_sched_rq *rq)
{
        struct amd_sched_entity *entity;

        spin_lock(&rq->lock);

        entity = rq->current_entity;
        if (entity) {
                list_for_each_entry_continue(entity, &rq->entities, list) {
                        if (amd_sched_entity_is_ready(entity)) {
                                rq->current_entity = entity;
                                spin_unlock(&rq->lock);
                                return entity;
                        }
                }
        }

        list_for_each_entry(entity, &rq->entities, list) {

                if (amd_sched_entity_is_ready(entity)) {
                        rq->current_entity = entity;
                        spin_unlock(&rq->lock);
                        return entity;
                }

                if (entity == rq->current_entity)
                        break;
        }

        spin_unlock(&rq->lock);

        return NULL;
}

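/*
 * Note on amd_sched_rq_select_entity(): the first loop above continues the
 * scan right after rq->current_entity and the second loop wraps around from
 * the head of the list back to it, so ready entities on one run queue are
 * serviced round-robin instead of always starting from the list head.
 */
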
/**
 * Init a context entity used by the scheduler when submitting to a HW ring.
 *
 * @sched       The pointer to the scheduler
 * @entity      The pointer to a valid amd_sched_entity
 * @rq          The run queue this entity belongs to
 * @jobs        The max number of jobs in the job queue
 *
 * Return 0 if succeeded, negative error code on failure.
 */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
                          struct amd_sched_entity *entity,
                          struct amd_sched_rq *rq,
                          uint32_t jobs)
{
        int r;

        if (!(sched && entity && rq))
                return -EINVAL;

        memset(entity, 0, sizeof(struct amd_sched_entity));
        INIT_LIST_HEAD(&entity->list);
        entity->rq = rq;
        entity->sched = sched;

        spin_lock_init(&entity->queue_lock);
        r = kfifo_alloc(&entity->job_queue, jobs * sizeof(void *), GFP_KERNEL);
        if (r)
                return r;

        atomic_set(&entity->fence_seq, 0);
        entity->fence_context = fence_context_alloc(1);

        return 0;
}

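/*
 * Illustrative sketch (not part of the original file): how a driver might set
 * up and later tear down an entity. The ring pointer, the run queue slot and
 * the queue depth of 32 are assumptions made only for this example.
 *
 *      struct amd_sched_entity entity;
 *      int r;
 *
 *      r = amd_sched_entity_init(&ring->sched, &entity,
 *                                &ring->sched.sched_rq[0], 32);
 *      if (r)
 *              return r;
 *
 *      ... push jobs with amd_sched_entity_push_job() ...
 *
 *      amd_sched_entity_fini(&ring->sched, &entity);
 */
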
/**
 * Query if entity is initialized
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if entity is initialized, false otherwise.
 */
static bool amd_sched_entity_is_initialized(struct amd_gpu_scheduler *sched,
                                            struct amd_sched_entity *entity)
{
        return entity->sched == sched &&
                entity->rq != NULL;
}

/**
 * Check if entity is idle
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if the entity doesn't have any unscheduled jobs.
 */
static bool amd_sched_entity_is_idle(struct amd_sched_entity *entity)
{
        /* Make sure we see the latest state of the job queue */
        rmb();
        if (kfifo_is_empty(&entity->job_queue))
                return true;

        return false;
}

/**
 * Check if entity is ready
 *
 * @entity      The pointer to a valid scheduler entity
 *
 * Return true if entity could provide a job.
 */
static bool amd_sched_entity_is_ready(struct amd_sched_entity *entity)
{
        if (kfifo_is_empty(&entity->job_queue))
                return false;

        if (ACCESS_ONCE(entity->dependency))
                return false;

        return true;
}

/**
 * Destroy a context entity
 *
 * @sched       Pointer to scheduler instance
 * @entity      The pointer to a valid scheduler entity
 *
 * Cleanup and free the allocated resources.
 */
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
                           struct amd_sched_entity *entity)
{
        struct amd_sched_rq *rq = entity->rq;

        if (!amd_sched_entity_is_initialized(sched, entity))
                return;

        /*
         * The client will not queue more IBs during this fini, consume
         * existing queued IBs.
         */
        wait_event(sched->job_scheduled, amd_sched_entity_is_idle(entity));

        amd_sched_rq_remove_entity(rq, entity);
        kfifo_free(&entity->job_queue);
}

static void amd_sched_entity_wakeup(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_entity *entity =
                container_of(cb, struct amd_sched_entity, cb);
        entity->dependency = NULL;
        fence_put(f);
        amd_sched_wakeup(entity->sched);
}

static bool amd_sched_entity_add_dependency_cb(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct fence *fence = entity->dependency;
        struct amd_sched_fence *s_fence;

        if (fence->context == entity->fence_context) {
                /* We can ignore fences from ourself */
                fence_put(entity->dependency);
                return false;
        }

        s_fence = to_amd_sched_fence(fence);
        if (s_fence && s_fence->sched == sched) {
                /* Fence is from the same scheduler */
                if (test_bit(AMD_SCHED_FENCE_SCHEDULED_BIT, &fence->flags)) {
                        /* Ignore it when it is already scheduled */
                        fence_put(entity->dependency);
                        return false;
                }

                /* Wait for fence to be scheduled */
                entity->cb.func = amd_sched_entity_wakeup;
                list_add_tail(&entity->cb.node, &s_fence->scheduled_cb);
                return true;
        }

        if (!fence_add_callback(entity->dependency, &entity->cb,
                                amd_sched_entity_wakeup))
                return true;

        fence_put(entity->dependency);
        return false;
}

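/*
 * Summary of the dependency handling above: fences from the entity's own
 * context are dropped immediately, fences produced by the same scheduler only
 * need to wait until they are scheduled (not finished), and any other fence
 * gets a regular fence callback. A true return means the job stays blocked
 * until amd_sched_entity_wakeup() clears the dependency.
 */
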
static struct amd_sched_job *
amd_sched_entity_pop_job(struct amd_sched_entity *entity)
{
        struct amd_gpu_scheduler *sched = entity->sched;
        struct amd_sched_job *sched_job;

        if (!kfifo_out_peek(&entity->job_queue, &sched_job, sizeof(sched_job)))
                return NULL;

        while ((entity->dependency = sched->ops->dependency(sched_job)))
                if (amd_sched_entity_add_dependency_cb(entity))
                        return NULL;

        return sched_job;
}

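/*
 * Note that amd_sched_entity_pop_job() only peeks at the fifo; the job is
 * removed with kfifo_out() in amd_sched_main() after it has been handed to
 * the hardware, so amd_sched_entity_is_idle() keeps returning false until
 * the job is really gone.
 */
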
/**
 * Helper to submit a job to the job queue
 *
 * @sched_job   The pointer to the job to submit
 *
 * Returns true if we could submit the job.
 */
static bool amd_sched_entity_in(struct amd_sched_job *sched_job)
{
        struct amd_gpu_scheduler *sched = sched_job->sched;
        struct amd_sched_entity *entity = sched_job->s_entity;
        bool added, first = false;

        spin_lock(&entity->queue_lock);
        added = kfifo_in(&entity->job_queue, &sched_job,
                         sizeof(sched_job)) == sizeof(sched_job);

        if (added && kfifo_len(&entity->job_queue) == sizeof(sched_job))
                first = true;

        spin_unlock(&entity->queue_lock);

        /* first job wakes up scheduler */
        if (first) {
                /* Add the entity to the run queue */
                amd_sched_rq_add_entity(entity->rq, entity);
                amd_sched_wakeup(sched);
        }
        return added;
}

/**
 * Submit a job to the job queue
 *
 * @sched_job   The pointer to the job to submit
 *
 * Blocks until the job could be queued to the entity's job queue.
 */
void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
{
        struct amd_sched_entity *entity = sched_job->s_entity;

        trace_amd_sched_job(sched_job);
        wait_event(entity->sched->job_scheduled,
                   amd_sched_entity_in(sched_job));
}

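/*
 * Illustrative sketch (not part of the original file) of the push flow as a
 * driver would see it; driver_alloc_job(), driver_create_sched_fence(), ring
 * and ctx_entity are hypothetical names, while sched, s_entity and s_fence
 * are the job fields this file actually reads:
 *
 *      struct amd_sched_job *job = driver_alloc_job(ring);
 *
 *      job->sched = &ring->sched;
 *      job->s_entity = &ctx_entity;
 *      job->s_fence = driver_create_sched_fence(&ctx_entity);
 *
 *      amd_sched_entity_push_job(job);
 *
 * The push may sleep: amd_sched_entity_in() fails while the entity's kfifo is
 * full, and wait_event() retries once the scheduler thread has consumed a job
 * and woken job_scheduled.
 */
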
/**
 * Return true if we can push more jobs to the hw.
 */
static bool amd_sched_ready(struct amd_gpu_scheduler *sched)
{
        return atomic_read(&sched->hw_rq_count) <
                sched->hw_submission_limit;
}

/**
 * Wake up the scheduler when it is ready
 */
static void amd_sched_wakeup(struct amd_gpu_scheduler *sched)
{
        if (amd_sched_ready(sched))
                wake_up_interruptible(&sched->wake_up_worker);
}

/**
 * Select next entity to process
 */
static struct amd_sched_entity *
amd_sched_select_entity(struct amd_gpu_scheduler *sched)
{
        struct amd_sched_entity *entity;
        int i;

        if (!amd_sched_ready(sched))
                return NULL;

        /* Kernel run queue has higher priority than normal run queue */
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++) {
                entity = amd_sched_rq_select_entity(&sched->sched_rq[i]);
                if (entity)
                        break;
        }

        return entity;
}

static void amd_sched_process_job(struct fence *f, struct fence_cb *cb)
{
        struct amd_sched_fence *s_fence =
                container_of(cb, struct amd_sched_fence, cb);
        struct amd_gpu_scheduler *sched = s_fence->sched;
        unsigned long flags;

        atomic_dec(&sched->hw_rq_count);
        amd_sched_fence_signal(s_fence);
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                cancel_delayed_work(&s_fence->dwork);
                spin_lock_irqsave(&sched->fence_list_lock, flags);
                list_del_init(&s_fence->list);
                spin_unlock_irqrestore(&sched->fence_list_lock, flags);
        }
        trace_amd_sched_process_job(s_fence);
        fence_put(&s_fence->base);
        wake_up_interruptible(&sched->wake_up_worker);
}

static void amd_sched_fence_work_func(struct work_struct *work)
{
        struct amd_sched_fence *s_fence =
                container_of(work, struct amd_sched_fence, dwork.work);
        struct amd_gpu_scheduler *sched = s_fence->sched;
        struct amd_sched_fence *entity, *tmp;
        unsigned long flags;

        DRM_ERROR("[%s] scheduler timed out!\n", sched->name);

        /* Clean all pending fences */
        spin_lock_irqsave(&sched->fence_list_lock, flags);
        list_for_each_entry_safe(entity, tmp, &sched->fence_list, list) {
                DRM_ERROR("  fence no %u\n", entity->base.seqno);
                cancel_delayed_work(&entity->dwork);
                list_del_init(&entity->list);
                fence_put(&entity->base);
        }
        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
}

static int amd_sched_main(void *param)
{
        struct sched_param sparam = {.sched_priority = 1};
        struct amd_gpu_scheduler *sched = (struct amd_gpu_scheduler *)param;
        int r, count;

        spin_lock_init(&sched->fence_list_lock);
        INIT_LIST_HEAD(&sched->fence_list);
        sched_setscheduler(current, SCHED_FIFO, &sparam);

        while (!kthread_should_stop()) {
                struct amd_sched_entity *entity;
                struct amd_sched_fence *s_fence;
                struct amd_sched_job *sched_job;
                struct fence *fence;
                unsigned long flags;

                wait_event_interruptible(sched->wake_up_worker,
                        (entity = amd_sched_select_entity(sched)) ||
                        kthread_should_stop());

                if (!entity)
                        continue;

                sched_job = amd_sched_entity_pop_job(entity);
                if (!sched_job)
                        continue;

                s_fence = sched_job->s_fence;

                if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
                        INIT_DELAYED_WORK(&s_fence->dwork, amd_sched_fence_work_func);
                        schedule_delayed_work(&s_fence->dwork, sched->timeout);
                        spin_lock_irqsave(&sched->fence_list_lock, flags);
                        list_add_tail(&s_fence->list, &sched->fence_list);
                        spin_unlock_irqrestore(&sched->fence_list_lock, flags);
                }

                atomic_inc(&sched->hw_rq_count);
                fence = sched->ops->run_job(sched_job);
                amd_sched_fence_scheduled(s_fence);
                if (fence) {
                        r = fence_add_callback(fence, &s_fence->cb,
                                               amd_sched_process_job);
                        if (r == -ENOENT)
                                amd_sched_process_job(fence, &s_fence->cb);
                        else if (r)
                                DRM_ERROR("fence add callback failed (%d)\n", r);
                        fence_put(fence);
                } else {
                        DRM_ERROR("Failed to run job!\n");
                        amd_sched_process_job(NULL, &s_fence->cb);
                }

                count = kfifo_out(&entity->job_queue, &sched_job,
                                  sizeof(sched_job));
                WARN_ON(count != sizeof(sched_job));
                wake_up(&sched->job_scheduled);
        }
        return 0;
}

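/*
 * The main loop above does, per iteration: wait until some entity can provide
 * a job (or the thread is asked to stop), peek the next job, optionally arm
 * the timeout work, hand the job to the backend via ops->run_job(), hook
 * amd_sched_process_job() to the returned hardware fence, and finally remove
 * the job from the entity's fifo and wake job_scheduled so blocked pushers
 * and amd_sched_entity_fini() can make progress.
 */
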
/**
 * Init a gpu scheduler instance
 *
 * @sched               The pointer to the scheduler
 * @ops                 The backend operations for this scheduler.
 * @hw_submission       Number of hw submissions that can be in flight
 * @timeout             Job timeout in jiffies, or MAX_SCHEDULE_TIMEOUT to disable it
 * @name                Name used for debugging
 *
 * Return 0 on success, otherwise error code.
 */
int amd_sched_init(struct amd_gpu_scheduler *sched,
                   struct amd_sched_backend_ops *ops,
                   unsigned hw_submission, long timeout, const char *name)
{
        int i;

        sched->ops = ops;
        sched->hw_submission_limit = hw_submission;
        sched->name = name;
        sched->timeout = timeout;
        for (i = 0; i < AMD_SCHED_MAX_PRIORITY; i++)
                amd_sched_rq_init(&sched->sched_rq[i]);

        init_waitqueue_head(&sched->wake_up_worker);
        init_waitqueue_head(&sched->job_scheduled);
        atomic_set(&sched->hw_rq_count, 0);
        if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
                sched_fence_slab = kmem_cache_create(
                        "amd_sched_fence", sizeof(struct amd_sched_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!sched_fence_slab)
                        return -ENOMEM;
        }

        /* Each scheduler will run on a separate kernel thread */
        sched->thread = kthread_run(amd_sched_main, sched, sched->name);
        if (IS_ERR(sched->thread)) {
                DRM_ERROR("Failed to create scheduler for %s.\n", name);
                return PTR_ERR(sched->thread);
        }

        return 0;
}

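/*
 * Illustrative sketch (not part of the original file): how a driver could
 * bring up one scheduler per hardware ring. my_ops, my_ring and the numbers
 * are assumptions; the callbacks in amd_sched_backend_ops must at least
 * provide dependency() and run_job() as used by this file.
 *
 *      static struct amd_sched_backend_ops my_ops = {
 *              .dependency = my_dependency,
 *              .run_job = my_run_job,
 *      };
 *
 *      r = amd_sched_init(&my_ring->sched, &my_ops,
 *                         16, msecs_to_jiffies(1000), my_ring->name);
 *      if (r)
 *              return r;
 *
 *      ...
 *
 *      amd_sched_fini(&my_ring->sched);
 */
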
/**
 * Destroy a gpu scheduler
 *
 * @sched       The pointer to the scheduler
 */
void amd_sched_fini(struct amd_gpu_scheduler *sched)
{
        if (sched->thread)
                kthread_stop(sched->thread);
        if (atomic_dec_and_test(&sched_fence_slab_ref))
                kmem_cache_destroy(sched_fence_slab);
}