linux/include/drm/gpu_scheduler.h
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

/* These are often used as an (initial) index
 * to an array, and as such should start at 0.
 */
enum drm_sched_priority {
        DRM_SCHED_PRIORITY_MIN,
        DRM_SCHED_PRIORITY_NORMAL,
        DRM_SCHED_PRIORITY_HIGH,
        DRM_SCHED_PRIORITY_KERNEL,

        DRM_SCHED_PRIORITY_COUNT,
        DRM_SCHED_PRIORITY_UNSET = -2
};
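
/*
 * Illustrative note: because the priorities start at 0 and are contiguous,
 * they double as array indices. For instance, an entity's run queue is
 * selected roughly like this (a sketch, not a quote of the implementation):
 *
 *      struct drm_sched_rq *rq = &sched->sched_rq[entity->priority];
 */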

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to the context's guilty flag; the scheduler sets it when
 *          a job from this entity exceeds the hang limit.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from the rq and destined for
 *           termination.
 * @entity_idle: Signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
        struct list_head                list;
        struct drm_sched_rq             *rq;
        struct drm_gpu_scheduler        **sched_list;
        unsigned int                    num_sched_list;
        enum drm_sched_priority         priority;
        spinlock_t                      rq_lock;

        struct spsc_queue               job_queue;

        atomic_t                        fence_seq;
        uint64_t                        fence_context;

        struct dma_fence                *dependency;
        struct dma_fence_cb             cb;
        atomic_t                        *guilty;
        struct dma_fence                *last_scheduled;
        struct task_struct              *last_user;
        bool                            stopped;
        struct completion               entity_idle;
};
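
/*
 * A minimal setup sketch for an entity; "foo" is a hypothetical driver and
 * the scheduler instance is assumed to be already initialized with
 * drm_sched_init():
 *
 *      struct drm_gpu_scheduler *sched_list[] = { &foo->ring_sched };
 *      struct drm_sched_entity entity;
 *      int ret;
 *
 *      ret = drm_sched_entity_init(&entity, DRM_SCHED_PRIORITY_NORMAL,
 *                                  sched_list, ARRAY_SIZE(sched_list),
 *                                  NULL);
 *      if (ret)
 *              return ret;
 *
 * On teardown, drm_sched_entity_destroy() flushes remaining jobs and calls
 * drm_sched_entity_fini().
 */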

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
        spinlock_t                      lock;
        struct drm_gpu_scheduler        *sched;
        struct list_head                entities;
        struct drm_sched_entity         *current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
        /**
         * @scheduled: this fence is what will be signaled by the scheduler
         * when the job is scheduled.
         */
        struct dma_fence                scheduled;

        /**
         * @finished: this fence is what will be signaled by the scheduler
         * when the job is completed.
         *
         * When setting up an out fence for the job, you should use
         * this, since it's available immediately upon
         * drm_sched_job_init(), and the fence returned by the driver
         * from run_job() won't be created until the dependencies have
         * resolved.
         */
        struct dma_fence                finished;

        /**
         * @parent: the fence returned by &drm_sched_backend_ops.run_job
         * when scheduling the job on hardware. We signal the
         * &drm_sched_fence.finished fence once parent is signalled.
         */
        struct dma_fence                *parent;
        /**
         * @sched: the scheduler instance to which the job having this struct
         * belongs.
         */
        struct drm_gpu_scheduler        *sched;
        /**
         * @lock: the lock used by the scheduled and the finished fences.
         */
        spinlock_t                      lock;
        /**
         * @owner: job owner for debugging.
         */
        void                            *owner;
};
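
/*
 * Example of the out-fence pattern described under @finished above (a
 * sketch; "syncobj" is a hypothetical &struct drm_syncobj the driver
 * exports to userspace, see <drm/drm_syncobj.h>, and "job" embeds a
 * &struct drm_sched_job as its "base" member):
 *
 *      ret = drm_sched_job_init(&job->base, &entity, owner);
 *      if (ret)
 *              return ret;
 *
 *      drm_syncobj_replace_fence(syncobj, &job->base.s_fence->finished);
 *
 * The finished fence exists as soon as drm_sched_job_init() returns, so it
 * can be handed out before run_job() has produced a hardware fence.
 */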

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @list: used to link this job into the "pending" and "done" lists.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: incremented on every hang caused by this job. If this exceeds the
 *         hang limit of the scheduler then the job is marked guilty and will
 *         not be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(); the driver
 * should then call drm_sched_entity_push_job() once it wants the scheduler
 * to schedule the job.
 */
struct drm_sched_job {
        struct spsc_node                queue_node;
        struct list_head                list;
        struct drm_gpu_scheduler        *sched;
        struct drm_sched_fence          *s_fence;
        struct dma_fence_cb             finish_cb;
        uint64_t                        id;
        atomic_t                        karma;
        enum drm_sched_priority         s_priority;
        struct drm_sched_entity         *entity;
        struct dma_fence_cb             cb;
};
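
/*
 * Typical submission flow (a sketch; "foo_job" is purely illustrative and
 * embeds &struct drm_sched_job as its "base" member):
 *
 *      static int foo_submit(struct foo_job *job,
 *                            struct drm_sched_entity *entity, void *owner)
 *      {
 *              int ret;
 *
 *              ret = drm_sched_job_init(&job->base, entity, owner);
 *              if (ret)
 *                      return ret;
 *
 *              ... driver-specific setup, e.g. collecting dependencies ...
 *
 *              drm_sched_entity_push_job(&job->base, entity);
 *              return 0;
 *      }
 *
 * After the push, the job is owned by the scheduler; it is released via
 * &drm_sched_backend_ops.free_job, which should call drm_sched_job_cleanup().
 */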

/**
 * drm_sched_invalidate_job - account a hang against a job
 * @s_job: the job that caused the hang
 * @threshold: the scheduler's hang limit
 *
 * Increments the job's karma and returns true once the karma exceeds
 * @threshold, i.e. once the job should be considered guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
                                            int threshold)
{
        return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions should be implemented by the driver.
 */
struct drm_sched_backend_ops {
        /**
         * @dependency: Called when the scheduler is considering scheduling
         * this job next, to get another struct dma_fence for this job to
         * block on.  Once it returns NULL, run_job() may be called.
         */
        struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
                                        struct drm_sched_entity *s_entity);

        /**
         * @run_job: Called to execute the job once all of the dependencies
         * have been resolved.  This may be called multiple times, if
         * timedout_job() has happened and the driver resubmits the job
         * via drm_sched_resubmit_jobs().
         */
        struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

        /**
         * @timedout_job: Called when a job has taken too long to execute,
         * to trigger GPU recovery.
         */
        void (*timedout_job)(struct drm_sched_job *sched_job);

        /**
         * @free_job: Called once the job's finished fence has been signaled
         * and it's time to clean it up.
         */
        void (*free_job)(struct drm_sched_job *sched_job);
};
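
/*
 * A skeletal set of backend ops (a sketch for a hypothetical "foo" driver;
 * the callback bodies are driver specific):
 *
 *      static struct dma_fence *foo_dependency(struct drm_sched_job *sched_job,
 *                                              struct drm_sched_entity *s_entity)
 *      {
 *              ... return the next unsignaled fence the job must wait on,
 *                  or NULL once the job is ready to run ...
 *      }
 *
 *      static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *      {
 *              ... write the job to the ring and return the hardware fence
 *                  that signals when it completes ...
 *      }
 *
 *      static void foo_timedout_job(struct drm_sched_job *sched_job)
 *      {
 *              ... trigger GPU recovery; see the sketch after
 *                  drm_sched_job_kickout() below ...
 *      }
 *
 *      static void foo_free_job(struct drm_sched_job *sched_job)
 *      {
 *              drm_sched_job_cleanup(sched_job);
 *              ... free the driver-side job structure ...
 *      }
 *
 *      static const struct drm_sched_backend_ops foo_sched_ops = {
 *              .dependency = foo_dependency,
 *              .run_job = foo_run_job,
 *              .timedout_job = foo_timedout_job,
 *              .free_job = foo_free_job,
 *      };
 */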

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @pending_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the pending_list.
 * @hang_limit: once the number of hangs caused by a job crosses this limit,
 *              the job is marked guilty and will not be considered for
 *              scheduling further.
 * @score: score to help the load balancer pick an idle scheduler.
 * @ready: marks whether the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
        const struct drm_sched_backend_ops      *ops;
        uint32_t                        hw_submission_limit;
        long                            timeout;
        const char                      *name;
        struct drm_sched_rq             sched_rq[DRM_SCHED_PRIORITY_COUNT];
        wait_queue_head_t               wake_up_worker;
        wait_queue_head_t               job_scheduled;
        atomic_t                        hw_rq_count;
        atomic64_t                      job_id_count;
        struct delayed_work             work_tdr;
        struct task_struct              *thread;
        struct list_head                pending_list;
        spinlock_t                      job_list_lock;
        int                             hang_limit;
        atomic_t                        score;
        bool                            ready;
        bool                            free_guilty;
};
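
/*
 * Bringing a scheduler up for one ring (a sketch; the numeric values are
 * arbitrary examples and foo_sched_ops is the hypothetical ops table
 * sketched above):
 *
 *      ret = drm_sched_init(&foo->ring_sched, &foo_sched_ops,
 *                           64, 3, msecs_to_jiffies(500), "foo-ring0");
 *
 * i.e. a hardware queue depth of 64, a hang limit of 3, a 500 ms job
 * timeout, and "foo-ring0" as the ring name. Tear it down again with
 * drm_sched_fini() on driver unload.
 */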

int drm_sched_init(struct drm_gpu_scheduler *sched,
                   const struct drm_sched_backend_ops *ops,
                   uint32_t hw_submission, unsigned hang_limit, long timeout,
                   const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
                       struct drm_sched_entity *entity,
                       void *owner);
void drm_sched_entity_modify_sched(struct drm_sched_entity *entity,
                                   struct drm_gpu_scheduler **sched_list,
                                   unsigned int num_sched_list);

void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
                                    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);
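
/*
 * A typical recovery sequence inside &drm_sched_backend_ops.timedout_job,
 * filling in the foo_timedout_job() stub sketched earlier (the hardware
 * reset itself is driver specific):
 *
 *      static void foo_timedout_job(struct drm_sched_job *sched_job)
 *      {
 *              struct drm_gpu_scheduler *sched = sched_job->sched;
 *
 *              drm_sched_stop(sched, sched_job);
 *              drm_sched_increase_karma(sched_job);
 *
 *              ... reset the hardware ...
 *
 *              drm_sched_resubmit_jobs(sched);
 *              drm_sched_start(sched, true);
 *      }
 */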

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
                             struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
                                struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
                          enum drm_sched_priority priority,
                          struct drm_gpu_scheduler **sched_list,
                          unsigned int num_sched_list,
                          atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
                               struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
                                   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
        struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
                              unsigned long remaining);
struct drm_gpu_scheduler *
drm_sched_pick_best(struct drm_gpu_scheduler **sched_list,
                    unsigned int num_sched_list);

#endif