linux/include/drm/gpu_scheduler.h
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @rq_list: a list of run queues on which jobs from this entity can
 *           be scheduled.
 * @num_rq_list: number of run queues in the rq_list.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: Marks the entity as removed from rq and destined for termination.
 *
 * Entities emit jobs, in order, to their corresponding hardware ring,
 * and the scheduler alternates between entities based on its scheduling
 * policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_sched_rq		**rq_list;
	unsigned int			num_rq_list;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence		*last_scheduled;
	struct task_struct		*last_user;
	bool				stopped;
};

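/*
 * A minimal setup sketch, assuming a driver-owned context and an
 * already-initialized scheduler ("ctx" and "sched" are hypothetical
 * driver-side names, not part of this API); the entity is bound to the
 * run queue matching the requested priority:
 *
 *	struct drm_sched_rq *rq = &sched->sched_rq[DRM_SCHED_PRIORITY_NORMAL];
 *	int r;
 *
 *	r = drm_sched_entity_init(&ctx->entity, &rq, 1, NULL);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_entity_destroy(&ctx->entity);
 */
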
/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * A run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this struct
	 * belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging.
	 */
	void				*owner;
};

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @finish_work: schedules the function @drm_sched_job_finish once the job has
 *               finished to remove the job from the
 *               @drm_gpu_scheduler.ring_mirror_list.
 * @node: used to append this struct to the @drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: incremented on every hang caused by this job. If this exceeds the
 *         hang limit of the scheduler then the job is marked guilty and will
 *         not be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job; see the sketch after the struct below.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct work_struct		finish_work;
	struct list_head		node;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
};

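/*
 * A submission sketch, assuming a driver job structure that embeds
 * struct drm_sched_job as "base" ("job", "entity" and "owner" are
 * hypothetical driver-side names):
 *
 *	r = drm_sched_job_init(&job->base, entity, owner);
 *	if (r)
 *		return r;
 *
 *	fence = dma_fence_get(&job->base.s_fence->finished);
 *	drm_sched_entity_push_job(&job->base, entity);
 *
 * The finished fence is valid as soon as drm_sched_job_init() returns,
 * so it can be handed out as the job's out-fence before pushing.
 */
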
/*
 * Increment the job's karma; returns true once it crosses @threshold
 * (typically the scheduler's hang_limit) and the job should be marked
 * guilty.
 */
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return (s_job && atomic_inc_return(&s_job->karma) > threshold);
}

/**
 * struct drm_sched_backend_ops
 *
 * Defines the backend operations called by the scheduler. These
 * functions should be implemented on the driver side; a skeletal
 * example follows the struct below.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on.  Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_job_recovery()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};

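/*
 * A skeletal implementation sketch; the foo_* names are purely
 * illustrative and not part of any in-tree driver:
 *
 *	static struct dma_fence *foo_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct foo_job *job = to_foo_job(sched_job);
 *
 *		return foo_ring_submit(job);
 *	}
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.dependency	= foo_job_dependency,
 *		.run_job	= foo_run_job,
 *		.timedout_job	= foo_job_timedout,
 *		.free_job	= foo_job_free,
 *	};
 *
 * run_job() returns the hardware fence, which the scheduler stores as
 * &drm_sched_fence.parent and waits on before signaling the finished
 * fence.
 */
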
/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: priority-wise array of run queues.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to @drm_sched_job_timedout after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and it will not be considered for scheduling further.
 * @num_jobs: the number of jobs currently queued in the scheduler.
 * @ready: marks if the underlying HW is ready to work.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			num_jobs;
	bool				ready;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);

void drm_sched_fini(struct drm_gpu_scheduler *sched);
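
/*
 * A bring-up/teardown sketch ("ring", "foo_sched_ops" and the limit
 * variables are hypothetical driver-side names); one scheduler is
 * created per hardware ring:
 *
 *	r = drm_sched_init(&ring->sched, &foo_sched_ops,
 *			   num_hw_submission, hang_limit,
 *			   msecs_to_jiffies(timeout_ms), ring->name);
 *	if (r)
 *		return r;
 *	...
 *	drm_sched_fini(&ring->sched);
 */
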
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  struct drm_sched_rq **rq_list,
			  unsigned int num_rq_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);

#endif