linux/include/drm/gpu_scheduler.h
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _DRM_GPU_SCHEDULER_H_
#define _DRM_GPU_SCHEDULER_H_

#include <drm/spsc_queue.h>
#include <linux/dma-fence.h>
#include <linux/completion.h>

#define MAX_WAIT_SCHED_ENTITY_Q_EMPTY msecs_to_jiffies(1000)

struct drm_gpu_scheduler;
struct drm_sched_rq;

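/*
 * Priority levels for entities and jobs, from lowest to highest.
 * DRM_SCHED_PRIORITY_MAX is not a valid priority but the number of
 * priority levels (the scheduler keeps one run queue per level, see
 * &drm_gpu_scheduler.sched_rq); the negative values mark a priority
 * as invalid or not yet assigned.
 */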
enum drm_sched_priority {
	DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_LOW = DRM_SCHED_PRIORITY_MIN,
	DRM_SCHED_PRIORITY_NORMAL,
	DRM_SCHED_PRIORITY_HIGH_SW,
	DRM_SCHED_PRIORITY_HIGH_HW,
	DRM_SCHED_PRIORITY_KERNEL,
	DRM_SCHED_PRIORITY_MAX,
	DRM_SCHED_PRIORITY_INVALID = -1,
	DRM_SCHED_PRIORITY_UNSET = -2
};

/**
 * struct drm_sched_entity - A wrapper around a job queue (typically
 * attached to the DRM file_priv).
 *
 * @list: used to append this struct to the list of entities in the
 *        runqueue.
 * @rq: runqueue on which this entity is currently scheduled.
 * @sched_list: A list of schedulers (drm_gpu_schedulers).
 *              Jobs from this entity can be scheduled on any scheduler
 *              on this list.
 * @num_sched_list: number of drm_gpu_schedulers in the sched_list.
 * @priority: priority of the entity.
 * @rq_lock: lock to modify the runqueue to which this entity belongs.
 * @job_queue: the list of jobs of this entity.
 * @fence_seq: a linearly increasing seqno incremented with each
 *             new &drm_sched_fence which is part of the entity.
 * @fence_context: a unique context for all the fences which belong
 *                 to this entity.
 *                 The &drm_sched_fence.scheduled uses the
 *                 fence_context but &drm_sched_fence.finished uses
 *                 fence_context + 1.
 * @dependency: the dependency fence of the job which is on the top
 *              of the job queue.
 * @cb: callback for the dependency fence above.
 * @guilty: points to ctx's guilty.
 * @last_scheduled: points to the finished fence of the last scheduled job.
 * @last_user: last group leader pushing a job into the entity.
 * @stopped: marks the entity as removed from the runqueue and destined
 *           for termination.
 * @entity_idle: signals when the entity is not in use.
 *
 * Entities will emit jobs in order to their corresponding hardware
 * ring, and the scheduler will alternate between entities based on
 * scheduling policy.
 */
struct drm_sched_entity {
	struct list_head		list;
	struct drm_sched_rq		*rq;
	struct drm_gpu_scheduler	**sched_list;
	unsigned int			num_sched_list;
	enum drm_sched_priority		priority;
	spinlock_t			rq_lock;

	struct spsc_queue		job_queue;

	atomic_t			fence_seq;
	uint64_t			fence_context;

	struct dma_fence		*dependency;
	struct dma_fence_cb		cb;
	atomic_t			*guilty;
	struct dma_fence		*last_scheduled;
	struct task_struct		*last_user;
	bool				stopped;
	struct completion		entity_idle;
};
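
/*
 * A minimal sketch of entity setup, assuming a hypothetical driver
 * context "foo_ctx" that embeds a drm_sched_entity and a driver struct
 * "foo" that owns a scheduler (the foo_* names are illustrative, not
 * from any real driver):
 *
 *	struct drm_gpu_scheduler *sched_list[] = { &foo->sched };
 *	int ret;
 *
 *	ret = drm_sched_entity_init(&foo_ctx->entity,
 *				    DRM_SCHED_PRIORITY_NORMAL,
 *				    sched_list, ARRAY_SIZE(sched_list),
 *				    NULL);
 *
 * Once initialized, jobs pushed to the entity with
 * drm_sched_entity_push_job() are dispatched to one of the schedulers
 * in the sched_list.
 */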

/**
 * struct drm_sched_rq - queue of entities to be scheduled.
 *
 * @lock: to modify the entities list.
 * @sched: the scheduler to which this rq belongs.
 * @entities: list of the entities to be scheduled.
 * @current_entity: the entity which is to be scheduled.
 *
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct drm_sched_rq {
	spinlock_t			lock;
	struct drm_gpu_scheduler	*sched;
	struct list_head		entities;
	struct drm_sched_entity		*current_entity;
};

/**
 * struct drm_sched_fence - fences corresponding to the scheduling of a job.
 */
struct drm_sched_fence {
	/**
	 * @scheduled: this fence is what will be signaled by the scheduler
	 * when the job is scheduled.
	 */
	struct dma_fence		scheduled;

	/**
	 * @finished: this fence is what will be signaled by the scheduler
	 * when the job is completed.
	 *
	 * When setting up an out fence for the job, you should use
	 * this, since it's available immediately upon
	 * drm_sched_job_init(), and the fence returned by the driver
	 * from run_job() won't be created until the dependencies have
	 * resolved.
	 */
	struct dma_fence		finished;

	/**
	 * @parent: the fence returned by &drm_sched_backend_ops.run_job
	 * when scheduling the job on hardware. We signal the
	 * &drm_sched_fence.finished fence once parent is signalled.
	 */
	struct dma_fence		*parent;
	/**
	 * @sched: the scheduler instance to which the job having this
	 * struct belongs.
	 */
	struct drm_gpu_scheduler	*sched;
	/**
	 * @lock: the lock used by the scheduled and the finished fences.
	 */
	spinlock_t			lock;
	/**
	 * @owner: job owner for debugging.
	 */
	void				*owner;
};
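
/*
 * As noted above, &drm_sched_fence.finished is the fence to expose to
 * other waiters, since it exists as soon as drm_sched_job_init()
 * returns. A minimal sketch ("job" being a hypothetical, freshly
 * initialized &drm_sched_job):
 *
 *	struct dma_fence *out_fence;
 *
 *	out_fence = dma_fence_get(&job->s_fence->finished);
 *
 * The reference taken here keeps the fence alive independently of the
 * job, so it can be installed as an out fence before the job runs.
 */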

struct drm_sched_fence *to_drm_sched_fence(struct dma_fence *f);

/**
 * struct drm_sched_job - A job to be run by an entity.
 *
 * @queue_node: used to append this struct to the queue of jobs in an entity.
 * @sched: the scheduler instance on which this job is scheduled.
 * @s_fence: contains the fences for the scheduling of the job.
 * @finish_cb: the callback for the finished fence.
 * @node: used to append this struct to the &drm_gpu_scheduler.ring_mirror_list.
 * @id: a unique id assigned to each job scheduled on the scheduler.
 * @karma: increment on every hang caused by this job. If this exceeds the hang
 *         limit of the scheduler then the job is marked guilty and will not
 *         be scheduled further.
 * @s_priority: the priority of the job.
 * @entity: the entity to which this job belongs.
 * @cb: the callback for the parent fence in s_fence.
 *
 * A job is created by the driver using drm_sched_job_init(), and the
 * driver should call drm_sched_entity_push_job() once it wants the
 * scheduler to schedule the job.
 */
struct drm_sched_job {
	struct spsc_node		queue_node;
	struct drm_gpu_scheduler	*sched;
	struct drm_sched_fence		*s_fence;
	struct dma_fence_cb		finish_cb;
	struct list_head		node;
	uint64_t			id;
	atomic_t			karma;
	enum drm_sched_priority		s_priority;
	struct drm_sched_entity		*entity;
	struct dma_fence_cb		cb;
};
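
/*
 * A minimal sketch of the job lifecycle described above, assuming a
 * hypothetical driver job "foo_job" that embeds a drm_sched_job as its
 * "base" member, and the foo_ctx entity from earlier (illustrative
 * names only):
 *
 *	struct foo_job *job = kzalloc(sizeof(*job), GFP_KERNEL);
 *	int ret;
 *
 *	if (!job)
 *		return -ENOMEM;
 *	ret = drm_sched_job_init(&job->base, &foo_ctx->entity, foo_ctx);
 *	if (ret) {
 *		kfree(job);
 *		return ret;
 *	}
 *	drm_sched_entity_push_job(&job->base, &foo_ctx->entity);
 *
 * After drm_sched_entity_push_job() the scheduler owns the job; the
 * driver releases it from its &drm_sched_backend_ops.free_job callback.
 */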
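/**
 * drm_sched_invalidate_job - check whether a job crossed the hang limit
 * @s_job: job to check, may be NULL
 * @threshold: hang limit of the scheduler, see &drm_gpu_scheduler.hang_limit
 *
 * Increments the karma of @s_job and returns true once it exceeds
 * @threshold, i.e. once the job should be considered guilty.
 */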
static inline bool drm_sched_invalidate_job(struct drm_sched_job *s_job,
					    int threshold)
{
	return s_job && atomic_inc_return(&s_job->karma) > threshold;
}

/**
 * struct drm_sched_backend_ops
 *
 * Define the backend operations called by the scheduler;
 * these functions must be implemented by the driver.
 */
struct drm_sched_backend_ops {
	/**
	 * @dependency: Called when the scheduler is considering scheduling
	 * this job next, to get another struct dma_fence for this job to
	 * block on.  Once it returns NULL, run_job() may be called.
	 */
	struct dma_fence *(*dependency)(struct drm_sched_job *sched_job,
					struct drm_sched_entity *s_entity);

	/**
	 * @run_job: Called to execute the job once all of the dependencies
	 * have been resolved.  This may be called multiple times, if
	 * timedout_job() has happened and drm_sched_resubmit_jobs()
	 * decides to try it again.
	 */
	struct dma_fence *(*run_job)(struct drm_sched_job *sched_job);

	/**
	 * @timedout_job: Called when a job has taken too long to execute,
	 * to trigger GPU recovery.
	 */
	void (*timedout_job)(struct drm_sched_job *sched_job);

	/**
	 * @free_job: Called once the job's finished fence has been signaled
	 * and it's time to clean it up.
	 */
	void (*free_job)(struct drm_sched_job *sched_job);
};
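
/*
 * Drivers typically provide a static ops table; a sketch with
 * hypothetical foo_* callbacks (not taken from a real driver):
 *
 *	static const struct drm_sched_backend_ops foo_sched_ops = {
 *		.dependency = foo_sched_dependency,
 *		.run_job = foo_sched_run_job,
 *		.timedout_job = foo_sched_timedout_job,
 *		.free_job = foo_sched_free_job,
 *	};
 */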

/**
 * struct drm_gpu_scheduler
 *
 * @ops: backend operations provided by the driver.
 * @hw_submission_limit: the max size of the hardware queue.
 * @timeout: the time after which a job is removed from the scheduler.
 * @name: name of the ring for which this scheduler is being used.
 * @sched_rq: an array of run queues, one per priority level.
 * @wake_up_worker: the wait queue on which the scheduler sleeps until a job
 *                  is ready to be scheduled.
 * @job_scheduled: once drm_sched_entity_flush() is called the scheduler
 *                 waits on this wait queue until all the scheduled jobs are
 *                 finished.
 * @hw_rq_count: the number of jobs currently in the hardware queue.
 * @job_id_count: used to assign a unique id to each job.
 * @work_tdr: schedules a delayed call to drm_sched_job_timedout() after the
 *            timeout interval is over.
 * @thread: the kthread on which the scheduler runs.
 * @ring_mirror_list: the list of jobs which are currently in the job queue.
 * @job_list_lock: lock to protect the ring_mirror_list.
 * @hang_limit: once the hangs by a job cross this limit then it is marked
 *              guilty and will no longer be considered for scheduling.
 * @score: score to help the load balancer pick an idle scheduler.
 * @ready: marks if the underlying HW is ready to work.
 * @free_guilty: a hint to the timeout handler to free the guilty job.
 *
 * One scheduler is implemented for each hardware ring.
 */
struct drm_gpu_scheduler {
	const struct drm_sched_backend_ops	*ops;
	uint32_t			hw_submission_limit;
	long				timeout;
	const char			*name;
	struct drm_sched_rq		sched_rq[DRM_SCHED_PRIORITY_MAX];
	wait_queue_head_t		wake_up_worker;
	wait_queue_head_t		job_scheduled;
	atomic_t			hw_rq_count;
	atomic64_t			job_id_count;
	struct delayed_work		work_tdr;
	struct task_struct		*thread;
	struct list_head		ring_mirror_list;
	spinlock_t			job_list_lock;
	int				hang_limit;
	atomic_t			score;
	bool				ready;
	bool				free_guilty;
};

int drm_sched_init(struct drm_gpu_scheduler *sched,
		   const struct drm_sched_backend_ops *ops,
		   uint32_t hw_submission, unsigned hang_limit, long timeout,
		   const char *name);
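
/*
 * A minimal bring-up sketch, one scheduler per hardware ring, with
 * illustrative values (queue depth, hang limit and timeout are driver
 * policy, not prescribed by the scheduler; foo_sched_ops is the
 * hypothetical ops table sketched above):
 *
 *	ret = drm_sched_init(&foo->sched, &foo_sched_ops,
 *			     64, 2, msecs_to_jiffies(10000),
 *			     "foo-ring0");
 *	if (ret)
 *		return ret;
 *
 * drm_sched_fini() tears the scheduler down again; see
 * drm_sched_entity_init() below for attaching entities to it.
 */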

void drm_sched_fini(struct drm_gpu_scheduler *sched);
int drm_sched_job_init(struct drm_sched_job *job,
		       struct drm_sched_entity *entity,
		       void *owner);
void drm_sched_job_cleanup(struct drm_sched_job *job);
void drm_sched_wakeup(struct drm_gpu_scheduler *sched);
void drm_sched_stop(struct drm_gpu_scheduler *sched, struct drm_sched_job *bad);
void drm_sched_start(struct drm_gpu_scheduler *sched, bool full_recovery);
void drm_sched_resubmit_jobs(struct drm_gpu_scheduler *sched);
void drm_sched_increase_karma(struct drm_sched_job *bad);
bool drm_sched_dependency_optimized(struct dma_fence *fence,
				    struct drm_sched_entity *entity);
void drm_sched_fault(struct drm_gpu_scheduler *sched);
void drm_sched_job_kickout(struct drm_sched_job *s_job);

void drm_sched_rq_add_entity(struct drm_sched_rq *rq,
			     struct drm_sched_entity *entity);
void drm_sched_rq_remove_entity(struct drm_sched_rq *rq,
				struct drm_sched_entity *entity);

int drm_sched_entity_init(struct drm_sched_entity *entity,
			  enum drm_sched_priority priority,
			  struct drm_gpu_scheduler **sched_list,
			  unsigned int num_sched_list,
			  atomic_t *guilty);
long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout);
void drm_sched_entity_fini(struct drm_sched_entity *entity);
void drm_sched_entity_destroy(struct drm_sched_entity *entity);
void drm_sched_entity_select_rq(struct drm_sched_entity *entity);
struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity);
void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
			       struct drm_sched_entity *entity);
void drm_sched_entity_set_priority(struct drm_sched_entity *entity,
				   enum drm_sched_priority priority);
bool drm_sched_entity_is_ready(struct drm_sched_entity *entity);

struct drm_sched_fence *drm_sched_fence_create(
	struct drm_sched_entity *s_entity, void *owner);
void drm_sched_fence_scheduled(struct drm_sched_fence *fence);
void drm_sched_fence_finished(struct drm_sched_fence *fence);

unsigned long drm_sched_suspend_timeout(struct drm_gpu_scheduler *sched);
void drm_sched_resume_timeout(struct drm_gpu_scheduler *sched,
			      unsigned long remaining);

#endif