/* linux/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h */
/*
 * Copyright 2015 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef _GPU_SCHEDULER_H_
#define _GPU_SCHEDULER_H_

#include <linux/kfifo.h>
#include <linux/fence.h>

struct amd_gpu_scheduler;
struct amd_sched_rq;

/**
 * A scheduler entity is a wrapper around a job queue or a group
 * of other entities. Entities take turns emitting jobs from their
 * job queues to corresponding hardware ring based on scheduling
 * policy.
 */
struct amd_sched_entity {
	struct list_head		list;		/* link in rq->entities */
	struct amd_sched_rq		*rq;		/* run queue this entity is scheduled on */
	struct amd_gpu_scheduler	*sched;		/* scheduler owning the ring this entity feeds */

	spinlock_t			queue_lock;	/* protects job_queue */
	struct kfifo			job_queue;	/* FIFO of jobs queued on this entity */

	atomic_t			fence_seq;	/* seqno source for fences from this entity */
	uint64_t			fence_context;	/* fence context of this entity's fences */

	struct fence			*dependency;	/* fence currently blocking this entity, or NULL */
	struct fence_cb			cb;		/* callback armed on @dependency */
};

/**
 * Run queue is a set of entities scheduling command submissions for
 * one specific ring. It implements the scheduling policy that selects
 * the next entity to emit commands from.
 */
struct amd_sched_rq {
	spinlock_t		lock;		/* protects entities and current_entity */
	struct list_head	entities;	/* amd_sched_entity.list members */
	struct amd_sched_entity	*current_entity; /* last entity selected; selection cursor */
};

/**
 * Fence pair backing one scheduled job.  Use to_amd_sched_fence() to map
 * either embedded fence back to this container.
 */
struct amd_sched_fence {
	struct fence			scheduled;	/* presumably signals when the job is picked for HW - confirm in .c */
	struct fence			finished;	/* presumably signals when the job completes - confirm in .c */
	struct fence_cb			cb;		/* callback storage used by the scheduler */
	struct fence			*parent;	/* HW fence this scheduler fence tracks, if any */
	struct amd_gpu_scheduler	*sched;		/* scheduler the owning job was pushed to */
	spinlock_t			lock;		/* lock for the embedded fences */
	void				*owner;		/* opaque owner cookie from amd_sched_fence_create() */
};

/**
 * One unit of work pushed through an entity to a scheduler.
 * Initialized by amd_sched_job_init(), queued with amd_sched_entity_push_job().
 */
struct amd_sched_job {
	struct amd_gpu_scheduler	*sched;		/* scheduler this job was submitted to */
	struct amd_sched_entity		*s_entity;	/* entity the job was pushed through */
	struct amd_sched_fence		*s_fence;	/* scheduler fence pair for this job */
	struct fence_cb			finish_cb;	/* completion callback storage */
	struct work_struct		finish_work;	/* deferred completion work */
	struct list_head		node;		/* presumably links into sched->ring_mirror_list - confirm in .c */
	struct delayed_work		work_tdr;	/* delayed work for job timeout handling */
};

  85extern const struct fence_ops amd_sched_fence_ops_scheduled;
  86extern const struct fence_ops amd_sched_fence_ops_finished;
  87static inline struct amd_sched_fence *to_amd_sched_fence(struct fence *f)
  88{
  89        if (f->ops == &amd_sched_fence_ops_scheduled)
  90                return container_of(f, struct amd_sched_fence, scheduled);
  91
  92        if (f->ops == &amd_sched_fence_ops_finished)
  93                return container_of(f, struct amd_sched_fence, finished);
  94
  95        return NULL;
  96}
  97
/**
 * Define the backend operations called by the scheduler,
 * these functions should be implemented in driver side
 */
struct amd_sched_backend_ops {
	/* presumably returns a fence the job must wait on, or NULL - confirm against drivers */
	struct fence *(*dependency)(struct amd_sched_job *sched_job);
	/* submit the job to hardware; returns a fence for its completion */
	struct fence *(*run_job)(struct amd_sched_job *sched_job);
	/* called when the job exceeds its timeout (see work_tdr) */
	void (*timedout_job)(struct amd_sched_job *sched_job);
	/* release driver resources associated with the job */
	void (*free_job)(struct amd_sched_job *sched_job);
};

/* Priority levels; one run queue exists per level (see sched_rq[]). */
enum amd_sched_priority {
	AMD_SCHED_PRIORITY_KERNEL = 0,
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_MAX_PRIORITY		/* number of levels, not a valid priority */
};

/**
 * One scheduler is implemented for each hardware ring
 */
struct amd_gpu_scheduler {
	const struct amd_sched_backend_ops	*ops;	/* driver backend callbacks */
	uint32_t			hw_submission_limit;	/* max jobs in flight on the ring */
	long				timeout;	/* job timeout from amd_sched_init(); presumably jiffies - confirm */
	const char			*name;		/* scheduler/ring name for debugging */
	struct amd_sched_rq		sched_rq[AMD_SCHED_MAX_PRIORITY]; /* one run queue per priority */
	wait_queue_head_t		wake_up_worker;	/* wakes the scheduler thread */
	wait_queue_head_t		job_scheduled;	/* presumably signaled after a job is scheduled - confirm in .c */
	atomic_t			hw_rq_count;	/* jobs currently submitted to hardware */
	struct task_struct		*thread;	/* scheduler kernel thread */
	struct list_head		ring_mirror_list; /* in-flight jobs, protected by job_list_lock */
	spinlock_t			job_list_lock;	/* protects ring_mirror_list */
};

/* Scheduler instance lifecycle. */
int amd_sched_init(struct amd_gpu_scheduler *sched,
		   const struct amd_sched_backend_ops *ops,
		   uint32_t hw_submission, long timeout, const char *name);
void amd_sched_fini(struct amd_gpu_scheduler *sched);

/* Entity lifecycle and job submission. */
int amd_sched_entity_init(struct amd_gpu_scheduler *sched,
			  struct amd_sched_entity *entity,
			  struct amd_sched_rq *rq,
			  uint32_t jobs);
void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
			   struct amd_sched_entity *entity);
void amd_sched_entity_push_job(struct amd_sched_job *sched_job);

/* Module-level slab for amd_sched_fence allocations. */
int amd_sched_fence_slab_init(void);
void amd_sched_fence_slab_fini(void);

/* Scheduler fence creation and signaling. */
struct amd_sched_fence *amd_sched_fence_create(
	struct amd_sched_entity *s_entity, void *owner);
void amd_sched_fence_scheduled(struct amd_sched_fence *fence);
void amd_sched_fence_finished(struct amd_sched_fence *fence);
int amd_sched_job_init(struct amd_sched_job *job,
		       struct amd_gpu_scheduler *sched,
		       struct amd_sched_entity *entity,
		       void *owner);

/* GPU reset support; exact semantics defined in gpu_scheduler.c. */
void amd_sched_hw_job_reset(struct amd_gpu_scheduler *sched);
void amd_sched_job_recovery(struct amd_gpu_scheduler *sched);
#endif /* _GPU_SCHEDULER_H_ */