23#ifndef __AMDGPU_JOB_H__
24#define __AMDGPU_JOB_H__
25
26
27#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)
28
29#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)
30
31#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)
32
33#define AMDGPU_IB_PREEMPTED (1 << 3)
34
35#define to_amdgpu_job(sched_job) \
36 container_of((sched_job), struct amdgpu_job, base)
37
38#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
39
40struct amdgpu_fence;
41enum amdgpu_ib_pool_type;
42
43struct amdgpu_job {
44 struct drm_sched_job base;
45 struct amdgpu_vm *vm;
46 struct amdgpu_sync sync;
47 struct amdgpu_sync sched_sync;
48 struct amdgpu_ib *ibs;
49 struct dma_fence *fence;
50 uint32_t preamble_status;
51 uint32_t preemption_status;
52 uint32_t num_ibs;
53 bool vm_needs_flush;
54 uint64_t vm_pd_addr;
55 unsigned vmid;
56 unsigned pasid;
57 uint32_t gds_base, gds_size;
58 uint32_t gws_base, gws_size;
59 uint32_t oa_base, oa_size;
60 uint32_t vram_lost_counter;
61
62
63 uint64_t uf_addr;
64 uint64_t uf_sequence;
65};
66
67int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
68 struct amdgpu_job **job, struct amdgpu_vm *vm);
69int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
70 enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
71void amdgpu_job_free_resources(struct amdgpu_job *job);
72void amdgpu_job_free(struct amdgpu_job *job);
73int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
74 void *owner, struct dma_fence **f);
75int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
76 struct dma_fence **fence);
77
78void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);
79
80#endif
81