#ifndef __AMDGPU_JOB_H__
#define __AMDGPU_JOB_H__

/* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT		(1 << 0)
/* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST	(1 << 1)
/* bit set means a context switch occurred */
#define AMDGPU_HAVE_CTX_SWITCH			(1 << 2)
/* bit set means the IB is preempted */
#define AMDGPU_IB_PREEMPTED			(1 << 3)
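/*
 * Usage sketch (illustrative only; resubmit_preempted_ib() is a hypothetical
 * helper, not part of this header): the flags above are OR'ed into the
 * status words of struct amdgpu_job below and eventually handed to the
 * ring's context-control emission:
 *
 *	job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
 *	...
 *	if (job->preemption_status & AMDGPU_IB_PREEMPTED)
 *		resubmit_preempted_ib(job);
 */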
#define to_amdgpu_job(sched_job)		\
		container_of((sched_job), struct amdgpu_job, base)

#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)

struct amdgpu_fence;

struct amdgpu_job {
	struct drm_sched_job	base;
	struct amdgpu_vm	*vm;
	struct amdgpu_sync	sync;
	struct amdgpu_sync	sched_sync;
	struct amdgpu_ib	*ibs;
	struct dma_fence	*fence;	/* the hw fence */
	uint32_t		preamble_status;
	uint32_t		preemption_status;
	uint32_t		num_ibs;
	bool			vm_needs_flush;
	uint64_t		vm_pd_addr;
	unsigned		vmid;
	unsigned		pasid;
	uint32_t		gds_base, gds_size;
	uint32_t		gws_base, gws_size;
	uint32_t		oa_base, oa_size;
	uint32_t		vram_lost_counter;

	/* user fence handling */
	uint64_t		uf_addr;
	uint64_t		uf_sequence;
};
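/*
 * Usage sketch (illustrative only; example_run_job() is hypothetical and not
 * declared here): scheduler backends are handed a struct drm_sched_job and
 * recover the wrapping amdgpu_job with to_amdgpu_job().
 * AMDGPU_JOB_GET_VMID() is NULL-safe and yields VMID 0 when no job is given.
 *
 *	static struct dma_fence *example_run_job(struct drm_sched_job *sched_job)
 *	{
 *		struct amdgpu_job *job = to_amdgpu_job(sched_job);
 *		unsigned vmid = AMDGPU_JOB_GET_VMID(job);
 *
 *		pr_debug("job with %u IBs on VMID %u\n", job->num_ibs, vmid);
 *		return job->fence;
 *	}
 */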

int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);
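/*
 * Allocation sketch (illustrative only; the IB size of 64 is an assumption
 * and error paths are abbreviated): amdgpu_job_alloc_with_ib() allocates a
 * job with a single IB attached:
 *
 *	struct amdgpu_job *job;
 *	int r;
 *
 *	r = amdgpu_job_alloc_with_ib(adev, 64, &job);
 *	if (r)
 *		return r;
 *	// fill job->ibs[0] with packets before submitting
 */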

void amdgpu_job_free_resources(struct amdgpu_job *job);
void amdgpu_job_free(struct amdgpu_job *job);
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f);
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence);
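/*
 * Submission sketch (illustrative only; the entity and owner chosen here are
 * assumptions, not mandated by this header): a job is normally pushed to a
 * scheduler entity, and on failure the caller still owns the job and must
 * free it.  amdgpu_job_submit_direct() bypasses the scheduler and writes the
 * job straight to the given ring instead.
 *
 *	struct dma_fence *fence;
 *
 *	r = amdgpu_job_submit(job, &adev->mman.entity,
 *			      AMDGPU_FENCE_OWNER_UNDEFINED, &fence);
 *	if (r) {
 *		amdgpu_job_free(job);
 *		return r;
 *	}
 *	dma_fence_put(fence);
 */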

void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);

#endif