1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
#ifndef __AMDGPU_JOB_H__
#define __AMDGPU_JOB_H__

/*
 * Per-IB status flag bits. NOTE(review): stored in the preamble_status /
 * preemption_status fields of struct amdgpu_job judging by the names —
 * confirm against the ring emit paths that consume them.
 */

/* a preamble IB is present in the job */
#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)

/* the preamble IB is the first IB of the job */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)

/* a context switch is required for this submission */
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)

/* the IB was preempted */
#define AMDGPU_IB_PREEMPTED (1 << 3)

/*
 * Convert an embedded struct drm_sched_job back to its containing
 * struct amdgpu_job (the scheduler hands back &job->base).
 */
#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

/*
 * VMID of a (possibly NULL) job; 0 when there is no job.
 * NOTE(review): @job is evaluated twice — do not pass an expression
 * with side effects.
 */
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)

struct amdgpu_fence;
enum amdgpu_ib_pool_type;
42
/*
 * struct amdgpu_job - one submission to a GPU ring, tracked by the DRM
 * GPU scheduler. A job owns an array of @num_ibs indirect buffers plus
 * the VM / fence / resource state needed to emit and retire them.
 */
struct amdgpu_job {
	struct drm_sched_job base;		/* scheduler bookkeeping; see to_amdgpu_job() */
	struct amdgpu_vm *vm;			/* VM this job runs in */
	struct amdgpu_sync sync;		/* fences to wait on before running */
	struct amdgpu_sync sched_sync;		/* scheduler-side dependencies — TODO confirm vs. @sync */
	struct amdgpu_ib *ibs;			/* array of @num_ibs indirect buffers */
	struct dma_fence hw_fence;		/* embedded fence signalled on HW completion */
	struct dma_fence *external_hw_fence;	/* externally owned HW fence, if any */
	uint32_t preamble_status;		/* AMDGPU_PREAMBLE_IB_PRESENT* flags */
	uint32_t preemption_status;		/* AMDGPU_IB_PREEMPTED flag state */
	uint32_t num_ibs;			/* number of entries in @ibs */
	bool vm_needs_flush;			/* VM must be flushed before this job runs */
	uint64_t vm_pd_addr;			/* page-directory address for the VM flush */
	unsigned vmid;				/* hardware VMID assigned to this job */
	unsigned pasid;				/* process address space ID */
	uint32_t gds_base, gds_size;		/* GDS allocation for this job */
	uint32_t gws_base, gws_size;		/* GWS allocation for this job */
	uint32_t oa_base, oa_size;		/* OA allocation for this job */
	uint32_t vram_lost_counter;		/* snapshot to detect VRAM loss between alloc and run */

	/* user fence, written to @uf_addr when @uf_sequence completes */
	uint64_t uf_addr;
	uint64_t uf_sequence;

	/* NOTE(review): presumably counts how often this job ran (>0 on
	 * resubmission) — confirm against the run path. */
	uint32_t job_run_counter;
};
70
/* Allocate a job with room for @num_ibs IBs; result returned via @job. */
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
/* Allocate a single-IB job of @size bytes from the given IB @pool. */
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
		enum amdgpu_ib_pool_type pool, struct amdgpu_job **job);
/* Release the IB/resource state of @job without freeing the job itself. */
void amdgpu_job_free_resources(struct amdgpu_job *job);
/* Free @job entirely. Callers must not touch it afterwards. */
void amdgpu_job_free(struct amdgpu_job *job);
/* Hand @job to the scheduler @entity; @f receives the finished fence. */
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f);
/* Submit @job directly on @ring, bypassing the scheduler. */
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence);

/* Stop all jobs queued on @sched — used on teardown/reset paths.
 * NOTE(review): exact semantics live in the .c file — confirm there. */
void amdgpu_job_stop_all_jobs_on_sched(struct drm_gpu_scheduler *sched);

#endif /* __AMDGPU_JOB_H__ */
85