1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
#ifndef __AMDGPU_JOB_H__
#define __AMDGPU_JOB_H__

/* bit set means command submit involves a preamble IB */
#define AMDGPU_PREAMBLE_IB_PRESENT (1 << 0)

/* bit set means preamble IB is first presented in belonging context */
#define AMDGPU_PREAMBLE_IB_PRESENT_FIRST (1 << 1)

/* bit set means context switch occurred since last preamble IB */
#define AMDGPU_HAVE_CTX_SWITCH (1 << 2)

/* Convert an embedded drm_sched_job pointer back to its containing amdgpu_job */
#define to_amdgpu_job(sched_job) \
		container_of((sched_job), struct amdgpu_job, base)

/* VMID of the job, or 0 when there is no job (NULL-safe accessor) */
#define AMDGPU_JOB_GET_VMID(job) ((job) ? (job)->vmid : 0)
37
struct amdgpu_fence;

/*
 * struct amdgpu_job - a unit of command submission tracked by the GPU
 * scheduler.  Embeds a drm_sched_job (see to_amdgpu_job()) and carries the
 * indirect buffers plus the VM / resource state needed to run them.
 */
struct amdgpu_job {
	struct drm_sched_job    base;		/* scheduler bookkeeping; must stay first for container_of users — TODO confirm offset assumption */
	struct amdgpu_vm	*vm;		/* VM this job executes in, may be NULL (AMDGPU_JOB_GET_VMID falls back to 0) */
	struct amdgpu_sync	sync;		/* fences to wait on before running — NOTE(review): exact semantics live in amdgpu_sync.c */
	struct amdgpu_sync	sched_sync;	/* scheduler-level dependencies — presumably last-run fences; verify against amdgpu_job.c */
	struct amdgpu_ib	*ibs;		/* array of num_ibs indirect buffers */
	struct dma_fence	*fence;		/* hw fence signalled when the job finishes */
	uint32_t		preamble_status; /* AMDGPU_PREAMBLE_IB_PRESENT* / AMDGPU_HAVE_CTX_SWITCH flags */
	uint32_t		num_ibs;	/* number of entries in @ibs */
	void			*owner;		/* opaque submitter cookie passed to amdgpu_job_submit() */
	bool			vm_needs_flush;	/* true if the VM must be flushed before execution */
	uint64_t		vm_pd_addr;	/* GPU address of the VM page directory */
	unsigned		vmid;		/* hardware VMID assigned to this job */
	unsigned		pasid;		/* process address space ID — presumably for IOMMU/fault routing; confirm */
	uint32_t		gds_base, gds_size;	/* GDS allocation for this job */
	uint32_t		gws_base, gws_size;	/* GWS allocation for this job */
	uint32_t		oa_base, oa_size;	/* OA allocation for this job */
	uint32_t		vram_lost_counter;	/* snapshot to detect VRAM loss between alloc and run — TODO confirm against adev counter */

	/* user fence, write-back address and its expected sequence value */
	uint64_t		uf_addr;
	uint64_t		uf_sequence;

};
64
/* Allocate a job with room for @num_ibs indirect buffers; returns 0 or a
 * negative errno.  Caller owns *@job until submit or amdgpu_job_free(). */
int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs,
		     struct amdgpu_job **job, struct amdgpu_vm *vm);
/* As above but with a single IB of @size bytes pre-allocated — NOTE(review):
 * size unit (bytes vs dwords) not visible here; confirm in amdgpu_job.c. */
int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size,
			     struct amdgpu_job **job);

/* Release the job's IBs and related resources, keeping the job itself. */
void amdgpu_job_free_resources(struct amdgpu_job *job);
/* Free the whole job; must not be called after a successful submit. */
void amdgpu_job_free(struct amdgpu_job *job);
/* Hand the job to the scheduler entity; on success ownership transfers to the
 * scheduler and *@f receives the finished fence. */
int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
		      void *owner, struct dma_fence **f);
/* Submit directly to @ring, bypassing the scheduler (e.g. for init/reset
 * paths — presumably; verify callers). */
int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
			     struct dma_fence **fence);
#endif