1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
#ifndef __AMDGPU_GFX_H__
#define __AMDGPU_GFX_H__

/*
 * GFX helper API. Implementations live in amdgpu_gfx.c (not visible from
 * this header), so the notes below describe intent only — confirm exact
 * error semantics against the .c file.
 */

/* Allocate a free GFX scratch register; on success *reg receives the
 * register offset. Presumably returns 0 on success and a negative errno
 * otherwise — TODO confirm in amdgpu_gfx.c. */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg);
/* Return a scratch register previously obtained via amdgpu_gfx_scratch_get(). */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg);

/* Parse a user-supplied CU-disable mask sized by SE/SH dimensions.
 * NOTE(review): exact encoding of @mask is defined by the implementation. */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se,
				 unsigned max_sh);

/* Set up the device's compute (MEC) queue bookkeeping. */
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev);

/* KIQ (Kernel Interface Queue) ring construction/teardown. */
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
			     struct amdgpu_ring *ring,
			     struct amdgpu_irq_src *irq);

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring,
			      struct amdgpu_irq_src *irq);

/* KIQ object lifetime; @hpd_size is the HPD buffer size to allocate. */
void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev);
int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
			unsigned hpd_size);

/* Allocate/free MQD (memory queue descriptor) backing storage for the
 * compute queues; @mqd_size is the per-queue descriptor size. */
int amdgpu_gfx_compute_mqd_sw_init(struct amdgpu_device *adev,
				   unsigned mqd_size);
void amdgpu_gfx_compute_mqd_sw_fini(struct amdgpu_device *adev);
49
50
51
52
53
54
55
56
57
58static inline u32 amdgpu_gfx_create_bitmask(u32 bit_width)
59{
60 return (u32)((1ULL << bit_width) - 1);
61}
62
63static inline int amdgpu_gfx_queue_to_bit(struct amdgpu_device *adev,
64 int mec, int pipe, int queue)
65{
66 int bit = 0;
67
68 bit += mec * adev->gfx.mec.num_pipe_per_mec
69 * adev->gfx.mec.num_queue_per_pipe;
70 bit += pipe * adev->gfx.mec.num_queue_per_pipe;
71 bit += queue;
72
73 return bit;
74}
75
76static inline void amdgpu_gfx_bit_to_queue(struct amdgpu_device *adev, int bit,
77 int *mec, int *pipe, int *queue)
78{
79 *queue = bit % adev->gfx.mec.num_queue_per_pipe;
80 *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
81 % adev->gfx.mec.num_pipe_per_mec;
82 *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
83 / adev->gfx.mec.num_pipe_per_mec;
84
85}
86static inline bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
87 int mec, int pipe, int queue)
88{
89 return test_bit(amdgpu_gfx_queue_to_bit(adev, mec, pipe, queue),
90 adev->gfx.mec.queue_bitmap);
91}
92
93#endif
94