// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "habanalabs.h"

#include <linux/slab.h>
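
/**
 * struct hl_eqe_work - used to defer the handling of a single EQ entry
 *                      from hard-IRQ context to a workqueue
 *
 * @eq_work:  work object; runs the EQ entry handler in process context
 * @hdev:     pointer to the device structure
 * @eq_entry: copy of the EQ entry to handle
 */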
struct hl_eqe_work {
	struct work_struct	eq_work;
	struct hl_device	*hdev;
	struct hl_eq_entry	eq_entry;
};
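
/**
 * hl_cq_inc_ptr - increment ci or pi of cq
 *
 * @ptr: the current ci or pi value of the completion queue
 *
 * Increment ptr by 1. If it reaches the number of completion queue
 * entries, set it to 0.
 */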
inline u32 hl_cq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_CQ_LENGTH))
		ptr = 0;
	return ptr;
}
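
/**
 * hl_eq_inc_ptr - increment ci of eq
 *
 * @ptr: the current ci value of the event queue
 *
 * Increment ptr by 1. If it reaches the number of event queue
 * entries, set it to 0.
 */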
inline u32 hl_eq_inc_ptr(u32 ptr)
{
	ptr++;
	if (unlikely(ptr == HL_EQ_LENGTH))
		ptr = 0;
	return ptr;
}
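
/**
 * irq_handle_eqe - handle a single event queue entry in workqueue context
 *
 * @work: the eq_work member embedded in a struct hl_eqe_work
 *
 * Calls the ASIC-specific EQ handler on the copied entry, then frees the
 * work object that was allocated in the interrupt handler.
 */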
static void irq_handle_eqe(struct work_struct *work)
{
	struct hl_eqe_work *eqe_work = container_of(work, struct hl_eqe_work,
							eq_work);
	struct hl_device *hdev = eqe_work->hdev;

	hdev->asic_funcs->handle_eqe(hdev, &eqe_work->eq_entry);

	kfree(eqe_work);
}
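
/**
 * hl_irq_handler_cq - irq handler for completion queue
 *
 * @irq: irq number
 * @arg: pointer to completion queue structure
 */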
irqreturn_t hl_irq_handler_cq(int irq, void *arg)
{
	struct hl_cq *cq = arg;
	struct hl_device *hdev = cq->hdev;
	struct hl_hw_queue *queue;
	struct hl_cs_job *job;
	bool shadow_index_valid;
	u16 shadow_index;
	struct hl_cq_entry *cq_entry, *cq_base;

	if (hdev->disabled) {
		dev_dbg(hdev->dev,
			"Device disabled but received IRQ %d for CQ %d\n",
			irq, cq->hw_queue_id);
		return IRQ_HANDLED;
	}

	cq_base = cq->kernel_address;

	while (1) {
		bool entry_ready = ((le32_to_cpu(cq_base[cq->ci].data) &
					CQ_ENTRY_READY_MASK)
						>> CQ_ENTRY_READY_SHIFT);

		if (!entry_ready)
			break;

		cq_entry = (struct hl_cq_entry *) &cq_base[cq->ci];
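
		/* Make sure we read CQ entry contents after we've
		 * checked the ownership bit.
		 */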
		dma_rmb();

		shadow_index_valid = ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_VALID_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT);

		shadow_index = (u16) ((le32_to_cpu(cq_entry->data) &
					CQ_ENTRY_SHADOW_INDEX_MASK)
					>> CQ_ENTRY_SHADOW_INDEX_SHIFT);

		queue = &hdev->kernel_queues[cq->hw_queue_id];

		if ((shadow_index_valid) && (!hdev->disabled)) {
			job = queue->shadow_queue[hl_pi_2_offset(shadow_index)];
			queue_work(hdev->cq_wq[cq->cq_idx], &job->finish_work);
		}

		atomic_inc(&queue->ci);
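
		/* Clear CQ entry ready bit */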
		cq_entry->data = cpu_to_le32(le32_to_cpu(cq_entry->data) &
						~CQ_ENTRY_READY_MASK);

		cq->ci = hl_cq_inc_ptr(cq->ci);
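
		/* Increment free slots */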
		atomic_inc(&cq->free_slots_cnt);
	}

	return IRQ_HANDLED;
}
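
/**
 * hl_irq_handler_eq - irq handler for event queue
 *
 * @irq: irq number
 * @arg: pointer to event queue structure
 */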
irqreturn_t hl_irq_handler_eq(int irq, void *arg)
{
	struct hl_eq *eq = arg;
	struct hl_device *hdev = eq->hdev;
	struct hl_eq_entry *eq_entry;
	struct hl_eq_entry *eq_base;
	struct hl_eqe_work *handle_eqe_work;

	eq_base = eq->kernel_address;

	while (1) {
		bool entry_ready =
			((le32_to_cpu(eq_base[eq->ci].hdr.ctl) &
				EQ_CTL_READY_MASK) >> EQ_CTL_READY_SHIFT);

		if (!entry_ready)
			break;

		eq_entry = &eq_base[eq->ci];
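
		/* Make sure we read EQ entry contents after we've
		 * checked the ownership bit.
		 */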
		dma_rmb();

		if (hdev->disabled) {
			dev_warn(hdev->dev,
				"Device disabled but received IRQ %d for EQ\n",
				irq);
			goto skip_irq;
		}

		handle_eqe_work = kmalloc(sizeof(*handle_eqe_work), GFP_ATOMIC);
		if (handle_eqe_work) {
			INIT_WORK(&handle_eqe_work->eq_work, irq_handle_eqe);
			handle_eqe_work->hdev = hdev;

			memcpy(&handle_eqe_work->eq_entry, eq_entry,
					sizeof(*eq_entry));

			queue_work(hdev->eq_wq, &handle_eqe_work->eq_work);
		}
skip_irq:
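		/* Clear EQ entry ready bit */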
		eq_entry->hdr.ctl =
			cpu_to_le32(le32_to_cpu(eq_entry->hdr.ctl) &
						~EQ_CTL_READY_MASK);

		eq->ci = hl_eq_inc_ptr(eq->ci);

		hdev->asic_funcs->update_eq_ci(hdev, eq->ci);
	}

	return IRQ_HANDLED;
}
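
/**
 * hl_cq_init - main initialization function for a cq object
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 * @hw_queue_id: the H/W queue ID this completion queue belongs to
 *
 * Allocate dma-able memory for the completion queue and initialize fields.
 * Returns 0 on success.
 */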
int hl_cq_init(struct hl_device *hdev, struct hl_cq *q, u32 hw_queue_id)
{
	void *p;

	p = hdev->asic_funcs->asic_dma_alloc_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
				&q->bus_address, GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->hw_queue_id = hw_queue_id;
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);

	return 0;
}
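
/**
 * hl_cq_fini - destroy completion queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to cq structure
 *
 * Free the completion queue memory.
 */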
void hl_cq_fini(struct hl_device *hdev, struct hl_cq *q)
{
	hdev->asic_funcs->asic_dma_free_coherent(hdev, HL_CQ_SIZE_IN_BYTES,
						 q->kernel_address,
						 q->bus_address);
}

void hl_cq_reset(struct hl_device *hdev, struct hl_cq *q)
{
	q->ci = 0;
	q->pi = 0;

	atomic_set(&q->free_slots_cnt, HL_CQ_LENGTH);
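
	/*
	 * It's not enough to just reset the PI/CI because the H/W may have
	 * written valid completion entries before it was halted, and we need
	 * to clean the actual queue so we won't process old entries when the
	 * device is operational again.
	 */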
	memset(q->kernel_address, 0, HL_CQ_SIZE_IN_BYTES);
}
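
/**
 * hl_eq_init - main initialization function for an event queue object
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Allocate dma-able memory for the event queue and initialize fields.
 * Returns 0 on success.
 */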
int hl_eq_init(struct hl_device *hdev, struct hl_eq *q)
{
	void *p;

	p = hdev->asic_funcs->cpu_accessible_dma_pool_alloc(hdev,
							HL_EQ_SIZE_IN_BYTES,
							&q->bus_address);
	if (!p)
		return -ENOMEM;

	q->hdev = hdev;
	q->kernel_address = p;
	q->ci = 0;

	return 0;
}
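
/**
 * hl_eq_fini - destroy event queue
 *
 * @hdev: pointer to device structure
 * @q: pointer to eq structure
 *
 * Free the event queue memory. Flush the EQ workqueue first so that no
 * deferred event handling is still in flight when the memory is freed.
 */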
void hl_eq_fini(struct hl_device *hdev, struct hl_eq *q)
{
	flush_workqueue(hdev->eq_wq);

	hdev->asic_funcs->cpu_accessible_dma_pool_free(hdev,
					HL_EQ_SIZE_IN_BYTES,
					q->kernel_address);
}

void hl_eq_reset(struct hl_device *hdev, struct hl_eq *q)
{
	q->ci = 0;
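
	/*
	 * It's not enough to just reset the CI because the H/W may have
	 * written valid event entries before it was halted, and we need to
	 * clean the actual queue so we won't process old entries when the
	 * device is operational again.
	 */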
	memset(q->kernel_address, 0, HL_EQ_SIZE_IN_BYTES);
}