// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2019 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <uapi/misc/habanalabs.h>
#include "habanalabs.h"

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/uaccess.h>
#include <linux/slab.h>

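/*
 * Expected size of the user-supplied input structure for each coresight
 * debug operation; used to size the kernel copy of the input parameters
 * and to clamp the user-provided input_size.
 */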
static u32 hl_debug_struct_size[HL_DEBUG_OP_TIMESTAMP + 1] = {
	[HL_DEBUG_OP_ETR] = sizeof(struct hl_debug_params_etr),
	[HL_DEBUG_OP_ETF] = sizeof(struct hl_debug_params_etf),
	[HL_DEBUG_OP_STM] = sizeof(struct hl_debug_params_stm),
	[HL_DEBUG_OP_FUNNEL] = 0,
	[HL_DEBUG_OP_BMON] = sizeof(struct hl_debug_params_bmon),
	[HL_DEBUG_OP_SPMU] = sizeof(struct hl_debug_params_spmu),
	[HL_DEBUG_OP_TIMESTAMP] = 0
};

static int device_status_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_status dev_stat = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!size) || (!out))
		return -EINVAL;

	dev_stat.status = hl_device_status(hdev);

	return copy_to_user(out, &dev_stat,
			min((size_t)size, sizeof(dev_stat))) ? -EFAULT : 0;
}

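/*
 * hw_ip_info() - report basic device properties (PCI device ID, user-visible
 * SRAM/DRAM base addresses and sizes, TPC enabled mask, firmware/CPLD
 * versions, PSOC PCI PLL data) to the user buffer, truncated to the
 * user-supplied return_size.
 */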
static int hw_ip_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_ip_info hw_ip = {0};
	u32 size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 sram_kmd_size, dram_kmd_size;

	if ((!size) || (!out))
		return -EINVAL;

	sram_kmd_size = (prop->sram_user_base_address -
				prop->sram_base_address);
	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);

	hw_ip.device_id = hdev->asic_funcs->get_pci_id(hdev);
	hw_ip.sram_base_address = prop->sram_user_base_address;
	hw_ip.dram_base_address = prop->dram_user_base_address;
	hw_ip.tpc_enabled_mask = prop->tpc_enabled_mask;
	hw_ip.sram_size = prop->sram_size - sram_kmd_size;
	hw_ip.dram_size = prop->dram_size - dram_kmd_size;
	if (hw_ip.dram_size > PAGE_SIZE)
		hw_ip.dram_enabled = 1;
	hw_ip.num_of_events = prop->num_of_events;

	memcpy(hw_ip.cpucp_version, prop->cpucp_info.cpucp_version,
		min(VERSION_MAX_LEN, HL_INFO_VERSION_MAX_LEN));

	memcpy(hw_ip.card_name, prop->cpucp_info.card_name,
		min(CARD_NAME_MAX_LEN, HL_INFO_CARD_NAME_MAX_LEN));

	hw_ip.cpld_version = le32_to_cpu(prop->cpucp_info.cpld_version);
	hw_ip.module_id = le32_to_cpu(prop->cpucp_info.card_location);

	hw_ip.psoc_pci_pll_nr = prop->psoc_pci_pll_nr;
	hw_ip.psoc_pci_pll_nf = prop->psoc_pci_pll_nf;
	hw_ip.psoc_pci_pll_od = prop->psoc_pci_pll_od;
	hw_ip.psoc_pci_pll_div_factor = prop->psoc_pci_pll_div_factor;

	return copy_to_user(out, &hw_ip,
		min((size_t)size, sizeof(hw_ip))) ? -EFAULT : 0;
}

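/*
 * hw_events_info() - copy the ASIC event counters (current or aggregated,
 * depending on @aggregate) to the user buffer.
 */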
static int hw_events_info(struct hl_device *hdev, bool aggregate,
			struct hl_info_args *args)
{
	u32 size, max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	void *arr;

	if ((!max_size) || (!out))
		return -EINVAL;

	arr = hdev->asic_funcs->get_events_stat(hdev, aggregate, &size);

	return copy_to_user(out, arr, min(max_size, size)) ? -EFAULT : 0;
}

static int dram_usage_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_dram_usage dram_usage = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 dram_kmd_size;

	if ((!max_size) || (!out))
		return -EINVAL;

	dram_kmd_size = (prop->dram_user_base_address -
				prop->dram_base_address);
	dram_usage.dram_free_mem = (prop->dram_size - dram_kmd_size) -
					atomic64_read(&hdev->dram_used_mem);
	if (hpriv->ctx)
		dram_usage.ctx_dram_mem =
			atomic64_read(&hpriv->ctx->dram_phys_mem);

	return copy_to_user(out, &dram_usage,
		min((size_t) max_size, sizeof(dram_usage))) ? -EFAULT : 0;
}

static int hw_idle(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_hw_idle hw_idle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	hw_idle.is_idle = hdev->asic_funcs->is_device_idle(hdev,
					&hw_idle.busy_engines_mask_ext, NULL);

	return copy_to_user(out, &hw_idle,
		min((size_t) max_size, sizeof(hw_idle))) ? -EFAULT : 0;
}

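/*
 * debug_coresight() - forward a coresight configuration request to the ASIC
 * specific handler. Input parameters are copied from user space into a
 * kernel buffer sized by hl_debug_struct_size[], and any output produced by
 * the handler is copied back to the user-supplied output buffer.
 */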
static int debug_coresight(struct hl_device *hdev, struct hl_debug_args *args)
{
	struct hl_debug_params *params;
	void *input = NULL, *output = NULL;
	int rc;

	params = kzalloc(sizeof(*params), GFP_KERNEL);
	if (!params)
		return -ENOMEM;

	params->reg_idx = args->reg_idx;
	params->enable = args->enable;
	params->op = args->op;

	if (args->input_ptr && args->input_size) {
		input = kzalloc(hl_debug_struct_size[args->op], GFP_KERNEL);
		if (!input) {
			rc = -ENOMEM;
			goto out;
		}

		if (copy_from_user(input, u64_to_user_ptr(args->input_ptr),
					args->input_size)) {
			rc = -EFAULT;
			dev_err(hdev->dev, "failed to copy input debug data\n");
			goto out;
		}

		params->input = input;
	}

	if (args->output_ptr && args->output_size) {
		output = kzalloc(args->output_size, GFP_KERNEL);
		if (!output) {
			rc = -ENOMEM;
			goto out;
		}

		params->output = output;
		params->output_size = args->output_size;
	}

	rc = hdev->asic_funcs->debug_coresight(hdev, params);
	if (rc) {
		dev_err(hdev->dev,
			"debug coresight operation failed %d\n", rc);
		goto out;
	}

	if (output && copy_to_user((void __user *) (uintptr_t) args->output_ptr,
					output, args->output_size)) {
		dev_err(hdev->dev, "copy to user failed in debug ioctl\n");
		rc = -EFAULT;
		goto out;
	}

out:
	kfree(params);
	kfree(output);
	kfree(input);

	return rc;
}

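/*
 * device_utilization() - report device utilization sampled over the requested
 * period. The period must be between 100 and 1000 ms, in steps of 100 ms.
 */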
static int device_utilization(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_device_utilization device_util = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if ((args->period_ms < 100) || (args->period_ms > 1000) ||
		(args->period_ms % 100)) {
		dev_err(hdev->dev,
			"period %u must be between 100 - 1000 and must be divisible by 100\n",
			args->period_ms);
		return -EINVAL;
	}

	device_util.utilization = hl_device_utilization(hdev, args->period_ms);

	return copy_to_user(out, &device_util,
		min((size_t) max_size, sizeof(device_util))) ? -EFAULT : 0;
}

static int get_clk_rate(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_clk_rate clk_rate = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hdev->asic_funcs->get_clk_rate(hdev, &clk_rate.cur_clk_rate_mhz,
						&clk_rate.max_clk_rate_mhz);
	if (rc)
		return rc;

	return copy_to_user(out, &clk_rate,
		min((size_t) max_size, sizeof(clk_rate))) ? -EFAULT : 0;
}

static int get_reset_count(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_reset_count reset_count = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	reset_count.hard_reset_cnt = hdev->hard_reset_cnt;
	reset_count.soft_reset_cnt = hdev->soft_reset_cnt;

	return copy_to_user(out, &reset_count,
		min((size_t) max_size, sizeof(reset_count))) ? -EFAULT : 0;
}

static int time_sync_info(struct hl_device *hdev, struct hl_info_args *args)
{
	struct hl_info_time_sync time_sync = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	time_sync.device_time = hdev->asic_funcs->get_device_time(hdev);
	time_sync.host_time = ktime_get_raw_ns();

	return copy_to_user(out, &time_sync,
		min((size_t) max_size, sizeof(time_sync))) ? -EFAULT : 0;
}

static int pci_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_pci_counters pci_counters = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_pci_counters_get(hdev, &pci_counters);
	if (rc)
		return rc;

	return copy_to_user(out, &pci_counters,
		min((size_t) max_size, sizeof(pci_counters))) ? -EFAULT : 0;
}

static int clk_throttle_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_clk_throttle clk_throttle = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	clk_throttle.clk_throttling_reason = hdev->clk_throttling_reason;

	return copy_to_user(out, &clk_throttle,
		min((size_t) max_size, sizeof(clk_throttle))) ? -EFAULT : 0;
}

static int cs_counters_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_cs_counters cs_counters = { {0} };
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	memcpy(&cs_counters.cs_counters, &hdev->aggregated_cs_counters,
		sizeof(struct hl_cs_counters));

	if (hpriv->ctx)
		memcpy(&cs_counters.ctx_cs_counters, &hpriv->ctx->cs_counters,
			sizeof(struct hl_cs_counters));

	return copy_to_user(out, &cs_counters,
		min((size_t) max_size, sizeof(cs_counters))) ? -EFAULT : 0;
}

static int sync_manager_info(struct hl_fpriv *hpriv, struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct hl_info_sync_manager sm_info = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;

	if ((!max_size) || (!out))
		return -EINVAL;

	if (args->dcore_id >= HL_MAX_DCORES)
		return -EINVAL;

	sm_info.first_available_sync_object =
			prop->first_available_user_sob[args->dcore_id];
	sm_info.first_available_monitor =
			prop->first_available_user_mon[args->dcore_id];

	return copy_to_user(out, &sm_info, min_t(size_t, (size_t) max_size,
			sizeof(sm_info))) ? -EFAULT : 0;
}

static int total_energy_consumption_info(struct hl_fpriv *hpriv,
			struct hl_info_args *args)
{
	struct hl_device *hdev = hpriv->hdev;
	struct hl_info_energy total_energy = {0};
	u32 max_size = args->return_size;
	void __user *out = (void __user *) (uintptr_t) args->return_pointer;
	int rc;

	if ((!max_size) || (!out))
		return -EINVAL;

	rc = hl_fw_cpucp_total_energy_get(hdev,
			&total_energy.total_energy_consumption);
	if (rc)
		return rc;

	return copy_to_user(out, &total_energy,
		min((size_t) max_size, sizeof(total_energy))) ? -EFAULT : 0;
}

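/*
 * _hl_info_ioctl() - dispatcher for the INFO IOCTL. A few opcodes are served
 * even while the device is disabled or in reset; the rest are rejected with
 * -EBUSY in that state.
 */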
static int _hl_info_ioctl(struct hl_fpriv *hpriv, void *data,
				struct device *dev)
{
	struct hl_info_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc;

	/*
	 * Information is returned for the following opcodes even if the
	 * device is disabled or in reset.
	 */
	switch (args->op) {
	case HL_INFO_HW_IP_INFO:
		return hw_ip_info(hdev, args);

	case HL_INFO_DEVICE_STATUS:
		return device_status_info(hdev, args);

	case HL_INFO_RESET_COUNT:
		return get_reset_count(hdev, args);

	default:
		break;
	}

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(dev,
			"Device is %s. Can't execute INFO IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	switch (args->op) {
	case HL_INFO_HW_EVENTS:
		rc = hw_events_info(hdev, false, args);
		break;

	case HL_INFO_DRAM_USAGE:
		rc = dram_usage_info(hpriv, args);
		break;

	case HL_INFO_HW_IDLE:
		rc = hw_idle(hdev, args);
		break;

	case HL_INFO_DEVICE_UTILIZATION:
		rc = device_utilization(hdev, args);
		break;

	case HL_INFO_HW_EVENTS_AGGREGATE:
		rc = hw_events_info(hdev, true, args);
		break;

	case HL_INFO_CLK_RATE:
		rc = get_clk_rate(hdev, args);
		break;

	case HL_INFO_TIME_SYNC:
		return time_sync_info(hdev, args);

	case HL_INFO_CS_COUNTERS:
		return cs_counters_info(hpriv, args);

	case HL_INFO_PCI_COUNTERS:
		return pci_counters_info(hpriv, args);

	case HL_INFO_CLK_THROTTLE_REASON:
		return clk_throttle_info(hpriv, args);

	case HL_INFO_SYNC_MANAGER:
		return sync_manager_info(hpriv, args);

	case HL_INFO_TOTAL_ENERGY:
		return total_energy_consumption_info(hpriv, args);

	default:
		dev_err(dev, "Invalid request %d\n", args->op);
		rc = -ENOTTY;
		break;
	}

	return rc;
}

static int hl_info_ioctl(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev);
}

static int hl_info_ioctl_control(struct hl_fpriv *hpriv, void *data)
{
	return _hl_info_ioctl(hpriv, data, hpriv->hdev->dev_ctrl);
}

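/*
 * hl_debug_ioctl() - dispatcher for the DEBUG IOCTL. Coresight configuration
 * requests are honored only while the device is in debug mode; debug mode
 * itself is enabled or disabled through HL_DEBUG_OP_SET_MODE.
 */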
static int hl_debug_ioctl(struct hl_fpriv *hpriv, void *data)
{
	struct hl_debug_args *args = data;
	struct hl_device *hdev = hpriv->hdev;
	int rc = 0;

	if (hl_device_disabled_or_in_reset(hdev)) {
		dev_warn_ratelimited(hdev->dev,
			"Device is %s. Can't execute DEBUG IOCTL\n",
			atomic_read(&hdev->in_reset) ? "in_reset" : "disabled");
		return -EBUSY;
	}

	switch (args->op) {
	case HL_DEBUG_OP_ETR:
	case HL_DEBUG_OP_ETF:
	case HL_DEBUG_OP_STM:
	case HL_DEBUG_OP_FUNNEL:
	case HL_DEBUG_OP_BMON:
	case HL_DEBUG_OP_SPMU:
	case HL_DEBUG_OP_TIMESTAMP:
		if (!hdev->in_debug) {
			dev_err_ratelimited(hdev->dev,
				"Rejecting debug configuration request because device not in debug mode\n");
			return -EFAULT;
		}
		args->input_size =
			min(args->input_size, hl_debug_struct_size[args->op]);
		rc = debug_coresight(hdev, args);
		break;
	case HL_DEBUG_OP_SET_MODE:
		rc = hl_device_set_debug_mode(hdev, (bool) args->enable);
		break;
	default:
		dev_err(hdev->dev, "Invalid request %d\n", args->op);
		rc = -ENOTTY;
		break;
	}

	return rc;
}

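/* IOCTL dispatch tables, indexed by _IOC_NR() of the command */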
#define HL_IOCTL_DEF(ioctl, _func) \
	[_IOC_NR(ioctl)] = {.cmd = ioctl, .func = _func}

static const struct hl_ioctl_desc hl_ioctls[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CB, hl_cb_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_CS, hl_cs_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_WAIT_CS, hl_cs_wait_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_MEMORY, hl_mem_ioctl),
	HL_IOCTL_DEF(HL_IOCTL_DEBUG, hl_debug_ioctl)
};

static const struct hl_ioctl_desc hl_ioctls_control[] = {
	HL_IOCTL_DEF(HL_IOCTL_INFO, hl_info_ioctl_control)
};

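/*
 * _hl_ioctl() - common IOCTL entry: copy the user arguments into a kernel
 * buffer (on the stack when they fit, otherwise heap-allocated), call the
 * handler from the dispatch table and copy any output back to user space.
 */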
static long _hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg,
		const struct hl_ioctl_desc *ioctl, struct device *dev)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	unsigned int nr = _IOC_NR(cmd);
	char stack_kdata[128] = {0};
	char *kdata = NULL;
	unsigned int usize, asize;
	hl_ioctl_t *func;
	u32 hl_size;
	int retcode;

	if (hdev->hard_reset_pending) {
		dev_crit_ratelimited(hdev->dev_ctrl,
			"Device HARD reset pending! Please close FD\n");
		return -ENODEV;
	}

	func = ioctl->func;

	if (unlikely(!func)) {
		dev_dbg(dev, "no function\n");
		retcode = -ENOTTY;
		goto out_err;
	}

	hl_size = _IOC_SIZE(ioctl->cmd);
	usize = asize = _IOC_SIZE(cmd);
	if (hl_size > asize)
		asize = hl_size;

	cmd = ioctl->cmd;

	if (cmd & (IOC_IN | IOC_OUT)) {
		if (asize <= sizeof(stack_kdata)) {
			kdata = stack_kdata;
		} else {
			kdata = kzalloc(asize, GFP_KERNEL);
			if (!kdata) {
				retcode = -ENOMEM;
				goto out_err;
			}
		}
	}

	if (cmd & IOC_IN) {
		if (copy_from_user(kdata, (void __user *)arg, usize)) {
			retcode = -EFAULT;
			goto out_err;
		}
	} else if (cmd & IOC_OUT) {
		memset(kdata, 0, usize);
	}

	retcode = func(hpriv, kdata);

	if ((cmd & IOC_OUT) && copy_to_user((void __user *)arg, kdata, usize))
		retcode = -EFAULT;

out_err:
	if (retcode)
		dev_dbg(dev, "error in ioctl: pid=%d, cmd=0x%02x, nr=0x%02x\n",
			task_pid_nr(current), cmd, nr);

	if (kdata != stack_kdata)
		kfree(kdata);

	return retcode;
}

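/* IOCTL entry point for the main (compute) device node */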
long hl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if ((nr >= HL_COMMAND_START) && (nr < HL_COMMAND_END)) {
		ioctl = &hl_ioctls[nr];
	} else {
		dev_err(hdev->dev, "invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev);
}

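/* IOCTL entry point for the control device node; only INFO is supported */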
long hl_ioctl_control(struct file *filep, unsigned int cmd, unsigned long arg)
{
	struct hl_fpriv *hpriv = filep->private_data;
	struct hl_device *hdev = hpriv->hdev;
	const struct hl_ioctl_desc *ioctl = NULL;
	unsigned int nr = _IOC_NR(cmd);

	if (nr == _IOC_NR(HL_IOCTL_INFO)) {
		ioctl = &hl_ioctls_control[nr];
	} else {
		dev_err(hdev->dev_ctrl, "invalid ioctl: pid=%d, nr=0x%02x\n",
			task_pid_nr(current), nr);
		return -ENOTTY;
	}

	return _hl_ioctl(filep, cmd, arg, ioctl, hdev->dev_ctrl);
}