// SPDX-License-Identifier: GPL-2.0

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-config.h"
#include "coresight-etm-perf.h"
#include "coresight-priv.h"
#include "coresight-syscfg.h"

static struct pmu etm_pmu;
static bool etm_perf_up;
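
/*
 * This file is the glue between the perf core and the CoreSight drivers:
 * it registers the "cs_etm" PMU, builds a source-to-sink path per CPU
 * when an AUX area is set up, and starts/stops the per-CPU tracer as
 * events are scheduled in and out.
 */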

/*
 * Per-CPU state for a running trace session: the perf AUX output handle
 * and the event data (trace paths plus sink configuration) currently in
 * use on this CPU.  The event data is cached here rather than retrieved
 * via perf_get_aux(handle), since a sink driver may end the handle from
 * its IRQ handler before etm_event_stop() gets to run.
 */
struct etm_ctxt {
	struct perf_output_handle handle;
	struct etm_event_data *event_data;
};

static DEFINE_PER_CPU(struct etm_ctxt, etm_ctxt);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/*
 * The format attributes below map the user-visible perf event config
 * fields onto the ETM option bits / config words used by the drivers.
 */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
/* contextid1 traces CONTEXTIDR_EL1 (the PID source when the kernel runs at EL1) */
PMU_FORMAT_ATTR(contextid1, "config:" __stringify(ETM_OPT_CTXTID));
/* contextid2 traces CONTEXTIDR_EL2 (used when the kernel runs at EL2) */
PMU_FORMAT_ATTR(contextid2, "config:" __stringify(ETM_OPT_CTXTID2));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* preset: selects a parameter preset of the active configuration */
PMU_FORMAT_ATTR(preset, "config:0-3");
/* sink ID: hash of the selected sink's name, same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");
/* config ID: hash of the selected system configuration's name */
PMU_FORMAT_ATTR(configid, "config2:32-63");
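
/*
 * Illustrative usage from userspace (the sink name below is an example
 * only; actual names depend on the platform):
 *
 *   perf record -e cs_etm/timestamp,cycacc,@tmc_etr0/ -- <cmd>
 *
 * The perf tool turns the named terms into attr.config bits using the
 * "format" group above, and resolves "@tmc_etr0" through the "sinks"
 * group into the sink ID placed in attr.config2[31:0].
 */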

/*
 * The "contextid" alias always traces the PID.  The PID lives in
 * CONTEXTIDR_EL1 when the kernel runs at EL1, and in CONTEXTIDR_EL2
 * when the kernel runs at EL2 (VHE), so report whichever option bit
 * matches the current configuration.
 */
static ssize_t format_attr_contextid_show(struct device *dev,
					  struct device_attribute *attr,
					  char *page)
{
	int pid_fmt = ETM_OPT_CTXTID;

#if IS_ENABLED(CONFIG_CORESIGHT_SOURCE_ETM4X)
	pid_fmt = is_kernel_in_hyp_mode() ? ETM_OPT_CTXTID2 : ETM_OPT_CTXTID;
#endif
	return sprintf(page, "config:%d\n", pid_fmt);
}

static struct device_attribute format_attr_contextid =
	__ATTR(contextid, 0444, format_attr_contextid_show, NULL);

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_contextid1.attr,
	&format_attr_contextid2.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	&format_attr_preset.attr,
	&format_attr_configid.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name = "sinks",
	.attrs = etm_config_sinks_attr,
};

static struct attribute *etm_config_events_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_events_group = {
	.name = "events",
	.attrs = etm_config_events_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	&etm_pmu_events_group,
	NULL,
};

static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}
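
/*
 * Allocate this event's copy of the ETM address filters, inheriting the
 * parent's filters for inherited events.
 */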
static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}
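
/*
 * Tear down a session's resources.  This runs from a workqueue (see
 * etm_free_aux()) since releasing paths and sink buffers may sleep,
 * while perf may invoke the free_aux() callback from a context where
 * sleeping is not allowed.
 */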
static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffer, if one was allocated */
	free_sink_buffer(event_data);

	/* Deactivate any system configuration this session had selected */
	if (event_data->cfg_hash)
		cscfg_deactivate_config(event_data->cfg_hash);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
	}

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	/* Record which CPUs this session may trace on */
	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination.  As
	 * such, allocate a per-CPU pointer so the path for any CPU can be
	 * looked up directly.  The same layout is used for single-CPU
	 * sessions; the small amount of unused memory is cheaper than
	 * searching an optimised structure.
	 */
	event_data->path = alloc_percpu(struct list_head *);

	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}

static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}

/*
 * Check whether two sinks could share the same sink buffer: they must be
 * of the same subtype and driven by the same driver for a buffer
 * allocated against one of them to be usable by the other.
 */
static bool sinks_compatible(struct coresight_device *a,
			     struct coresight_device *b)
{
	if (!a || !b)
		return false;

	/*
	 * If the sinks are of the same subtype and driven by the same
	 * driver, allow them to use the same sink buffers.
	 */
	return (a->subtype.sink_subtype == b->subtype.sink_subtype) &&
	       (sink_ops(a) == sink_ops(b));
}
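
/*
 * Called by the perf AUX infrastructure when the ring buffer is set up:
 * resolve the sink (user-selected or default), activate any requested
 * CoreSight configuration, build a source-to-sink path for every
 * candidate CPU and allocate the sink buffer backing the AUX area.
 */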
static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id, cfg_hash;
	int cpu = event->cpu;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct coresight_device *user_sink = NULL, *last_sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the sink selected from user space, if any */
	if (event->attr.config2 & GENMASK_ULL(31, 0)) {
		id = (u32)event->attr.config2;
		sink = user_sink = coresight_get_sink_by_id(id);
	}

	/* Check if the user asked for a CoreSight configuration */
	cfg_hash = (u32)((event->attr.config2 & GENMASK_ULL(63, 32)) >> 32);
	if (cfg_hash) {
		if (cscfg_activate_config(cfg_hash))
			goto err;
		event_data->cfg_hash = cfg_hash;
	}

	mask = &event_data->mask;

	/*
	 * Setup the path for each CPU in the trace session. We try to
	 * build a trace path for every CPU in the mask. If we don't find
	 * an ETM for a CPU or fail to build a path for it, that CPU is
	 * cleared from the mask and the session carries on with the rest.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU, clear it
		 * from the mask and continue with the next one.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink was provided by the user - look for a default
		 * sink for this ETM. The sink specific buffer is allocated
		 * only once for the event, so if the ETMs resolve to
		 * different default sinks we can only keep the CPUs whose
		 * sink is compatible with the first one found; the others
		 * are cleared from the mask.
		 */
		if (!user_sink) {
			/* Find the default sink for this ETM */
			sink = coresight_find_default_sink(csdev);
			if (!sink) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}

			/* Check this sink is compatible with the last one */
			if (last_sink && !sinks_compatible(last_sink, sink)) {
				cpumask_clear_cpu(cpu, mask);
				continue;
			}
			last_sink = sink;
		}

		/*
		 * Building a path doesn't enable it, it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* No sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/*
	 * Allocate the sink buffer for this session. All the sinks where
	 * this event can be scheduled are ensured to be compatible, so the
	 * same sink configuration works for all of them.
	 */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}
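
/*
 * etm_event_start() and etm_event_stop() run on the CPU being traced:
 * start claims the AUX handle, enables the path towards the sink and
 * finally turns the tracer on; stop reverses the order and flushes the
 * collected trace into the AUX buffer.
 */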
static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;

	if (!csdev)
		goto fail;

	/* Have we messed up our tracking ? */
	if (WARN_ON(ctxt->event_data))
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided at
	 * etm_setup_aux(). This could be because the sink is unreachable
	 * from this ETM. Simply don't record anything on this CPU; the
	 * event can resume tracing when it migrates to a CPU that does
	 * reach a sink.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto fail_end_stop;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Tell the perf core the event is alive */
	event->hw.state = 0;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

	/* Save the event_data for this ETM */
	ctxt->event_data = event_data;
out:
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
fail:
	event->hw.state = PERF_HES_STOPPED;
	goto out;
}

static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct etm_ctxt *ctxt = this_cpu_ptr(&etm_ctxt);
	struct perf_output_handle *handle = &ctxt->handle;
	struct etm_event_data *event_data;
	struct list_head *path;

	/*
	 * If we still have access to the event_data via handle,
	 * confirm that we haven't messed up the tracking.
	 */
	if (handle->event &&
	    WARN_ON(perf_get_aux(handle) != ctxt->event_data))
		return;

	event_data = ctxt->event_data;
	/* Clear the event_data as this ETM is stopping the trace. */
	ctxt->event_data = NULL;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	/* We must have a valid event_data for a running event */
	if (WARN_ON(!event_data))
		return;

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	/*
	 * If the handle is no longer bound to an event (e.g. the sink
	 * driver was unable to restart the handle due to lack of buffer
	 * space), there is nothing to update here.
	 */
	if (handle->event && (mode & PERF_EF_UPDATE)) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		perf_aux_output_end(handle, size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(path);
}
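
/*
 * add()/del() are invoked by the perf core when the event is scheduled
 * in or out on this CPU; they map directly onto start/stop, with del()
 * requesting a final buffer update.
 */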
static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size == 0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to cohabitate, they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}
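
/*
 * Translate the perf core's address filters into the ETM filter table:
 * a filter with a size becomes an address range, while start/stop
 * filters become single-address start and stop points.
 */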
static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}

int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);
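
/*
 * Entries under the "sinks" and "events" groups expose a hash of the
 * sink or configuration name (typically visible under
 * /sys/bus/event_source/devices/cs_etm/). The perf tool reads these to
 * fill in the sinkid and configid fields of attr.config2.
 */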
static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

static struct dev_ext_attribute *
etm_perf_add_symlink_group(struct device *dev, const char *name, const char *group_name)
{
	struct dev_ext_attribute *ea;
	unsigned long hash;
	int ret;
	struct device *pmu_dev = etm_pmu.dev;

	if (!etm_perf_up)
		return ERR_PTR(-EPROBE_DEFER);

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return ERR_PTR(-ENOMEM);

	/*
	 * The file under the group exposes a hash of the name: for a sink
	 * this is what ends up in attr.config2[31:0], for a configuration
	 * it is the value of attr.config2[63:32].
	 */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return ERR_PTR(-ENOMEM);

	ea->attr.attr.mode = 0444;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, group_name);

	return ret ? ERR_PTR(ret) : ea;
}

int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	const char *name;
	struct device *dev = &csdev->dev;
	int err = 0;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	name = dev_name(dev);
	csdev->ea = etm_perf_add_symlink_group(dev, name, "sinks");
	if (IS_ERR(csdev->ea)) {
		err = PTR_ERR(csdev->ea);
		csdev->ea = NULL;
	} else
		csdev->ea->attr.show = etm_perf_sink_name_show;

	return err;
}

static void etm_perf_del_symlink_group(struct dev_ext_attribute *ea, const char *group_name)
{
	struct device *pmu_dev = etm_pmu.dev;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, group_name);
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!csdev->ea)
		return;

	etm_perf_del_symlink_group(csdev->ea, "sinks");
	csdev->ea = NULL;
}

static ssize_t etm_perf_cscfg_event_show(struct device *dev,
					 struct device_attribute *dattr,
					 char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "configid=0x%lx\n", (unsigned long)(ea->var));
}

int etm_perf_add_symlink_cscfg(struct device *dev, struct cscfg_config_desc *config_desc)
{
	int err = 0;

	if (config_desc->event_ea != NULL)
		return 0;

	config_desc->event_ea = etm_perf_add_symlink_group(dev, config_desc->name, "events");

	/* set the show function to the custom cscfg event */
	if (!IS_ERR(config_desc->event_ea))
		config_desc->event_ea->attr.show = etm_perf_cscfg_event_show;
	else {
		err = PTR_ERR(config_desc->event_ea);
		config_desc->event_ea = NULL;
	}

	return err;
}

void etm_perf_del_symlink_cscfg(struct cscfg_config_desc *config_desc)
{
	if (!config_desc->event_ea)
		return;

	etm_perf_del_symlink_group(config_desc->event_ea, "events");
	config_desc->event_ea = NULL;
}
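
/*
 * Register the "cs_etm" PMU with the perf core. ETM devices probed
 * before this point defer with -EPROBE_DEFER (see etm_perf_symlink()).
 */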
int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
				PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups = etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr = perf_sw_context;
	etm_pmu.read = etm_event_read;
	etm_pmu.event_init = etm_event_init;
	etm_pmu.setup_aux = etm_setup_aux;
	etm_pmu.free_aux = etm_free_aux;
	etm_pmu.start = etm_event_start;
	etm_pmu.stop = etm_event_stop;
	etm_pmu.add = etm_event_add;
	etm_pmu.del = etm_event_del;
	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}