// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
#include "util.h"

#define MAX_TIMESTAMP (~0ULL)

/*
 * A64 instructions are always 4 bytes
 *
 * Only A64 is supported, so can use this constant for converting between
 * addresses and instruction counts, calculating offsets etc
 */
#define A64_INSTR_SIZE 4

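/* Session level state for CoreSight ETM trace decoding */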
struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;

	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;
	u8 sample_branches;
	u8 sample_instructions;

	int num_cpu;
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;
	u64 kernel_start;
	unsigned int pmu_type;
};

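/* State for one decode queue, i.e. one per-thread or per-CPU stream of trace */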
struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;
	struct thread *thread;
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	const struct cs_etm_state *state;
	union perf_event *event_buf;
	unsigned int queue_nr;
	pid_t pid, tid;
	int cpu;
	u64 time;
	u64 timestamp;
	u64 offset;
	u64 period_instructions;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	struct cs_etm_packet *prev_packet;
	struct cs_etm_packet *packet;
};

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid, u64 time_);

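/* Print one decoder packet string, making sure it ends with a newline */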
static void cs_etm__packet_dump(const char *pkt_string)
{
	const char *color = PERF_COLOR_BLUE;
	int len = strlen(pkt_string);

	if (len && (pkt_string[len-1] == '\n'))
		color_fprintf(stdout, color, " %s", pkt_string);
	else
		color_fprintf(stdout, color, " %s\n", pkt_string);

	fflush(stdout);
}

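/*
 * Dump the raw trace data of one auxtrace buffer by running it through a
 * throwaway decoder configured to print packets instead of decoding them.
 */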
static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
			       struct auxtrace_buffer *buffer)
{
	int i, ret;
	const char *color = PERF_COLOR_BLUE;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_decoder *decoder;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		      ". ... CoreSight ETM Trace data: size %zu bytes\n",
		      buffer->size);

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
	if (!t_params)
		return;

	for (i = 0; i < etm->num_cpu; i++) {
		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
		t_params[i].etmv4.reg_configr =
					etm->metadata[i][CS_ETMV4_TRCCONFIGR];
		t_params[i].etmv4.reg_traceidr =
					etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
	}

	/* Set decoder parameters to simply print the trace packets */
	d_params.packet_printer = cs_etm__packet_dump;
	d_params.operation = CS_ETM_OPERATION_PRINT;
	d_params.formatted = true;
	d_params.fsyncs = false;
	d_params.hsyncs = false;
	d_params.frame_aligned = true;

	decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	zfree(&t_params);

	if (!decoder)
		return;
	do {
		size_t consumed;

		ret = cs_etm_decoder__process_data_block(
				decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	cs_etm_decoder__free(decoder);
}

static int cs_etm__flush_events(struct perf_session *session,
				struct perf_tool *tool)
{
	int ret;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	if (!etm->timeless_decoding)
		return -EINVAL;

	ret = cs_etm__update_queues(etm);

	if (ret < 0)
		return ret;

	return cs_etm__process_timeless_queues(etm, -1, MAX_TIMESTAMP - 1);
}

static void cs_etm__free_queue(void *priv)
{
	struct cs_etm_queue *etmq = priv;

	if (!etmq)
		return;

	thread__zput(etmq->thread);
	cs_etm_decoder__free(etmq->decoder);
	zfree(&etmq->event_buf);
	zfree(&etmq->last_branch);
	zfree(&etmq->last_branch_rb);
	zfree(&etmq->prev_packet);
	zfree(&etmq->packet);
	free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
	unsigned int i;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	struct auxtrace_queues *queues = &aux->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		cs_etm__free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}

	auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* First remove all traceID/CPU# nodes from the RB tree */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);
	/* Then the RB tree itself */
	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	zfree(&aux);
}

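/*
 * Memory access callback for the decoder: resolve 'address' to a map/dso in
 * the queue's thread (or the unknown thread for kernel addresses) and read
 * up to 'size' bytes of instruction memory into 'buffer'.  Returns the
 * number of bytes read, 0 if the address cannot be resolved.
 */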
static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
			      size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;

	if (!etmq)
		return -1;

	machine = etmq->etm->machine;
	if (address >= etmq->etm->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = etmq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = etmq->etm->unknown_thread;
	}

	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
		return 0;

	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0)
		return 0;

	return len;
}

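/*
 * Allocate a decode queue: packet buffers, the optional last-branch ring
 * buffer and a decoder instance configured from the per-CPU ETMv4 metadata.
 */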
static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
						unsigned int queue_nr)
{
	int i;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_queue *etmq;
	size_t szp = sizeof(struct cs_etm_packet);

	etmq = zalloc(sizeof(*etmq));
	if (!etmq)
		return NULL;

	etmq->packet = zalloc(szp);
	if (!etmq->packet)
		goto out_free;

	if (etm->synth_opts.last_branch || etm->sample_branches) {
		etmq->prev_packet = zalloc(szp);
		if (!etmq->prev_packet)
			goto out_free;
	}

	if (etm->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		etmq->last_branch = zalloc(sz);
		if (!etmq->last_branch)
			goto out_free;
		etmq->last_branch_rb = zalloc(sz);
		if (!etmq->last_branch_rb)
			goto out_free;
	}

	etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!etmq->event_buf)
		goto out_free;

	etmq->etm = etm;
	etmq->queue_nr = queue_nr;
	etmq->pid = -1;
	etmq->tid = -1;
	etmq->cpu = -1;

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

	if (!t_params)
		goto out_free;

	for (i = 0; i < etm->num_cpu; i++) {
		t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
		t_params[i].etmv4.reg_idr0 = etm->metadata[i][CS_ETMV4_TRCIDR0];
		t_params[i].etmv4.reg_idr1 = etm->metadata[i][CS_ETMV4_TRCIDR1];
		t_params[i].etmv4.reg_idr2 = etm->metadata[i][CS_ETMV4_TRCIDR2];
		t_params[i].etmv4.reg_idr8 = etm->metadata[i][CS_ETMV4_TRCIDR8];
		t_params[i].etmv4.reg_configr =
					etm->metadata[i][CS_ETMV4_TRCCONFIGR];
		t_params[i].etmv4.reg_traceidr =
					etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
	}

	/* Set decoder parameters for a full trace decode */
	d_params.packet_printer = cs_etm__packet_dump;
	d_params.operation = CS_ETM_OPERATION_DECODE;
	d_params.formatted = true;
	d_params.fsyncs = false;
	d_params.hsyncs = false;
	d_params.frame_aligned = true;
	d_params.data = etmq;

	etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	zfree(&t_params);

	if (!etmq->decoder)
		goto out_free;

	/*
	 * Register a function to handle all memory accesses required by
	 * the trace decoder library.
	 */
	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
					      0x0L, ((u64) -1L),
					      cs_etm__mem_access))
		goto out_free_decoder;

	etmq->offset = 0;
	etmq->period_instructions = 0;

	return etmq;

out_free_decoder:
	cs_etm_decoder__free(etmq->decoder);
out_free:
	zfree(&etmq->event_buf);
	zfree(&etmq->last_branch);
	zfree(&etmq->last_branch_rb);
	zfree(&etmq->prev_packet);
	zfree(&etmq->packet);
	free(etmq);

	return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
			       struct auxtrace_queue *queue,
			       unsigned int queue_nr)
{
	struct cs_etm_queue *etmq = queue->priv;

	if (list_empty(&queue->head) || etmq)
		return 0;

	etmq = cs_etm__alloc_queue(etm, queue_nr);

	if (!etmq)
		return -ENOMEM;

	queue->priv = etmq;

	if (queue->cpu != -1)
		etmq->cpu = queue->cpu;

	etmq->tid = queue->tid;

	return 0;
}

static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
{
	unsigned int i;
	int ret;

	for (i = 0; i < etm->queues.nr_queues; i++) {
		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
		if (ret)
			return ret;
	}

	return 0;
}

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
{
	if (etm->queues.new_data) {
		etm->queues.new_data = false;
		return cs_etm__setup_queues(etm);
	}

	return 0;
}

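/*
 * The last-branch ring buffer is filled backwards from last_branch_pos.
 * Copy it out into the linear last_branch array: first the entries from
 * last_branch_pos to the end of the buffer, then, if the buffer has wrapped
 * around, the entries from the start of the buffer up to last_branch_pos.
 */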
static inline void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq)
{
	struct branch_stack *bs_src = etmq->last_branch_rb;
	struct branch_stack *bs_dst = etmq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = etmq->etm->synth_opts.last_branch_sz - etmq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[etmq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * etmq->last_branch_pos);
	}
}

static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
{
	etmq->last_branch_pos = 0;
	etmq->last_branch_rb->nr = 0;
}

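/*
 * The packet records an address range of executed instructions; end_addr
 * points one instruction past the last one executed, so step back one A64
 * instruction width to get its address.
 */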
static inline u64 cs_etm__last_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_TRACE_ON packet */
	if (packet->sample_type == CS_ETM_TRACE_ON)
		return 0;

	return packet->end_addr - A64_INSTR_SIZE;
}

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_TRACE_ON packet */
	if (packet->sample_type == CS_ETM_TRACE_ON)
		return 0;

	return packet->start_addr;
}

static inline u64 cs_etm__instr_count(const struct cs_etm_packet *packet)
{
	/* Only A64 instructions, all 4 bytes wide, are supported */
	return (packet->end_addr - packet->start_addr) / A64_INSTR_SIZE;
}

static inline u64 cs_etm__instr_addr(const struct cs_etm_packet *packet,
				     u64 offset)
{
	/* Address of the instruction 'offset' instructions into the range */
	return packet->start_addr + offset * A64_INSTR_SIZE;
}

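/*
 * Record the branch from prev_packet to packet in the ring buffer.  The
 * buffer is filled backwards, i.e. last_branch_pos is decremented (with
 * wrap-around) so that entries[last_branch_pos] is always the most recent
 * branch, which is the ordering perf expects for a branch stack.
 */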
static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
{
	struct branch_stack *bs = etmq->last_branch_rb;
	struct branch_entry *be;

	if (!etmq->last_branch_pos)
		etmq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	etmq->last_branch_pos -= 1;

	be = &bs->entries[etmq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(etmq->prev_packet);
	be->to = cs_etm__first_executed_instr(etmq->packet);
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Increment bs->nr until reaching the number of last branches asked
	 * by the user on the command line.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}

static int cs_etm__inject_event(union perf_event *event,
				struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

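/*
 * Fetch the next auxtrace buffer for this queue, reading the data in from
 * the perf.data file if it hasn't been mapped yet and dropping the data of
 * the buffer that was just consumed.  Returns the number of bytes available,
 * 0 once the queue is exhausted.
 */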
static int
cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		buff->len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	if (!aux_buffer->data) {
		/* Pick up the mmap'ed data if it hasn't been read yet */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	buff->offset = aux_buffer->offset;
	buff->len = aux_buffer->size;
	buff->buf = aux_buffer->data;

	buff->ref_timestamp = aux_buffer->reference;

	return buff->len;
}

static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
				    struct auxtrace_queue *queue)
{
	struct cs_etm_queue *etmq = queue->priv;

	/* CPU-wide tracing isn't supported yet */
	if (queue->tid == -1)
		return;

	if ((!etmq->thread) && (etmq->tid != -1))
		etmq->thread = machine__find_thread(etm->machine, -1,
						    etmq->tid);

	if (etmq->thread) {
		etmq->pid = etmq->thread->pid_;
		if (queue->cpu == -1)
			etmq->cpu = etmq->thread->cpu;
	}
}

static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = etmq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = addr;
	sample.pid = etmq->pid;
	sample.tid = etmq->tid;
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = etmq->packet->cpu;
	sample.flags = 0;
	sample.insn_len = 1;
	sample.cpumode = event->header.misc;

	if (etm->synth_opts.last_branch) {
		cs_etm__copy_last_branch_rb(etmq);
		sample.branch_stack = etmq->last_branch;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
		"CS ETM Trace: failed to deliver instruction event, error %d\n",
		ret);

	if (etm->synth_opts.last_branch)
		cs_etm__reset_last_branch_rb(etmq);

	return ret;
}

/*
 * The CS ETM packet encodes an instruction range between a branch target
 * and the next taken branch.  Generate a branch sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct perf_sample sample = {.ip = 0,};
	union perf_event *event = etmq->event_buf;
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = cs_etm__last_executed_instr(etmq->prev_packet);
	sample.pid = etmq->pid;
	sample.tid = etmq->tid;
	sample.addr = cs_etm__first_executed_instr(etmq->packet);
	sample.id = etmq->etm->branches_id;
	sample.stream_id = etmq->etm->branches_id;
	sample.period = 1;
	sample.cpu = etmq->packet->cpu;
	sample.flags = 0;
	sample.cpumode = PERF_RECORD_MISC_USER;

	/*
	 * perf report cannot handle events without a branch stack
	 */
	if (etm->synth_opts.last_branch) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->branches_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
		"CS ETM Trace: failed to deliver branch event, error %d\n",
		ret);

	return ret;
}

struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct cs_etm_synth *cs_etm_synth =
		      container_of(tool, struct cs_etm_synth, dummy_tool);

	return perf_session__deliver_synth_event(cs_etm_synth->session,
						 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct cs_etm_synth cs_etm_synth;

	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
	cs_etm_synth.session = session;

	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
					   &id, cs_etm__event_synth);
}

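/*
 * Synthesize event attributes for the samples generated from the decoded
 * trace, based on the first evsel that carries CoreSight trace data.  Which
 * samples are produced is steered by the --itrace options, for instance
 * (illustrative) "perf script --itrace=i100ib" for an instruction sample
 * every 100 instructions plus branch samples.
 */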
static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	/* Create a new id value as a fixed offset from the evsel id */
	id = evsel->id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_branches = true;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_instructions = true;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}

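/*
 * Process a completed CS_ETM_RANGE packet: account the instructions it
 * covers, update the last-branch ring buffer, and synthesize instruction
 * and/or branch samples as configured.
 */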
static int cs_etm__sample(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_packet *tmp;
	int ret;
	u64 instrs_executed;

	instrs_executed = cs_etm__instr_count(etmq->packet);
	etmq->period_instructions += instrs_executed;

	/*
	 * Record a branch when the last instruction in
	 * PREV_PACKET is a branch.
	 */
	if (etm->synth_opts.last_branch &&
	    etmq->prev_packet &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
	    etmq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq);

	if (etm->sample_instructions &&
	    etmq->period_instructions >= etm->instructions_sample_period) {
		/*
		 * Emit instruction sample periodically
		 * TODO: allow period to be defined in cycles and clock time
		 */

		/* Get number of instructions executed after the sample point */
		u64 instrs_over = etmq->period_instructions -
			etm->instructions_sample_period;

		/*
		 * Calculate the address of the sampled instruction (-1 as
		 * sample is reported as though instruction has just been
		 * executed, but PC has not advanced to next instruction)
		 */
		u64 offset = (instrs_executed - instrs_over - 1);
		u64 addr = cs_etm__instr_addr(etmq->packet, offset);

		ret = cs_etm__synth_instruction_sample(
			etmq, addr, etm->instructions_sample_period);
		if (ret)
			return ret;

		/* Carry the remaining instructions into the next period */
		etmq->period_instructions = instrs_over;
	}

	if (etm->sample_branches && etmq->prev_packet) {
		bool generate_sample = false;

		/* Generate sample for tracing on packet */
		if (etmq->prev_packet->sample_type == CS_ETM_TRACE_ON)
			generate_sample = true;

		/* Generate sample for branch taken packet */
		if (etmq->prev_packet->sample_type == CS_ETM_RANGE &&
		    etmq->prev_packet->last_instr_taken_branch)
			generate_sample = true;

		if (generate_sample) {
			ret = cs_etm__synth_branch_sample(etmq);
			if (ret)
				return ret;
		}
	}

	if (etm->sample_branches || etm->synth_opts.last_branch) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET
		 * for the next incoming packet.
		 */
		tmp = etmq->packet;
		etmq->packet = etmq->prev_packet;
		etmq->prev_packet = tmp;
	}

	return 0;
}

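/*
 * Flush the samples still pending for prev_packet; used on a CS_ETM_TRACE_ON
 * packet and at the end of a trace buffer so that no executed range is lost
 * across a discontinuity.
 */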
static int cs_etm__flush(struct cs_etm_queue *etmq)
{
	int err = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_packet *tmp;

	if (!etmq->prev_packet)
		return 0;

	/* Handle start tracing packet */
	if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
		goto swap_packet;

	if (etmq->etm->synth_opts.last_branch &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
		/*
		 * Generate a last branch event for the branches left in the
		 * circular buffer at the end of the trace.
		 *
		 * Use the address of the end of the last reported execution
		 * range.
		 */
		u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, addr,
			etmq->period_instructions);
		if (err)
			return err;

		etmq->period_instructions = 0;
	}

	if (etm->sample_branches &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
		err = cs_etm__synth_branch_sample(etmq);
		if (err)
			return err;
	}

swap_packet:
	if (etmq->etm->synth_opts.last_branch) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET
		 * for the next incoming packet.
		 */
		tmp = etmq->packet;
		etmq->packet = etmq->prev_packet;
		etmq->prev_packet = tmp;
	}

	return err;
}

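/*
 * Main decode loop for one queue: fetch trace buffers, re-sync the decoder
 * at each buffer boundary, push the data through the decoder and turn the
 * resulting packets into samples.
 */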
static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_buffer buffer;
	size_t buffer_used, processed;
	int err = 0;

	if (!etm->kernel_start)
		etm->kernel_start = machine__kernel_start(etm->machine);

	/* Go through each buffer in the queue and decode them one by one */
	while (1) {
		buffer_used = 0;
		memset(&buffer, 0, sizeof(buffer));
		err = cs_etm__get_trace(&buffer, etmq);
		if (err <= 0)
			return err;

		/*
		 * We cannot assume consecutive blocks in the data file are
		 * contiguous, reset the decoder to force re-sync.
		 */
		err = cs_etm_decoder__reset(etmq->decoder);
		if (err != 0)
			return err;

		/* Run trace decoder until buffer consumed or end of trace */
		do {
			processed = 0;
			err = cs_etm_decoder__process_data_block(
				etmq->decoder,
				etmq->offset,
				&buffer.buf[buffer_used],
				buffer.len - buffer_used,
				&processed);
			if (err)
				return err;

			etmq->offset += processed;
			buffer_used += processed;

			/* Process each packet in this chunk */
			while (1) {
				err = cs_etm_decoder__get_packet(etmq->decoder,
								 etmq->packet);
				if (err <= 0)
					/*
					 * Stop processing this chunk on
					 * end of data or error
					 */
					break;

				switch (etmq->packet->sample_type) {
				case CS_ETM_RANGE:
					/*
					 * If the packet contains an
					 * instruction range, generate
					 * instruction sequence events.
					 */
					cs_etm__sample(etmq);
					break;
				case CS_ETM_TRACE_ON:
					/*
					 * Discontinuity in trace, flush
					 * previous branch stack
					 */
					cs_etm__flush(etmq);
					break;
				case CS_ETM_EMPTY:
					/*
					 * Should not receive empty packet,
					 * report error.
					 */
					pr_err("CS ETM Trace: empty packet\n");
					return -EINVAL;
				default:
					break;
				}
			}
		} while (buffer.len > buffer_used);

		if (err == 0)
			/* Flush any remaining branch stack entries */
			err = cs_etm__flush(etmq);
	}

	return err;
}

static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid, u64 time_)
{
	unsigned int i;
	struct auxtrace_queues *queues = &etm->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
		struct cs_etm_queue *etmq = queue->priv;

		if (etmq && ((tid == -1) || (etmq->tid == tid))) {
			etmq->time = time_;
			cs_etm__set_pid_tid_cpu(etm, queue);
			cs_etm__run_decoder(etmq);
		}
	}

	return 0;
}

static int cs_etm__process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
{
	int err = 0;
	u64 timestamp;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("CoreSight ETM Trace requires ordered events\n");
		return -EINVAL;
	}

	if (!etm->timeless_decoding)
		return -EINVAL;

	if (sample->time && (sample->time != (u64) -1))
		timestamp = sample->time;
	else
		timestamp = 0;

	if (timestamp || etm->timeless_decoding) {
		err = cs_etm__update_queues(etm);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_EXIT)
		return cs_etm__process_timeless_queues(etm,
						       event->fork.tid,
						       sample->time);

	return 0;
}

static int cs_etm__process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (!etm->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		bool is_pipe = perf_data__is_pipe(session->data);
		int err;

		if (is_pipe)
			data_offset = 0;
		else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&etm->queues, session,
						 event, data_offset, &buffer);
		if (err)
			return err;

		if (dump_trace)
			if (auxtrace_buffer__get_data(buffer, fd)) {
				cs_etm__dump_event(etm, buffer);
				auxtrace_buffer__put_data(buffer);
			}
	}

	return 0;
}

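/*
 * Decoding is done per-thread ("timeless") unless at least one event in the
 * session was recorded with PERF_SAMPLE_TIME set.
 */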
static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
{
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = etm->session->evlist;
	bool timeless_decoding = true;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
			timeless_decoding = false;
	}

	return timeless_decoding;
}

static const char * const cs_etm_global_header_fmts[] = {
	[CS_HEADER_VERSION_0]    = "  Header version                 %llx\n",
	[CS_PMU_TYPE_CPUS]       = "  PMU type/num cpus              %llx\n",
	[CS_ETM_SNAPSHOT]        = "  Snapshot                       %llx\n",
};

static const char * const cs_etm_priv_fmts[] = {
	[CS_ETM_MAGIC]           = "  Magic number                   %llx\n",
	[CS_ETM_CPU]             = "  CPU                            %lld\n",
	[CS_ETM_ETMCR]           = "  ETMCR                          %llx\n",
	[CS_ETM_ETMTRACEIDR]     = "  ETMTRACEIDR                    %llx\n",
	[CS_ETM_ETMCCER]         = "  ETMCCER                        %llx\n",
	[CS_ETM_ETMIDR]          = "  ETMIDR                         %llx\n",
};

static const char * const cs_etmv4_priv_fmts[] = {
	[CS_ETM_MAGIC]           = "  Magic number                   %llx\n",
	[CS_ETM_CPU]             = "  CPU                            %lld\n",
	[CS_ETMV4_TRCCONFIGR]    = "  TRCCONFIGR                     %llx\n",
	[CS_ETMV4_TRCTRACEIDR]   = "  TRCTRACEIDR                    %llx\n",
	[CS_ETMV4_TRCIDR0]       = "  TRCIDR0                        %llx\n",
	[CS_ETMV4_TRCIDR1]       = "  TRCIDR1                        %llx\n",
	[CS_ETMV4_TRCIDR2]       = "  TRCIDR2                        %llx\n",
	[CS_ETMV4_TRCIDR8]       = "  TRCIDR8                        %llx\n",
	[CS_ETMV4_TRCAUTHSTATUS] = "  TRCAUTHSTATUS                  %llx\n",
};

static void cs_etm__print_auxtrace_info(u64 *val, int num)
{
	int i, j, cpu = 0;

	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);

	for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
		if (val[i] == __perf_cs_etmv3_magic)
			for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
		else if (val[i] == __perf_cs_etmv4_magic)
			for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
		else
			/* Unknown magic number - stop dumping */
			return;
	}
}

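/*
 * Parse the PERF_RECORD_AUXTRACE_INFO event: extract the global header and
 * the per-CPU ETM/ETMv4 metadata blocks, build the traceID map and register
 * the auxtrace callbacks for the session.
 */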
int cs_etm__process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	struct cs_etm_auxtrace *etm = NULL;
	struct int_node *inode;
	unsigned int pmu_type;
	int event_header_size = sizeof(struct perf_event_header);
	int info_header_size;
	int total_size = auxtrace_info->header.size;
	int priv_size = 0;
	int num_cpu;
	int err = 0, idx = -1;
	int i, j, k;
	u64 *ptr, *hdr = NULL;
	u64 **metadata = NULL;

	/*
	 * sizeof(auxtrace_info_event::type) +
	 * sizeof(auxtrace_info_event::reserved) == 8
	 */
	info_header_size = 8;

	if (total_size < (event_header_size + info_header_size))
		return -EINVAL;

	priv_size = total_size - event_header_size - info_header_size;

	/* First the global part */
	ptr = (u64 *) auxtrace_info->priv;

	/* Look for version '0' of the header */
	if (ptr[0] != 0)
		return -EINVAL;

	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
	if (!hdr)
		return -ENOMEM;

	/* Extract header information - see cs-etm.h for format */
	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		hdr[i] = ptr[i];
	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
				   0xffffffff);

	/*
	 * Create an RB tree for traceID-CPU# tuples.  Since the conversion
	 * has to be made for each packet that gets decoded, optimizing
	 * access in anything other than a sequential array is worth doing.
	 */
	traceid_list = intlist__new(NULL);
	if (!traceid_list) {
		err = -ENOMEM;
		goto err_free_hdr;
	}

	metadata = zalloc(sizeof(*metadata) * num_cpu);
	if (!metadata) {
		err = -ENOMEM;
		goto err_free_traceid_list;
	}

	/*
	 * The metadata is stored in the auxtrace_info section and encodes the
	 * configuration of the ARM embedded trace macrocell which is required
	 * by the trace decoder to properly decode the trace due to its highly
	 * compressed nature.
	 */
	for (j = 0; j < num_cpu; j++) {
		if (ptr[i] == __perf_cs_etmv3_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETM_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETM_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETM_ETMTRACEIDR];
			i += CS_ETM_PRIV_MAX;
		} else if (ptr[i] == __perf_cs_etmv4_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETMV4_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
			i += CS_ETMV4_PRIV_MAX;
		}

		/* Get an RB node for this CPU */
		inode = intlist__findnew(traceid_list, idx);

		/* Something went wrong, no need to continue */
		if (!inode) {
			err = PTR_ERR(inode);
			goto err_free_metadata;
		}

		/*
		 * The node for that CPU should not have been taken already.
		 * Back out if that's the case.
		 */
		if (inode->priv) {
			err = -EINVAL;
			goto err_free_metadata;
		}

		/* All good, associate the traceID with the CPU# */
		inode->priv = &metadata[j][CS_ETM_CPU];
	}

	/*
	 * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
	 * global metadata and each cpu's metadata respectively.
	 * The following tests if the correct number of double words was
	 * present in the auxtrace info section.
	 */
	if (i * 8 != priv_size) {
		err = -EINVAL;
		goto err_free_metadata;
	}

	etm = zalloc(sizeof(*etm));

	if (!etm) {
		err = -ENOMEM;
		goto err_free_metadata;
	}

	err = auxtrace_queues__init(&etm->queues);
	if (err)
		goto err_free_etm;

	etm->session = session;
	etm->machine = &session->machines.host;

	etm->num_cpu = num_cpu;
	etm->pmu_type = pmu_type;
	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
	etm->metadata = metadata;
	etm->auxtrace_type = auxtrace_info->type;
	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);

	etm->auxtrace.process_event = cs_etm__process_event;
	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
	etm->auxtrace.flush_events = cs_etm__flush_events;
	etm->auxtrace.free_events = cs_etm__free_events;
	etm->auxtrace.free = cs_etm__free;
	session->auxtrace = &etm->auxtrace;

	etm->unknown_thread = thread__new(999999999, 999999999);
	if (!etm->unknown_thread)
		goto err_free_queues;

	/*
	 * Initialize list node so that at thread__zput() we can avoid
	 * segmentation fault at list_del_init().
	 */
	INIT_LIST_HEAD(&etm->unknown_thread->node);

	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;

	if (thread__init_map_groups(etm->unknown_thread, etm->machine))
		goto err_delete_thread;

	if (dump_trace) {
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
		return 0;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		etm->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&etm->synth_opts);
		etm->synth_opts.callchain = false;
	}

	err = cs_etm__synth_events(etm, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&etm->queues, session);
	if (err)
		goto err_delete_thread;

	etm->data_queued = etm->queues.populated;

	return 0;

err_delete_thread:
	thread__zput(etm->unknown_thread);
err_free_queues:
	auxtrace_queues__free(&etm->queues);
	session->auxtrace = NULL;
err_free_etm:
	zfree(&etm);
err_free_metadata:
	/* No need to check @metadata[j], free(NULL) is supported */
	for (j = 0; j < num_cpu; j++)
		free(metadata[j]);
	zfree(&metadata);
err_free_traceid_list:
	intlist__delete(traceid_list);
err_free_hdr:
	zfree(&hdr);

	return -EINVAL;
}