// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "dso.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "session.h"
#include "map_symbol.h"
#include "branch.h"
#include "symbol.h"
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"

#define MAX_TIMESTAMP (~0ULL)

struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;

	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;
	u8 sample_branches;
	u8 sample_instructions;

	int num_cpu;
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;
	u64 kernel_start;
	unsigned int pmu_type;
};

struct cs_etm_traceid_queue {
	u8 trace_chan_id;
	pid_t pid, tid;
	u64 period_instructions;
	size_t last_branch_pos;
	union perf_event *event_buf;
	struct thread *thread;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	struct cs_etm_packet *prev_packet;
	struct cs_etm_packet *packet;
	struct cs_etm_packet_queue packet_queue;
};

struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	unsigned int queue_nr;
	u8 pending_timestamp;
	u64 offset;
	const unsigned char *buf;
	size_t buf_len, buf_used;
	/* Conversion between traceID and index in traceid_queues array */
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue **traceid_queues;
};

/* RB tree for quick conversion between traceID and metadata pointers */
static struct intlist *traceid_list;

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid);
static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);

/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300

/*
 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
 * work with.  To get around that, encode the etm queue number as the
 * upper 16 bits and the traceID as the lower 16 bits of the heap's
 * queue number.
 */
#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id) \
		      (queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)

static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
	etmidr &= ETMIDR_PTM_VERSION;

	if (etmidr == ETMIDR_PTM_VERSION)
		return CS_ETM_PROTO_PTM;

	return CS_ETM_PROTO_ETMV3;
}

static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*magic = metadata[CS_ETM_MAGIC];
	return 0;
}

int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*cpu = (int)metadata[CS_ETM_CPU];
	return 0;
}
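/*
 * The returned PID format is presented as an event attribute config bit:
 *
 *   Bit ETM_OPT_CTXTID:  CONTEXTIDR or CONTEXTIDR_EL1 is traced.
 *   Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
 *
 * When the kernel runs at EL2, CONTEXTIDR_EL1 and CONTEXTIDR_EL2 can both
 * be recorded in the trace data; in that case CONTEXTIDR_EL2 is the one
 * that holds the PID, so it takes precedence.
 */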
int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
{
	struct int_node *inode;
	u64 *metadata, val;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;

	if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
		val = metadata[CS_ETM_ETMCR];

		if (val & BIT(ETM_OPT_CTXTID))
			*pid_fmt = BIT(ETM_OPT_CTXTID);
	} else {
		val = metadata[CS_ETMV4_TRCCONFIGR];
		/* CONTEXTIDR_EL2 is traced */
		if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
			*pid_fmt = BIT(ETM_OPT_CTXTID2);
		/* Otherwise, check if CONTEXTIDR_EL1 is traced */
		else if (val & BIT(ETM4_CFG_BIT_CTXTID))
			*pid_fmt = BIT(ETM_OPT_CTXTID);
	}

	return 0;
}

void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
					      u8 trace_chan_id)
{
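	/*
	 * When a timestamp packet is encountered the backend code
	 * is stopped so that the front end has time to process packets
	 * that were accumulated in the traceID queue.  Since there can
	 * be more than one channel per cs_etm_queue, we need to specify
	 * what traceID queue needs servicing.
	 */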
	etmq->pending_timestamp = trace_chan_id;
}

static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
				      u8 *trace_chan_id)
{
	struct cs_etm_packet_queue *packet_queue;

	if (!etmq->pending_timestamp)
		return 0;

	if (trace_chan_id)
		*trace_chan_id = etmq->pending_timestamp;

	packet_queue = cs_etm__etmq_get_packet_queue(etmq,
						     etmq->pending_timestamp);
	if (!packet_queue)
		return 0;

	/* Acknowledge pending status */
	etmq->pending_timestamp = 0;

	/* The timestamp itself was recorded by the decoder callbacks */
	return packet_queue->timestamp;
}

static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
{
	int i;

	queue->head = 0;
	queue->tail = 0;
	queue->packet_count = 0;
	for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
		queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
		queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
		queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
		queue->packet_buffer[i].instr_count = 0;
		queue->packet_buffer[i].last_instr_taken_branch = false;
		queue->packet_buffer[i].last_instr_size = 0;
		queue->packet_buffer[i].last_instr_type = 0;
		queue->packet_buffer[i].last_instr_subtype = 0;
		queue->packet_buffer[i].last_instr_cond = 0;
		queue->packet_buffer[i].flags = 0;
		queue->packet_buffer[i].exception_number = UINT32_MAX;
		queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
		queue->packet_buffer[i].cpu = INT_MIN;
	}
}

static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
{
	int idx;
	struct int_node *inode;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry(inode, traceid_queues_list) {
		idx = (int)(intptr_t)inode->priv;
		tidq = etmq->traceid_queues[idx];
		cs_etm__clear_packet_queue(&tidq->packet_queue);
	}
}

static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
				      struct cs_etm_traceid_queue *tidq,
				      u8 trace_chan_id)
{
	int rc = -ENOMEM;
	struct auxtrace_queue *queue;
	struct cs_etm_auxtrace *etm = etmq->etm;

	cs_etm__clear_packet_queue(&tidq->packet_queue);

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
	tidq->tid = queue->tid;
	tidq->pid = -1;
	tidq->trace_chan_id = trace_chan_id;

	tidq->packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->packet)
		goto out;

	tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->prev_packet)
		goto out_free;

	if (etm->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		tidq->last_branch = zalloc(sz);
		if (!tidq->last_branch)
			goto out_free;
		tidq->last_branch_rb = zalloc(sz);
		if (!tidq->last_branch_rb)
			goto out_free;
	}

	tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!tidq->event_buf)
		goto out_free;

	return 0;

out_free:
	zfree(&tidq->last_branch_rb);
	zfree(&tidq->last_branch);
	zfree(&tidq->prev_packet);
	zfree(&tidq->packet);
out:
	return rc;
}

static struct cs_etm_traceid_queue
*cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
	int idx;
	struct int_node *inode;
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue *tidq, **traceid_queues;
	struct cs_etm_auxtrace *etm = etmq->etm;

	if (etm->timeless_decoding)
		trace_chan_id = CS_ETM_PER_THREAD_TRACEID;

	traceid_queues_list = etmq->traceid_queues_list;
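	/*
	 * Check if a traceid_queue already exists for this traceID by
	 * looking in the queue list.
	 */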
	inode = intlist__find(traceid_queues_list, trace_chan_id);
	if (inode) {
		idx = (int)(intptr_t)inode->priv;
		return etmq->traceid_queues[idx];
	}

	/* We couldn't find a traceid_queue for this traceID, allocate one */
	tidq = malloc(sizeof(*tidq));
	if (!tidq)
		return NULL;

	memset(tidq, 0, sizeof(*tidq));

	/* Get a valid index for the new traceid_queue */
	idx = intlist__nr_entries(traceid_queues_list);
	/* Memory for the inode is freed in cs_etm__free_traceid_queues() */
	inode = intlist__findnew(traceid_queues_list, trace_chan_id);
	if (!inode)
		goto out_free;

	/* Associate this traceID with this index */
	inode->priv = (void *)(intptr_t)idx;

	if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
		goto out_free;

	/* Grow the traceid_queues array by one unit */
	traceid_queues = etmq->traceid_queues;
	traceid_queues = reallocarray(traceid_queues,
				      idx + 1,
				      sizeof(*traceid_queues));

	/*
	 * On failure reallocarray() returns NULL and the original block of
	 * memory is left untouched.
	 */
	if (!traceid_queues)
		goto out_free;

	traceid_queues[idx] = tidq;
	etmq->traceid_queues = traceid_queues;

	return etmq->traceid_queues[idx];

out_free:
	/*
	 * Function intlist__remove() removes the inode from the list
	 * and deletes the memory associated with it.
	 */
	intlist__remove(traceid_queues_list, inode);
	free(tidq);

	return NULL;
}

struct cs_etm_packet_queue
*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (tidq)
		return &tidq->packet_queue;

	return NULL;
}

static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
				struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_packet *tmp;

	if (etm->sample_branches || etm->synth_opts.last_branch ||
	    etm->sample_instructions) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET
		 * for the next incoming packet.
		 */
		tmp = tidq->packet;
		tidq->packet = tidq->prev_packet;
		tidq->prev_packet = tmp;
	}
}

static void cs_etm__packet_dump(const char *pkt_string)
{
	const char *color = PERF_COLOR_BLUE;
	int len = strlen(pkt_string);

	if (len && (pkt_string[len-1] == '\n'))
		color_fprintf(stdout, color, " %s", pkt_string);
	else
		color_fprintf(stdout, color, " %s\n", pkt_string);

	fflush(stdout);
}

static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx,
					  u32 etmidr)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
	t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
	t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
}

static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
	t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
	t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
	t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
	t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
	t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
	t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
}

static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
				     struct cs_etm_auxtrace *etm)
{
	int i;
	u32 etmidr;
	u64 architecture;

	for (i = 0; i < etm->num_cpu; i++) {
		architecture = etm->metadata[i][CS_ETM_MAGIC];

		switch (architecture) {
		case __perf_cs_etmv3_magic:
			etmidr = etm->metadata[i][CS_ETM_ETMIDR];
			cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
			break;
		case __perf_cs_etmv4_magic:
			cs_etm__set_trace_param_etmv4(t_params, etm, i);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_queue *etmq,
				       enum cs_etm_decoder_operation mode)
{
	int ret = -EINVAL;

	if (!(mode < CS_ETM_OPERATION_MAX))
		goto out;

	d_params->packet_printer = cs_etm__packet_dump;
	d_params->operation = mode;
	d_params->data = etmq;
	d_params->formatted = true;
	d_params->fsyncs = false;
	d_params->hsyncs = false;
	d_params->frame_aligned = true;

	ret = 0;
out:
	return ret;
}

static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
			       struct auxtrace_buffer *buffer)
{
	int ret;
	const char *color = PERF_COLOR_BLUE;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_decoder *decoder;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		      ". ... CoreSight ETM Trace data: size %zu bytes\n",
		      buffer->size);

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

	if (!t_params)
		return;

	if (cs_etm__init_trace_params(t_params, etm))
		goto out_free;

	/* Set decoder parameters to simply print the trace packets */
	if (cs_etm__init_decoder_params(&d_params, NULL,
					CS_ETM_OPERATION_PRINT))
		goto out_free;

	decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	if (!decoder)
		goto out_free;
	do {
		size_t consumed;

		ret = cs_etm_decoder__process_data_block(
				decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	cs_etm_decoder__free(decoder);

out_free:
	zfree(&t_params);
}

static int cs_etm__flush_events(struct perf_session *session,
				struct perf_tool *tool)
{
	int ret;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = cs_etm__update_queues(etm);

	if (ret < 0)
		return ret;

	if (etm->timeless_decoding)
		return cs_etm__process_timeless_queues(etm, -1);

	return cs_etm__process_queues(etm);
}

static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
{
	int idx;
	uintptr_t priv;
	struct int_node *inode, *tmp;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
		priv = (uintptr_t)inode->priv;
		idx = priv;

		/* Free this traceid_queue from the array */
		tidq = etmq->traceid_queues[idx];
		thread__zput(tidq->thread);
		zfree(&tidq->event_buf);
		zfree(&tidq->last_branch);
		zfree(&tidq->last_branch_rb);
		zfree(&tidq->prev_packet);
		zfree(&tidq->packet);
		zfree(&tidq);

		/*
		 * Function intlist__remove() removes the inode from the list
		 * and deletes the memory associated with it.
		 */
		intlist__remove(traceid_queues_list, inode);
	}

	/* Then the RB tree itself */
	intlist__delete(traceid_queues_list);
	etmq->traceid_queues_list = NULL;

	/* Finally free the traceid_queues array */
	zfree(&etmq->traceid_queues);
}

static void cs_etm__free_queue(void *priv)
{
	struct cs_etm_queue *etmq = priv;

	if (!etmq)
		return;

	cs_etm_decoder__free(etmq->decoder);
	cs_etm__free_traceid_queues(etmq);
	free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
	unsigned int i;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	struct auxtrace_queues *queues = &aux->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		cs_etm__free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}

	auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* First remove all traceID/metadata nodes for the RB tree */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);
	/* Then the RB tree itself */
	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	zfree(&aux);
}

static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
				      struct evsel *evsel)
{
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	return evsel->core.attr.type == aux->pmu_type;
}

static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
{
	struct machine *machine;

	machine = etmq->etm->machine;

	if (address >= etmq->etm->kernel_start) {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_KERNEL;
		else
			return PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_USER;
		else if (perf_guest)
			return PERF_RECORD_MISC_GUEST_USER;
		else
			return PERF_RECORD_MISC_HYPERVISOR;
	}
}

static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
			      u64 address, size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;
	struct cs_etm_traceid_queue *tidq;

	if (!etmq)
		return 0;

	machine = etmq->etm->machine;
	cpumode = cs_etm__cpu_mode(etmq, address);
	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return 0;

	thread = tidq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return 0;
		thread = etmq->etm->unknown_thread;
	}

	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
		return 0;

	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0)
		return 0;

	return len;
}

static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm)
{
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params = NULL;
	struct cs_etm_queue *etmq;

	etmq = zalloc(sizeof(*etmq));
	if (!etmq)
		return NULL;

	etmq->traceid_queues_list = intlist__new(NULL);
	if (!etmq->traceid_queues_list)
		goto out_free;

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

	if (!t_params)
		goto out_free;

	if (cs_etm__init_trace_params(t_params, etm))
		goto out_free;

	/* Set decoder parameters to decode trace packets */
	if (cs_etm__init_decoder_params(&d_params, etmq,
					CS_ETM_OPERATION_DECODE))
		goto out_free;

	etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	if (!etmq->decoder)
		goto out_free;

	/*
	 * Register a function to handle all memory accesses required by
	 * the trace decoder library.
	 */
	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
					      0x0L, ((u64) -1L),
					      cs_etm__mem_access))
		goto out_free_decoder;

	zfree(&t_params);
	return etmq;

out_free_decoder:
	cs_etm_decoder__free(etmq->decoder);
out_free:
	intlist__delete(etmq->traceid_queues_list);
	free(etmq);

	return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
			       struct auxtrace_queue *queue,
			       unsigned int queue_nr)
{
	int ret = 0;
	unsigned int cs_queue_nr;
	u8 trace_chan_id;
	u64 timestamp;
	struct cs_etm_queue *etmq = queue->priv;

	if (list_empty(&queue->head) || etmq)
		goto out;

	etmq = cs_etm__alloc_queue(etm);

	if (!etmq) {
		ret = -ENOMEM;
		goto out;
	}

	queue->priv = etmq;
	etmq->etm = etm;
	etmq->queue_nr = queue_nr;
	etmq->offset = 0;

	if (etm->timeless_decoding)
		goto out;

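	/*
	 * We are under a CPU-wide trace scenario.  As such we need to know
	 * when the code that generated the traces was suspended.  One way to
	 * achieve this is to walk the trace until the first timestamp is
	 * found and use it to correlate this queue with the others.
	 */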
	while (1) {
		/*
		 * Fetch an aux_buffer from this etmq.  Bail out if no more
		 * blocks or an error has been encountered.
		 */
		ret = cs_etm__get_data_block(etmq);
		if (ret <= 0)
			goto out;

		/*
		 * Run decoder on the trace block.  The decoder will stop when
		 * encountering a timestamp, a full packet queue or the end of
		 * trace for that block.
		 */
		ret = cs_etm__decode_data_block(etmq);
		if (ret)
			goto out;

		/*
		 * Check if a timestamp was encountered while decoding the
		 * above trace block.
		 */
		timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

		/* We found a timestamp, no need to continue. */
		if (timestamp)
			break;

		/*
		 * We didn't find a timestamp so empty all the traceid packet
		 * queues before looking for another timestamp packet, either
		 * in the current data block or a new one.  Packets that were
		 * just decoded are useless since no timestamp has been
		 * associated with them.  As such simply discard them.
		 */
		cs_etm__clear_all_packet_queues(etmq);
	}
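	/*
	 * We have a timestamp.  Add it to the min heap to reflect the order
	 * in which blocks are processed.
	 */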
	cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
	ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
out:
	return ret;
}

static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
{
	unsigned int i;
	int ret;

	if (!etm->kernel_start)
		etm->kernel_start = machine__kernel_start(etm->machine);

	for (i = 0; i < etm->queues.nr_queues; i++) {
		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
		if (ret)
			return ret;
	}

	return 0;
}

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
{
	if (etm->queues.new_data) {
		etm->queues.new_data = false;
		return cs_etm__setup_queues(etm);
	}

	return 0;
}

static inline
void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
				 struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs_src = tidq->last_branch_rb;
	struct branch_stack *bs_dst = tidq->last_branch;
	size_t nr = 0;

	/*
	 * Set the number of records before early exit: ->nr is used to
	 * determine how many branch records to copy from ->entries.
	 */
	bs_dst->nr = bs_src->nr;

	/*
	 * Early exit when there is nothing to copy.
	 */
	if (!bs_src->nr)
		return;

	/*
	 * As bs_src->entries is a circular buffer, we need to copy from it in
	 * two steps.  First, copy the branches from the most recently inserted
	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
	 */
	nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[tidq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/*
	 * If we wrapped around at least once, the branches from the beginning
	 * of the bs_src->entries buffer up to the ->last_branch_pos element
	 * are older valid branches: copy them over.  The total number of
	 * branches copied over will be equal to the number of branches asked
	 * by the user in last_branch_sz.
	 */
	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * tidq->last_branch_pos);
	}
}

static inline
void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
{
	tidq->last_branch_pos = 0;
	tidq->last_branch_rb->nr = 0;
}

static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
					 u8 trace_chan_id, u64 addr)
{
	u8 instrBytes[2];

	cs_etm__mem_access(etmq, trace_chan_id, addr,
			   ARRAY_SIZE(instrBytes), instrBytes);
	/*
	 * T32 instruction size is indicated by bits[15:11] of the first
	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
	 * denote a 32-bit instruction.
	 */
	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->start_addr;
}

static inline
u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->end_addr - packet->last_instr_size;
}

static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
				     u64 trace_chan_id,
				     const struct cs_etm_packet *packet,
				     u64 offset)
{
	if (packet->isa == CS_ETM_ISA_T32) {
		u64 addr = packet->start_addr;

		while (offset) {
			addr += cs_etm__t32_instr_size(etmq,
						       trace_chan_id, addr);
			offset--;
		}
		return addr;
	}

	/* Assume a 4 byte instruction size (A32/A64) */
	return packet->start_addr + offset * 4;
}

static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
					  struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs = tidq->last_branch_rb;
	struct branch_entry *be;

	/*
	 * The branches are recorded in a circular buffer in reverse
	 * chronological order: we start recording from the last element of
	 * the buffer down.  After writing the first element of the stack,
	 * move the insert position back to the end of the buffer.
	 */
	if (!tidq->last_branch_pos)
		tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	tidq->last_branch_pos -= 1;

	be = &bs->entries[tidq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(tidq->prev_packet);
	be->to = cs_etm__first_executed_instr(tidq->packet);
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Increment bs->nr until reaching the number of last branches asked
	 * by the user on the command line.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}

static int cs_etm__inject_event(union perf_event *event,
				struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

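/*
 * cs_etm__get_trace: fetch the next auxtrace_buffer for this queue and
 * point the queue's buf/buf_len/buf_used fields at its data, loading the
 * buffer's data from file if need be.  Returns the buffer length, 0 when
 * there is no more data, or a negative error code.
 */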
static int
cs_etm__get_trace(struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous auxtrace_buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		etmq->buf_len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	etmq->buf_used = 0;
	etmq->buf_len = aux_buffer->size;
	etmq->buf = aux_buffer->data;

	return etmq->buf_len;
}

static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
				    struct cs_etm_traceid_queue *tidq)
{
	if ((!tidq->thread) && (tidq->tid != -1))
		tidq->thread = machine__find_thread(etm->machine, -1,
						    tidq->tid);

	if (tidq->thread)
		tidq->pid = tidq->thread->pid_;
}

int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
			 pid_t tid, u8 trace_chan_id)
{
	int cpu, err = -EINVAL;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return err;

	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
		return err;

	err = machine__set_current_tid(etm->machine, cpu, tid, tid);
	if (err)
		return err;

	tidq->tid = tid;
	thread__zput(tidq->thread);

	cs_etm__set_pid_tid_cpu(etm, tidq);
	return 0;
}

bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
{
	return !!etmq->etm->timeless_decoding;
}

static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
			      u64 trace_chan_id,
			      const struct cs_etm_packet *packet,
			      struct perf_sample *sample)
{
	/*
	 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
	 * packet, so directly bail out with 'insn_len' = 0.
	 */
	if (packet->sample_type == CS_ETM_DISCONTINUITY) {
		sample->insn_len = 0;
		return;
	}

	/*
	 * T32 instruction size might be 32-bit or 16-bit, decide by calling
	 * cs_etm__t32_instr_size().
	 */
	if (packet->isa == CS_ETM_ISA_T32)
		sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
							  sample->ip);
	/* Otherwise, A64 and A32 instruction size are always 32-bit. */
	else
		sample->insn_len = 4;

	cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
			   sample->insn_len, (void *)sample->insn);
}

static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    struct cs_etm_traceid_queue *tidq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = tidq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = addr;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = tidq->packet->cpu;
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);

	if (etm->synth_opts.last_branch)
		sample.branch_stack = tidq->last_branch;

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
		"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	return ret;
}
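/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch.  Generate a sample accordingly.
 */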
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
				       struct cs_etm_traceid_queue *tidq)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct perf_sample sample = {.ip = 0,};
	union perf_event *event = tidq->event_buf;
	struct dummy_branch_stack {
		u64 nr;
		u64 hw_idx;
		struct branch_entry entries;
	} dummy_bs;
	u64 ip;

	ip = cs_etm__last_executed_instr(tidq->prev_packet);

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = ip;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.addr = cs_etm__first_executed_instr(tidq->packet);
	sample.id = etmq->etm->branches_id;
	sample.stream_id = etmq->etm->branches_id;
	sample.period = 1;
	sample.cpu = tidq->packet->cpu;
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
			  &sample);

	/*
	 * perf report cannot handle events without a branch stack
	 */
	if (etm->synth_opts.last_branch) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.hw_idx = -1ULL,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->branches_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
		"CS ETM Trace: failed to deliver branch event, error %d\n",
			ret);

	return ret;
}

struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct cs_etm_synth *cs_etm_synth =
		      container_of(tool, struct cs_etm_synth, dummy_tool);

	return perf_session__deliver_synth_event(cs_etm_synth->session,
						 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct cs_etm_synth cs_etm_synth;

	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
	cs_etm_synth.session = session;

	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
					   &id, cs_etm__event_synth);
}

static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->core.id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_branches = true;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch) {
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		/*
		 * We don't use the hardware index, but the sample generation
		 * code uses the new format branch_stack with this field,
		 * so the event attributes must indicate that it's present.
		 */
		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
	}

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_instructions = true;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}

static int cs_etm__sample(struct cs_etm_queue *etmq,
			  struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	int ret;
	u8 trace_chan_id = tidq->trace_chan_id;
	u64 instrs_prev;

	/* Get instructions remainder from previous packet */
	instrs_prev = tidq->period_instructions;

	tidq->period_instructions += tidq->packet->instr_count;

	/*
	 * Record a branch when the last instruction in
	 * PREV_PACKET is a branch.
	 */
	if (etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE &&
	    tidq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq, tidq);

	if (etm->sample_instructions &&
	    tidq->period_instructions >= etm->instructions_sample_period) {
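		/*
		 * Emit instruction samples periodically.
		 *
		 * Set 'offset' to the distance, in instructions, from the
		 * start of this packet to the first sample that falls inside
		 * it: 'instrs_prev' instructions of the current period were
		 * already consumed by previous packets, so the first sample
		 * lands 'instructions_sample_period - instrs_prev'
		 * instructions into this packet.  The loop below then emits
		 * one sample every 'instructions_sample_period' instructions
		 * until fewer than a full period remains; the remainder is
		 * carried over in 'period_instructions'.
		 */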
		u64 offset = etm->instructions_sample_period - instrs_prev;
		u64 addr;

		/* Carry the last branches for the instruction sample */
		if (etm->synth_opts.last_branch)
			cs_etm__copy_last_branch_rb(etmq, tidq);

		while (tidq->period_instructions >=
				etm->instructions_sample_period) {
			/*
			 * Calculate the address of the sampled instruction
			 * (-1 as sample ip always points to the instruction
			 * itself, not the one after it).
			 */
			addr = cs_etm__instr_addr(etmq, trace_chan_id,
						  tidq->packet, offset - 1);
			ret = cs_etm__synth_instruction_sample(
				etmq, tidq, addr,
				etm->instructions_sample_period);
			if (ret)
				return ret;

			offset += etm->instructions_sample_period;
			tidq->period_instructions -=
				etm->instructions_sample_period;
		}
	}

	if (etm->sample_branches) {
		bool generate_sample = false;

		/* Generate sample for tracing on packet */
		if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			generate_sample = true;

		/* Generate sample for branch taken packet */
		if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
		    tidq->prev_packet->last_instr_taken_branch)
			generate_sample = true;

		if (generate_sample) {
			ret = cs_etm__synth_branch_sample(etmq, tidq);
			if (ret)
				return ret;
		}
	}

	cs_etm__packet_swap(etm, tidq);

	return 0;
}

static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
{
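	/*
	 * When an exception packet is inserted, always set
	 * prev_packet->last_instr_taken_branch to true, whether or not the
	 * last instruction in the previous range packet is a taken branch.
	 * This ensures the instruction range before the exception is
	 * reported as ending in a branch, so branch samples and the
	 * last-branch buffer stay consistent when control flow changes due
	 * to the exception.
	 */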
	if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
		tidq->prev_packet->last_instr_taken_branch = true;

	return 0;
}

static int cs_etm__flush(struct cs_etm_queue *etmq,
			 struct cs_etm_traceid_queue *tidq)
{
	int err = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;

	/* Handle start tracing packet */
	if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
		goto swap_packet;

	if (etmq->etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		u64 addr;

		/* Prepare last branches for instruction sample */
		cs_etm__copy_last_branch_rb(etmq, tidq);

		/*
		 * Generate a last branch event for the branches left in the
		 * circular buffer at the end of the trace.
		 *
		 * Use the address of the end of the last reported execution
		 * range.
		 */
		addr = cs_etm__last_executed_instr(tidq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, tidq, addr,
			tidq->period_instructions);
		if (err)
			return err;

		tidq->period_instructions = 0;
	}

	if (etm->sample_branches &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		err = cs_etm__synth_branch_sample(etmq, tidq);
		if (err)
			return err;
	}

swap_packet:
	cs_etm__packet_swap(etm, tidq);

	/* Reset last branches after flushing the trace */
	if (etm->synth_opts.last_branch)
		cs_etm__reset_last_branch_rb(tidq);

	return err;
}

static int cs_etm__end_block(struct cs_etm_queue *etmq,
			     struct cs_etm_traceid_queue *tidq)
{
	int err;

	/*
	 * No new packet is coming and 'etmq->packet' still contains the
	 * stale packet from the last swap, so skip generating a branch
	 * sample to avoid using stale data.
	 *
	 * In this case only flush the branch stack and generate a last
	 * branch event for the branches left in the circular buffer at the
	 * end of the trace.
	 */
	if (etmq->etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE) {
		u64 addr;

		/* Prepare last branches for instruction sample */
		cs_etm__copy_last_branch_rb(etmq, tidq);

		/*
		 * Use the address of the end of the last reported execution
		 * range.
		 */
		addr = cs_etm__last_executed_instr(tidq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, tidq, addr,
			tidq->period_instructions);
		if (err)
			return err;

		tidq->period_instructions = 0;
	}

	return 0;
}
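/*
 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
 *			   if need be.
 * Returns:	< 0	if error
 *		= 0	if no more auxtrace_buffer to read
 *		> 0	if the current buffer isn't empty yet
 */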
static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
{
	int ret;

	if (!etmq->buf_len) {
		ret = cs_etm__get_trace(etmq);
		if (ret <= 0)
			return ret;
		/*
		 * We cannot assume consecutive blocks in the data file are
		 * contiguous, reset the decoder to force re-sync.
		 */
		ret = cs_etm_decoder__reset(etmq->decoder);
		if (ret)
			return ret;
	}

	return etmq->buf_len;
}

static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
				 struct cs_etm_packet *packet,
				 u64 end_addr)
{
	/* Initialise to keep compiler happy */
	u16 instr16 = 0;
	u32 instr32 = 0;
	u64 addr;

	switch (packet->isa) {
	case CS_ETM_ISA_T32:
		/*
		 * The T32 SVC encoding is:
		 *
		 *  b'15             b'8
		 * +-----------------+--------+
		 * | 1 1 0 1 1 1 1 1 |  imm8  |
		 * +-----------------+--------+
		 *
		 * The architecture only defines SVC as a 16-bit T32
		 * instruction, so read 2 bytes here.
		 */
		addr = end_addr - 2;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr16), (u8 *)&instr16);
		if ((instr16 & 0xFF00) == 0xDF00)
			return true;

		break;
	case CS_ETM_ISA_A32:
		/*
		 * The A32 SVC encoding is:
		 *
		 *  b'31 b'28 b'27 b'24
		 * +---------+---------+-------------------------+
		 * |  !1111  | 1 1 1 1 |          imm24          |
		 * +---------+---------+-------------------------+
		 */
		addr = end_addr - 4;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr32), (u8 *)&instr32);
		if ((instr32 & 0x0F000000) == 0x0F000000 &&
		    (instr32 & 0xF0000000) != 0xF0000000)
			return true;

		break;
	case CS_ETM_ISA_A64:
		/*
		 * The A64 SVC encoding is:
		 *
		 *  b'31                   b'21       b'4     b'0
		 * +-----------------------+---------+-----------+
		 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
		 * +-----------------------+---------+-----------+
		 */
		addr = end_addr - 4;
		cs_etm__mem_access(etmq, trace_chan_id, addr,
				   sizeof(instr32), (u8 *)&instr32);
		if ((instr32 & 0xFFE0001F) == 0xd4000001)
			return true;

		break;
	case CS_ETM_ISA_UNKNOWN:
	default:
		break;
	}

	return false;
}

static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
			       struct cs_etm_traceid_queue *tidq, u64 magic)
{
	u8 trace_chan_id = tidq->trace_chan_id;
	struct cs_etm_packet *packet = tidq->packet;
	struct cs_etm_packet *prev_packet = tidq->prev_packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_SVC)
			return true;

	/*
	 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
	 * HVC cases; need to check if it's an SVC instruction based on
	 * the packet address.
	 */
	if (magic == __perf_cs_etmv4_magic) {
		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
		    cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
					 prev_packet->end_addr))
			return true;
	}

	return false;
}

static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
				       u64 magic)
{
	struct cs_etm_packet *packet = tidq->packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
		    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
		    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
		    packet->exception_number == CS_ETMV3_EXC_IRQ ||
		    packet->exception_number == CS_ETMV3_EXC_FIQ)
			return true;

	if (magic == __perf_cs_etmv4_magic)
		if (packet->exception_number == CS_ETMV4_EXC_RESET ||
		    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
		    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
		    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
		    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
		    packet->exception_number == CS_ETMV4_EXC_IRQ ||
		    packet->exception_number == CS_ETMV4_EXC_FIQ)
			return true;

	return false;
}

static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
				      struct cs_etm_traceid_queue *tidq,
				      u64 magic)
{
	u8 trace_chan_id = tidq->trace_chan_id;
	struct cs_etm_packet *packet = tidq->packet;
	struct cs_etm_packet *prev_packet = tidq->prev_packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_SMC ||
		    packet->exception_number == CS_ETMV3_EXC_HYP ||
		    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
		    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
		    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
		    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
		    packet->exception_number == CS_ETMV3_EXC_GENERIC)
			return true;

	if (magic == __perf_cs_etmv4_magic) {
		if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
		    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
		    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
		    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
			return true;

		/*
		 * For CS_ETMV4_EXC_CALL, instructions other than SVC
		 * (SMC, HVC) are taken as synchronous exceptions.
		 */
		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
		    !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
					  prev_packet->end_addr))
			return true;

		/*
		 * ETMv4 has 5 bits for the exception number; if the number
		 * is in the range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END],
		 * it is an implementation defined exception.
		 *
		 * For this case, simply take it as a synchronous exception.
		 */
		if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
		    packet->exception_number <= CS_ETMV4_EXC_END)
			return true;
	}

	return false;
}

static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
				    struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_packet *packet = tidq->packet;
	struct cs_etm_packet *prev_packet = tidq->prev_packet;
	u8 trace_chan_id = tidq->trace_chan_id;
	u64 magic;
	int ret;

	switch (packet->sample_type) {
	case CS_ETM_RANGE:
		/*
		 * Immediate branch instruction with neither link nor
		 * return flag: a normal branch instruction within
		 * the function.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR &&
		    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
			packet->flags = PERF_IP_FLAG_BRANCH;

			if (packet->last_instr_cond)
				packet->flags |= PERF_IP_FLAG_CONDITIONAL;
		}

		/*
		 * Immediate branch instruction with link (e.g. BL): a
		 * branch instruction for a function call.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR &&
		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL;

		/*
		 * Indirect branch instruction with link (e.g. BLR): a
		 * branch instruction for a function call.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL;

		/*
		 * Indirect branch instruction with subtype
		 * OCSD_S_INSTR_V7_IMPLIED_RET: an explicit hint for a
		 * function return on A32/T32.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/*
		 * Indirect branch instruction without link (e.g. BR):
		 * usually used for a function return, especially for
		 * functions within dynamically linked libraries.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/* Return instruction for function return. */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/*
		 * The decoder might insert a discontinuity in the middle of
		 * a program flow, e.g. due to a trace buffer overflow.  In
		 * that case the previous discontinuity packet marks the
		 * start of a new trace flow.
		 */
		if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
					      PERF_IP_FLAG_TRACE_BEGIN;

		/*
		 * If the previous packet is an exception return packet
		 * and the return address just follows an SVC instruction,
		 * calibrate the previous packet sample flags to
		 * PERF_IP_FLAG_SYSCALLRET.
		 */
		if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_RETURN |
					   PERF_IP_FLAG_INTERRUPT) &&
		    cs_etm__is_svc_instr(etmq, trace_chan_id,
					 packet, packet->start_addr))
			prev_packet->flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_RETURN |
					     PERF_IP_FLAG_SYSCALLRET;
		break;
	case CS_ETM_DISCONTINUITY:
		/*
		 * The trace is discontinuous; if the previous packet is an
		 * instruction range packet, set PERF_IP_FLAG_TRACE_END on
		 * the previous packet.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
					      PERF_IP_FLAG_TRACE_END;
		break;
	case CS_ETM_EXCEPTION:
		ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
		if (ret)
			return ret;

		/* The exception is for a system call. */
		if (cs_etm__is_syscall(etmq, tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_SYSCALLRET;
		/*
		 * The exception is triggered by an external signal from the
		 * bus, interrupt controller, debug module, PE reset or halt.
		 */
		else if (cs_etm__is_async_exception(tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_INTERRUPT;
		/*
		 * Otherwise, the exception is caused by a trap, instruction
		 * or data fault, or an alignment error.
		 */
		else if (cs_etm__is_sync_exception(etmq, tidq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_INTERRUPT;

		/*
		 * The exception packet is not used standalone for generating
		 * samples; it is affiliated with the previous instruction
		 * range packet, so set the previous range packet flags to
		 * tell perf it is an exception taken branch.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags = packet->flags;
		break;
	case CS_ETM_EXCEPTION_RET:
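		/*
		 * The exception return packet is not used standalone for
		 * generating samples either; it is affiliated with the
		 * previous instruction range packet, so set the previous
		 * range packet flags to tell perf this is an exception
		 * return branch.
		 *
		 * The return can be from a system call or any other
		 * exception type, but the packet doesn't carry the exception
		 * type.  Recording the exception number from the exception
		 * packet and reusing it here is unreliable too, since the
		 * trace can be discontinuous or interrupts can be nested.
		 *
		 * The decision is therefore deferred: flag the return as an
		 * interrupt return here and, when the next packet arrives
		 * with the return address, read the previous instruction to
		 * check whether it is a system call instruction and, if so,
		 * calibrate the flags to PERF_IP_FLAG_SYSCALLRET (see the
		 * CS_ETM_RANGE case above).
		 */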
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_RETURN |
					     PERF_IP_FLAG_INTERRUPT;
		break;
	case CS_ETM_EMPTY:
	default:
		break;
	}

	return 0;
}

static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
{
	int ret = 0;
	size_t processed = 0;

	/*
	 * Packets are decoded and added to the decoder's packet queue
	 * until the decoder packet processing callback has requested that
	 * processing stops or there is nothing left in the buffer.  Normal
	 * operations that stop processing are a timestamp packet or a full
	 * decoder buffer queue.
	 */
	ret = cs_etm_decoder__process_data_block(etmq->decoder,
						 etmq->offset,
						 &etmq->buf[etmq->buf_used],
						 etmq->buf_len,
						 &processed);
	if (ret)
		goto out;

	etmq->offset += processed;
	etmq->buf_used += processed;
	etmq->buf_len -= processed;

out:
	return ret;
}

static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
					 struct cs_etm_traceid_queue *tidq)
{
	int ret;
	struct cs_etm_packet_queue *packet_queue;

	packet_queue = &tidq->packet_queue;

	/* Process each packet in this chunk */
	while (1) {
		ret = cs_etm_decoder__get_packet(packet_queue,
						 tidq->packet);
		if (ret <= 0)
			/*
			 * Stop processing this chunk on
			 * end of data or error
			 */
			break;

		/*
		 * Sample flags must be set before the switch() statement
		 * below: packet addresses are swapped during packet handling
		 * there, and the address information is needed before the
		 * swap happens.
		 */
		ret = cs_etm__set_sample_flags(etmq, tidq);
		if (ret < 0)
			break;

		switch (tidq->packet->sample_type) {
		case CS_ETM_RANGE:
			/*
			 * If the packet contains an instruction
			 * range, generate instruction sequence
			 * events.
			 */
			cs_etm__sample(etmq, tidq);
			break;
		case CS_ETM_EXCEPTION:
		case CS_ETM_EXCEPTION_RET:
			/*
			 * If an exception packet is coming,
			 * make sure the previous instruction
			 * range packet is handled properly.
			 */
			cs_etm__exception(tidq);
			break;
		case CS_ETM_DISCONTINUITY:
			/*
			 * Discontinuity in trace, flush
			 * previous branch stack
			 */
			cs_etm__flush(etmq, tidq);
			break;
		case CS_ETM_EMPTY:
			/*
			 * Should not receive empty packet,
			 * report error.
			 */
			pr_err("CS ETM Trace: empty packet\n");
			return -EINVAL;
		default:
			break;
		}
	}

	return ret;
}

static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
{
	int idx;
	struct int_node *inode;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry(inode, traceid_queues_list) {
		idx = (int)(intptr_t)inode->priv;
		tidq = etmq->traceid_queues[idx];

		/* Ignore return value */
		cs_etm__process_traceid_queue(etmq, tidq);

		/*
		 * Generate an instruction sample with the remaining
		 * branchstack entries.
		 */
		cs_etm__flush(etmq, tidq);
	}
}

static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
	int err = 0;
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
	if (!tidq)
		return -EINVAL;

	/* Go through each buffer in the queue and decode them one by one */
	while (1) {
		err = cs_etm__get_data_block(etmq);
		if (err <= 0)
			return err;

		/* Run trace decoder until buffer consumed or end of trace */
		do {
			err = cs_etm__decode_data_block(etmq);
			if (err)
				return err;

			/*
			 * Process each packet in this chunk, nothing to do
			 * if an error occurs other than hoping the next one
			 * will be better.
			 */
			err = cs_etm__process_traceid_queue(etmq, tidq);

		} while (etmq->buf_len);

		if (err == 0)
			/* Flush any remaining branch stack entries */
			err = cs_etm__end_block(etmq, tidq);
	}

	return err;
}

static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid)
{
	unsigned int i;
	struct auxtrace_queues *queues = &etm->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
		struct cs_etm_queue *etmq = queue->priv;
		struct cs_etm_traceid_queue *tidq;

		if (!etmq)
			continue;

		tidq = cs_etm__etmq_get_traceid_queue(etmq,
						      CS_ETM_PER_THREAD_TRACEID);

		if (!tidq)
			continue;

		if ((tid == -1) || (tidq->tid == tid)) {
			cs_etm__set_pid_tid_cpu(etm, tidq);
			cs_etm__run_decoder(etmq);
		}
	}

	return 0;
}

static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
{
	int ret = 0;
	unsigned int cs_queue_nr, queue_nr;
	u8 trace_chan_id;
	u64 timestamp;
	struct auxtrace_queue *queue;
	struct cs_etm_queue *etmq;
	struct cs_etm_traceid_queue *tidq;

	while (1) {
		if (!etm->heap.heap_cnt)
			goto out;

		/* Take the entry at the top of the min heap */
		cs_queue_nr = etm->heap.heap_array[0].queue_nr;
		queue_nr = TO_QUEUE_NR(cs_queue_nr);
		trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
		queue = &etm->queues.queue_array[queue_nr];
		etmq = queue->priv;

		/*
		 * Remove the top entry from the heap since we are about
		 * to process it.
		 */
		auxtrace_heap__pop(&etm->heap);

		tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
		if (!tidq) {
			/*
			 * No traceID queue has been allocated for this
			 * traceID, which means something somewhere went very
			 * wrong.  No other choice than simply exit.
			 */
			ret = -EINVAL;
			goto out;
		}

		/*
		 * Packets associated with this timestamp are already in
		 * the decoder's packet queue, decode them.
		 */
		ret = cs_etm__process_traceid_queue(etmq, tidq);
		if (ret < 0)
			goto out;

		/*
		 * Packets for this timestamp have been processed.  Fetch and
		 * decode another block of trace data to find the next
		 * timestamp for this queue.
		 */
refetch:
		ret = cs_etm__get_data_block(etmq);
		if (ret < 0)
			goto out;

		/*
		 * No more auxtrace_buffers to process in this etmq, simply
		 * move on to another entry in the auxtrace_heap.
		 */
		if (!ret)
			continue;

		ret = cs_etm__decode_data_block(etmq);
		if (ret)
			goto out;

		timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

		if (!timestamp) {
			/*
			 * Function cs_etm__decode_data_block() returns when
			 * there are no more traces to decode in the current
			 * auxtrace_buffer OR when a timestamp has been
			 * encountered on any of the traceID queues.  Since we
			 * did not get a timestamp, there are no more traces
			 * to process in this auxtrace_buffer.  As such empty
			 * and flush all traceID queues.
			 */
			cs_etm__clear_all_traceid_queues(etmq);

			/* Fetch another auxtrace_buffer for this etmq */
			goto refetch;
		}

		/*
		 * Add to the min heap the timestamp for packets that have
		 * just been decoded.  They will be processed and synthesized
		 * during the next call to cs_etm__process_traceid_queue() for
		 * this queue/traceID.
		 */
		cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
		ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, timestamp);
	}

out:
	return ret;
}

static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
					union perf_event *event)
{
	struct thread *th;

	if (etm->timeless_decoding)
		return 0;

	/*
	 * Add the tid/pid to the log so that we can get a match when
	 * we get a contextID from the decoder.
	 */
	th = machine__findnew_thread(etm->machine,
				     event->itrace_start.pid,
				     event->itrace_start.tid);
	if (!th)
		return -ENOMEM;

	thread__put(th);

	return 0;
}

static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
					   union perf_event *event)
{
	struct thread *th;
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;

	/*
	 * Context switches in per-thread mode are irrelevant since perf
	 * will start/stop tracing as the process is scheduled.
	 */
	if (etm->timeless_decoding)
		return 0;

	/*
	 * SWITCH_IN events carry the next process to be switched out while
	 * SWITCH_OUT events carry the process to be switched in.  As such
	 * we don't care about IN events.
	 */
	if (!out)
		return 0;

	/*
	 * Add the tid/pid to the log so that we can get a match when
	 * we get a contextID from the decoder.
	 */
	th = machine__findnew_thread(etm->machine,
				     event->context_switch.next_prev_pid,
				     event->context_switch.next_prev_tid);
	if (!th)
		return -ENOMEM;

	thread__put(th);

	return 0;
}

static int cs_etm__process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
{
	int err = 0;
	u64 timestamp;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("CoreSight ETM Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && (sample->time != (u64) -1))
		timestamp = sample->time;
	else
		timestamp = 0;

	if (timestamp || etm->timeless_decoding) {
		err = cs_etm__update_queues(etm);
		if (err)
			return err;
	}

	if (etm->timeless_decoding &&
	    event->header.type == PERF_RECORD_EXIT)
		return cs_etm__process_timeless_queues(etm,
						       event->fork.tid);

	if (event->header.type == PERF_RECORD_ITRACE_START)
		return cs_etm__process_itrace_start(etm, event);
	else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		return cs_etm__process_switch_cpu_wide(etm, event);

	if (!etm->timeless_decoding &&
	    event->header.type == PERF_RECORD_AUX)
		return cs_etm__process_queues(etm);

	return 0;
}

static int cs_etm__process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (!etm->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		bool is_pipe = perf_data__is_pipe(session->data);
		int err;

		if (is_pipe)
			data_offset = 0;
		else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&etm->queues, session,
						 event, data_offset, &buffer);
		if (err)
			return err;

		if (dump_trace)
			if (auxtrace_buffer__get_data(buffer, fd)) {
				cs_etm__dump_event(etm, buffer);
				auxtrace_buffer__put_data(buffer);
			}
	}

	return 0;
}

static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
{
	struct evsel *evsel;
	struct evlist *evlist = etm->session->evlist;
	bool timeless_decoding = true;

	/*
	 * Cycle through the list of events; decoding is not timeless if any
	 * of them has the time bit set.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
			timeless_decoding = false;
	}

	return timeless_decoding;
}

static const char * const cs_etm_global_header_fmts[] = {
	[CS_HEADER_VERSION]	= "	Header version		%llx\n",
	[CS_PMU_TYPE_CPUS]	= "	PMU type/num cpus	%llx\n",
	[CS_ETM_SNAPSHOT]	= "	Snapshot		%llx\n",
};

static const char * const cs_etm_priv_fmts[] = {
	[CS_ETM_MAGIC]		= "	Magic number		%llx\n",
	[CS_ETM_CPU]		= "	CPU			%lld\n",
	[CS_ETM_NR_TRC_PARAMS]	= "	NR_TRC_PARAMS		%llx\n",
	[CS_ETM_ETMCR]		= "	ETMCR			%llx\n",
	[CS_ETM_ETMTRACEIDR]	= "	ETMTRACEIDR		%llx\n",
	[CS_ETM_ETMCCER]	= "	ETMCCER			%llx\n",
	[CS_ETM_ETMIDR]		= "	ETMIDR			%llx\n",
};

static const char * const cs_etmv4_priv_fmts[] = {
	[CS_ETM_MAGIC]		= "	Magic number		%llx\n",
	[CS_ETM_CPU]		= "	CPU			%lld\n",
	[CS_ETM_NR_TRC_PARAMS]	= "	NR_TRC_PARAMS		%llx\n",
	[CS_ETMV4_TRCCONFIGR]	= "	TRCCONFIGR		%llx\n",
	[CS_ETMV4_TRCTRACEIDR]	= "	TRCTRACEIDR		%llx\n",
	[CS_ETMV4_TRCIDR0]	= "	TRCIDR0			%llx\n",
	[CS_ETMV4_TRCIDR1]	= "	TRCIDR1			%llx\n",
	[CS_ETMV4_TRCIDR2]	= "	TRCIDR2			%llx\n",
	[CS_ETMV4_TRCIDR8]	= "	TRCIDR8			%llx\n",
	[CS_ETMV4_TRCAUTHSTATUS] = "	TRCAUTHSTATUS		%llx\n",
};

static const char * const param_unk_fmt =
	"	Unknown parameter [%d]	%llx\n";
static const char * const magic_unk_fmt =
	"	Magic number	Unknown	%llx\n";

static int cs_etm__print_cpu_metadata_v0(__u64 *val, int *offset)
{
	int i = *offset, j, nr_params = 0, fmt_offset;
	__u64 magic;

	/* check magic value */
	magic = val[i + CS_ETM_MAGIC];
	if ((magic != __perf_cs_etmv3_magic) &&
	    (magic != __perf_cs_etmv4_magic)) {
		/* failure - note bad magic value */
		fprintf(stdout, magic_unk_fmt, magic);
		return -EINVAL;
	}

	/* print common header block */
	fprintf(stdout, cs_etm_priv_fmts[CS_ETM_MAGIC], val[i++]);
	fprintf(stdout, cs_etm_priv_fmts[CS_ETM_CPU], val[i++]);

	if (magic == __perf_cs_etmv3_magic) {
		nr_params = CS_ETM_NR_TRC_PARAMS_V0;
		fmt_offset = CS_ETM_ETMCR;
		/* after the common block, offset the format index past NR_PARAMS */
		for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
			fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
	} else if (magic == __perf_cs_etmv4_magic) {
		nr_params = CS_ETMV4_NR_TRC_PARAMS_V0;
		fmt_offset = CS_ETMV4_TRCCONFIGR;
		/* after the common block, offset the format index past NR_PARAMS */
		for (j = fmt_offset; j < nr_params + fmt_offset; j++, i++)
			fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
	}
	*offset = i;
	return 0;
}
2547
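/*
 * Dump a version 1 per-cpu block.  Here NR_TRC_PARAMS is part of the file,
 * so the parameter count can be smaller or larger than the set this build
 * of perf knows about; anything beyond the known set is dumped raw.
 */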
static int cs_etm__print_cpu_metadata_v1(__u64 *val, int *offset)
{
	int i = *offset, j, total_params = 0;
	__u64 magic;

	magic = val[i + CS_ETM_MAGIC];
	/* total params to print is NR_PARAMS + common block size for v1 */
	total_params = val[i + CS_ETM_NR_TRC_PARAMS] + CS_ETM_COMMON_BLK_MAX_V1;

	if (magic == __perf_cs_etmv3_magic) {
		for (j = 0; j < total_params; j++, i++) {
			/* if newer record - could be excess params */
			if (j >= CS_ETM_PRIV_MAX)
				fprintf(stdout, param_unk_fmt, j, val[i]);
			else
				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
		}
	} else if (magic == __perf_cs_etmv4_magic) {
		for (j = 0; j < total_params; j++, i++) {
			/* if newer record - could be excess params */
			if (j >= CS_ETMV4_PRIV_MAX)
				fprintf(stdout, param_unk_fmt, j, val[i]);
			else
				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
		}
	} else {
		/* failure - note bad magic value and error out */
		fprintf(stdout, magic_unk_fmt, magic);
		return -EINVAL;
	}
	*offset = i;
	return 0;
}

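/* Dump the global header, then each cpu's metadata block in turn. */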
static void cs_etm__print_auxtrace_info(__u64 *val, int num)
{
	int i, cpu = 0, version, err = 0;

	/* bail out early on bad header version */
	version = val[0];
	if (version > CS_HEADER_CURRENT_VERSION) {
		/* failure - note unsupported version and return */
		fprintf(stdout, "	Unknown Header Version = %x, ", version);
		fprintf(stdout, "Version supported <= %x\n", CS_HEADER_CURRENT_VERSION);
		return;
	}

	for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);

	/* i now indexes the first cpu block; the printers advance it */
	for (i = CS_HEADER_VERSION_MAX; cpu < num; cpu++) {
		if (version == 0)
			err = cs_etm__print_cpu_metadata_v0(val, &i);
		else if (version == 1)
			err = cs_etm__print_cpu_metadata_v1(val, &i);
		if (err)
			return;
	}
}
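
/*
 * Read a single cpu parameter block from the auxtrace_info priv block.
 *
 * For a version 0 info block the number of parameters is fixed at compile
 * time, so the NR_TRC_PARAMS entry of the output block must be filled in
 * here.  For version 1 the file itself says how many parameters follow,
 * which may be fewer, the same, or more than this build understands.
 */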
static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
				    int out_blk_size, int nr_params_v0)
{
	u64 *metadata = NULL;
	int hdr_version;
	int nr_in_params, nr_out_params, nr_cmn_params;
	int i, k;

	metadata = zalloc(sizeof(*metadata) * out_blk_size);
	if (!metadata)
		return NULL;

	/* read block current index & version */
	i = *buff_in_offset;
	hdr_version = buff_in[CS_HEADER_VERSION];

	if (!hdr_version) {
		/* read version 0 info block into a version 1 metadata block */
		nr_in_params = nr_params_v0;
		metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
		metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
		metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;

		/* remaining block params at offset +1 from source */
		for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
			metadata[k + 1] = buff_in[i + k];

		/* version 0 has 2 common params */
		nr_cmn_params = 2;
	} else {
		/* read version 1 info block - input and output nr_params may differ */
		/* version 1 has 3 common params */
		nr_cmn_params = 3;
		nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];

		/* if input has more params than output - skip excess */
		nr_out_params = nr_in_params + nr_cmn_params;
		if (nr_out_params > out_blk_size)
			nr_out_params = out_blk_size;

		for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
			metadata[k] = buff_in[i + k];

		/* record the actual nr params we copied */
		metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
	}

	/* adjust in offset by number of in params used */
	i += nr_in_params + nr_cmn_params;
	*buff_in_offset = i;
	return metadata;
}

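/*
 * The private area of the PERF_RECORD_AUXTRACE_INFO event holds the global
 * header followed by one metadata block per cpu.  For a version 1 file the
 * 64-bit words are laid out roughly as:
 *
 *	[ version | pmu_type/num_cpu | snapshot ]	global header
 *	[ magic | cpu | nr_params | param 0..n ]	cpu 0
 *	[ magic | cpu | nr_params | param 0..n ]	cpu 1, and so on
 */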
int cs_etm__process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
{
	struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
	struct cs_etm_auxtrace *etm = NULL;
	struct int_node *inode;
	unsigned int pmu_type;
	int event_header_size = sizeof(struct perf_event_header);
	int info_header_size;
	int total_size = auxtrace_info->header.size;
	int priv_size = 0;
	int num_cpu, trcidr_idx;
	int err = 0;
	int i, j;
	u64 *ptr, *hdr = NULL;
	u64 **metadata = NULL;
	u64 hdr_version;

	/*
	 * sizeof(auxtrace_info_event::type) +
	 * sizeof(auxtrace_info_event::reserved) == 8
	 */
	info_header_size = 8;

	if (total_size < (event_header_size + info_header_size))
		return -EINVAL;

	priv_size = total_size - event_header_size - info_header_size;

	/* First the global part */
	ptr = (u64 *) auxtrace_info->priv;

	/* Look for version of the header */
	hdr_version = ptr[0];
	if (hdr_version > CS_HEADER_CURRENT_VERSION) {
		/* print routine will print an error on bad version */
		if (dump_trace)
			cs_etm__print_auxtrace_info(auxtrace_info->priv, 0);
		return -EINVAL;
	}

	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_MAX);
	if (!hdr)
		return -ENOMEM;

	/* Extract header information - see cs-etm.h for format */
	for (i = 0; i < CS_HEADER_VERSION_MAX; i++)
		hdr[i] = ptr[i];
	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
				   0xffffffff);

	/*
	 * Create an RB tree for traceID-metadata tuple.  Since the conversion
	 * has to be made for each packet that gets decoded, optimizing access
	 * in anything other than a sequential array is worth doing.
	 */
	traceid_list = intlist__new(NULL);
	if (!traceid_list) {
		err = -ENOMEM;
		goto err_free_hdr;
	}

	metadata = zalloc(sizeof(*metadata) * num_cpu);
	if (!metadata) {
		err = -ENOMEM;
		goto err_free_traceid_list;
	}

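	/*
	 * The metadata is stored in the auxtrace_info section and encodes
	 * the configuration of the ARM embedded trace macrocell which is
	 * required by the trace decoder to properly decode the trace due
	 * to its highly compressed nature.
	 */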
	for (j = 0; j < num_cpu; j++) {
		if (ptr[i] == __perf_cs_etmv3_magic) {
			metadata[j] =
				cs_etm__create_meta_blk(ptr, &i,
							CS_ETM_PRIV_MAX,
							CS_ETM_NR_TRC_PARAMS_V0);

			/* The traceID is our handle */
			trcidr_idx = CS_ETM_ETMTRACEIDR;

		} else if (ptr[i] == __perf_cs_etmv4_magic) {
			metadata[j] =
				cs_etm__create_meta_blk(ptr, &i,
							CS_ETMV4_PRIV_MAX,
							CS_ETMV4_NR_TRC_PARAMS_V0);

			/* The traceID is our handle */
			trcidr_idx = CS_ETMV4_TRCTRACEIDR;
		} else {
			/* Unknown magic number - cannot parse this cpu block */
			err = -EINVAL;
			goto err_free_metadata;
		}

		/* A NULL block here means cs_etm__create_meta_blk() failed */
		if (!metadata[j]) {
			err = -ENOMEM;
			goto err_free_metadata;
		}

		/* Get an RB node for this CPU */
		inode = intlist__findnew(traceid_list, metadata[j][trcidr_idx]);

		/* Something went wrong, no need to continue */
		if (!inode) {
			err = -ENOMEM;
			goto err_free_metadata;
		}

		/*
		 * The node for that CPU should not be taken already.  Back
		 * out if that's the case.
		 */
		if (inode->priv) {
			err = -EINVAL;
			goto err_free_metadata;
		}

		/* All good, associate the traceID with the metadata pointer */
		inode->priv = metadata[j];
	}

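	/*
	 * i now points one past the last word consumed from the info section.
	 * The total consumed, in bytes, must match the advertised private
	 * area size exactly or the file cannot be trusted.
	 */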
	if (i * 8 != priv_size) {
		err = -EINVAL;
		goto err_free_metadata;
	}

	etm = zalloc(sizeof(*etm));

	if (!etm) {
		err = -ENOMEM;
		goto err_free_metadata;
	}

	err = auxtrace_queues__init(&etm->queues);
	if (err)
		goto err_free_etm;

	etm->session = session;
	etm->machine = &session->machines.host;

	etm->num_cpu = num_cpu;
	etm->pmu_type = pmu_type;
	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
	etm->metadata = metadata;
	etm->auxtrace_type = auxtrace_info->type;
	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);

	etm->auxtrace.process_event = cs_etm__process_event;
	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
	etm->auxtrace.flush_events = cs_etm__flush_events;
	etm->auxtrace.free_events = cs_etm__free_events;
	etm->auxtrace.free = cs_etm__free;
	etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
	session->auxtrace = &etm->auxtrace;

	etm->unknown_thread = thread__new(999999999, 999999999);
	if (!etm->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Initialize list node so that at thread__zput() we can avoid
	 * segmentation fault at list_del_init().
	 */
	INIT_LIST_HEAD(&etm->unknown_thread->node);

	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;

	if (thread__init_maps(etm->unknown_thread, etm->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	if (dump_trace) {
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
		return 0;
	}

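	/*
	 * Use the --itrace options from the command line if given, otherwise
	 * fall back to the defaults with callchains disabled.
	 */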
	if (session->itrace_synth_opts->set) {
		etm->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&etm->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		etm->synth_opts.callchain = false;
	}

	err = cs_etm__synth_events(etm, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&etm->queues, session);
	if (err)
		goto err_delete_thread;

	etm->data_queued = etm->queues.populated;

	return 0;

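/* Error unwind: undo the setup above in reverse order */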
err_delete_thread:
	thread__zput(etm->unknown_thread);
err_free_queues:
	auxtrace_queues__free(&etm->queues);
	session->auxtrace = NULL;
err_free_etm:
	zfree(&etm);
err_free_metadata:
	/* No need to check each entry - zfree() copes with NULL pointers */
	for (j = 0; j < num_cpu; j++)
		zfree(&metadata[j]);
	zfree(&metadata);
err_free_traceid_list:
	intlist__delete(traceid_list);
err_free_hdr:
	zfree(&hdr);
	/*
	 * At this point, as a minimum we have a valid header. Dump the rest of
	 * the info section - the print routines will error out on structural
	 * issues.
	 */
	if (dump_trace)
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
	return err;
}
