// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include <internal/lib.h>
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *perf_evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *perf_evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}
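
/*
 * Rough usage sketch (illustrative only, not a sequence lifted from any one
 * tool): a typical consumer builds an evlist, opens and maps it, enables the
 * events, and eventually tears everything down, e.g.:
 *
 *	struct evlist *evlist = evlist__new();
 *
 *	if (evlist && perf_evlist__add_default(evlist) == 0 &&
 *	    evlist__open(evlist) == 0 &&
 *	    evlist__mmap(evlist, UINT_MAX) == 0)
 *		evlist__enable(evlist);
 *	...
 *	evlist__delete(evlist);
 *
 * Error handling and cpu/thread map setup (perf_evlist__create_maps()) are
 * omitted here; see the builtin-* tools for complete sequences.
 */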

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos and is_pos.
 * For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		perf_evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void perf_evlist__splice_list_tail(struct evlist *evlist,
				   struct list_head *list)
{
	struct evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->core.node);
		evlist__add(evlist, evsel);
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	struct evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		/* A handler for an event not in this evlist is simply ignored. */
		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct evlist *evlist)
{
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->core.entries);
	}
}

int __perf_evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr),
	};
	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__add_attrs(struct evlist *evlist,
			     struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
				     const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct evlist *evlist,
				   struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}

void evlist__cpu_iter_start(struct evlist *evlist)
{
	struct evsel *pos;

	/*
	 * Reset the per evsel cpu_iter. This is needed because each evsel's
	 * cpumap may have a different index space, and some operations need
	 * the index to modify the FD structure.
	 */
	evlist__for_each_entry(evlist, pos)
		pos->cpu_iter = 0;
}

bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
	if (ev->cpu_iter >= ev->core.cpus->nr)
		return true;
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
		return true;
	return false;
}

bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
		ev->cpu_iter++;
		return false;
	}
	return true;
}

void evlist__disable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i, imm = 0;
	bool has_imm = false;

	if (affinity__setup(&affinity) < 0)
		return;

	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {
			affinity__set(&affinity, cpu);

			evlist__for_each_entry(evlist, pos) {
				if (evsel__cpu_iter_skip(pos, cpu))
					continue;
				if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
					continue;
				if (pos->immediate)
					has_imm = true;
				if (pos->immediate != imm)
					continue;
				evsel__disable_cpu(pos, pos->cpu_iter - 1);
			}
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	evlist->enabled = false;
}

void evlist__enable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;
			if (!evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			evsel__enable_cpu(pos, pos->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct evlist *evlist,
					    struct evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct evlist *evlist,
				  struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!perf_evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
					   u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
				       union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int perf_evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but lets not
		 * die yet.
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1       },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got a file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got a pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to the next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
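
/*
 * Worked example (illustrative, assuming a 4096 byte page_size):
 * parse_pages_arg("512K", 1, max) converts 512 KiB to 128 pages;
 * parse_pages_arg("100", 1, max) is not a power of two and is rounded up to
 * 128 pages with a notice; parse_pages_arg("0", 0, max) leaves it at zero.
 */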

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}


/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 * @nr_cblocks: number of AIO control blocks for asynchronous data writing
 * @affinity: mmap buffer affinity mode
 * @flush: minimum number of bytes to accumulate before flushing
 * @comp_level: compression level for mmapped data
 *
 * For the non-overwrite ring buffers the caller has to signal event
 * consumption so the kernel can reuse the space; reading through the
 * perf_mmap helpers does this automatically.  Similarly, if
 * @auxtrace_overwrite is %false the caller needs to signal auxtrace data
 * consumption.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are passed to perf record, '-a'
	 * overrides '--per-thread': target->per_thread = false and
	 * target->system_wide = true, so target->per_thread &&
	 * target->system_wide is false and thread_map__new_str() doesn't
	 * expand a dummy thread map into an all-CPUs map.
	 *
	 * perf stat, however, allows both target->per_thread and
	 * target->system_wide to be true, meaning system-wide per-thread
	 * collection; in that case thread_map__new_str() enumerates all
	 * threads on the system.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__reset_sample_bit(evsel, bit);
}

int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * cpu limit, so evlist and evsel should always agree here.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter = NULL;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}
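
/*
 * Example (illustrative): for pids {1234, 5678} the string built above is
 * "common_pid != 1234 && common_pid != 5678", which the helpers below then
 * install as a filter on every tracepoint evsel in the evlist.
 */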

int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
}

bool perf_evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct affinity affinity;
	int cpu, i;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;
	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			if (evsel__cpu_iter_skip(evsel, cpu))
				continue;
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
}

static int perf_evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get an all-cpus map.
	 *
	 * -ENOMEM is the best we can report here: the cpu_map code falls back
	 * to a default CPU identity map and does not forward the underlying
	 * error.
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;
out:
	return err;
out_put:
	perf_cpu_map__put(cpus);
	goto out;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide,
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL.
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go.
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent asks for the execvp() to be performed by writing
		 * exactly one byte.  For cancelling the workload without
		 * actually running it, the parent just closes
		 * workload.cork_fd without writing anything, i.e. read()
		 * returns zero and we simply exit() here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);

	/*
	 * Wait for the child to settle.
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;

		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int perf_evlist__strerror_open(struct evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct evlist *evlist,
			   struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void perf_evlist__set_tracking_event(struct evlist *evlist,
				     struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *
perf_evlist__find_evsel_by_str(struct evlist *evlist,
			       const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool perf_evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in the data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
					    struct evsel *evsel,
					    bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			c2->leader = c2;
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	return leader;
}