linux/tools/perf/util/thread.c
// SPDX-License-Identifier: GPL-2.0
#include <errno.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <linux/kernel.h>
#include <linux/zalloc.h>
#include "dso.h"
#include "session.h"
#include "thread.h"
#include "thread-stack.h"
#include "debug.h"
#include "namespaces.h"
#include "comm.h"
#include "map.h"
#include "symbol.h"
#include "unwind.h"
#include "callchain.h"

#include <api/fs/fs.h>

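/*
 * Initialize the address space maps for a thread: the thread group leader
 * (pid == tid), or a thread whose pid is unknown (-1), gets a fresh maps
 * set, while any other thread shares its leader's maps, since all threads
 * of a process share one address space.
 */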
int thread__init_maps(struct thread *thread, struct machine *machine)
{
        pid_t pid = thread->pid_;

        if (pid == thread->tid || pid == -1) {
                thread->maps = maps__new(machine);
        } else {
                struct thread *leader = __machine__findnew_thread(machine, pid, pid);
                if (leader) {
                        thread->maps = maps__get(leader->maps);
                        thread__put(leader);
                }
        }

        return thread->maps ? 0 : -1;
}

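/*
 * Allocate and minimally initialize a thread: until a comm event is seen,
 * the thread carries a ":<tid>" placeholder comm, and it starts out with a
 * reference count of one, held by the caller.
 */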
struct thread *thread__new(pid_t pid, pid_t tid)
{
        char *comm_str;
        struct comm *comm;
        struct thread *thread = zalloc(sizeof(*thread));

        if (thread != NULL) {
                thread->pid_ = pid;
                thread->tid = tid;
                thread->ppid = -1;
                thread->cpu = -1;
                thread->lbr_stitch_enable = false;
                INIT_LIST_HEAD(&thread->namespaces_list);
                INIT_LIST_HEAD(&thread->comm_list);
                init_rwsem(&thread->namespaces_lock);
                init_rwsem(&thread->comm_lock);

                comm_str = malloc(32);
                if (!comm_str)
                        goto err_thread;

                snprintf(comm_str, 32, ":%d", tid);
                comm = comm__new(comm_str, 0, false);
                free(comm_str);
                if (!comm)
                        goto err_thread;

                list_add(&comm->list, &thread->comm_list);
                refcount_set(&thread->refcnt, 1);
                RB_CLEAR_NODE(&thread->rb_node);
                /* Thread holds first ref to nsdata. */
                thread->nsinfo = nsinfo__new(pid);
                srccode_state_init(&thread->srccode_state);
        }

        return thread;

err_thread:
        free(thread);
        return NULL;
}

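/*
 * Tear down a thread once its refcount has dropped to zero: release the
 * maps, the namespaces and comm lists, the nsinfo reference and the LBR
 * stitching state.  The thread must already be off the machine's rb tree.
 */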
void thread__delete(struct thread *thread)
{
        struct namespaces *namespaces, *tmp_namespaces;
        struct comm *comm, *tmp_comm;

        BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));

        thread_stack__free(thread);

        if (thread->maps) {
                maps__put(thread->maps);
                thread->maps = NULL;
        }
        down_write(&thread->namespaces_lock);
        list_for_each_entry_safe(namespaces, tmp_namespaces,
                                 &thread->namespaces_list, list) {
                list_del_init(&namespaces->list);
                namespaces__free(namespaces);
        }
        up_write(&thread->namespaces_lock);

        down_write(&thread->comm_lock);
        list_for_each_entry_safe(comm, tmp_comm, &thread->comm_list, list) {
                list_del_init(&comm->list);
                comm__free(comm);
        }
        up_write(&thread->comm_lock);

        nsinfo__zput(thread->nsinfo);
        srccode_state_free(&thread->srccode_state);

        exit_rwsem(&thread->namespaces_lock);
        exit_rwsem(&thread->comm_lock);
        thread__free_stitch_list(thread);
        free(thread);
}

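/*
 * Reference counting: thread__get() takes a reference, thread__put() drops
 * one and deletes the thread when the last reference is gone.  A typical
 * caller pairs them like this (illustrative sketch, not taken from this
 * file; error handling elided):
 *
 *        struct thread *t = machine__findnew_thread(machine, pid, tid);
 *
 *        if (t != NULL) {
 *                // ... resolve samples against t ...
 *                thread__put(t);
 *        }
 */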
struct thread *thread__get(struct thread *thread)
{
        if (thread)
                refcount_inc(&thread->refcnt);
        return thread;
}

void thread__put(struct thread *thread)
{
        if (thread && refcount_dec_and_test(&thread->refcnt)) {
                /*
                 * If the thread is on a dead threads list, remove it now
                 * that the last reference is gone.
                 *
                 * It may no longer be on such a list if, say, the machine
                 * where it was stored was already deleted: then it was
                 * already removed from the dead threads list while some
                 * other piece of code still held a reference.
                 *
                 * This is what 'perf sched' does: it finally drops the
                 * thread in perf_sched__lat(), which calls
                 * perf_sched__read_events(), which processes the events by
                 * creating and then deleting a session.  Deleting the
                 * session destroys the list heads for the dead threads, but
                 * before that it removes all threads from them using
                 * list_del_init().
                 *
                 * So check here whether the thread is on a dead threads
                 * list and, if so, remove it before finally deleting the
                 * thread, to avoid a use-after-free.
                 */
                if (!list_empty(&thread->node))
                        list_del_init(&thread->node);
                thread__delete(thread);
        }
}

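/*
 * The namespaces list is kept newest-first (list_add() prepends), so the
 * first entry is the set of namespaces currently in effect for the thread.
 */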
static struct namespaces *__thread__namespaces(const struct thread *thread)
{
        if (list_empty(&thread->namespaces_list))
                return NULL;

        return list_first_entry(&thread->namespaces_list, struct namespaces, list);
}

struct namespaces *thread__namespaces(struct thread *thread)
{
        struct namespaces *ns;

        down_read(&thread->namespaces_lock);
        ns = __thread__namespaces(thread);
        up_read(&thread->namespaces_lock);

        return ns;
}

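/*
 * Record a PERF_RECORD_NAMESPACES event: prepend the new namespaces entry
 * and, when this is an update at a given timestamp rather than initial
 * state, close out the previously current entry by setting its end time.
 */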
static int __thread__set_namespaces(struct thread *thread, u64 timestamp,
                                    struct perf_record_namespaces *event)
{
        struct namespaces *new, *curr = __thread__namespaces(thread);

        new = namespaces__new(event);
        if (!new)
                return -ENOMEM;

        list_add(&new->list, &thread->namespaces_list);

        if (timestamp && curr) {
                /*
                 * A setns syscall must have changed some or all of the
                 * namespaces of this thread.  Update the end time of the
                 * namespaces that were previously in use.
                 */
                curr = list_next_entry(new, list);
                curr->end_time = timestamp;
        }

        return 0;
}

int thread__set_namespaces(struct thread *thread, u64 timestamp,
                           struct perf_record_namespaces *event)
{
        int ret;

        down_write(&thread->namespaces_lock);
        ret = __thread__set_namespaces(thread, timestamp, event);
        up_write(&thread->namespaces_lock);
        return ret;
}

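/* Like the namespaces list, the comm list is kept newest-first. */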
struct comm *thread__comm(const struct thread *thread)
{
        if (list_empty(&thread->comm_list))
                return NULL;

        return list_first_entry(&thread->comm_list, struct comm, list);
}

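/*
 * Find the comm associated with the most recent exec: the list is walked
 * newest-first, so the first entry flagged as exec wins.  With no exec comm
 * recorded, fall back to the oldest comm, except in the synthesized-thread
 * case documented below.
 */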
struct comm *thread__exec_comm(const struct thread *thread)
{
        struct comm *comm, *last = NULL, *second_last = NULL;

        list_for_each_entry(comm, &thread->comm_list, list) {
                if (comm->exec)
                        return comm;
                second_last = last;
                last = comm;
        }

        /*
         * 'last' with no start time might be the parent's comm of a synthesized
         * thread (created by processing a synthesized fork event). For a main
         * thread, that is very probably wrong. Prefer a later comm to avoid
         * that case.
         */
        if (second_last && !last->start && thread->pid_ == thread->tid)
                return second_last;

        return last;
}

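/*
 * Set the comm at a given timestamp: the very first comm overrides the
 * ":<tid>" placeholder in place; later ones are prepended as new entries so
 * the history is preserved.  An exec comm also flushes cached unwind state,
 * since the address space has been replaced.
 */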
static int ____thread__set_comm(struct thread *thread, const char *str,
                                u64 timestamp, bool exec)
{
        struct comm *new, *curr = thread__comm(thread);

        /* Override the default :tid entry */
        if (!thread->comm_set) {
                int err = comm__override(curr, str, timestamp, exec);
                if (err)
                        return err;
        } else {
                new = comm__new(str, timestamp, exec);
                if (!new)
                        return -ENOMEM;
                list_add(&new->list, &thread->comm_list);

                if (exec)
                        unwind__flush_access(thread->maps);
        }

        thread->comm_set = true;

        return 0;
}

int __thread__set_comm(struct thread *thread, const char *str, u64 timestamp,
                       bool exec)
{
        int ret;

        down_write(&thread->comm_lock);
        ret = ____thread__set_comm(thread, str, timestamp, exec);
        up_write(&thread->comm_lock);
        return ret;
}

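/*
 * Fetch the current comm straight from procfs ("<pid>/task/<tid>/comm",
 * relative to the proc mount).  The snprintf() check rejects a truncated
 * path instead of reading from the wrong file.
 */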
int thread__set_comm_from_proc(struct thread *thread)
{
        char path[64];
        char *comm = NULL;
        size_t sz;
        int err = -1;

        if (!(snprintf(path, sizeof(path), "%d/task/%d/comm",
                       thread->pid_, thread->tid) >= (int)sizeof(path)) &&
            procfs__read_str(path, &comm, &sz) == 0) {
                comm[sz - 1] = '\0';
                err = thread__set_comm(thread, comm, 0);
        }

        return err;
}

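/*
 * Note that the string returned by thread__comm_str() is not a copy: it
 * points at storage owned by the current comm entry, so it stays valid only
 * as long as the thread (and that comm) does.
 */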
static const char *__thread__comm_str(const struct thread *thread)
{
        const struct comm *comm = thread__comm(thread);

        if (!comm)
                return NULL;

        return comm__str(comm);
}

const char *thread__comm_str(struct thread *thread)
{
        const char *str;

        down_read(&thread->comm_lock);
        str = __thread__comm_str(thread);
        up_read(&thread->comm_lock);

        return str;
}

/* CHECKME: this should probably return the max comm len from the comm list */
int thread__comm_len(struct thread *thread)
{
        if (!thread->comm_len) {
                const char *comm = thread__comm_str(thread);
                if (!comm)
                        return 0;
                thread->comm_len = strlen(comm);
        }

        return thread->comm_len;
}

size_t thread__fprintf(struct thread *thread, FILE *fp)
{
        return fprintf(fp, "Thread %d %s\n", thread->tid, thread__comm_str(thread)) +
               maps__fprintf(thread->maps, fp);
}

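/*
 * Insert a map into the thread's address space, first giving the unwinder a
 * chance to set up its per-map state and then trimming any older maps that
 * the new one overlaps.
 */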
int thread__insert_map(struct thread *thread, struct map *map)
{
        int ret;

        ret = unwind__prepare_access(thread->maps, map, NULL);
        if (ret)
                return ret;

        maps__fixup_overlappings(thread->maps, map, stderr);
        maps__insert(thread->maps, map);

        return 0;
}

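/*
 * When DWARF-based callchain unwinding is in use, prime the unwinder for
 * each of the thread's maps; the walk stops as soon as one map reports that
 * the unwind state has been initialized.
 */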
static int __thread__prepare_access(struct thread *thread)
{
        bool initialized = false;
        int err = 0;
        struct maps *maps = thread->maps;
        struct map *map;

        down_read(&maps->lock);

        maps__for_each_entry(maps, map) {
                err = unwind__prepare_access(thread->maps, map, &initialized);
                if (err || initialized)
                        break;
        }

        up_read(&maps->lock);

        return err;
}

static int thread__prepare_access(struct thread *thread)
{
        int err = 0;

        if (dwarf_callchain_users)
                err = __thread__prepare_access(thread);

        return err;
}

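/*
 * On fork, decide how the child gets its maps: a new thread in the same
 * process shares the parent's maps, whereas a new process gets a copy of
 * them (unless the caller asked to skip the clone).
 */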
static int thread__clone_maps(struct thread *thread, struct thread *parent, bool do_maps_clone)
{
        /* This is a new thread in the same process: the maps are shared. */
        if (thread->pid_ == parent->pid_)
                return thread__prepare_access(thread);

        if (thread->maps == parent->maps) {
                pr_debug("broken map groups on thread %d/%d parent %d/%d\n",
                         thread->pid_, thread->tid, parent->pid_, parent->tid);
                return 0;
        }
        /* But this one is a new process: copy the maps. */
        return do_maps_clone ? maps__clone(thread, parent->maps) : 0;
}

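/*
 * Process a fork: the child initially inherits the parent's comm, stamped
 * with the fork timestamp, and its maps, per thread__clone_maps() above.
 */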
int thread__fork(struct thread *thread, struct thread *parent, u64 timestamp, bool do_maps_clone)
{
        if (parent->comm_set) {
                const char *comm = thread__comm_str(parent);
                int err;
                if (!comm)
                        return -ENOMEM;
                err = thread__set_comm(thread, comm, timestamp);
                if (err)
                        return err;
        }

        thread->ppid = parent->tid;
        return thread__clone_maps(thread, parent, do_maps_clone);
}

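/*
 * Resolve an address to a symbol without knowing its cpumode: try each
 * possible mode in turn, from user space through guest kernel, and stop at
 * the first one that yields a map.
 */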
void thread__find_cpumode_addr_location(struct thread *thread, u64 addr,
                                        struct addr_location *al)
{
        size_t i;
        const u8 cpumodes[] = {
                PERF_RECORD_MISC_USER,
                PERF_RECORD_MISC_KERNEL,
                PERF_RECORD_MISC_GUEST_USER,
                PERF_RECORD_MISC_GUEST_KERNEL
        };

        for (i = 0; i < ARRAY_SIZE(cpumodes); i++) {
                thread__find_symbol(thread, cpumodes[i], addr, al);
                if (al->map)
                        break;
        }
}

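/*
 * Return the thread group leader for this thread, taking a reference that
 * the caller must drop with thread__put(), or NULL when the pid is unknown.
 */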
struct thread *thread__main_thread(struct machine *machine, struct thread *thread)
{
        if (thread->pid_ == thread->tid)
                return thread__get(thread);

        if (thread->pid_ == -1)
                return NULL;

        return machine__find_thread(machine, thread->pid_, thread->pid_);
}

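/*
 * Copy len bytes of target memory at ip into buf by reading them out of the
 * backing DSO: resolve ip to a map, convert it to an offset within the DSO
 * and read from the DSO's data.  Optionally reports whether that DSO is
 * 64-bit.
 */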
int thread__memcpy(struct thread *thread, struct machine *machine,
                   void *buf, u64 ip, int len, bool *is64bit)
{
        u8 cpumode = PERF_RECORD_MISC_USER;
        struct addr_location al;
        long offset;

        if (machine__kernel_ip(machine, ip))
                cpumode = PERF_RECORD_MISC_KERNEL;

        if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso ||
            al.map->dso->data.status == DSO_DATA_STATUS_ERROR ||
            map__load(al.map) < 0)
                return -1;

        offset = al.map->map_ip(al.map, ip);
        if (is64bit)
                *is64bit = al.map->dso->is_64_bit;

        return dso__data_read_offset(al.map->dso, machine, offset, buf, len);
}

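/*
 * Free the LBR stitching state: both the active and the free stitch lists,
 * plus the saved previous-LBR cursor.
 */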
void thread__free_stitch_list(struct thread *thread)
{
        struct lbr_stitch *lbr_stitch = thread->lbr_stitch;
        struct stitch_list *pos, *tmp;

        if (!lbr_stitch)
                return;

        list_for_each_entry_safe(pos, tmp, &lbr_stitch->lists, node) {
                list_del_init(&pos->node);
                free(pos);
        }

        list_for_each_entry_safe(pos, tmp, &lbr_stitch->free_lists, node) {
                list_del_init(&pos->node);
                free(pos);
        }

        zfree(&lbr_stitch->prev_lbr_cursor);
        zfree(&thread->lbr_stitch);
}