linux/tools/lib/lockdep/preload.c
// SPDX-License-Identifier: GPL-2.0
#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>
#include <dlfcn.h>
#include <stdlib.h>
#include <sysexits.h>
#include <unistd.h>
#include "include/liblockdep/mutex.h"
#include "../../include/linux/rbtree.h"

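/*
 * This is the preload part of liblockdep: the pthread_mutex_* and
 * pthread_rwlock_* definitions below shadow the real pthread ones when the
 * library is loaded ahead of libpthread (e.g. via LD_PRELOAD), report every
 * operation to lockdep, and then chain to the original implementation
 * through the ll_pthread_* pointers.
 */
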
/**
 * struct lock_lookup - liblockdep's view of a single unique lock
 * @orig: pointer to the original pthread lock, used for lookups
 * @dep_map: lockdep's dep_map structure
 * @key: lockdep's key structure
 * @node: rb-tree node used to store the lock in a global tree
 * @name: a unique name for the lock
 */
struct lock_lookup {
	void *orig; /* Original pthread lock, used for lookups */
	struct lockdep_map dep_map; /* Since all locks are dynamic, we need
				     * a dep_map and a key for each lock */
	/*
	 * Wait, there's no support for key classes? Yup :(
	 * Most big projects wrap the pthread API with their own calls to
	 * be compatible with different locking methods. This means that
	 * "classes" will be broken, since the function that creates all
	 * locks will point to a generic locking function instead of the
	 * actual code that wants to do the locking.
	 */
	struct lock_class_key key;
	struct rb_node node;
#define LIBLOCKDEP_MAX_LOCK_NAME 22
	char name[LIBLOCKDEP_MAX_LOCK_NAME];
};

/* This is where we store our locks */
static struct rb_root locks = RB_ROOT;
static pthread_rwlock_t locks_rwlock = PTHREAD_RWLOCK_INITIALIZER;

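/*
 * The tree is protected by locks_rwlock. The lookup helpers below take it
 * through the ll_pthread_rwlock_* pointers, i.e. the original, unwrapped
 * implementations, so the bookkeeping lock itself is never fed to lockdep.
 */
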
/* pthread mutex API */

#ifdef __GLIBC__
extern int __pthread_mutex_init(pthread_mutex_t *mutex, const pthread_mutexattr_t *attr);
extern int __pthread_mutex_lock(pthread_mutex_t *mutex);
extern int __pthread_mutex_trylock(pthread_mutex_t *mutex);
extern int __pthread_mutex_unlock(pthread_mutex_t *mutex);
extern int __pthread_mutex_destroy(pthread_mutex_t *mutex);
#else
#define __pthread_mutex_init	NULL
#define __pthread_mutex_lock	NULL
#define __pthread_mutex_trylock	NULL
#define __pthread_mutex_unlock	NULL
#define __pthread_mutex_destroy	NULL
#endif
static int (*ll_pthread_mutex_init)(pthread_mutex_t *mutex,
			const pthread_mutexattr_t *attr)	= __pthread_mutex_init;
static int (*ll_pthread_mutex_lock)(pthread_mutex_t *mutex)	= __pthread_mutex_lock;
static int (*ll_pthread_mutex_trylock)(pthread_mutex_t *mutex)	= __pthread_mutex_trylock;
static int (*ll_pthread_mutex_unlock)(pthread_mutex_t *mutex)	= __pthread_mutex_unlock;
static int (*ll_pthread_mutex_destroy)(pthread_mutex_t *mutex)	= __pthread_mutex_destroy;

/* pthread rwlock API */

#ifdef __GLIBC__
extern int __pthread_rwlock_init(pthread_rwlock_t *rwlock, const pthread_rwlockattr_t *attr);
extern int __pthread_rwlock_destroy(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_wrlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_rdlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock);
extern int __pthread_rwlock_unlock(pthread_rwlock_t *rwlock);
#else
#define __pthread_rwlock_init		NULL
#define __pthread_rwlock_destroy	NULL
#define __pthread_rwlock_wrlock		NULL
#define __pthread_rwlock_trywrlock	NULL
#define __pthread_rwlock_rdlock		NULL
#define __pthread_rwlock_tryrdlock	NULL
#define __pthread_rwlock_unlock		NULL
#endif

static int (*ll_pthread_rwlock_init)(pthread_rwlock_t *rwlock,
			const pthread_rwlockattr_t *attr)		= __pthread_rwlock_init;
static int (*ll_pthread_rwlock_destroy)(pthread_rwlock_t *rwlock)	= __pthread_rwlock_destroy;
static int (*ll_pthread_rwlock_rdlock)(pthread_rwlock_t *rwlock)	= __pthread_rwlock_rdlock;
static int (*ll_pthread_rwlock_tryrdlock)(pthread_rwlock_t *rwlock)	= __pthread_rwlock_tryrdlock;
static int (*ll_pthread_rwlock_trywrlock)(pthread_rwlock_t *rwlock)	= __pthread_rwlock_trywrlock;
static int (*ll_pthread_rwlock_wrlock)(pthread_rwlock_t *rwlock)	= __pthread_rwlock_wrlock;
static int (*ll_pthread_rwlock_unlock)(pthread_rwlock_t *rwlock)	= __pthread_rwlock_unlock;

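/*
 * __init_state tracks how far the constructor below has gotten: 'none'
 * before it runs, 'prepare' while the original pthread symbols are being
 * resolved (on non-glibc builds) and 'done' once the ll_pthread_* pointers
 * are usable. alloc_lock() relies on this to avoid calling malloc() before
 * initialization is complete.
 */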
enum { none, prepare, done, } __init_state;
static void init_preload(void);
static void try_init_preload(void)
{
	if (__init_state != done)
		init_preload();
}

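/*
 * __get_lock_node - find the rb-tree slot matching a pthread lock pointer.
 * Walks the global 'locks' tree and returns the link where @lock lives (or
 * where it should be inserted), filling *@parent for rb_link_node(). The
 * caller is expected to hold locks_rwlock.
 */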
static struct rb_node **__get_lock_node(void *lock, struct rb_node **parent)
{
	struct rb_node **node = &locks.rb_node;
	struct lock_lookup *l;

	*parent = NULL;

	while (*node) {
		l = rb_entry(*node, struct lock_lookup, node);

		*parent = *node;
		if (lock < l->orig)
			node = &l->node.rb_left;
		else if (lock > l->orig)
			node = &l->node.rb_right;
		else
			return node;
	}

	return node;
}

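/*
 * A small static pool of lock_lookup entries. alloc_lock() hands these out
 * while initialization is still in progress, when calling malloc() isn't
 * safe yet; the default of 1024 can be overridden at build time by defining
 * LIBLOCKDEP_STATIC_ENTRIES.
 */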
#ifndef LIBLOCKDEP_STATIC_ENTRIES
#define LIBLOCKDEP_STATIC_ENTRIES	1024
#endif

static struct lock_lookup __locks[LIBLOCKDEP_STATIC_ENTRIES];
static int __locks_nr;

static inline bool is_static_lock(struct lock_lookup *lock)
{
	return lock >= __locks && lock < __locks + ARRAY_SIZE(__locks);
}

static struct lock_lookup *alloc_lock(void)
{
	if (__init_state != done) {
		/*
		 * Some programs attempt to initialize and use locks in their
		 * allocation path. This means that a call to malloc() would
		 * result in locks being initialized and locked.
		 *
		 * Why is it an issue for us? dlsym() below will try allocating
		 * to give us the original function. Since this allocation will
		 * result in a locking operation, we have to let pthread deal
		 * with it, but we can't! We don't have the pointer to the
		 * original API since we're inside dlsym() trying to get it.
		 */

		int idx = __locks_nr++;
		if (idx >= ARRAY_SIZE(__locks)) {
			dprintf(STDERR_FILENO,
		"LOCKDEP error: insufficient LIBLOCKDEP_STATIC_ENTRIES\n");
			exit(EX_UNAVAILABLE);
		}
		return __locks + idx;
	}

	return malloc(sizeof(struct lock_lookup));
}

static inline void free_lock(struct lock_lookup *lock)
{
	if (likely(!is_static_lock(lock)))
		free(lock);
}

/**
 * __get_lock - find or create a lock instance
 * @lock: pointer to the original pthread lock
 *
 * Try to find an existing lock in the rbtree using the provided pointer. If
 * one wasn't found - create it.
 */
static struct lock_lookup *__get_lock(void *lock)
{
	struct rb_node **node, *parent;
	struct lock_lookup *l;

	ll_pthread_rwlock_rdlock(&locks_rwlock);
	node = __get_lock_node(lock, &parent);
	ll_pthread_rwlock_unlock(&locks_rwlock);
	if (*node) {
		return rb_entry(*node, struct lock_lookup, node);
	}

	/* We didn't find the lock, let's create it */
	l = alloc_lock();
	if (l == NULL)
		return NULL;

	l->orig = lock;
	/*
	 * Currently the name of the lock is the ptr value of the pthread lock;
	 * while not optimal, it makes debugging a bit easier.
	 *
	 * TODO: Get the real name of the lock using libdwarf
	 */
	sprintf(l->name, "%p", lock);
	lockdep_init_map(&l->dep_map, l->name, &l->key, 0);

	ll_pthread_rwlock_wrlock(&locks_rwlock);
	/* This might have changed since the last time we fetched it */
	node = __get_lock_node(lock, &parent);
	rb_link_node(&l->node, parent, node);
	rb_insert_color(&l->node, &locks);
	ll_pthread_rwlock_unlock(&locks_rwlock);

	return l;
}

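/*
 * __del_lock - drop a lock from the global tree and release its lookup
 * entry (the entry is only freed if it didn't come from the static pool).
 */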
static void __del_lock(struct lock_lookup *lock)
{
	ll_pthread_rwlock_wrlock(&locks_rwlock);
	rb_erase(&lock->node, &locks);
	ll_pthread_rwlock_unlock(&locks_rwlock);
	free_lock(lock);
}

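/*
 * The wrappers below report to lockdep through lock_acquire() and
 * lock_release(). With the lockdep API used here, lock_acquire() takes
 * (map, subclass, trylock, read, check, nest_lock, ip): the try variants
 * pass trylock=1, pthread_rwlock_rdlock()/tryrdlock() pass read=2 to mark
 * a recursive read acquisition, and check=1 asks for full validation. The
 * ip argument records the caller via _RET_IP_.
 */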
int pthread_mutex_init(pthread_mutex_t *mutex,
			const pthread_mutexattr_t *attr)
{
	int r;

	/*
	 * We keep trying to init our preload module because there might be
	 * code in init sections that tries to touch locks before we are
	 * initialized; in that case we'll need to manually call preload
	 * to get us going.
	 *
	 * Funnily enough, the kernel's lockdep had the same issue, and used
	 * (almost) the same solution. See look_up_lock_class() in
	 * kernel/locking/lockdep.c for details.
	 */
	try_init_preload();

	r = ll_pthread_mutex_init(mutex, attr);
	if (r == 0)
		/*
		 * We do a dummy initialization here so that lockdep could
		 * warn us if something fishy is going on - such as
		 * initializing a held lock.
		 */
		__get_lock(mutex);

	return r;
}

int pthread_mutex_lock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();

	lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL,
			(unsigned long)_RET_IP_);
	/*
	 * Here's the thing with pthread mutexes: unlike the kernel variant,
	 * they can fail.
	 *
	 * This means that the behaviour here is a bit different from what's
	 * going on in the kernel: there we just tell lockdep that we took the
	 * lock before actually taking it, but here we must deal with the case
	 * that locking failed.
	 *
	 * To do that we'll "release" the lock if locking failed - this way
	 * we'll get lockdep doing the correct checks when we try to take
	 * the lock, and if that fails - we'll be back to the correct
	 * state by releasing it.
	 */
	r = ll_pthread_mutex_lock(mutex);
	if (r)
		lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_trylock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();

	lock_acquire(&__get_lock(mutex)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_mutex_trylock(mutex);
	if (r)
		lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_unlock(pthread_mutex_t *mutex)
{
	int r;

	try_init_preload();

	lock_release(&__get_lock(mutex)->dep_map, 0, (unsigned long)_RET_IP_);
	/*
	 * Just like taking a lock, only in reverse!
	 *
	 * If we fail releasing the lock, tell lockdep we're holding it again.
	 */
	r = ll_pthread_mutex_unlock(mutex);
	if (r)
		lock_acquire(&__get_lock(mutex)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);

	return r;
}

int pthread_mutex_destroy(pthread_mutex_t *mutex)
{
	try_init_preload();

	/*
	 * Let's see if we're destroying a lock that's still held.
	 *
	 * TODO: Hook into free() and add that check there as well.
	 */
	debug_check_no_locks_freed(mutex, sizeof(*mutex));
	__del_lock(__get_lock(mutex));
	return ll_pthread_mutex_destroy(mutex);
}

/* This is the rwlock part, very similar to the mutex part above */
int pthread_rwlock_init(pthread_rwlock_t *rwlock,
			const pthread_rwlockattr_t *attr)
{
	int r;

	try_init_preload();

	r = ll_pthread_rwlock_init(rwlock, attr);
	if (r == 0)
		__get_lock(rwlock);

	return r;
}

int pthread_rwlock_destroy(pthread_rwlock_t *rwlock)
{
	try_init_preload();

	debug_check_no_locks_freed(rwlock, sizeof(*rwlock));
	__del_lock(__get_lock(rwlock));
	return ll_pthread_rwlock_destroy(rwlock);
}

int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 2, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_rdlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 2, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_tryrdlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 1, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_trywrlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

	lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_wrlock(rwlock);
	if (r)
		lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);

	return r;
}

int pthread_rwlock_unlock(pthread_rwlock_t *rwlock)
{
	int r;

	init_preload();

	lock_release(&__get_lock(rwlock)->dep_map, 0, (unsigned long)_RET_IP_);
	r = ll_pthread_rwlock_unlock(rwlock);
	if (r)
		lock_acquire(&__get_lock(rwlock)->dep_map, 0, 0, 0, 1, NULL, (unsigned long)_RET_IP_);

	return r;
}

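/*
 * Runs when the preload library is loaded (and again from
 * try_init_preload() if locks are touched before that). On glibc builds the
 * ll_pthread_* pointers already point at the internal __pthread_* aliases
 * declared above, so there is nothing to resolve; on other libcs the real
 * symbols are looked up with dlsym(RTLD_NEXT, ...) while __init_state is
 * 'prepare', which makes alloc_lock() fall back to the static pool for any
 * locks dlsym() touches.
 */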
__attribute__((constructor)) static void init_preload(void)
{
	if (__init_state == done)
		return;

#ifndef __GLIBC__
	__init_state = prepare;

	ll_pthread_mutex_init = dlsym(RTLD_NEXT, "pthread_mutex_init");
	ll_pthread_mutex_lock = dlsym(RTLD_NEXT, "pthread_mutex_lock");
	ll_pthread_mutex_trylock = dlsym(RTLD_NEXT, "pthread_mutex_trylock");
	ll_pthread_mutex_unlock = dlsym(RTLD_NEXT, "pthread_mutex_unlock");
	ll_pthread_mutex_destroy = dlsym(RTLD_NEXT, "pthread_mutex_destroy");

	ll_pthread_rwlock_init = dlsym(RTLD_NEXT, "pthread_rwlock_init");
	ll_pthread_rwlock_destroy = dlsym(RTLD_NEXT, "pthread_rwlock_destroy");
	ll_pthread_rwlock_rdlock = dlsym(RTLD_NEXT, "pthread_rwlock_rdlock");
	ll_pthread_rwlock_tryrdlock = dlsym(RTLD_NEXT, "pthread_rwlock_tryrdlock");
	ll_pthread_rwlock_wrlock = dlsym(RTLD_NEXT, "pthread_rwlock_wrlock");
	ll_pthread_rwlock_trywrlock = dlsym(RTLD_NEXT, "pthread_rwlock_trywrlock");
	ll_pthread_rwlock_unlock = dlsym(RTLD_NEXT, "pthread_rwlock_unlock");
#endif

	__init_state = done;
}