linux/drivers/base/regmap/regmap-debugfs.c
/*
 * Register map access API - debugfs
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/device.h>
#include <linux/list.h>

#include "internal.h"

struct regmap_debugfs_node {
        struct regmap *map;
        const char *name;
        struct list_head link;
};

static unsigned int dummy_index;
static struct dentry *regmap_debugfs_root;
static LIST_HEAD(regmap_debugfs_early_list);
static DEFINE_MUTEX(regmap_debugfs_early_lock);

/* Calculate the length of a fixed format */
static size_t regmap_calc_reg_len(int max_val)
{
        return snprintf(NULL, 0, "%x", max_val);
}

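/* Report the name of the driver bound to the regmap's device ("nodev" if none) */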
static ssize_t regmap_name_read_file(struct file *file,
                                     char __user *user_buf, size_t count,
                                     loff_t *ppos)
{
        struct regmap *map = file->private_data;
        const char *name = "nodev";
        int ret;
        char *buf;

        buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        if (map->dev && map->dev->driver)
                name = map->dev->driver->name;

        ret = snprintf(buf, PAGE_SIZE, "%s\n", name);
        if (ret < 0) {
                kfree(buf);
                return ret;
        }

        ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
        kfree(buf);
        return ret;
}

static const struct file_operations regmap_name_fops = {
        .open = simple_open,
        .read = regmap_name_read_file,
        .llseek = default_llseek,
};

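/* Free the register dump offset cache; callers hold map->cache_lock */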
static void regmap_debugfs_free_dump_cache(struct regmap *map)
{
        struct regmap_debugfs_off_cache *c;

        while (!list_empty(&map->debugfs_off_cache)) {
                c = list_first_entry(&map->debugfs_off_cache,
                                     struct regmap_debugfs_off_cache,
                                     list);
                list_del(&c->list);
                kfree(c);
        }
}

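/* A register is shown in the dump only if it is not precious and is readable or cached */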
static bool regmap_printable(struct regmap *map, unsigned int reg)
{
        if (regmap_precious(map, reg))
                return false;

        if (!regmap_readable(map, reg) && !regmap_cached(map, reg))
                return false;

        return true;
}

/*
 * Work out where the start offset maps into register numbers, bearing
 * in mind that we suppress hidden registers.
 */
static unsigned int regmap_debugfs_get_dump_start(struct regmap *map,
                                                  unsigned int base,
                                                  loff_t from,
                                                  loff_t *pos)
{
        struct regmap_debugfs_off_cache *c = NULL;
        loff_t p = 0;
        unsigned int i, ret;
        unsigned int fpos_offset;
        unsigned int reg_offset;

        /* Suppress the cache if we're using a subrange */
        if (base)
                return base;

        /*
         * If we don't have a cache build one so we don't have to do a
         * linear scan each time.
         */
        mutex_lock(&map->cache_lock);
        i = base;
        if (list_empty(&map->debugfs_off_cache)) {
                for (; i <= map->max_register; i += map->reg_stride) {
                        /* Skip unprinted registers, closing off cache entry */
                        if (!regmap_printable(map, i)) {
                                if (c) {
                                        c->max = p - 1;
                                        c->max_reg = i - map->reg_stride;
                                        list_add_tail(&c->list,
                                                      &map->debugfs_off_cache);
                                        c = NULL;
                                }

                                continue;
                        }

                        /* No cache entry?  Start a new one */
                        if (!c) {
                                c = kzalloc(sizeof(*c), GFP_KERNEL);
                                if (!c) {
                                        regmap_debugfs_free_dump_cache(map);
                                        mutex_unlock(&map->cache_lock);
                                        return base;
                                }
                                c->min = p;
                                c->base_reg = i;
                        }

                        p += map->debugfs_tot_len;
                }
        }

        /* Close the last entry off if we didn't scan beyond it */
        if (c) {
                c->max = p - 1;
                c->max_reg = i - map->reg_stride;
                list_add_tail(&c->list,
                              &map->debugfs_off_cache);
        }

        /*
         * This should never happen; we return above if we fail to
         * allocate and we should never be in this code if there are
         * no registers at all.
         */
        WARN_ON(list_empty(&map->debugfs_off_cache));
        ret = base;

        /* Find the relevant block:offset */
        list_for_each_entry(c, &map->debugfs_off_cache, list) {
                if (from >= c->min && from <= c->max) {
                        fpos_offset = from - c->min;
                        reg_offset = fpos_offset / map->debugfs_tot_len;
                        *pos = c->min + (reg_offset * map->debugfs_tot_len);
                        mutex_unlock(&map->cache_lock);
                        return c->base_reg + (reg_offset * map->reg_stride);
                }

                *pos = c->max;
                ret = c->max_reg;
        }
        mutex_unlock(&map->cache_lock);

        return ret;
}

static inline void regmap_calc_tot_len(struct regmap *map,
                                       void *buf, size_t count)
{
        /* Calculate the length of a fixed format */
        if (!map->debugfs_tot_len) {
                map->debugfs_reg_len = regmap_calc_reg_len(map->max_register);
                map->debugfs_val_len = 2 * map->format.val_bytes;
                map->debugfs_tot_len = map->debugfs_reg_len +
                        map->debugfs_val_len + 3;      /* : \n */
        }
}

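/*
 * Dump the printable registers in [from, to] to userspace as fixed-width
 * "offset: value" lines, resuming at the register matching the file position.
 */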
static ssize_t regmap_read_debugfs(struct regmap *map, unsigned int from,
                                   unsigned int to, char __user *user_buf,
                                   size_t count, loff_t *ppos)
{
        size_t buf_pos = 0;
        loff_t p = *ppos;
        ssize_t ret;
        int i;
        char *buf;
        unsigned int val, start_reg;

        if (*ppos < 0 || !count)
                return -EINVAL;

        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        regmap_calc_tot_len(map, buf, count);

        /* Work out which register we're starting at */
        start_reg = regmap_debugfs_get_dump_start(map, from, *ppos, &p);

        for (i = start_reg; i <= to; i += map->reg_stride) {
                if (!regmap_readable(map, i) && !regmap_cached(map, i))
                        continue;

                if (regmap_precious(map, i))
                        continue;

                /* If we're in the region the user is trying to read */
                if (p >= *ppos) {
                        /* ...but not beyond it */
                        if (buf_pos + map->debugfs_tot_len > count)
                                break;

                        /* Format the register */
                        snprintf(buf + buf_pos, count - buf_pos, "%.*x: ",
                                 map->debugfs_reg_len, i - from);
                        buf_pos += map->debugfs_reg_len + 2;

                        /* Format the value, write all X if we can't read */
                        ret = regmap_read(map, i, &val);
                        if (ret == 0)
                                snprintf(buf + buf_pos, count - buf_pos,
                                         "%.*x", map->debugfs_val_len, val);
                        else
                                memset(buf + buf_pos, 'X',
                                       map->debugfs_val_len);
                        buf_pos += 2 * map->format.val_bytes;

                        buf[buf_pos++] = '\n';
                }
                p += map->debugfs_tot_len;
        }

        ret = buf_pos;

        if (copy_to_user(user_buf, buf, buf_pos)) {
                ret = -EFAULT;
                goto out;
        }

        *ppos += buf_pos;

out:
        kfree(buf);
        return ret;
}

static ssize_t regmap_map_read_file(struct file *file, char __user *user_buf,
                                    size_t count, loff_t *ppos)
{
        struct regmap *map = file->private_data;

        return regmap_read_debugfs(map, 0, map->max_register, user_buf,
                                   count, ppos);
}

#undef REGMAP_ALLOW_WRITE_DEBUGFS
#ifdef REGMAP_ALLOW_WRITE_DEBUGFS
/*
 * This can be dangerous, especially with clients such as PMICs, so no
 * real compile time configuration option is provided for this feature;
 * people who want to use it will need to modify the source code directly.
 */
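/*
 * Writes take the form "<reg> <value>" in hex; for example, writing
 * "30 5" to the registers file sets register 0x30 to 0x5.
 */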
static ssize_t regmap_map_write_file(struct file *file,
                                     const char __user *user_buf,
                                     size_t count, loff_t *ppos)
{
        char buf[32];
        size_t buf_size;
        char *start = buf;
        unsigned long reg, value;
        struct regmap *map = file->private_data;
        int ret;

        buf_size = min(count, (sizeof(buf)-1));
        if (copy_from_user(buf, user_buf, buf_size))
                return -EFAULT;
        buf[buf_size] = 0;

        while (*start == ' ')
                start++;
        reg = simple_strtoul(start, &start, 16);
        while (*start == ' ')
                start++;
        if (kstrtoul(start, 16, &value))
                return -EINVAL;

        /* Userspace has been fiddling around behind the kernel's back */
        add_taint(TAINT_USER, LOCKDEP_STILL_OK);

        ret = regmap_write(map, reg, value);
        if (ret < 0)
                return ret;
        return buf_size;
}
#else
#define regmap_map_write_file NULL
#endif

static const struct file_operations regmap_map_fops = {
        .open = simple_open,
        .read = regmap_map_read_file,
        .write = regmap_map_write_file,
        .llseek = default_llseek,
};

static ssize_t regmap_range_read_file(struct file *file, char __user *user_buf,
                                      size_t count, loff_t *ppos)
{
        struct regmap_range_node *range = file->private_data;
        struct regmap *map = range->map;

        return regmap_read_debugfs(map, range->range_min, range->range_max,
                                   user_buf, count, ppos);
}

static const struct file_operations regmap_range_fops = {
        .open = simple_open,
        .read = regmap_range_read_file,
        .llseek = default_llseek,
};

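/* Emit one "min-max\n" line per contiguous block of printable registers */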
static ssize_t regmap_reg_ranges_read_file(struct file *file,
                                           char __user *user_buf, size_t count,
                                           loff_t *ppos)
{
        struct regmap *map = file->private_data;
        struct regmap_debugfs_off_cache *c;
        loff_t p = 0;
        size_t buf_pos = 0;
        char *buf;
        char *entry;
        int ret;
        unsigned int entry_len;

        if (*ppos < 0 || !count)
                return -EINVAL;

        buf = kmalloc(count, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        entry = kmalloc(PAGE_SIZE, GFP_KERNEL);
        if (!entry) {
                kfree(buf);
                return -ENOMEM;
        }

        /* While we are at it, build the register dump cache
         * now so the read() operation on the `registers' file
         * can benefit from using the cache.  We do not care
         * about the file position information that is contained
         * in the cache, just about the actual register blocks */
        regmap_calc_tot_len(map, buf, count);
        regmap_debugfs_get_dump_start(map, 0, *ppos, &p);

        /* Reset file pointer as the fixed-format of the `registers'
         * file is not compatible with the `range' file */
        p = 0;
        mutex_lock(&map->cache_lock);
        list_for_each_entry(c, &map->debugfs_off_cache, list) {
                entry_len = snprintf(entry, PAGE_SIZE, "%x-%x\n",
                                     c->base_reg, c->max_reg);
                if (p >= *ppos) {
                        if (buf_pos + entry_len > count)
                                break;
                        memcpy(buf + buf_pos, entry, entry_len);
                        buf_pos += entry_len;
                }
                p += entry_len;
        }
        mutex_unlock(&map->cache_lock);

        kfree(entry);
        ret = buf_pos;

        if (copy_to_user(user_buf, buf, buf_pos)) {
                ret = -EFAULT;
                goto out_buf;
        }

        *ppos += buf_pos;
out_buf:
        kfree(buf);
        return ret;
}

static const struct file_operations regmap_reg_ranges_fops = {
        .open = simple_open,
        .read = regmap_reg_ranges_read_file,
        .llseek = default_llseek,
};

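/*
 * Describe each register with one line of y/n flags:
 * readable, writeable, volatile and precious, in that order.
 */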
static int regmap_access_show(struct seq_file *s, void *ignored)
{
        struct regmap *map = s->private;
        int i, reg_len;

        reg_len = regmap_calc_reg_len(map->max_register);

        for (i = 0; i <= map->max_register; i += map->reg_stride) {
                /* Ignore registers which are neither readable nor writable */
                if (!regmap_readable(map, i) && !regmap_writeable(map, i))
                        continue;

                /* Format the register */
                seq_printf(s, "%.*x: %c %c %c %c\n", reg_len, i,
                           regmap_readable(map, i) ? 'y' : 'n',
                           regmap_writeable(map, i) ? 'y' : 'n',
                           regmap_volatile(map, i) ? 'y' : 'n',
                           regmap_precious(map, i) ? 'y' : 'n');
        }

        return 0;
}

static int access_open(struct inode *inode, struct file *file)
{
        return single_open(file, regmap_access_show, inode->i_private);
}

static const struct file_operations regmap_access_fops = {
        .open           = access_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

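/*
 * Toggle cache_only from userspace; enabling it taints the kernel, while
 * disabling it triggers a cache sync back to the hardware.
 */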
static ssize_t regmap_cache_only_write_file(struct file *file,
                                            const char __user *user_buf,
                                            size_t count, loff_t *ppos)
{
        struct regmap *map = container_of(file->private_data,
                                          struct regmap, cache_only);
        ssize_t result;
        bool was_enabled, require_sync = false;
        int err;

        map->lock(map->lock_arg);

        was_enabled = map->cache_only;

        result = debugfs_write_file_bool(file, user_buf, count, ppos);
        if (result < 0) {
                map->unlock(map->lock_arg);
                return result;
        }

        if (map->cache_only && !was_enabled) {
                dev_warn(map->dev, "debugfs cache_only=Y forced\n");
                add_taint(TAINT_USER, LOCKDEP_STILL_OK);
        } else if (!map->cache_only && was_enabled) {
                dev_warn(map->dev, "debugfs cache_only=N forced: syncing cache\n");
                require_sync = true;
        }

        map->unlock(map->lock_arg);

        if (require_sync) {
                err = regcache_sync(map);
                if (err)
                        dev_err(map->dev, "Failed to sync cache %d\n", err);
        }

        return result;
}

static const struct file_operations regmap_cache_only_fops = {
        .open = simple_open,
        .read = debugfs_read_file_bool,
        .write = regmap_cache_only_write_file,
};

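/* Toggle cache_bypass from userspace; enabling it taints the kernel */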
static ssize_t regmap_cache_bypass_write_file(struct file *file,
                                              const char __user *user_buf,
                                              size_t count, loff_t *ppos)
{
        struct regmap *map = container_of(file->private_data,
                                          struct regmap, cache_bypass);
        ssize_t result;
        bool was_enabled;

        map->lock(map->lock_arg);

        was_enabled = map->cache_bypass;

        result = debugfs_write_file_bool(file, user_buf, count, ppos);
        if (result < 0)
                goto out;

        if (map->cache_bypass && !was_enabled) {
                dev_warn(map->dev, "debugfs cache_bypass=Y forced\n");
                add_taint(TAINT_USER, LOCKDEP_STILL_OK);
        } else if (!map->cache_bypass && was_enabled) {
                dev_warn(map->dev, "debugfs cache_bypass=N forced\n");
        }

out:
        map->unlock(map->lock_arg);

        return result;
}

static const struct file_operations regmap_cache_bypass_fops = {
        .open = simple_open,
        .read = debugfs_read_file_bool,
        .write = regmap_cache_bypass_write_file,
};

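/*
 * Create the per-map debugfs directory and files.  If the debugfs root has
 * not been created yet, queue the map on regmap_debugfs_early_list and
 * initialise it later from regmap_debugfs_initcall().
 */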
void regmap_debugfs_init(struct regmap *map, const char *name)
{
        struct rb_node *next;
        struct regmap_range_node *range_node;
        const char *devname = "dummy";

        /*
         * Userspace can initiate reads from the hardware over debugfs.
         * Normally internal regmap structures and buffers are protected with
         * a mutex or a spinlock, but if the regmap owner decided to disable
         * all locking mechanisms, this is no longer the case. For safety:
         * don't create the debugfs entries if locking is disabled.
         */
        if (map->debugfs_disable) {
                dev_dbg(map->dev, "regmap locking disabled - not creating debugfs entries\n");
                return;
        }

        /* If we don't have the debugfs root yet, postpone init */
        if (!regmap_debugfs_root) {
                struct regmap_debugfs_node *node;
                node = kzalloc(sizeof(*node), GFP_KERNEL);
                if (!node)
                        return;
                node->map = map;
                node->name = name;
                mutex_lock(&regmap_debugfs_early_lock);
                list_add(&node->link, &regmap_debugfs_early_list);
                mutex_unlock(&regmap_debugfs_early_lock);
                return;
        }

        INIT_LIST_HEAD(&map->debugfs_off_cache);
        mutex_init(&map->cache_lock);

        if (map->dev)
                devname = dev_name(map->dev);

        if (name) {
                map->debugfs_name = kasprintf(GFP_KERNEL, "%s-%s",
                                              devname, name);
                name = map->debugfs_name;
        } else {
                name = devname;
        }

        if (!strcmp(name, "dummy")) {
                map->debugfs_name = kasprintf(GFP_KERNEL, "dummy%d",
                                                dummy_index);
                name = map->debugfs_name;
                dummy_index++;
        }

        map->debugfs = debugfs_create_dir(name, regmap_debugfs_root);
        if (!map->debugfs) {
                dev_warn(map->dev,
                         "Failed to create %s debugfs directory\n", name);

                kfree(map->debugfs_name);
                map->debugfs_name = NULL;
                return;
        }

        debugfs_create_file("name", 0400, map->debugfs,
                            map, &regmap_name_fops);

        debugfs_create_file("range", 0400, map->debugfs,
                            map, &regmap_reg_ranges_fops);

        if (map->max_register || regmap_readable(map, 0)) {
                umode_t registers_mode;

#if defined(REGMAP_ALLOW_WRITE_DEBUGFS)
                registers_mode = 0600;
#else
                registers_mode = 0400;
#endif

                debugfs_create_file("registers", registers_mode, map->debugfs,
                                    map, &regmap_map_fops);
                debugfs_create_file("access", 0400, map->debugfs,
                                    map, &regmap_access_fops);
        }

        if (map->cache_type) {
                debugfs_create_file("cache_only", 0600, map->debugfs,
                                    &map->cache_only, &regmap_cache_only_fops);
                debugfs_create_bool("cache_dirty", 0400, map->debugfs,
                                    &map->cache_dirty);
                debugfs_create_file("cache_bypass", 0600, map->debugfs,
                                    &map->cache_bypass,
                                    &regmap_cache_bypass_fops);
        }

        next = rb_first(&map->range_tree);
        while (next) {
                range_node = rb_entry(next, struct regmap_range_node, node);

                if (range_node->name)
                        debugfs_create_file(range_node->name, 0400,
                                            map->debugfs, range_node,
                                            &regmap_range_fops);

                next = rb_next(&range_node->node);
        }

        if (map->cache_ops && map->cache_ops->debugfs_init)
                map->cache_ops->debugfs_init(map);
}

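/* Tear down a map's debugfs entries, or drop its pending early-init entry */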
void regmap_debugfs_exit(struct regmap *map)
{
        if (map->debugfs) {
                debugfs_remove_recursive(map->debugfs);
                mutex_lock(&map->cache_lock);
                regmap_debugfs_free_dump_cache(map);
                mutex_unlock(&map->cache_lock);
                kfree(map->debugfs_name);
        } else {
                struct regmap_debugfs_node *node, *tmp;

                mutex_lock(&regmap_debugfs_early_lock);
                list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list,
                                         link) {
                        if (node->map == map) {
                                list_del(&node->link);
                                kfree(node);
                        }
                }
                mutex_unlock(&regmap_debugfs_early_lock);
        }
}

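/* Create the debugfs root and initialise any maps registered before it existed */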
void regmap_debugfs_initcall(void)
{
        struct regmap_debugfs_node *node, *tmp;

        regmap_debugfs_root = debugfs_create_dir("regmap", NULL);
        if (!regmap_debugfs_root) {
                pr_warn("regmap: Failed to create debugfs root\n");
                return;
        }

        mutex_lock(&regmap_debugfs_early_lock);
        list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) {
                regmap_debugfs_init(node->map, node->name);
                list_del(&node->link);
                kfree(node);
        }
        mutex_unlock(&regmap_debugfs_early_lock);
}