linux/drivers/base/regmap/regcache.c
/*
 * Register cache access API
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/device.h>
#include <trace/events/regmap.h>
#include <linux/bsearch.h>
#include <linux/sort.h>

#include "internal.h"

static const struct regcache_ops *cache_types[] = {
        &regcache_rbtree_ops,
        &regcache_lzo_ops,
        &regcache_flat_ops,
};

static int regcache_hw_init(struct regmap *map)
{
        int i, j;
        int ret;
        int count;
        unsigned int val;
        void *tmp_buf;

        if (!map->num_reg_defaults_raw)
                return -EINVAL;

        if (!map->reg_defaults_raw) {
                u32 cache_bypass = map->cache_bypass;
                dev_warn(map->dev, "No cache defaults, reading back from HW\n");

                /* Bypass the cache while the defaults are read from the HW */
                map->cache_bypass = 1;
                tmp_buf = kmalloc(map->cache_size_raw, GFP_KERNEL);
                if (!tmp_buf)
                        return -ENOMEM;
                ret = regmap_raw_read(map, 0, tmp_buf,
                                      map->cache_size_raw);
                map->cache_bypass = cache_bypass;
                if (ret < 0) {
                        kfree(tmp_buf);
                        return ret;
                }
                map->reg_defaults_raw = tmp_buf;
                map->cache_free = 1;
        }

        /* calculate the size of reg_defaults */
        for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) {
                val = regcache_get_val(map, map->reg_defaults_raw, i);
                if (regmap_volatile(map, i * map->reg_stride))
                        continue;
                count++;
        }

        map->reg_defaults = kmalloc(count * sizeof(struct reg_default),
                                      GFP_KERNEL);
        if (!map->reg_defaults) {
                ret = -ENOMEM;
                goto err_free;
        }

        /* fill the reg_defaults */
        map->num_reg_defaults = count;
        for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) {
                val = regcache_get_val(map, map->reg_defaults_raw, i);
                if (regmap_volatile(map, i * map->reg_stride))
                        continue;
                map->reg_defaults[j].reg = i * map->reg_stride;
                map->reg_defaults[j].def = val;
                j++;
        }

        return 0;

err_free:
        if (map->cache_free)
                kfree(map->reg_defaults_raw);

        return ret;
}

int regcache_init(struct regmap *map, const struct regmap_config *config)
{
        int ret;
        int i;
        void *tmp_buf;

        for (i = 0; i < config->num_reg_defaults; i++)
                if (config->reg_defaults[i].reg % map->reg_stride)
                        return -EINVAL;

        if (map->cache_type == REGCACHE_NONE) {
                map->cache_bypass = true;
                return 0;
        }

        for (i = 0; i < ARRAY_SIZE(cache_types); i++)
                if (cache_types[i]->type == map->cache_type)
                        break;

        if (i == ARRAY_SIZE(cache_types)) {
                dev_err(map->dev, "Could not match cache type: %d\n",
                        map->cache_type);
                return -EINVAL;
        }

        map->num_reg_defaults = config->num_reg_defaults;
        map->num_reg_defaults_raw = config->num_reg_defaults_raw;
        map->reg_defaults_raw = config->reg_defaults_raw;
        map->cache_word_size = DIV_ROUND_UP(config->val_bits, 8);
        map->cache_size_raw = map->cache_word_size * config->num_reg_defaults_raw;

        map->cache = NULL;
        map->cache_ops = cache_types[i];

        if (!map->cache_ops->read ||
            !map->cache_ops->write ||
            !map->cache_ops->name)
                return -EINVAL;

        /* We still need to ensure that the reg_defaults
         * won't vanish from under us.  We'll need to make
         * a copy of it.
         */
        if (config->reg_defaults) {
                if (!map->num_reg_defaults)
                        return -EINVAL;
                tmp_buf = kmemdup(config->reg_defaults, map->num_reg_defaults *
                                  sizeof(struct reg_default), GFP_KERNEL);
                if (!tmp_buf)
                        return -ENOMEM;
                map->reg_defaults = tmp_buf;
        } else if (map->num_reg_defaults_raw) {
                /* Some devices such as PMICs don't have cache defaults, so
                 * we cope with this by reading back the HW registers and
                 * crafting the cache defaults by hand.
                 */
                ret = regcache_hw_init(map);
                if (ret < 0)
                        return ret;
        }

        if (!map->max_register)
                map->max_register = map->num_reg_defaults_raw;

        if (map->cache_ops->init) {
                dev_dbg(map->dev, "Initializing %s cache\n",
                        map->cache_ops->name);
                ret = map->cache_ops->init(map);
                if (ret)
                        goto err_free;
        }
        return 0;

err_free:
        kfree(map->reg_defaults);
        if (map->cache_free)
                kfree(map->reg_defaults_raw);

        return ret;
}
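
/*
 * Illustrative sketch of how caching is typically enabled; all of the
 * example_* names and register values below are hypothetical.  A client
 * driver does not call regcache_init() itself: it fills in the cache
 * fields of its struct regmap_config and the regmap core does the rest,
 * e.g.:
 *
 *      static const struct reg_default example_defaults[] = {
 *              { .reg = 0x00, .def = 0x0000 },
 *              { .reg = 0x04, .def = 0x1234 },
 *      };
 *
 *      static const struct regmap_config example_config = {
 *              .reg_bits = 8,
 *              .val_bits = 16,
 *              .reg_stride = 4,
 *              .max_register = 0x7c,
 *              .cache_type = REGCACHE_RBTREE,
 *              .reg_defaults = example_defaults,
 *              .num_reg_defaults = ARRAY_SIZE(example_defaults),
 *      };
 */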

void regcache_exit(struct regmap *map)
{
        if (map->cache_type == REGCACHE_NONE)
                return;

        BUG_ON(!map->cache_ops);

        kfree(map->reg_defaults);
        if (map->cache_free)
                kfree(map->reg_defaults_raw);

        if (map->cache_ops->exit) {
                dev_dbg(map->dev, "Destroying %s cache\n",
                        map->cache_ops->name);
                map->cache_ops->exit(map);
        }
}

/**
 * regcache_read: Fetch the value of a given register from the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The value to be returned.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_read(struct regmap *map,
                  unsigned int reg, unsigned int *value)
{
        int ret;

        if (map->cache_type == REGCACHE_NONE)
                return -ENOSYS;

        BUG_ON(!map->cache_ops);

        if (!regmap_volatile(map, reg)) {
                ret = map->cache_ops->read(map, reg, value);

                if (ret == 0)
                        trace_regmap_reg_read_cache(map->dev, reg, *value);

                return ret;
        }

        return -EINVAL;
}
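
/*
 * Illustrative sketch only: regcache_read() returns -ENOSYS when no
 * cache is configured and -EINVAL for volatile registers, so a caller
 * can try the cache first and fall back to the bus on any error.  The
 * example_read() and example_bus_read() helpers are hypothetical:
 *
 *      static int example_read(struct regmap *map, unsigned int reg,
 *                              unsigned int *val)
 *      {
 *              if (regcache_read(map, reg, val) == 0)
 *                      return 0;
 *              return example_bus_read(map, reg, val);
 *      }
 */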

/**
 * regcache_write: Set the value of a given register in the cache.
 *
 * @map: map to configure.
 * @reg: The register index.
 * @value: The new register value.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_write(struct regmap *map,
                   unsigned int reg, unsigned int value)
{
        if (map->cache_type == REGCACHE_NONE)
                return 0;

        BUG_ON(!map->cache_ops);

        if (!regmap_volatile(map, reg))
                return map->cache_ops->write(map, reg, value);

        return 0;
}
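
/*
 * Illustrative sketch only: on the write side the cache is kept
 * write-through, so a write path updates the cache and then, unless the
 * map is in cache-only mode, the hardware as well.  example_write() and
 * example_bus_write() are hypothetical helpers:
 *
 *      static int example_write(struct regmap *map, unsigned int reg,
 *                               unsigned int val)
 *      {
 *              int ret = regcache_write(map, reg, val);
 *
 *              if (ret != 0)
 *                      return ret;
 *              if (map->cache_only)
 *                      return 0;
 *              return example_bus_write(map, reg, val);
 *      }
 */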

static int regcache_default_sync(struct regmap *map, unsigned int min,
                                 unsigned int max)
{
        unsigned int reg;

        for (reg = min; reg <= max; reg++) {
                unsigned int val;
                int ret;

                if (regmap_volatile(map, reg))
                        continue;

                ret = regcache_read(map, reg, &val);
                if (ret)
                        return ret;

                /* Is this the hardware default?  If so skip. */
                ret = regcache_lookup_reg(map, reg);
                if (ret >= 0 && val == map->reg_defaults[ret].def)
                        continue;

                map->cache_bypass = 1;
                ret = _regmap_write(map, reg, val);
                map->cache_bypass = 0;
                if (ret)
                        return ret;
                dev_dbg(map->dev, "Synced register %#x, value %#x\n", reg, val);
        }

        return 0;
}

/**
 * regcache_sync: Sync the register cache with the hardware.
 *
 * @map: map to configure.
 *
 * Any registers that should not be synced should be marked as
 * volatile.  In general drivers can choose not to use the provided
 * syncing functionality if they so require.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync(struct regmap *map)
{
        int ret = 0;
        unsigned int i;
        const char *name;
        unsigned int bypass;

        BUG_ON(!map->cache_ops);

        map->lock(map->lock_arg);
        /* Remember the initial bypass state */
        bypass = map->cache_bypass;
        dev_dbg(map->dev, "Syncing %s cache\n",
                map->cache_ops->name);
        name = map->cache_ops->name;
        trace_regcache_sync(map->dev, name, "start");

        if (!map->cache_dirty)
                goto out;

        map->async = true;

        /* Apply any patch first */
        map->cache_bypass = 1;
        for (i = 0; i < map->patch_regs; i++) {
                if (map->patch[i].reg % map->reg_stride) {
                        ret = -EINVAL;
                        goto out;
                }
                ret = _regmap_write(map, map->patch[i].reg, map->patch[i].def);
                if (ret != 0) {
                        dev_err(map->dev, "Failed to write %x = %x: %d\n",
                                map->patch[i].reg, map->patch[i].def, ret);
                        goto out;
                }
        }
        map->cache_bypass = 0;

        if (map->cache_ops->sync)
                ret = map->cache_ops->sync(map, 0, map->max_register);
        else
                ret = regcache_default_sync(map, 0, map->max_register);

        if (ret == 0)
                map->cache_dirty = false;

out:
        /* Restore the bypass state */
        map->async = false;
        map->cache_bypass = bypass;
        map->unlock(map->lock_arg);

        regmap_async_complete(map);

        trace_regcache_sync(map->dev, name, "stop");

        return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync);
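
/*
 * Illustrative sketch of a typical suspend/resume pairing in a client
 * driver; struct example_priv and the example_* names are hypothetical.
 * On suspend the map is switched to cache-only and marked dirty, and on
 * resume the cached state is replayed to the device:
 *
 *      static int example_suspend(struct device *dev)
 *      {
 *              struct example_priv *priv = dev_get_drvdata(dev);
 *
 *              regcache_cache_only(priv->regmap, true);
 *              regcache_mark_dirty(priv->regmap);
 *              return 0;
 *      }
 *
 *      static int example_resume(struct device *dev)
 *      {
 *              struct example_priv *priv = dev_get_drvdata(dev);
 *
 *              regcache_cache_only(priv->regmap, false);
 *              return regcache_sync(priv->regmap);
 *      }
 */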

/**
 * regcache_sync_region: Sync part of the register cache with the hardware.
 *
 * @map: map to sync.
 * @min: first register to sync
 * @max: last register to sync
 *
 * Write all non-default register values in the specified region to
 * the hardware.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_sync_region(struct regmap *map, unsigned int min,
                         unsigned int max)
{
        int ret = 0;
        const char *name;
        unsigned int bypass;

        BUG_ON(!map->cache_ops);

        map->lock(map->lock_arg);

        /* Remember the initial bypass state */
        bypass = map->cache_bypass;

        name = map->cache_ops->name;
        dev_dbg(map->dev, "Syncing %s cache from %d-%d\n", name, min, max);

        trace_regcache_sync(map->dev, name, "start region");

        if (!map->cache_dirty)
                goto out;

        map->async = true;

        if (map->cache_ops->sync)
                ret = map->cache_ops->sync(map, min, max);
        else
                ret = regcache_default_sync(map, min, max);

out:
        /* Restore the bypass state */
        map->cache_bypass = bypass;
        map->async = false;
        map->unlock(map->lock_arg);

        regmap_async_complete(map);

        trace_regcache_sync(map->dev, name, "stop region");

        return ret;
}
EXPORT_SYMBOL_GPL(regcache_sync_region);
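
/*
 * Illustrative sketch only: if just one block of a device loses its
 * state (say a bank of coefficient registers) while the rest stays
 * powered, a driver can restore that window alone.  The register range
 * below is hypothetical:
 *
 *      regcache_mark_dirty(map);
 *      ret = regcache_sync_region(map, 0x100, 0x1ff);
 *      if (ret != 0)
 *              dev_err(dev, "Failed to restore coefficients: %d\n", ret);
 */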

/**
 * regcache_drop_region: Discard part of the register cache
 *
 * @map: map to operate on
 * @min: first register to discard
 * @max: last register to discard
 *
 * Discard part of the register cache.
 *
 * Return a negative value on failure, 0 on success.
 */
int regcache_drop_region(struct regmap *map, unsigned int min,
                         unsigned int max)
{
        int ret = 0;

        if (!map->cache_ops || !map->cache_ops->drop)
                return -EINVAL;

        map->lock(map->lock_arg);

        trace_regcache_drop_region(map->dev, min, max);

        ret = map->cache_ops->drop(map, min, max);

        map->unlock(map->lock_arg);

        return ret;
}
EXPORT_SYMBOL_GPL(regcache_drop_region);
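
/*
 * Illustrative sketch only: after an operation that invalidates part of
 * the device state (for example a partial reset), the stale cache
 * entries can be discarded so a later sync does not replay them.  Note
 * this returns -EINVAL if the cache type has no drop operation.  The
 * range below is hypothetical:
 *
 *      ret = regcache_drop_region(map, 0x40, 0x4f);
 *      if (ret != 0)
 *              dev_warn(dev, "Failed to drop cache region: %d\n", ret);
 */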

/**
 * regcache_cache_only: Put a register map into cache only mode
 *
 * @map: map to configure
 * @enable: flag if writes should only update the cache and not the hardware
 *
 * When a register map is marked as cache only, writes to the register
 * map API will only update the register cache; they will not cause
 * any hardware changes.  This is useful for allowing portions of
 * drivers to act as though the device were functioning as normal when
 * it is disabled for power saving reasons.
 */
void regcache_cache_only(struct regmap *map, bool enable)
{
        map->lock(map->lock_arg);
        WARN_ON(map->cache_bypass && enable);
        map->cache_only = enable;
        trace_regmap_cache_only(map->dev, enable);
        map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_only);
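
/*
 * Illustrative sketch only: before cutting power to the device, switch
 * to cache-only mode so writes made while it is off are captured in the
 * cache and can be replayed later by regcache_sync().  The priv->supply
 * regulator handle is hypothetical:
 *
 *      regcache_cache_only(map, true);
 *      regcache_mark_dirty(map);
 *      regulator_disable(priv->supply);
 */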

/**
 * regcache_mark_dirty: Mark the register cache as dirty
 *
 * @map: map to mark
 *
 * Mark the register cache as dirty, for example due to the device
 * having been powered down for suspend.  If the cache is not marked
 * as dirty then the cache sync will be suppressed.
 */
void regcache_mark_dirty(struct regmap *map)
{
        map->lock(map->lock_arg);
        map->cache_dirty = true;
        map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_mark_dirty);

/**
 * regcache_cache_bypass: Put a register map into cache bypass mode
 *
 * @map: map to configure
 * @enable: flag if writes should bypass the cache and go straight to the
 *          hardware
 *
 * When a register map is marked with the cache bypass option, writes
 * to the register map API will only update the hardware and not the
 * cache.  This is useful when syncing the cache back to the hardware.
 */
void regcache_cache_bypass(struct regmap *map, bool enable)
{
        map->lock(map->lock_arg);
        WARN_ON(map->cache_only && enable);
        map->cache_bypass = enable;
        trace_regmap_cache_bypass(map->dev, enable);
        map->unlock(map->lock_arg);
}
EXPORT_SYMBOL_GPL(regcache_cache_bypass);
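
/*
 * Illustrative sketch only: bypass is useful for writes that must reach
 * the hardware but should never be restored by a later sync, such as
 * downloading a firmware image.  example_write_fw() is a hypothetical
 * helper:
 *
 *      regcache_cache_bypass(map, true);
 *      ret = example_write_fw(map, fw->data, fw->size);
 *      regcache_cache_bypass(map, false);
 */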
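/*
 * Store @val at index @idx of the raw cache block @base, using the
 * device native format when one is available.  Returns true if the
 * cache already held this value and nothing needed to be written.
 */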
bool regcache_set_val(struct regmap *map, void *base, unsigned int idx,
                      unsigned int val)
{
        if (regcache_get_val(map, base, idx) == val)
                return true;

        /* Use device native format if possible */
        if (map->format.format_val) {
                map->format.format_val(base + (map->cache_word_size * idx),
                                       val, 0);
                return false;
        }

        switch (map->cache_word_size) {
        case 1: {
                u8 *cache = base;
                cache[idx] = val;
                break;
        }
        case 2: {
                u16 *cache = base;
                cache[idx] = val;
                break;
        }
        case 4: {
                u32 *cache = base;
                cache[idx] = val;
                break;
        }
        default:
                BUG();
        }
        return false;
}

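/*
 * Fetch the value stored at index @idx of the raw cache block @base,
 * decoding it with the device native format when one is available.
 */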
unsigned int regcache_get_val(struct regmap *map, const void *base,
                              unsigned int idx)
{
        if (!base)
                return -EINVAL;

        /* Use device native format if possible */
        if (map->format.parse_val)
                return map->format.parse_val(regcache_get_val_addr(map, base,
                                                                   idx));

        switch (map->cache_word_size) {
        case 1: {
                const u8 *cache = base;
                return cache[idx];
        }
        case 2: {
                const u16 *cache = base;
                return cache[idx];
        }
        case 4: {
                const u32 *cache = base;
                return cache[idx];
        }
        default:
                BUG();
        }
        /* unreachable */
        return -1;
}

static int regcache_default_cmp(const void *a, const void *b)
{
        const struct reg_default *_a = a;
        const struct reg_default *_b = b;

        return _a->reg - _b->reg;
}

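/*
 * Binary-search map->reg_defaults (which bsearch() requires to be
 * sorted by register address) and return the index of the matching
 * entry, or -ENOENT if @reg has no default.
 */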
int regcache_lookup_reg(struct regmap *map, unsigned int reg)
{
        struct reg_default key;
        struct reg_default *r;

        key.reg = reg;
        key.def = 0;

        r = bsearch(&key, map->reg_defaults, map->num_reg_defaults,
                    sizeof(struct reg_default), regcache_default_cmp);

        if (r)
                return r - map->reg_defaults;
        else
                return -ENOENT;
}

static bool regcache_reg_present(unsigned long *cache_present, unsigned int idx)
{
        if (!cache_present)
                return true;

        return test_bit(idx, cache_present);
}

static int regcache_sync_block_single(struct regmap *map, void *block,
                                      unsigned long *cache_present,
                                      unsigned int block_base,
                                      unsigned int start, unsigned int end)
{
        unsigned int i, regtmp, val;
        int ret;

        for (i = start; i < end; i++) {
                regtmp = block_base + (i * map->reg_stride);

                if (!regcache_reg_present(cache_present, i))
                        continue;

                val = regcache_get_val(map, block, i);

                /* Is this the hardware default?  If so skip. */
                ret = regcache_lookup_reg(map, regtmp);
                if (ret >= 0 && val == map->reg_defaults[ret].def)
                        continue;

                map->cache_bypass = 1;

                ret = _regmap_write(map, regtmp, val);

                map->cache_bypass = 0;
                if (ret != 0)
                        return ret;
                dev_dbg(map->dev, "Synced register %#x, value %#x\n",
                        regtmp, val);
        }

        return 0;
}

static int regcache_sync_block_raw_flush(struct regmap *map, const void **data,
                                         unsigned int base, unsigned int cur)
{
        size_t val_bytes = map->format.val_bytes;
        int ret, count;

        if (*data == NULL)
                return 0;

        count = cur - base;

        dev_dbg(map->dev, "Writing %zu bytes for %d registers from 0x%x-0x%x\n",
                count * val_bytes, count, base, cur - 1);

        map->cache_bypass = 1;

        ret = _regmap_raw_write(map, base, *data, count * val_bytes);

        map->cache_bypass = 0;

        *data = NULL;

        return ret;
}

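/*
 * Sync a block by batching runs of adjacent, non-default registers into
 * single raw writes: gaps (absent or default values) flush the pending
 * run via regcache_sync_block_raw_flush(), and any run still open at
 * the end of the block is flushed before returning.
 */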
static int regcache_sync_block_raw(struct regmap *map, void *block,
                            unsigned long *cache_present,
                            unsigned int block_base, unsigned int start,
                            unsigned int end)
{
        unsigned int i, val;
        unsigned int regtmp = 0;
        unsigned int base = 0;
        const void *data = NULL;
        int ret;

        for (i = start; i < end; i++) {
                regtmp = block_base + (i * map->reg_stride);

                if (!regcache_reg_present(cache_present, i)) {
                        ret = regcache_sync_block_raw_flush(map, &data,
                                                            base, regtmp);
                        if (ret != 0)
                                return ret;
                        continue;
                }

                val = regcache_get_val(map, block, i);

                /* Is this the hardware default?  If so skip. */
                ret = regcache_lookup_reg(map, regtmp);
                if (ret >= 0 && val == map->reg_defaults[ret].def) {
                        ret = regcache_sync_block_raw_flush(map, &data,
                                                            base, regtmp);
                        if (ret != 0)
                                return ret;
                        continue;
                }

                if (!data) {
                        data = regcache_get_val_addr(map, block, i);
                        base = regtmp;
                }
        }

        return regcache_sync_block_raw_flush(map, &data, base, regtmp +
                        map->reg_stride);
}

int regcache_sync_block(struct regmap *map, void *block,
                        unsigned long *cache_present,
                        unsigned int block_base, unsigned int start,
                        unsigned int end)
{
        if (regmap_can_raw_write(map))
                return regcache_sync_block_raw(map, block, cache_present,
                                               block_base, start, end);
        else
                return regcache_sync_block_single(map, block, cache_present,
                                                  block_base, start, end);
}