linux/drivers/base/regmap/regmap.c
   1// SPDX-License-Identifier: GPL-2.0
   2//
   3// Register map access API
   4//
   5// Copyright 2011 Wolfson Microelectronics plc
   6//
   7// Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
   8
   9#include <linux/device.h>
  10#include <linux/slab.h>
  11#include <linux/export.h>
  12#include <linux/mutex.h>
  13#include <linux/err.h>
  14#include <linux/property.h>
  15#include <linux/rbtree.h>
  16#include <linux/sched.h>
  17#include <linux/delay.h>
  18#include <linux/log2.h>
  19#include <linux/hwspinlock.h>
  20#include <asm/unaligned.h>
  21
  22#define CREATE_TRACE_POINTS
  23#include "trace.h"
  24
  25#include "internal.h"
  26
  27/*
  28 * Sometimes for failures during very early init the trace
  29 * infrastructure isn't available early enough to be used.  For this
  30 * sort of problem defining LOG_DEVICE will add printks for basic
  31 * register I/O on a specific device.
  32 */
  33#undef LOG_DEVICE
  34
  35#ifdef LOG_DEVICE
  36static inline bool regmap_should_log(struct regmap *map)
  37{
  38        return (map->dev && strcmp(dev_name(map->dev), LOG_DEVICE) == 0);
  39}
  40#else
  41static inline bool regmap_should_log(struct regmap *map) { return false; }
  42#endif
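
/*
 * Editorial illustration (not part of the original source): to trace a
 * single device this way, one would replace the #undef above with a
 * definition naming that device, e.g. a hypothetical I2C client:
 *
 *	#define LOG_DEVICE "1-001a"
 *
 * The string is compared against dev_name(map->dev) in
 * regmap_should_log() above.
 */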
  43
  44
  45static int _regmap_update_bits(struct regmap *map, unsigned int reg,
  46                               unsigned int mask, unsigned int val,
  47                               bool *change, bool force_write);
  48
  49static int _regmap_bus_reg_read(void *context, unsigned int reg,
  50                                unsigned int *val);
  51static int _regmap_bus_read(void *context, unsigned int reg,
  52                            unsigned int *val);
  53static int _regmap_bus_formatted_write(void *context, unsigned int reg,
  54                                       unsigned int val);
  55static int _regmap_bus_reg_write(void *context, unsigned int reg,
  56                                 unsigned int val);
  57static int _regmap_bus_raw_write(void *context, unsigned int reg,
  58                                 unsigned int val);
  59
  60bool regmap_reg_in_ranges(unsigned int reg,
  61                          const struct regmap_range *ranges,
  62                          unsigned int nranges)
  63{
  64        const struct regmap_range *r;
  65        int i;
  66
  67        for (i = 0, r = ranges; i < nranges; i++, r++)
  68                if (regmap_reg_in_range(reg, r))
  69                        return true;
  70        return false;
  71}
  72EXPORT_SYMBOL_GPL(regmap_reg_in_ranges);
  73
  74bool regmap_check_range_table(struct regmap *map, unsigned int reg,
  75                              const struct regmap_access_table *table)
  76{
  77        /* Check "no ranges" first */
  78        if (regmap_reg_in_ranges(reg, table->no_ranges, table->n_no_ranges))
  79                return false;
  80
  81        /* In case zero "yes ranges" are supplied, any reg is OK */
  82        if (!table->n_yes_ranges)
  83                return true;
  84
  85        return regmap_reg_in_ranges(reg, table->yes_ranges,
  86                                    table->n_yes_ranges);
  87}
  88EXPORT_SYMBOL_GPL(regmap_check_range_table);
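
/*
 * Editorial example (a minimal sketch, not from the original file): a
 * driver might describe its readable registers with a table like the
 * one below, using made-up register addresses.  Registers covered by
 * no_ranges are rejected first; if no yes_ranges are given, every other
 * register is accepted.
 *
 *	static const struct regmap_range foo_rd_ranges[] = {
 *		regmap_reg_range(0x00, 0x1f),
 *		regmap_reg_range(0x40, 0x4f),
 *	};
 *
 *	static const struct regmap_access_table foo_rd_table = {
 *		.yes_ranges	= foo_rd_ranges,
 *		.n_yes_ranges	= ARRAY_SIZE(foo_rd_ranges),
 *	};
 *
 * Such a table is then hooked up via regmap_config.rd_table.
 */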
  89
  90bool regmap_writeable(struct regmap *map, unsigned int reg)
  91{
  92        if (map->max_register && reg > map->max_register)
  93                return false;
  94
  95        if (map->writeable_reg)
  96                return map->writeable_reg(map->dev, reg);
  97
  98        if (map->wr_table)
  99                return regmap_check_range_table(map, reg, map->wr_table);
 100
 101        return true;
 102}
 103
 104bool regmap_cached(struct regmap *map, unsigned int reg)
 105{
 106        int ret;
 107        unsigned int val;
 108
 109        if (map->cache_type == REGCACHE_NONE)
 110                return false;
 111
 112        if (!map->cache_ops)
 113                return false;
 114
 115        if (map->max_register && reg > map->max_register)
 116                return false;
 117
 118        map->lock(map->lock_arg);
 119        ret = regcache_read(map, reg, &val);
 120        map->unlock(map->lock_arg);
 121        if (ret)
 122                return false;
 123
 124        return true;
 125}
 126
 127bool regmap_readable(struct regmap *map, unsigned int reg)
 128{
 129        if (!map->reg_read)
 130                return false;
 131
 132        if (map->max_register && reg > map->max_register)
 133                return false;
 134
 135        if (map->format.format_write)
 136                return false;
 137
 138        if (map->readable_reg)
 139                return map->readable_reg(map->dev, reg);
 140
 141        if (map->rd_table)
 142                return regmap_check_range_table(map, reg, map->rd_table);
 143
 144        return true;
 145}
 146
 147bool regmap_volatile(struct regmap *map, unsigned int reg)
 148{
 149        if (!map->format.format_write && !regmap_readable(map, reg))
 150                return false;
 151
 152        if (map->volatile_reg)
 153                return map->volatile_reg(map->dev, reg);
 154
 155        if (map->volatile_table)
 156                return regmap_check_range_table(map, reg, map->volatile_table);
 157
 158        if (map->cache_ops)
 159                return false;
 160        else
 161                return true;
 162}
 163
 164bool regmap_precious(struct regmap *map, unsigned int reg)
 165{
 166        if (!regmap_readable(map, reg))
 167                return false;
 168
 169        if (map->precious_reg)
 170                return map->precious_reg(map->dev, reg);
 171
 172        if (map->precious_table)
 173                return regmap_check_range_table(map, reg, map->precious_table);
 174
 175        return false;
 176}
 177
 178bool regmap_writeable_noinc(struct regmap *map, unsigned int reg)
 179{
 180        if (map->writeable_noinc_reg)
 181                return map->writeable_noinc_reg(map->dev, reg);
 182
 183        if (map->wr_noinc_table)
 184                return regmap_check_range_table(map, reg, map->wr_noinc_table);
 185
 186        return true;
 187}
 188
 189bool regmap_readable_noinc(struct regmap *map, unsigned int reg)
 190{
 191        if (map->readable_noinc_reg)
 192                return map->readable_noinc_reg(map->dev, reg);
 193
 194        if (map->rd_noinc_table)
 195                return regmap_check_range_table(map, reg, map->rd_noinc_table);
 196
 197        return true;
 198}
 199
 200static bool regmap_volatile_range(struct regmap *map, unsigned int reg,
 201        size_t num)
 202{
 203        unsigned int i;
 204
 205        for (i = 0; i < num; i++)
 206                if (!regmap_volatile(map, reg + regmap_get_offset(map, i)))
 207                        return false;
 208
 209        return true;
 210}
 211
 212static void regmap_format_12_20_write(struct regmap *map,
 213                                     unsigned int reg, unsigned int val)
 214{
 215        u8 *out = map->work_buf;
 216
 217        out[0] = reg >> 4;
 218        out[1] = (reg << 4) | (val >> 16);
 219        out[2] = val >> 8;
 220        out[3] = val;
 221}
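
/*
 * Illustrative worked example (editorial, the values are arbitrary):
 * with reg = 0xabc and val = 0x12345 the function above packs the work
 * buffer as
 *
 *	out[0] = 0xab;  out[1] = 0xc1;  out[2] = 0x23;  out[3] = 0x45;
 *
 * i.e. the 12 register bits are followed immediately by the 20 value
 * bits, most significant byte first.
 */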
 222
 223
 224static void regmap_format_2_6_write(struct regmap *map,
 225                                     unsigned int reg, unsigned int val)
 226{
 227        u8 *out = map->work_buf;
 228
 229        *out = (reg << 6) | val;
 230}
 231
 232static void regmap_format_4_12_write(struct regmap *map,
 233                                     unsigned int reg, unsigned int val)
 234{
 235        __be16 *out = map->work_buf;
 236        *out = cpu_to_be16((reg << 12) | val);
 237}
 238
 239static void regmap_format_7_9_write(struct regmap *map,
 240                                    unsigned int reg, unsigned int val)
 241{
 242        __be16 *out = map->work_buf;
 243        *out = cpu_to_be16((reg << 9) | val);
 244}
 245
 246static void regmap_format_10_14_write(struct regmap *map,
 247                                    unsigned int reg, unsigned int val)
 248{
 249        u8 *out = map->work_buf;
 250
 251        out[2] = val;
 252        out[1] = (val >> 8) | (reg << 6);
 253        out[0] = reg >> 2;
 254}
 255
 256static void regmap_format_8(void *buf, unsigned int val, unsigned int shift)
 257{
 258        u8 *b = buf;
 259
 260        b[0] = val << shift;
 261}
 262
 263static void regmap_format_16_be(void *buf, unsigned int val, unsigned int shift)
 264{
 265        put_unaligned_be16(val << shift, buf);
 266}
 267
 268static void regmap_format_16_le(void *buf, unsigned int val, unsigned int shift)
 269{
 270        put_unaligned_le16(val << shift, buf);
 271}
 272
 273static void regmap_format_16_native(void *buf, unsigned int val,
 274                                    unsigned int shift)
 275{
 276        u16 v = val << shift;
 277
 278        memcpy(buf, &v, sizeof(v));
 279}
 280
 281static void regmap_format_24(void *buf, unsigned int val, unsigned int shift)
 282{
 283        u8 *b = buf;
 284
 285        val <<= shift;
 286
 287        b[0] = val >> 16;
 288        b[1] = val >> 8;
 289        b[2] = val;
 290}
 291
 292static void regmap_format_32_be(void *buf, unsigned int val, unsigned int shift)
 293{
 294        put_unaligned_be32(val << shift, buf);
 295}
 296
 297static void regmap_format_32_le(void *buf, unsigned int val, unsigned int shift)
 298{
 299        put_unaligned_le32(val << shift, buf);
 300}
 301
 302static void regmap_format_32_native(void *buf, unsigned int val,
 303                                    unsigned int shift)
 304{
 305        u32 v = val << shift;
 306
 307        memcpy(buf, &v, sizeof(v));
 308}
 309
 310#ifdef CONFIG_64BIT
 311static void regmap_format_64_be(void *buf, unsigned int val, unsigned int shift)
 312{
 313        put_unaligned_be64((u64) val << shift, buf);
 314}
 315
 316static void regmap_format_64_le(void *buf, unsigned int val, unsigned int shift)
 317{
 318        put_unaligned_le64((u64) val << shift, buf);
 319}
 320
 321static void regmap_format_64_native(void *buf, unsigned int val,
 322                                    unsigned int shift)
 323{
 324        u64 v = (u64) val << shift;
 325
 326        memcpy(buf, &v, sizeof(v));
 327}
 328#endif
 329
 330static void regmap_parse_inplace_noop(void *buf)
 331{
 332}
 333
 334static unsigned int regmap_parse_8(const void *buf)
 335{
 336        const u8 *b = buf;
 337
 338        return b[0];
 339}
 340
 341static unsigned int regmap_parse_16_be(const void *buf)
 342{
 343        return get_unaligned_be16(buf);
 344}
 345
 346static unsigned int regmap_parse_16_le(const void *buf)
 347{
 348        return get_unaligned_le16(buf);
 349}
 350
 351static void regmap_parse_16_be_inplace(void *buf)
 352{
 353        u16 v = get_unaligned_be16(buf);
 354
 355        memcpy(buf, &v, sizeof(v));
 356}
 357
 358static void regmap_parse_16_le_inplace(void *buf)
 359{
 360        u16 v = get_unaligned_le16(buf);
 361
 362        memcpy(buf, &v, sizeof(v));
 363}
 364
 365static unsigned int regmap_parse_16_native(const void *buf)
 366{
 367        u16 v;
 368
 369        memcpy(&v, buf, sizeof(v));
 370        return v;
 371}
 372
 373static unsigned int regmap_parse_24(const void *buf)
 374{
 375        const u8 *b = buf;
 376        unsigned int ret = b[2];
 377        ret |= ((unsigned int)b[1]) << 8;
 378        ret |= ((unsigned int)b[0]) << 16;
 379
 380        return ret;
 381}
 382
 383static unsigned int regmap_parse_32_be(const void *buf)
 384{
 385        return get_unaligned_be32(buf);
 386}
 387
 388static unsigned int regmap_parse_32_le(const void *buf)
 389{
 390        return get_unaligned_le32(buf);
 391}
 392
 393static void regmap_parse_32_be_inplace(void *buf)
 394{
 395        u32 v = get_unaligned_be32(buf);
 396
 397        memcpy(buf, &v, sizeof(v));
 398}
 399
 400static void regmap_parse_32_le_inplace(void *buf)
 401{
 402        u32 v = get_unaligned_le32(buf);
 403
 404        memcpy(buf, &v, sizeof(v));
 405}
 406
 407static unsigned int regmap_parse_32_native(const void *buf)
 408{
 409        u32 v;
 410
 411        memcpy(&v, buf, sizeof(v));
 412        return v;
 413}
 414
 415#ifdef CONFIG_64BIT
 416static unsigned int regmap_parse_64_be(const void *buf)
 417{
 418        return get_unaligned_be64(buf);
 419}
 420
 421static unsigned int regmap_parse_64_le(const void *buf)
 422{
 423        return get_unaligned_le64(buf);
 424}
 425
 426static void regmap_parse_64_be_inplace(void *buf)
 427{
  428        u64 v = get_unaligned_be64(buf);
 429
 430        memcpy(buf, &v, sizeof(v));
 431}
 432
 433static void regmap_parse_64_le_inplace(void *buf)
 434{
 435        u64 v = get_unaligned_le64(buf);
 436
 437        memcpy(buf, &v, sizeof(v));
 438}
 439
 440static unsigned int regmap_parse_64_native(const void *buf)
 441{
 442        u64 v;
 443
 444        memcpy(&v, buf, sizeof(v));
 445        return v;
 446}
 447#endif
 448
 449static void regmap_lock_hwlock(void *__map)
 450{
 451        struct regmap *map = __map;
 452
 453        hwspin_lock_timeout(map->hwlock, UINT_MAX);
 454}
 455
 456static void regmap_lock_hwlock_irq(void *__map)
 457{
 458        struct regmap *map = __map;
 459
 460        hwspin_lock_timeout_irq(map->hwlock, UINT_MAX);
 461}
 462
 463static void regmap_lock_hwlock_irqsave(void *__map)
 464{
 465        struct regmap *map = __map;
 466
 467        hwspin_lock_timeout_irqsave(map->hwlock, UINT_MAX,
 468                                    &map->spinlock_flags);
 469}
 470
 471static void regmap_unlock_hwlock(void *__map)
 472{
 473        struct regmap *map = __map;
 474
 475        hwspin_unlock(map->hwlock);
 476}
 477
 478static void regmap_unlock_hwlock_irq(void *__map)
 479{
 480        struct regmap *map = __map;
 481
 482        hwspin_unlock_irq(map->hwlock);
 483}
 484
 485static void regmap_unlock_hwlock_irqrestore(void *__map)
 486{
 487        struct regmap *map = __map;
 488
 489        hwspin_unlock_irqrestore(map->hwlock, &map->spinlock_flags);
 490}
 491
 492static void regmap_lock_unlock_none(void *__map)
 493{
 494
 495}
 496
 497static void regmap_lock_mutex(void *__map)
 498{
 499        struct regmap *map = __map;
 500        mutex_lock(&map->mutex);
 501}
 502
 503static void regmap_unlock_mutex(void *__map)
 504{
 505        struct regmap *map = __map;
 506        mutex_unlock(&map->mutex);
 507}
 508
 509static void regmap_lock_spinlock(void *__map)
 510__acquires(&map->spinlock)
 511{
 512        struct regmap *map = __map;
 513        unsigned long flags;
 514
 515        spin_lock_irqsave(&map->spinlock, flags);
 516        map->spinlock_flags = flags;
 517}
 518
 519static void regmap_unlock_spinlock(void *__map)
 520__releases(&map->spinlock)
 521{
 522        struct regmap *map = __map;
 523        spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
 524}
 525
 526static void dev_get_regmap_release(struct device *dev, void *res)
 527{
 528        /*
 529         * We don't actually have anything to do here; the goal here
 530         * is not to manage the regmap but to provide a simple way to
 531         * get the regmap back given a struct device.
 532         */
 533}
 534
 535static bool _regmap_range_add(struct regmap *map,
 536                              struct regmap_range_node *data)
 537{
 538        struct rb_root *root = &map->range_tree;
 539        struct rb_node **new = &(root->rb_node), *parent = NULL;
 540
 541        while (*new) {
 542                struct regmap_range_node *this =
 543                        rb_entry(*new, struct regmap_range_node, node);
 544
 545                parent = *new;
 546                if (data->range_max < this->range_min)
 547                        new = &((*new)->rb_left);
 548                else if (data->range_min > this->range_max)
 549                        new = &((*new)->rb_right);
 550                else
 551                        return false;
 552        }
 553
 554        rb_link_node(&data->node, parent, new);
 555        rb_insert_color(&data->node, root);
 556
 557        return true;
 558}
 559
 560static struct regmap_range_node *_regmap_range_lookup(struct regmap *map,
 561                                                      unsigned int reg)
 562{
 563        struct rb_node *node = map->range_tree.rb_node;
 564
 565        while (node) {
 566                struct regmap_range_node *this =
 567                        rb_entry(node, struct regmap_range_node, node);
 568
 569                if (reg < this->range_min)
 570                        node = node->rb_left;
 571                else if (reg > this->range_max)
 572                        node = node->rb_right;
 573                else
 574                        return this;
 575        }
 576
 577        return NULL;
 578}
 579
 580static void regmap_range_exit(struct regmap *map)
 581{
 582        struct rb_node *next;
 583        struct regmap_range_node *range_node;
 584
 585        next = rb_first(&map->range_tree);
 586        while (next) {
 587                range_node = rb_entry(next, struct regmap_range_node, node);
 588                next = rb_next(&range_node->node);
 589                rb_erase(&range_node->node, &map->range_tree);
 590                kfree(range_node);
 591        }
 592
 593        kfree(map->selector_work_buf);
 594}
 595
 596static int regmap_set_name(struct regmap *map, const struct regmap_config *config)
 597{
 598        if (config->name) {
 599                const char *name = kstrdup_const(config->name, GFP_KERNEL);
 600
 601                if (!name)
 602                        return -ENOMEM;
 603
 604                kfree_const(map->name);
 605                map->name = name;
 606        }
 607
 608        return 0;
 609}
 610
 611int regmap_attach_dev(struct device *dev, struct regmap *map,
 612                      const struct regmap_config *config)
 613{
 614        struct regmap **m;
 615        int ret;
 616
 617        map->dev = dev;
 618
 619        ret = regmap_set_name(map, config);
 620        if (ret)
 621                return ret;
 622
 623        regmap_debugfs_init(map);
 624
 625        /* Add a devres resource for dev_get_regmap() */
 626        m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
 627        if (!m) {
 628                regmap_debugfs_exit(map);
 629                return -ENOMEM;
 630        }
 631        *m = map;
 632        devres_add(dev, m);
 633
 634        return 0;
 635}
 636EXPORT_SYMBOL_GPL(regmap_attach_dev);
 637
 638static enum regmap_endian regmap_get_reg_endian(const struct regmap_bus *bus,
 639                                        const struct regmap_config *config)
 640{
 641        enum regmap_endian endian;
 642
 643        /* Retrieve the endianness specification from the regmap config */
 644        endian = config->reg_format_endian;
 645
 646        /* If the regmap config specified a non-default value, use that */
 647        if (endian != REGMAP_ENDIAN_DEFAULT)
 648                return endian;
 649
 650        /* Retrieve the endianness specification from the bus config */
 651        if (bus && bus->reg_format_endian_default)
 652                endian = bus->reg_format_endian_default;
 653
 654        /* If the bus specified a non-default value, use that */
 655        if (endian != REGMAP_ENDIAN_DEFAULT)
 656                return endian;
 657
 658        /* Use this if no other value was found */
 659        return REGMAP_ENDIAN_BIG;
 660}
 661
 662enum regmap_endian regmap_get_val_endian(struct device *dev,
 663                                         const struct regmap_bus *bus,
 664                                         const struct regmap_config *config)
 665{
 666        struct fwnode_handle *fwnode = dev ? dev_fwnode(dev) : NULL;
 667        enum regmap_endian endian;
 668
 669        /* Retrieve the endianness specification from the regmap config */
 670        endian = config->val_format_endian;
 671
 672        /* If the regmap config specified a non-default value, use that */
 673        if (endian != REGMAP_ENDIAN_DEFAULT)
 674                return endian;
 675
  676        /* If the firmware node exists, try to get the endianness from it */
 677        if (fwnode_property_read_bool(fwnode, "big-endian"))
 678                endian = REGMAP_ENDIAN_BIG;
 679        else if (fwnode_property_read_bool(fwnode, "little-endian"))
 680                endian = REGMAP_ENDIAN_LITTLE;
 681        else if (fwnode_property_read_bool(fwnode, "native-endian"))
 682                endian = REGMAP_ENDIAN_NATIVE;
 683
 684        /* If the endianness was specified in fwnode, use that */
 685        if (endian != REGMAP_ENDIAN_DEFAULT)
 686                return endian;
 687
 688        /* Retrieve the endianness specification from the bus config */
 689        if (bus && bus->val_format_endian_default)
 690                endian = bus->val_format_endian_default;
 691
 692        /* If the bus specified a non-default value, use that */
 693        if (endian != REGMAP_ENDIAN_DEFAULT)
 694                return endian;
 695
 696        /* Use this if no other value was found */
 697        return REGMAP_ENDIAN_BIG;
 698}
 699EXPORT_SYMBOL_GPL(regmap_get_val_endian);
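
/*
 * Editorial sketch: the lookup order implemented above means an
 * explicit setting in the regmap_config wins over firmware properties
 * and bus defaults.  A hypothetical driver could force little-endian
 * values like this:
 *
 *	static const struct regmap_config foo_config = {
 *		.reg_bits		= 8,
 *		.val_bits		= 16,
 *		.val_format_endian	= REGMAP_ENDIAN_LITTLE,
 *	};
 *
 * Alternatively a "little-endian" property in the device's firmware
 * node selects the same behaviour when the config leaves the field at
 * REGMAP_ENDIAN_DEFAULT.
 */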
 700
 701struct regmap *__regmap_init(struct device *dev,
 702                             const struct regmap_bus *bus,
 703                             void *bus_context,
 704                             const struct regmap_config *config,
 705                             struct lock_class_key *lock_key,
 706                             const char *lock_name)
 707{
 708        struct regmap *map;
 709        int ret = -EINVAL;
 710        enum regmap_endian reg_endian, val_endian;
 711        int i, j;
 712
 713        if (!config)
 714                goto err;
 715
 716        map = kzalloc(sizeof(*map), GFP_KERNEL);
 717        if (map == NULL) {
 718                ret = -ENOMEM;
 719                goto err;
 720        }
 721
 722        ret = regmap_set_name(map, config);
 723        if (ret)
 724                goto err_map;
 725
 726        ret = -EINVAL; /* Later error paths rely on this */
 727
 728        if (config->disable_locking) {
 729                map->lock = map->unlock = regmap_lock_unlock_none;
 730                map->can_sleep = config->can_sleep;
 731                regmap_debugfs_disable(map);
 732        } else if (config->lock && config->unlock) {
 733                map->lock = config->lock;
 734                map->unlock = config->unlock;
 735                map->lock_arg = config->lock_arg;
 736                map->can_sleep = config->can_sleep;
 737        } else if (config->use_hwlock) {
 738                map->hwlock = hwspin_lock_request_specific(config->hwlock_id);
 739                if (!map->hwlock) {
 740                        ret = -ENXIO;
 741                        goto err_name;
 742                }
 743
 744                switch (config->hwlock_mode) {
 745                case HWLOCK_IRQSTATE:
 746                        map->lock = regmap_lock_hwlock_irqsave;
 747                        map->unlock = regmap_unlock_hwlock_irqrestore;
 748                        break;
 749                case HWLOCK_IRQ:
 750                        map->lock = regmap_lock_hwlock_irq;
 751                        map->unlock = regmap_unlock_hwlock_irq;
 752                        break;
 753                default:
 754                        map->lock = regmap_lock_hwlock;
 755                        map->unlock = regmap_unlock_hwlock;
 756                        break;
 757                }
 758
 759                map->lock_arg = map;
 760        } else {
 761                if ((bus && bus->fast_io) ||
 762                    config->fast_io) {
 763                        spin_lock_init(&map->spinlock);
 764                        map->lock = regmap_lock_spinlock;
 765                        map->unlock = regmap_unlock_spinlock;
 766                        lockdep_set_class_and_name(&map->spinlock,
 767                                                   lock_key, lock_name);
 768                } else {
 769                        mutex_init(&map->mutex);
 770                        map->lock = regmap_lock_mutex;
 771                        map->unlock = regmap_unlock_mutex;
 772                        map->can_sleep = true;
 773                        lockdep_set_class_and_name(&map->mutex,
 774                                                   lock_key, lock_name);
 775                }
 776                map->lock_arg = map;
 777        }
 778
 779        /*
 780         * When we write in fast-paths with regmap_bulk_write() don't allocate
 781         * scratch buffers with sleeping allocations.
 782         */
 783        if ((bus && bus->fast_io) || config->fast_io)
 784                map->alloc_flags = GFP_ATOMIC;
 785        else
 786                map->alloc_flags = GFP_KERNEL;
 787
 788        map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
 789        map->format.pad_bytes = config->pad_bits / 8;
 790        map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
 791        map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
 792                        config->val_bits + config->pad_bits, 8);
 793        map->reg_shift = config->pad_bits % 8;
 794        if (config->reg_stride)
 795                map->reg_stride = config->reg_stride;
 796        else
 797                map->reg_stride = 1;
 798        if (is_power_of_2(map->reg_stride))
 799                map->reg_stride_order = ilog2(map->reg_stride);
 800        else
 801                map->reg_stride_order = -1;
 802        map->use_single_read = config->use_single_read || !bus || !bus->read;
 803        map->use_single_write = config->use_single_write || !bus || !bus->write;
 804        map->can_multi_write = config->can_multi_write && bus && bus->write;
 805        if (bus) {
 806                map->max_raw_read = bus->max_raw_read;
 807                map->max_raw_write = bus->max_raw_write;
 808        }
 809        map->dev = dev;
 810        map->bus = bus;
 811        map->bus_context = bus_context;
 812        map->max_register = config->max_register;
 813        map->wr_table = config->wr_table;
 814        map->rd_table = config->rd_table;
 815        map->volatile_table = config->volatile_table;
 816        map->precious_table = config->precious_table;
 817        map->wr_noinc_table = config->wr_noinc_table;
 818        map->rd_noinc_table = config->rd_noinc_table;
 819        map->writeable_reg = config->writeable_reg;
 820        map->readable_reg = config->readable_reg;
 821        map->volatile_reg = config->volatile_reg;
 822        map->precious_reg = config->precious_reg;
 823        map->writeable_noinc_reg = config->writeable_noinc_reg;
 824        map->readable_noinc_reg = config->readable_noinc_reg;
 825        map->cache_type = config->cache_type;
 826
 827        spin_lock_init(&map->async_lock);
 828        INIT_LIST_HEAD(&map->async_list);
 829        INIT_LIST_HEAD(&map->async_free);
 830        init_waitqueue_head(&map->async_waitq);
 831
 832        if (config->read_flag_mask ||
 833            config->write_flag_mask ||
 834            config->zero_flag_mask) {
 835                map->read_flag_mask = config->read_flag_mask;
 836                map->write_flag_mask = config->write_flag_mask;
 837        } else if (bus) {
 838                map->read_flag_mask = bus->read_flag_mask;
 839        }
 840
 841        if (!bus) {
 842                map->reg_read  = config->reg_read;
 843                map->reg_write = config->reg_write;
 844
 845                map->defer_caching = false;
 846                goto skip_format_initialization;
 847        } else if (!bus->read || !bus->write) {
 848                map->reg_read = _regmap_bus_reg_read;
 849                map->reg_write = _regmap_bus_reg_write;
 850                map->reg_update_bits = bus->reg_update_bits;
 851
 852                map->defer_caching = false;
 853                goto skip_format_initialization;
 854        } else {
 855                map->reg_read  = _regmap_bus_read;
 856                map->reg_update_bits = bus->reg_update_bits;
 857        }
 858
 859        reg_endian = regmap_get_reg_endian(bus, config);
 860        val_endian = regmap_get_val_endian(dev, bus, config);
 861
 862        switch (config->reg_bits + map->reg_shift) {
 863        case 2:
 864                switch (config->val_bits) {
 865                case 6:
 866                        map->format.format_write = regmap_format_2_6_write;
 867                        break;
 868                default:
 869                        goto err_hwlock;
 870                }
 871                break;
 872
 873        case 4:
 874                switch (config->val_bits) {
 875                case 12:
 876                        map->format.format_write = regmap_format_4_12_write;
 877                        break;
 878                default:
 879                        goto err_hwlock;
 880                }
 881                break;
 882
 883        case 7:
 884                switch (config->val_bits) {
 885                case 9:
 886                        map->format.format_write = regmap_format_7_9_write;
 887                        break;
 888                default:
 889                        goto err_hwlock;
 890                }
 891                break;
 892
 893        case 10:
 894                switch (config->val_bits) {
 895                case 14:
 896                        map->format.format_write = regmap_format_10_14_write;
 897                        break;
 898                default:
 899                        goto err_hwlock;
 900                }
 901                break;
 902
 903        case 12:
 904                switch (config->val_bits) {
 905                case 20:
 906                        map->format.format_write = regmap_format_12_20_write;
 907                        break;
 908                default:
 909                        goto err_hwlock;
 910                }
 911                break;
 912
 913        case 8:
 914                map->format.format_reg = regmap_format_8;
 915                break;
 916
 917        case 16:
 918                switch (reg_endian) {
 919                case REGMAP_ENDIAN_BIG:
 920                        map->format.format_reg = regmap_format_16_be;
 921                        break;
 922                case REGMAP_ENDIAN_LITTLE:
 923                        map->format.format_reg = regmap_format_16_le;
 924                        break;
 925                case REGMAP_ENDIAN_NATIVE:
 926                        map->format.format_reg = regmap_format_16_native;
 927                        break;
 928                default:
 929                        goto err_hwlock;
 930                }
 931                break;
 932
 933        case 24:
 934                if (reg_endian != REGMAP_ENDIAN_BIG)
 935                        goto err_hwlock;
 936                map->format.format_reg = regmap_format_24;
 937                break;
 938
 939        case 32:
 940                switch (reg_endian) {
 941                case REGMAP_ENDIAN_BIG:
 942                        map->format.format_reg = regmap_format_32_be;
 943                        break;
 944                case REGMAP_ENDIAN_LITTLE:
 945                        map->format.format_reg = regmap_format_32_le;
 946                        break;
 947                case REGMAP_ENDIAN_NATIVE:
 948                        map->format.format_reg = regmap_format_32_native;
 949                        break;
 950                default:
 951                        goto err_hwlock;
 952                }
 953                break;
 954
 955#ifdef CONFIG_64BIT
 956        case 64:
 957                switch (reg_endian) {
 958                case REGMAP_ENDIAN_BIG:
 959                        map->format.format_reg = regmap_format_64_be;
 960                        break;
 961                case REGMAP_ENDIAN_LITTLE:
 962                        map->format.format_reg = regmap_format_64_le;
 963                        break;
 964                case REGMAP_ENDIAN_NATIVE:
 965                        map->format.format_reg = regmap_format_64_native;
 966                        break;
 967                default:
 968                        goto err_hwlock;
 969                }
 970                break;
 971#endif
 972
 973        default:
 974                goto err_hwlock;
 975        }
 976
 977        if (val_endian == REGMAP_ENDIAN_NATIVE)
 978                map->format.parse_inplace = regmap_parse_inplace_noop;
 979
 980        switch (config->val_bits) {
 981        case 8:
 982                map->format.format_val = regmap_format_8;
 983                map->format.parse_val = regmap_parse_8;
 984                map->format.parse_inplace = regmap_parse_inplace_noop;
 985                break;
 986        case 16:
 987                switch (val_endian) {
 988                case REGMAP_ENDIAN_BIG:
 989                        map->format.format_val = regmap_format_16_be;
 990                        map->format.parse_val = regmap_parse_16_be;
 991                        map->format.parse_inplace = regmap_parse_16_be_inplace;
 992                        break;
 993                case REGMAP_ENDIAN_LITTLE:
 994                        map->format.format_val = regmap_format_16_le;
 995                        map->format.parse_val = regmap_parse_16_le;
 996                        map->format.parse_inplace = regmap_parse_16_le_inplace;
 997                        break;
 998                case REGMAP_ENDIAN_NATIVE:
 999                        map->format.format_val = regmap_format_16_native;
1000                        map->format.parse_val = regmap_parse_16_native;
1001                        break;
1002                default:
1003                        goto err_hwlock;
1004                }
1005                break;
1006        case 24:
1007                if (val_endian != REGMAP_ENDIAN_BIG)
1008                        goto err_hwlock;
1009                map->format.format_val = regmap_format_24;
1010                map->format.parse_val = regmap_parse_24;
1011                break;
1012        case 32:
1013                switch (val_endian) {
1014                case REGMAP_ENDIAN_BIG:
1015                        map->format.format_val = regmap_format_32_be;
1016                        map->format.parse_val = regmap_parse_32_be;
1017                        map->format.parse_inplace = regmap_parse_32_be_inplace;
1018                        break;
1019                case REGMAP_ENDIAN_LITTLE:
1020                        map->format.format_val = regmap_format_32_le;
1021                        map->format.parse_val = regmap_parse_32_le;
1022                        map->format.parse_inplace = regmap_parse_32_le_inplace;
1023                        break;
1024                case REGMAP_ENDIAN_NATIVE:
1025                        map->format.format_val = regmap_format_32_native;
1026                        map->format.parse_val = regmap_parse_32_native;
1027                        break;
1028                default:
1029                        goto err_hwlock;
1030                }
1031                break;
1032#ifdef CONFIG_64BIT
1033        case 64:
1034                switch (val_endian) {
1035                case REGMAP_ENDIAN_BIG:
1036                        map->format.format_val = regmap_format_64_be;
1037                        map->format.parse_val = regmap_parse_64_be;
1038                        map->format.parse_inplace = regmap_parse_64_be_inplace;
1039                        break;
1040                case REGMAP_ENDIAN_LITTLE:
1041                        map->format.format_val = regmap_format_64_le;
1042                        map->format.parse_val = regmap_parse_64_le;
1043                        map->format.parse_inplace = regmap_parse_64_le_inplace;
1044                        break;
1045                case REGMAP_ENDIAN_NATIVE:
1046                        map->format.format_val = regmap_format_64_native;
1047                        map->format.parse_val = regmap_parse_64_native;
1048                        break;
1049                default:
1050                        goto err_hwlock;
1051                }
1052                break;
1053#endif
1054        }
1055
1056        if (map->format.format_write) {
1057                if ((reg_endian != REGMAP_ENDIAN_BIG) ||
1058                    (val_endian != REGMAP_ENDIAN_BIG))
1059                        goto err_hwlock;
1060                map->use_single_write = true;
1061        }
1062
1063        if (!map->format.format_write &&
1064            !(map->format.format_reg && map->format.format_val))
1065                goto err_hwlock;
1066
1067        map->work_buf = kzalloc(map->format.buf_size, GFP_KERNEL);
1068        if (map->work_buf == NULL) {
1069                ret = -ENOMEM;
1070                goto err_hwlock;
1071        }
1072
1073        if (map->format.format_write) {
1074                map->defer_caching = false;
1075                map->reg_write = _regmap_bus_formatted_write;
1076        } else if (map->format.format_val) {
1077                map->defer_caching = true;
1078                map->reg_write = _regmap_bus_raw_write;
1079        }
1080
1081skip_format_initialization:
1082
1083        map->range_tree = RB_ROOT;
1084        for (i = 0; i < config->num_ranges; i++) {
1085                const struct regmap_range_cfg *range_cfg = &config->ranges[i];
1086                struct regmap_range_node *new;
1087
1088                /* Sanity check */
1089                if (range_cfg->range_max < range_cfg->range_min) {
1090                        dev_err(map->dev, "Invalid range %d: %d < %d\n", i,
1091                                range_cfg->range_max, range_cfg->range_min);
1092                        goto err_range;
1093                }
1094
1095                if (range_cfg->range_max > map->max_register) {
1096                        dev_err(map->dev, "Invalid range %d: %d > %d\n", i,
1097                                range_cfg->range_max, map->max_register);
1098                        goto err_range;
1099                }
1100
1101                if (range_cfg->selector_reg > map->max_register) {
1102                        dev_err(map->dev,
1103                                "Invalid range %d: selector out of map\n", i);
1104                        goto err_range;
1105                }
1106
1107                if (range_cfg->window_len == 0) {
1108                        dev_err(map->dev, "Invalid range %d: window_len 0\n",
1109                                i);
1110                        goto err_range;
1111                }
1112
 1113                /* Make sure that this register range has no selector
 1114                   or data window within its boundary */
1115                for (j = 0; j < config->num_ranges; j++) {
 1116                        unsigned int sel_reg = config->ranges[j].selector_reg;
 1117                        unsigned int win_min = config->ranges[j].window_start;
 1118                        unsigned int win_max = win_min +
 1119                                               config->ranges[j].window_len - 1;
1120
1121                        /* Allow data window inside its own virtual range */
1122                        if (j == i)
1123                                continue;
1124
1125                        if (range_cfg->range_min <= sel_reg &&
1126                            sel_reg <= range_cfg->range_max) {
1127                                dev_err(map->dev,
1128                                        "Range %d: selector for %d in window\n",
1129                                        i, j);
1130                                goto err_range;
1131                        }
1132
1133                        if (!(win_max < range_cfg->range_min ||
1134                              win_min > range_cfg->range_max)) {
1135                                dev_err(map->dev,
1136                                        "Range %d: window for %d in window\n",
1137                                        i, j);
1138                                goto err_range;
1139                        }
1140                }
1141
1142                new = kzalloc(sizeof(*new), GFP_KERNEL);
1143                if (new == NULL) {
1144                        ret = -ENOMEM;
1145                        goto err_range;
1146                }
1147
1148                new->map = map;
1149                new->name = range_cfg->name;
1150                new->range_min = range_cfg->range_min;
1151                new->range_max = range_cfg->range_max;
1152                new->selector_reg = range_cfg->selector_reg;
1153                new->selector_mask = range_cfg->selector_mask;
1154                new->selector_shift = range_cfg->selector_shift;
1155                new->window_start = range_cfg->window_start;
1156                new->window_len = range_cfg->window_len;
1157
1158                if (!_regmap_range_add(map, new)) {
1159                        dev_err(map->dev, "Failed to add range %d\n", i);
1160                        kfree(new);
1161                        goto err_range;
1162                }
1163
1164                if (map->selector_work_buf == NULL) {
1165                        map->selector_work_buf =
1166                                kzalloc(map->format.buf_size, GFP_KERNEL);
1167                        if (map->selector_work_buf == NULL) {
1168                                ret = -ENOMEM;
1169                                goto err_range;
1170                        }
1171                }
1172        }
1173
1174        ret = regcache_init(map, config);
1175        if (ret != 0)
1176                goto err_range;
1177
1178        if (dev) {
1179                ret = regmap_attach_dev(dev, map, config);
1180                if (ret != 0)
1181                        goto err_regcache;
1182        } else {
1183                regmap_debugfs_init(map);
1184        }
1185
1186        return map;
1187
1188err_regcache:
1189        regcache_exit(map);
1190err_range:
1191        regmap_range_exit(map);
1192        kfree(map->work_buf);
1193err_hwlock:
1194        if (map->hwlock)
1195                hwspin_lock_free(map->hwlock);
1196err_name:
1197        kfree_const(map->name);
1198err_map:
1199        kfree(map);
1200err:
1201        return ERR_PTR(ret);
1202}
1203EXPORT_SYMBOL_GPL(__regmap_init);
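
/*
 * Editorial usage sketch: drivers do not normally call __regmap_init()
 * directly but go through the bus-specific wrapper macros (for example
 * devm_regmap_init_i2c()), which supply the regmap_bus and lock class
 * for them.  With a hypothetical I2C client and register layout:
 *
 *	static const struct regmap_config foo_regmap_config = {
 *		.reg_bits	= 8,
 *		.val_bits	= 8,
 *		.max_register	= 0x7f,
 *		.cache_type	= REGCACHE_RBTREE,
 *	};
 *
 *	map = devm_regmap_init_i2c(i2c, &foo_regmap_config);
 *	if (IS_ERR(map))
 *		return PTR_ERR(map);
 */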
1204
1205static void devm_regmap_release(struct device *dev, void *res)
1206{
1207        regmap_exit(*(struct regmap **)res);
1208}
1209
1210struct regmap *__devm_regmap_init(struct device *dev,
1211                                  const struct regmap_bus *bus,
1212                                  void *bus_context,
1213                                  const struct regmap_config *config,
1214                                  struct lock_class_key *lock_key,
1215                                  const char *lock_name)
1216{
1217        struct regmap **ptr, *regmap;
1218
1219        ptr = devres_alloc(devm_regmap_release, sizeof(*ptr), GFP_KERNEL);
1220        if (!ptr)
1221                return ERR_PTR(-ENOMEM);
1222
1223        regmap = __regmap_init(dev, bus, bus_context, config,
1224                               lock_key, lock_name);
1225        if (!IS_ERR(regmap)) {
1226                *ptr = regmap;
1227                devres_add(dev, ptr);
1228        } else {
1229                devres_free(ptr);
1230        }
1231
1232        return regmap;
1233}
1234EXPORT_SYMBOL_GPL(__devm_regmap_init);
1235
1236static void regmap_field_init(struct regmap_field *rm_field,
1237        struct regmap *regmap, struct reg_field reg_field)
1238{
1239        rm_field->regmap = regmap;
1240        rm_field->reg = reg_field.reg;
1241        rm_field->shift = reg_field.lsb;
1242        rm_field->mask = GENMASK(reg_field.msb, reg_field.lsb);
1243        rm_field->id_size = reg_field.id_size;
1244        rm_field->id_offset = reg_field.id_offset;
1245}
1246
1247/**
1248 * devm_regmap_field_alloc() - Allocate and initialise a register field.
1249 *
1250 * @dev: Device that will be interacted with
1251 * @regmap: regmap bank in which this register field is located.
 1252 * @reg_field: Register field within the bank.
1253 *
1254 * The return value will be an ERR_PTR() on error or a valid pointer
1255 * to a struct regmap_field. The regmap_field will be automatically freed
1256 * by the device management code.
1257 */
1258struct regmap_field *devm_regmap_field_alloc(struct device *dev,
1259                struct regmap *regmap, struct reg_field reg_field)
1260{
1261        struct regmap_field *rm_field = devm_kzalloc(dev,
1262                                        sizeof(*rm_field), GFP_KERNEL);
1263        if (!rm_field)
1264                return ERR_PTR(-ENOMEM);
1265
1266        regmap_field_init(rm_field, regmap, reg_field);
1267
1268        return rm_field;
1269
1270}
1271EXPORT_SYMBOL_GPL(devm_regmap_field_alloc);
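
/*
 * Editorial example (hypothetical register layout): a field covering
 * bits [3:0] of register 0x10 can be described with the REG_FIELD()
 * helper and then accessed through the regmap_field API:
 *
 *	static const struct reg_field foo_mode_field = REG_FIELD(0x10, 0, 3);
 *
 *	field = devm_regmap_field_alloc(dev, map, foo_mode_field);
 *	if (IS_ERR(field))
 *		return PTR_ERR(field);
 *
 *	ret = regmap_field_write(field, 0x5);
 */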
1272
1273
1274/**
1275 * regmap_field_bulk_alloc() - Allocate and initialise a bulk register field.
1276 *
1277 * @regmap: regmap bank in which this register field is located.
1278 * @rm_field: regmap register fields within the bank.
1279 * @reg_field: Register fields within the bank.
1280 * @num_fields: Number of register fields.
1281 *
 1282 * The return value will be -ENOMEM on error or zero for success.
 1283 * Newly allocated regmap_fields should be freed by calling
 1284 * regmap_field_bulk_free().
1285 */
1286int regmap_field_bulk_alloc(struct regmap *regmap,
1287                            struct regmap_field **rm_field,
1288                            struct reg_field *reg_field,
1289                            int num_fields)
1290{
1291        struct regmap_field *rf;
1292        int i;
1293
1294        rf = kcalloc(num_fields, sizeof(*rf), GFP_KERNEL);
1295        if (!rf)
1296                return -ENOMEM;
1297
1298        for (i = 0; i < num_fields; i++) {
1299                regmap_field_init(&rf[i], regmap, reg_field[i]);
1300                rm_field[i] = &rf[i];
1301        }
1302
1303        return 0;
1304}
1305EXPORT_SYMBOL_GPL(regmap_field_bulk_alloc);
1306
1307/**
 1308 * devm_regmap_field_bulk_alloc() - Allocate and initialise bulk register
 1309 * fields.
1310 *
1311 * @dev: Device that will be interacted with
1312 * @regmap: regmap bank in which this register field is located.
1313 * @rm_field: regmap register fields within the bank.
1314 * @reg_field: Register fields within the bank.
1315 * @num_fields: Number of register fields.
1316 *
 1317 * The return value will be -ENOMEM on error or zero for success.
1318 * Newly allocated regmap_fields will be automatically freed by the
1319 * device management code.
1320 */
1321int devm_regmap_field_bulk_alloc(struct device *dev,
1322                                 struct regmap *regmap,
1323                                 struct regmap_field **rm_field,
1324                                 struct reg_field *reg_field,
1325                                 int num_fields)
1326{
1327        struct regmap_field *rf;
1328        int i;
1329
1330        rf = devm_kcalloc(dev, num_fields, sizeof(*rf), GFP_KERNEL);
1331        if (!rf)
1332                return -ENOMEM;
1333
1334        for (i = 0; i < num_fields; i++) {
1335                regmap_field_init(&rf[i], regmap, reg_field[i]);
1336                rm_field[i] = &rf[i];
1337        }
1338
1339        return 0;
1340}
1341EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_alloc);
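
/*
 * Editorial sketch with made-up field definitions: a driver can
 * allocate several fields in one call and receive an array of
 * pointers back, all freed automatically through devres:
 *
 *	static struct reg_field foo_fields[] = {
 *		REG_FIELD(0x00, 0, 3),
 *		REG_FIELD(0x00, 4, 7),
 *		REG_FIELD(0x04, 0, 0),
 *	};
 *	struct regmap_field *fields[ARRAY_SIZE(foo_fields)];
 *
 *	ret = devm_regmap_field_bulk_alloc(dev, map, fields, foo_fields,
 *					   ARRAY_SIZE(foo_fields));
 */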
1342
1343/**
 1344 * regmap_field_bulk_free() - Free register fields allocated using
 1345 *                       regmap_field_bulk_alloc().
1346 *
1347 * @field: regmap fields which should be freed.
1348 */
1349void regmap_field_bulk_free(struct regmap_field *field)
1350{
1351        kfree(field);
1352}
1353EXPORT_SYMBOL_GPL(regmap_field_bulk_free);
1354
1355/**
1356 * devm_regmap_field_bulk_free() - Free a bulk register field allocated using
1357 *                            devm_regmap_field_bulk_alloc.
1358 *
1359 * @dev: Device that will be interacted with
1360 * @field: regmap field which should be freed.
1361 *
1362 * Free register field allocated using devm_regmap_field_bulk_alloc(). Usually
1363 * drivers need not call this function, as the memory allocated via devm
1364 * will be freed as per device-driver life-cycle.
1365 */
1366void devm_regmap_field_bulk_free(struct device *dev,
1367                                 struct regmap_field *field)
1368{
1369        devm_kfree(dev, field);
1370}
1371EXPORT_SYMBOL_GPL(devm_regmap_field_bulk_free);
1372
1373/**
1374 * devm_regmap_field_free() - Free a register field allocated using
1375 *                            devm_regmap_field_alloc.
1376 *
1377 * @dev: Device that will be interacted with
1378 * @field: regmap field which should be freed.
1379 *
1380 * Free register field allocated using devm_regmap_field_alloc(). Usually
1381 * drivers need not call this function, as the memory allocated via devm
 1382 * will be freed as per device-driver life-cycle.
1383 */
1384void devm_regmap_field_free(struct device *dev,
1385        struct regmap_field *field)
1386{
1387        devm_kfree(dev, field);
1388}
1389EXPORT_SYMBOL_GPL(devm_regmap_field_free);
1390
1391/**
1392 * regmap_field_alloc() - Allocate and initialise a register field.
1393 *
1394 * @regmap: regmap bank in which this register field is located.
 1395 * @reg_field: Register field within the bank.
1396 *
1397 * The return value will be an ERR_PTR() on error or a valid pointer
1398 * to a struct regmap_field. The regmap_field should be freed by the
 1399 * user once they have finished working with it, using regmap_field_free().
1400 */
1401struct regmap_field *regmap_field_alloc(struct regmap *regmap,
1402                struct reg_field reg_field)
1403{
1404        struct regmap_field *rm_field = kzalloc(sizeof(*rm_field), GFP_KERNEL);
1405
1406        if (!rm_field)
1407                return ERR_PTR(-ENOMEM);
1408
1409        regmap_field_init(rm_field, regmap, reg_field);
1410
1411        return rm_field;
1412}
1413EXPORT_SYMBOL_GPL(regmap_field_alloc);
1414
1415/**
1416 * regmap_field_free() - Free register field allocated using
1417 *                       regmap_field_alloc.
1418 *
1419 * @field: regmap field which should be freed.
1420 */
1421void regmap_field_free(struct regmap_field *field)
1422{
1423        kfree(field);
1424}
1425EXPORT_SYMBOL_GPL(regmap_field_free);
1426
1427/**
1428 * regmap_reinit_cache() - Reinitialise the current register cache
1429 *
1430 * @map: Register map to operate on.
1431 * @config: New configuration.  Only the cache data will be used.
1432 *
1433 * Discard any existing register cache for the map and initialize a
1434 * new cache.  This can be used to restore the cache to defaults or to
1435 * update the cache configuration to reflect runtime discovery of the
1436 * hardware.
1437 *
1438 * No explicit locking is done here, the user needs to ensure that
1439 * this function will not race with other calls to regmap.
1440 */
1441int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
1442{
1443        int ret;
1444
1445        regcache_exit(map);
1446        regmap_debugfs_exit(map);
1447
1448        map->max_register = config->max_register;
1449        map->writeable_reg = config->writeable_reg;
1450        map->readable_reg = config->readable_reg;
1451        map->volatile_reg = config->volatile_reg;
1452        map->precious_reg = config->precious_reg;
1453        map->writeable_noinc_reg = config->writeable_noinc_reg;
1454        map->readable_noinc_reg = config->readable_noinc_reg;
1455        map->cache_type = config->cache_type;
1456
1457        ret = regmap_set_name(map, config);
1458        if (ret)
1459                return ret;
1460
1461        regmap_debugfs_init(map);
1462
1463        map->cache_bypass = false;
1464        map->cache_only = false;
1465
1466        return regcache_init(map, config);
1467}
1468EXPORT_SYMBOL_GPL(regmap_reinit_cache);
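
/*
 * Editorial sketch: a typical use is growing the register map after
 * probing the hardware revision.  The values here are invented:
 *
 *	struct regmap_config new_config = foo_regmap_config;
 *
 *	new_config.max_register = 0xff;		// discovered at runtime
 *	ret = regmap_reinit_cache(map, &new_config);
 *
 * The caller must make sure no other regmap access races with this.
 */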
1469
1470/**
1471 * regmap_exit() - Free a previously allocated register map
1472 *
1473 * @map: Register map to operate on.
1474 */
1475void regmap_exit(struct regmap *map)
1476{
1477        struct regmap_async *async;
1478
1479        regcache_exit(map);
1480        regmap_debugfs_exit(map);
1481        regmap_range_exit(map);
1482        if (map->bus && map->bus->free_context)
1483                map->bus->free_context(map->bus_context);
1484        kfree(map->work_buf);
1485        while (!list_empty(&map->async_free)) {
1486                async = list_first_entry_or_null(&map->async_free,
1487                                                 struct regmap_async,
1488                                                 list);
1489                list_del(&async->list);
1490                kfree(async->work_buf);
1491                kfree(async);
1492        }
1493        if (map->hwlock)
1494                hwspin_lock_free(map->hwlock);
1495        if (map->lock == regmap_lock_mutex)
1496                mutex_destroy(&map->mutex);
1497        kfree_const(map->name);
1498        kfree(map->patch);
1499        kfree(map);
1500}
1501EXPORT_SYMBOL_GPL(regmap_exit);
1502
1503static int dev_get_regmap_match(struct device *dev, void *res, void *data)
1504{
1505        struct regmap **r = res;
1506        if (!r || !*r) {
1507                WARN_ON(!r || !*r);
1508                return 0;
1509        }
1510
 1511        /* If the user didn't specify a name, match any */
1512        if (data)
1513                return !strcmp((*r)->name, data);
1514        else
1515                return 1;
1516}
1517
1518/**
1519 * dev_get_regmap() - Obtain the regmap (if any) for a device
1520 *
1521 * @dev: Device to retrieve the map for
1522 * @name: Optional name for the register map, usually NULL.
1523 *
1524 * Returns the regmap for the device if one is present, or NULL.  If
1525 * name is specified then it must match the name specified when
1526 * registering the device, if it is NULL then the first regmap found
1527 * will be used.  Devices with multiple register maps are very rare,
1528 * generic code should normally not need to specify a name.
1529 */
1530struct regmap *dev_get_regmap(struct device *dev, const char *name)
1531{
1532        struct regmap **r = devres_find(dev, dev_get_regmap_release,
1533                                        dev_get_regmap_match, (void *)name);
1534
1535        if (!r)
1536                return NULL;
1537        return *r;
1538}
1539EXPORT_SYMBOL_GPL(dev_get_regmap);
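
/*
 * Editorial sketch: child drivers of an MFD typically retrieve the
 * parent's regmap rather than creating their own, for instance:
 *
 *	map = dev_get_regmap(pdev->dev.parent, NULL);
 *	if (!map)
 *		return -ENODEV;
 *
 * (pdev is a hypothetical platform device for the sub-function.)
 */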
1540
1541/**
1542 * regmap_get_device() - Obtain the device from a regmap
1543 *
1544 * @map: Register map to operate on.
1545 *
1546 * Returns the underlying device that the regmap has been created for.
1547 */
1548struct device *regmap_get_device(struct regmap *map)
1549{
1550        return map->dev;
1551}
1552EXPORT_SYMBOL_GPL(regmap_get_device);
1553
1554static int _regmap_select_page(struct regmap *map, unsigned int *reg,
1555                               struct regmap_range_node *range,
1556                               unsigned int val_num)
1557{
1558        void *orig_work_buf;
1559        unsigned int win_offset;
1560        unsigned int win_page;
1561        bool page_chg;
1562        int ret;
1563
1564        win_offset = (*reg - range->range_min) % range->window_len;
1565        win_page = (*reg - range->range_min) / range->window_len;
1566
1567        if (val_num > 1) {
1568                /* Bulk write shouldn't cross range boundary */
1569                if (*reg + val_num - 1 > range->range_max)
1570                        return -EINVAL;
1571
1572                /* ... or single page boundary */
1573                if (val_num > range->window_len - win_offset)
1574                        return -EINVAL;
1575        }
1576
 1577        /* It is possible to have the selector register inside the data
 1578           window.  In that case the selector register is present on every
 1579           page and needs no page switching when accessed alone. */
1580        if (val_num > 1 ||
1581            range->window_start + win_offset != range->selector_reg) {
1582                /* Use separate work_buf during page switching */
1583                orig_work_buf = map->work_buf;
1584                map->work_buf = map->selector_work_buf;
1585
1586                ret = _regmap_update_bits(map, range->selector_reg,
1587                                          range->selector_mask,
1588                                          win_page << range->selector_shift,
1589                                          &page_chg, false);
1590
1591                map->work_buf = orig_work_buf;
1592
1593                if (ret != 0)
1594                        return ret;
1595        }
1596
1597        *reg = range->window_start + win_offset;
1598
1599        return 0;
1600}
1601
1602static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes,
1603                                          unsigned long mask)
1604{
1605        u8 *buf;
1606        int i;
1607
1608        if (!mask || !map->work_buf)
1609                return;
1610
1611        buf = map->work_buf;
1612
1613        for (i = 0; i < max_bytes; i++)
1614                buf[i] |= (mask >> (8 * i)) & 0xff;
1615}
1616
1617static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg,
1618                                  const void *val, size_t val_len, bool noinc)
1619{
1620        struct regmap_range_node *range;
1621        unsigned long flags;
1622        void *work_val = map->work_buf + map->format.reg_bytes +
1623                map->format.pad_bytes;
1624        void *buf;
1625        int ret = -ENOTSUPP;
1626        size_t len;
1627        int i;
1628
1629        WARN_ON(!map->bus);
1630
1631        /* Check for unwritable or noinc registers in range
1632         * before we start
1633         */
1634        if (!regmap_writeable_noinc(map, reg)) {
1635                for (i = 0; i < val_len / map->format.val_bytes; i++) {
1636                        unsigned int element =
1637                                reg + regmap_get_offset(map, i);
1638                        if (!regmap_writeable(map, element) ||
1639                                regmap_writeable_noinc(map, element))
1640                                return -EINVAL;
1641                }
1642        }
1643
1644        if (!map->cache_bypass && map->format.parse_val) {
1645                unsigned int ival;
1646                int val_bytes = map->format.val_bytes;
1647                for (i = 0; i < val_len / val_bytes; i++) {
1648                        ival = map->format.parse_val(val + (i * val_bytes));
1649                        ret = regcache_write(map,
1650                                             reg + regmap_get_offset(map, i),
1651                                             ival);
1652                        if (ret) {
1653                                dev_err(map->dev,
1654                                        "Error in caching of register: %x ret: %d\n",
1655                                        reg + regmap_get_offset(map, i), ret);
1656                                return ret;
1657                        }
1658                }
1659                if (map->cache_only) {
1660                        map->cache_dirty = true;
1661                        return 0;
1662                }
1663        }
1664
1665        range = _regmap_range_lookup(map, reg);
1666        if (range) {
1667                int val_num = val_len / map->format.val_bytes;
1668                int win_offset = (reg - range->range_min) % range->window_len;
1669                int win_residue = range->window_len - win_offset;
1670
1671                /* If the write goes beyond the end of the window split it */
1672                while (val_num > win_residue) {
1673                        dev_dbg(map->dev, "Writing window %d/%zu\n",
1674                                win_residue, val_len / map->format.val_bytes);
1675                        ret = _regmap_raw_write_impl(map, reg, val,
1676                                                     win_residue *
1677                                                     map->format.val_bytes, noinc);
1678                        if (ret != 0)
1679                                return ret;
1680
1681                        reg += win_residue;
1682                        val_num -= win_residue;
1683                        val += win_residue * map->format.val_bytes;
1684                        val_len -= win_residue * map->format.val_bytes;
1685
1686                        win_offset = (reg - range->range_min) %
1687                                range->window_len;
1688                        win_residue = range->window_len - win_offset;
1689                }
1690
1691                ret = _regmap_select_page(map, &reg, range, noinc ? 1 : val_num);
1692                if (ret != 0)
1693                        return ret;
1694        }
1695
1696        map->format.format_reg(map->work_buf, reg, map->reg_shift);
1697        regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
1698                                      map->write_flag_mask);
1699
1700        /*
1701         * Essentially all I/O mechanisms will be faster with a single
1702         * buffer to write.  Since register syncs often generate raw
1703         * writes of single registers optimise that case.
1704         */
1705        if (val != work_val && val_len == map->format.val_bytes) {
1706                memcpy(work_val, val, map->format.val_bytes);
1707                val = work_val;
1708        }
1709
1710        if (map->async && map->bus->async_write) {
1711                struct regmap_async *async;
1712
1713                trace_regmap_async_write_start(map, reg, val_len);
1714
1715                spin_lock_irqsave(&map->async_lock, flags);
1716                async = list_first_entry_or_null(&map->async_free,
1717                                                 struct regmap_async,
1718                                                 list);
1719                if (async)
1720                        list_del(&async->list);
1721                spin_unlock_irqrestore(&map->async_lock, flags);
1722
1723                if (!async) {
1724                        async = map->bus->async_alloc();
1725                        if (!async)
1726                                return -ENOMEM;
1727
1728                        async->work_buf = kzalloc(map->format.buf_size,
1729                                                  GFP_KERNEL | GFP_DMA);
1730                        if (!async->work_buf) {
1731                                kfree(async);
1732                                return -ENOMEM;
1733                        }
1734                }
1735
1736                async->map = map;
1737
1738                /* If the caller supplied the value we can use it safely. */
1739                memcpy(async->work_buf, map->work_buf, map->format.pad_bytes +
1740                       map->format.reg_bytes + map->format.val_bytes);
1741
1742                spin_lock_irqsave(&map->async_lock, flags);
1743                list_add_tail(&async->list, &map->async_list);
1744                spin_unlock_irqrestore(&map->async_lock, flags);
1745
1746                if (val != work_val)
1747                        ret = map->bus->async_write(map->bus_context,
1748                                                    async->work_buf,
1749                                                    map->format.reg_bytes +
1750                                                    map->format.pad_bytes,
1751                                                    val, val_len, async);
1752                else
1753                        ret = map->bus->async_write(map->bus_context,
1754                                                    async->work_buf,
1755                                                    map->format.reg_bytes +
1756                                                    map->format.pad_bytes +
1757                                                    val_len, NULL, 0, async);
1758
1759                if (ret != 0) {
1760                        dev_err(map->dev, "Failed to schedule write: %d\n",
1761                                ret);
1762
1763                        spin_lock_irqsave(&map->async_lock, flags);
1764                        list_move(&async->list, &map->async_free);
1765                        spin_unlock_irqrestore(&map->async_lock, flags);
1766                }
1767
1768                return ret;
1769        }
1770
1771        trace_regmap_hw_write_start(map, reg, val_len / map->format.val_bytes);
1772
1773        /* If we're doing a single register write we can probably just
1774         * send the work_buf directly, otherwise try to do a gather
1775         * write.
1776         */
1777        if (val == work_val)
1778                ret = map->bus->write(map->bus_context, map->work_buf,
1779                                      map->format.reg_bytes +
1780                                      map->format.pad_bytes +
1781                                      val_len);
1782        else if (map->bus->gather_write)
1783                ret = map->bus->gather_write(map->bus_context, map->work_buf,
1784                                             map->format.reg_bytes +
1785                                             map->format.pad_bytes,
1786                                             val, val_len);
1787        else
1788                ret = -ENOTSUPP;
1789
1790        /* If that didn't work fall back on linearising by hand. */
1791        if (ret == -ENOTSUPP) {
1792                len = map->format.reg_bytes + map->format.pad_bytes + val_len;
1793                buf = kzalloc(len, GFP_KERNEL);
1794                if (!buf)
1795                        return -ENOMEM;
1796
1797                memcpy(buf, map->work_buf, map->format.reg_bytes);
1798                memcpy(buf + map->format.reg_bytes + map->format.pad_bytes,
1799                       val, val_len);
1800                ret = map->bus->write(map->bus_context, buf, len);
1801
1802                kfree(buf);
1803        } else if (ret != 0 && !map->cache_bypass && map->format.parse_val) {
1804                /* regcache_drop_region() takes lock that we already have,
1805                 * thus call map->cache_ops->drop() directly
1806                 */
1807                if (map->cache_ops && map->cache_ops->drop)
1808                        map->cache_ops->drop(map, reg, reg + 1);
1809        }
1810
1811        trace_regmap_hw_write_done(map, reg, val_len / map->format.val_bytes);
1812
1813        return ret;
1814}
1815
1816/**
1817 * regmap_can_raw_write - Test if regmap_raw_write() is supported
1818 *
1819 * @map: Map to check.
1820 */
1821bool regmap_can_raw_write(struct regmap *map)
1822{
1823        return map->bus && map->bus->write && map->format.format_val &&
1824                map->format.format_reg;
1825}
1826EXPORT_SYMBOL_GPL(regmap_can_raw_write);
1827
1828/**
1829 * regmap_get_raw_read_max - Get the maximum size we can read
1830 *
1831 * @map: Map to check.
1832 */
1833size_t regmap_get_raw_read_max(struct regmap *map)
1834{
1835        return map->max_raw_read;
1836}
1837EXPORT_SYMBOL_GPL(regmap_get_raw_read_max);
1838
1839/**
1840 * regmap_get_raw_write_max - Get the maximum size we can write
1841 *
1842 * @map: Map to check.
1843 */
1844size_t regmap_get_raw_write_max(struct regmap *map)
1845{
1846        return map->max_raw_write;
1847}
1848EXPORT_SYMBOL_GPL(regmap_get_raw_write_max);
1849
1850static int _regmap_bus_formatted_write(void *context, unsigned int reg,
1851                                       unsigned int val)
1852{
1853        int ret;
1854        struct regmap_range_node *range;
1855        struct regmap *map = context;
1856
1857        WARN_ON(!map->bus || !map->format.format_write);
1858
1859        range = _regmap_range_lookup(map, reg);
1860        if (range) {
1861                ret = _regmap_select_page(map, &reg, range, 1);
1862                if (ret != 0)
1863                        return ret;
1864        }
1865
1866        map->format.format_write(map, reg, val);
1867
1868        trace_regmap_hw_write_start(map, reg, 1);
1869
1870        ret = map->bus->write(map->bus_context, map->work_buf,
1871                              map->format.buf_size);
1872
1873        trace_regmap_hw_write_done(map, reg, 1);
1874
1875        return ret;
1876}
1877
1878static int _regmap_bus_reg_write(void *context, unsigned int reg,
1879                                 unsigned int val)
1880{
1881        struct regmap *map = context;
1882
1883        return map->bus->reg_write(map->bus_context, reg, val);
1884}
1885
1886static int _regmap_bus_raw_write(void *context, unsigned int reg,
1887                                 unsigned int val)
1888{
1889        struct regmap *map = context;
1890
1891        WARN_ON(!map->bus || !map->format.format_val);
1892
1893        map->format.format_val(map->work_buf + map->format.reg_bytes
1894                               + map->format.pad_bytes, val, 0);
1895        return _regmap_raw_write_impl(map, reg,
1896                                      map->work_buf +
1897                                      map->format.reg_bytes +
1898                                      map->format.pad_bytes,
1899                                      map->format.val_bytes,
1900                                      false);
1901}
1902
1903static inline void *_regmap_map_get_context(struct regmap *map)
1904{
1905        return (map->bus) ? map : map->bus_context;
1906}
1907
1908int _regmap_write(struct regmap *map, unsigned int reg,
1909                  unsigned int val)
1910{
1911        int ret;
1912        void *context = _regmap_map_get_context(map);
1913
1914        if (!regmap_writeable(map, reg))
1915                return -EIO;
1916
1917        if (!map->cache_bypass && !map->defer_caching) {
1918                ret = regcache_write(map, reg, val);
1919                if (ret != 0)
1920                        return ret;
1921                if (map->cache_only) {
1922                        map->cache_dirty = true;
1923                        return 0;
1924                }
1925        }
1926
1927        ret = map->reg_write(context, reg, val);
1928        if (ret == 0) {
1929                if (regmap_should_log(map))
1930                        dev_info(map->dev, "%x <= %x\n", reg, val);
1931
1932                trace_regmap_reg_write(map, reg, val);
1933        }
1934
1935        return ret;
1936}
1937
1938/**
1939 * regmap_write() - Write a value to a single register
1940 *
1941 * @map: Register map to write to
1942 * @reg: Register to write to
1943 * @val: Value to be written
1944 *
1945 * A value of zero will be returned on success, a negative errno will
1946 * be returned in error cases.
1947 */
1948int regmap_write(struct regmap *map, unsigned int reg, unsigned int val)
1949{
1950        int ret;
1951
1952        if (!IS_ALIGNED(reg, map->reg_stride))
1953                return -EINVAL;
1954
1955        map->lock(map->lock_arg);
1956
1957        ret = _regmap_write(map, reg, val);
1958
1959        map->unlock(map->lock_arg);
1960
1961        return ret;
1962}
1963EXPORT_SYMBOL_GPL(regmap_write);
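
/*
 * Illustrative example, not part of this file: writing one register.
 * The foo_* name, register address and value are hypothetical.
 */
static int foo_unmute(struct regmap *map)
{
        /* Clear the hypothetical mute register at 0x12 */
        return regmap_write(map, 0x12, 0x0);
}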
1964
1965/**
1966 * regmap_write_async() - Write a value to a single register asynchronously
1967 *
1968 * @map: Register map to write to
1969 * @reg: Register to write to
1970 * @val: Value to be written
1971 *
1972 * A value of zero will be returned on success, a negative errno will
1973 * be returned in error cases.
1974 */
1975int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val)
1976{
1977        int ret;
1978
1979        if (!IS_ALIGNED(reg, map->reg_stride))
1980                return -EINVAL;
1981
1982        map->lock(map->lock_arg);
1983
1984        map->async = true;
1985
1986        ret = _regmap_write(map, reg, val);
1987
1988        map->async = false;
1989
1990        map->unlock(map->lock_arg);
1991
1992        return ret;
1993}
1994EXPORT_SYMBOL_GPL(regmap_write_async);
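
/*
 * Illustrative example, not part of this file: scheduling a register
 * write asynchronously and then waiting for all outstanding async I/O.
 * The foo_* name and register address are hypothetical.
 */
static int foo_start_dsp(struct regmap *map)
{
        int ret;

        /* Hypothetical DSP start register */
        ret = regmap_write_async(map, 0x30, 0x1);
        if (ret)
                return ret;

        /* Block until the scheduled write (and any others) complete */
        return regmap_async_complete(map);
}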
1995
1996int _regmap_raw_write(struct regmap *map, unsigned int reg,
1997                      const void *val, size_t val_len, bool noinc)
1998{
1999        size_t val_bytes = map->format.val_bytes;
2000        size_t val_count = val_len / val_bytes;
2001        size_t chunk_count, chunk_bytes;
2002        size_t chunk_regs = val_count;
2003        int ret, i;
2004
2005        if (!val_count)
2006                return -EINVAL;
2007
2008        if (map->use_single_write)
2009                chunk_regs = 1;
2010        else if (map->max_raw_write && val_len > map->max_raw_write)
2011                chunk_regs = map->max_raw_write / val_bytes;
2012
2013        chunk_count = val_count / chunk_regs;
2014        chunk_bytes = chunk_regs * val_bytes;
2015
2016        /* Write as many whole chunks of chunk_bytes as possible */
2017        for (i = 0; i < chunk_count; i++) {
2018                ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc);
2019                if (ret)
2020                        return ret;
2021
2022                reg += regmap_get_offset(map, chunk_regs);
2023                val += chunk_bytes;
2024                val_len -= chunk_bytes;
2025        }
2026
2027        /* Write remaining bytes */
2028        if (val_len)
2029                ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc);
2030
2031        return ret;
2032}
2033
2034/**
2035 * regmap_raw_write() - Write raw values to one or more registers
2036 *
2037 * @map: Register map to write to
2038 * @reg: Initial register to write to
2039 * @val: Block of data to be written, laid out for direct transmission to the
2040 *       device
2041 * @val_len: Length of data pointed to by val.
2042 *
2043 * This function is intended to be used for things like firmware
2044 * download where a large block of data needs to be transferred to the
2045 * device.  No formatting will be done on the data provided.
2046 *
2047 * A value of zero will be returned on success, a negative errno will
2048 * be returned in error cases.
2049 */
2050int regmap_raw_write(struct regmap *map, unsigned int reg,
2051                     const void *val, size_t val_len)
2052{
2053        int ret;
2054
2055        if (!regmap_can_raw_write(map))
2056                return -EINVAL;
2057        if (val_len % map->format.val_bytes)
2058                return -EINVAL;
2059
2060        map->lock(map->lock_arg);
2061
2062        ret = _regmap_raw_write(map, reg, val, val_len, false);
2063
2064        map->unlock(map->lock_arg);
2065
2066        return ret;
2067}
2068EXPORT_SYMBOL_GPL(regmap_raw_write);
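
/*
 * Illustrative example, not part of this file: downloading a firmware
 * blob that is already laid out in the device's own register format, so
 * no value formatting is wanted.  The foo_* name and the download window
 * address are hypothetical; fw_len must be a multiple of the map's value
 * size.
 */
static int foo_download_fw(struct regmap *map, const void *fw, size_t fw_len)
{
        return regmap_raw_write(map, 0x1000, fw, fw_len);
}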
2069
2070/**
2071 * regmap_noinc_write() - Write data to a register without incrementing the
2072 *                        register number
2073 *
2074 * @map: Register map to write to
2075 * @reg: Register to write to
2076 * @val: Pointer to data buffer
2077 * @val_len: Length of the data to write, in bytes.
2078 *
2079 * The regmap API usually assumes that bulk bus write operations will write a
2080 * range of registers. Some devices have certain registers for which a write
2081 * operation can write to an internal FIFO.
2082 *
2083 * The target register must be volatile but registers after it can be
2084 * completely unrelated cacheable registers.
2085 *
2086 * This will attempt multiple writes as required to write val_len bytes.
2087 *
2088 * A value of zero will be returned on success, a negative errno will be
2089 * returned in error cases.
2090 */
2091int regmap_noinc_write(struct regmap *map, unsigned int reg,
2092                      const void *val, size_t val_len)
2093{
2094        size_t write_len;
2095        int ret;
2096
2097        if (!map->bus)
2098                return -EINVAL;
2099        if (!map->bus->write)
2100                return -ENOTSUPP;
2101        if (val_len % map->format.val_bytes)
2102                return -EINVAL;
2103        if (!IS_ALIGNED(reg, map->reg_stride))
2104                return -EINVAL;
2105        if (val_len == 0)
2106                return -EINVAL;
2107
2108        map->lock(map->lock_arg);
2109
2110        if (!regmap_volatile(map, reg) || !regmap_writeable_noinc(map, reg)) {
2111                ret = -EINVAL;
2112                goto out_unlock;
2113        }
2114
2115        while (val_len) {
2116                if (map->max_raw_write && map->max_raw_write < val_len)
2117                        write_len = map->max_raw_write;
2118                else
2119                        write_len = val_len;
2120                ret = _regmap_raw_write(map, reg, val, write_len, true);
2121                if (ret)
2122                        goto out_unlock;
2123                val = ((u8 *)val) + write_len;
2124                val_len -= write_len;
2125        }
2126
2127out_unlock:
2128        map->unlock(map->lock_arg);
2129        return ret;
2130}
2131EXPORT_SYMBOL_GPL(regmap_noinc_write);
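
/*
 * Illustrative example, not part of this file: pushing a buffer into a
 * FIFO that sits behind a single register, which must be marked both
 * volatile and writeable_noinc in the regmap config.  The foo_* name and
 * the FIFO register address are hypothetical.
 */
static int foo_fifo_fill(struct regmap *map, const void *buf, size_t len)
{
        return regmap_noinc_write(map, 0x40, buf, len);
}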
2132
2133/**
2134 * regmap_field_update_bits_base() - Perform a read/modify/write cycle on a
2135 *                                   register field.
2136 *
2137 * @field: Register field to write to
2138 * @mask: Bitmask to change
2139 * @val: Value to be written
2140 * @change: Boolean indicating if a write was done
2141 * @async: Boolean indicating whether to write asynchronously
2142 * @force: Boolean indicating whether to write even if the value is unchanged
2143 *
2144 * Perform a read/modify/write cycle on the register field with change,
2145 * async and force options.
2146 *
2147 * A value of zero will be returned on success, a negative errno will
2148 * be returned in error cases.
2149 */
2150int regmap_field_update_bits_base(struct regmap_field *field,
2151                                  unsigned int mask, unsigned int val,
2152                                  bool *change, bool async, bool force)
2153{
2154        mask = (mask << field->shift) & field->mask;
2155
2156        return regmap_update_bits_base(field->regmap, field->reg,
2157                                       mask, val << field->shift,
2158                                       change, async, force);
2159}
2160EXPORT_SYMBOL_GPL(regmap_field_update_bits_base);
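
/*
 * Illustrative example, not part of this file: describing a two-bit field
 * with REG_FIELD(), allocating it and updating it synchronously.  The
 * foo_* name, register 0x21 and bit positions are hypothetical.
 */
static int foo_set_gain(struct device *dev, struct regmap *map,
                        unsigned int gain)
{
        struct reg_field gain_field = REG_FIELD(0x21, 4, 5);
        struct regmap_field *field;

        field = devm_regmap_field_alloc(dev, map, gain_field);
        if (IS_ERR(field))
                return PTR_ERR(field);

        /* Field-relative mask/value; no change reporting, sync, not forced */
        return regmap_field_update_bits_base(field, 0x3, gain,
                                             NULL, false, false);
}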
2161
2162/**
2163 * regmap_fields_update_bits_base() - Perform a read/modify/write cycle on a
2164 *                                    register field with port ID
2165 *
2166 * @field: Register field to write to
2167 * @id: port ID
2168 * @mask: Bitmask to change
2169 * @val: Value to be written
2170 * @change: Boolean indicating if a write was done
2171 * @async: Boolean indicating whether to write asynchronously
2172 * @force: Boolean indicating whether to write even if the value is unchanged
2173 *
2174 * A value of zero will be returned on success, a negative errno will
2175 * be returned in error cases.
2176 */
2177int regmap_fields_update_bits_base(struct regmap_field *field, unsigned int id,
2178                                   unsigned int mask, unsigned int val,
2179                                   bool *change, bool async, bool force)
2180{
2181        if (id >= field->id_size)
2182                return -EINVAL;
2183
2184        mask = (mask << field->shift) & field->mask;
2185
2186        return regmap_update_bits_base(field->regmap,
2187                                       field->reg + (field->id_offset * id),
2188                                       mask, val << field->shift,
2189                                       change, async, force);
2190}
2191EXPORT_SYMBOL_GPL(regmap_fields_update_bits_base);
2192
2193/**
2194 * regmap_bulk_write() - Write multiple registers to the device
2195 *
2196 * @map: Register map to write to
2197 * @reg: First register to be written to
2198 * @val: Block of data to be written, in native register size for device
2199 * @val_count: Number of registers to write
2200 *
2201 * This function is intended to be used for writing a large block of
2202 * data to the device either in a single transfer or in multiple transfers.
2203 *
2204 * A value of zero will be returned on success, a negative errno will
2205 * be returned in error cases.
2206 */
2207int regmap_bulk_write(struct regmap *map, unsigned int reg, const void *val,
2208                     size_t val_count)
2209{
2210        int ret = 0, i;
2211        size_t val_bytes = map->format.val_bytes;
2212
2213        if (!IS_ALIGNED(reg, map->reg_stride))
2214                return -EINVAL;
2215
2216        /*
2217         * Some devices don't support bulk write; for them we fall back on a
2218         * series of single write operations.
2219         */
2220        if (!map->bus || !map->format.parse_inplace) {
2221                map->lock(map->lock_arg);
2222                for (i = 0; i < val_count; i++) {
2223                        unsigned int ival;
2224
2225                        switch (val_bytes) {
2226                        case 1:
2227                                ival = *(u8 *)(val + (i * val_bytes));
2228                                break;
2229                        case 2:
2230                                ival = *(u16 *)(val + (i * val_bytes));
2231                                break;
2232                        case 4:
2233                                ival = *(u32 *)(val + (i * val_bytes));
2234                                break;
2235#ifdef CONFIG_64BIT
2236                        case 8:
2237                                ival = *(u64 *)(val + (i * val_bytes));
2238                                break;
2239#endif
2240                        default:
2241                                ret = -EINVAL;
2242                                goto out;
2243                        }
2244
2245                        ret = _regmap_write(map,
2246                                            reg + regmap_get_offset(map, i),
2247                                            ival);
2248                        if (ret != 0)
2249                                goto out;
2250                }
2251out:
2252                map->unlock(map->lock_arg);
2253        } else {
2254                void *wval;
2255
2256                wval = kmemdup(val, val_count * val_bytes, map->alloc_flags);
2257                if (!wval)
2258                        return -ENOMEM;
2259
2260                for (i = 0; i < val_count * val_bytes; i += val_bytes)
2261                        map->format.parse_inplace(wval + i);
2262
2263                ret = regmap_raw_write(map, reg, wval, val_bytes * val_count);
2264
2265                kfree(wval);
2266        }
2267        return ret;
2268}
2269EXPORT_SYMBOL_GPL(regmap_bulk_write);
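
/*
 * Illustrative example, not part of this file: writing a table of
 * native-endian 16-bit coefficients to consecutive registers; the core
 * converts each element to the device's wire format (or falls back to
 * single register writes).  The foo_* name and base register are
 * hypothetical.
 */
static int foo_load_coeffs(struct regmap *map, const u16 *coeffs,
                           size_t ncoeffs)
{
        return regmap_bulk_write(map, 0x80, coeffs, ncoeffs);
}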
2270
2271/*
2272 * _regmap_raw_multi_reg_write()
2273 *
2274 * the (register,newvalue) pairs in regs have not been formatted, but
2275 * they are all in the same page and have been changed to being page
2276 * relative. The page register has been written if that was necessary.
2277 */
2278static int _regmap_raw_multi_reg_write(struct regmap *map,
2279                                       const struct reg_sequence *regs,
2280                                       size_t num_regs)
2281{
2282        int ret;
2283        void *buf;
2284        int i;
2285        u8 *u8;
2286        size_t val_bytes = map->format.val_bytes;
2287        size_t reg_bytes = map->format.reg_bytes;
2288        size_t pad_bytes = map->format.pad_bytes;
2289        size_t pair_size = reg_bytes + pad_bytes + val_bytes;
2290        size_t len = pair_size * num_regs;
2291
2292        if (!len)
2293                return -EINVAL;
2294
2295        buf = kzalloc(len, GFP_KERNEL);
2296        if (!buf)
2297                return -ENOMEM;
2298
2299        /* We have to linearise by hand. */
2300
2301        u8 = buf;
2302
2303        for (i = 0; i < num_regs; i++) {
2304                unsigned int reg = regs[i].reg;
2305                unsigned int val = regs[i].def;
2306                trace_regmap_hw_write_start(map, reg, 1);
2307                map->format.format_reg(u8, reg, map->reg_shift);
2308                u8 += reg_bytes + pad_bytes;
2309                map->format.format_val(u8, val, 0);
2310                u8 += val_bytes;
2311        }
2312        u8 = buf;
2313        *u8 |= map->write_flag_mask;
2314
2315        ret = map->bus->write(map->bus_context, buf, len);
2316
2317        kfree(buf);
2318
2319        for (i = 0; i < num_regs; i++) {
2320                int reg = regs[i].reg;
2321                trace_regmap_hw_write_done(map, reg, 1);
2322        }
2323        return ret;
2324}
2325
2326static unsigned int _regmap_register_page(struct regmap *map,
2327                                          unsigned int reg,
2328                                          struct regmap_range_node *range)
2329{
2330        unsigned int win_page = (reg - range->range_min) / range->window_len;
2331
2332        return win_page;
2333}
2334
2335static int _regmap_range_multi_paged_reg_write(struct regmap *map,
2336                                               struct reg_sequence *regs,
2337                                               size_t num_regs)
2338{
2339        int ret;
2340        int i, n;
2341        struct reg_sequence *base;
2342        unsigned int this_page = 0;
2343        unsigned int page_change = 0;
2344        /*
2345         * the set of registers is not necessarily in order, but
2346         * since the order of writes must be preserved this algorithm
2347         * chops the set each time the page changes. This also applies
2348         * if there is a delay required at any point in the sequence.
2349         */
2350        base = regs;
2351        for (i = 0, n = 0; i < num_regs; i++, n++) {
2352                unsigned int reg = regs[i].reg;
2353                struct regmap_range_node *range;
2354
2355                range = _regmap_range_lookup(map, reg);
2356                if (range) {
2357                        unsigned int win_page = _regmap_register_page(map, reg,
2358                                                                      range);
2359
2360                        if (i == 0)
2361                                this_page = win_page;
2362                        if (win_page != this_page) {
2363                                this_page = win_page;
2364                                page_change = 1;
2365                        }
2366                }
2367
2368                /* If we have both a page change and a delay make sure to
2369                 * write the regs and apply the delay before we change the
2370                 * page.
2371                 */
2372
2373                if (page_change || regs[i].delay_us) {
2374
2375                        /* For situations where the first write requires
2376                         * a delay we need to make sure we don't call
2377                         * raw_multi_reg_write with n=0
2378                         * This can't occur with page breaks as we
2379                         * never write on the first iteration
2380                         */
2381                        if (regs[i].delay_us && i == 0)
2382                                n = 1;
2383
2384                        ret = _regmap_raw_multi_reg_write(map, base, n);
2385                        if (ret != 0)
2386                                return ret;
2387
2388                        if (regs[i].delay_us) {
2389                                if (map->can_sleep)
2390                                        fsleep(regs[i].delay_us);
2391                                else
2392                                        udelay(regs[i].delay_us);
2393                        }
2394
2395                        base += n;
2396                        n = 0;
2397
2398                        if (page_change) {
2399                                ret = _regmap_select_page(map,
2400                                                          &base[n].reg,
2401                                                          range, 1);
2402                                if (ret != 0)
2403                                        return ret;
2404
2405                                page_change = 0;
2406                        }
2407
2408                }
2409
2410        }
2411        if (n > 0)
2412                return _regmap_raw_multi_reg_write(map, base, n);
2413        return 0;
2414}
2415
2416static int _regmap_multi_reg_write(struct regmap *map,
2417                                   const struct reg_sequence *regs,
2418                                   size_t num_regs)
2419{
2420        int i;
2421        int ret;
2422
2423        if (!map->can_multi_write) {
2424                for (i = 0; i < num_regs; i++) {
2425                        ret = _regmap_write(map, regs[i].reg, regs[i].def);
2426                        if (ret != 0)
2427                                return ret;
2428
2429                        if (regs[i].delay_us) {
2430                                if (map->can_sleep)
2431                                        fsleep(regs[i].delay_us);
2432                                else
2433                                        udelay(regs[i].delay_us);
2434                        }
2435                }
2436                return 0;
2437        }
2438
2439        if (!map->format.parse_inplace)
2440                return -EINVAL;
2441
2442        if (map->writeable_reg)
2443                for (i = 0; i < num_regs; i++) {
2444                        int reg = regs[i].reg;
2445                        if (!map->writeable_reg(map->dev, reg))
2446                                return -EINVAL;
2447                        if (!IS_ALIGNED(reg, map->reg_stride))
2448                                return -EINVAL;
2449                }
2450
2451        if (!map->cache_bypass) {
2452                for (i = 0; i < num_regs; i++) {
2453                        unsigned int val = regs[i].def;
2454                        unsigned int reg = regs[i].reg;
2455                        ret = regcache_write(map, reg, val);
2456                        if (ret) {
2457                                dev_err(map->dev,
2458                                "Error in caching of register: %x ret: %d\n",
2459                                                                reg, ret);
2460                                return ret;
2461                        }
2462                }
2463                if (map->cache_only) {
2464                        map->cache_dirty = true;
2465                        return 0;
2466                }
2467        }
2468
2469        WARN_ON(!map->bus);
2470
2471        for (i = 0; i < num_regs; i++) {
2472                unsigned int reg = regs[i].reg;
2473                struct regmap_range_node *range;
2474
2475                /* Coalesce all the writes between a page break or a delay
2476                 * in a sequence
2477                 */
2478                range = _regmap_range_lookup(map, reg);
2479                if (range || regs[i].delay_us) {
2480                        size_t len = sizeof(struct reg_sequence)*num_regs;
2481                        struct reg_sequence *base = kmemdup(regs, len,
2482                                                           GFP_KERNEL);
2483                        if (!base)
2484                                return -ENOMEM;
2485                        ret = _regmap_range_multi_paged_reg_write(map, base,
2486                                                                  num_regs);
2487                        kfree(base);
2488
2489                        return ret;
2490                }
2491        }
2492        return _regmap_raw_multi_reg_write(map, regs, num_regs);
2493}
2494
2495/**
2496 * regmap_multi_reg_write() - Write multiple registers to the device
2497 *
2498 * @map: Register map to write to
2499 * @regs: Array of structures containing register,value to be written
2500 * @num_regs: Number of registers to write
2501 *
2502 * Write multiple registers to the device where the set of register, value
2503 * pairs are supplied in any order, possibly not all in a single range.
2504 *
2505 * The 'normal' block write mode will ultimately send data on the
2506 * target bus as R,V1,V2,V3,..,Vn where successively higher registers are
2507 * addressed. However, this alternative block multi write mode will send
2508 * the data as R1,V1,R2,V2,..,Rn,Vn on the target bus. The target device
2509 * must of course support the mode.
2510 *
2511 * A value of zero will be returned on success, a negative errno will be
2512 * returned in error cases.
2513 */
2514int regmap_multi_reg_write(struct regmap *map, const struct reg_sequence *regs,
2515                           int num_regs)
2516{
2517        int ret;
2518
2519        map->lock(map->lock_arg);
2520
2521        ret = _regmap_multi_reg_write(map, regs, num_regs);
2522
2523        map->unlock(map->lock_arg);
2524
2525        return ret;
2526}
2527EXPORT_SYMBOL_GPL(regmap_multi_reg_write);
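
/*
 * Illustrative example, not part of this file: writing an ordered
 * power-up sequence, including a settling delay, in a single call.  The
 * foo_* name and all registers, values and delays are hypothetical.
 */
static int foo_power_up(struct regmap *map)
{
        static const struct reg_sequence foo_power_seq[] = {
                { 0x01, 0x01 },         /* enable supplies */
                { 0x02, 0x55, 2000 },   /* start PLL, then wait 2ms */
                { 0x03, 0x80 },         /* release reset */
        };

        return regmap_multi_reg_write(map, foo_power_seq,
                                      ARRAY_SIZE(foo_power_seq));
}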
2528
2529/**
2530 * regmap_multi_reg_write_bypassed() - Write multiple registers to the
2531 *                                     device but not the cache
2532 *
2533 * @map: Register map to write to
2534 * @regs: Array of structures containing register,value to be written
2535 * @num_regs: Number of registers to write
2536 *
2537 * Write multiple registers to the device but not the cache where the set
2538 * of register, value pairs is supplied in any order.
2539 *
2540 * This function is intended to be used for writing a large block of data
2541 * atomically to the device in a single transfer for those I2C client devices
2542 * that implement this alternative block write mode.
2543 *
2544 * A value of zero will be returned on success, a negative errno will
2545 * be returned in error cases.
2546 */
2547int regmap_multi_reg_write_bypassed(struct regmap *map,
2548                                    const struct reg_sequence *regs,
2549                                    int num_regs)
2550{
2551        int ret;
2552        bool bypass;
2553
2554        map->lock(map->lock_arg);
2555
2556        bypass = map->cache_bypass;
2557        map->cache_bypass = true;
2558
2559        ret = _regmap_multi_reg_write(map, regs, num_regs);
2560
2561        map->cache_bypass = bypass;
2562
2563        map->unlock(map->lock_arg);
2564
2565        return ret;
2566}
2567EXPORT_SYMBOL_GPL(regmap_multi_reg_write_bypassed);
2568
2569/**
2570 * regmap_raw_write_async() - Write raw values to one or more registers
2571 *                            asynchronously
2572 *
2573 * @map: Register map to write to
2574 * @reg: Initial register to write to
2575 * @val: Block of data to be written, laid out for direct transmission to the
2576 *       device.  Must be valid until regmap_async_complete() is called.
2577 * @val_len: Length of data pointed to by val.
2578 *
2579 * This function is intended to be used for things like firmware
2580 * download where a large block of data needs to be transferred to the
2581 * device.  No formatting will be done on the data provided.
2582 *
2583 * If supported by the underlying bus the write will be scheduled
2584 * asynchronously, helping maximise I/O speed on higher speed buses
2585 * like SPI.  regmap_async_complete() can be called to ensure that all
2586 * asynchronous writes have been completed.
2587 *
2588 * A value of zero will be returned on success, a negative errno will
2589 * be returned in error cases.
2590 */
2591int regmap_raw_write_async(struct regmap *map, unsigned int reg,
2592                           const void *val, size_t val_len)
2593{
2594        int ret;
2595
2596        if (val_len % map->format.val_bytes)
2597                return -EINVAL;
2598        if (!IS_ALIGNED(reg, map->reg_stride))
2599                return -EINVAL;
2600
2601        map->lock(map->lock_arg);
2602
2603        map->async = true;
2604
2605        ret = _regmap_raw_write(map, reg, val, val_len, false);
2606
2607        map->async = false;
2608
2609        map->unlock(map->lock_arg);
2610
2611        return ret;
2612}
2613EXPORT_SYMBOL_GPL(regmap_raw_write_async);
2614
2615static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2616                            unsigned int val_len, bool noinc)
2617{
2618        struct regmap_range_node *range;
2619        int ret;
2620
2621        WARN_ON(!map->bus);
2622
2623        if (!map->bus || !map->bus->read)
2624                return -EINVAL;
2625
2626        range = _regmap_range_lookup(map, reg);
2627        if (range) {
2628                ret = _regmap_select_page(map, &reg, range,
2629                                          noinc ? 1 : val_len / map->format.val_bytes);
2630                if (ret != 0)
2631                        return ret;
2632        }
2633
2634        map->format.format_reg(map->work_buf, reg, map->reg_shift);
2635        regmap_set_work_buf_flag_mask(map, map->format.reg_bytes,
2636                                      map->read_flag_mask);
2637        trace_regmap_hw_read_start(map, reg, val_len / map->format.val_bytes);
2638
2639        ret = map->bus->read(map->bus_context, map->work_buf,
2640                             map->format.reg_bytes + map->format.pad_bytes,
2641                             val, val_len);
2642
2643        trace_regmap_hw_read_done(map, reg, val_len / map->format.val_bytes);
2644
2645        return ret;
2646}
2647
2648static int _regmap_bus_reg_read(void *context, unsigned int reg,
2649                                unsigned int *val)
2650{
2651        struct regmap *map = context;
2652
2653        return map->bus->reg_read(map->bus_context, reg, val);
2654}
2655
2656static int _regmap_bus_read(void *context, unsigned int reg,
2657                            unsigned int *val)
2658{
2659        int ret;
2660        struct regmap *map = context;
2661        void *work_val = map->work_buf + map->format.reg_bytes +
2662                map->format.pad_bytes;
2663
2664        if (!map->format.parse_val)
2665                return -EINVAL;
2666
2667        ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false);
2668        if (ret == 0)
2669                *val = map->format.parse_val(work_val);
2670
2671        return ret;
2672}
2673
2674static int _regmap_read(struct regmap *map, unsigned int reg,
2675                        unsigned int *val)
2676{
2677        int ret;
2678        void *context = _regmap_map_get_context(map);
2679
2680        if (!map->cache_bypass) {
2681                ret = regcache_read(map, reg, val);
2682                if (ret == 0)
2683                        return 0;
2684        }
2685
2686        if (map->cache_only)
2687                return -EBUSY;
2688
2689        if (!regmap_readable(map, reg))
2690                return -EIO;
2691
2692        ret = map->reg_read(context, reg, val);
2693        if (ret == 0) {
2694                if (regmap_should_log(map))
2695                        dev_info(map->dev, "%x => %x\n", reg, *val);
2696
2697                trace_regmap_reg_read(map, reg, *val);
2698
2699                if (!map->cache_bypass)
2700                        regcache_write(map, reg, *val);
2701        }
2702
2703        return ret;
2704}
2705
2706/**
2707 * regmap_read() - Read a value from a single register
2708 *
2709 * @map: Register map to read from
2710 * @reg: Register to be read from
2711 * @val: Pointer to store read value
2712 *
2713 * A value of zero will be returned on success, a negative errno will
2714 * be returned in error cases.
2715 */
2716int regmap_read(struct regmap *map, unsigned int reg, unsigned int *val)
2717{
2718        int ret;
2719
2720        if (!IS_ALIGNED(reg, map->reg_stride))
2721                return -EINVAL;
2722
2723        map->lock(map->lock_arg);
2724
2725        ret = _regmap_read(map, reg, val);
2726
2727        map->unlock(map->lock_arg);
2728
2729        return ret;
2730}
2731EXPORT_SYMBOL_GPL(regmap_read);
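
/*
 * Illustrative example, not part of this file: reading a status register
 * and testing one bit.  The foo_* name, register address and bit are
 * hypothetical.
 */
static int foo_is_ready(struct regmap *map)
{
        unsigned int val;
        int ret;

        ret = regmap_read(map, 0x05, &val);
        if (ret)
                return ret;

        return !!(val & 0x01);
}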
2732
2733/**
2734 * regmap_raw_read() - Read raw data from the device
2735 *
2736 * @map: Register map to read from
2737 * @reg: First register to be read from
2738 * @val: Pointer to store read value
2739 * @val_len: Size of data to read
2740 *
2741 * A value of zero will be returned on success, a negative errno will
2742 * be returned in error cases.
2743 */
2744int regmap_raw_read(struct regmap *map, unsigned int reg, void *val,
2745                    size_t val_len)
2746{
2747        size_t val_bytes = map->format.val_bytes;
2748        size_t val_count = val_len / val_bytes;
2749        unsigned int v;
2750        int ret, i;
2751
2752        if (!map->bus)
2753                return -EINVAL;
2754        if (val_len % map->format.val_bytes)
2755                return -EINVAL;
2756        if (!IS_ALIGNED(reg, map->reg_stride))
2757                return -EINVAL;
2758        if (val_count == 0)
2759                return -EINVAL;
2760
2761        map->lock(map->lock_arg);
2762
2763        if (regmap_volatile_range(map, reg, val_count) || map->cache_bypass ||
2764            map->cache_type == REGCACHE_NONE) {
2765                size_t chunk_count, chunk_bytes;
2766                size_t chunk_regs = val_count;
2767
2768                if (!map->bus->read) {
2769                        ret = -ENOTSUPP;
2770                        goto out;
2771                }
2772
2773                if (map->use_single_read)
2774                        chunk_regs = 1;
2775                else if (map->max_raw_read && val_len > map->max_raw_read)
2776                        chunk_regs = map->max_raw_read / val_bytes;
2777
2778                chunk_count = val_count / chunk_regs;
2779                chunk_bytes = chunk_regs * val_bytes;
2780
2781                /* Read bytes that fit into whole chunks */
2782                for (i = 0; i < chunk_count; i++) {
2783                        ret = _regmap_raw_read(map, reg, val, chunk_bytes, false);
2784                        if (ret != 0)
2785                                goto out;
2786
2787                        reg += regmap_get_offset(map, chunk_regs);
2788                        val += chunk_bytes;
2789                        val_len -= chunk_bytes;
2790                }
2791
2792                /* Read remaining bytes */
2793                if (val_len) {
2794                        ret = _regmap_raw_read(map, reg, val, val_len, false);
2795                        if (ret != 0)
2796                                goto out;
2797                }
2798        } else {
2799                /* Otherwise go word by word for the cache; should be low
2800                 * cost as we expect to hit the cache.
2801                 */
2802                for (i = 0; i < val_count; i++) {
2803                        ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2804                                           &v);
2805                        if (ret != 0)
2806                                goto out;
2807
2808                        map->format.format_val(val + (i * val_bytes), v, 0);
2809                }
2810        }
2811
2812 out:
2813        map->unlock(map->lock_arg);
2814
2815        return ret;
2816}
2817EXPORT_SYMBOL_GPL(regmap_raw_read);
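
/*
 * Illustrative example, not part of this file: reading a block of
 * registers in the device's wire format, e.g. to dump an EEPROM-like
 * region.  The foo_* name and base register are hypothetical; len must
 * be a multiple of the map's value size.
 */
static int foo_dump_otp(struct regmap *map, void *buf, size_t len)
{
        return regmap_raw_read(map, 0x200, buf, len);
}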
2818
2819/**
2820 * regmap_noinc_read() - Read data from a register without incrementing the
2821 *                       register number
2822 *
2823 * @map: Register map to read from
2824 * @reg: Register to read from
2825 * @val: Pointer to data buffer
2826 * @val_len: Length of output buffer in bytes.
2827 *
2828 * The regmap API usually assumes that bulk bus read operations will read a
2829 * range of registers. Some devices have certain registers for which a read
2830 * operation will read from an internal FIFO.
2831 *
2832 * The target register must be volatile but registers after it can be
2833 * completely unrelated cacheable registers.
2834 *
2835 * This will attempt multiple reads as required to read val_len bytes.
2836 *
2837 * A value of zero will be returned on success, a negative errno will be
2838 * returned in error cases.
2839 */
2840int regmap_noinc_read(struct regmap *map, unsigned int reg,
2841                      void *val, size_t val_len)
2842{
2843        size_t read_len;
2844        int ret;
2845
2846        if (!map->bus)
2847                return -EINVAL;
2848        if (!map->bus->read)
2849                return -ENOTSUPP;
2850        if (val_len % map->format.val_bytes)
2851                return -EINVAL;
2852        if (!IS_ALIGNED(reg, map->reg_stride))
2853                return -EINVAL;
2854        if (val_len == 0)
2855                return -EINVAL;
2856
2857        map->lock(map->lock_arg);
2858
2859        if (!regmap_volatile(map, reg) || !regmap_readable_noinc(map, reg)) {
2860                ret = -EINVAL;
2861                goto out_unlock;
2862        }
2863
2864        while (val_len) {
2865                if (map->max_raw_read && map->max_raw_read < val_len)
2866                        read_len = map->max_raw_read;
2867                else
2868                        read_len = val_len;
2869                ret = _regmap_raw_read(map, reg, val, read_len, true);
2870                if (ret)
2871                        goto out_unlock;
2872                val = ((u8 *)val) + read_len;
2873                val_len -= read_len;
2874        }
2875
2876out_unlock:
2877        map->unlock(map->lock_arg);
2878        return ret;
2879}
2880EXPORT_SYMBOL_GPL(regmap_noinc_read);
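
/*
 * Illustrative example, not part of this file: draining a device FIFO
 * exposed through a single register, which must be marked volatile and
 * readable_noinc in the regmap config.  The foo_* name and FIFO register
 * address are hypothetical.
 */
static int foo_fifo_drain(struct regmap *map, void *buf, size_t len)
{
        return regmap_noinc_read(map, 0x41, buf, len);
}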
2881
2882/**
2883 * regmap_field_read() - Read a value from a single register field
2884 *
2885 * @field: Register field to read from
2886 * @val: Pointer to store read value
2887 *
2888 * A value of zero will be returned on success, a negative errno will
2889 * be returned in error cases.
2890 */
2891int regmap_field_read(struct regmap_field *field, unsigned int *val)
2892{
2893        int ret;
2894        unsigned int reg_val;
2895        ret = regmap_read(field->regmap, field->reg, &reg_val);
2896        if (ret != 0)
2897                return ret;
2898
2899        reg_val &= field->mask;
2900        reg_val >>= field->shift;
2901        *val = reg_val;
2902
2903        return ret;
2904}
2905EXPORT_SYMBOL_GPL(regmap_field_read);
2906
2907/**
2908 * regmap_fields_read() - Read a value from a single register field with port ID
2909 *
2910 * @field: Register field to read from
2911 * @id: port ID
2912 * @val: Pointer to store read value
2913 *
2914 * A value of zero will be returned on success, a negative errno will
2915 * be returned in error cases.
2916 */
2917int regmap_fields_read(struct regmap_field *field, unsigned int id,
2918                       unsigned int *val)
2919{
2920        int ret;
2921        unsigned int reg_val;
2922
2923        if (id >= field->id_size)
2924                return -EINVAL;
2925
2926        ret = regmap_read(field->regmap,
2927                          field->reg + (field->id_offset * id),
2928                          &reg_val);
2929        if (ret != 0)
2930                return ret;
2931
2932        reg_val &= field->mask;
2933        reg_val >>= field->shift;
2934        *val = reg_val;
2935
2936        return ret;
2937}
2938EXPORT_SYMBOL_GPL(regmap_fields_read);
2939
2940/**
2941 * regmap_bulk_read() - Read multiple registers from the device
2942 *
2943 * @map: Register map to read from
2944 * @reg: First register to be read from
2945 * @val: Pointer to store read value, in native register size for device
2946 * @val_count: Number of registers to read
2947 *
2948 * A value of zero will be returned on success, a negative errno will
2949 * be returned in error cases.
2950 */
2951int regmap_bulk_read(struct regmap *map, unsigned int reg, void *val,
2952                     size_t val_count)
2953{
2954        int ret, i;
2955        size_t val_bytes = map->format.val_bytes;
2956        bool vol = regmap_volatile_range(map, reg, val_count);
2957
2958        if (!IS_ALIGNED(reg, map->reg_stride))
2959                return -EINVAL;
2960        if (val_count == 0)
2961                return -EINVAL;
2962
2963        if (map->bus && map->format.parse_inplace && (vol || map->cache_type == REGCACHE_NONE)) {
2964                ret = regmap_raw_read(map, reg, val, val_bytes * val_count);
2965                if (ret != 0)
2966                        return ret;
2967
2968                for (i = 0; i < val_count * val_bytes; i += val_bytes)
2969                        map->format.parse_inplace(val + i);
2970        } else {
2971#ifdef CONFIG_64BIT
2972                u64 *u64 = val;
2973#endif
2974                u32 *u32 = val;
2975                u16 *u16 = val;
2976                u8 *u8 = val;
2977
2978                map->lock(map->lock_arg);
2979
2980                for (i = 0; i < val_count; i++) {
2981                        unsigned int ival;
2982
2983                        ret = _regmap_read(map, reg + regmap_get_offset(map, i),
2984                                           &ival);
2985                        if (ret != 0)
2986                                goto out;
2987
2988                        switch (map->format.val_bytes) {
2989#ifdef CONFIG_64BIT
2990                        case 8:
2991                                u64[i] = ival;
2992                                break;
2993#endif
2994                        case 4:
2995                                u32[i] = ival;
2996                                break;
2997                        case 2:
2998                                u16[i] = ival;
2999                                break;
3000                        case 1:
3001                                u8[i] = ival;
3002                                break;
3003                        default:
3004                                ret = -EINVAL;
3005                                goto out;
3006                        }
3007                }
3008
3009out:
3010                map->unlock(map->lock_arg);
3011        }
3012
3013        return ret;
3014}
3015EXPORT_SYMBOL_GPL(regmap_bulk_read);
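
/*
 * Illustrative example, not part of this file: reading a block of
 * consecutive registers into a native-endian buffer.  The foo_* name and
 * base register are hypothetical.
 */
static int foo_read_samples(struct regmap *map, u16 *samples,
                            size_t nsamples)
{
        return regmap_bulk_read(map, 0x90, samples, nsamples);
}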
3016
3017static int _regmap_update_bits(struct regmap *map, unsigned int reg,
3018                               unsigned int mask, unsigned int val,
3019                               bool *change, bool force_write)
3020{
3021        int ret;
3022        unsigned int tmp, orig;
3023
3024        if (change)
3025                *change = false;
3026
3027        if (regmap_volatile(map, reg) && map->reg_update_bits) {
3028                ret = map->reg_update_bits(map->bus_context, reg, mask, val);
3029                if (ret == 0 && change)
3030                        *change = true;
3031        } else {
3032                ret = _regmap_read(map, reg, &orig);
3033                if (ret != 0)
3034                        return ret;
3035
3036                tmp = orig & ~mask;
3037                tmp |= val & mask;
3038
3039                if (force_write || (tmp != orig)) {
3040                        ret = _regmap_write(map, reg, tmp);
3041                        if (ret == 0 && change)
3042                                *change = true;
3043                }
3044        }
3045
3046        return ret;
3047}
3048
3049/**
3050 * regmap_update_bits_base() - Perform a read/modify/write cycle on a register
3051 *
3052 * @map: Register map to update
3053 * @reg: Register to update
3054 * @mask: Bitmask to change
3055 * @val: New value for bitmask
3056 * @change: Boolean indicating if a write was done
3057 * @async: Boolean indicating whether to write asynchronously
3058 * @force: Boolean indicating whether to write even if the value is unchanged
3059 *
3060 * Perform a read/modify/write cycle on a register map with change, async, force
3061 * options.
3062 *
3063 * If async is true:
3064 *
3065 * With most buses the read must be done synchronously so this is most useful
3066 * for devices with a cache which do not need to interact with the hardware to
3067 * determine the current register value.
3068 *
3069 * Returns zero for success, a negative number on error.
3070 */
3071int regmap_update_bits_base(struct regmap *map, unsigned int reg,
3072                            unsigned int mask, unsigned int val,
3073                            bool *change, bool async, bool force)
3074{
3075        int ret;
3076
3077        map->lock(map->lock_arg);
3078
3079        map->async = async;
3080
3081        ret = _regmap_update_bits(map, reg, mask, val, change, force);
3082
3083        map->async = false;
3084
3085        map->unlock(map->lock_arg);
3086
3087        return ret;
3088}
3089EXPORT_SYMBOL_GPL(regmap_update_bits_base);
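
/*
 * Illustrative example, not part of this file: a plain synchronous
 * read/modify/write of a three-bit field.  The regmap_update_bits()
 * family of helpers in <linux/regmap.h> are thin wrappers that pass
 * fixed change/async/force arguments to this function.  The foo_* name,
 * register address and mask are hypothetical.
 */
static int foo_set_rate(struct regmap *map, unsigned int rate_sel)
{
        return regmap_update_bits_base(map, 0x07, 0x7, rate_sel,
                                       NULL, false, false);
}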
3090
3091/**
3092 * regmap_test_bits() - Check if all specified bits are set in a register.
3093 *
3094 * @map: Register map to operate on
3095 * @reg: Register to read from
3096 * @bits: Bits to test
3097 *
3098 * Returns 0 if at least one of the tested bits is not set, 1 if all tested
3099 * bits are set and a negative error number if the underlying regmap_read()
3100 * fails.
3101 */
3102int regmap_test_bits(struct regmap *map, unsigned int reg, unsigned int bits)
3103{
3104        unsigned int val, ret;
3105
3106        ret = regmap_read(map, reg, &val);
3107        if (ret)
3108                return ret;
3109
3110        return (val & bits) == bits;
3111}
3112EXPORT_SYMBOL_GPL(regmap_test_bits);
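
/*
 * Illustrative example, not part of this file: checking that both lock
 * bits are set.  The foo_* name, register address and bits are
 * hypothetical.
 */
static int foo_pll_locked(struct regmap *map)
{
        /* 1 if both bits set, 0 if not, negative errno on read failure */
        return regmap_test_bits(map, 0x10, 0x03);
}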
3113
3114void regmap_async_complete_cb(struct regmap_async *async, int ret)
3115{
3116        struct regmap *map = async->map;
3117        bool wake;
3118
3119        trace_regmap_async_io_complete(map);
3120
3121        spin_lock(&map->async_lock);
3122        list_move(&async->list, &map->async_free);
3123        wake = list_empty(&map->async_list);
3124
3125        if (ret != 0)
3126                map->async_ret = ret;
3127
3128        spin_unlock(&map->async_lock);
3129
3130        if (wake)
3131                wake_up(&map->async_waitq);
3132}
3133EXPORT_SYMBOL_GPL(regmap_async_complete_cb);
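
    /*
     * Illustrative sketch, not part of the original file: a bus
     * implementation that provides async_write() is expected to call
     * regmap_async_complete_cb() from its own transfer-completion
     * handler, passing the transfer status.  The structure and function
     * names below (struct foo_async, foo_xfer_done) are hypothetical.
     *
     *		struct foo_async {
     *			struct regmap_async core;
     *			...bus specific transfer state...
     *		};
     *
     *		static void foo_xfer_done(void *data, int status)
     *		{
     *			struct foo_async *async = data;
     *
     *			regmap_async_complete_cb(&async->core, status);
     *		}
     */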
3134
3135static int regmap_async_is_done(struct regmap *map)
3136{
3137        unsigned long flags;
3138        int ret;
3139
3140        spin_lock_irqsave(&map->async_lock, flags);
3141        ret = list_empty(&map->async_list);
3142        spin_unlock_irqrestore(&map->async_lock, flags);
3143
3144        return ret;
3145}
3146
3147/**
3148 * regmap_async_complete() - Ensure all asynchronous I/O has completed.
3149 *
3150 * @map: Map to operate on.
3151 *
3152 * Blocks until any pending asynchronous I/O has completed.  Returns
3153 * an error code for any failed I/O operations.
3154 */
3155int regmap_async_complete(struct regmap *map)
3156{
3157        unsigned long flags;
3158        int ret;
3159
3160        /* Nothing to do with no async support */
3161        if (!map->bus || !map->bus->async_write)
3162                return 0;
3163
3164        trace_regmap_async_complete_start(map);
3165
3166        wait_event(map->async_waitq, regmap_async_is_done(map));
3167
3168        spin_lock_irqsave(&map->async_lock, flags);
3169        ret = map->async_ret;
3170        map->async_ret = 0;
3171        spin_unlock_irqrestore(&map->async_lock, flags);
3172
3173        trace_regmap_async_complete_done(map);
3174
3175        return ret;
3176}
3177EXPORT_SYMBOL_GPL(regmap_async_complete);
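
    /*
     * Illustrative usage sketch, not part of the original file: a typical
     * pattern is to queue a batch of asynchronous writes and then wait
     * for all of them to finish, collecting a single error code at the
     * end.  The foo_init_vals[] table is hypothetical and submission
     * errors from regmap_write_async() are ignored here for brevity.
     *
     *		for (i = 0; i < ARRAY_SIZE(foo_init_vals); i++)
     *			regmap_write_async(map, foo_init_vals[i].reg,
     *					   foo_init_vals[i].def);
     *
     *		ret = regmap_async_complete(map);
     *		if (ret)
     *			dev_err(dev, "async register init failed: %d\n", ret);
     */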
3178
3179/**
3180 * regmap_register_patch - Register and apply register updates to be applied
3181 *                         on device initialisation
3182 *
3183 * @map: Register map to apply updates to.
3184 * @regs: Values to update.
3185 * @num_regs: Number of entries in regs.
3186 *
3187 * Register a set of register updates to be applied to the device
3188 * whenever the device registers are synchronised with the cache and
3189 * apply them immediately.  Typically this is used to apply
3190 * corrections to the device defaults on startup, such as the
3191 * updates some vendors provide to undocumented registers.
3192 *
3193 * The caller must ensure that this function cannot be called
3194 * concurrently with either itself or regcache_sync().
3195 */
3196int regmap_register_patch(struct regmap *map, const struct reg_sequence *regs,
3197                          int num_regs)
3198{
3199        struct reg_sequence *p;
3200        int ret;
3201        bool bypass;
3202
3203        if (WARN_ONCE(num_regs <= 0, "invalid number of registers (%d)\n",
3204            num_regs))
3205                return 0;
3206
3207        p = krealloc(map->patch,
3208                     sizeof(struct reg_sequence) * (map->patch_regs + num_regs),
3209                     GFP_KERNEL);
3210        if (p) {
3211                memcpy(p + map->patch_regs, regs, num_regs * sizeof(*regs));
3212                map->patch = p;
3213                map->patch_regs += num_regs;
3214        } else {
3215                return -ENOMEM;
3216        }
3217
3218        map->lock(map->lock_arg);
3219
3220        bypass = map->cache_bypass;
3221
3222        map->cache_bypass = true;
3223        map->async = true;
3224
3225        ret = _regmap_multi_reg_write(map, regs, num_regs);
3226
3227        map->async = false;
3228        map->cache_bypass = bypass;
3229
3230        map->unlock(map->lock_arg);
3231
3232        regmap_async_complete(map);
3233
3234        return ret;
3235}
3236EXPORT_SYMBOL_GPL(regmap_register_patch);
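
    /*
     * Illustrative usage sketch, not part of the original file: a driver
     * typically registers its patch once during probe; the entries are
     * written immediately and then re-applied whenever the cache is
     * synchronised.  The register addresses and values below are
     * hypothetical.
     *
     *		static const struct reg_sequence foo_errata[] = {
     *			{ 0x101, 0x0021 },
     *			{ 0x102, 0x0045 },
     *		};
     *
     *		ret = regmap_register_patch(map, foo_errata,
     *					    ARRAY_SIZE(foo_errata));
     *		if (ret)
     *			return ret;
     */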
3237
3238/**
3239 * regmap_get_val_bytes() - Report the size of a register value
3240 *
3241 * @map: Register map to operate on.
3242 *
3243 * Report the size of a register value, mainly intended for use by
3244 * generic infrastructure built on top of regmap.
3245 */
3246int regmap_get_val_bytes(struct regmap *map)
3247{
3248        if (map->format.format_write)
3249                return -EINVAL;
3250
3251        return map->format.val_bytes;
3252}
3253EXPORT_SYMBOL_GPL(regmap_get_val_bytes);
3254
3255/**
3256 * regmap_get_max_register() - Report the max register value
3257 *
3258 * @map: Register map to operate on.
3259 *
3260 * Report the max register value, mainly intended for use by
3261 * generic infrastructure built on top of regmap.
3262 */
3263int regmap_get_max_register(struct regmap *map)
3264{
3265        return map->max_register ? map->max_register : -EINVAL;
3266}
3267EXPORT_SYMBOL_GPL(regmap_get_max_register);
3268
3269/**
3270 * regmap_get_reg_stride() - Report the register address stride
3271 *
3272 * @map: Register map to operate on.
3273 *
3274 * Report the register address stride, mainly intended for use by
3275 * generic infrastructure built on top of regmap.
3276 */
3277int regmap_get_reg_stride(struct regmap *map)
3278{
3279        return map->reg_stride;
3280}
3281EXPORT_SYMBOL_GPL(regmap_get_reg_stride);
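
    /*
     * Illustrative usage sketch, not part of the original file: generic
     * code layered on top of regmap can use the three helpers above to
     * size buffers and walk the register space without knowing the
     * device's configuration.  Error handling is elided for brevity.
     *
     *		int val_bytes = regmap_get_val_bytes(map);
     *		int max_reg = regmap_get_max_register(map);
     *		int stride = regmap_get_reg_stride(map);
     *
     *		for (reg = 0; reg <= max_reg; reg += stride)
     *			... transfer val_bytes bytes for register reg ...
     */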
3282
3283int regmap_parse_val(struct regmap *map, const void *buf,
3284                        unsigned int *val)
3285{
3286        if (!map->format.parse_val)
3287                return -EINVAL;
3288
3289        *val = map->format.parse_val(buf);
3290
3291        return 0;
3292}
3293EXPORT_SYMBOL_GPL(regmap_parse_val);
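
    /*
     * Illustrative usage sketch, not part of the original file:
     * regmap_parse_val() converts a value in the map's raw wire format
     * (for example as returned by regmap_raw_read()) into a CPU-order
     * unsigned int.  FOO_ID_REG is hypothetical and the buffer is
     * assumed to be large enough for the map's value size.
     *
     *		u8 raw[4];
     *		unsigned int val;
     *
     *		ret = regmap_raw_read(map, FOO_ID_REG, raw,
     *				      regmap_get_val_bytes(map));
     *		if (ret)
     *			return ret;
     *
     *		ret = regmap_parse_val(map, raw, &val);
     */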
3294
3295static int __init regmap_initcall(void)
3296{
3297        regmap_debugfs_initcall();
3298
3299        return 0;
3300}
3301postcore_initcall(regmap_initcall);
3302