linux/drivers/nvmem/core.c
/*
 * nvmem framework core.
 *
 * Copyright (C) 2015 Srinivas Kandagatla <srinivas.kandagatla@linaro.org>
 * Copyright (C) 2013 Maxime Ripard <maxime.ripard@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/device.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/idr.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/nvmem-consumer.h>
#include <linux/nvmem-provider.h>
#include <linux/of.h>
#include <linux/slab.h>

struct nvmem_device {
        const char              *name;
        struct module           *owner;
        struct device           dev;
        int                     stride;
        int                     word_size;
        int                     ncells;
        int                     id;
        int                     users;
        size_t                  size;
        bool                    read_only;
        int                     flags;
        struct bin_attribute    eeprom;
        struct device           *base_dev;
        nvmem_reg_read_t        reg_read;
        nvmem_reg_write_t       reg_write;
        void *priv;
};

#define FLAG_COMPAT             BIT(0)

struct nvmem_cell {
        const char              *name;
        int                     offset;
        int                     bytes;
        int                     bit_offset;
        int                     nbits;
        struct nvmem_device     *nvmem;
        struct list_head        node;
};

static DEFINE_MUTEX(nvmem_mutex);
static DEFINE_IDA(nvmem_ida);

static LIST_HEAD(nvmem_cells);
static DEFINE_MUTEX(nvmem_cells_mutex);

#ifdef CONFIG_DEBUG_LOCK_ALLOC
static struct lock_class_key eeprom_lock_key;
#endif

#define to_nvmem_device(d) container_of(d, struct nvmem_device, dev)
static int nvmem_reg_read(struct nvmem_device *nvmem, unsigned int offset,
                          void *val, size_t bytes)
{
        if (nvmem->reg_read)
                return nvmem->reg_read(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static int nvmem_reg_write(struct nvmem_device *nvmem, unsigned int offset,
                           void *val, size_t bytes)
{
        if (nvmem->reg_write)
                return nvmem->reg_write(nvmem->priv, offset, val, bytes);

        return -EINVAL;
}

static ssize_t bin_attr_nvmem_read(struct file *filp, struct kobject *kobj,
                                    struct bin_attribute *attr,
                                    char *buf, loff_t pos, size_t count)
{
        struct device *dev;
        struct nvmem_device *nvmem;
        int rc;

        if (attr->private)
                dev = attr->private;
        else
                dev = container_of(kobj, struct device, kobj);
        nvmem = to_nvmem_device(dev);

        /* Stop the user from reading */
        if (pos >= nvmem->size)
                return 0;

        if (count < nvmem->word_size)
                return -EINVAL;

        if (pos + count > nvmem->size)
                count = nvmem->size - pos;

        count = round_down(count, nvmem->word_size);

        rc = nvmem_reg_read(nvmem, pos, buf, count);

        if (rc)
                return rc;

        return count;
}

static ssize_t bin_attr_nvmem_write(struct file *filp, struct kobject *kobj,
                                     struct bin_attribute *attr,
                                     char *buf, loff_t pos, size_t count)
{
        struct device *dev;
        struct nvmem_device *nvmem;
        int rc;

        if (attr->private)
                dev = attr->private;
        else
                dev = container_of(kobj, struct device, kobj);
        nvmem = to_nvmem_device(dev);

        /* Stop the user from writing */
        if (pos >= nvmem->size)
                return 0;

        if (count < nvmem->word_size)
                return -EINVAL;

        if (pos + count > nvmem->size)
                count = nvmem->size - pos;

        count = round_down(count, nvmem->word_size);

        rc = nvmem_reg_write(nvmem, pos, buf, count);

        if (rc)
                return rc;

        return count;
}

/* default read/write permissions */
static struct bin_attribute bin_attr_rw_nvmem = {
        .attr   = {
                .name   = "nvmem",
                .mode   = S_IWUSR | S_IRUGO,
        },
        .read   = bin_attr_nvmem_read,
        .write  = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_attributes[] = {
        &bin_attr_rw_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_rw_group = {
        .bin_attrs      = nvmem_bin_rw_attributes,
};

static const struct attribute_group *nvmem_rw_dev_groups[] = {
        &nvmem_bin_rw_group,
        NULL,
};

/* read only permission */
static struct bin_attribute bin_attr_ro_nvmem = {
        .attr   = {
                .name   = "nvmem",
                .mode   = S_IRUGO,
        },
        .read   = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_attributes[] = {
        &bin_attr_ro_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_ro_group = {
        .bin_attrs      = nvmem_bin_ro_attributes,
};

static const struct attribute_group *nvmem_ro_dev_groups[] = {
        &nvmem_bin_ro_group,
        NULL,
};

/* default read/write permissions, root only */
static struct bin_attribute bin_attr_rw_root_nvmem = {
        .attr   = {
                .name   = "nvmem",
                .mode   = S_IWUSR | S_IRUSR,
        },
        .read   = bin_attr_nvmem_read,
        .write  = bin_attr_nvmem_write,
};

static struct bin_attribute *nvmem_bin_rw_root_attributes[] = {
        &bin_attr_rw_root_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_rw_root_group = {
        .bin_attrs      = nvmem_bin_rw_root_attributes,
};

static const struct attribute_group *nvmem_rw_root_dev_groups[] = {
        &nvmem_bin_rw_root_group,
        NULL,
};

/* read only permission, root only */
static struct bin_attribute bin_attr_ro_root_nvmem = {
        .attr   = {
                .name   = "nvmem",
                .mode   = S_IRUSR,
        },
        .read   = bin_attr_nvmem_read,
};

static struct bin_attribute *nvmem_bin_ro_root_attributes[] = {
        &bin_attr_ro_root_nvmem,
        NULL,
};

static const struct attribute_group nvmem_bin_ro_root_group = {
        .bin_attrs      = nvmem_bin_ro_root_attributes,
};

static const struct attribute_group *nvmem_ro_root_dev_groups[] = {
        &nvmem_bin_ro_root_group,
        NULL,
};

static void nvmem_release(struct device *dev)
{
        struct nvmem_device *nvmem = to_nvmem_device(dev);

        ida_simple_remove(&nvmem_ida, nvmem->id);
        kfree(nvmem);
}

static const struct device_type nvmem_provider_type = {
        .release        = nvmem_release,
};

static struct bus_type nvmem_bus_type = {
        .name           = "nvmem",
};

static int of_nvmem_match(struct device *dev, void *nvmem_np)
{
        return dev->of_node == nvmem_np;
}

static struct nvmem_device *of_nvmem_find(struct device_node *nvmem_np)
{
        struct device *d;

        if (!nvmem_np)
                return NULL;

        d = bus_find_device(&nvmem_bus_type, NULL, nvmem_np, of_nvmem_match);

        if (!d)
                return NULL;

        return to_nvmem_device(d);
}

static struct nvmem_cell *nvmem_find_cell(const char *cell_id)
{
        struct nvmem_cell *p;

        mutex_lock(&nvmem_cells_mutex);

        list_for_each_entry(p, &nvmem_cells, node)
                if (p && !strcmp(p->name, cell_id)) {
                        mutex_unlock(&nvmem_cells_mutex);
                        return p;
                }

        mutex_unlock(&nvmem_cells_mutex);

        return NULL;
}

static void nvmem_cell_drop(struct nvmem_cell *cell)
{
        mutex_lock(&nvmem_cells_mutex);
        list_del(&cell->node);
        mutex_unlock(&nvmem_cells_mutex);
        kfree(cell);
}

static void nvmem_device_remove_all_cells(const struct nvmem_device *nvmem)
{
        struct nvmem_cell *cell;
        struct list_head *p, *n;

        list_for_each_safe(p, n, &nvmem_cells) {
                cell = list_entry(p, struct nvmem_cell, node);
                if (cell->nvmem == nvmem)
                        nvmem_cell_drop(cell);
        }
}

static void nvmem_cell_add(struct nvmem_cell *cell)
{
        mutex_lock(&nvmem_cells_mutex);
        list_add_tail(&cell->node, &nvmem_cells);
        mutex_unlock(&nvmem_cells_mutex);
}

static int nvmem_cell_info_to_nvmem_cell(struct nvmem_device *nvmem,
                                   const struct nvmem_cell_info *info,
                                   struct nvmem_cell *cell)
{
        cell->nvmem = nvmem;
        cell->offset = info->offset;
        cell->bytes = info->bytes;
        cell->name = info->name;

        cell->bit_offset = info->bit_offset;
        cell->nbits = info->nbits;

        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
                                           BITS_PER_BYTE);

        if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                dev_err(&nvmem->dev,
                        "cell %s unaligned to nvmem stride %d\n",
                        cell->name, nvmem->stride);
                return -EINVAL;
        }

        return 0;
}

static int nvmem_add_cells(struct nvmem_device *nvmem,
                           const struct nvmem_config *cfg)
{
        struct nvmem_cell **cells;
        const struct nvmem_cell_info *info = cfg->cells;
        int i, rval;

        cells = kcalloc(cfg->ncells, sizeof(*cells), GFP_KERNEL);
        if (!cells)
                return -ENOMEM;

        for (i = 0; i < cfg->ncells; i++) {
                cells[i] = kzalloc(sizeof(**cells), GFP_KERNEL);
                if (!cells[i]) {
                        rval = -ENOMEM;
                        goto err;
                }

                rval = nvmem_cell_info_to_nvmem_cell(nvmem, &info[i], cells[i]);
                if (rval) {
                        kfree(cells[i]);
                        goto err;
                }

                nvmem_cell_add(cells[i]);
        }

        nvmem->ncells = cfg->ncells;
        /* remove tmp array */
        kfree(cells);

        return 0;
err:
        while (i--)
                nvmem_cell_drop(cells[i]);

        kfree(cells);

        return rval;
}

/*
 * nvmem_setup_compat() - Create an additional binary entry in the
 * driver's sysfs directory, to stay backwards compatible with the older
 * drivers/misc/eeprom drivers.
 */
static int nvmem_setup_compat(struct nvmem_device *nvmem,
                              const struct nvmem_config *config)
{
        int rval;

        if (!config->base_dev)
                return -EINVAL;

        if (nvmem->read_only)
                nvmem->eeprom = bin_attr_ro_root_nvmem;
        else
                nvmem->eeprom = bin_attr_rw_root_nvmem;
        nvmem->eeprom.attr.name = "eeprom";
        nvmem->eeprom.size = nvmem->size;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        nvmem->eeprom.attr.key = &eeprom_lock_key;
#endif
        nvmem->eeprom.private = &nvmem->dev;
        nvmem->base_dev = config->base_dev;

        rval = device_create_bin_file(nvmem->base_dev, &nvmem->eeprom);
        if (rval) {
                dev_err(&nvmem->dev,
                        "Failed to create eeprom binary file %d\n", rval);
                return rval;
        }

        nvmem->flags |= FLAG_COMPAT;

        return 0;
}

/**
 * nvmem_register() - Register an nvmem device for the given nvmem_config.
 * Also creates a binary entry in /sys/bus/nvmem/devices/dev-name/nvmem
 *
 * @config: nvmem device configuration with which the nvmem device is created.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer to nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_register(const struct nvmem_config *config)
{
        struct nvmem_device *nvmem;
        struct device_node *np;
        int rval;

        if (!config->dev)
                return ERR_PTR(-EINVAL);

        nvmem = kzalloc(sizeof(*nvmem), GFP_KERNEL);
        if (!nvmem)
                return ERR_PTR(-ENOMEM);

        rval = ida_simple_get(&nvmem_ida, 0, 0, GFP_KERNEL);
        if (rval < 0) {
                kfree(nvmem);
                return ERR_PTR(rval);
        }

        nvmem->id = rval;
        nvmem->owner = config->owner;
        nvmem->stride = config->stride;
        nvmem->word_size = config->word_size;
        nvmem->size = config->size;
        nvmem->dev.type = &nvmem_provider_type;
        nvmem->dev.bus = &nvmem_bus_type;
        nvmem->dev.parent = config->dev;
        nvmem->priv = config->priv;
        nvmem->reg_read = config->reg_read;
        nvmem->reg_write = config->reg_write;
        np = config->dev->of_node;
        nvmem->dev.of_node = np;
        dev_set_name(&nvmem->dev, "%s%d",
                     config->name ? : "nvmem",
                     config->name ? config->id : nvmem->id);

        nvmem->read_only = of_property_read_bool(np, "read-only") |
                           config->read_only;

        if (config->root_only)
                nvmem->dev.groups = nvmem->read_only ?
                        nvmem_ro_root_dev_groups :
                        nvmem_rw_root_dev_groups;
        else
                nvmem->dev.groups = nvmem->read_only ?
                        nvmem_ro_dev_groups :
                        nvmem_rw_dev_groups;

        device_initialize(&nvmem->dev);

        dev_dbg(&nvmem->dev, "Registering nvmem device %s\n", config->name);

        rval = device_add(&nvmem->dev);
        if (rval)
                goto err_put_device;

        if (config->compat) {
                rval = nvmem_setup_compat(nvmem, config);
                if (rval)
                        goto err_device_del;
        }

        if (config->cells)
                nvmem_add_cells(nvmem, config);

        return nvmem;

err_device_del:
        device_del(&nvmem->dev);
err_put_device:
        put_device(&nvmem->dev);

        return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(nvmem_register);
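
/*
 * Example (illustrative sketch only, not part of this file): a minimal
 * provider registering an nvmem device on top of a 256-byte RAM buffer.
 * The "foo" driver, its private struct and the buffer are hypothetical;
 * a real provider would back reg_read/reg_write with its hardware
 * accessors and call nvmem_unregister() on removal.
 *
 *	struct foo_priv {
 *		u8 mem[256];
 *		struct nvmem_device *nvmem;
 *	};
 *
 *	static int foo_reg_read(void *priv, unsigned int off, void *val,
 *				size_t bytes)
 *	{
 *		struct foo_priv *foo = priv;
 *
 *		memcpy(val, foo->mem + off, bytes);
 *		return 0;
 *	}
 *
 *	static int foo_reg_write(void *priv, unsigned int off, void *val,
 *				 size_t bytes)
 *	{
 *		struct foo_priv *foo = priv;
 *
 *		memcpy(foo->mem + off, val, bytes);
 *		return 0;
 *	}
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct nvmem_config cfg = {};
 *		struct foo_priv *foo;
 *
 *		foo = devm_kzalloc(&pdev->dev, sizeof(*foo), GFP_KERNEL);
 *		if (!foo)
 *			return -ENOMEM;
 *
 *		cfg.dev = &pdev->dev;
 *		cfg.name = "foo-nvmem";
 *		cfg.owner = THIS_MODULE;
 *		cfg.word_size = 1;
 *		cfg.stride = 1;
 *		cfg.size = sizeof(foo->mem);
 *		cfg.reg_read = foo_reg_read;
 *		cfg.reg_write = foo_reg_write;
 *		cfg.priv = foo;
 *
 *		foo->nvmem = nvmem_register(&cfg);
 *		return PTR_ERR_OR_ZERO(foo->nvmem);
 *	}
 */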

/**
 * nvmem_unregister() - Unregister a previously registered nvmem device
 *
 * @nvmem: Pointer to the previously registered nvmem device.
 *
 * Return: Will be negative on error or zero on success.
 */
int nvmem_unregister(struct nvmem_device *nvmem)
{
        mutex_lock(&nvmem_mutex);
        if (nvmem->users) {
                mutex_unlock(&nvmem_mutex);
                return -EBUSY;
        }
        mutex_unlock(&nvmem_mutex);

        if (nvmem->flags & FLAG_COMPAT)
                device_remove_bin_file(nvmem->base_dev, &nvmem->eeprom);

        nvmem_device_remove_all_cells(nvmem);
        device_del(&nvmem->dev);
        put_device(&nvmem->dev);

        return 0;
}
EXPORT_SYMBOL_GPL(nvmem_unregister);

static struct nvmem_device *__nvmem_device_get(struct device_node *np,
                                               struct nvmem_cell **cellp,
                                               const char *cell_id)
{
        struct nvmem_device *nvmem = NULL;

        mutex_lock(&nvmem_mutex);

        if (np) {
                nvmem = of_nvmem_find(np);
                if (!nvmem) {
                        mutex_unlock(&nvmem_mutex);
                        return ERR_PTR(-EPROBE_DEFER);
                }
        } else {
                struct nvmem_cell *cell = nvmem_find_cell(cell_id);

                if (cell) {
                        nvmem = cell->nvmem;
                        *cellp = cell;
                }

                if (!nvmem) {
                        mutex_unlock(&nvmem_mutex);
                        return ERR_PTR(-ENOENT);
                }
        }

        nvmem->users++;
        mutex_unlock(&nvmem_mutex);

        if (!try_module_get(nvmem->owner)) {
                dev_err(&nvmem->dev,
                        "could not increase module refcount for cell %s\n",
                        nvmem->name);

                mutex_lock(&nvmem_mutex);
                nvmem->users--;
                mutex_unlock(&nvmem_mutex);

                return ERR_PTR(-EINVAL);
        }

        return nvmem;
}

static void __nvmem_device_put(struct nvmem_device *nvmem)
{
        module_put(nvmem->owner);
        mutex_lock(&nvmem_mutex);
        nvmem->users--;
        mutex_unlock(&nvmem_mutex);
}

static int nvmem_match(struct device *dev, void *data)
{
        return !strcmp(dev_name(dev), data);
}

static struct nvmem_device *nvmem_find(const char *name)
{
        struct device *d;

        d = bus_find_device(&nvmem_bus_type, NULL, (void *)name, nvmem_match);

        if (!d)
                return NULL;

        return to_nvmem_device(d);
}

#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_device_get() - Get nvmem device from a given id
 *
 * @np: Device tree node that uses the nvmem device.
 * @id: nvmem name from nvmem-names property.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *of_nvmem_device_get(struct device_node *np, const char *id)
{
        struct device_node *nvmem_np;
        int index;

        index = of_property_match_string(np, "nvmem-names", id);

        nvmem_np = of_parse_phandle(np, "nvmem", index);
        if (!nvmem_np)
                return ERR_PTR(-EINVAL);

        return __nvmem_device_get(nvmem_np, NULL, NULL);
}
EXPORT_SYMBOL_GPL(of_nvmem_device_get);
#endif
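
/*
 * Example (illustrative sketch only): how a consumer node would reference a
 * whole nvmem provider via the "nvmem"/"nvmem-names" properties and how the
 * driver would look it up at probe time. Node and property values below are
 * made up.
 *
 *	consumer {
 *		nvmem = <&board_eeprom>;
 *		nvmem-names = "calibration";
 *	};
 *
 *	struct nvmem_device *nvmem;
 *
 *	nvmem = of_nvmem_device_get(dev->of_node, "calibration");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);	(may be -EPROBE_DEFER)
 *	...
 *	nvmem_device_put(nvmem);
 */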

/**
 * nvmem_device_get() - Get nvmem device from a given id
 *
 * @dev: Device that uses the nvmem device.
 * @dev_name: name of the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.
 */
struct nvmem_device *nvmem_device_get(struct device *dev, const char *dev_name)
{
        if (dev->of_node) { /* try dt first */
                struct nvmem_device *nvmem;

                nvmem = of_nvmem_device_get(dev->of_node, dev_name);

                if (!IS_ERR(nvmem) || PTR_ERR(nvmem) == -EPROBE_DEFER)
                        return nvmem;
        }

        return nvmem_find(dev_name);
}
EXPORT_SYMBOL_GPL(nvmem_device_get);

static int devm_nvmem_device_match(struct device *dev, void *res, void *data)
{
        struct nvmem_device **nvmem = res;

        if (WARN_ON(!nvmem || !*nvmem))
                return 0;

        return *nvmem == data;
}

static void devm_nvmem_device_release(struct device *dev, void *res)
{
        nvmem_device_put(*(struct nvmem_device **)res);
}

/**
 * devm_nvmem_device_put() - put an already-obtained nvmem device
 *
 * @dev: Device that uses the nvmem device.
 * @nvmem: pointer to the nvmem device obtained with devm_nvmem_device_get(),
 * that needs to be released.
 */
void devm_nvmem_device_put(struct device *dev, struct nvmem_device *nvmem)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_device_release,
                             devm_nvmem_device_match, nvmem);

        WARN_ON(ret);
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_put);

/**
 * nvmem_device_put() - put an already-obtained nvmem device
 *
 * @nvmem: pointer to the nvmem device that needs to be released.
 */
void nvmem_device_put(struct nvmem_device *nvmem)
{
        __nvmem_device_put(nvmem);
}
EXPORT_SYMBOL_GPL(nvmem_device_put);

/**
 * devm_nvmem_device_get() - Get an nvmem device from a given id
 *
 * @dev: Device that requests the nvmem device.
 * @id: name id for the requested nvmem device.
 *
 * Return: ERR_PTR() on error or a valid pointer to a struct nvmem_device
 * on success.  The nvmem_device will be released automatically once the
 * requesting device is freed.
 */
struct nvmem_device *devm_nvmem_device_get(struct device *dev, const char *id)
{
        struct nvmem_device **ptr, *nvmem;

        ptr = devres_alloc(devm_nvmem_device_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        nvmem = nvmem_device_get(dev, id);
        if (!IS_ERR(nvmem)) {
                *ptr = nvmem;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return nvmem;
}
EXPORT_SYMBOL_GPL(devm_nvmem_device_get);
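
/*
 * Example (illustrative sketch only): device-managed lookup followed by a raw
 * read. The "mac-storage" name, offset and length are made-up values; the
 * handle is released automatically when the requesting device goes away.
 *
 *	struct nvmem_device *nvmem;
 *	u8 mac[6];
 *	int ret;
 *
 *	nvmem = devm_nvmem_device_get(dev, "mac-storage");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x40, sizeof(mac), mac);
 *	if (ret != sizeof(mac))
 *		return ret < 0 ? ret : -EIO;
 */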

static struct nvmem_cell *nvmem_cell_get_from_list(const char *cell_id)
{
        struct nvmem_cell *cell = NULL;
        struct nvmem_device *nvmem;

        nvmem = __nvmem_device_get(NULL, &cell, cell_id);
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);

        return cell;
}

#if IS_ENABLED(CONFIG_NVMEM) && IS_ENABLED(CONFIG_OF)
/**
 * of_nvmem_cell_get() - Get an nvmem cell from a given device node and cell id
 *
 * @np: Device tree node that uses the nvmem cell.
 * @name: nvmem cell name from nvmem-cell-names property, or NULL
 *        for the cell at index 0 (the lone cell with no accompanying
 *        nvmem-cell-names property).
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *of_nvmem_cell_get(struct device_node *np,
                                            const char *name)
{
        struct device_node *cell_np, *nvmem_np;
        struct nvmem_cell *cell;
        struct nvmem_device *nvmem;
        const __be32 *addr;
        int rval, len;
        int index = 0;

        /* if cell name exists, find index to the name */
        if (name)
                index = of_property_match_string(np, "nvmem-cell-names", name);

        cell_np = of_parse_phandle(np, "nvmem-cells", index);
        if (!cell_np)
                return ERR_PTR(-EINVAL);

        nvmem_np = of_get_next_parent(cell_np);
        if (!nvmem_np)
                return ERR_PTR(-EINVAL);

        nvmem = __nvmem_device_get(nvmem_np, NULL, NULL);
        if (IS_ERR(nvmem))
                return ERR_CAST(nvmem);

        addr = of_get_property(cell_np, "reg", &len);
        if (!addr || (len < 2 * sizeof(u32))) {
                dev_err(&nvmem->dev, "nvmem: invalid reg on %s\n",
                        cell_np->full_name);
                rval = -EINVAL;
                goto err_mem;
        }

        cell = kzalloc(sizeof(*cell), GFP_KERNEL);
        if (!cell) {
                rval = -ENOMEM;
                goto err_mem;
        }

        cell->nvmem = nvmem;
        cell->offset = be32_to_cpup(addr++);
        cell->bytes = be32_to_cpup(addr);
        cell->name = cell_np->name;

        addr = of_get_property(cell_np, "bits", &len);
        if (addr && len == (2 * sizeof(u32))) {
                cell->bit_offset = be32_to_cpup(addr++);
                cell->nbits = be32_to_cpup(addr);
        }

        if (cell->nbits)
                cell->bytes = DIV_ROUND_UP(cell->nbits + cell->bit_offset,
                                           BITS_PER_BYTE);

        if (!IS_ALIGNED(cell->offset, nvmem->stride)) {
                dev_err(&nvmem->dev,
                        "cell %s unaligned to nvmem stride %d\n",
                        cell->name, nvmem->stride);
                rval = -EINVAL;
                goto err_sanity;
        }

        nvmem_cell_add(cell);

        return cell;

err_sanity:
        kfree(cell);

err_mem:
        __nvmem_device_put(nvmem);

        return ERR_PTR(rval);
}
EXPORT_SYMBOL_GPL(of_nvmem_cell_get);
#endif
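
/*
 * Example (illustrative sketch only): the device tree layout this parser
 * expects. A provider node carries its cells as child nodes with a byte
 * granular "reg" (offset, length) and an optional "bits" (bit offset, number
 * of bits); consumers reference them with "nvmem-cells"/"nvmem-cell-names".
 * All node names and values below are made up.
 *
 *	eeprom@52 {
 *		compatible = "atmel,24c32";
 *		reg = <0x52>;
 *		#address-cells = <1>;
 *		#size-cells = <1>;
 *
 *		calib: calib@10 {
 *			reg = <0x10 0x2>;
 *			bits = <2 10>;
 *		};
 *	};
 *
 *	consumer {
 *		nvmem-cells = <&calib>;
 *		nvmem-cell-names = "calibration";
 *	};
 *
 *	cell = of_nvmem_cell_get(dev->of_node, "calibration");
 */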

/**
 * nvmem_cell_get() - Get an nvmem cell of a device from a given cell name
 *
 * @dev: Device that requests the nvmem cell.
 * @cell_id: nvmem cell name to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell must be released with
 * nvmem_cell_put().
 */
struct nvmem_cell *nvmem_cell_get(struct device *dev, const char *cell_id)
{
        struct nvmem_cell *cell;

        if (dev->of_node) { /* try dt first */
                cell = of_nvmem_cell_get(dev->of_node, cell_id);
                if (!IS_ERR(cell) || PTR_ERR(cell) == -EPROBE_DEFER)
                        return cell;
        }

        return nvmem_cell_get_from_list(cell_id);
}
EXPORT_SYMBOL_GPL(nvmem_cell_get);

static void devm_nvmem_cell_release(struct device *dev, void *res)
{
        nvmem_cell_put(*(struct nvmem_cell **)res);
}

/**
 * devm_nvmem_cell_get() - Get an nvmem cell of a device from a given id
 *
 * @dev: Device that requests the nvmem cell.
 * @id: nvmem cell name id to get.
 *
 * Return: Will be an ERR_PTR() on error or a valid pointer
 * to a struct nvmem_cell.  The nvmem_cell will be released
 * automatically once the device is freed.
 */
struct nvmem_cell *devm_nvmem_cell_get(struct device *dev, const char *id)
{
        struct nvmem_cell **ptr, *cell;

        ptr = devres_alloc(devm_nvmem_cell_release, sizeof(*ptr), GFP_KERNEL);
        if (!ptr)
                return ERR_PTR(-ENOMEM);

        cell = nvmem_cell_get(dev, id);
        if (!IS_ERR(cell)) {
                *ptr = cell;
                devres_add(dev, ptr);
        } else {
                devres_free(ptr);
        }

        return cell;
}
EXPORT_SYMBOL_GPL(devm_nvmem_cell_get);

static int devm_nvmem_cell_match(struct device *dev, void *res, void *data)
{
        struct nvmem_cell **c = res;

        if (WARN_ON(!c || !*c))
                return 0;

        return *c == data;
}

/**
 * devm_nvmem_cell_put() - Release previously allocated nvmem cell
 * from devm_nvmem_cell_get.
 *
 * @dev: Device that requests the nvmem cell.
 * @cell: Previously allocated nvmem cell by devm_nvmem_cell_get().
 */
void devm_nvmem_cell_put(struct device *dev, struct nvmem_cell *cell)
{
        int ret;

        ret = devres_release(dev, devm_nvmem_cell_release,
                                devm_nvmem_cell_match, cell);

        WARN_ON(ret);
}
EXPORT_SYMBOL(devm_nvmem_cell_put);

/**
 * nvmem_cell_put() - Release previously allocated nvmem cell.
 *
 * @cell: Previously allocated nvmem cell by nvmem_cell_get().
 */
void nvmem_cell_put(struct nvmem_cell *cell)
{
        struct nvmem_device *nvmem = cell->nvmem;

        __nvmem_device_put(nvmem);
        nvmem_cell_drop(cell);
}
EXPORT_SYMBOL_GPL(nvmem_cell_put);

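/*
 * Worked example for the helper below (added for illustration): for a cell
 * with bit_offset = 2 and nbits = 10, two bytes are read from the device.
 * Each byte is shifted right by two, the two low bits of the following byte
 * are folded into the freed msb positions of the previous one, and the bits
 * above nbits in the last used byte are cleared, leaving the 10-bit value
 * right-aligned at the start of buf.
 */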
static inline void nvmem_shift_read_buffer_in_place(struct nvmem_cell *cell,
                                                    void *buf)
{
        u8 *p, *b;
        int i, bit_offset = cell->bit_offset;

        p = b = buf;
        if (bit_offset) {
                /* First shift */
                *b++ >>= bit_offset;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get bits from next byte and shift them towards msb */
                        *p |= *b << (BITS_PER_BYTE - bit_offset);

                        p = b;
                        *b++ >>= bit_offset;
                }

                /* result fits in fewer bytes */
                if (cell->bytes != DIV_ROUND_UP(cell->nbits, BITS_PER_BYTE))
                        *p-- = 0;
        }
        /* clear msb bits if any leftover in the last byte */
        if (cell->nbits % BITS_PER_BYTE)
                *p &= GENMASK((cell->nbits % BITS_PER_BYTE) - 1, 0);
}

static int __nvmem_cell_read(struct nvmem_device *nvmem,
                      struct nvmem_cell *cell,
                      void *buf, size_t *len)
{
        int rc;

        rc = nvmem_reg_read(nvmem, cell->offset, buf, cell->bytes);

        if (rc)
                return rc;

        /* shift bits in-place */
        if (cell->bit_offset || cell->nbits)
                nvmem_shift_read_buffer_in_place(cell, buf);

        if (len)
                *len = cell->bytes;

        return 0;
}

/**
 * nvmem_cell_read() - Read a given nvmem cell
 *
 * @cell: nvmem cell to be read.
 * @len: pointer to length of cell which will be populated on successful read;
 *       can be NULL.
 *
 * Return: ERR_PTR() on error or a valid pointer to a buffer on success. The
 * buffer should be freed by the consumer with a kfree().
 */
void *nvmem_cell_read(struct nvmem_cell *cell, size_t *len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        u8 *buf;
        int rc;

        if (!nvmem)
                return ERR_PTR(-EINVAL);

        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        rc = __nvmem_cell_read(nvmem, cell, buf, len);
        if (rc) {
                kfree(buf);
                return ERR_PTR(rc);
        }

        return buf;
}
EXPORT_SYMBOL_GPL(nvmem_cell_read);
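
/*
 * Example (illustrative sketch only): reading a cell from a consumer driver.
 * The cell name "calibration" is made up; the buffer returned here must be
 * freed with kfree() and the cell released with nvmem_cell_put().
 *
 *	struct nvmem_cell *cell;
 *	size_t len;
 *	u8 *data;
 *
 *	cell = nvmem_cell_get(dev, "calibration");
 *	if (IS_ERR(cell))
 *		return PTR_ERR(cell);
 *
 *	data = nvmem_cell_read(cell, &len);
 *	if (IS_ERR(data)) {
 *		nvmem_cell_put(cell);
 *		return PTR_ERR(data);
 *	}
 *
 *	... use data[0..len - 1] ...
 *
 *	kfree(data);
 *	nvmem_cell_put(cell);
 */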

static inline void *nvmem_cell_prepare_write_buffer(struct nvmem_cell *cell,
                                                    u8 *_buf, int len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int i, rc, nbits, bit_offset = cell->bit_offset;
        u8 v, *p, *buf, *b, pbyte, pbits;

        nbits = cell->nbits;
        buf = kzalloc(cell->bytes, GFP_KERNEL);
        if (!buf)
                return ERR_PTR(-ENOMEM);

        memcpy(buf, _buf, len);
        p = b = buf;

        if (bit_offset) {
                pbyte = *b;
                *b <<= bit_offset;

                /* setup the first byte with lsb bits from nvmem */
                rc = nvmem_reg_read(nvmem, cell->offset, &v, 1);
                if (rc)
                        goto err;
                *b++ |= GENMASK(bit_offset - 1, 0) & v;

                /* setup rest of the bytes if any */
                for (i = 1; i < cell->bytes; i++) {
                        /* Get last byte bits and shift them towards lsb */
                        pbits = pbyte >> (BITS_PER_BYTE - 1 - bit_offset);
                        pbyte = *b;
                        p = b;
                        *b <<= bit_offset;
                        *b++ |= pbits;
                }
        }

        /* if the cell does not end on a byte boundary */
        if ((nbits + bit_offset) % BITS_PER_BYTE) {
                /* setup the last byte with msb bits from nvmem */
                rc = nvmem_reg_read(nvmem,
                                    cell->offset + cell->bytes - 1, &v, 1);
                if (rc)
                        goto err;
                *p |= GENMASK(7, (nbits + bit_offset) % BITS_PER_BYTE) & v;
        }

        return buf;

err:
        kfree(buf);
        return ERR_PTR(rc);
}

/**
 * nvmem_cell_write() - Write to a given nvmem cell
 *
 * @cell: nvmem cell to be written.
 * @buf: Buffer to be written.
 * @len: length of buffer to be written to nvmem cell.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_cell_write(struct nvmem_cell *cell, void *buf, size_t len)
{
        struct nvmem_device *nvmem = cell->nvmem;
        int rc;

        if (!nvmem || nvmem->read_only ||
            (cell->bit_offset == 0 && len != cell->bytes))
                return -EINVAL;

        if (cell->bit_offset || cell->nbits) {
                buf = nvmem_cell_prepare_write_buffer(cell, buf, len);
                if (IS_ERR(buf))
                        return PTR_ERR(buf);
        }

        rc = nvmem_reg_write(nvmem, cell->offset, buf, cell->bytes);

        /* free the tmp buffer */
        if (cell->bit_offset || cell->nbits)
                kfree(buf);

        if (rc)
                return rc;

        return len;
}
EXPORT_SYMBOL_GPL(nvmem_cell_write);
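
/*
 * Example (illustrative sketch only): updating the same hypothetical
 * "calibration" cell obtained above. For byte-aligned cells len must match
 * the cell size; bit-level cells are merged with the surrounding device
 * bytes by the prepare helper above.
 *
 *	u8 val[2] = { 0xab, 0x02 };
 *	int ret;
 *
 *	ret = nvmem_cell_write(cell, val, sizeof(val));
 *	if (ret < 0)
 *		return ret;
 */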

/**
 * nvmem_device_cell_read() - Read a given nvmem device and cell
 *
 * @nvmem: nvmem device to read from.
 * @info: nvmem cell info to be read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and a negative error code on
 * failure.
 */
ssize_t nvmem_device_cell_read(struct nvmem_device *nvmem,
                           struct nvmem_cell_info *info, void *buf)
{
        struct nvmem_cell cell;
        int rc;
        ssize_t len;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
        if (rc)
                return rc;

        rc = __nvmem_cell_read(nvmem, &cell, buf, &len);
        if (rc)
                return rc;

        return len;
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_read);

/**
 * nvmem_device_cell_write() - Write a cell to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @info: nvmem cell info to be written.
 * @buf: buffer to be written to cell.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_device_cell_write(struct nvmem_device *nvmem,
                            struct nvmem_cell_info *info, void *buf)
{
        struct nvmem_cell cell;
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_cell_info_to_nvmem_cell(nvmem, info, &cell);
        if (rc)
                return rc;

        return nvmem_cell_write(&cell, buf, cell.bytes);
}
EXPORT_SYMBOL_GPL(nvmem_device_cell_write);

/**
 * nvmem_device_read() - Read from a given nvmem device
 *
 * @nvmem: nvmem device to read from.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to read.
 * @buf: buffer pointer which will be populated on successful read.
 *
 * Return: number of bytes read on success and a negative error code on
 * failure.
 */
int nvmem_device_read(struct nvmem_device *nvmem,
                      unsigned int offset,
                      size_t bytes, void *buf)
{
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_reg_read(nvmem, offset, buf, bytes);

        if (rc)
                return rc;

        return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_read);

/**
 * nvmem_device_write() - Write to a given nvmem device
 *
 * @nvmem: nvmem device to be written to.
 * @offset: offset in nvmem device.
 * @bytes: number of bytes to write.
 * @buf: buffer to be written.
 *
 * Return: number of bytes written or a negative error code on failure.
 */
int nvmem_device_write(struct nvmem_device *nvmem,
                       unsigned int offset,
                       size_t bytes, void *buf)
{
        int rc;

        if (!nvmem)
                return -EINVAL;

        rc = nvmem_reg_write(nvmem, offset, buf, bytes);

        if (rc)
                return rc;

        return bytes;
}
EXPORT_SYMBOL_GPL(nvmem_device_write);
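
/*
 * Example (illustrative sketch only): raw access without cells. The offset,
 * size and the "board-eeprom" lookup name are made up.
 *
 *	struct nvmem_device *nvmem;
 *	u8 serial[16];
 *	int ret;
 *
 *	nvmem = nvmem_device_get(dev, "board-eeprom");
 *	if (IS_ERR(nvmem))
 *		return PTR_ERR(nvmem);
 *
 *	ret = nvmem_device_read(nvmem, 0x100, sizeof(serial), serial);
 *	if (ret >= 0)
 *		ret = nvmem_device_write(nvmem, 0x100, sizeof(serial), serial);
 *
 *	nvmem_device_put(nvmem);
 */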

static int __init nvmem_init(void)
{
        return bus_register(&nvmem_bus_type);
}

static void __exit nvmem_exit(void)
{
        bus_unregister(&nvmem_bus_type);
}

subsys_initcall(nvmem_init);
module_exit(nvmem_exit);

MODULE_AUTHOR("Srinivas Kandagatla <srinivas.kandagatla@linaro.org>");
MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
MODULE_DESCRIPTION("nvmem Driver Core");
MODULE_LICENSE("GPL v2");