// SPDX-License-Identifier: GPL-2.0+

/*
 * EEPROM driver for RAVE SP
 *
 * Copyright (C) 2018 Zodiac Inflight Innovations
 *
 */
#include <linux/kernel.h>
#include <linux/mfd/rave-sp.h>
#include <linux/module.h>
#include <linux/nvmem-provider.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

/**
 * enum rave_sp_eeprom_access_type - Supported types of EEPROM access
 *
 * @RAVE_SP_EEPROM_WRITE:       EEPROM write
 * @RAVE_SP_EEPROM_READ:        EEPROM read
 */
enum rave_sp_eeprom_access_type {
        RAVE_SP_EEPROM_WRITE = 0,
        RAVE_SP_EEPROM_READ  = 1,
};

/**
 * enum rave_sp_eeprom_header_size - EEPROM command header sizes
 *
 * @RAVE_SP_EEPROM_HEADER_SMALL: EEPROM header size for "small" devices (<= 8K)
 * @RAVE_SP_EEPROM_HEADER_BIG:   EEPROM header size for "big" devices (> 8K)
 */
enum rave_sp_eeprom_header_size {
        RAVE_SP_EEPROM_HEADER_SMALL = 4U,
        RAVE_SP_EEPROM_HEADER_BIG   = 5U,
};

#define RAVE_SP_EEPROM_PAGE_SIZE        32U
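
/*
 * Illustrative command layout, derived from rave_sp_eeprom_io() below
 * (the RAVE SP ICD remains the authoritative reference). A "small"
 * device uses a 4-byte header, a "big" device adds a fifth byte
 * carrying the MSB of the page index:
 *
 *      cmd[0]  EEPROM device address
 *      cmd[1]  0 (always zero in this driver)
 *      cmd[2]  access type (RAVE_SP_EEPROM_WRITE/READ)
 *      cmd[3]  page index, LSB
 *      cmd[4]  page index, MSB (RAVE_SP_EEPROM_HEADER_BIG only)
 *      ...     RAVE_SP_EEPROM_PAGE_SIZE bytes of data (writes only)
 */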

/**
 * struct rave_sp_eeprom_page - RAVE SP EEPROM page
 *
 * @type:       Access type (see enum rave_sp_eeprom_access_type)
 * @success:    Success flag (Success = 1, Failure = 0)
 * @data:       Read data
 *
 * Note this structure corresponds to the RSP_*_EEPROM payload from the
 * RAVE SP ICD.
 */
struct rave_sp_eeprom_page {
        u8  type;
        u8  success;
        u8  data[RAVE_SP_EEPROM_PAGE_SIZE];
} __packed;

/**
 * struct rave_sp_eeprom - RAVE SP EEPROM device
 *
 * @sp:                 Pointer to parent RAVE SP device
 * @mutex:              Lock protecting access to EEPROM
 * @address:            EEPROM device address
 * @header_size:        Size of EEPROM command header for this device
 * @dev:                Pointer to corresponding struct device used for logging
 */
struct rave_sp_eeprom {
        struct rave_sp *sp;
        struct mutex mutex;
        u8 address;
        unsigned int header_size;
        struct device *dev;
};

/**
 * rave_sp_eeprom_io - Low-level part of EEPROM page access
 *
 * @eeprom:     EEPROM device to access
 * @type:       EEPROM access type (read or write)
 * @idx:        Number of the EEPROM page
 * @page:       Data to write or buffer to store result (via page->data)
 *
 * This function does all of the low-level work required to perform an
 * EEPROM access. This includes formatting the correct command payload,
 * sending it and checking the received results.
 *
 * Returns zero in case of success or negative error code in
 * case of failure.
 */
static int rave_sp_eeprom_io(struct rave_sp_eeprom *eeprom,
                             enum rave_sp_eeprom_access_type type,
                             u16 idx,
                             struct rave_sp_eeprom_page *page)
{
        const bool is_write = type == RAVE_SP_EEPROM_WRITE;
        const unsigned int data_size = is_write ? sizeof(page->data) : 0;
        const unsigned int cmd_size = eeprom->header_size + data_size;
        const unsigned int rsp_size =
                is_write ? sizeof(*page) - sizeof(page->data) : sizeof(*page);
        unsigned int offset = 0;
        u8 cmd[cmd_size];
        int ret;

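        /*
         * Note on the sizes computed above: a write sends header_size
         * plus 32 command bytes and expects only the two-byte
         * type/success response back, while a read sends just the
         * header and expects the full 34-byte struct
         * rave_sp_eeprom_page in response.
         */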
        cmd[offset++] = eeprom->address;
        cmd[offset++] = 0;
        cmd[offset++] = type;
        cmd[offset++] = idx;

        /*
         * If there's still room in this command's header it means we
         * are talking to an EEPROM that uses 16-bit page numbers and we
         * have to specify the index's MSB in the payload as well.
         */
        if (offset < eeprom->header_size)
                cmd[offset++] = idx >> 8;
        /*
         * Copy our data to write to the command buffer first. In case
         * of a read, data_size will be zero and the memcpy becomes a
         * no-op.
         */
        memcpy(&cmd[offset], page->data, data_size);

        ret = rave_sp_exec(eeprom->sp, cmd, cmd_size, page, rsp_size);
        if (ret)
                return ret;

        if (page->type != type)
                return -EPROTO;

        if (!page->success)
                return -EIO;

        return 0;
}

/**
 * rave_sp_eeprom_page_access - Access single EEPROM page
 *
 * @eeprom:     EEPROM device to access
 * @type:       Access type to perform (read or write)
 * @offset:     Offset within EEPROM to access
 * @data:       Data buffer
 * @data_len:   Size of the data buffer
 *
 * This function performs a generic access to a single page or a
 * portion thereof. Requested access MUST NOT cross the EEPROM page
 * boundary.
 *
 * Returns zero in case of success or negative error code in
 * case of failure.
 */
static int
rave_sp_eeprom_page_access(struct rave_sp_eeprom *eeprom,
                           enum rave_sp_eeprom_access_type type,
                           unsigned int offset, u8 *data,
                           size_t data_len)
{
        const unsigned int page_offset = offset % RAVE_SP_EEPROM_PAGE_SIZE;
        const unsigned int page_nr     = offset / RAVE_SP_EEPROM_PAGE_SIZE;
        struct rave_sp_eeprom_page page;
        int ret;

        /*
         * This function will not work if the data access we've been
         * asked to do crosses an EEPROM page boundary. Normally this
         * should never happen and getting here would indicate a bug
         * in the code.
         */
        if (WARN_ON(data_len > sizeof(page.data) - page_offset))
                return -EINVAL;

        if (type == RAVE_SP_EEPROM_WRITE) {
                /*
                 * If doing a partial write we need to do a read first
                 * to fill the rest of the page with correct data.
                 */
                if (data_len < RAVE_SP_EEPROM_PAGE_SIZE) {
                        ret = rave_sp_eeprom_io(eeprom, RAVE_SP_EEPROM_READ,
                                                page_nr, &page);
                        if (ret)
                                return ret;
                }

                memcpy(&page.data[page_offset], data, data_len);
        }

        ret = rave_sp_eeprom_io(eeprom, type, page_nr, &page);
        if (ret)
                return ret;

        /*
         * Since we receive the result of a read via the 'page.data'
         * buffer we need to copy it to 'data'.
         */
        if (type == RAVE_SP_EEPROM_READ)
                memcpy(data, &page.data[page_offset], data_len);

        return 0;
}
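
/*
 * Example of the read-modify-write path above, with hypothetical
 * numbers: a 4-byte write at EEPROM offset 70 lands in page 2 at
 * page_offset 6. Since data_len (4) is smaller than
 * RAVE_SP_EEPROM_PAGE_SIZE, the page is read first, bytes 6..9 of
 * page.data are overwritten with the caller's data and the whole page
 * is then written back.
 */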

/**
 * rave_sp_eeprom_access - Access EEPROM data
 *
 * @eeprom:     EEPROM device to access
 * @type:       Access type to perform (read or write)
 * @offset:     Offset within EEPROM to access
 * @data:       Data buffer
 * @data_len:   Size of the data buffer
 *
 * This function performs a generic access (either read or write) at an
 * arbitrary offset (not necessarily page aligned) of arbitrary length
 * (not constrained by the EEPROM page size).
 *
 * Returns zero in case of success or negative error code in case of
 * failure.
 */
static int rave_sp_eeprom_access(struct rave_sp_eeprom *eeprom,
                                 enum rave_sp_eeprom_access_type type,
                                 unsigned int offset, u8 *data,
                                 unsigned int data_len)
{
        unsigned int residue;
        unsigned int chunk;
        unsigned int head;
        int ret;

        mutex_lock(&eeprom->mutex);

        head    = offset % RAVE_SP_EEPROM_PAGE_SIZE;
        residue = data_len;

        do {
                /*
                 * On the first iteration, if we are doing an access
                 * that is not 32-byte aligned, we need to access only
                 * data up to a page boundary to avoid crossing it in
                 * rave_sp_eeprom_page_access()
                 */
                if (unlikely(head)) {
                        chunk = RAVE_SP_EEPROM_PAGE_SIZE - head;
                        /*
                         * This can only happen once per
                         * rave_sp_eeprom_access() call, so we set
                         * head to zero to process all the other
                         * iterations normally.
                         */
                        head  = 0;
                } else {
                        chunk = RAVE_SP_EEPROM_PAGE_SIZE;
                }

                /*
                 * We should never read more than 'residue' bytes
                 */
                chunk = min(chunk, residue);
                ret = rave_sp_eeprom_page_access(eeprom, type, offset,
                                                 data, chunk);
                if (ret)
                        goto out;

                residue -= chunk;
                offset  += chunk;
                data    += chunk;
        } while (residue);
out:
        mutex_unlock(&eeprom->mutex);
        return ret;
}
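
/*
 * Example of how the loop above splits an unaligned access, with
 * hypothetical numbers: a 48-byte read starting at offset 20 is done
 * as three page-bounded chunks of 12 bytes (offsets 20..31), 32 bytes
 * (offsets 32..63) and 4 bytes (offsets 64..67).
 */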

static int rave_sp_eeprom_reg_read(void *eeprom, unsigned int offset,
                                   void *val, size_t bytes)
{
        return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_READ,
                                     offset, val, bytes);
}

static int rave_sp_eeprom_reg_write(void *eeprom, unsigned int offset,
                                    void *val, size_t bytes)
{
        return rave_sp_eeprom_access(eeprom, RAVE_SP_EEPROM_WRITE,
                                     offset, val, bytes);
}
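
/*
 * The two thin wrappers above are handed to the nvmem core as
 * config.reg_read and config.reg_write in rave_sp_eeprom_probe()
 * below, so any nvmem consumer (or userspace access through the nvmem
 * sysfs interface) ends up in rave_sp_eeprom_access() with byte
 * granularity, since both word_size and stride are set to 1.
 */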

static int rave_sp_eeprom_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct rave_sp *sp = dev_get_drvdata(dev->parent);
        struct device_node *np = dev->of_node;
        struct nvmem_config config = { 0 };
        struct rave_sp_eeprom *eeprom;
        struct nvmem_device *nvmem;
        u32 reg[2], size;

        if (of_property_read_u32_array(np, "reg", reg, ARRAY_SIZE(reg))) {
                dev_err(dev, "Failed to parse \"reg\" property\n");
                return -EINVAL;
        }

        size = reg[1];
        /*
         * Per ICD, we have no more than 2 bytes to specify the EEPROM
         * page, i.e. at most U16_MAX pages of RAVE_SP_EEPROM_PAGE_SIZE
         * bytes each.
         */
        if (size > U16_MAX * RAVE_SP_EEPROM_PAGE_SIZE) {
                dev_err(dev, "Specified size is too big\n");
                return -EINVAL;
        }

        eeprom = devm_kzalloc(dev, sizeof(*eeprom), GFP_KERNEL);
        if (!eeprom)
                return -ENOMEM;

        eeprom->address = reg[0];
        eeprom->sp      = sp;
        eeprom->dev     = dev;

        if (size > SZ_8K)
                eeprom->header_size = RAVE_SP_EEPROM_HEADER_BIG;
        else
                eeprom->header_size = RAVE_SP_EEPROM_HEADER_SMALL;

        mutex_init(&eeprom->mutex);

        config.id               = -1;
        of_property_read_string(np, "zii,eeprom-name", &config.name);
        config.priv             = eeprom;
        config.dev              = dev;
        config.size             = size;
        config.reg_read         = rave_sp_eeprom_reg_read;
        config.reg_write        = rave_sp_eeprom_reg_write;
        config.word_size        = 1;
        config.stride           = 1;

        nvmem = devm_nvmem_register(dev, &config);

        return PTR_ERR_OR_ZERO(nvmem);
}

static const struct of_device_id rave_sp_eeprom_of_match[] = {
        { .compatible = "zii,rave-sp-eeprom" },
        {}
};
MODULE_DEVICE_TABLE(of, rave_sp_eeprom_of_match);
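
/*
 * Illustrative (hypothetical) device tree child node, based on the
 * properties parsed in rave_sp_eeprom_probe() above; see the
 * zii,rave-sp binding document for the authoritative format:
 *
 *      eeprom@a4 {
 *              compatible = "zii,rave-sp-eeprom";
 *              reg = <0xa4 0x4000>;
 *              zii,eeprom-name = "main-eeprom";
 *      };
 */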

static struct platform_driver rave_sp_eeprom_driver = {
        .probe = rave_sp_eeprom_probe,
        .driver = {
                .name = KBUILD_MODNAME,
                .of_match_table = rave_sp_eeprom_of_match,
        },
};
module_platform_driver(rave_sp_eeprom_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Andrey Vostrikov <andrey.vostrikov@cogentembedded.com>");
MODULE_AUTHOR("Nikita Yushchenko <nikita.yoush@cogentembedded.com>");
MODULE_AUTHOR("Andrey Smirnov <andrew.smirnov@gmail.com>");
MODULE_DESCRIPTION("RAVE SP EEPROM driver");