linux/drivers/base/regmap/regcache-lzo.c
/*
 * Register cache access API - LZO caching support
 *
 * Copyright 2011 Wolfson Microelectronics plc
 *
 * Author: Dimitris Papastamos <dp@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/lzo.h>
#include <linux/slab.h>

#include "internal.h"

static int regcache_lzo_exit(struct regmap *map);

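/*
 * Per-block compression context.  The raw register cache is split into
 * blocks; each block keeps its own compressed copy (dst), scratch memory
 * for the compressor (wmem) and a pointer to the dirty bitmap shared by
 * all blocks, used when syncing the cache back to the hardware.
 */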
struct regcache_lzo_ctx {
        void *wmem;
        void *dst;
        const void *src;
        size_t src_len;
        size_t dst_len;
        size_t decompressed_size;
        unsigned long *sync_bmp;
        int sync_bmp_nbits;
};

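/* The register cache is divided into a fixed number of LZO blocks. */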
#define LZO_BLOCK_NUM 8
static int regcache_lzo_block_count(struct regmap *map)
{
        return LZO_BLOCK_NUM;
}

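/* Allocate the scratch memory the LZO1X compressor needs for one block. */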
static int regcache_lzo_prepare(struct regcache_lzo_ctx *lzo_ctx)
{
        lzo_ctx->wmem = kmalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
        if (!lzo_ctx->wmem)
                return -ENOMEM;
        return 0;
}

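/*
 * Compress src into dst.  On success dst_len is updated to the actual
 * compressed size.
 */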
static int regcache_lzo_compress(struct regcache_lzo_ctx *lzo_ctx)
{
        size_t compress_size;
        int ret;

        ret = lzo1x_1_compress(lzo_ctx->src, lzo_ctx->src_len,
                               lzo_ctx->dst, &compress_size, lzo_ctx->wmem);
        if (ret != LZO_E_OK || compress_size > lzo_ctx->dst_len)
                return -EINVAL;
        lzo_ctx->dst_len = compress_size;
        return 0;
}

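/*
 * Decompress src into dst.  The caller sets dst_len to the expected
 * decompressed size; anything else is treated as an error.
 */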
static int regcache_lzo_decompress(struct regcache_lzo_ctx *lzo_ctx)
{
        size_t dst_len;
        int ret;

        dst_len = lzo_ctx->dst_len;
        ret = lzo1x_decompress_safe(lzo_ctx->src, lzo_ctx->src_len,
                                    lzo_ctx->dst, &dst_len);
        if (ret != LZO_E_OK || dst_len != lzo_ctx->dst_len)
                return -EINVAL;
        return 0;
}

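/*
 * Allocate an output buffer sized for the worst-case LZO expansion of
 * PAGE_SIZE and compress one cache block into it.  The buffer is kept
 * as the block's compressed copy (dst).
 */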
static int regcache_lzo_compress_cache_block(struct regmap *map,
                struct regcache_lzo_ctx *lzo_ctx)
{
        int ret;

        lzo_ctx->dst_len = lzo1x_worst_compress(PAGE_SIZE);
        lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
        if (!lzo_ctx->dst) {
                lzo_ctx->dst_len = 0;
                return -ENOMEM;
        }

        ret = regcache_lzo_compress(lzo_ctx);
        if (ret < 0)
                return ret;
        return 0;
}

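/*
 * Allocate a buffer of the block's original (decompressed) size and
 * decompress the block into it.
 */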
static int regcache_lzo_decompress_cache_block(struct regmap *map,
                struct regcache_lzo_ctx *lzo_ctx)
{
        int ret;

        lzo_ctx->dst_len = lzo_ctx->decompressed_size;
        lzo_ctx->dst = kmalloc(lzo_ctx->dst_len, GFP_KERNEL);
        if (!lzo_ctx->dst) {
                lzo_ctx->dst_len = 0;
                return -ENOMEM;
        }

        ret = regcache_lzo_decompress(lzo_ctx);
        if (ret < 0)
                return ret;
        return 0;
}

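/* Index of the compressed block that holds the cache entry for reg. */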
static inline int regcache_lzo_get_blkindex(struct regmap *map,
                                            unsigned int reg)
{
        return ((reg / map->reg_stride) * map->cache_word_size) /
                DIV_ROUND_UP(map->cache_size_raw,
                             regcache_lzo_block_count(map));
}

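/* Position of the register within its decompressed block. */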
static inline int regcache_lzo_get_blkpos(struct regmap *map,
                                          unsigned int reg)
{
        return (reg / map->reg_stride) %
                    (DIV_ROUND_UP(map->cache_size_raw,
                                  regcache_lzo_block_count(map)) /
                     map->cache_word_size);
}

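/* Size in bytes of one uncompressed cache block. */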
static inline int regcache_lzo_get_blksize(struct regmap *map)
{
        return DIV_ROUND_UP(map->cache_size_raw,
                            regcache_lzo_block_count(map));
}

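/*
 * Build the compressed cache: allocate the block contexts and the shared
 * sync bitmap, then split the raw register defaults into blocks and
 * compress each one.
 */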
static int regcache_lzo_init(struct regmap *map)
{
        struct regcache_lzo_ctx **lzo_blocks;
        size_t bmp_size;
        int ret, i, blksize, blkcount;
        const char *p, *end;
        unsigned long *sync_bmp;

        ret = 0;

        blkcount = regcache_lzo_block_count(map);
        map->cache = kcalloc(blkcount, sizeof(*lzo_blocks),
                             GFP_KERNEL);
        if (!map->cache)
                return -ENOMEM;
        lzo_blocks = map->cache;

        /*
         * allocate a bitmap to be used when syncing the cache with
         * the hardware.  Each time a register is modified, the corresponding
         * bit is set in the bitmap, so we know that we have to sync
         * that register.
         */
        bmp_size = map->num_reg_defaults_raw;
        sync_bmp = kmalloc_array(BITS_TO_LONGS(bmp_size), sizeof(long),
                                 GFP_KERNEL);
        if (!sync_bmp) {
                ret = -ENOMEM;
                goto err;
        }
        bitmap_zero(sync_bmp, bmp_size);

        /* allocate the lzo blocks and initialize them */
        for (i = 0; i < blkcount; i++) {
                lzo_blocks[i] = kzalloc(sizeof **lzo_blocks,
                                        GFP_KERNEL);
                if (!lzo_blocks[i]) {
                        kfree(sync_bmp);
                        ret = -ENOMEM;
                        goto err;
                }
                lzo_blocks[i]->sync_bmp = sync_bmp;
                lzo_blocks[i]->sync_bmp_nbits = bmp_size;
                /* alloc the working space for the compressed block */
                ret = regcache_lzo_prepare(lzo_blocks[i]);
                if (ret < 0)
                        goto err;
        }

        blksize = regcache_lzo_get_blksize(map);
        p = map->reg_defaults_raw;
        end = map->reg_defaults_raw + map->cache_size_raw;
        /* compress the register map and fill the lzo blocks */
        for (i = 0; i < blkcount; i++, p += blksize) {
                lzo_blocks[i]->src = p;
                if (p + blksize > end)
                        lzo_blocks[i]->src_len = end - p;
                else
                        lzo_blocks[i]->src_len = blksize;
                ret = regcache_lzo_compress_cache_block(map,
                                                        lzo_blocks[i]);
                if (ret < 0)
                        goto err;
                lzo_blocks[i]->decompressed_size =
                        lzo_blocks[i]->src_len;
        }

        return 0;
err:
        regcache_lzo_exit(map);
        return ret;
}

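/* Free all blocks, their buffers and the shared sync bitmap. */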
static int regcache_lzo_exit(struct regmap *map)
{
        struct regcache_lzo_ctx **lzo_blocks;
        int i, blkcount;

        lzo_blocks = map->cache;
        if (!lzo_blocks)
                return 0;

        blkcount = regcache_lzo_block_count(map);
        /*
         * the pointer to the bitmap used for syncing the cache
         * is shared amongst all lzo_blocks.  Ensure it is freed
         * only once.
         */
        if (lzo_blocks[0])
                kfree(lzo_blocks[0]->sync_bmp);
        for (i = 0; i < blkcount; i++) {
                if (lzo_blocks[i]) {
                        kfree(lzo_blocks[i]->wmem);
                        kfree(lzo_blocks[i]->dst);
                }
                /* each lzo_block is a pointer returned by kmalloc or NULL */
                kfree(lzo_blocks[i]);
        }
        kfree(lzo_blocks);
        map->cache = NULL;
        return 0;
}

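/*
 * Read a register value from the cache: decompress the block that holds
 * the register into a temporary buffer, fetch the value, then drop the
 * temporary buffer and restore the compressed copy.
 */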
static int regcache_lzo_read(struct regmap *map,
                             unsigned int reg, unsigned int *value)
{
        struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
        int ret, blkindex, blkpos;
        size_t tmp_dst_len;
        void *tmp_dst;

        /* index of the compressed lzo block */
        blkindex = regcache_lzo_get_blkindex(map, reg);
        /* register index within the decompressed block */
        blkpos = regcache_lzo_get_blkpos(map, reg);
        lzo_blocks = map->cache;
        lzo_block = lzo_blocks[blkindex];

        /* save the pointer and length of the compressed block */
        tmp_dst = lzo_block->dst;
        tmp_dst_len = lzo_block->dst_len;

        /* prepare the source to be the compressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* decompress the block */
        ret = regcache_lzo_decompress_cache_block(map, lzo_block);
        if (ret >= 0)
                /* fetch the value from the cache */
                *value = regcache_get_val(map, lzo_block->dst, blkpos);

        kfree(lzo_block->dst);
        /* restore the pointer and length of the compressed block */
        lzo_block->dst = tmp_dst;
        lzo_block->dst_len = tmp_dst_len;

        return ret;
}

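/*
 * Write a register value to the cache: decompress the block, update the
 * value, recompress the block and mark the register as dirty in the sync
 * bitmap.  If the value is unchanged or anything fails, the original
 * compressed block is kept.
 */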
static int regcache_lzo_write(struct regmap *map,
                              unsigned int reg, unsigned int value)
{
        struct regcache_lzo_ctx *lzo_block, **lzo_blocks;
        int ret, blkindex, blkpos;
        size_t tmp_dst_len;
        void *tmp_dst;

        /* index of the compressed lzo block */
        blkindex = regcache_lzo_get_blkindex(map, reg);
        /* register index within the decompressed block */
        blkpos = regcache_lzo_get_blkpos(map, reg);
        lzo_blocks = map->cache;
        lzo_block = lzo_blocks[blkindex];

        /* save the pointer and length of the compressed block */
        tmp_dst = lzo_block->dst;
        tmp_dst_len = lzo_block->dst_len;

        /* prepare the source to be the compressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* decompress the block */
        ret = regcache_lzo_decompress_cache_block(map, lzo_block);
        if (ret < 0) {
                kfree(lzo_block->dst);
                goto out;
        }

        /* write the new value to the cache */
        if (regcache_set_val(map, lzo_block->dst, blkpos, value)) {
                kfree(lzo_block->dst);
                goto out;
        }

        /* prepare the source to be the decompressed block */
        lzo_block->src = lzo_block->dst;
        lzo_block->src_len = lzo_block->dst_len;

        /* compress the block */
        ret = regcache_lzo_compress_cache_block(map, lzo_block);
        if (ret < 0) {
                kfree(lzo_block->dst);
                kfree(lzo_block->src);
                goto out;
        }

        /* set the bit so we know we have to sync this register */
        set_bit(reg / map->reg_stride, lzo_block->sync_bmp);
        kfree(tmp_dst);
        kfree(lzo_block->src);
        return 0;
out:
        lzo_block->dst = tmp_dst;
        lzo_block->dst_len = tmp_dst_len;
        return ret;
}

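/*
 * Write every dirty register in the [min, max] range back to the hardware,
 * skipping registers that still hold their hardware default.
 */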
static int regcache_lzo_sync(struct regmap *map, unsigned int min,
                             unsigned int max)
{
        struct regcache_lzo_ctx **lzo_blocks;
        unsigned int val;
        int i;
        int ret;

        lzo_blocks = map->cache;
        i = min;
        for_each_set_bit_from(i, lzo_blocks[0]->sync_bmp,
                              lzo_blocks[0]->sync_bmp_nbits) {
                if (i > max)
                        continue;

                ret = regcache_read(map, i, &val);
                if (ret)
                        return ret;

                /* Is this the hardware default?  If so skip. */
                ret = regcache_lookup_reg(map, i);
                if (ret > 0 && val == map->reg_defaults[ret].def)
                        continue;

                map->cache_bypass = true;
                ret = _regmap_write(map, i, val);
                map->cache_bypass = false;
                if (ret)
                        return ret;
                dev_dbg(map->dev, "Synced register %#x, value %#x\n",
                        i, val);
        }

        return 0;
}

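/* LZO cache operations, used when a driver requests REGCACHE_COMPRESSED. */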
struct regcache_ops regcache_lzo_ops = {
        .type = REGCACHE_COMPRESSED,
        .name = "lzo",
        .init = regcache_lzo_init,
        .exit = regcache_lzo_exit,
        .read = regcache_lzo_read,
        .write = regcache_lzo_write,
        .sync = regcache_lzo_sync
};