uboot/drivers/block/blkcache.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) Nelson Integration, LLC 2016
 * Author: Eric Nelson<eric@nelint.com>
 *
 */
#include <config.h>
#include <common.h>
#include <malloc.h>
#include <part.h>
#include <linux/ctype.h>
#include <linux/list.h>

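/*
 * struct block_cache_node - one cached range of consecutive blocks
 *
 * lh:     list head; the cache list is kept in MRU order
 * iftype: interface type of the device the blocks came from
 * devnum: device number on that interface
 * start:  first block number covered by this entry
 * blkcnt: number of blocks held in the entry
 * blksz:  block size in bytes
 * cache:  buffer of blkcnt * blksz bytes holding the block data
 */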
struct block_cache_node {
        struct list_head lh;
        int iftype;
        int devnum;
        lbaint_t start;
        lbaint_t blkcnt;
        unsigned long blksz;
        char *cache;
};

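/* MRU-ordered list of cache entries: most recently used at the head */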
static LIST_HEAD(block_cache);

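/*
 * Cache bookkeeping: hit/miss counters plus the configured limits.
 * The defaults below allow up to 32 entries of at most 2 blocks each.
 */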
static struct block_cache_stats _stats = {
        .max_blocks_per_entry = 2,
        .max_entries = 32
};

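/*
 * cache_find() - look up a cache entry fully covering a block range
 *
 * Returns the entry for the given interface type and device number
 * whose cached range contains [start, start + blkcnt) at block size
 * blksz, or NULL if there is no such entry.  A hit is moved to the
 * head of the list to preserve MRU ordering.
 */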
static struct block_cache_node *cache_find(int iftype, int devnum,
                                           lbaint_t start, lbaint_t blkcnt,
                                           unsigned long blksz)
{
        struct block_cache_node *node;

        list_for_each_entry(node, &block_cache, lh)
                if ((node->iftype == iftype) &&
                    (node->devnum == devnum) &&
                    (node->blksz == blksz) &&
                    (node->start <= start) &&
                    (node->start + node->blkcnt >= start + blkcnt)) {
                        if (block_cache.next != &node->lh) {
                                /* maintain MRU ordering */
                                list_del(&node->lh);
                                list_add(&node->lh, &block_cache);
                        }
                        return node;
                }
        return NULL;
}

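/*
 * blkcache_read() - attempt to satisfy a read from the cache
 *
 * Copies blkcnt blocks starting at start into buffer if a cache entry
 * covers the whole range.  Returns 1 and counts a hit on success,
 * returns 0 and counts a miss otherwise.
 */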
int blkcache_read(int iftype, int devnum,
                  lbaint_t start, lbaint_t blkcnt,
                  unsigned long blksz, void *buffer)
{
        struct block_cache_node *node = cache_find(iftype, devnum, start,
                                                   blkcnt, blksz);
        if (node) {
                const char *src = node->cache + (start - node->start) * blksz;
                memcpy(buffer, src, blksz * blkcnt);
                debug("hit: start " LBAF ", count " LBAFU "\n",
                      start, blkcnt);
                ++_stats.hits;
                return 1;
        }

        debug("miss: start " LBAF ", count " LBAFU "\n",
              start, blkcnt);
        ++_stats.misses;
        return 0;
}

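/*
 * blkcache_fill() - add a block range to the cache after a device read
 *
 * Ranges larger than max_blocks_per_entry are not cached, and nothing
 * is cached while max_entries is 0.  When the cache is full, the least
 * recently used entry is evicted and its node (and, if large enough,
 * its data buffer) is reused for the new range.
 */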
void blkcache_fill(int iftype, int devnum,
                   lbaint_t start, lbaint_t blkcnt,
                   unsigned long blksz, void const *buffer)
{
        lbaint_t bytes;
        struct block_cache_node *node;

        /* don't cache big stuff */
        if (blkcnt > _stats.max_blocks_per_entry)
                return;

        if (_stats.max_entries == 0)
                return;

        bytes = blksz * blkcnt;
        if (_stats.max_entries <= _stats.entries) {
                /*
                 * pop LRU: the tail of the MRU-ordered list; the cast
                 * is valid because lh is the first member of the node
                 */
                node = (struct block_cache_node *)block_cache.prev;
                list_del(&node->lh);
                _stats.entries--;
                debug("drop: start " LBAF ", count " LBAFU "\n",
                      node->start, node->blkcnt);
                /* keep the old data buffer only if it is big enough */
                if (node->blkcnt * node->blksz < bytes) {
                        free(node->cache);
                        node->cache = NULL;
                }
        } else {
                node = malloc(sizeof(*node));
                if (!node)
                        return;
                node->cache = NULL;
        }

        if (!node->cache) {
                node->cache = malloc(bytes);
                if (!node->cache) {
                        free(node);
                        return;
                }
        }

        debug("fill: start " LBAF ", count " LBAFU "\n",
              start, blkcnt);

        node->iftype = iftype;
        node->devnum = devnum;
        node->start = start;
        node->blkcnt = blkcnt;
        node->blksz = blksz;
        memcpy(node->cache, buffer, bytes);
        list_add(&node->lh, &block_cache);
        _stats.entries++;
}

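/*
 * blkcache_invalidate() - drop all cache entries for one device
 *
 * Removes and frees every cache entry whose interface type and device
 * number match the given values.
 */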
void blkcache_invalidate(int iftype, int devnum)
{
        struct list_head *entry, *n;
        struct block_cache_node *node;

        list_for_each_safe(entry, n, &block_cache) {
                node = (struct block_cache_node *)entry;
                if ((node->iftype == iftype) &&
                    (node->devnum == devnum)) {
                        list_del(entry);
                        free(node->cache);
                        free(node);
                        --_stats.entries;
                }
        }
}

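/*
 * blkcache_configure() - set the cache limits
 *
 * blocks is the maximum number of blocks per entry and entries the
 * maximum number of entries.  Changing either limit empties the whole
 * cache; in all cases the hit and miss counters are reset.
 */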
void blkcache_configure(unsigned blocks, unsigned entries)
{
        struct block_cache_node *node;

        if ((blocks != _stats.max_blocks_per_entry) ||
            (entries != _stats.max_entries)) {
                /* invalidate cache */
                while (!list_empty(&block_cache)) {
                        node = (struct block_cache_node *)block_cache.next;
                        list_del(&node->lh);
                        free(node->cache);
                        free(node);
                }
                _stats.entries = 0;
        }

        _stats.max_blocks_per_entry = blocks;
        _stats.max_entries = entries;

        _stats.hits = 0;
        _stats.misses = 0;
}

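/*
 * blkcache_stats() - report and reset the hit/miss counters
 *
 * Copies the current statistics into stats, then clears the hit and
 * miss counts so the next call reports activity since this one.
 */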
void blkcache_stats(struct block_cache_stats *stats)
{
        memcpy(stats, &_stats, sizeof(*stats));
        _stats.hits = 0;
        _stats.misses = 0;
}
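
/*
 * Example (illustrative sketch, not part of this file): a block
 * driver's read path would typically try the cache before touching
 * the hardware and fill it after a successful device read.  The
 * cached_read() and real_read() names below are placeholders for the
 * caller's own routines; dev_desc is assumed to be a struct blk_desc
 * with if_type, devnum and blksz fields.
 *
 *     static ulong cached_read(struct blk_desc *dev_desc, lbaint_t start,
 *                              lbaint_t blkcnt, void *buffer)
 *     {
 *             ulong n;
 *
 *             if (blkcache_read(dev_desc->if_type, dev_desc->devnum,
 *                               start, blkcnt, dev_desc->blksz, buffer))
 *                     return blkcnt;
 *
 *             n = real_read(dev_desc, start, blkcnt, buffer);
 *             if (n == blkcnt)
 *                     blkcache_fill(dev_desc->if_type, dev_desc->devnum,
 *                                   start, blkcnt, dev_desc->blksz, buffer);
 *
 *             return n;
 *     }
 */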