/* qemu/page_cache.c */
/*
 * Page cache for QEMU
 * The cache is based on a hash of the page address
 *
 * Copyright 2012 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Orit Wasserman  <owasserm@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
  14
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <sys/time.h>
#include <sys/types.h>

#include <glib.h>

#include "qemu-common.h"
#include "migration/page_cache.h"
  28
/* Debug tracing: build with DEBUG_CACHE defined to get "cache: ..."
 * messages on stdout; otherwise DPRINTF compiles away to nothing. */
#ifdef DEBUG_CACHE
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "cache: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif
  36
typedef struct CacheItem CacheItem;

/* One slot of the direct-mapped page cache table. */
struct CacheItem {
    uint64_t it_addr;  /* page address of the cached page; -1 when empty */
    uint64_t it_age;   /* insertion stamp, taken from PageCache.max_item_age */
    uint8_t *it_data;  /* cached page contents; owned by the cache
                        * (cache_fini() frees it), NULL when empty */
};
  44
/* Direct-mapped cache of guest pages, indexed by (addr / page_size). */
struct PageCache {
    CacheItem *page_cache;     /* table of max_num_items slots */
    unsigned int page_size;    /* bytes per cached page; divisor for the
                                * slot index in cache_get_cache_pos() */
    int64_t max_num_items;     /* table capacity; kept a power of 2 so the
                                * slot index can be masked, not modulo'd */
    uint64_t max_item_age;     /* monotonically increasing insertion counter */
    int64_t num_items;         /* number of occupied slots */
};
  52
  53PageCache *cache_init(int64_t num_pages, unsigned int page_size)
  54{
  55    int64_t i;
  56
  57    PageCache *cache;
  58
  59    if (num_pages <= 0) {
  60        DPRINTF("invalid number of pages\n");
  61        return NULL;
  62    }
  63
  64    cache = g_malloc(sizeof(*cache));
  65
  66    /* round down to the nearest power of 2 */
  67    if (!is_power_of_2(num_pages)) {
  68        num_pages = pow2floor(num_pages);
  69        DPRINTF("rounding down to %" PRId64 "\n", num_pages);
  70    }
  71    cache->page_size = page_size;
  72    cache->num_items = 0;
  73    cache->max_item_age = 0;
  74    cache->max_num_items = num_pages;
  75
  76    DPRINTF("Setting cache buckets to %" PRId64 "\n", cache->max_num_items);
  77
  78    cache->page_cache = g_malloc((cache->max_num_items) *
  79                                 sizeof(*cache->page_cache));
  80
  81    for (i = 0; i < cache->max_num_items; i++) {
  82        cache->page_cache[i].it_data = NULL;
  83        cache->page_cache[i].it_age = 0;
  84        cache->page_cache[i].it_addr = -1;
  85    }
  86
  87    return cache;
  88}
  89
  90void cache_fini(PageCache *cache)
  91{
  92    int64_t i;
  93
  94    g_assert(cache);
  95    g_assert(cache->page_cache);
  96
  97    for (i = 0; i < cache->max_num_items; i++) {
  98        g_free(cache->page_cache[i].it_data);
  99    }
 100
 101    g_free(cache->page_cache);
 102    cache->page_cache = NULL;
 103}
 104
 105static size_t cache_get_cache_pos(const PageCache *cache,
 106                                  uint64_t address)
 107{
 108    size_t pos;
 109
 110    g_assert(cache->max_num_items);
 111    pos = (address / cache->page_size) & (cache->max_num_items - 1);
 112    return pos;
 113}
 114
 115bool cache_is_cached(const PageCache *cache, uint64_t addr)
 116{
 117    size_t pos;
 118
 119    g_assert(cache);
 120    g_assert(cache->page_cache);
 121
 122    pos = cache_get_cache_pos(cache, addr);
 123
 124    return (cache->page_cache[pos].it_addr == addr);
 125}
 126
 127static CacheItem *cache_get_by_addr(const PageCache *cache, uint64_t addr)
 128{
 129    size_t pos;
 130
 131    g_assert(cache);
 132    g_assert(cache->page_cache);
 133
 134    pos = cache_get_cache_pos(cache, addr);
 135
 136    return &cache->page_cache[pos];
 137}
 138
/*
 * Return the page data stored in the slot that @addr maps to.
 * NOTE(review): no check that the slot actually holds @addr — this
 * returns whatever occupies the slot (possibly NULL, or a colliding
 * page's data); callers appear expected to call cache_is_cached() first.
 */
uint8_t *get_cached_data(const PageCache *cache, uint64_t addr)
{
    return cache_get_by_addr(cache, addr)->it_data;
}
 143
 144void cache_insert(PageCache *cache, uint64_t addr, uint8_t *pdata)
 145{
 146
 147    CacheItem *it = NULL;
 148
 149    g_assert(cache);
 150    g_assert(cache->page_cache);
 151
 152    /* actual update of entry */
 153    it = cache_get_by_addr(cache, addr);
 154
 155    if (!it->it_data) {
 156        cache->num_items++;
 157    }
 158
 159    it->it_data = pdata;
 160    it->it_age = ++cache->max_item_age;
 161    it->it_addr = addr;
 162}
 163
 164int64_t cache_resize(PageCache *cache, int64_t new_num_pages)
 165{
 166    PageCache *new_cache;
 167    int64_t i;
 168
 169    CacheItem *old_it, *new_it;
 170
 171    g_assert(cache);
 172
 173    /* cache was not inited */
 174    if (cache->page_cache == NULL) {
 175        return -1;
 176    }
 177
 178    /* same size */
 179    if (pow2floor(new_num_pages) == cache->max_num_items) {
 180        return cache->max_num_items;
 181    }
 182
 183    new_cache = cache_init(new_num_pages, cache->page_size);
 184    if (!(new_cache)) {
 185        DPRINTF("Error creating new cache\n");
 186        return -1;
 187    }
 188
 189    /* move all data from old cache */
 190    for (i = 0; i < cache->max_num_items; i++) {
 191        old_it = &cache->page_cache[i];
 192        if (old_it->it_addr != -1) {
 193            /* check for collision, if there is, keep MRU page */
 194            new_it = cache_get_by_addr(new_cache, old_it->it_addr);
 195            if (new_it->it_data) {
 196                /* keep the MRU page */
 197                if (new_it->it_age >= old_it->it_age) {
 198                    g_free(old_it->it_data);
 199                } else {
 200                    g_free(new_it->it_data);
 201                    new_it->it_data = old_it->it_data;
 202                    new_it->it_age = old_it->it_age;
 203                    new_it->it_addr = old_it->it_addr;
 204                }
 205            } else {
 206                cache_insert(new_cache, old_it->it_addr, old_it->it_data);
 207            }
 208        }
 209    }
 210
 211    cache->page_cache = new_cache->page_cache;
 212    cache->max_num_items = new_cache->max_num_items;
 213    cache->num_items = new_cache->num_items;
 214
 215    g_free(new_cache);
 216
 217    return cache->max_num_items;
 218}
 219