linux/drivers/xen/tmem.c
/*
 * Xen implementation for transcendent memory (tmem)
 *
 * Copyright (C) 2009-2011 Oracle Corp.  All rights reserved.
 * Author: Dan Magenheimer
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/cleancache.h>
#include <linux/frontswap.h>

#include <xen/xen.h>
#include <xen/interface/xen.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/page.h>
#include <asm/xen/hypervisor.h>
#include <xen/tmem.h>

#ifndef CONFIG_XEN_TMEM_MODULE
bool __read_mostly tmem_enabled = false;

static int __init enable_tmem(char *s)
{
	tmem_enabled = true;
	return 1;
}
__setup("tmem", enable_tmem);
#endif
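
/*
 * The "tmem" boot parameter only exists for the built-in case; when
 * built as CONFIG_XEN_TMEM_MODULE, loading the module itself implies
 * enablement (tmem_enabled is presumably supplied by <xen/tmem.h>
 * instead of by the setup handler above).
 */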

#ifdef CONFIG_CLEANCACHE
static bool cleancache __read_mostly = true;
module_param(cleancache, bool, S_IRUGO);
#endif /* CONFIG_CLEANCACHE */

#ifdef CONFIG_FRONTSWAP
static bool frontswap __read_mostly = true;
module_param(frontswap, bool, S_IRUGO);
#else /* CONFIG_FRONTSWAP */
#define frontswap (0)
#endif /* CONFIG_FRONTSWAP */

#ifdef CONFIG_XEN_SELFBALLOONING
/*
 * selfballooning is only referenced under CONFIG_XEN_SELFBALLOONING
 * below, so define it here rather than under CONFIG_CLEANCACHE, which
 * would break builds with XEN_SELFBALLOONING set but CLEANCACHE unset.
 */
static bool selfballooning __read_mostly = true;
module_param(selfballooning, bool, S_IRUGO);
static bool selfshrinking __read_mostly = true;
module_param(selfshrinking, bool, S_IRUGO);
#endif /* CONFIG_XEN_SELFBALLOONING */

#define TMEM_CONTROL               0
#define TMEM_NEW_POOL              1
#define TMEM_DESTROY_POOL          2
#define TMEM_NEW_PAGE              3
#define TMEM_PUT_PAGE              4
#define TMEM_GET_PAGE              5
#define TMEM_FLUSH_PAGE            6
#define TMEM_FLUSH_OBJECT          7
#define TMEM_READ                  8
#define TMEM_WRITE                 9
#define TMEM_XCHG                 10
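
/*
 * Of these opcodes, only TMEM_NEW_POOL, TMEM_DESTROY_POOL,
 * TMEM_PUT_PAGE, TMEM_GET_PAGE, TMEM_FLUSH_PAGE and TMEM_FLUSH_OBJECT
 * are actually issued by this shim; the rest are carried along for
 * completeness of the tmem ABI.
 */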

/* Bits for HYPERVISOR_tmem_op(TMEM_NEW_POOL) */
#define TMEM_POOL_PERSIST          1
#define TMEM_POOL_SHARED           2
#define TMEM_POOL_PAGESIZE_SHIFT   4
#define TMEM_VERSION_SHIFT        24

struct tmem_pool_uuid {
	u64 uuid_lo;
	u64 uuid_hi;
};

struct tmem_oid {
	u64 oid[3];
};

#define TMEM_POOL_PRIVATE_UUID	{ 0, 0 }

/* xen tmem foundation ops/hypercalls */

static inline int xen_tmem_op(u32 tmem_cmd, u32 tmem_pool, struct tmem_oid oid,
	u32 index, unsigned long gmfn, u32 tmem_offset, u32 pfn_offset, u32 len)
{
	struct tmem_op op;
	int rc = 0;

	op.cmd = tmem_cmd;
	op.pool_id = tmem_pool;
	op.u.gen.oid[0] = oid.oid[0];
	op.u.gen.oid[1] = oid.oid[1];
	op.u.gen.oid[2] = oid.oid[2];
	op.u.gen.index = index;
	op.u.gen.tmem_offset = tmem_offset;
	op.u.gen.pfn_offset = pfn_offset;
	op.u.gen.len = len;
	set_xen_guest_handle(op.u.gen.gmfn, (void *)gmfn);
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
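
/*
 * Note on return values: for TMEM_GET_PAGE and TMEM_PUT_PAGE the
 * hypercall returns 1 on success, so the cleancache and frontswap
 * wrappers below translate 1 to the Linux convention of 0 for
 * success and anything else to -1.
 */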

static int xen_tmem_new_pool(struct tmem_pool_uuid uuid,
				u32 flags, unsigned long pagesize)
{
	struct tmem_op op;
	int rc = 0, pageshift;

	/* compute log2(pagesize) ... */
	for (pageshift = 0; pagesize != 1; pageshift++)
		pagesize >>= 1;
	/* ... and encode it relative to a 4K (2^12) base page */
	flags |= (pageshift - 12) << TMEM_POOL_PAGESIZE_SHIFT;
	flags |= TMEM_SPEC_VERSION << TMEM_VERSION_SHIFT;
	op.cmd = TMEM_NEW_POOL;
	op.u.new.uuid[0] = uuid.uuid_lo;
	op.u.new.uuid[1] = uuid.uuid_hi;
	op.u.new.flags = flags;
	rc = HYPERVISOR_tmem_op(&op);
	return rc;
}
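
/*
 * For example, a private 4K-page pool with TMEM_SPEC_VERSION == 1
 * encodes to flags == ((12 - 12) << 4) | (1 << 24) == 0x01000000,
 * plus TMEM_POOL_PERSIST and/or TMEM_POOL_SHARED as requested by the
 * caller.
 */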

/* xen generic tmem ops */

static int xen_tmem_put_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	/* PV guests address pages by machine frame; HVM guests are
	 * auto-translated and pass the pseudo-physical frame directly */
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_PUT_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_get_page(u32 pool_id, struct tmem_oid oid,
			     u32 index, unsigned long pfn)
{
	unsigned long gmfn = xen_pv_domain() ? pfn_to_mfn(pfn) : pfn;

	return xen_tmem_op(TMEM_GET_PAGE, pool_id, oid, index,
		gmfn, 0, 0, 0);
}

static int xen_tmem_flush_page(u32 pool_id, struct tmem_oid oid, u32 index)
{
	return xen_tmem_op(TMEM_FLUSH_PAGE, pool_id, oid, index,
		0, 0, 0, 0);
}

static int xen_tmem_flush_object(u32 pool_id, struct tmem_oid oid)
{
	return xen_tmem_op(TMEM_FLUSH_OBJECT, pool_id, oid, 0, 0, 0, 0, 0);
}

#ifdef CONFIG_CLEANCACHE
static int xen_tmem_destroy_pool(u32 pool_id)
{
	struct tmem_oid oid = { { 0 } };

	return xen_tmem_op(TMEM_DESTROY_POOL, pool_id, oid, 0, 0, 0, 0, 0);
}

/* cleancache ops */

static void tmem_cleancache_put_page(int pool, struct cleancache_filekey key,
				     pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);

	if (pool < 0)
		return;
	if (ind != index)
		return;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	(void)xen_tmem_put_page((u32)pool, oid, ind, pfn);
}

static int tmem_cleancache_get_page(int pool, struct cleancache_filekey key,
				    pgoff_t index, struct page *page)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;
	unsigned long pfn = page_to_pfn(page);
	int ret;

	/* translate return values to linux semantics */
	if (pool < 0)
		return -1;
	if (ind != index)
		return -1;
	ret = xen_tmem_get_page((u32)pool, oid, ind, pfn);
	if (ret == 1)
		return 0;
	else
		return -1;
}

static void tmem_cleancache_flush_page(int pool, struct cleancache_filekey key,
				       pgoff_t index)
{
	u32 ind = (u32) index;
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	if (ind != index)
		return;
	(void)xen_tmem_flush_page((u32)pool, oid, ind);
}

static void tmem_cleancache_flush_inode(int pool, struct cleancache_filekey key)
{
	struct tmem_oid oid = *(struct tmem_oid *)&key;

	if (pool < 0)
		return;
	(void)xen_tmem_flush_object((u32)pool, oid);
}

static void tmem_cleancache_flush_fs(int pool)
{
	if (pool < 0)
		return;
	(void)xen_tmem_destroy_pool((u32)pool);
}

static int tmem_cleancache_init_fs(size_t pagesize)
{
	struct tmem_pool_uuid uuid_private = TMEM_POOL_PRIVATE_UUID;

	return xen_tmem_new_pool(uuid_private, 0, pagesize);
}

static int tmem_cleancache_init_shared_fs(char *uuid, size_t pagesize)
{
	struct tmem_pool_uuid shared_uuid;

	shared_uuid.uuid_lo = *(u64 *)uuid;
	shared_uuid.uuid_hi = *(u64 *)(&uuid[8]);
	return xen_tmem_new_pool(shared_uuid, TMEM_POOL_SHARED, pagesize);
}
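
/*
 * Note that neither pool-creation path above passes TMEM_POOL_PERSIST,
 * so cleancache pools are ephemeral: the hypervisor may discard their
 * pages at any time, which is why tmem_cleancache_get_page() is
 * allowed to miss and cleancache remains a pure optimization.
 */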

static struct cleancache_ops tmem_cleancache_ops = {
	.put_page = tmem_cleancache_put_page,
	.get_page = tmem_cleancache_get_page,
	.invalidate_page = tmem_cleancache_flush_page,
	.invalidate_inode = tmem_cleancache_flush_inode,
	.invalidate_fs = tmem_cleancache_flush_fs,
	.init_shared_fs = tmem_cleancache_init_shared_fs,
	.init_fs = tmem_cleancache_init_fs
};
#endif

#ifdef CONFIG_FRONTSWAP
/* frontswap tmem operations */

/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
static int tmem_frontswap_poolid;

/*
 * Swizzling increases objects per swaptype, increasing tmem concurrency
 * for heavy swaploads.  Later, a larger nr_cpus may warrant a larger
 * SWIZ_BITS.
 */
#define SWIZ_BITS		4
#define SWIZ_MASK		((1 << SWIZ_BITS) - 1)
#define _oswiz(_type, _ind)	(((_type) << SWIZ_BITS) | ((_ind) & SWIZ_MASK))
#define iswiz(_ind)		((_ind) >> SWIZ_BITS)

static inline struct tmem_oid oswiz(unsigned type, u32 ind)
{
	struct tmem_oid oid = { .oid = { 0 } };
	oid.oid[0] = _oswiz(type, ind);
	return oid;
}
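
/*
 * Worked example with SWIZ_BITS == 4: for type 1 and offset 0x1234,
 * oswiz() yields oid[0] == (1 << 4) | (0x1234 & 0xf) == 0x14 and
 * iswiz() yields index 0x1234 >> 4 == 0x123, so the pages of one
 * swap type are spread across 16 tmem objects.
 */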

/* returns 0 if the page was successfully put into frontswap, -1 if not */
static int tmem_frontswap_store(unsigned type, pgoff_t offset,
				   struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	mb(); /* ensure page is quiescent; tmem may address it with an alias */
	ret = xen_tmem_put_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/*
 * returns 0 if the page was successfully gotten from frontswap, -1 if
 * it was not present (should never happen!)
 */
static int tmem_frontswap_load(unsigned type, pgoff_t offset,
				   struct page *page)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	unsigned long pfn = page_to_pfn(page);
	int pool = tmem_frontswap_poolid;
	int ret;

	if (pool < 0)
		return -1;
	if (ind64 != ind)
		return -1;
	ret = xen_tmem_get_page(pool, oswiz(type, ind), iswiz(ind), pfn);
	/* translate Xen tmem return values to linux semantics */
	if (ret == 1)
		return 0;
	else
		return -1;
}

/* flush a single page from frontswap */
static void tmem_frontswap_flush_page(unsigned type, pgoff_t offset)
{
	u64 ind64 = (u64)offset;
	u32 ind = (u32)offset;
	int pool = tmem_frontswap_poolid;

	if (pool < 0)
		return;
	if (ind64 != ind)
		return;
	(void) xen_tmem_flush_page(pool, oswiz(type, ind), iswiz(ind));
}
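
/*
 * Because oswiz() scatters each swap type's pages across 2^SWIZ_BITS
 * tmem objects, invalidating a whole swap area means flushing every
 * one of those swizzled objects in turn, as done below.
 */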
/* flush all pages from the passed swaptype */
static void tmem_frontswap_flush_area(unsigned type)
{
	int pool = tmem_frontswap_poolid;
	int ind;

	if (pool < 0)
		return;
	for (ind = SWIZ_MASK; ind >= 0; ind--)
		(void)xen_tmem_flush_object(pool, oswiz(type, ind));
}

static void tmem_frontswap_init(unsigned ignored)
{
	struct tmem_pool_uuid private = TMEM_POOL_PRIVATE_UUID;

	/* a single tmem poolid is used for all frontswap "types" (swapfiles) */
	if (tmem_frontswap_poolid < 0)
		tmem_frontswap_poolid =
		    xen_tmem_new_pool(private, TMEM_POOL_PERSIST, PAGE_SIZE);
}
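
/*
 * The frontswap pool is created with TMEM_POOL_PERSIST because it
 * holds real swap data: unlike the ephemeral cleancache pools, a
 * successful store here must guarantee that a later load succeeds
 * (hence the "should never happen!" note on tmem_frontswap_load()).
 */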

static struct frontswap_ops tmem_frontswap_ops = {
	.store = tmem_frontswap_store,
	.load = tmem_frontswap_load,
	.invalidate_page = tmem_frontswap_flush_page,
	.invalidate_area = tmem_frontswap_flush_area,
	.init = tmem_frontswap_init
};
#endif

static int xen_tmem_init(void)
{
	if (!xen_domain())
		return 0;
#ifdef CONFIG_FRONTSWAP
	if (tmem_enabled && frontswap) {
		char *s = "";
		struct frontswap_ops *old_ops;

		tmem_frontswap_poolid = -1;
		old_ops = frontswap_register_ops(&tmem_frontswap_ops);
		if (IS_ERR(old_ops) || old_ops) {
			if (IS_ERR(old_ops))
				return PTR_ERR(old_ops);
			s = " (WARNING: frontswap_ops overridden)";
		}
		printk(KERN_INFO "frontswap enabled, RAM provided by "
				 "Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_CLEANCACHE
	BUG_ON(sizeof(struct cleancache_filekey) != sizeof(struct tmem_oid));
	if (tmem_enabled && cleancache) {
		char *s = "";
		struct cleancache_ops *old_ops =
			cleancache_register_ops(&tmem_cleancache_ops);
		if (old_ops)
			s = " (WARNING: cleancache_ops overridden)";
		printk(KERN_INFO "cleancache enabled, RAM provided by "
				 "Xen Transcendent Memory%s\n", s);
	}
#endif
#ifdef CONFIG_XEN_SELFBALLOONING
	/*
	 * There is no point in driving pages to the swap system if they
	 * aren't going anywhere in the tmem universe.
	 */
	if (!frontswap) {
		selfshrinking = false;
		selfballooning = false;
	}
	xen_selfballoon_init(selfballooning, selfshrinking);
#endif
	return 0;
}

module_init(xen_tmem_init)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Dan Magenheimer <dan.magenheimer@oracle.com>");
MODULE_DESCRIPTION("Shim to Xen transcendent memory");