/* linux/tools/testing/nvdimm/test/iomap.c */
   1// SPDX-License-Identifier: GPL-2.0-only
   2/*
   3 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
   4 */
   5#include <linux/memremap.h>
   6#include <linux/rculist.h>
   7#include <linux/export.h>
   8#include <linux/ioport.h>
   9#include <linux/module.h>
  10#include <linux/types.h>
  11#include <linux/pfn_t.h>
  12#include <linux/acpi.h>
  13#include <linux/io.h>
  14#include <linux/mm.h>
  15#include "nfit_test.h"
  16
/* Single-entry list of registered test-harness callbacks. */
static LIST_HEAD(iomap_head);

/*
 * Callback bundle published by the nfit_test module via nfit_test_setup().
 * Only this one static instance is ever placed on iomap_head; readers find
 * it with list_first_or_null_rcu().
 */
static struct iomap_ops {
	nfit_test_lookup_fn nfit_test_lookup;	/* phys addr -> emulated resource */
	nfit_test_evaluate_dsm_fn evaluate_dsm;	/* intercepted ACPI _DSM handler */
	struct list_head list;
} iomap_ops = {
	.list = LIST_HEAD_INIT(iomap_ops.list),
};
  26
  27void nfit_test_setup(nfit_test_lookup_fn lookup,
  28                nfit_test_evaluate_dsm_fn evaluate)
  29{
  30        iomap_ops.nfit_test_lookup = lookup;
  31        iomap_ops.evaluate_dsm = evaluate;
  32        list_add_rcu(&iomap_ops.list, &iomap_head);
  33}
  34EXPORT_SYMBOL(nfit_test_setup);
  35
/*
 * Unpublish the test callbacks and wait until every RCU reader that may
 * still hold a reference to iomap_ops has finished, so the nfit_test
 * module can safely go away afterwards.
 */
void nfit_test_teardown(void)
{
	list_del_rcu(&iomap_ops.list);
	synchronize_rcu();
}
EXPORT_SYMBOL(nfit_test_teardown);
  42
  43static struct nfit_test_resource *__get_nfit_res(resource_size_t resource)
  44{
  45        struct iomap_ops *ops;
  46
  47        ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
  48        if (ops)
  49                return ops->nfit_test_lookup(resource);
  50        return NULL;
  51}
  52
/*
 * RCU-protected wrapper around __get_nfit_res(): returns the emulated
 * resource covering @resource, or NULL if the address is not test-owned.
 */
struct nfit_test_resource *get_nfit_res(resource_size_t resource)
{
	struct nfit_test_resource *res;

	rcu_read_lock();
	res = __get_nfit_res(resource);
	rcu_read_unlock();

	return res;
}
EXPORT_SYMBOL(get_nfit_res);
  64
  65void __iomem *__nfit_test_ioremap(resource_size_t offset, unsigned long size,
  66                void __iomem *(*fallback_fn)(resource_size_t, unsigned long))
  67{
  68        struct nfit_test_resource *nfit_res = get_nfit_res(offset);
  69
  70        if (nfit_res)
  71                return (void __iomem *) nfit_res->buf + offset
  72                        - nfit_res->res.start;
  73        return fallback_fn(offset, size);
  74}
  75
  76void __iomem *__wrap_devm_ioremap_nocache(struct device *dev,
  77                resource_size_t offset, unsigned long size)
  78{
  79        struct nfit_test_resource *nfit_res = get_nfit_res(offset);
  80
  81        if (nfit_res)
  82                return (void __iomem *) nfit_res->buf + offset
  83                        - nfit_res->res.start;
  84        return devm_ioremap_nocache(dev, offset, size);
  85}
  86EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
  87
  88void *__wrap_devm_memremap(struct device *dev, resource_size_t offset,
  89                size_t size, unsigned long flags)
  90{
  91        struct nfit_test_resource *nfit_res = get_nfit_res(offset);
  92
  93        if (nfit_res)
  94                return nfit_res->buf + offset - nfit_res->res.start;
  95        return devm_memremap(dev, offset, size, flags);
  96}
  97EXPORT_SYMBOL(__wrap_devm_memremap);
  98
  99static void nfit_test_kill(void *_pgmap)
 100{
 101        struct dev_pagemap *pgmap = _pgmap;
 102
 103        WARN_ON(!pgmap || !pgmap->ref);
 104
 105        if (pgmap->ops && pgmap->ops->kill)
 106                pgmap->ops->kill(pgmap);
 107        else
 108                percpu_ref_kill(pgmap->ref);
 109
 110        if (pgmap->ops && pgmap->ops->cleanup) {
 111                pgmap->ops->cleanup(pgmap);
 112        } else {
 113                wait_for_completion(&pgmap->done);
 114                percpu_ref_exit(pgmap->ref);
 115        }
 116}
 117
/*
 * percpu_ref release callback for the internal ref installed by
 * __wrap_devm_memremap_pages(): signals nfit_test_kill()'s
 * wait_for_completion() that the last reference is gone.
 */
static void dev_pagemap_percpu_release(struct percpu_ref *ref)
{
	struct dev_pagemap *pgmap =
		container_of(ref, struct dev_pagemap, internal_ref);

	complete(&pgmap->done);
}
 125
/*
 * Wrapped devm_memremap_pages(): for emulated ranges, skip real page-map
 * setup and hand back the test buffer, while still honoring the pgmap
 * reference-count contract so teardown behaves like the real thing:
 *  - no pgmap->ref supplied: install an internal percpu ref, in which case
 *    the caller must NOT also supply ops->kill/->cleanup;
 *  - caller-supplied pgmap->ref: both ops->kill and ops->cleanup are
 *    mandatory so nfit_test_kill() can tear the ref down.
 */
void *__wrap_devm_memremap_pages(struct device *dev, struct dev_pagemap *pgmap)
{
	int error;
	resource_size_t offset = pgmap->res.start;
	struct nfit_test_resource *nfit_res = get_nfit_res(offset);

	/* Not an emulated range: defer to the real implementation. */
	if (!nfit_res)
		return devm_memremap_pages(dev, pgmap);

	pgmap->dev = dev;
	if (!pgmap->ref) {
		/* Mixing an internal ref with caller teardown ops is invalid. */
		if (pgmap->ops && (pgmap->ops->kill || pgmap->ops->cleanup))
			return ERR_PTR(-EINVAL);

		init_completion(&pgmap->done);
		error = percpu_ref_init(&pgmap->internal_ref,
				dev_pagemap_percpu_release, 0, GFP_KERNEL);
		if (error)
			return ERR_PTR(error);
		pgmap->ref = &pgmap->internal_ref;
	} else {
		/* External ref requires a complete teardown definition. */
		if (!pgmap->ops || !pgmap->ops->kill || !pgmap->ops->cleanup) {
			WARN(1, "Missing reference count teardown definition\n");
			return ERR_PTR(-EINVAL);
		}
	}

	/* Run nfit_test_kill() automatically at device teardown. */
	error = devm_add_action_or_reset(dev, nfit_test_kill, pgmap);
	if (error)
		return ERR_PTR(error);
	/* Translate the physical offset into the emulated buffer. */
	return nfit_res->buf + offset - nfit_res->res.start;
}
EXPORT_SYMBOL_GPL(__wrap_devm_memremap_pages);
 159
 160pfn_t __wrap_phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
 161{
 162        struct nfit_test_resource *nfit_res = get_nfit_res(addr);
 163
 164        if (nfit_res)
 165                flags &= ~PFN_MAP;
 166        return phys_to_pfn_t(addr, flags);
 167}
 168EXPORT_SYMBOL(__wrap_phys_to_pfn_t);
 169
 170void *__wrap_memremap(resource_size_t offset, size_t size,
 171                unsigned long flags)
 172{
 173        struct nfit_test_resource *nfit_res = get_nfit_res(offset);
 174
 175        if (nfit_res)
 176                return nfit_res->buf + offset - nfit_res->res.start;
 177        return memremap(offset, size, flags);
 178}
 179EXPORT_SYMBOL(__wrap_memremap);
 180
 181void __wrap_devm_memunmap(struct device *dev, void *addr)
 182{
 183        struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
 184
 185        if (nfit_res)
 186                return;
 187        return devm_memunmap(dev, addr);
 188}
 189EXPORT_SYMBOL(__wrap_devm_memunmap);
 190
/* Wrapped ioremap_nocache(): interposes via __nfit_test_ioremap(). */
void __iomem *__wrap_ioremap_nocache(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_nocache);
}
EXPORT_SYMBOL(__wrap_ioremap_nocache);
 196
/* Wrapped ioremap_wc(): interposes via __nfit_test_ioremap(). */
void __iomem *__wrap_ioremap_wc(resource_size_t offset, unsigned long size)
{
	return __nfit_test_ioremap(offset, size, ioremap_wc);
}
EXPORT_SYMBOL(__wrap_ioremap_wc);
 202
 203void __wrap_iounmap(volatile void __iomem *addr)
 204{
 205        struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
 206        if (nfit_res)
 207                return;
 208        return iounmap(addr);
 209}
 210EXPORT_SYMBOL(__wrap_iounmap);
 211
 212void __wrap_memunmap(void *addr)
 213{
 214        struct nfit_test_resource *nfit_res = get_nfit_res((long) addr);
 215
 216        if (nfit_res)
 217                return;
 218        return memunmap(addr);
 219}
 220EXPORT_SYMBOL(__wrap_memunmap);
 221
/* Forward declaration: nfit_devres_release() below re-enters this. */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n);

/*
 * devres destructor for device-managed test regions: re-enters
 * nfit_test_release_region() with dev == NULL to perform the real
 * list removal and free.  @data is a struct resource ** stored by
 * nfit_test_request_region().
 */
static void nfit_devres_release(struct device *dev, void *data)
{
	struct resource *res = *((struct resource **) data);

	WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
			resource_size(res)));
}
 233
 234static int match(struct device *dev, void *__res, void *match_data)
 235{
 236        struct resource *res = *((struct resource **) __res);
 237        resource_size_t start = *((resource_size_t *) match_data);
 238
 239        return res->start == start;
 240}
 241
/*
 * Try to release a region against the emulated iomem space.
 *
 * Returns true when @start falls inside an nfit_test-owned range (the
 * matching request was released, or — with @dev set — the devres-managed
 * release was triggered); false means the caller should fall back to the
 * real release path.
 */
static bool nfit_test_release_region(struct device *dev,
		struct resource *parent, resource_size_t start,
		resource_size_t n)
{
	if (parent == &iomem_resource) {
		struct nfit_test_resource *nfit_res = get_nfit_res(start);

		if (nfit_res) {
			struct nfit_test_request *req;
			struct resource *res = NULL;

			/*
			 * Device-managed path: devres fires
			 * nfit_devres_release(), which re-enters here with
			 * dev == NULL to do the actual removal.
			 */
			if (dev) {
				devres_release(dev, nfit_devres_release, match,
						&start);
				return true;
			}

			/* Unlink the matching request under the lock. */
			spin_lock(&nfit_res->lock);
			list_for_each_entry(req, &nfit_res->requests, list)
				if (req->res.start == start) {
					res = &req->res;
					list_del(&req->list);
					break;
				}
			spin_unlock(&nfit_res->lock);

			/* Unknown range or size mismatch is a test bug. */
			WARN(!res || resource_size(res) != n,
					"%s: start: %llx n: %llx mismatch: %pr\n",
						__func__, start, n, res);
			if (res)
				kfree(req);
			return true;
		}
	}
	return false;
}
 278
 279static struct resource *nfit_test_request_region(struct device *dev,
 280                struct resource *parent, resource_size_t start,
 281                resource_size_t n, const char *name, int flags)
 282{
 283        struct nfit_test_resource *nfit_res;
 284
 285        if (parent == &iomem_resource) {
 286                nfit_res = get_nfit_res(start);
 287                if (nfit_res) {
 288                        struct nfit_test_request *req;
 289                        struct resource *res = NULL;
 290
 291                        if (start + n > nfit_res->res.start
 292                                        + resource_size(&nfit_res->res)) {
 293                                pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
 294                                                __func__, start, n,
 295                                                &nfit_res->res);
 296                                return NULL;
 297                        }
 298
 299                        spin_lock(&nfit_res->lock);
 300                        list_for_each_entry(req, &nfit_res->requests, list)
 301                                if (start == req->res.start) {
 302                                        res = &req->res;
 303                                        break;
 304                                }
 305                        spin_unlock(&nfit_res->lock);
 306
 307                        if (res) {
 308                                WARN(1, "%pr already busy\n", res);
 309                                return NULL;
 310                        }
 311
 312                        req = kzalloc(sizeof(*req), GFP_KERNEL);
 313                        if (!req)
 314                                return NULL;
 315                        INIT_LIST_HEAD(&req->list);
 316                        res = &req->res;
 317
 318                        res->start = start;
 319                        res->end = start + n - 1;
 320                        res->name = name;
 321                        res->flags = resource_type(parent);
 322                        res->flags |= IORESOURCE_BUSY | flags;
 323                        spin_lock(&nfit_res->lock);
 324                        list_add(&req->list, &nfit_res->requests);
 325                        spin_unlock(&nfit_res->lock);
 326
 327                        if (dev) {
 328                                struct resource **d;
 329
 330                                d = devres_alloc(nfit_devres_release,
 331                                                sizeof(struct resource *),
 332                                                GFP_KERNEL);
 333                                if (!d)
 334                                        return NULL;
 335                                *d = res;
 336                                devres_add(dev, d);
 337                        }
 338
 339                        pr_debug("%s: %pr\n", __func__, res);
 340                        return res;
 341                }
 342        }
 343        if (dev)
 344                return __devm_request_region(dev, parent, start, n, name);
 345        return __request_region(parent, start, n, name, flags);
 346}
 347
/* Wrapped __request_region(): non-device-managed entry point. */
struct resource *__wrap___request_region(struct resource *parent,
		resource_size_t start, resource_size_t n, const char *name,
		int flags)
{
	return nfit_test_request_region(NULL, parent, start, n, name, flags);
}
EXPORT_SYMBOL(__wrap___request_region);
 355
 356int __wrap_insert_resource(struct resource *parent, struct resource *res)
 357{
 358        if (get_nfit_res(res->start))
 359                return 0;
 360        return insert_resource(parent, res);
 361}
 362EXPORT_SYMBOL(__wrap_insert_resource);
 363
 364int __wrap_remove_resource(struct resource *res)
 365{
 366        if (get_nfit_res(res->start))
 367                return 0;
 368        return remove_resource(res);
 369}
 370EXPORT_SYMBOL(__wrap_remove_resource);
 371
 372struct resource *__wrap___devm_request_region(struct device *dev,
 373                struct resource *parent, resource_size_t start,
 374                resource_size_t n, const char *name)
 375{
 376        if (!dev)
 377                return NULL;
 378        return nfit_test_request_region(dev, parent, start, n, name, 0);
 379}
 380EXPORT_SYMBOL(__wrap___devm_request_region);
 381
 382void __wrap___release_region(struct resource *parent, resource_size_t start,
 383                resource_size_t n)
 384{
 385        if (!nfit_test_release_region(NULL, parent, start, n))
 386                __release_region(parent, start, n);
 387}
 388EXPORT_SYMBOL(__wrap___release_region);
 389
 390void __wrap___devm_release_region(struct device *dev, struct resource *parent,
 391                resource_size_t start, resource_size_t n)
 392{
 393        if (!nfit_test_release_region(dev, parent, start, n))
 394                __devm_release_region(dev, parent, start, n);
 395}
 396EXPORT_SYMBOL(__wrap___devm_release_region);
 397
/*
 * Wrapped acpi_evaluate_object(): intercept "_FIT" evaluations on
 * test-owned handles and hand back the pre-built FIT payload; every other
 * call goes to the real ACPI interpreter.
 */
acpi_status __wrap_acpi_evaluate_object(acpi_handle handle, acpi_string path,
		struct acpi_object_list *p, struct acpi_buffer *buf)
{
	struct nfit_test_resource *nfit_res = get_nfit_res((long) handle);
	union acpi_object **obj;

	/* Pass through unless this is a _FIT call on an emulated handle. */
	if (!nfit_res || strcmp(path, "_FIT") || !buf)
		return acpi_evaluate_object(handle, path, p, buf);

	/* nfit_res->buf stores a pointer to the prepared acpi_object. */
	obj = nfit_res->buf;
	buf->length = sizeof(union acpi_object);
	buf->pointer = *obj;
	return AE_OK;
}
EXPORT_SYMBOL(__wrap_acpi_evaluate_object);
 413
 414union acpi_object * __wrap_acpi_evaluate_dsm(acpi_handle handle, const guid_t *guid,
 415                u64 rev, u64 func, union acpi_object *argv4)
 416{
 417        union acpi_object *obj = ERR_PTR(-ENXIO);
 418        struct iomap_ops *ops;
 419
 420        rcu_read_lock();
 421        ops = list_first_or_null_rcu(&iomap_head, typeof(*ops), list);
 422        if (ops)
 423                obj = ops->evaluate_dsm(handle, guid, rev, func, argv4);
 424        rcu_read_unlock();
 425
 426        if (IS_ERR(obj))
 427                return acpi_evaluate_dsm(handle, guid, rev, func, argv4);
 428        return obj;
 429}
 430EXPORT_SYMBOL(__wrap_acpi_evaluate_dsm);
 431
 432MODULE_LICENSE("GPL v2");
 433