linux/drivers/iommu/ioasid.c
// SPDX-License-Identifier: GPL-2.0
/*
 * I/O Address Space ID allocator. There is one global IOASID space, split into
 * subsets. Users create a subset with DECLARE_IOASID_SET, then allocate and
 * free IOASIDs with ioasid_alloc() and ioasid_free().
 */
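
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * file): declare a set, then allocate a PASID with some private data
 * attached; my_ioasid_set and my_mm are illustrative names.
 *
 *	DECLARE_IOASID_SET(my_ioasid_set);
 *
 *	ioasid_t pasid = ioasid_alloc(&my_ioasid_set, 1, (1 << 20) - 1, my_mm);
 *
 *	if (pasid == INVALID_IOASID)
 *		return -ENOSPC;
 *	...
 *	ioasid_free(pasid);
 */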
#include <linux/ioasid.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/xarray.h>

struct ioasid_data {
	ioasid_t id;
	struct ioasid_set *set;
	void *private;
	struct rcu_head rcu;
};

/*
 * struct ioasid_allocator_data - Internal data structure to hold information
 * about an allocator. There are two types of allocators:
 *
 * - Default allocator always has its own XArray to track the IOASIDs allocated.
 * - Custom allocators may share allocation helpers with different private data.
 *   Custom allocators that share the same helper functions also share the same
 *   XArray.
 * Rules:
 * 1. Default allocator is always available, not dynamically registered. This is
 *    to prevent race conditions with early boot code that wants to register
 *    custom allocators or allocate IOASIDs.
 * 2. Custom allocators take precedence over the default allocator.
 * 3. When all custom allocators sharing the same helper functions are
 *    unregistered (e.g. due to hotplug), all outstanding IOASIDs must be
 *    freed. Otherwise, outstanding IOASIDs will be lost and orphaned.
 * 4. When switching between custom allocators sharing the same helper
 *    functions, outstanding IOASIDs are preserved.
 * 5. When switching between a custom allocator and the default allocator, all
 *    IOASIDs must be freed to ensure unadulterated space for the new allocator.
 *    (See the sketch after this struct for an illustration of rules 2-4.)
 *
 * @ops:        allocator helper functions and their data
 * @list:       registered custom allocators
 * @slist:      allocators that share the same ops but different data
 * @flags:      attributes of the allocator
 * @xa:         xarray holding the IOASID space
 * @rcu:        used for kfree_rcu when unregistering the allocator
 */
struct ioasid_allocator_data {
	struct ioasid_allocator_ops *ops;
	struct list_head list;
	struct list_head slist;
#define IOASID_ALLOCATOR_CUSTOM BIT(0) /* Needs framework to track results */
	unsigned long flags;
	struct xarray xa;
	struct rcu_head rcu;
};
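
/*
 * An illustration of rules 2-4 above (a hypothetical sketch, not part of
 * this file): two IOMMUs register allocators that use the same vcmd_alloc
 * and vcmd_free helpers (defined in the sketch after
 * ioasid_register_allocator() below) but carry different private data.
 * Because use_same_ops() matches, both registrations end up sharing one
 * ioasid_allocator_data and one XArray, so hot-removing one IOMMU keeps
 * the IOASIDs allocated on behalf of the other.
 *
 *	static struct ioasid_allocator_ops iommu0_ops = {
 *		.alloc = vcmd_alloc, .free = vcmd_free, .pdata = &iommu0,
 *	};
 *	static struct ioasid_allocator_ops iommu1_ops = {
 *		.alloc = vcmd_alloc, .free = vcmd_free, .pdata = &iommu1,
 *	};
 */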

static DEFINE_SPINLOCK(ioasid_allocator_lock);
static LIST_HEAD(allocators_list);

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque);
static void default_free(ioasid_t ioasid, void *opaque);

static struct ioasid_allocator_ops default_ops = {
	.alloc = default_alloc,
	.free = default_free,
};

static struct ioasid_allocator_data default_allocator = {
	.ops = &default_ops,
	.flags = 0,
	.xa = XARRAY_INIT(ioasid_xa, XA_FLAGS_ALLOC),
};

static struct ioasid_allocator_data *active_allocator = &default_allocator;

static ioasid_t default_alloc(ioasid_t min, ioasid_t max, void *opaque)
{
	ioasid_t id;

	if (xa_alloc(&default_allocator.xa, &id, opaque, XA_LIMIT(min, max), GFP_ATOMIC)) {
		pr_err("Failed to alloc ioasid from %d to %d\n", min, max);
		return INVALID_IOASID;
	}

	return id;
}

static void default_free(ioasid_t ioasid, void *opaque)
{
	struct ioasid_data *ioasid_data;

	ioasid_data = xa_erase(&default_allocator.xa, ioasid);
	kfree_rcu(ioasid_data, rcu);
}

/* Allocate and initialize a new custom allocator with its helper functions */
static struct ioasid_allocator_data *ioasid_alloc_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *ia_data;

	ia_data = kzalloc(sizeof(*ia_data), GFP_ATOMIC);
	if (!ia_data)
		return NULL;

	xa_init_flags(&ia_data->xa, XA_FLAGS_ALLOC);
	INIT_LIST_HEAD(&ia_data->slist);
	ia_data->flags |= IOASID_ALLOCATOR_CUSTOM;
	ia_data->ops = ops;

	/* For tracking custom allocators that share the same ops */
	list_add_tail(&ops->list, &ia_data->slist);

	return ia_data;
}

static bool use_same_ops(struct ioasid_allocator_ops *a, struct ioasid_allocator_ops *b)
{
	return (a->free == b->free) && (a->alloc == b->alloc);
}

/**
 * ioasid_register_allocator - register a custom allocator
 * @ops: the custom allocator ops to be registered
 *
 * Custom allocators take precedence over the default xarray based allocator.
 * Private data associated with IOASIDs allocated by a custom allocator is
 * managed by the IOASID framework, just like data stored in the xa by the
 * default allocator.
 *
 * There can be multiple allocators registered but only one is active. In case
 * of runtime removal of a custom allocator, the next one is activated based
 * on the registration ordering.
 *
 * Multiple allocators can share the same alloc() function; in this case the
 * IOASID space is shared.
 */
int ioasid_register_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *ia_data;
	struct ioasid_allocator_data *pallocator;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);

	ia_data = ioasid_alloc_allocator(ops);
	if (!ia_data) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	/*
	 * No particular preference, we activate the first one and keep
	 * the later registered allocators in a list in case the first one gets
	 * removed due to hotplug.
	 */
	if (list_empty(&allocators_list)) {
		WARN_ON(active_allocator != &default_allocator);
		/* Use this new allocator if the default one is not busy */
		if (xa_empty(&active_allocator->xa)) {
			rcu_assign_pointer(active_allocator, ia_data);
			list_add_tail(&ia_data->list, &allocators_list);
			goto out_unlock;
		}
		pr_warn("Default allocator active with outstanding IOASID\n");
		ret = -EAGAIN;
		goto out_free;
	}

	/* Check if the allocator is already registered */
	list_for_each_entry(pallocator, &allocators_list, list) {
		if (pallocator->ops == ops) {
			pr_err("IOASID allocator already registered\n");
			ret = -EEXIST;
			goto out_free;
		} else if (use_same_ops(pallocator->ops, ops)) {
			/*
			 * If the new allocator shares the same ops,
			 * then they will share the same IOASID space.
			 * We should put them under the same xarray.
			 */
			list_add_tail(&ops->list, &pallocator->slist);
			goto out_free;
		}
	}
	list_add_tail(&ia_data->list, &allocators_list);

	spin_unlock(&ioasid_allocator_lock);
	return 0;
out_free:
	kfree(ia_data);
out_unlock:
	spin_unlock(&ioasid_allocator_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_register_allocator);
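
/*
 * A registration sketch (hypothetical, e.g. a paravirtualized guest that
 * must ask the host for IOASIDs; the vcmd_* names and the my_iommu cookie
 * are illustrative assumptions, not part of this API):
 *
 *	static ioasid_t vcmd_alloc(ioasid_t min, ioasid_t max, void *data)
 *	{
 *		// Issue a virtual command to the host and return the ID
 *		// it granted, or INVALID_IOASID on failure.
 *	}
 *
 *	static void vcmd_free(ioasid_t ioasid, void *data)
 *	{
 *		// Tell the host to release the ID.
 *	}
 *
 *	static struct ioasid_allocator_ops vcmd_ops = {
 *		.alloc	= vcmd_alloc,
 *		.free	= vcmd_free,
 *		.pdata	= &my_iommu,	// handed back to the helpers
 *	};
 *
 *	ret = ioasid_register_allocator(&vcmd_ops);
 */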

/**
 * ioasid_unregister_allocator - Remove a custom IOASID allocator ops
 * @ops: the custom allocator to be removed
 *
 * Remove an allocator from the list and activate the next allocator in
 * registration order. Revert to the default allocator if all custom
 * allocators have been unregistered and no IOASIDs are outstanding.
 */
void ioasid_unregister_allocator(struct ioasid_allocator_ops *ops)
{
	struct ioasid_allocator_data *pallocator;
	struct ioasid_allocator_ops *sops;

	spin_lock(&ioasid_allocator_lock);
	if (list_empty(&allocators_list)) {
		pr_warn("No custom IOASID allocators active!\n");
		goto exit_unlock;
	}

	list_for_each_entry(pallocator, &allocators_list, list) {
		if (!use_same_ops(pallocator->ops, ops))
			continue;

		if (list_is_singular(&pallocator->slist)) {
			/* No shared helper functions */
			list_del(&pallocator->list);
			/*
			 * All IOASIDs should have been freed before
			 * the last allocator that shares the same ops
			 * is unregistered.
			 */
			WARN_ON(!xa_empty(&pallocator->xa));
			if (list_empty(&allocators_list)) {
				pr_info("No custom IOASID allocators, switch to default.\n");
				rcu_assign_pointer(active_allocator, &default_allocator);
			} else if (pallocator == active_allocator) {
				rcu_assign_pointer(active_allocator,
						list_first_entry(&allocators_list,
								struct ioasid_allocator_data, list));
				pr_info("IOASID allocator changed\n");
			}
			kfree_rcu(pallocator, rcu);
			break;
		}
		/*
		 * Find the matching shared ops to delete,
		 * but keep outstanding IOASIDs
		 */
		list_for_each_entry(sops, &pallocator->slist, list) {
			if (sops == ops) {
				list_del(&ops->list);
				break;
			}
		}
		break;
	}

exit_unlock:
	spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_unregister_allocator);
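
/*
 * A teardown sketch (hypothetical hotplug path, continuing the vcmd_ops
 * example above): per rule 3 in the ioasid_allocator_data description,
 * every outstanding IOASID must be freed before the last allocator
 * sharing these ops is unregistered.
 *
 *	ioasid_free(pasid);			// for each outstanding ID
 *	ioasid_unregister_allocator(&vcmd_ops);
 */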

/**
 * ioasid_set_data - Set private data for an allocated ioasid
 * @ioasid: the ID to set data
 * @data:   the private data
 *
 * For an IOASID that is already allocated, private data can be set
 * via this API. Future lookup can be done via ioasid_find().
 */
int ioasid_set_data(ioasid_t ioasid, void *data)
{
	struct ioasid_data *ioasid_data;
	int ret = 0;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (ioasid_data)
		rcu_assign_pointer(ioasid_data->private, data);
	else
		ret = -ENOENT;
	spin_unlock(&ioasid_allocator_lock);

	/*
	 * Wait for readers to stop accessing the old private data, so the
	 * caller can free it.
	 */
	if (!ret)
		synchronize_rcu();

	return ret;
}
EXPORT_SYMBOL_GPL(ioasid_set_data);
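
/*
 * A replacement sketch (hypothetical caller; pasid, old_data and new_data
 * are illustrative): since ioasid_set_data() returns only after
 * synchronize_rcu(), no ioasid_find() reader can still be dereferencing
 * the old pointer once it returns, so the old data is safe to free.
 *
 *	if (!ioasid_set_data(pasid, new_data))
 *		kfree(old_data);
 */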

/**
 * ioasid_alloc - Allocate an IOASID
 * @set: the IOASID set
 * @min: the minimum ID (inclusive)
 * @max: the maximum ID (inclusive)
 * @private: data private to the caller
 *
 * Allocate an ID between @min and @max. The @private pointer is stored
 * internally and can be retrieved with ioasid_find().
 *
 * Return: the allocated ID on success, or %INVALID_IOASID on failure.
 */
ioasid_t ioasid_alloc(struct ioasid_set *set, ioasid_t min, ioasid_t max,
		      void *private)
{
	struct ioasid_data *data;
	void *adata;
	ioasid_t id;

	data = kzalloc(sizeof(*data), GFP_ATOMIC);
	if (!data)
		return INVALID_IOASID;

	data->set = set;
	data->private = private;

	/*
	 * Custom allocator needs allocator data to perform platform specific
	 * operations.
	 */
	spin_lock(&ioasid_allocator_lock);
	adata = active_allocator->flags & IOASID_ALLOCATOR_CUSTOM ?
			active_allocator->ops->pdata : data;
	id = active_allocator->ops->alloc(min, max, adata);
	if (id == INVALID_IOASID) {
		pr_err("Failed ASID allocation %lu\n", active_allocator->flags);
		goto exit_free;
	}

	if ((active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) &&
	     xa_alloc(&active_allocator->xa, &id, data, XA_LIMIT(id, id), GFP_ATOMIC)) {
		/* Custom allocator needs framework to store and track allocation results */
		pr_err("Failed to alloc ioasid from %d\n", id);
		active_allocator->ops->free(id, active_allocator->ops->pdata);
		goto exit_free;
	}
	data->id = id;

	spin_unlock(&ioasid_allocator_lock);
	return id;
exit_free:
	spin_unlock(&ioasid_allocator_lock);
	kfree(data);
	return INVALID_IOASID;
}
EXPORT_SYMBOL_GPL(ioasid_alloc);
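
/*
 * Note that allocation happens under a spinlock with GFP_ATOMIC, so
 * ioasid_alloc() does not sleep and may be called from atomic context;
 * a sketch (hypothetical bind path; mydev and bond are illustrative):
 *
 *	spin_lock(&mydev->lock);
 *	pasid = ioasid_alloc(&my_ioasid_set, 1, (1 << 20) - 1, bond);
 *	spin_unlock(&mydev->lock);
 */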

/**
 * ioasid_free - Free an IOASID
 * @ioasid: the ID to remove
 */
void ioasid_free(ioasid_t ioasid)
{
	struct ioasid_data *ioasid_data;

	spin_lock(&ioasid_allocator_lock);
	ioasid_data = xa_load(&active_allocator->xa, ioasid);
	if (!ioasid_data) {
		pr_err("Trying to free unknown IOASID %u\n", ioasid);
		goto exit_unlock;
	}

	active_allocator->ops->free(ioasid, active_allocator->ops->pdata);
	/* Custom allocator needs additional steps to free the xa element */
	if (active_allocator->flags & IOASID_ALLOCATOR_CUSTOM) {
		ioasid_data = xa_erase(&active_allocator->xa, ioasid);
		kfree_rcu(ioasid_data, rcu);
	}

exit_unlock:
	spin_unlock(&ioasid_allocator_lock);
}
EXPORT_SYMBOL_GPL(ioasid_free);

/**
 * ioasid_find - Find IOASID data
 * @set: the IOASID set
 * @ioasid: the IOASID to find
 * @getter: function to call on the found object
 *
 * The optional getter function allows the caller to take a reference to the
 * found object under the rcu lock. The function can also check if the object
 * is still valid: if @getter returns false, then the object is invalid and
 * NULL is returned.
 *
 * If the IOASID exists, return the private pointer passed to ioasid_alloc().
 * Private data can be NULL if not set. Return an error if the IOASID is not
 * found, or if @set is not NULL and the IOASID does not belong to the set.
 */
void *ioasid_find(struct ioasid_set *set, ioasid_t ioasid,
		  bool (*getter)(void *))
{
	void *priv;
	struct ioasid_data *ioasid_data;
	struct ioasid_allocator_data *idata;

	rcu_read_lock();
	idata = rcu_dereference(active_allocator);
	ioasid_data = xa_load(&idata->xa, ioasid);
	if (!ioasid_data) {
		priv = ERR_PTR(-ENOENT);
		goto unlock;
	}
	if (set && ioasid_data->set != set) {
		/* data found but does not belong to the set */
		priv = ERR_PTR(-EACCES);
		goto unlock;
	}
	/* Now the IOASID and its set are verified, we can return the private data */
	priv = rcu_dereference(ioasid_data->private);
	if (getter && !getter(priv))
		priv = NULL;
unlock:
	rcu_read_unlock();

	return priv;
}
EXPORT_SYMBOL_GPL(ioasid_find);
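
/*
 * A getter sketch (hypothetical; struct my_ctx and its refs field are
 * illustrative): if the private data is refcounted, the getter can try to
 * take a reference under the RCU read lock so that the object cannot be
 * freed between lookup and use.
 *
 *	static bool get_ctx(void *priv)
 *	{
 *		struct my_ctx *ctx = priv;
 *
 *		return refcount_inc_not_zero(&ctx->refs);
 *	}
 *
 *	ctx = ioasid_find(&my_ioasid_set, pasid, get_ctx);
 *	if (IS_ERR_OR_NULL(ctx))
 *		return ctx ? PTR_ERR(ctx) : -ENOENT;
 *	// ... use ctx, then drop the reference taken by get_ctx()
 */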

MODULE_AUTHOR("Jean-Philippe Brucker <jean-philippe.brucker@arm.com>");
MODULE_AUTHOR("Jacob Pan <jacob.jun.pan@linux.intel.com>");
MODULE_DESCRIPTION("IO Address Space ID (IOASID) allocator");
MODULE_LICENSE("GPL");