linux/drivers/gpu/drm/radeon/radeon_mn.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"

struct radeon_mn {
        /* constant after initialisation */
        struct radeon_device    *rdev;
        struct mm_struct        *mm;
        struct mmu_notifier     mn;

        /* only used on destruction */
        struct work_struct      work;

        /* protected by rdev->mn_lock */
        struct hlist_node       node;

        /* objects protected by lock */
        struct mutex            lock;
        struct rb_root_cached   objects;
};

struct radeon_mn_node {
        struct interval_tree_node       it;
        struct list_head                bos;
};

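/*
 * One radeon_mn exists per mm_struct and is looked up via rdev->mn_hash.
 * Its interval tree (objects) is keyed by userptr address ranges; each
 * radeon_mn_node covers one merged range and lists the BOs backed by it.
 */
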
/**
 * radeon_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void radeon_mn_destroy(struct work_struct *work)
{
        struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
        struct radeon_device *rdev = rmn->rdev;
        struct radeon_mn_node *node, *next_node;
        struct radeon_bo *bo, *next_bo;

        mutex_lock(&rdev->mn_lock);
        mutex_lock(&rmn->lock);
        hash_del(&rmn->node);
        rbtree_postorder_for_each_entry_safe(node, next_node,
                                             &rmn->objects.rb_root, it.rb) {

                interval_tree_remove(&node->it, &rmn->objects);
                list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
                        bo->mn = NULL;
                        list_del_init(&bo->mn_list);
                }
                kfree(node);
        }
        mutex_unlock(&rmn->lock);
        mutex_unlock(&rdev->mn_lock);
        mmu_notifier_unregister(&rmn->mn, rmn->mm);
        kfree(rmn);
}

/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void radeon_mn_release(struct mmu_notifier *mn,
                              struct mm_struct *mm)
{
        struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
        INIT_WORK(&rmn->work, radeon_mn_destroy);
        schedule_work(&rmn->work);
}

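/*
 * Tear-down is deferred to a work item so that mmu_notifier_unregister()
 * in radeon_mn_destroy() runs from process context rather than from
 * within the notifier callback itself.
 */
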
/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 * @blockable: whether we are allowed to sleep
 *
 * We block until all BOs between start and end are idle and
 * unmap them by moving them into the system domain again.
 */
static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
                                             struct mm_struct *mm,
                                             unsigned long start,
                                             unsigned long end,
                                             bool blockable)
{
        struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
        struct ttm_operation_ctx ctx = { false, false };
        struct interval_tree_node *it;
        int ret = 0;

        /* notification is exclusive, but interval is inclusive */
        end -= 1;

        /* TODO we should be able to split locking for interval tree and
         * the tear down.
         */
        if (blockable)
                mutex_lock(&rmn->lock);
        else if (!mutex_trylock(&rmn->lock))
                return -EAGAIN;

        it = interval_tree_iter_first(&rmn->objects, start, end);
        while (it) {
                struct radeon_mn_node *node;
                struct radeon_bo *bo;
                long r;

                if (!blockable) {
                        ret = -EAGAIN;
                        goto out_unlock;
                }

                node = container_of(it, struct radeon_mn_node, it);
                it = interval_tree_iter_next(it, start, end);

                list_for_each_entry(bo, &node->bos, mn_list) {

                        if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
                                continue;

                        r = radeon_bo_reserve(bo, true);
                        if (r) {
                                DRM_ERROR("(%ld) failed to reserve user bo\n", r);
                                continue;
                        }

                        r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
                                true, false, MAX_SCHEDULE_TIMEOUT);
                        if (r <= 0)
                                DRM_ERROR("(%ld) failed to wait for user bo\n", r);

                        radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
                        r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
                        if (r)
                                DRM_ERROR("(%ld) failed to validate user bo\n", r);

                        radeon_bo_unreserve(bo);
                }
        }

out_unlock:
        mutex_unlock(&rmn->lock);

        return ret;
}

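/*
 * In the non-blockable case the callback does not sleep: if any BO lies in
 * the invalidated range it bails out with -EAGAIN instead of waiting for it.
 */
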
static const struct mmu_notifier_ops radeon_mn_ops = {
        .release = radeon_mn_release,
        .invalidate_range_start = radeon_mn_invalidate_range_start,
};

/**
 * radeon_mn_get - create notifier context
 *
 * @rdev: radeon device pointer
 *
 * Creates a notifier context for current->mm, or returns the already
 * existing one. Returns the context or an ERR_PTR on failure.
 */
static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
{
        struct mm_struct *mm = current->mm;
        struct radeon_mn *rmn;
        int r;

        if (down_write_killable(&mm->mmap_sem))
                return ERR_PTR(-EINTR);

        mutex_lock(&rdev->mn_lock);

        hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
                if (rmn->mm == mm)
                        goto release_locks;

        rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
        if (!rmn) {
                rmn = ERR_PTR(-ENOMEM);
                goto release_locks;
        }

        rmn->rdev = rdev;
        rmn->mm = mm;
        rmn->mn.ops = &radeon_mn_ops;
        mutex_init(&rmn->lock);
        rmn->objects = RB_ROOT_CACHED;

        r = __mmu_notifier_register(&rmn->mn, mm);
        if (r)
                goto free_rmn;

        hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
        mutex_unlock(&rdev->mn_lock);
        up_write(&mm->mmap_sem);

        return rmn;

free_rmn:
        mutex_unlock(&rdev->mn_lock);
        up_write(&mm->mmap_sem);
        kfree(rmn);

        return ERR_PTR(r);
}

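/*
 * Only one notifier context is kept per mm: radeon_mn_get() returns the
 * already registered radeon_mn for the calling process instead of
 * creating a second one.
 */
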
/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr addr we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
        unsigned long end = addr + radeon_bo_size(bo) - 1;
        struct radeon_device *rdev = bo->rdev;
        struct radeon_mn *rmn;
        struct radeon_mn_node *node = NULL;
        struct list_head bos;
        struct interval_tree_node *it;

        rmn = radeon_mn_get(rdev);
        if (IS_ERR(rmn))
                return PTR_ERR(rmn);

        INIT_LIST_HEAD(&bos);

        mutex_lock(&rmn->lock);

        while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
                kfree(node);
                node = container_of(it, struct radeon_mn_node, it);
                interval_tree_remove(&node->it, &rmn->objects);
                addr = min(it->start, addr);
                end = max(it->last, end);
                list_splice(&node->bos, &bos);
        }

        if (!node) {
                node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
                if (!node) {
                        mutex_unlock(&rmn->lock);
                        return -ENOMEM;
                }
        }

        bo->mn = rmn;

        node->it.start = addr;
        node->it.last = end;
        INIT_LIST_HEAD(&node->bos);
        list_splice(&bos, &node->bos);
        list_add(&bo->mn_list, &node->bos);

        interval_tree_insert(&node->it, &rmn->objects);

        mutex_unlock(&rmn->lock);

        return 0;
}

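/*
 * Usage sketch (illustrative): a userptr BO monitoring the CPU mapping at
 * 'addr' is registered once after creation and unregistered when it is freed:
 *
 *     r = radeon_mn_register(bo, addr);
 *     if (r)
 *         return r;
 *     ...
 *     radeon_mn_unregister(bo);
 *
 * Overlapping registrations for the same mm are merged into a single
 * interval tree node above, with all affected BOs kept on its bos list.
 */
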
/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
        struct radeon_device *rdev = bo->rdev;
        struct radeon_mn *rmn;
        struct list_head *head;

        mutex_lock(&rdev->mn_lock);
        rmn = bo->mn;
        if (rmn == NULL) {
                mutex_unlock(&rdev->mn_lock);
                return;
        }

        mutex_lock(&rmn->lock);
        /* save the next list entry for later */
        head = bo->mn_list.next;

        bo->mn = NULL;
        list_del(&bo->mn_list);

        if (list_empty(head)) {
                struct radeon_mn_node *node;
                node = container_of(head, struct radeon_mn_node, bos);
                interval_tree_remove(&node->it, &rmn->objects);
                kfree(node);
        }

        mutex_unlock(&rmn->lock);
        mutex_unlock(&rdev->mn_lock);
}