linux/drivers/gpu/drm/radeon/radeon_mn.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>

#include <drm/drm.h>

#include "radeon.h"

struct radeon_mn {
	/* constant after initialisation */
	struct radeon_device	*rdev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by rdev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root_cached	objects;
};

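/*
 * One node in the interval tree: BOs whose userptr ranges overlap are
 * merged under a single node, so one tree entry covers the union of
 * their ranges (see radeon_mn_register()).
 */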
struct radeon_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * radeon_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item
 */
static void radeon_mn_destroy(struct work_struct *work)
{
	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
	struct radeon_device *rdev = rmn->rdev;
	struct radeon_mn_node *node, *next_node;
	struct radeon_bo *bo, *next_bo;

	mutex_lock(&rdev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node,
					     &rmn->objects.rb_root, it.rb) {

		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void radeon_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
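
	/* mmu_notifier_unregister() cannot be called from inside a
	 * notifier callback without deadlocking, so defer the real
	 * teardown to a work item (radeon_mn_destroy() above)
	 */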
	INIT_WORK(&rmn->work, radeon_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @range: the range of the mm this callback is about
 *
 * We wait for all BOs between start and end to become idle and
 * unmap them by moving them into the system domain again.
 */
static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
				const struct mmu_notifier_range *range)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	struct ttm_operation_ctx ctx = { false, false };
	struct interval_tree_node *it;
	unsigned long end;
	int ret = 0;

	/* notification is exclusive, but interval is inclusive */
	end = range->end - 1;
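	/* e.g. a notified range of [0x1000, 0x3000) queries the tree
	 * with last == 0x2fff, matching interval_tree's inclusive
	 * endpoints
	 */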

	/* TODO we should be able to split locking for interval tree and
	 * the tear down.
	 */
	if (mmu_notifier_range_blockable(range))
		mutex_lock(&rmn->lock);
	else if (!mutex_trylock(&rmn->lock))
		return -EAGAIN;

	it = interval_tree_iter_first(&rmn->objects, range->start, end);
	while (it) {
		struct radeon_mn_node *node;
		struct radeon_bo *bo;
		long r;

		if (!mmu_notifier_range_blockable(range)) {
			ret = -EAGAIN;
			goto out_unlock;
		}

		node = container_of(it, struct radeon_mn_node, it);
		it = interval_tree_iter_next(it, range->start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
				continue;

			r = radeon_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			radeon_bo_unreserve(bo);
		}
	}

out_unlock:
	mutex_unlock(&rmn->lock);

	return ret;
}

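/*
 * ->release runs once the mm is being torn down (process exit),
 * ->invalidate_range_start runs whenever part of the address space is
 * about to change (munmap, COW after fork, page migration, ...).
 */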
static const struct mmu_notifier_ops radeon_mn_ops = {
	.release = radeon_mn_release,
	.invalidate_range_start = radeon_mn_invalidate_range_start,
};

/**
 * radeon_mn_get - create notifier context
 *
 * @rdev: radeon device pointer
 *
 * Creates a notifier context for current->mm, or returns the already
 * existing one for this mm.
 */
static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
{
	struct mm_struct *mm = current->mm;
	struct radeon_mn *rmn;
	int r;

	if (down_write_killable(&mm->mmap_sem))
		return ERR_PTR(-EINTR);

	mutex_lock(&rdev->mn_lock);

	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->rdev = rdev;
	rmn->mm = mm;
	rmn->mn.ops = &radeon_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT_CACHED;

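	/* the double underscore variant requires mmap_sem to already be
	 * held for writing, which is the case here
	 */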
	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	unsigned long end = addr + radeon_bo_size(bo) - 1;
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct radeon_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = radeon_mn_get(rdev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

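	/* merge all interval tree nodes overlapping [addr, end] into one:
	 * remove each hit from the tree, widen addr/end to cover it and
	 * collect its BOs; the last removed node is reused below
	 */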
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct radeon_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}

/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct list_head *head;

	mutex_lock(&rdev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&rdev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);
	/* save the next list entry so we can check afterwards whether
	 * removing this BO left the node's BO list empty
	 */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

	if (list_empty(head)) {
		struct radeon_mn_node *node;
		node = container_of(head, struct radeon_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
}
 340