linux/drivers/gpu/drm/radeon/radeon_mn.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 */
/*
 * Authors:
 *    Christian König <christian.koenig@amd.com>
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/mmu_notifier.h>
#include <drm/drmP.h>
#include <drm/drm.h>

#include "radeon.h"

struct radeon_mn {
	/* constant after initialisation */
	struct radeon_device	*rdev;
	struct mm_struct	*mm;
	struct mmu_notifier	mn;

	/* only used on destruction */
	struct work_struct	work;

	/* protected by rdev->mn_lock */
	struct hlist_node	node;

	/* objects protected by lock */
	struct mutex		lock;
	struct rb_root		objects;
};

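/*
 * A node in the interval tree above. Userptr BOs whose address ranges
 * overlap share a single node and are linked into its bos list.
 */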
struct radeon_mn_node {
	struct interval_tree_node	it;
	struct list_head		bos;
};

/**
 * radeon_mn_destroy - destroy the rmn
 *
 * @work: previously scheduled work item
 *
 * Lazily destroys the notifier from a work item.
 */
static void radeon_mn_destroy(struct work_struct *work)
{
	struct radeon_mn *rmn = container_of(work, struct radeon_mn, work);
	struct radeon_device *rdev = rmn->rdev;
	struct radeon_mn_node *node, *next_node;
	struct radeon_bo *bo, *next_bo;

	mutex_lock(&rdev->mn_lock);
	mutex_lock(&rmn->lock);
	hash_del(&rmn->node);
	rbtree_postorder_for_each_entry_safe(node, next_node, &rmn->objects,
					     it.rb) {

		interval_tree_remove(&node->it, &rmn->objects);
		list_for_each_entry_safe(bo, next_bo, &node->bos, mn_list) {
			bo->mn = NULL;
			list_del_init(&bo->mn_list);
		}
		kfree(node);
	}
	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
	mmu_notifier_unregister(&rmn->mn, rmn->mm);
	kfree(rmn);
}

/**
 * radeon_mn_release - callback to notify about mm destruction
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 *
 * Schedule a work item to lazily destroy our notifier.
 */
static void radeon_mn_release(struct mmu_notifier *mn,
			      struct mm_struct *mm)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
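
	/*
	 * Destruction has to be deferred to a work item: calling
	 * mmu_notifier_unregister() from inside this callback would
	 * deadlock, since unregistering waits for all running notifier
	 * callbacks, including this one, to finish.
	 */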
	INIT_WORK(&rmn->work, radeon_mn_destroy);
	schedule_work(&rmn->work);
}

/**
 * radeon_mn_invalidate_range_start - callback to notify about mm change
 *
 * @mn: our notifier
 * @mm: the mm this callback is about
 * @start: start of updated range
 * @end: end of updated range
 *
 * We block for all BOs in the interval [start, end] to become idle and
 * unmap them by moving them into the system domain again.
 */
static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long start,
					     unsigned long end)
{
	struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
	struct interval_tree_node *it;

	/* notification is exclusive, but interval is inclusive */
	end -= 1;

	mutex_lock(&rmn->lock);

	it = interval_tree_iter_first(&rmn->objects, start, end);
	while (it) {
		struct radeon_mn_node *node;
		struct radeon_bo *bo;
		long r;

		node = container_of(it, struct radeon_mn_node, it);
		it = interval_tree_iter_next(it, start, end);

		list_for_each_entry(bo, &node->bos, mn_list) {

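			/* skip BOs whose pages are not currently bound into the GART */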
			if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
				continue;

			r = radeon_bo_reserve(bo, true);
			if (r) {
				DRM_ERROR("(%ld) failed to reserve user bo\n", r);
				continue;
			}

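			/*
			 * Wait for the GPU to finish with the BO: block on all
			 * fences (wait_all=true), non-interruptible.
			 */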
			r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
				true, false, MAX_SCHEDULE_TIMEOUT);
			if (r <= 0)
				DRM_ERROR("(%ld) failed to wait for user bo\n", r);

			radeon_ttm_placement_from_domain(bo, RADEON_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, false, false);
			if (r)
				DRM_ERROR("(%ld) failed to validate user bo\n", r);

			radeon_bo_unreserve(bo);
		}
	}

	mutex_unlock(&rmn->lock);
}

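/*
 * Notifier callbacks: tear everything down when the process exits and
 * invalidate userptr mappings when the address space changes.
 */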
static const struct mmu_notifier_ops radeon_mn_ops = {
	.release = radeon_mn_release,
	.invalidate_range_start = radeon_mn_invalidate_range_start,
};

/**
 * radeon_mn_get - create notifier context
 *
 * @rdev: radeon device pointer
 *
 * Creates a notifier context for current->mm, or returns the existing
 * one. Returns an ERR_PTR on failure.
 */
static struct radeon_mn *radeon_mn_get(struct radeon_device *rdev)
{
	struct mm_struct *mm = current->mm;
	struct radeon_mn *rmn;
	int r;

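	/* __mmu_notifier_register() requires mmap_sem to be held for write */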
	down_write(&mm->mmap_sem);
	mutex_lock(&rdev->mn_lock);

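	/* reuse an existing notifier for this process, if any */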
	hash_for_each_possible(rdev->mn_hash, rmn, node, (unsigned long)mm)
		if (rmn->mm == mm)
			goto release_locks;

	rmn = kzalloc(sizeof(*rmn), GFP_KERNEL);
	if (!rmn) {
		rmn = ERR_PTR(-ENOMEM);
		goto release_locks;
	}

	rmn->rdev = rdev;
	rmn->mm = mm;
	rmn->mn.ops = &radeon_mn_ops;
	mutex_init(&rmn->lock);
	rmn->objects = RB_ROOT;

	r = __mmu_notifier_register(&rmn->mn, mm);
	if (r)
		goto free_rmn;

	hash_add(rdev->mn_hash, &rmn->node, (unsigned long)mm);

release_locks:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);

	return rmn;

free_rmn:
	mutex_unlock(&rdev->mn_lock);
	up_write(&mm->mmap_sem);
	kfree(rmn);

	return ERR_PTR(r);
}

/**
 * radeon_mn_register - register a BO for notifier updates
 *
 * @bo: radeon buffer object
 * @addr: userptr address we should monitor
 *
 * Registers an MMU notifier for the given BO at the specified address.
 * Returns 0 on success, -ERRNO if anything goes wrong.
 */
int radeon_mn_register(struct radeon_bo *bo, unsigned long addr)
{
	unsigned long end = addr + radeon_bo_size(bo) - 1;
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct radeon_mn_node *node = NULL;
	struct list_head bos;
	struct interval_tree_node *it;

	rmn = radeon_mn_get(rdev);
	if (IS_ERR(rmn))
		return PTR_ERR(rmn);

	INIT_LIST_HEAD(&bos);

	mutex_lock(&rmn->lock);

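	/*
	 * Merge all intervals that overlap [addr, end] into one node: grow
	 * the range to cover them and collect their BOs; the last node
	 * removed is reused for the merged interval.
	 */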
	while ((it = interval_tree_iter_first(&rmn->objects, addr, end))) {
		kfree(node);
		node = container_of(it, struct radeon_mn_node, it);
		interval_tree_remove(&node->it, &rmn->objects);
		addr = min(it->start, addr);
		end = max(it->last, end);
		list_splice(&node->bos, &bos);
	}

	if (!node) {
		node = kmalloc(sizeof(struct radeon_mn_node), GFP_KERNEL);
		if (!node) {
			mutex_unlock(&rmn->lock);
			return -ENOMEM;
		}
	}

	bo->mn = rmn;

	node->it.start = addr;
	node->it.last = end;
	INIT_LIST_HEAD(&node->bos);
	list_splice(&bos, &node->bos);
	list_add(&bo->mn_list, &node->bos);

	interval_tree_insert(&node->it, &rmn->objects);

	mutex_unlock(&rmn->lock);

	return 0;
}
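
/*
 * Typical usage (a sketch, not verbatim): the userptr path in
 * radeon_gem.c registers the BO right after creating it, e.g.:
 *
 *	r = radeon_mn_register(bo, args->addr);
 *	if (r)
 *		goto release_object;
 *
 * radeon_mn_unregister() below undoes this when the BO is destroyed.
 */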

/**
 * radeon_mn_unregister - unregister a BO for notifier updates
 *
 * @bo: radeon buffer object
 *
 * Remove any registration of MMU notifier updates from the buffer object.
 */
void radeon_mn_unregister(struct radeon_bo *bo)
{
	struct radeon_device *rdev = bo->rdev;
	struct radeon_mn *rmn;
	struct list_head *head;

	mutex_lock(&rdev->mn_lock);
	rmn = bo->mn;
	if (rmn == NULL) {
		mutex_unlock(&rdev->mn_lock);
		return;
	}

	mutex_lock(&rmn->lock);
	/*
	 * Save the next list entry for later: once the BO is removed, an
	 * empty 'head' means it points back at the node's bos list head,
	 * i.e. this was the last BO and the node itself can go away.
	 */
	head = bo->mn_list.next;

	bo->mn = NULL;
	list_del(&bo->mn_list);

	if (list_empty(head)) {
		struct radeon_mn_node *node;
		node = container_of(head, struct radeon_mn_node, bos);
		interval_tree_remove(&node->it, &rmn->objects);
		kfree(node);
	}

	mutex_unlock(&rmn->lock);
	mutex_unlock(&rdev->mn_lock);
}