linux/drivers/gpu/drm/nouveau/nouveau_fence.c
/*
 * Copyright (C) 2007 Ben Skeggs.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sublicense, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial
 * portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <drm/drmP.h>

#include <linux/ktime.h>
#include <linux/hrtimer.h>
#include <trace/events/fence.h>

#include <nvif/cl826e.h>
#include <nvif/notify.h>
#include <nvif/event.h>

#include "nouveau_drv.h"
#include "nouveau_dma.h"
#include "nouveau_fence.h"

static const struct fence_ops nouveau_fence_ops_uevent;
static const struct fence_ops nouveau_fence_ops_legacy;

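/*
 * Cast helpers: a nouveau_fence embeds a struct fence as its first member,
 * and fence->base.lock points at the lock embedded in the owning
 * nouveau_fence_chan, so both containers can be recovered with container_of().
 */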
static inline struct nouveau_fence *
from_fence(struct fence *fence)
{
        return container_of(fence, struct nouveau_fence, base);
}

static inline struct nouveau_fence_chan *
nouveau_fctx(struct nouveau_fence *fence)
{
        return container_of(fence->base.lock, struct nouveau_fence_chan, lock);
}

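/*
 * Called with fctx->lock held.  Signals the base fence, unlinks it from the
 * pending list, clears the RCU channel pointer and drops the pending-list
 * reference.  Returns 1 when this drops the last notify reference, i.e. the
 * caller should put the uevent notifier.
 */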
static int
nouveau_fence_signal(struct nouveau_fence *fence)
{
        int drop = 0;

        fence_signal_locked(&fence->base);
        list_del(&fence->head);
        rcu_assign_pointer(fence->channel, NULL);

        if (test_bit(FENCE_FLAG_USER_BITS, &fence->base.flags)) {
                struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

                if (!--fctx->notify_ref)
                        drop = 1;
        }

        fence_put(&fence->base);
        return drop;
}

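/*
 * Return the nouveau_fence backing @fence if it was emitted by this device
 * (matching ops and a fence context inside our range), or NULL for fences
 * belonging to other drivers or other nouveau devices.
 */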
static struct nouveau_fence *
nouveau_local_fence(struct fence *fence, struct nouveau_drm *drm)
{
        struct nouveau_fence_priv *priv = (void*)drm->fence;

        if (fence->ops != &nouveau_fence_ops_legacy &&
            fence->ops != &nouveau_fence_ops_uevent)
                return NULL;

        if (fence->context < priv->context_base ||
            fence->context >= priv->context_base + priv->contexts)
                return NULL;

        return from_fence(fence);
}

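/*
 * Tear down a fence context: force-signal everything still pending, shut
 * the uevent notifier down, and wait out an RCU grace period so concurrent
 * readers of fence->channel are finished before the channel itself goes away.
 */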
void
nouveau_fence_context_del(struct nouveau_fence_chan *fctx)
{
        struct nouveau_fence *fence;

        spin_lock_irq(&fctx->lock);
        while (!list_empty(&fctx->pending)) {
                fence = list_entry(fctx->pending.next, typeof(*fence), head);

                if (nouveau_fence_signal(fence))
                        nvif_notify_put(&fctx->notify);
        }
        spin_unlock_irq(&fctx->lock);

        nvif_notify_fini(&fctx->notify);
        fctx->dead = 1;

        /*
         * Ensure that all accesses to fence->channel complete before freeing
         * the channel.
         */
        synchronize_rcu();
}

static void
nouveau_fence_context_put(struct kref *fence_ref)
{
        kfree(container_of(fence_ref, struct nouveau_fence_chan, fence_ref));
}

void
nouveau_fence_context_free(struct nouveau_fence_chan *fctx)
{
        kref_put(&fctx->fence_ref, nouveau_fence_context_put);
}

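/*
 * Called with fctx->lock held.  Signals every pending fence whose sequence
 * number the channel has reached, using wraparound-safe signed comparison.
 * Returns nonzero if the last notify reference was dropped and the caller
 * should put the uevent notifier.
 */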
static int
nouveau_fence_update(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
        struct nouveau_fence *fence;
        int drop = 0;
        u32 seq = fctx->read(chan);

        while (!list_empty(&fctx->pending)) {
                fence = list_entry(fctx->pending.next, typeof(*fence), head);

                /* seqno not reached yet; the list is ordered, so stop here */
                if ((int)(seq - fence->base.seqno) < 0)
                        break;

                drop |= nouveau_fence_signal(fence);
        }

        return drop;
}

static int
nouveau_fence_wait_uevent_handler(struct nvif_notify *notify)
{
        struct nouveau_fence_chan *fctx =
                container_of(notify, typeof(*fctx), notify);
        unsigned long flags;
        int ret = NVIF_NOTIFY_KEEP;

        spin_lock_irqsave(&fctx->lock, flags);
        if (!list_empty(&fctx->pending)) {
                struct nouveau_fence *fence;
                struct nouveau_channel *chan;

                fence = list_entry(fctx->pending.next, typeof(*fence), head);
                chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
                /* use the lock-protected dereference, not the raw RCU pointer */
                if (nouveau_fence_update(chan, fctx))
                        ret = NVIF_NOTIFY_DROP;
        }
        spin_unlock_irqrestore(&fctx->lock, flags);

        return ret;
}

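/*
 * Set up per-channel fence state: list heads, lock, a device-global fence
 * context number and a human-readable timeline name.  On backends that
 * support it, also create the channel's uevent notifier; it stays disarmed
 * until nvif_notify_get() is called from the signaling paths below.
 */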
void
nouveau_fence_context_new(struct nouveau_channel *chan, struct nouveau_fence_chan *fctx)
{
        struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
        struct nouveau_cli *cli = (void *)chan->user.client;
        int ret;

        INIT_LIST_HEAD(&fctx->flip);
        INIT_LIST_HEAD(&fctx->pending);
        spin_lock_init(&fctx->lock);
        fctx->context = priv->context_base + chan->chid;

        if (chan == chan->drm->cechan)
                strcpy(fctx->name, "copy engine channel");
        else if (chan == chan->drm->channel)
                strcpy(fctx->name, "generic kernel channel");
        else
                strcpy(fctx->name, nvxx_client(&cli->base)->name);

        kref_init(&fctx->fence_ref);
        if (!priv->uevent)
                return;

        ret = nvif_notify_init(&chan->user, nouveau_fence_wait_uevent_handler,
                               false, G82_CHANNEL_DMA_V0_NTFY_UEVENT,
                               &(struct nvif_notify_uevent_req) { },
                               sizeof(struct nvif_notify_uevent_req),
                               sizeof(struct nvif_notify_uevent_rep),
                               &fctx->notify);

        WARN_ON(ret);
}

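/*
 * Deferred work: run func(data) from a workqueue once @fence has signaled.
 * If the fence already signaled, or no memory is available for the work item
 * (in which case the fence is waited on synchronously first), the function
 * is called directly instead.
 */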
struct nouveau_fence_work {
        struct work_struct work;
        struct fence_cb cb;
        void (*func)(void *);
        void *data;
};

static void
nouveau_fence_work_handler(struct work_struct *kwork)
{
        struct nouveau_fence_work *work = container_of(kwork, typeof(*work), work);
        work->func(work->data);
        kfree(work);
}

static void nouveau_fence_work_cb(struct fence *fence, struct fence_cb *cb)
{
        struct nouveau_fence_work *work = container_of(cb, typeof(*work), cb);

        schedule_work(&work->work);
}

void
nouveau_fence_work(struct fence *fence,
                   void (*func)(void *), void *data)
{
        struct nouveau_fence_work *work;

        if (fence_is_signaled(fence))
                goto err;

        work = kmalloc(sizeof(*work), GFP_KERNEL);
        if (!work) {
                /*
                 * this might not be a nouveau fence any more,
                 * so force a lazy wait here
                 */
                WARN_ON(nouveau_fence_wait((struct nouveau_fence *)fence,
                                           true, false));
                goto err;
        }

        INIT_WORK(&work->work, nouveau_fence_work_handler);
        work->func = func;
        work->data = data;

        if (fence_add_callback(fence, &work->cb, nouveau_fence_work_cb) < 0)
                goto err_free;
        return;

err_free:
        kfree(work);
err:
        func(data);
}

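/*
 * Initialize the base fence with the uevent-capable or legacy ops, take a
 * reference on the fence context, emit the fence on the channel's ring and
 * queue it on the pending list.  Anything that completed in the meantime is
 * signaled while we already hold the lock.
 */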
int
nouveau_fence_emit(struct nouveau_fence *fence, struct nouveau_channel *chan)
{
        struct nouveau_fence_chan *fctx = chan->fence;
        struct nouveau_fence_priv *priv = (void*)chan->drm->fence;
        int ret;

        fence->channel  = chan;
        fence->timeout  = jiffies + (15 * HZ);

        if (priv->uevent)
                fence_init(&fence->base, &nouveau_fence_ops_uevent,
                           &fctx->lock, fctx->context, ++fctx->sequence);
        else
                fence_init(&fence->base, &nouveau_fence_ops_legacy,
                           &fctx->lock, fctx->context, ++fctx->sequence);
        kref_get(&fctx->fence_ref);

        trace_fence_emit(&fence->base);
        ret = fctx->emit(fence);
        if (!ret) {
                fence_get(&fence->base);
                spin_lock_irq(&fctx->lock);

                if (nouveau_fence_update(chan, fctx))
                        nvif_notify_put(&fctx->notify);

                list_add_tail(&fence->head, &fctx->pending);
                spin_unlock_irq(&fctx->lock);
        }

        return ret;
}

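/*
 * Non-blocking completion check.  For nouveau fences this also kicks
 * nouveau_fence_update() so that every fence up to the channel's current
 * sequence number gets signaled, not just this one.
 */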
bool
nouveau_fence_done(struct nouveau_fence *fence)
{
        if (fence->base.ops == &nouveau_fence_ops_legacy ||
            fence->base.ops == &nouveau_fence_ops_uevent) {
                struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
                struct nouveau_channel *chan;
                unsigned long flags;

                if (test_bit(FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                        return true;

                spin_lock_irqsave(&fctx->lock, flags);
                chan = rcu_dereference_protected(fence->channel, lockdep_is_held(&fctx->lock));
                if (chan && nouveau_fence_update(chan, fctx))
                        nvif_notify_put(&fctx->notify);
                spin_unlock_irqrestore(&fctx->lock, flags);
        }
        return fence_is_signaled(&fence->base);
}

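/*
 * fence_ops.wait implementation for hardware without uevents: poll
 * nouveau_fence_done() with an hrtimer sleep that backs off exponentially
 * from 1 us to at most 1 ms per iteration.
 */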
static long
nouveau_fence_wait_legacy(struct fence *f, bool intr, long wait)
{
        struct nouveau_fence *fence = from_fence(f);
        unsigned long sleep_time = NSEC_PER_MSEC / 1000;
        unsigned long t = jiffies, timeout = t + wait;

        while (!nouveau_fence_done(fence)) {
                ktime_t kt;

                t = jiffies;

                if (wait != MAX_SCHEDULE_TIMEOUT && time_after_eq(t, timeout)) {
                        __set_current_state(TASK_RUNNING);
                        return 0;
                }

                __set_current_state(intr ? TASK_INTERRUPTIBLE :
                                           TASK_UNINTERRUPTIBLE);

                kt = ktime_set(0, sleep_time);
                schedule_hrtimeout(&kt, HRTIMER_MODE_REL);
                sleep_time *= 2;
                if (sleep_time > NSEC_PER_MSEC)
                        sleep_time = NSEC_PER_MSEC;

                if (intr && signal_pending(current))
                        return -ERESTARTSYS;
        }

        __set_current_state(TASK_RUNNING);

        return timeout - t;
}

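/*
 * Busy-spin until the fence signals; used by nouveau_fence_wait() when lazy
 * waiting is not allowed.  Gives up with -EBUSY once the fence's 15 second
 * timeout has expired.
 */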
static int
nouveau_fence_wait_busy(struct nouveau_fence *fence, bool intr)
{
        int ret = 0;

        while (!nouveau_fence_done(fence)) {
                if (time_after_eq(jiffies, fence->timeout)) {
                        ret = -EBUSY;
                        break;
                }

                __set_current_state(intr ?
                                    TASK_INTERRUPTIBLE :
                                    TASK_UNINTERRUPTIBLE);

                if (intr && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }

        __set_current_state(TASK_RUNNING);
        return ret;
}

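/*
 * Wait for a fence, either lazily (sleeping, via fence_wait_timeout()) or by
 * busy-spinning.  Returns 0 on success, -EBUSY on timeout, or -ERESTARTSYS
 * if interrupted by a signal.
 */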
int
nouveau_fence_wait(struct nouveau_fence *fence, bool lazy, bool intr)
{
        long ret;

        if (!lazy)
                return nouveau_fence_wait_busy(fence, intr);

        ret = fence_wait_timeout(&fence->base, intr, 15 * HZ);
        if (ret < 0)
                return ret;
        else if (!ret)
                return -EBUSY;
        else
                return 0;
}

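/*
 * Make @chan wait for the fences attached to @nvbo's reservation object
 * before proceeding: the exclusive fence, plus all shared fences when
 * @exclusive is set.  A fence from another nouveau channel becomes a
 * channel-to-channel sync via fctx->sync(); anything else (foreign fences,
 * or a failed sync) falls back to a CPU wait.
 */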
int
nouveau_fence_sync(struct nouveau_bo *nvbo, struct nouveau_channel *chan, bool exclusive, bool intr)
{
        struct nouveau_fence_chan *fctx = chan->fence;
        struct fence *fence;
        struct reservation_object *resv = nvbo->bo.resv;
        struct reservation_object_list *fobj;
        struct nouveau_fence *f;
        int ret = 0, i;

        if (!exclusive) {
                ret = reservation_object_reserve_shared(resv);

                if (ret)
                        return ret;
        }

        fobj = reservation_object_get_list(resv);
        fence = reservation_object_get_excl(resv);

        if (fence && (!exclusive || !fobj || !fobj->shared_count)) {
                struct nouveau_channel *prev = NULL;
                bool must_wait = true;

                f = nouveau_local_fence(fence, chan->drm);
                if (f) {
                        rcu_read_lock();
                        prev = rcu_dereference(f->channel);
                        if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
                                must_wait = false;
                        rcu_read_unlock();
                }

                if (must_wait)
                        ret = fence_wait(fence, intr);

                return ret;
        }

        if (!exclusive || !fobj)
                return ret;

        for (i = 0; i < fobj->shared_count && !ret; ++i) {
                struct nouveau_channel *prev = NULL;
                bool must_wait = true;

                fence = rcu_dereference_protected(fobj->shared[i],
                                                reservation_object_held(resv));

                f = nouveau_local_fence(fence, chan->drm);
                if (f) {
                        rcu_read_lock();
                        prev = rcu_dereference(f->channel);
                        if (prev && (prev == chan || fctx->sync(f, prev, chan) == 0))
                                must_wait = false;
                        rcu_read_unlock();
                }

                if (must_wait)
                        ret = fence_wait(fence, intr);
        }

        return ret;
}

void
nouveau_fence_unref(struct nouveau_fence **pfence)
{
        if (*pfence)
                fence_put(&(*pfence)->base);
        *pfence = NULL;
}

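/*
 * Allocate and emit a new fence on @chan.  On failure the fence is dropped
 * and *pfence is set to NULL; on success the caller owns a reference.
 */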
int
nouveau_fence_new(struct nouveau_channel *chan, bool sysmem,
                  struct nouveau_fence **pfence)
{
        struct nouveau_fence *fence;
        int ret = 0;

        if (unlikely(!chan->fence))
                return -ENODEV;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (!fence)
                return -ENOMEM;

        fence->sysmem = sysmem;

        ret = nouveau_fence_emit(fence, chan);
        if (ret)
                nouveau_fence_unref(&fence);

        *pfence = fence;
        return ret;
}

static const char *nouveau_fence_get_get_driver_name(struct fence *fence)
{
        return "nouveau";
}

static const char *nouveau_fence_get_timeline_name(struct fence *f)
{
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

        return !fctx->dead ? fctx->name : "dead channel";
}

/*
 * In an ideal world, read() would not assume the channel context is still
 * alive.  This function may be called from another device and may race with
 * channel teardown, reading freed memory as a result.  The drm node should
 * still be there, so we can derive the index from the fence context.
 */
static bool nouveau_fence_is_signaled(struct fence *f)
{
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
        struct nouveau_channel *chan;
        bool ret = false;

        rcu_read_lock();
        chan = rcu_dereference(fence->channel);
        if (chan)
                ret = (int)(fctx->read(chan) - fence->base.seqno) >= 0;
        rcu_read_unlock();

        return ret;
}

static bool nouveau_fence_no_signaling(struct fence *f)
{
        struct nouveau_fence *fence = from_fence(f);

        /*
         * caller should have a reference on the fence,
         * else fence could get freed here
         */
        WARN_ON(atomic_read(&fence->base.refcount.refcount) <= 1);

        /*
         * This needs uevents to work correctly, but fence_add_callback relies on
         * being able to enable signaling. It will still get signaled eventually,
         * just not right away.
         */
        if (nouveau_fence_is_signaled(f)) {
                list_del(&fence->head);

                fence_put(&fence->base);
                return false;
        }

        return true;
}

static void nouveau_fence_release(struct fence *f)
{
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);

        kref_put(&fctx->fence_ref, nouveau_fence_context_put);
        fence_free(&fence->base);
}

static const struct fence_ops nouveau_fence_ops_legacy = {
        .get_driver_name = nouveau_fence_get_get_driver_name,
        .get_timeline_name = nouveau_fence_get_timeline_name,
        .enable_signaling = nouveau_fence_no_signaling,
        .signaled = nouveau_fence_is_signaled,
        .wait = nouveau_fence_wait_legacy,
        .release = nouveau_fence_release
};

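/*
 * fence_ops.enable_signaling for uevent-capable hardware: take a notify
 * reference so the channel's uevent interrupt stays armed while at least one
 * waiter needs it, and set FENCE_FLAG_USER_BITS so nouveau_fence_signal()
 * knows to drop that reference again.
 */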
static bool nouveau_fence_enable_signaling(struct fence *f)
{
        struct nouveau_fence *fence = from_fence(f);
        struct nouveau_fence_chan *fctx = nouveau_fctx(fence);
        bool ret;

        if (!fctx->notify_ref++)
                nvif_notify_get(&fctx->notify);

        ret = nouveau_fence_no_signaling(f);
        if (ret)
                set_bit(FENCE_FLAG_USER_BITS, &fence->base.flags);
        else if (!--fctx->notify_ref)
                nvif_notify_put(&fctx->notify);

        return ret;
}

static const struct fence_ops nouveau_fence_ops_uevent = {
        .get_driver_name = nouveau_fence_get_get_driver_name,
        .get_timeline_name = nouveau_fence_get_timeline_name,
        .enable_signaling = nouveau_fence_enable_signaling,
        .signaled = nouveau_fence_is_signaled,
        .wait = fence_default_wait,
        /* drop the fctx reference taken in nouveau_fence_emit() */
        .release = nouveau_fence_release
};