linux/drivers/gpu/drm/vmwgfx/vmwgfx_fence.c
/**************************************************************************
 *
 * Copyright © 2011-2014 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

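/*
 * Fence seqnos are 32-bit and eventually wrap. VMW_FENCE_WRAP is the
 * half-range used for wrap-safe comparisons throughout this file:
 * (a - b) < VMW_FENCE_WRAP, evaluated in unsigned arithmetic, means that
 * seqno @a is at or ahead of seqno @b even if the counter has wrapped
 * in between.
 */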
#define VMW_FENCE_WRAP (1 << 31)

struct vmw_fence_manager {
        int num_fence_objects;
        struct vmw_private *dev_priv;
        spinlock_t lock;
        struct list_head fence_list;
        struct work_struct work;
        u32 user_fence_size;
        u32 fence_size;
        u32 event_fence_action_size;
        bool fifo_down;
        struct list_head cleanup_list;
        uint32_t pending_actions[VMW_ACTION_MAX];
        struct mutex goal_irq_mutex;
        bool goal_irq_on; /* Protected by @goal_irq_mutex */
        bool seqno_valid; /* Protected by @lock, and may not be set to true
                             without the @goal_irq_mutex held. */
        u64 ctx;
};

struct vmw_user_fence {
        struct ttm_base_object base;
        struct vmw_fence_obj fence;
};

/**
 * struct vmw_event_fence_action - fence action that delivers a drm event.
 *
 * @action: A struct vmw_fence_action to hook up to a fence.
 * @event: A struct drm_pending_event that controls the event delivery.
 * @fence: A referenced pointer to the fence to keep it alive while @action
 * hangs on it.
 * @dev: Pointer to a struct drm_device so we can access the event stuff.
 * @tv_sec: If non-null, the variable pointed to will be assigned the
 * current time tv_sec val when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set, and the variable pointed to will
 * be assigned the current time tv_usec val when the fence signals.
 */
struct vmw_event_fence_action {
        struct vmw_fence_action action;

        struct drm_pending_event *event;
        struct vmw_fence_obj *fence;
        struct drm_device *dev;

        uint32_t *tv_sec;
        uint32_t *tv_usec;
};

static struct vmw_fence_manager *
fman_from_fence(struct vmw_fence_obj *fence)
{
        return container_of(fence->base.lock, struct vmw_fence_manager, lock);
}

/**
 * Note on fencing subsystem usage of irqs:
 * Typically the vmw_fences_update function is called
 *
 * a) When a new fence seqno has been submitted by the fifo code.
 * b) On-demand when we have waiters. Sleeping waiters will switch on the
 * ANY_FENCE irq and call vmw_fences_update function each time an ANY_FENCE
 * irq is received. When the last fence waiter is gone, that IRQ is masked
 * away.
 *
 * In situations where there are no waiters and we don't submit any new fences,
 * fence objects may not be signaled. This is perfectly OK, since there are
 * no consumers of the signaled data, but that is NOT ok when there are fence
 * actions attached to a fence. The fencing subsystem then makes use of the
 * FENCE_GOAL irq and sets the fence goal seqno to that of the next fence
 * which has an action attached, and each time vmw_fences_update is called,
 * the subsystem makes sure the fence goal seqno is updated.
 *
 * The fence goal seqno irq is on as long as there are unsignaled fence
 * objects with actions attached to them.
 */

static void vmw_fence_obj_destroy(struct dma_fence *f)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);

        spin_lock(&fman->lock);
        list_del_init(&fence->head);
        --fman->num_fence_objects;
        spin_unlock(&fman->lock);
        fence->destroy(fence);
}

static const char *vmw_fence_get_driver_name(struct dma_fence *f)
{
        return "vmwgfx";
}

static const char *vmw_fence_get_timeline_name(struct dma_fence *f)
{
        return "svga";
}

static bool vmw_fence_enable_signaling(struct dma_fence *f)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;

        u32 *fifo_mem = dev_priv->mmio_virt;
        u32 seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
        if (seqno - fence->base.seqno < VMW_FENCE_WRAP)
                return false;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        return true;
}

struct vmwgfx_wait_cb {
        struct dma_fence_cb base;
        struct task_struct *task;
};

static void
vmwgfx_wait_cb(struct dma_fence *fence, struct dma_fence_cb *cb)
{
        struct vmwgfx_wait_cb *wait =
                container_of(cb, struct vmwgfx_wait_cb, base);

        wake_up_process(wait->task);
}

static void __vmw_fences_update(struct vmw_fence_manager *fman);

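/*
 * Custom implementation of struct dma_fence_ops::wait. Instead of relying
 * solely on the irq path, the wait loop installs a callback that wakes the
 * sleeping task and polls the device seqno via __vmw_fences_update() under
 * the fence manager lock each time it wakes, honouring pending signals and
 * the caller's timeout.
 */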
static long vmw_fence_wait(struct dma_fence *f, bool intr, signed long timeout)
{
        struct vmw_fence_obj *fence =
                container_of(f, struct vmw_fence_obj, base);

        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct vmw_private *dev_priv = fman->dev_priv;
        struct vmwgfx_wait_cb cb;
        long ret = timeout;
        unsigned long irq_flags;

        if (likely(vmw_fence_obj_signaled(fence)))
                return timeout;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        vmw_seqno_waiter_add(dev_priv);

        spin_lock_irqsave(f->lock, irq_flags);

        if (intr && signal_pending(current)) {
                ret = -ERESTARTSYS;
                goto out;
        }

        cb.base.func = vmwgfx_wait_cb;
        cb.task = current;
        list_add(&cb.base.node, &f->cb_list);

        while (ret > 0) {
                __vmw_fences_update(fman);
                if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &f->flags))
                        break;

                if (intr)
                        __set_current_state(TASK_INTERRUPTIBLE);
                else
                        __set_current_state(TASK_UNINTERRUPTIBLE);
                spin_unlock_irqrestore(f->lock, irq_flags);

                ret = schedule_timeout(ret);

                spin_lock_irqsave(f->lock, irq_flags);
                if (ret > 0 && intr && signal_pending(current))
                        ret = -ERESTARTSYS;
        }

        if (!list_empty(&cb.base.node))
                list_del(&cb.base.node);
        __set_current_state(TASK_RUNNING);

out:
        spin_unlock_irqrestore(f->lock, irq_flags);

        vmw_seqno_waiter_remove(dev_priv);

        return ret;
}

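/*
 * vmwgfx supplies its own ->wait implementation rather than using the
 * dma_fence default, so that the wait loop above can poll and update the
 * device seqno itself while sleeping.
 */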
static const struct dma_fence_ops vmw_fence_ops = {
        .get_driver_name = vmw_fence_get_driver_name,
        .get_timeline_name = vmw_fence_get_timeline_name,
        .enable_signaling = vmw_fence_enable_signaling,
        .wait = vmw_fence_wait,
        .release = vmw_fence_obj_destroy,
};


/**
 * Execute signal actions on fences recently signaled.
 * This is done from a workqueue so we don't have to execute
 * signal actions from atomic context.
 */

static void vmw_fence_work_func(struct work_struct *work)
{
        struct vmw_fence_manager *fman =
                container_of(work, struct vmw_fence_manager, work);
        struct list_head list;
        struct vmw_fence_action *action, *next_action;
        bool seqno_valid;

        do {
                INIT_LIST_HEAD(&list);
                mutex_lock(&fman->goal_irq_mutex);

                spin_lock(&fman->lock);
                list_splice_init(&fman->cleanup_list, &list);
                seqno_valid = fman->seqno_valid;
                spin_unlock(&fman->lock);

                if (!seqno_valid && fman->goal_irq_on) {
                        fman->goal_irq_on = false;
                        vmw_goal_waiter_remove(fman->dev_priv);
                }
                mutex_unlock(&fman->goal_irq_mutex);

                if (list_empty(&list))
                        return;

                /*
                 * At this point, only we should be able to manipulate the
                 * list heads of the actions we have on the private list.
                 * Hence, fman::lock is not held.
                 */

                list_for_each_entry_safe(action, next_action, &list, head) {
                        list_del_init(&action->head);
                        if (action->cleanup)
                                action->cleanup(action);
                }
        } while (1);
}

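/*
 * Allocate and initialize a struct vmw_fence_manager. The *_size members
 * cache TTM-rounded object sizes used when accounting kernel memory for
 * objects created on behalf of user-space (see e.g. vmw_user_fence_create()).
 */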
struct vmw_fence_manager *vmw_fence_manager_init(struct vmw_private *dev_priv)
{
        struct vmw_fence_manager *fman = kzalloc(sizeof(*fman), GFP_KERNEL);

        if (unlikely(!fman))
                return NULL;

        fman->dev_priv = dev_priv;
        spin_lock_init(&fman->lock);
        INIT_LIST_HEAD(&fman->fence_list);
        INIT_LIST_HEAD(&fman->cleanup_list);
        INIT_WORK(&fman->work, &vmw_fence_work_func);
        fman->fifo_down = true;
        fman->user_fence_size = ttm_round_pot(sizeof(struct vmw_user_fence));
        fman->fence_size = ttm_round_pot(sizeof(struct vmw_fence_obj));
        fman->event_fence_action_size =
                ttm_round_pot(sizeof(struct vmw_event_fence_action));
        mutex_init(&fman->goal_irq_mutex);
        fman->ctx = dma_fence_context_alloc(1);

        return fman;
}

void vmw_fence_manager_takedown(struct vmw_fence_manager *fman)
{
        bool lists_empty;

        (void) cancel_work_sync(&fman->work);

        spin_lock(&fman->lock);
        lists_empty = list_empty(&fman->fence_list) &&
                list_empty(&fman->cleanup_list);
        spin_unlock(&fman->lock);

        BUG_ON(!lists_empty);
        kfree(fman);
}

static int vmw_fence_obj_init(struct vmw_fence_manager *fman,
                              struct vmw_fence_obj *fence, u32 seqno,
                              void (*destroy) (struct vmw_fence_obj *fence))
{
        int ret = 0;

        dma_fence_init(&fence->base, &vmw_fence_ops, &fman->lock,
                       fman->ctx, seqno);
        INIT_LIST_HEAD(&fence->seq_passed_actions);
        fence->destroy = destroy;

        spin_lock(&fman->lock);
        if (unlikely(fman->fifo_down)) {
                ret = -EBUSY;
                goto out_unlock;
        }
        list_add_tail(&fence->head, &fman->fence_list);
        ++fman->num_fence_objects;

out_unlock:
        spin_unlock(&fman->lock);
        return ret;

}

static void vmw_fences_perform_actions(struct vmw_fence_manager *fman,
                                struct list_head *list)
{
        struct vmw_fence_action *action, *next_action;

        list_for_each_entry_safe(action, next_action, list, head) {
                list_del_init(&action->head);
                fman->pending_actions[action->type]--;
                if (action->seq_passed != NULL)
                        action->seq_passed(action);

                /*
                 * Add the cleanup action to the cleanup list so that
                 * it will be performed by a worker task.
                 */

                list_add_tail(&action->head, &fman->cleanup_list);
        }
}

/**
 * vmw_fence_goal_new_locked - Figure out a new device fence goal
 * seqno if needed.
 *
 * @fman: Pointer to a fence manager.
 * @passed_seqno: The seqno the device currently signals as passed.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when we have a new passed_seqno, and
 * we might need to update the fence goal. It checks to see whether
 * the current fence goal has already passed, and, in that case,
 * scans through all unsignaled fences to get the next fence object with an
 * action attached, and sets the seqno of that fence as a new fence goal.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_new_locked(struct vmw_fence_manager *fman,
                                      u32 passed_seqno)
{
        u32 goal_seqno;
        u32 *fifo_mem;
        struct vmw_fence_obj *fence;

        if (likely(!fman->seqno_valid))
                return false;

        fifo_mem = fman->dev_priv->mmio_virt;
        goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
        if (likely(passed_seqno - goal_seqno >= VMW_FENCE_WRAP))
                return false;

        fman->seqno_valid = false;
        list_for_each_entry(fence, &fman->fence_list, head) {
                if (!list_empty(&fence->seq_passed_actions)) {
                        fman->seqno_valid = true;
                        vmw_mmio_write(fence->base.seqno,
                                       fifo_mem + SVGA_FIFO_FENCE_GOAL);
                        break;
                }
        }

        return true;
}


/**
 * vmw_fence_goal_check_locked - Replace the device fence goal seqno if
 * needed.
 *
 * @fence: Pointer to a struct vmw_fence_obj the seqno of which should be
 * considered as a device fence goal.
 *
 * This function should be called with the fence manager lock held.
 * It is typically called when an action has been attached to a fence to
 * check whether the seqno of that fence should be used for a fence
 * goal interrupt. This is typically needed if the current fence goal is
 * invalid, or has a higher seqno than that of the current fence object.
 *
 * returns true if the device goal seqno was updated. False otherwise.
 */
static bool vmw_fence_goal_check_locked(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        u32 goal_seqno;
        u32 *fifo_mem;

        if (dma_fence_is_signaled_locked(&fence->base))
                return false;

        fifo_mem = fman->dev_priv->mmio_virt;
        goal_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE_GOAL);
        if (likely(fman->seqno_valid &&
                   goal_seqno - fence->base.seqno < VMW_FENCE_WRAP))
                return false;

        vmw_mmio_write(fence->base.seqno, fifo_mem + SVGA_FIFO_FENCE_GOAL);
        fman->seqno_valid = true;

        return true;
}

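/*
 * Core fence update: read the last seqno the device has passed and signal,
 * in submission order, every fence on the list that it covers, handing any
 * attached actions over to the cleanup worker. The list is kept in seqno
 * order, so the walk stops at the first fence that has not passed yet.
 * Must be called with fman->lock held.
 */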
static void __vmw_fences_update(struct vmw_fence_manager *fman)
{
        struct vmw_fence_obj *fence, *next_fence;
        struct list_head action_list;
        bool needs_rerun;
        uint32_t seqno, new_seqno;
        u32 *fifo_mem = fman->dev_priv->mmio_virt;

        seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
rerun:
        list_for_each_entry_safe(fence, next_fence, &fman->fence_list, head) {
                if (seqno - fence->base.seqno < VMW_FENCE_WRAP) {
                        list_del_init(&fence->head);
                        dma_fence_signal_locked(&fence->base);
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                } else
                        break;
        }

        /*
         * Rerun if the fence goal seqno was updated, and the
         * hardware might have raced with that update, so that
         * we missed a fence_goal irq.
         */

        needs_rerun = vmw_fence_goal_new_locked(fman, seqno);
        if (unlikely(needs_rerun)) {
                new_seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);
                if (new_seqno != seqno) {
                        seqno = new_seqno;
                        goto rerun;
                }
        }

        if (!list_empty(&fman->cleanup_list))
                (void) schedule_work(&fman->work);
}

void vmw_fences_update(struct vmw_fence_manager *fman)
{
        spin_lock(&fman->lock);
        __vmw_fences_update(fman);
        spin_unlock(&fman->lock);
}

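/*
 * Check whether a fence object has signaled. If the signaled bit isn't set
 * yet, force a seqno update first so that fences that passed without an
 * interrupt are still reported correctly.
 */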
bool vmw_fence_obj_signaled(struct vmw_fence_obj *fence)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->base.flags))
                return 1;

        vmw_fences_update(fman);

        return dma_fence_is_signaled(&fence->base);
}

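/*
 * Thin wrapper around dma_fence_wait_timeout() that converts its return
 * convention (remaining jiffies, 0 on timeout, negative error) into
 * 0 / -EBUSY / negative error as used by the vmwgfx ioctl paths.
 */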
int vmw_fence_obj_wait(struct vmw_fence_obj *fence, bool lazy,
                       bool interruptible, unsigned long timeout)
{
        long ret = dma_fence_wait_timeout(&fence->base, interruptible, timeout);

        if (likely(ret > 0))
                return 0;
        else if (ret == 0)
                return -EBUSY;
        else
                return ret;
}

void vmw_fence_obj_flush(struct vmw_fence_obj *fence)
{
        struct vmw_private *dev_priv = fman_from_fence(fence)->dev_priv;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
}

static void vmw_fence_destroy(struct vmw_fence_obj *fence)
{
        dma_fence_free(&fence->base);
}

int vmw_fence_create(struct vmw_fence_manager *fman,
                     uint32_t seqno,
                     struct vmw_fence_obj **p_fence)
{
        struct vmw_fence_obj *fence;
        int ret;

        fence = kzalloc(sizeof(*fence), GFP_KERNEL);
        if (unlikely(!fence))
                return -ENOMEM;

        ret = vmw_fence_obj_init(fman, fence, seqno,
                                 vmw_fence_destroy);
        if (unlikely(ret != 0))
                goto out_err_init;

        *p_fence = fence;
        return 0;

out_err_init:
        kfree(fence);
        return ret;
}


static void vmw_user_fence_destroy(struct vmw_fence_obj *fence)
{
        struct vmw_user_fence *ufence =
                container_of(fence, struct vmw_user_fence, fence);
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        ttm_base_object_kfree(ufence, base);
        /*
         * Free kernel space accounting.
         */
        ttm_mem_global_free(vmw_mem_glob(fman->dev_priv),
                            fman->user_fence_size);
}

static void vmw_user_fence_base_release(struct ttm_base_object **p_base)
{
        struct ttm_base_object *base = *p_base;
        struct vmw_user_fence *ufence =
                container_of(base, struct vmw_user_fence, base);
        struct vmw_fence_obj *fence = &ufence->fence;

        *p_base = NULL;
        vmw_fence_obj_unreference(&fence);
}

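/*
 * Create a fence object together with a user-space handle. The fence ends up
 * with two references: the one taken explicitly below, which is owned by the
 * ttm base object and dropped in vmw_user_fence_base_release(), and the
 * initial reference from vmw_fence_obj_init(), which is handed to the caller
 * through @p_fence.
 */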
int vmw_user_fence_create(struct drm_file *file_priv,
                          struct vmw_fence_manager *fman,
                          uint32_t seqno,
                          struct vmw_fence_obj **p_fence,
                          uint32_t *p_handle)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_user_fence *ufence;
        struct vmw_fence_obj *tmp;
        struct ttm_mem_global *mem_glob = vmw_mem_glob(fman->dev_priv);
        int ret;

        /*
         * Kernel memory space accounting, since this object may
         * be created by a user-space request.
         */

        ret = ttm_mem_global_alloc(mem_glob, fman->user_fence_size,
                                   false, false);
        if (unlikely(ret != 0))
                return ret;

        ufence = kzalloc(sizeof(*ufence), GFP_KERNEL);
        if (unlikely(!ufence)) {
                ret = -ENOMEM;
                goto out_no_object;
        }

        ret = vmw_fence_obj_init(fman, &ufence->fence, seqno,
                                 vmw_user_fence_destroy);
        if (unlikely(ret != 0)) {
                kfree(ufence);
                goto out_no_object;
        }

        /*
         * The base object holds a reference which is freed in
         * vmw_user_fence_base_release.
         */
        tmp = vmw_fence_obj_reference(&ufence->fence);
        ret = ttm_base_object_init(tfile, &ufence->base, false,
                                   VMW_RES_FENCE,
                                   &vmw_user_fence_base_release, NULL);


        if (unlikely(ret != 0)) {
                /*
                 * Free the base object's reference
                 */
                vmw_fence_obj_unreference(&tmp);
                goto out_err;
        }

        *p_fence = &ufence->fence;
        *p_handle = ufence->base.hash.key;

        return 0;
out_err:
        tmp = &ufence->fence;
        vmw_fence_obj_unreference(&tmp);
out_no_object:
        ttm_mem_global_free(mem_glob, fman->user_fence_size);
        return ret;
}


/**
 * vmw_wait_dma_fence - Wait for a dma fence
 *
 * @fman: pointer to a fence manager
 * @fence: DMA fence to wait on
 *
 * This function handles the case when the fence is actually a fence
 * array. If that's the case, it'll wait on each of the child fences.
 * Returns 0 on success or a negative error code on failure.
 */
int vmw_wait_dma_fence(struct vmw_fence_manager *fman,
                       struct dma_fence *fence)
{
        struct dma_fence_array *fence_array;
        int ret = 0;
        int i;


        if (dma_fence_is_signaled(fence))
                return 0;

        if (!dma_fence_is_array(fence))
                return dma_fence_wait(fence, true);

        /* From i915: Note that if the fence-array was created in
         * signal-on-any mode, we should *not* decompose it into its individual
         * fences. However, we don't currently store which mode the fence-array
         * is operating in. Fortunately, the only user of signal-on-any is
         * private to amdgpu and we should not see any incoming fence-array
         * from sync-file being in signal-on-any mode.
         */

        fence_array = to_dma_fence_array(fence);
        for (i = 0; i < fence_array->num_fences; i++) {
                struct dma_fence *child = fence_array->fences[i];

                ret = dma_fence_wait(child, true);

                if (ret < 0)
                        return ret;
        }

        return 0;
}


/**
 * vmw_fence_fifo_down - signal all unsignaled fence objects.
 *
 * @fman: Pointer to the fence manager whose fences should be flushed out.
 *
 * Called when the command FIFO goes down: waits for each remaining fence
 * and force-signals those that time out.
 */

void vmw_fence_fifo_down(struct vmw_fence_manager *fman)
{
        struct list_head action_list;
        int ret;

        /*
         * The list may be altered while we traverse it, so always
         * restart when we've released the fman->lock.
         */

        spin_lock(&fman->lock);
        fman->fifo_down = true;
        while (!list_empty(&fman->fence_list)) {
                struct vmw_fence_obj *fence =
                        list_entry(fman->fence_list.prev, struct vmw_fence_obj,
                                   head);
                dma_fence_get(&fence->base);
                spin_unlock(&fman->lock);

                ret = vmw_fence_obj_wait(fence, false, false,
                                         VMW_FENCE_WAIT_TIMEOUT);

                if (unlikely(ret != 0)) {
                        list_del_init(&fence->head);
                        dma_fence_signal(&fence->base);
                        INIT_LIST_HEAD(&action_list);
                        list_splice_init(&fence->seq_passed_actions,
                                         &action_list);
                        vmw_fences_perform_actions(fman, &action_list);
                }

                BUG_ON(!list_empty(&fence->head));
                dma_fence_put(&fence->base);
                spin_lock(&fman->lock);
        }
        spin_unlock(&fman->lock);
}

void vmw_fence_fifo_up(struct vmw_fence_manager *fman)
{
        spin_lock(&fman->lock);
        fman->fifo_down = false;
        spin_unlock(&fman->lock);
}


/**
 * vmw_fence_obj_lookup - Look up a user-space fence object
 *
 * @tfile: A struct ttm_object_file identifying the caller.
 * @handle: A handle identifying the fence object.
 * @return: A struct vmw_user_fence base ttm object on success or
 * an error pointer on failure.
 *
 * The fence object is looked up and type-checked. The caller needs
 * to have opened the fence object first, but since that happens on
 * creation and fence objects aren't shareable, that's not an
 * issue currently.
 */
static struct ttm_base_object *
vmw_fence_obj_lookup(struct ttm_object_file *tfile, u32 handle)
{
        struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

        if (!base) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return ERR_PTR(-EINVAL);
        }

        if (base->refcount_release != vmw_user_fence_base_release) {
                pr_err("Invalid fence object handle 0x%08lx.\n",
                       (unsigned long)handle);
                ttm_base_object_unref(&base);
                return ERR_PTR(-EINVAL);
        }

        return base;
}


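/*
 * DRM_VMW_FENCE_WAIT ioctl. The user-supplied timeout is in microseconds;
 * the kernel_cookie records the jiffies deadline on the first call so that
 * a wait restarted after a signal doesn't extend the overall timeout.
 */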
int vmw_fence_obj_wait_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
            (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        int ret;
        uint64_t wait_timeout = ((uint64_t)arg->timeout_us * HZ);

        /*
         * 64-bit division is not available natively on 32-bit systems, so
         * approximate the division by 1000000 with shifts:
         * x/2^20 + x/2^24 - x/2^26 ~= x * 0.9984e-6, i.e. within about
         * 0.2% of x/1000000.
         */

        wait_timeout = (wait_timeout >> 20) + (wait_timeout >> 24) -
          (wait_timeout >> 26);

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + wait_timeout;
        }

        base = vmw_fence_obj_lookup(tfile, arg->handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie)) {
                ret = ((vmw_fence_obj_signaled(fence)) ?
                       0 : -EBUSY);
                goto out;
        }

        timeout = (unsigned long)arg->kernel_cookie - timeout;

        ret = vmw_fence_obj_wait(fence, arg->lazy, true, timeout);

out:
        ttm_base_object_unref(&base);

        /*
         * Optionally unref the fence object.
         */

        if (ret == 0 && (arg->wait_options & DRM_VMW_WAIT_OPTION_UNREF))
                return ttm_ref_object_base_unref(tfile, arg->handle,
                                                 TTM_REF_USAGE);
        return ret;
}

int vmw_fence_obj_signaled_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv)
{
        struct drm_vmw_fence_signaled_arg *arg =
                (struct drm_vmw_fence_signaled_arg *) data;
        struct ttm_base_object *base;
        struct vmw_fence_obj *fence;
        struct vmw_fence_manager *fman;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);

        base = vmw_fence_obj_lookup(tfile, arg->handle);
        if (IS_ERR(base))
                return PTR_ERR(base);

        fence = &(container_of(base, struct vmw_user_fence, base)->fence);
        fman = fman_from_fence(fence);

        arg->signaled = vmw_fence_obj_signaled(fence);

        arg->signaled_flags = arg->flags;
        spin_lock(&fman->lock);
        arg->passed_seqno = dev_priv->last_read_seqno;
        spin_unlock(&fman->lock);

        ttm_base_object_unref(&base);

        return 0;
}


int vmw_fence_obj_unref_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv)
{
        struct drm_vmw_fence_arg *arg =
                (struct drm_vmw_fence_arg *) data;

        return ttm_ref_object_base_unref(vmw_fpriv(file_priv)->tfile,
                                         arg->handle,
                                         TTM_REF_USAGE);
}

/**
 * vmw_event_fence_action_seq_passed
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is called when the seqno of the fence where @action is
 * attached has passed. It queues the event on the submitter's event list.
 * This function is always called from atomic context.
 */
static void vmw_event_fence_action_seq_passed(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);
        struct drm_device *dev = eaction->dev;
        struct drm_pending_event *event = eaction->event;
        struct drm_file *file_priv;


        if (unlikely(event == NULL))
                return;

        file_priv = event->file_priv;
        spin_lock_irq(&dev->event_lock);

        if (likely(eaction->tv_sec != NULL)) {
                struct timeval tv;

                do_gettimeofday(&tv);
                *eaction->tv_sec = tv.tv_sec;
                *eaction->tv_usec = tv.tv_usec;
        }

        drm_send_event_locked(dev, eaction->event);
        eaction->event = NULL;
        spin_unlock_irq(&dev->event_lock);
}

/**
 * vmw_event_fence_action_cleanup
 *
 * @action: The struct vmw_fence_action embedded in a struct
 * vmw_event_fence_action.
 *
 * This function is the struct vmw_fence_action destructor. It's typically
 * called from a workqueue.
 */
static void vmw_event_fence_action_cleanup(struct vmw_fence_action *action)
{
        struct vmw_event_fence_action *eaction =
                container_of(action, struct vmw_event_fence_action, action);

        vmw_fence_obj_unreference(&eaction->fence);
        kfree(eaction);
}


/**
 * vmw_fence_obj_add_action - Add an action to a fence object.
 *
 * @fence: The fence object.
 * @action: The action to add.
 *
 * Note that the action callbacks may be executed before this function
 * returns.
 */
static void vmw_fence_obj_add_action(struct vmw_fence_obj *fence,
                              struct vmw_fence_action *action)
{
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        bool run_update = false;

        mutex_lock(&fman->goal_irq_mutex);
        spin_lock(&fman->lock);

        fman->pending_actions[action->type]++;
        if (dma_fence_is_signaled_locked(&fence->base)) {
                struct list_head action_list;

                INIT_LIST_HEAD(&action_list);
                list_add_tail(&action->head, &action_list);
                vmw_fences_perform_actions(fman, &action_list);
        } else {
                list_add_tail(&action->head, &fence->seq_passed_actions);

                /*
                 * This function may set fman::seqno_valid, so it must
                 * be run with the goal_irq_mutex held.
                 */
                run_update = vmw_fence_goal_check_locked(fence);
        }

        spin_unlock(&fman->lock);

        if (run_update) {
                if (!fman->goal_irq_on) {
                        fman->goal_irq_on = true;
                        vmw_goal_waiter_add(fman->dev_priv);
                }
                vmw_fences_update(fman);
        }
        mutex_unlock(&fman->goal_irq_mutex);

}

/**
 * vmw_event_fence_action_queue - Queue a drm event for sending when a fence
 * object seqno has passed.
 *
 * @file_priv: The file connection on which the event should be posted.
 * @fence: The fence object on which to post the event.
 * @event: Event to be posted. This event should've been allocated
 * using k[mz]alloc, and should've been completely initialized.
 * @tv_sec: If non-null, the variable pointed to will be assigned the event
 * timestamp tv_sec value when the fence signals.
 * @tv_usec: Must be set if @tv_sec is set; the variable pointed to will be
 * assigned the timestamp tv_usec value when the fence signals.
 * @interruptible: Interruptible waits if possible.
 *
 * As a side effect, the object pointed to by @event may have been
 * freed when this function returns. If this function returns with
 * an error code, the caller needs to free that object.
 */

int vmw_event_fence_action_queue(struct drm_file *file_priv,
                                 struct vmw_fence_obj *fence,
                                 struct drm_pending_event *event,
                                 uint32_t *tv_sec,
                                 uint32_t *tv_usec,
                                 bool interruptible)
{
        struct vmw_event_fence_action *eaction;
        struct vmw_fence_manager *fman = fman_from_fence(fence);

        eaction = kzalloc(sizeof(*eaction), GFP_KERNEL);
        if (unlikely(!eaction))
                return -ENOMEM;

        eaction->event = event;

        eaction->action.seq_passed = vmw_event_fence_action_seq_passed;
        eaction->action.cleanup = vmw_event_fence_action_cleanup;
        eaction->action.type = VMW_ACTION_EVENT;

        eaction->fence = vmw_fence_obj_reference(fence);
        eaction->dev = fman->dev_priv->dev;
        eaction->tv_sec = tv_sec;
        eaction->tv_usec = tv_usec;

        vmw_fence_obj_add_action(fence, &eaction->action);

        return 0;
}

struct vmw_event_fence_pending {
        struct drm_pending_event base;
        struct drm_vmw_event_fence event;
};

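/*
 * Allocate a struct drm_vmw_event_fence pending event, reserve space for it
 * on the file's event queue, and queue it as a fence action. If
 * DRM_VMW_FE_FLAG_REQ_TIME is set, the event's tv_sec/tv_usec fields are
 * filled in with a timestamp when the fence signals.
 */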
static int vmw_event_fence_action_create(struct drm_file *file_priv,
                                  struct vmw_fence_obj *fence,
                                  uint32_t flags,
                                  uint64_t user_data,
                                  bool interruptible)
{
        struct vmw_event_fence_pending *event;
        struct vmw_fence_manager *fman = fman_from_fence(fence);
        struct drm_device *dev = fman->dev_priv->dev;
        int ret;

        event = kzalloc(sizeof(*event), GFP_KERNEL);
        if (unlikely(!event)) {
                DRM_ERROR("Failed to allocate an event.\n");
                ret = -ENOMEM;
                goto out_no_space;
        }

        event->event.base.type = DRM_VMW_EVENT_FENCE_SIGNALED;
        event->event.base.length = sizeof(*event);
        event->event.user_data = user_data;

        ret = drm_event_reserve_init(dev, file_priv, &event->base, &event->event.base);

        if (unlikely(ret != 0)) {
                DRM_ERROR("Failed to allocate event space for this file.\n");
                kfree(event);
                goto out_no_space;
        }

        if (flags & DRM_VMW_FE_FLAG_REQ_TIME)
                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   &event->event.tv_sec,
                                                   &event->event.tv_usec,
                                                   interruptible);
        else
                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   NULL,
                                                   NULL,
                                                   interruptible);
        if (ret != 0)
                goto out_no_queue;

        return 0;

out_no_queue:
        drm_event_cancel_free(dev, &event->base);
out_no_space:
        return ret;
}

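/*
 * DRM_VMW_FENCE_EVENT ioctl: look up an existing fence object, or have a new
 * one created via vmw_execbuf_fence_commands(), attach a DRM event to be
 * delivered when it signals, and optionally copy fence information back to
 * user-space.
 */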
int vmw_fence_event_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_fence_event_arg *arg =
                (struct drm_vmw_fence_event_arg *) data;
        struct vmw_fence_obj *fence = NULL;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct ttm_object_file *tfile = vmw_fp->tfile;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)(unsigned long)
                arg->fence_rep;
        uint32_t handle;
        int ret;

        /*
         * Look up an existing fence object,
         * and if user-space wants a new reference,
         * add one.
         */
        if (arg->handle) {
                struct ttm_base_object *base =
                        vmw_fence_obj_lookup(tfile, arg->handle);

                if (IS_ERR(base))
                        return PTR_ERR(base);

                fence = &(container_of(base, struct vmw_user_fence,
                                       base)->fence);
                (void) vmw_fence_obj_reference(fence);

                if (user_fence_rep != NULL) {
                        ret = ttm_ref_object_add(vmw_fp->tfile, base,
                                                 TTM_REF_USAGE, NULL, false);
                        if (unlikely(ret != 0)) {
                                DRM_ERROR("Failed to reference a fence "
                                          "object.\n");
                                goto out_no_ref_obj;
                        }
                        handle = base->hash.key;
                }
                ttm_base_object_unref(&base);
        }

        /*
         * Create a new fence object.
         */
        if (!fence) {
                ret = vmw_execbuf_fence_commands(file_priv, dev_priv,
                                                 &fence,
                                                 (user_fence_rep) ?
                                                 &handle : NULL);
                if (unlikely(ret != 0)) {
                        DRM_ERROR("Fence event failed to create fence.\n");
                        return ret;
                }
        }

        BUG_ON(fence == NULL);

        ret = vmw_event_fence_action_create(file_priv, fence,
                                            arg->flags,
                                            arg->user_data,
                                            true);
        if (unlikely(ret != 0)) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Failed to attach event to fence.\n");
                goto out_no_create;
        }

        vmw_execbuf_copy_fence_user(dev_priv, vmw_fp, 0, user_fence_rep, fence,
                                    handle, -1, NULL);
        vmw_fence_obj_unreference(&fence);
        return 0;
out_no_create:
        if (user_fence_rep != NULL)
                ttm_ref_object_base_unref(tfile, handle, TTM_REF_USAGE);
out_no_ref_obj:
        vmw_fence_obj_unreference(&fence);
        return ret;
}