linux/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
/**************************************************************************
 *
 * Copyright (C) 2010 VMware, Inc., Palo Alto, CA., USA
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/


#include "vmwgfx_drv.h"

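/*
 * This file implements a simple fifo lag estimate: a marker is pushed
 * when commands are submitted (vmw_marker_push()) and pulled once the
 * device signals the corresponding seqno (vmw_marker_pull()). The time
 * markers spend on the queue approximates how far the device lags behind
 * submission, and vmw_wait_lag() throttles against that estimate.
 */

/**
 * struct vmw_marker - Marker for a submitted command stream position.
 * @head: List head linking the marker into the queue.
 * @seqno: Sequence number of the command stream position being tracked.
 * @submitted: Raw monotonic time at which the marker was pushed.
 */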
struct vmw_marker {
        struct list_head head;
        uint32_t seqno;
        struct timespec submitted;
};

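/**
 * vmw_marker_queue_init - Initialize a marker queue.
 * @queue: The queue to initialize.
 *
 * Starts with an empty list, zero accumulated lag and the current raw
 * monotonic time as the last update time.
 */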
void vmw_marker_queue_init(struct vmw_marker_queue *queue)
{
        INIT_LIST_HEAD(&queue->head);
        queue->lag = ns_to_timespec(0);
        getrawmonotonic(&queue->lag_time);
        spin_lock_init(&queue->lock);
}

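/**
 * vmw_marker_queue_takedown - Free all markers left on a queue.
 * @queue: The queue to take down.
 *
 * The markers are freed without being unlinked, so the queue must be
 * reinitialized before any further use.
 */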
void vmw_marker_queue_takedown(struct vmw_marker_queue *queue)
{
        struct vmw_marker *marker, *next;

        spin_lock(&queue->lock);
        list_for_each_entry_safe(marker, next, &queue->head, head) {
                kfree(marker);
        }
        spin_unlock(&queue->lock);
}

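/**
 * vmw_marker_push - Queue a marker for a submitted seqno.
 * @queue: The queue to push onto.
 * @seqno: Sequence number of the command stream position to track.
 *
 * Timestamps the marker with the current raw monotonic time and adds it
 * to the tail of the queue. Returns 0 on success, -ENOMEM on allocation
 * failure.
 */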
int vmw_marker_push(struct vmw_marker_queue *queue,
                    uint32_t seqno)
{
        struct vmw_marker *marker = kmalloc(sizeof(*marker), GFP_KERNEL);

        if (unlikely(!marker))
                return -ENOMEM;

        marker->seqno = seqno;
        getrawmonotonic(&marker->submitted);
        spin_lock(&queue->lock);
        list_add_tail(&marker->head, &queue->head);
        spin_unlock(&queue->lock);

        return 0;
}

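/**
 * vmw_marker_pull - Retire markers up to a signaled seqno.
 * @queue: The queue to pull from.
 * @signaled_seqno: Sequence number the device has signaled as complete.
 *
 * Markers whose seqno is ahead of @signaled_seqno in 32-bit wrapping
 * arithmetic are considered not yet signaled and are skipped; signaled
 * markers are removed and the lag estimate is set to the age of the most
 * recently submitted of them. Returns 0 if the lag estimate was updated
 * (including the empty-queue case, where it is reset to zero), -EBUSY if
 * no marker had been signaled yet.
 */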
int vmw_marker_pull(struct vmw_marker_queue *queue,
                    uint32_t signaled_seqno)
{
        struct vmw_marker *marker, *next;
        struct timespec now;
        bool updated = false;

        spin_lock(&queue->lock);
        getrawmonotonic(&now);

        if (list_empty(&queue->head)) {
                queue->lag = ns_to_timespec(0);
                queue->lag_time = now;
                updated = true;
                goto out_unlock;
        }

        list_for_each_entry_safe(marker, next, &queue->head, head) {
                if (signaled_seqno - marker->seqno > (1 << 30))
                        continue;

                queue->lag = timespec_sub(now, marker->submitted);
                queue->lag_time = now;
                updated = true;
                list_del(&marker->head);
                kfree(marker);
        }

out_unlock:
        spin_unlock(&queue->lock);

        return (updated) ? 0 : -EBUSY;
}

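/*
 * Add two timespecs and normalize the result. Both inputs are assumed to
 * be normalized already (0 <= tv_nsec < NSEC_PER_SEC), so at most one
 * carry is needed.
 */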
static struct timespec vmw_timespec_add(struct timespec t1,
                                        struct timespec t2)
{
        t1.tv_sec += t2.tv_sec;
        t1.tv_nsec += t2.tv_nsec;
        if (t1.tv_nsec >= 1000000000L) {
                t1.tv_sec += 1;
                t1.tv_nsec -= 1000000000L;
        }

        return t1;
}

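/*
 * Fold the raw monotonic time elapsed since the last update into the
 * queue's accumulated lag estimate and return a snapshot of the result.
 */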
static struct timespec vmw_fifo_lag(struct vmw_marker_queue *queue)
{
        struct timespec now, lag;

        spin_lock(&queue->lock);
        getrawmonotonic(&now);
        queue->lag = vmw_timespec_add(queue->lag,
                                      timespec_sub(now, queue->lag_time));
        queue->lag_time = now;
        /* Snapshot under the lock; reading queue->lag after unlocking
         * could race with a concurrent update. */
        lag = queue->lag;
        spin_unlock(&queue->lock);
        return lag;
}

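/**
 * vmw_lag_lt - Check the fifo lag against a threshold.
 * @queue: The marker queue to check.
 * @us: The lag threshold in microseconds.
 *
 * Returns true if the current lag does not exceed @us microseconds.
 */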
static bool vmw_lag_lt(struct vmw_marker_queue *queue,
                       uint32_t us)
{
        struct timespec lag, cond;

        cond = ns_to_timespec((s64) us * 1000);
        lag = vmw_fifo_lag(queue);
        return (timespec_compare(&lag, &cond) < 1);
}

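/**
 * vmw_wait_lag - Throttle until the fifo lag drops below a threshold.
 * @dev_priv: The device private structure.
 * @queue: The marker queue to throttle on.
 * @us: The lag threshold in microseconds.
 *
 * While the lag exceeds @us, waits via vmw_wait_seqno() for the oldest
 * outstanding marker's seqno (or the current value of
 * @dev_priv->marker_seq if the queue is empty) and retires signaled
 * markers. Returns 0 once the lag is below the threshold, or the error
 * returned by vmw_wait_seqno().
 */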
int vmw_wait_lag(struct vmw_private *dev_priv,
                 struct vmw_marker_queue *queue, uint32_t us)
{
        struct vmw_marker *marker;
        uint32_t seqno;
        int ret;

        while (!vmw_lag_lt(queue, us)) {
                spin_lock(&queue->lock);
                if (list_empty(&queue->head)) {
                        seqno = atomic_read(&dev_priv->marker_seq);
                } else {
                        marker = list_first_entry(&queue->head,
                                                  struct vmw_marker, head);
                        seqno = marker->seqno;
                }
                spin_unlock(&queue->lock);

                ret = vmw_wait_seqno(dev_priv, false, seqno, true,
                                     3*HZ);

                if (unlikely(ret != 0))
                        return ret;

                (void) vmw_marker_pull(queue, seqno);
        }
        return 0;
}