linux/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/**************************************************************************
 *
 * Copyright 2009-2015 VMware, Inc., Palo Alto, CA., USA
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 **************************************************************************/

#include <drm/drmP.h>
#include "vmwgfx_drv.h"

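/*
 * Seqnos are unsigned 32-bit values that may wrap. VMW_FENCE_WRAP bounds
 * how far apart two seqnos may be while still being ordered by unsigned
 * subtraction: a difference below 1 << 24 means the fence has passed,
 * anything larger is treated as wraparound or a stale fence (see
 * vmw_seqno_passed()).
 */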
#define VMW_FENCE_WRAP (1 << 24)

/**
 * vmw_thread_fn - Deferred (process context) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the deferred part of irq processing.
 * The function is guaranteed to run at least once after the
 * vmw_irq_handler has returned with IRQ_WAKE_THREAD.
 */
static irqreturn_t vmw_thread_fn(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        irqreturn_t ret = IRQ_NONE;

        if (test_and_clear_bit(VMW_IRQTHREAD_FENCE,
                               dev_priv->irqthread_pending)) {
                vmw_fences_update(dev_priv->fman);
                wake_up_all(&dev_priv->fence_queue);
                ret = IRQ_HANDLED;
        }

        if (test_and_clear_bit(VMW_IRQTHREAD_CMDBUF,
                               dev_priv->irqthread_pending)) {
                vmw_cmdbuf_irqthread(dev_priv->cman);
                ret = IRQ_HANDLED;
        }

        return ret;
}

/**
 * vmw_irq_handler - Hard (atomic context) irq handler
 *
 * @irq: irq number
 * @arg: Closure argument. Pointer to a struct drm_device cast to void *
 *
 * This function implements the quick part of irq processing.
 * The function performs fast actions like clearing the device interrupt
 * flags and also reasonably quick actions like waking processes waiting for
 * FIFO space. Other IRQ actions are deferred to the IRQ thread.
 */
static irqreturn_t vmw_irq_handler(int irq, void *arg)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status, masked_status;
        irqreturn_t ret = IRQ_HANDLED;

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        masked_status = status & READ_ONCE(dev_priv->irq_mask);

        if (likely(status))
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        if (!status)
                return IRQ_NONE;

        if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        if ((masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                              SVGA_IRQFLAG_FENCE_GOAL)) &&
            !test_and_set_bit(VMW_IRQTHREAD_FENCE, dev_priv->irqthread_pending))
                ret = IRQ_WAKE_THREAD;

        if ((masked_status & (SVGA_IRQFLAG_COMMAND_BUFFER |
                              SVGA_IRQFLAG_ERROR)) &&
            !test_and_set_bit(VMW_IRQTHREAD_CMDBUF,
                              dev_priv->irqthread_pending))
                ret = IRQ_WAKE_THREAD;

        return ret;
}

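/*
 * A minimal sketch of the handoff pattern above (hypothetical names, not
 * part of the driver): the hard handler acks the device and sets a
 * pending bit, waking the irq thread only on the bit's 0 -> 1 transition;
 * the thread clears the bit before doing the slow, sleepable work, so any
 * interrupt arriving after the last clear is still seen at least once.
 */
static unsigned long example_pending[1];

static irqreturn_t __maybe_unused example_hard_irq(int irq, void *arg)
{
        /* Ack hardware here, then queue deferred work exactly once. */
        if (!test_and_set_bit(0, example_pending))
                return IRQ_WAKE_THREAD;

        return IRQ_HANDLED;
}

static irqreturn_t __maybe_unused example_thread_irq(int irq, void *arg)
{
        if (!test_and_clear_bit(0, example_pending))
                return IRQ_NONE;

        /* Slow, sleepable work runs here in process context. */
        return IRQ_HANDLED;
}
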
static bool vmw_fifo_idle(struct vmw_private *dev_priv, uint32_t seqno)
{
        /* @seqno is unused; the signature must match vmw_seqno_passed()
         * so both can serve as wait_condition in vmw_fallback_wait(). */
        return (vmw_read(dev_priv, SVGA_REG_BUSY) == 0);
}

void vmw_update_seqno(struct vmw_private *dev_priv,
                      struct vmw_fifo_state *fifo_state)
{
        u32 *fifo_mem = dev_priv->mmio_virt;
        uint32_t seqno = vmw_mmio_read(fifo_mem + SVGA_FIFO_FENCE);

        if (dev_priv->last_read_seqno != seqno) {
                dev_priv->last_read_seqno = seqno;
                vmw_marker_pull(&fifo_state->marker_queue, seqno);
                vmw_fences_update(dev_priv->fman);
        }
}

bool vmw_seqno_passed(struct vmw_private *dev_priv,
                      uint32_t seqno)
{
        struct vmw_fifo_state *fifo_state;
        bool ret;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        fifo_state = &dev_priv->fifo;
        vmw_update_seqno(dev_priv, fifo_state);
        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return true;

        if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE) &&
            vmw_fifo_idle(dev_priv, seqno))
                return true;

        /*
         * Then check if the seqno is higher than what we've actually
         * emitted. If so, the fence is stale and signaled.
         */
        ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
               > VMW_FENCE_WRAP);

        return ret;
}

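/*
 * A self-contained illustration (not driver API) of the wrap-safe test
 * used twice above. Because the subtraction is done in unsigned 32-bit
 * arithmetic it stays correct across the 0xffffffff -> 0 wrap:
 * e.g. 0x00000002 - 0xfffffffe == 0x00000004 (mod 2^32), so a fence
 * emitted just before the wrap still counts as passed.
 */
static bool __maybe_unused example_seqno_passed(u32 last_read, u32 seqno)
{
        return (last_read - seqno) < VMW_FENCE_WRAP;
}
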
int vmw_fallback_wait(struct vmw_private *dev_priv,
                      bool lazy,
                      bool fifo_idle,
                      uint32_t seqno,
                      bool interruptible,
                      unsigned long timeout)
{
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;

        uint32_t count = 0;
        uint32_t signal_seq;
        int ret;
        unsigned long end_jiffies = jiffies + timeout;
        bool (*wait_condition)(struct vmw_private *, uint32_t);
        DEFINE_WAIT(__wait);

        wait_condition = (fifo_idle) ? &vmw_fifo_idle :
                &vmw_seqno_passed;

        /*
         * Block command submission while waiting for idle.
         */
        if (fifo_idle) {
                down_read(&fifo_state->rwsem);
                if (dev_priv->cman) {
                        ret = vmw_cmdbuf_idle(dev_priv->cman, interruptible,
                                              10*HZ);
                        if (ret)
                                goto out_err;
                }
        }

        signal_seq = atomic_read(&dev_priv->marker_seq);
        ret = 0;

        for (;;) {
                prepare_to_wait(&dev_priv->fence_queue, &__wait,
                                (interruptible) ?
                                TASK_INTERRUPTIBLE : TASK_UNINTERRUPTIBLE);
                if (wait_condition(dev_priv, seqno))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        DRM_ERROR("SVGA device lockup.\n");
                        break;
                }
                if (lazy)
                        schedule_timeout(1);
                else if ((++count & 0x0F) == 0) {
                        /*
                         * FIXME: Use schedule_hr_timeout here for
                         * newer kernels and lower CPU utilization.
                         */
                        __set_current_state(TASK_RUNNING);
                        schedule();
                        __set_current_state((interruptible) ?
                                            TASK_INTERRUPTIBLE :
                                            TASK_UNINTERRUPTIBLE);
                }
                if (interruptible && signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(&dev_priv->fence_queue, &__wait);
        if (ret == 0 && fifo_idle) {
                u32 *fifo_mem = dev_priv->mmio_virt;

                vmw_mmio_write(signal_seq, fifo_mem + SVGA_FIFO_FENCE);
        }
        wake_up_all(&dev_priv->fence_queue);
out_err:
        if (fifo_idle)
                up_read(&fifo_state->rwsem);

        return ret;
}

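/*
 * The loop above is the classic open-coded wait: queue on the waitqueue
 * first, then test the condition, so a wakeup racing with the test is not
 * lost. A minimal generic sketch (hypothetical helper, interruptible case
 * only):
 */
static int __maybe_unused example_poll_wait(wait_queue_head_t *wq,
                                            bool (*cond)(void *), void *arg,
                                            unsigned long timeout)
{
        unsigned long end_jiffies = jiffies + timeout;
        int ret = 0;
        DEFINE_WAIT(__wait);

        for (;;) {
                /* Queue first, then test: wakeups in between are kept. */
                prepare_to_wait(wq, &__wait, TASK_INTERRUPTIBLE);
                if (cond(arg))
                        break;
                if (time_after_eq(jiffies, end_jiffies)) {
                        ret = -EBUSY;   /* timed out */
                        break;
                }
                schedule_timeout(1);    /* sleep one tick or until woken */
                if (signal_pending(current)) {
                        ret = -ERESTARTSYS;
                        break;
                }
        }
        finish_wait(wq, &__wait);

        return ret;
}
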
void vmw_generic_waiter_add(struct vmw_private *dev_priv,
                            u32 flag, int *waiter_count)
{
        spin_lock_bh(&dev_priv->waiter_lock);
        if ((*waiter_count)++ == 0) {
                /* Ack any stale pending instance of @flag, then unmask it. */
                outl(flag, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                dev_priv->irq_mask |= flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_bh(&dev_priv->waiter_lock);
}

void vmw_generic_waiter_remove(struct vmw_private *dev_priv,
                               u32 flag, int *waiter_count)
{
        spin_lock_bh(&dev_priv->waiter_lock);
        if (--(*waiter_count) == 0) {
                dev_priv->irq_mask &= ~flag;
                vmw_write(dev_priv, SVGA_REG_IRQMASK, dev_priv->irq_mask);
        }
        spin_unlock_bh(&dev_priv->waiter_lock);
}

void vmw_seqno_waiter_add(struct vmw_private *dev_priv)
{
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
                               &dev_priv->fence_queue_waiters);
}

void vmw_seqno_waiter_remove(struct vmw_private *dev_priv)
{
        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_ANY_FENCE,
                                  &dev_priv->fence_queue_waiters);
}

void vmw_goal_waiter_add(struct vmw_private *dev_priv)
{
        vmw_generic_waiter_add(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
                               &dev_priv->goal_queue_waiters);
}

void vmw_goal_waiter_remove(struct vmw_private *dev_priv)
{
        vmw_generic_waiter_remove(dev_priv, SVGA_IRQFLAG_FENCE_GOAL,
                                  &dev_priv->goal_queue_waiters);
}

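/*
 * The waiter helpers above implement refcounted interrupt enabling: the
 * device interrupt source is unmasked when the first waiter arrives and
 * masked again when the last one leaves, so the device stays silent while
 * nobody is blocked on it. They are always used in add/remove pairs
 * around a sleep, as in this condensed sketch (the actual wait is elided):
 */
static void __maybe_unused example_paired_wait(struct vmw_private *dev_priv,
                                               uint32_t seqno)
{
        vmw_seqno_waiter_add(dev_priv);
        /* ... sleep on dev_priv->fence_queue until @seqno passes ... */
        vmw_seqno_waiter_remove(dev_priv);
}
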
int vmw_wait_seqno(struct vmw_private *dev_priv,
                   bool lazy, uint32_t seqno,
                   bool interruptible, unsigned long timeout)
{
        long ret;
        struct vmw_fifo_state *fifo = &dev_priv->fifo;

        if (likely(dev_priv->last_read_seqno - seqno < VMW_FENCE_WRAP))
                return 0;

        if (likely(vmw_seqno_passed(dev_priv, seqno)))
                return 0;

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);

        if (!(fifo->capabilities & SVGA_FIFO_CAP_FENCE))
                return vmw_fallback_wait(dev_priv, lazy, true, seqno,
                                         interruptible, timeout);

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return vmw_fallback_wait(dev_priv, lazy, false, seqno,
                                         interruptible, timeout);

        vmw_seqno_waiter_add(dev_priv);

        if (interruptible)
                ret = wait_event_interruptible_timeout
                    (dev_priv->fence_queue,
                     vmw_seqno_passed(dev_priv, seqno),
                     timeout);
        else
                ret = wait_event_timeout
                    (dev_priv->fence_queue,
                     vmw_seqno_passed(dev_priv, seqno),
                     timeout);

        vmw_seqno_waiter_remove(dev_priv);

        if (unlikely(ret == 0))
                ret = -EBUSY;
        else if (likely(ret > 0))
                ret = 0;

        return ret;
}

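/*
 * Note on the mapping above: wait_event_timeout() and
 * wait_event_interruptible_timeout() return 0 on timeout, the number of
 * jiffies left (>= 1) when the condition became true, and -ERESTARTSYS if
 * a signal interrupted the wait. A condensed restatement (illustrative
 * helper, not driver API):
 */
static int __maybe_unused example_map_wait_ret(long ret)
{
        if (ret == 0)           /* timed out */
                return -EBUSY;
        if (ret > 0)            /* woken with time to spare */
                return 0;

        return ret;             /* e.g. -ERESTARTSYS */
}
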
static void vmw_irq_preinstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        /* Ack any interrupts pending from before the handler existed. */
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}

void vmw_irq_uninstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        if (!dev->irq_enabled)
                return;

        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        dev->irq_enabled = false;
        free_irq(dev->irq, dev);
}

/**
 * vmw_irq_install - Install the irq handlers
 *
 * @dev:  Pointer to the drm device.
 * @irq:  The irq number.
 * Return:  Zero if successful. Negative number otherwise.
 */
int vmw_irq_install(struct drm_device *dev, int irq)
{
        int ret;

        if (dev->irq_enabled)
                return -EBUSY;

        vmw_irq_preinstall(dev);

        ret = request_threaded_irq(irq, vmw_irq_handler, vmw_thread_fn,
                                   IRQF_SHARED, VMWGFX_DRIVER_NAME, dev);
        if (ret < 0)
                return ret;

        dev->irq_enabled = true;
        dev->irq = irq;

        return ret;
}

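/*
 * A condensed sketch of the expected load/unload pairing (hypothetical
 * caller; error handling beyond irq setup elided): install the shared,
 * threaded handler once at load time, and let vmw_irq_uninstall() mask
 * and ack everything before free_irq() at unload.
 */
static int __maybe_unused example_load_unload(struct drm_device *dev, int irq)
{
        int ret;

        ret = vmw_irq_install(dev, irq);
        if (ret)
                return ret;

        /* ... normal device operation ... */

        vmw_irq_uninstall(dev);

        return 0;
}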