linux/drivers/gpu/drm/msm/mdp/mdp_kms.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "mdp_kms.h"

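/* Book-keeping for a synchronous wait, see mdp_irq_wait() below. */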
struct mdp_irq_wait {
	struct mdp_irq irq;
	int count;
};

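/*
 * wait_event is signalled by wait_irq() when a synchronous wait completes.
 * list_lock protects the per-mdp_kms irq_list, vblank_mask, cur_irq_mask
 * and in_irq state; update_irq() asserts that it is held.
 */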
static DECLARE_WAIT_QUEUE_HEAD(wait_event);

static DEFINE_SPINLOCK(list_lock);

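/*
 * Recompute the global interrupt mask (the vblank bits plus the bits wanted
 * by every registered mdp_irq) and program it into the hardware via the
 * chip-specific set_irqmask() op.  Caller must hold list_lock.
 */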
static void update_irq(struct mdp_kms *mdp_kms)
{
	struct mdp_irq *irq;
	uint32_t irqmask = mdp_kms->vblank_mask;

	assert_spin_locked(&list_lock);

	list_for_each_entry(irq, &mdp_kms->irq_list, node)
		irqmask |= irq->irqmask;

	mdp_kms->funcs->set_irqmask(mdp_kms, irqmask, mdp_kms->cur_irq_mask);
	mdp_kms->cur_irq_mask = irqmask;
}

/* if an mdp_irq's irqmask has changed, such as when the mdp5 crtc<->encoder
 * link changes, this must be called to figure out the new global irqmask
 */
void mdp_irq_update(struct mdp_kms *mdp_kms)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

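/*
 * Fan the hardware interrupt status out to the registered handlers
 * (typically called from the chip-specific irq handler).  Each handler
 * whose irqmask intersects status gets called back; list_lock is dropped
 * around the callback so a handler may (un)register other irqs.  While
 * in_irq is set, register/unregister defer the hardware mask update, and
 * it is applied once here after the dispatch loop.
 */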
void mdp_dispatch_irqs(struct mdp_kms *mdp_kms, uint32_t status)
{
	struct mdp_irq *handler, *n;
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	mdp_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp_kms->irq_list, node) {
		if (handler->irqmask & status) {
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp_kms->in_irq = false;
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

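/*
 * Enable or disable the given vblank interrupt bits.  The vblank mask is
 * tracked separately from the registered handlers and OR'd in by
 * update_irq() when the hardware mask is recomputed.
 */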
void mdp_update_vblank_mask(struct mdp_kms *mdp_kms, uint32_t mask, bool enable)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	if (enable)
		mdp_kms->vblank_mask |= mask;
	else
		mdp_kms->vblank_mask &= ~mask;
	update_irq(mdp_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

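/* Handler used by mdp_irq_wait(): signal the waiter that the irq fired. */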
static void wait_irq(struct mdp_irq *irq, uint32_t irqstatus)
{
	struct mdp_irq_wait *wait =
			container_of(irq, struct mdp_irq_wait, irq);

	wait->count--;
	wake_up_all(&wait_event);
}

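/*
 * Block until any interrupt in irqmask fires, or until a 100ms timeout
 * expires, by registering a temporary handler for the duration of the wait.
 */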
void mdp_irq_wait(struct mdp_kms *mdp_kms, uint32_t irqmask)
{
	struct mdp_irq_wait wait = {
		.irq = {
			.irq = wait_irq,
			.irqmask = irqmask,
		},
		.count = 1,
	};

	mdp_irq_register(mdp_kms, &wait.irq);
	wait_event_timeout(wait_event, (wait.count <= 0),
			msecs_to_jiffies(100));
	mdp_irq_unregister(mdp_kms, &wait.irq);
}

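/*
 * Register a handler to be called when any interrupt in irq->irqmask fires.
 * If we are not already inside mdp_dispatch_irqs() (which reprograms the
 * mask itself when it finishes), the hardware irqmask is updated here.
 *
 * Illustrative sketch of the usage pattern (my_handler and MY_IRQ_BIT are
 * made-up names for the example; mdp_irq_wait() above is an in-tree user):
 *
 *	static void my_handler(struct mdp_irq *irq, uint32_t irqstatus)
 *	{
 *		... react to the interrupt(s) reported in irqstatus ...
 *	}
 *
 *	static struct mdp_irq my_irq = {
 *		.irq = my_handler,
 *		.irqmask = MY_IRQ_BIT,
 *	};
 *
 *	mdp_irq_register(mdp_kms, &my_irq);
 */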
void mdp_irq_register(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
{
	unsigned long flags;
	bool needs_update = false;

	spin_lock_irqsave(&list_lock, flags);

	if (!irq->registered) {
		irq->registered = true;
		list_add(&irq->node, &mdp_kms->irq_list);
		needs_update = !mdp_kms->in_irq;
	}

	spin_unlock_irqrestore(&list_lock, flags);

	if (needs_update)
		mdp_irq_update(mdp_kms);
}

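/*
 * Remove a previously registered handler.  As with registration, the
 * hardware mask update is deferred when called from within irq dispatch.
 */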
void mdp_irq_unregister(struct mdp_kms *mdp_kms, struct mdp_irq *irq)
{
	unsigned long flags;
	bool needs_update = false;

	spin_lock_irqsave(&list_lock, flags);

	if (irq->registered) {
		irq->registered = false;
		list_del(&irq->node);
		needs_update = !mdp_kms->in_irq;
	}

	spin_unlock_irqrestore(&list_lock, flags);

	if (needs_update)
		mdp_irq_update(mdp_kms);
}