linux/drivers/gpu/drm/nouveau/nvkm/subdev/timer/base.c
/*
 * Copyright 2012 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */
#include "priv.h"

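/* Poll helper: check an in-progress wait for completion or timeout.
 *
 * Returns the time in nanoseconds that the wait has been in progress, or
 * -ETIMEDOUT once wait->limit has been exceeded.  If the timer value stops
 * advancing across many consecutive polls, the timer is reported as stalled
 * and -ETIMEDOUT is returned as well.
 */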
s64
nvkm_timer_wait_test(struct nvkm_timer_wait *wait)
{
	struct nvkm_subdev *subdev = &wait->tmr->subdev;
	u64 time = nvkm_timer_read(wait->tmr);

	if (wait->reads == 0) {
		wait->time0 = time;
		wait->time1 = time;
	}

	if (wait->time1 == time) {
		if (wait->reads++ == 16) {
			nvkm_fatal(subdev, "stalled at %016llx\n", time);
			return -ETIMEDOUT;
		}
	} else {
		wait->time1 = time;
		wait->reads = 1;
	}

	if (wait->time1 - wait->time0 > wait->limit)
		return -ETIMEDOUT;

	return wait->time1 - wait->time0;
}

void
nvkm_timer_wait_init(struct nvkm_device *device, u64 nsec,
		     struct nvkm_timer_wait *wait)
{
	wait->tmr = device->timer;
	wait->limit = nsec;
	wait->reads = 0;
}
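
/* Example usage (illustrative sketch only, not part of the original file):
 * a register poll built directly on nvkm_timer_wait_init()/_test().  The
 * helper name nvkm_example_wait_idle() and the 0x000000 register offset are
 * hypothetical; in-tree code normally reaches these helpers through the
 * nvkm_msec()/nvkm_usec() wrappers in the timer header.
 */
#if 0
static int
nvkm_example_wait_idle(struct nvkm_device *device)
{
	struct nvkm_timer_wait wait;

	/* Allow up to 2ms for the (hypothetical) busy bit to clear. */
	nvkm_timer_wait_init(device, 2000000, &wait);
	do {
		if (!(nvkm_rd32(device, 0x000000) & 0x00000001))
			return 0;
	} while (nvkm_timer_wait_test(&wait) >= 0);

	return -ETIMEDOUT;
}
#endif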

u64
nvkm_timer_read(struct nvkm_timer *tmr)
{
	return tmr->func->read(tmr);
}

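/* Fire any alarms whose timestamps have been reached, re-arming the
 * hardware alarm for the earliest one still pending.  Typically called
 * from the chip-specific timer interrupt handler.
 */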
void
nvkm_timer_alarm_trigger(struct nvkm_timer *tmr)
{
	struct nvkm_alarm *alarm, *atemp;
	unsigned long flags;
	LIST_HEAD(exec);

	/* Process pending alarms. */
	spin_lock_irqsave(&tmr->lock, flags);
	list_for_each_entry_safe(alarm, atemp, &tmr->alarms, head) {
		/* Have we hit the earliest alarm that hasn't gone off? */
		if (alarm->timestamp > nvkm_timer_read(tmr)) {
			/* Schedule it.  If we didn't race, we're done. */
			tmr->func->alarm_init(tmr, alarm->timestamp);
			if (alarm->timestamp > nvkm_timer_read(tmr))
				break;
		}

		/* Move to completed list.  We'll drop the lock before
		 * executing the callback so it can reschedule itself.
		 */
		list_del_init(&alarm->head);
		list_add(&alarm->exec, &exec);
	}

	/* Shut down interrupt if no more pending alarms. */
	if (list_empty(&tmr->alarms))
		tmr->func->alarm_fini(tmr);
	spin_unlock_irqrestore(&tmr->lock, flags);

	/* Execute completed callbacks. */
	list_for_each_entry_safe(alarm, atemp, &exec, exec) {
		list_del(&alarm->exec);
		alarm->func(alarm);
	}
}

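/* Schedule (or reschedule) an alarm to fire roughly nsec nanoseconds from
 * now.  Passing nsec == 0 removes the alarm from the pending list, which
 * cancels it.
 */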
void
nvkm_timer_alarm(struct nvkm_timer *tmr, u32 nsec, struct nvkm_alarm *alarm)
{
	struct nvkm_alarm *list;
	unsigned long flags;

	/* Remove alarm from pending list.
	 *
	 * This both protects against the corruption of the list,
	 * and implements alarm rescheduling/cancellation.
	 */
	spin_lock_irqsave(&tmr->lock, flags);
	list_del_init(&alarm->head);

	if (nsec) {
		/* Insert into pending list, ordered earliest to latest. */
		alarm->timestamp = nvkm_timer_read(tmr) + nsec;
		list_for_each_entry(list, &tmr->alarms, head) {
			if (list->timestamp > alarm->timestamp)
				break;
		}

		list_add_tail(&alarm->head, &list->head);

		/* Update HW if this is now the earliest alarm. */
		list = list_first_entry(&tmr->alarms, typeof(*list), head);
		if (list == alarm) {
			tmr->func->alarm_init(tmr, alarm->timestamp);
			/* This shouldn't happen if callers aren't stupid.
			 *
			 * Worst case scenario is that it'll take roughly
			 * 4 seconds for the next alarm to trigger.
			 */
			WARN_ON(alarm->timestamp <= nvkm_timer_read(tmr));
		}
	}
	spin_unlock_irqrestore(&tmr->lock, flags);
}
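
/* Example usage (illustrative sketch only, not part of the original file):
 * a self-rescheduling periodic callback built on nvkm_timer_alarm().  The
 * struct nvkm_example and its functions are hypothetical; nvkm_alarm_init()
 * is assumed to be the usual initialiser from the timer header.
 */
#if 0
struct nvkm_example {
	struct nvkm_timer *tmr;
	struct nvkm_alarm alarm;
};

static void
nvkm_example_tick(struct nvkm_alarm *alarm)
{
	struct nvkm_example *ex = container_of(alarm, typeof(*ex), alarm);

	/* Periodic work would go here; then re-arm for one second from now. */
	nvkm_timer_alarm(ex->tmr, 1000000000, &ex->alarm);
}

static void
nvkm_example_start(struct nvkm_example *ex)
{
	nvkm_alarm_init(&ex->alarm, nvkm_example_tick);
	nvkm_timer_alarm(ex->tmr, 1000000000, &ex->alarm);
}
#endif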

static void
nvkm_timer_intr(struct nvkm_subdev *subdev)
{
	struct nvkm_timer *tmr = nvkm_timer(subdev);
	tmr->func->intr(tmr);
}

static int
nvkm_timer_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_timer *tmr = nvkm_timer(subdev);
	tmr->func->alarm_fini(tmr);
	return 0;
}

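/* Synchronise the hardware clock with host time and replay any alarms that
 * may have expired (or been left pending) while the subdev was down.
 */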
static int
nvkm_timer_init(struct nvkm_subdev *subdev)
{
	struct nvkm_timer *tmr = nvkm_timer(subdev);
	if (tmr->func->init)
		tmr->func->init(tmr);
	tmr->func->time(tmr, ktime_to_ns(ktime_get()));
	nvkm_timer_alarm_trigger(tmr);
	return 0;
}

static void *
nvkm_timer_dtor(struct nvkm_subdev *subdev)
{
	return nvkm_timer(subdev);
}

static const struct nvkm_subdev_func
nvkm_timer = {
	.dtor = nvkm_timer_dtor,
	.init = nvkm_timer_init,
	.fini = nvkm_timer_fini,
	.intr = nvkm_timer_intr,
};

int
nvkm_timer_new_(const struct nvkm_timer_func *func, struct nvkm_device *device,
		int index, struct nvkm_timer **ptmr)
{
	struct nvkm_timer *tmr;

	if (!(tmr = *ptmr = kzalloc(sizeof(*tmr), GFP_KERNEL)))
		return -ENOMEM;

	nvkm_subdev_ctor(&nvkm_timer, device, index, &tmr->subdev);
	tmr->func = func;
	INIT_LIST_HEAD(&tmr->alarms);
	spin_lock_init(&tmr->lock);
	return 0;
}
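
/* Example (illustrative sketch only, not part of the original file): how a
 * chip-specific backend would plug into nvkm_timer_new_().  The function
 * table members match those used above; the examplechip_timer_* callbacks
 * are hypothetical stand-ins (definitions omitted) for a real implementation
 * such as the NV04 one.
 */
#if 0
static const struct nvkm_timer_func
examplechip_timer = {
	.intr = examplechip_timer_intr,
	.read = examplechip_timer_read,
	.time = examplechip_timer_time,
	.alarm_init = examplechip_timer_alarm_init,
	.alarm_fini = examplechip_timer_alarm_fini,
};

int
examplechip_timer_new(struct nvkm_device *device, int index,
		      struct nvkm_timer **ptmr)
{
	return nvkm_timer_new_(&examplechip_timer, device, index, ptmr);
}
#endif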