linux/drivers/gpu/drm/msm/msm_gpu.h
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __MSM_GPU_H__
#define __MSM_GPU_H__

#include <linux/clk.h>
#include <linux/regulator/consumer.h>

#include "msm_drv.h"
#include "msm_fence.h"
#include "msm_ringbuffer.h"

struct msm_gem_submit;
struct msm_gpu_perfcntr;

struct msm_gpu_config {
        const char *ioname;
        const char *irqname;
        uint64_t va_start;
        uint64_t va_end;
        unsigned int ringsz;
};

/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *    + z180_gpu
 */
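
/* The "class" hierarchy above is realized with the usual kernel idiom of
 * struct embedding plus container_of().  A minimal sketch (field and macro
 * names here are illustrative; see adreno_gpu.h for the real definitions):
 *
 *   struct adreno_gpu {
 *           struct msm_gpu base;
 *           // adreno-specific state...
 *   };
 *
 *   #define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
 *
 * so core code can pass around a struct msm_gpu * while the child class
 * recovers its own state with to_adreno_gpu().
 */
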
struct msm_gpu_funcs {
        int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
        int (*hw_init)(struct msm_gpu *gpu);
        int (*pm_suspend)(struct msm_gpu *gpu);
        int (*pm_resume)(struct msm_gpu *gpu);
        void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                        struct msm_file_private *ctx);
        void (*flush)(struct msm_gpu *gpu);
        irqreturn_t (*irq)(struct msm_gpu *gpu);
        uint32_t (*last_fence)(struct msm_gpu *gpu);
        void (*recover)(struct msm_gpu *gpu);
        void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
        /* show GPU status in debugfs: */
        void (*show)(struct msm_gpu *gpu, struct seq_file *m);
#endif
};
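
/* A backend typically fills in a static vtable and points gpu->funcs at it
 * when it constructs its msm_gpu.  An illustrative sketch (function names
 * are hypothetical; the real tables live in the adreno code):
 *
 *   static const struct msm_gpu_funcs foo_gpu_funcs = {
 *           .get_param  = foo_get_param,
 *           .hw_init    = foo_hw_init,
 *           .pm_suspend = foo_pm_suspend,
 *           .pm_resume  = foo_pm_resume,
 *           .submit     = foo_submit,
 *           .flush      = foo_flush,
 *           .irq        = foo_irq,
 *           .last_fence = foo_last_fence,
 *           .recover    = foo_recover,
 *           .destroy    = foo_destroy,
 *   };
 */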

struct msm_gpu {
        const char *name;
        struct drm_device *dev;
        struct platform_device *pdev;
        const struct msm_gpu_funcs *funcs;

        /* performance counters (hw & sw): */
        spinlock_t perf_lock;
        bool perfcntr_active;
        struct {
                bool active;
                ktime_t time;
        } last_sample;
        uint32_t totaltime, activetime;    /* sw counters */
        uint32_t last_cntrs[5];            /* hw counters */
        const struct msm_gpu_perfcntr *perfcntrs;
        uint32_t num_perfcntrs;

        /* ringbuffer: */
        struct msm_ringbuffer *rb;
        uint64_t rb_iova;

        /* list of GEM active objects: */
        struct list_head active_list;

        /* fencing: */
        struct msm_fence_context *fctx;

        /* does gpu need hw_init? */
        bool needs_hw_init;

        /* worker for handling active-list retiring: */
        struct work_struct retire_work;

        void __iomem *mmio;
        int irq;

        struct msm_gem_address_space *aspace;

        /* Power Control: */
        struct regulator *gpu_reg, *gpu_cx;
        struct clk **grp_clks;
        int nr_clocks;
        struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
        uint32_t fast_rate, bus_freq;

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
        struct msm_bus_scale_pdata *bus_scale_table;
        uint32_t bsc;
#endif

        /* Hang and Inactivity Detection: */
#define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
        struct timer_list hangcheck_timer;
        uint32_t hangcheck_fence;
        struct work_struct recover_work;

        struct list_head submit_list;
};

/* gpu is active while the hw has not yet caught up to the last submitted fence: */
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
        return gpu->fctx->last_fence > gpu->funcs->last_fence(gpu);
}

/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter; the msm_gpu base class
 * handles sampling/displaying the counters.
 */

struct msm_gpu_perfcntr {
        uint32_t select_reg;
        uint32_t sample_reg;
        uint32_t select_val;
        const char *name;
};
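
/* For example, a child class might describe its counters with a table like
 * the sketch below (register names and select values are purely
 * illustrative) and point gpu->perfcntrs / gpu->num_perfcntrs at it.  The
 * child class programs select_reg with select_val when enabling a counter,
 * and the base class periodically samples sample_reg and displays the
 * result:
 *
 *   static const struct msm_gpu_perfcntr foo_perfcntrs[] = {
 *           { REG_FOO_PERFCNT_SELECT,  REG_FOO_PERFCNT_VALUE,  0x1, "alu-active" },
 *           { REG_FOO_PERFCNT2_SELECT, REG_FOO_PERFCNT2_VALUE, 0x5, "tex-busy"   },
 *   };
 */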

/* Register accessors; 'reg' is a dword index, hence the << 2 to convert it
 * to a byte offset from the mmio base:
 */
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
        msm_writel(data, gpu->mmio + (reg << 2));
}

static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
{
        return msm_readl(gpu->mmio + (reg << 2));
}

static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
{
        uint32_t val = gpu_read(gpu, reg);

        val &= ~mask;
        gpu_write(gpu, reg, val | or);
}

static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
{
        u64 val;

        /*
         * Why not a readq here? Two reasons: 1) many of the LO registers are
         * not quad word aligned and 2) the GPU hardware designers have a bit
         * of a history of putting registers where they fit, especially in
         * spins. The longer a GPU family goes the higher the chance that
         * we'll get burned.  We could do a series of validity checks if we
         * wanted to, but really is a readq() that much better? Nah.
         */

        /*
         * For some lo/hi registers (like perfcounters), the hi value is latched
         * when the lo is read, so make sure to read the lo first to trigger
         * that
         */
        val = (u64) msm_readl(gpu->mmio + (lo << 2));
        val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);

        return val;
}

static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
{
        /* Why not a writeq here? Read the screed above */
        msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
        msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
}
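
/* Typical usage is simply (register names here are hypothetical):
 *
 *   u64 ts = gpu_read64(gpu, REG_FOO_TIMESTAMP_LO, REG_FOO_TIMESTAMP_HI);
 *   gpu_write64(gpu, REG_FOO_ADDR_LO, REG_FOO_ADDR_HI, iova);
 *
 * i.e. the caller always passes the LO register first, matching the
 * read-lo-then-hi ordering described above.
 */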

int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);

int msm_gpu_hw_init(struct msm_gpu *gpu);

void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
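
/* A consumer of the perf counters would typically follow an (illustrative)
 * sequence like the one below: start counting, then periodically pull the
 * sw activetime/totaltime counters plus up to ncntrs hw counter values:
 *
 *   uint32_t active, total, cntrs[5];
 *   int ret;
 *
 *   msm_gpu_perfcntr_start(gpu);
 *   ...
 *   ret = msm_gpu_perfcntr_sample(gpu, &active, &total, ARRAY_SIZE(cntrs), cntrs);
 *   ...
 *   msm_gpu_perfcntr_stop(gpu);
 */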

void msm_gpu_retire(struct msm_gpu *gpu);
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx);

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config);
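
/* A backend's init path wires everything together roughly like this
 * (values and names below are only illustrative; see adreno_gpu_init() for
 * the real thing):
 *
 *   static struct msm_gpu_config foo_config = {
 *           .ioname   = "kgsl_3d0_reg_memory",
 *           .irqname  = "kgsl_3d0_irq",
 *           .va_start = SZ_16M,
 *           .va_end   = 0xffffffff,
 *           .ringsz   = SZ_32K,
 *   };
 *
 *   ret = msm_gpu_init(drm, pdev, &foo_gpu->base, &foo_gpu_funcs,
 *                   "foo", &foo_config);
 */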

void msm_gpu_cleanup(struct msm_gpu *gpu);

struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);

#endif /* __MSM_GPU_H__ */