/* linux/drivers/gpu/drm/msm/msm_gpu.h */
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
  17
  18#ifndef __MSM_GPU_H__
  19#define __MSM_GPU_H__
  20
  21#include <linux/clk.h>
  22#include <linux/regulator/consumer.h>
  23
  24#include "msm_drv.h"
  25#include "msm_fence.h"
  26#include "msm_ringbuffer.h"
  27
  28struct msm_gem_submit;
  29struct msm_gpu_perfcntr;
  30struct msm_gpu_state;
  31
/* Per-target configuration handed to msm_gpu_init() by the backend. */
struct msm_gpu_config {
	const char *ioname;	/* name of the MMIO resource to map */
	const char *irqname;	/* name of the IRQ resource to request */
	uint64_t va_start;	/* start of the GPU virtual address range */
	uint64_t va_end;	/* end of the GPU virtual address range */
	unsigned int nr_rings;	/* number of ringbuffers to create (<= MSM_GPU_MAX_RINGS) */
};
  39
/* So far, with hardware that I've seen to date, we can have:
 *  + zero, one, or two z180 2d cores
 *  + a3xx or a2xx 3d core, which share a common CP (the firmware
 *    for the CP seems to implement some different PM4 packet types
 *    but the basics of cmdstream submission are the same)
 *
 * Which means that the eventual complete "class" hierarchy, once
 * support for all past and present hw is in place, becomes:
 *  + msm_gpu
 *    + adreno_gpu
 *      + a3xx_gpu
 *      + a2xx_gpu
 *    + z180_gpu
 */
  54struct msm_gpu_funcs {
  55        int (*get_param)(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
  56        int (*hw_init)(struct msm_gpu *gpu);
  57        int (*pm_suspend)(struct msm_gpu *gpu);
  58        int (*pm_resume)(struct msm_gpu *gpu);
  59        void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
  60                        struct msm_file_private *ctx);
  61        void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
  62        irqreturn_t (*irq)(struct msm_gpu *irq);
  63        struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
  64        void (*recover)(struct msm_gpu *gpu);
  65        void (*destroy)(struct msm_gpu *gpu);
  66#ifdef CONFIG_DEBUG_FS
  67        /* show GPU status in debugfs: */
  68        void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
  69                        struct drm_printer *p);
  70        /* for generation specific debugfs: */
  71        int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
  72#endif
  73        int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
  74        struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
  75        int (*gpu_state_put)(struct msm_gpu_state *state);
  76};
  77
/*
 * Base class for a GPU instance; embedded inside the generation
 * specific struct (e.g. adreno_gpu).
 */
struct msm_gpu {
	const char *name;
	struct drm_device *dev;
	struct platform_device *pdev;
	const struct msm_gpu_funcs *funcs;	/* hw-specific callbacks */

	/* performance counters (hw & sw): */
	spinlock_t perf_lock;		/* protects the counter state below */
	bool perfcntr_active;		/* true while counter sampling is enabled */
	struct {
		bool active;		/* was the gpu active at the last sample */
		ktime_t time;		/* timestamp of the last sample */
	} last_sample;
	uint32_t totaltime, activetime;    /* sw counters */
	uint32_t last_cntrs[5];            /* hw counters */
	const struct msm_gpu_perfcntr *perfcntrs;	/* table provided by backend */
	uint32_t num_perfcntrs;
	struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
	int nr_rings;			/* how many entries of rb[] are valid */

	/* list of GEM active objects: */
	struct list_head active_list;

	/* does gpu need hw_init? */
	bool needs_hw_init;

	/* worker for handling active-list retiring: */
	struct work_struct retire_work;

	void __iomem *mmio;		/* mapped register space (see gpu_read/gpu_write) */
	int irq;

	struct msm_gem_address_space *aspace;	/* GPU virtual address space */

	/* Power Control: */
	struct regulator *gpu_reg, *gpu_cx;
	struct clk_bulk_data *grp_clks;
	int nr_clocks;
	struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
	uint32_t fast_rate;		/* clock rate in Hz — presumably the max; confirm in msm_gpu.c */

	/* Hang and Inactivity Detection:
	 */
#define DRM_MSM_INACTIVE_PERIOD   66 /* in ms (roughly four frames) */

#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
	struct timer_list hangcheck_timer;	/* periodic stuck-GPU check */
	struct work_struct recover_work;	/* worker for hang recovery */

	struct drm_gem_object *memptrs_bo;

	struct {
		struct devfreq *devfreq;
		u64 busy_cycles;	/* busy-cycle count at the last devfreq sample */
		ktime_t time;		/* timestamp of the last devfreq sample */
	} devfreq;

	/* last captured crash state; access is serialized by
	 * dev->struct_mutex (see msm_gpu_crashstate_get/put below)
	 */
	struct msm_gpu_state *crashstate;
};
 139
 140/* It turns out that all targets use the same ringbuffer size */
 141#define MSM_GPU_RINGBUFFER_SZ SZ_32K
 142#define MSM_GPU_RINGBUFFER_BLKSIZE 32
 143
 144#define MSM_GPU_RB_CNTL_DEFAULT \
 145                (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
 146                AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
 147
 148static inline bool msm_gpu_active(struct msm_gpu *gpu)
 149{
 150        int i;
 151
 152        for (i = 0; i < gpu->nr_rings; i++) {
 153                struct msm_ringbuffer *ring = gpu->rb[i];
 154
 155                if (ring->seqno > ring->memptrs->fence)
 156                        return true;
 157        }
 158
 159        return false;
 160}
 161
/* Perf-Counters:
 * The select_reg and select_val are just there for the benefit of the child
 * class that actually enables the perf counter... but the msm_gpu base class
 * will handle sampling/displaying the counters.
 */
 167
struct msm_gpu_perfcntr {
	uint32_t select_reg;	/* register that selects what this counter counts */
	uint32_t sample_reg;	/* register the counter value is read from */
	uint32_t select_val;	/* value written to select_reg to enable the counter */
	const char *name;	/* human-readable counter name */
};
 174
/* A userspace-created submission queue. */
struct msm_gpu_submitqueue {
	int id;			/* queue id handed back to userspace */
	u32 flags;
	u32 prio;		/* priority — presumably selects the ring; confirm in msm_submitqueue.c */
	int faults;		/* fault count — NOTE(review): likely hangs attributed to this queue; verify */
	struct list_head node;	/* link in the owner's queue list — TODO confirm owner */
	struct kref ref;	/* refcount; released via msm_submitqueue_put() */
};
 183
/* Snapshot of a single buffer object captured in a GPU crash state. */
struct msm_gpu_state_bo {
	u64 iova;	/* GPU virtual address of the buffer */
	size_t size;	/* size of the buffer in bytes */
	void *data;	/* copy of the buffer contents, if captured */
};
 189
/* Captured GPU state for post-mortem crash analysis (devcoredump). */
struct msm_gpu_state {
	struct kref ref;		/* refcount; dropped via funcs->gpu_state_put() */
	struct timespec64 time;		/* when the state was captured */

	/* per-ring snapshot at capture time */
	struct {
		u64 iova;		/* GPU address of the ring buffer */
		u32 fence;		/* last fence the GPU completed */
		u32 seqno;		/* last seqno submitted by the CPU */
		u32 rptr;		/* ring read pointer */
		u32 wptr;		/* ring write pointer */
		void *data;		/* copy of ring contents, if captured */
		int data_size;		/* size of data in bytes */
	} ring[MSM_GPU_MAX_RINGS];

	int nr_registers;		/* number of offset/value pairs in registers[] */
	u32 *registers;			/* register dump, stored as offset/value pairs — TODO confirm layout */

	u32 rbbm_status;		/* RBBM status register at capture time */

	char *comm;			/* command name of the offending process */
	char *cmd;			/* cmdline of the offending process — TODO confirm */

	int nr_bos;			/* number of entries in bos[] */
	struct msm_gpu_state_bo *bos;	/* captured buffer objects */
};
 215
 216static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
 217{
 218        msm_writel(data, gpu->mmio + (reg << 2));
 219}
 220
 221static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
 222{
 223        return msm_readl(gpu->mmio + (reg << 2));
 224}
 225
 226static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
 227{
 228        uint32_t val = gpu_read(gpu, reg);
 229
 230        val &= ~mask;
 231        gpu_write(gpu, reg, val | or);
 232}
 233
 234static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
 235{
 236        u64 val;
 237
 238        /*
 239         * Why not a readq here? Two reasons: 1) many of the LO registers are
 240         * not quad word aligned and 2) the GPU hardware designers have a bit
 241         * of a history of putting registers where they fit, especially in
 242         * spins. The longer a GPU family goes the higher the chance that
 243         * we'll get burned.  We could do a series of validity checks if we
 244         * wanted to, but really is a readq() that much better? Nah.
 245         */
 246
 247        /*
 248         * For some lo/hi registers (like perfcounters), the hi value is latched
 249         * when the lo is read, so make sure to read the lo first to trigger
 250         * that
 251         */
 252        val = (u64) msm_readl(gpu->mmio + (lo << 2));
 253        val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
 254
 255        return val;
 256}
 257
 258static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
 259{
 260        /* Why not a writeq here? Read the screed above */
 261        msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
 262        msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
 263}
 264
 265int msm_gpu_pm_suspend(struct msm_gpu *gpu);
 266int msm_gpu_pm_resume(struct msm_gpu *gpu);
 267
 268int msm_gpu_hw_init(struct msm_gpu *gpu);
 269
 270void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
 271void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
 272int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
 273                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
 274
 275void msm_gpu_retire(struct msm_gpu *gpu);
 276void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 277                struct msm_file_private *ctx);
 278
 279int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
 280                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
 281                const char *name, struct msm_gpu_config *config);
 282
 283void msm_gpu_cleanup(struct msm_gpu *gpu);
 284
 285struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
 286void __init adreno_register(void);
 287void __exit adreno_unregister(void);
 288
 289static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
 290{
 291        if (queue)
 292                kref_put(&queue->ref, msm_submitqueue_destroy);
 293}
 294
 295static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
 296{
 297        struct msm_gpu_state *state = NULL;
 298
 299        mutex_lock(&gpu->dev->struct_mutex);
 300
 301        if (gpu->crashstate) {
 302                kref_get(&gpu->crashstate->ref);
 303                state = gpu->crashstate;
 304        }
 305
 306        mutex_unlock(&gpu->dev->struct_mutex);
 307
 308        return state;
 309}
 310
 311static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
 312{
 313        mutex_lock(&gpu->dev->struct_mutex);
 314
 315        if (gpu->crashstate) {
 316                if (gpu->funcs->gpu_state_put(gpu->crashstate))
 317                        gpu->crashstate = NULL;
 318        }
 319
 320        mutex_unlock(&gpu->dev->struct_mutex);
 321}
 322
 323#endif /* __MSM_GPU_H__ */
 324