linux/drivers/gpu/drm/vc4/vc4_drv.h
/*
 * Copyright (C) 2015 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/reservation.h>
#include <drm/drmP.h>
#include <drm/drm_encoder.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_atomic.h>
#include <drm/drm_syncobj.h>

#include "uapi/drm/vc4_drm.h"

/* Don't forget to update vc4_bo.c: bo_type_names[] when adding to
 * this.
 */
enum vc4_kernel_bo_type {
        /* Any kernel allocation (gem_create_object hook) before it
         * gets another type set.
         */
        VC4_BO_TYPE_KERNEL,
        VC4_BO_TYPE_V3D,
        VC4_BO_TYPE_V3D_SHADER,
        VC4_BO_TYPE_DUMB,
        VC4_BO_TYPE_BIN,
        VC4_BO_TYPE_RCL,
        VC4_BO_TYPE_BCL,
        VC4_BO_TYPE_KERNEL_CACHE,
        VC4_BO_TYPE_COUNT
};

/* Performance monitor object. The perfmon lifetime is controlled by userspace
 * using perfmon related ioctls. A perfmon can be attached to a submit_cl
 * request, and when this is the case, HW perf counters will be activated just
 * before the submit_cl is submitted to the GPU and disabled when the job is
 * done. This way, only events related to a specific job will be counted.
 */
struct vc4_perfmon {
        /* Tracks the number of users of the perfmon, when this counter reaches
         * zero the perfmon is destroyed.
         */
        refcount_t refcnt;

        /* Number of counters activated in this perfmon instance
         * (should be less than DRM_VC4_MAX_PERF_COUNTERS).
         */
        u8 ncounters;

        /* Events counted by the HW perf counters. */
        u8 events[DRM_VC4_MAX_PERF_COUNTERS];

        /* Storage for counter values. Counters are incremented by the HW
         * perf counter values every time the perfmon is attached to a GPU job.
         * This way, perfmon users don't have to retrieve the results after
         * each job if they want to track events covering several submissions.
         * Note that counter values can't be reset, but you can fake a reset by
         * destroying the perfmon and creating a new one.
         */
        u64 counters[0];
};
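
/* Illustrative sketch (not taken from this header): because counters[] is a
 * trailing variable-length array, creating a perfmon means sizing the
 * allocation for the number of active counters, along these lines:
 *
 *      perfmon = kzalloc(sizeof(*perfmon) + ncounters * sizeof(u64),
 *                        GFP_KERNEL);
 *      if (!perfmon)
 *              return -ENOMEM;
 *      refcount_set(&perfmon->refcnt, 1);
 *      perfmon->ncounters = ncounters;
 */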

struct vc4_dev {
        struct drm_device *dev;

        struct vc4_hdmi *hdmi;
        struct vc4_hvs *hvs;
        struct vc4_v3d *v3d;
        struct vc4_dpi *dpi;
        struct vc4_dsi *dsi1;
        struct vc4_vec *vec;

        struct vc4_hang_state *hang_state;

        /* The kernel-space BO cache.  Tracks buffers that have been
         * unreferenced by all other users (refcounts of 0!) but not
         * yet freed, so we can do cheap allocations.
         */
        struct vc4_bo_cache {
                /* Array of list heads for entries in the BO cache,
                 * based on number of pages, so we can do O(1) lookups
                 * in the cache when allocating.
                 */
                struct list_head *size_list;
                uint32_t size_list_size;

                /* List of all BOs in the cache, ordered by age, so we
                 * can do O(1) lookups when trying to free old
                 * buffers.
                 */
                struct list_head time_list;
                struct work_struct time_work;
                struct timer_list time_timer;
        } bo_cache;
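
        /* Illustrative sketch (an assumption, with bo_lock held): an
         * allocation first tries to reuse a cached BO of the same page
         * count, which is what makes the size_list lookup O(1):
         *
         *      struct list_head *head = &vc4->bo_cache.size_list[npages];
         *      (bounds check of npages against size_list_size omitted)
         *      if (!list_empty(head))
         *              bo = list_first_entry(head, struct vc4_bo, size_head);
         */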

        u32 num_labels;
        struct vc4_label {
                const char *name;
                u32 num_allocated;
                u32 size_allocated;
        } *bo_labels;

        /* Protects bo_cache and bo_labels. */
        struct mutex bo_lock;

        /* Purgeable BO pool. All BOs in this pool can have their memory
         * reclaimed if the driver is unable to allocate new BOs. We also
         * keep stats related to the purge mechanism here.
         */
        struct {
                struct list_head list;
                unsigned int num;
                size_t size;
                unsigned int purged_num;
                size_t purged_size;
                struct mutex lock;
        } purgeable;
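
        /* Illustrative userspace sketch (an assumption, based on the madvise
         * uAPI in uapi/drm/vc4_drm.h): a client volunteers a BO for reclaim
         * and later checks whether its contents survived:
         *
         *      struct drm_vc4_gem_madvise arg = {
         *              .handle = bo_handle,
         *              .madv = VC4_MADV_DONTNEED,
         *      };
         *      drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
         *      (later, before reusing the BO)
         *      arg.madv = VC4_MADV_WILLNEED;
         *      drmIoctl(fd, DRM_IOCTL_VC4_GEM_MADVISE, &arg);
         *      if (!arg.retained)
         *              (the contents were purged and must be reinitialized)
         */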

        uint64_t dma_fence_context;

        /* Sequence number for the last job queued in bin_job_list.
         * Starts at 0 (no jobs emitted).
         */
        uint64_t emit_seqno;

        /* Sequence number for the last completed job on the GPU.
         * Starts at 0 (no jobs completed).
         */
        uint64_t finished_seqno;
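
        /* Illustrative note: a job identified by seqno N is complete once
         * finished_seqno >= N, so a completion poll (wanted_seqno being a
         * placeholder name) reduces to:
         *
         *      bool done = vc4->finished_seqno >= wanted_seqno;
         */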

        /* List of all struct vc4_exec_info for jobs to be executed in
         * the binner.  The first job in the list is the one currently
         * programmed into ct0ca for execution.
         */
        struct list_head bin_job_list;

        /* List of all struct vc4_exec_info for jobs that have
         * completed binning and are ready for rendering.  The first
         * job in the list is the one currently programmed into ct1ca
         * for execution.
         */
        struct list_head render_job_list;

        /* List of the finished vc4_exec_infos waiting to be freed by
         * job_done_work.
         */
        struct list_head job_done_list;
        /* Spinlock used to synchronize the job_list and seqno
         * accesses between the IRQ handler and GEM ioctls.
         */
        spinlock_t job_lock;
        wait_queue_head_t job_wait_queue;
        struct work_struct job_done_work;

        /* Used to track the active perfmon if any. Access to this field is
         * protected by job_lock.
         */
        struct vc4_perfmon *active_perfmon;

        /* List of struct vc4_seqno_cb for callbacks to be made from a
         * workqueue when the given seqno is passed.
         */
        struct list_head seqno_cb_list;

        /* The memory used for storing binner tile alloc, tile state,
         * and overflow memory allocations.  This is freed when V3D
         * powers down.
         */
        struct vc4_bo *bin_bo;

        /* Size of blocks allocated within bin_bo. */
        uint32_t bin_alloc_size;

        /* Bitmask of the bin_alloc_size chunks in bin_bo that are
         * used.
         */
        uint32_t bin_alloc_used;

        /* Bitmask of the current bin_alloc used for overflow memory. */
        uint32_t bin_alloc_overflow;
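
        /* Illustrative sketch (an assumption): with one bit per
         * bin_alloc_size chunk, claiming a free slot reduces to a
         * find-first-zero on the mask; ffs() returns a 1-based bit
         * position, hence the decrement:
         *
         *      int slot = ffs(~vc4->bin_alloc_used);
         *      if (slot) {
         *              slot--;
         *              vc4->bin_alloc_used |= BIT(slot);
         *      }
         */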

        struct work_struct overflow_mem_work;

        int power_refcount;

        /* Mutex controlling the power refcount. */
        struct mutex power_lock;

        struct {
                struct timer_list timer;
                struct work_struct reset_work;
        } hangcheck;

        struct semaphore async_modeset;

        struct drm_modeset_lock ctm_state_lock;
        struct drm_private_obj ctm_manager;
};

static inline struct vc4_dev *
to_vc4_dev(struct drm_device *dev)
{
        return (struct vc4_dev *)dev->dev_private;
}

struct vc4_bo {
        struct drm_gem_cma_object base;

        /* seqno of the last job to render using this BO. */
        uint64_t seqno;

        /* seqno of the last job to use the RCL to write to this BO.
         *
         * Note that this doesn't include binner overflow memory
         * writes.
         */
        uint64_t write_seqno;

        bool t_format;

        /* List entry for the BO's position in either
         * vc4_exec_info->unref_list or vc4_dev->bo_cache.time_list
         */
        struct list_head unref_head;

        /* Time in jiffies when the BO was put in vc4->bo_cache. */
        unsigned long free_time;

        /* List entry for the BO's position in vc4_dev->bo_cache.size_list */
        struct list_head size_head;

        /* Struct for shader validation state, if created by
         * DRM_IOCTL_VC4_CREATE_SHADER_BO.
         */
        struct vc4_validated_shader_info *validated_shader;

        /* Normally (resv == &_resv), except for imported BOs. */
        struct reservation_object *resv;
        struct reservation_object _resv;

        /* One of enum vc4_kernel_bo_type, or VC4_BO_TYPE_COUNT + i
         * for user-allocated labels.
         */
        int label;

        /* Count the number of active users. This is needed to determine
         * whether we can move the BO to the purgeable list or not (when the BO
         * is used by the GPU or the display engine we can't purge it).
         */
        refcount_t usecnt;

        /* Store purgeable/purged state here */
        u32 madv;
        struct mutex madv_lock;
};

static inline struct vc4_bo *
to_vc4_bo(struct drm_gem_object *bo)
{
        return (struct vc4_bo *)bo;
}

struct vc4_fence {
        struct dma_fence base;
        struct drm_device *dev;
        /* vc4 seqno for signaled() test */
        uint64_t seqno;
};

static inline struct vc4_fence *
to_vc4_fence(struct dma_fence *fence)
{
        return (struct vc4_fence *)fence;
}

struct vc4_seqno_cb {
        struct work_struct work;
        uint64_t seqno;
        void (*func)(struct vc4_seqno_cb *cb);
};

struct vc4_v3d {
        struct vc4_dev *vc4;
        struct platform_device *pdev;
        void __iomem *regs;
        struct clk *clk;
};

struct vc4_hvs {
        struct platform_device *pdev;
        void __iomem *regs;
        u32 __iomem *dlist;

        /* Memory manager for CRTCs to allocate space in the display
         * list.  Units are dwords.
         */
        struct drm_mm dlist_mm;
        /* Memory manager for the LBM memory used by HVS scaling. */
        struct drm_mm lbm_mm;
        spinlock_t mm_lock;

        struct drm_mm_node mitchell_netravali_filter;
};

struct vc4_plane {
        struct drm_plane base;
};

static inline struct vc4_plane *
to_vc4_plane(struct drm_plane *plane)
{
        return (struct vc4_plane *)plane;
}

enum vc4_scaling_mode {
        VC4_SCALING_NONE,
        VC4_SCALING_TPZ,
        VC4_SCALING_PPF,
};

struct vc4_plane_state {
        struct drm_plane_state base;
        /* System memory copy of the display list for this element, computed
         * at atomic_check time.
         */
        u32 *dlist;
        u32 dlist_size; /* Number of dwords allocated for the display list */
        u32 dlist_count; /* Number of used dwords in the display list. */

        /* Offset in the dlist to various words, for pageflip or
         * cursor updates.
         */
        u32 pos0_offset;
        u32 pos2_offset;
        u32 ptr0_offset;

        /* Offset where the plane's dlist was last stored in the
         * hardware at vc4_crtc_atomic_flush() time.
         */
        u32 __iomem *hw_dlist;

        /* Clipped coordinates of the plane on the display. */
        int crtc_x, crtc_y, crtc_w, crtc_h;
        /* Clipped area being scanned from in the FB. */
        u32 src_x, src_y;

        u32 src_w[2], src_h[2];

        /* Scaling selection for the RGB/Y plane and the Cb/Cr planes. */
        enum vc4_scaling_mode x_scaling[2], y_scaling[2];
        bool is_unity;
        bool is_yuv;

        /* Offset to start scanning out from the start of the plane's
         * BO.
         */
        u32 offsets[3];

        /* Our allocation in LBM for temporary storage during scaling. */
        struct drm_mm_node lbm;

        /* Set when the plane has per-pixel alpha content or does not cover
         * the entire screen. This is a hint to the CRTC that it might need
         * to enable background color fill.
         */
        bool needs_bg_fill;
};

static inline struct vc4_plane_state *
to_vc4_plane_state(struct drm_plane_state *state)
{
        return (struct vc4_plane_state *)state;
}

enum vc4_encoder_type {
        VC4_ENCODER_TYPE_NONE,
        VC4_ENCODER_TYPE_HDMI,
        VC4_ENCODER_TYPE_VEC,
        VC4_ENCODER_TYPE_DSI0,
        VC4_ENCODER_TYPE_DSI1,
        VC4_ENCODER_TYPE_SMI,
        VC4_ENCODER_TYPE_DPI,
};

struct vc4_encoder {
        struct drm_encoder base;
        enum vc4_encoder_type type;
        u32 clock_select;
};

static inline struct vc4_encoder *
to_vc4_encoder(struct drm_encoder *encoder)
{
        return container_of(encoder, struct vc4_encoder, base);
}

struct vc4_crtc_data {
        /* Which channel of the HVS this pixelvalve sources from. */
        int hvs_channel;

        enum vc4_encoder_type encoder_types[4];
};

struct vc4_crtc {
        struct drm_crtc base;
        const struct vc4_crtc_data *data;
        void __iomem *regs;

        /* Timestamp at start of vblank irq - unaffected by lock delays. */
        ktime_t t_vblank;

        /* Which HVS channel we're using for our CRTC. */
        int channel;

        u8 lut_r[256];
        u8 lut_g[256];
        u8 lut_b[256];
        /* Size in pixels of the COB memory allocated to this CRTC. */
        u32 cob_size;

        struct drm_pending_vblank_event *event;
};

static inline struct vc4_crtc *
to_vc4_crtc(struct drm_crtc *crtc)
{
        return (struct vc4_crtc *)crtc;
}

#define V3D_READ(offset) readl(vc4->v3d->regs + offset)
#define V3D_WRITE(offset, val) writel(val, vc4->v3d->regs + offset)
#define HVS_READ(offset) readl(vc4->hvs->regs + offset)
#define HVS_WRITE(offset, val) writel(val, vc4->hvs->regs + offset)
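
/* Note that these accessors expect a local variable named vc4 (a struct
 * vc4_dev *) to be in scope at the call site, e.g.:
 *
 *      struct vc4_dev *vc4 = to_vc4_dev(dev);
 *      u32 ident = V3D_READ(V3D_IDENT0);
 *
 * (V3D_IDENT0 stands in for whichever register offset the caller needs.)
 */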

struct vc4_exec_info {
        /* Sequence number for this bin/render job. */
        uint64_t seqno;

        /* Latest write_seqno of any BO that binning depends on. */
        uint64_t bin_dep_seqno;

        struct dma_fence *fence;

        /* Last current addresses the hardware was processing when the
         * hangcheck timer checked on us.
         */
        uint32_t last_ct0ca, last_ct1ca;

        /* Kernel-space copy of the ioctl arguments */
        struct drm_vc4_submit_cl *args;

        /* This is the array of BOs that were looked up at the start of exec.
         * Command validation will use indices into this array.
         */
        struct drm_gem_cma_object **bo;
        uint32_t bo_count;

        /* List of BOs that are being written by the RCL.  Other than
         * the binner temporary storage, this is all the BOs written
         * by the job.
         */
        struct drm_gem_cma_object *rcl_write_bo[4];
        uint32_t rcl_write_bo_count;

        /* Pointers for our position in vc4->job_list */
        struct list_head head;

        /* List of other BOs used in the job that need to be released
         * once the job is complete.
         */
        struct list_head unref_list;

        /* Current unvalidated indices into @bo loaded by the non-hardware
         * VC4_PACKET_GEM_HANDLES.
         */
        uint32_t bo_index[2];

        /* This is the BO where we store the validated command lists, shader
         * records, and uniforms.
         */
        struct drm_gem_cma_object *exec_bo;

        /**
         * This tracks the per-shader-record state (packet 64) that
         * determines the length of the shader record and the offset
         * it's expected to be found at.  It gets read in from the
         * command lists.
         */
        struct vc4_shader_state {
                uint32_t addr;
                /* Maximum vertex index referenced by any primitive using this
                 * shader state.
                 */
                uint32_t max_index;
        } *shader_state;

        /** How many shader states the user declared they were using. */
        uint32_t shader_state_size;
        /** How many shader state records the validator has seen. */
        uint32_t shader_state_count;

        bool found_tile_binning_mode_config_packet;
        bool found_start_tile_binning_packet;
        bool found_increment_semaphore_packet;
        bool found_flush;
        uint8_t bin_tiles_x, bin_tiles_y;
        /* Physical address of the start of the tile alloc array
         * (where each tile's binned CL will start)
         */
        uint32_t tile_alloc_offset;
        /* Bitmask of which binner slots are freed when this job completes. */
        uint32_t bin_slots;

        /**
         * Computed addresses pointing into exec_bo where we start the
         * bin thread (ct0) and render thread (ct1).
         */
        uint32_t ct0ca, ct0ea;
        uint32_t ct1ca, ct1ea;

        /* Pointer to the unvalidated bin CL (if present). */
        void *bin_u;

        /* Pointers to the shader recs.  The paddr gets incremented as CL
         * packets are relocated in validate_gl_shader_state, and the vaddrs
         * (u and v) get incremented and the size decremented as the shader
         * recs themselves are validated.
         */
        void *shader_rec_u;
        void *shader_rec_v;
        uint32_t shader_rec_p;
        uint32_t shader_rec_size;

        /* Pointers to the uniform data.  These pointers are incremented, and
         * size decremented, as each batch of uniforms is uploaded.
         */
        void *uniforms_u;
        void *uniforms_v;
        uint32_t uniforms_p;
        uint32_t uniforms_size;

        /* Pointer to a performance monitor object if the user requested it,
         * NULL otherwise.
         */
        struct vc4_perfmon *perfmon;
};

/* Per-open file private data. Any driver-specific resource that has to be
 * released when the DRM file is closed should be placed here.
 */
struct vc4_file {
        struct {
                struct idr idr;
                struct mutex lock;
        } perfmon;
};

static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
        return list_first_entry_or_null(&vc4->bin_job_list,
                                        struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
        return list_first_entry_or_null(&vc4->render_job_list,
                                        struct vc4_exec_info, head);
}

static inline struct vc4_exec_info *
vc4_last_render_job(struct vc4_dev *vc4)
{
        if (list_empty(&vc4->render_job_list))
                return NULL;
        return list_last_entry(&vc4->render_job_list,
                               struct vc4_exec_info, head);
}
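
/* Illustrative sketch (an assumption): the job lists are shared with the
 * IRQ handler, so callers walk them under job_lock, e.g.:
 *
 *      unsigned long irqflags;
 *      struct vc4_exec_info *exec;
 *
 *      spin_lock_irqsave(&vc4->job_lock, irqflags);
 *      exec = vc4_first_bin_job(vc4);
 *      if (exec)
 *              (kick off binning at exec->ct0ca here)
 *      spin_unlock_irqrestore(&vc4->job_lock, irqflags);
 */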

/**
 * struct vc4_texture_sample_info - saves the offsets into the UBO for texture
 * setup parameters.
 *
 * This will be used at draw time to relocate the reference to the texture
 * contents in p0, and validate that the offset combined with
 * width/height/stride/etc. from p1 and p2/p3 doesn't sample outside the BO.
 * Note that the hardware treats unprovided config parameters as 0, so not all
 * of them need to be set up for every texture sample, and we'll store ~0 as
 * the offset to mark the unused ones.
 *
 * See the VC4 3D architecture guide page 41 ("Texture and Memory Lookup Unit
 * Setup") for definitions of the texture parameters.
 */
struct vc4_texture_sample_info {
        bool is_direct;
        uint32_t p_offset[4];
};
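
/* Illustrative sketch (an assumption): a consumer applying relocations
 * skips the parameters marked unused with ~0:
 *
 *      int i;
 *
 *      for (i = 0; i < 4; i++) {
 *              if (sample->p_offset[i] == ~0)
 *                      continue;
 *              (apply the relocation at p_offset[i])
 *      }
 */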

/**
 * struct vc4_validated_shader_info - information about validated shaders that
 * needs to be used from command list validation.
 *
 * For a given shader, each time a shader state record references it, we need
 * to verify that the shader doesn't read more uniforms than the shader state
 * record's uniform BO pointer can provide, and we need to apply relocations
 * and validate the shader state record's uniforms that define the texture
 * samples.
 */
struct vc4_validated_shader_info {
        uint32_t uniforms_size;
        uint32_t uniforms_src_size;
        uint32_t num_texture_samples;
        struct vc4_texture_sample_info *texture_samples;

        uint32_t num_uniform_addr_offsets;
        uint32_t *uniform_addr_offsets;

        bool is_threaded;
};

/**
 * _wait_for - magic (register) wait macro
 *
 * Does the right thing for modeset paths when run under kgdb or similar atomic
 * contexts. Note that it's important that we check the condition again after
 * having timed out, since the timeout could be due to preemption or similar and
 * we've never had a chance to check the condition before the timeout.
 */
#define _wait_for(COND, MS, W) ({ \
        unsigned long timeout__ = jiffies + msecs_to_jiffies(MS) + 1;   \
        int ret__ = 0;                                                  \
        while (!(COND)) {                                               \
                if (time_after(jiffies, timeout__)) {                   \
                        if (!(COND))                                    \
                                ret__ = -ETIMEDOUT;                     \
                        break;                                          \
                }                                                       \
                if (W && drm_can_sleep())  {                            \
                        msleep(W);                                      \
                } else {                                                \
                        cpu_relax();                                    \
                }                                                       \
        }                                                               \
        ret__;                                                          \
})

#define wait_for(COND, MS) _wait_for(COND, MS, 1)
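
/* Example usage (illustrative; the register and bit names below are
 * placeholders, not real vc4 defines): poll until a busy bit clears,
 * giving up after a second:
 *
 *      if (wait_for(!(V3D_READ(SOME_REG) & SOME_BUSY_BIT), 1000))
 *              DRM_ERROR("timed out waiting for V3D\n");
 */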

/* vc4_bo.c */
struct drm_gem_object *vc4_create_object(struct drm_device *dev, size_t size);
void vc4_free_object(struct drm_gem_object *gem_obj);
struct vc4_bo *vc4_bo_create(struct drm_device *dev, size_t size,
                             bool from_cache, enum vc4_kernel_bo_type type);
int vc4_dumb_create(struct drm_file *file_priv,
                    struct drm_device *dev,
                    struct drm_mode_create_dumb *args);
struct dma_buf *vc4_prime_export(struct drm_device *dev,
                                 struct drm_gem_object *obj, int flags);
int vc4_create_bo_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int vc4_create_shader_bo_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv);
int vc4_mmap_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
int vc4_set_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_get_tiling_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_get_hang_state_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int vc4_label_bo_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
int vc4_fault(struct vm_fault *vmf);
int vc4_mmap(struct file *filp, struct vm_area_struct *vma);
struct reservation_object *vc4_prime_res_obj(struct drm_gem_object *obj);
int vc4_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
struct drm_gem_object *vc4_prime_import_sg_table(struct drm_device *dev,
                                                 struct dma_buf_attachment *attach,
                                                 struct sg_table *sgt);
void *vc4_prime_vmap(struct drm_gem_object *obj);
int vc4_bo_cache_init(struct drm_device *dev);
void vc4_bo_cache_destroy(struct drm_device *dev);
int vc4_bo_stats_debugfs(struct seq_file *m, void *arg);
int vc4_bo_inc_usecnt(struct vc4_bo *bo);
void vc4_bo_dec_usecnt(struct vc4_bo *bo);
void vc4_bo_add_to_purgeable_pool(struct vc4_bo *bo);
void vc4_bo_remove_from_purgeable_pool(struct vc4_bo *bo);

/* vc4_crtc.c */
extern struct platform_driver vc4_crtc_driver;
int vc4_crtc_debugfs_regs(struct seq_file *m, void *arg);
bool vc4_crtc_get_scanoutpos(struct drm_device *dev, unsigned int crtc_id,
                             bool in_vblank_irq, int *vpos, int *hpos,
                             ktime_t *stime, ktime_t *etime,
                             const struct drm_display_mode *mode);

/* vc4_debugfs.c */
int vc4_debugfs_init(struct drm_minor *minor);

/* vc4_drv.c */
void __iomem *vc4_ioremap_regs(struct platform_device *dev, int index);

/* vc4_dpi.c */
extern struct platform_driver vc4_dpi_driver;
int vc4_dpi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_dsi.c */
extern struct platform_driver vc4_dsi_driver;
int vc4_dsi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_fence.c */
extern const struct dma_fence_ops vc4_fence_ops;

/* vc4_gem.c */
void vc4_gem_init(struct drm_device *dev);
void vc4_gem_destroy(struct drm_device *dev);
int vc4_submit_cl_ioctl(struct drm_device *dev, void *data,
                        struct drm_file *file_priv);
int vc4_wait_seqno_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv);
int vc4_wait_bo_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv);
void vc4_submit_next_bin_job(struct drm_device *dev);
void vc4_submit_next_render_job(struct drm_device *dev);
void vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec);
int vc4_wait_for_seqno(struct drm_device *dev, uint64_t seqno,
                       uint64_t timeout_ns, bool interruptible);
void vc4_job_handle_completed(struct vc4_dev *vc4);
int vc4_queue_seqno_cb(struct drm_device *dev,
                       struct vc4_seqno_cb *cb, uint64_t seqno,
                       void (*func)(struct vc4_seqno_cb *cb));
int vc4_gem_madvise_ioctl(struct drm_device *dev, void *data,
                          struct drm_file *file_priv);

/* vc4_hdmi.c */
extern struct platform_driver vc4_hdmi_driver;
int vc4_hdmi_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_vec.c */
extern struct platform_driver vc4_vec_driver;
int vc4_vec_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_irq.c */
irqreturn_t vc4_irq(int irq, void *arg);
void vc4_irq_preinstall(struct drm_device *dev);
int vc4_irq_postinstall(struct drm_device *dev);
void vc4_irq_uninstall(struct drm_device *dev);
void vc4_irq_reset(struct drm_device *dev);

/* vc4_hvs.c */
extern struct platform_driver vc4_hvs_driver;
void vc4_hvs_dump_state(struct drm_device *dev);
int vc4_hvs_debugfs_regs(struct seq_file *m, void *unused);

/* vc4_kms.c */
int vc4_kms_load(struct drm_device *dev);

/* vc4_plane.c */
struct drm_plane *vc4_plane_init(struct drm_device *dev,
                                 enum drm_plane_type type);
u32 vc4_plane_write_dlist(struct drm_plane *plane, u32 __iomem *dlist);
u32 vc4_plane_dlist_size(const struct drm_plane_state *state);
void vc4_plane_async_set_fb(struct drm_plane *plane,
                            struct drm_framebuffer *fb);

/* vc4_v3d.c */
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
int vc4_v3d_get_bin_slot(struct vc4_dev *vc4);

/* vc4_validate.c */
int
vc4_validate_bin_cl(struct drm_device *dev,
                    void *validated,
                    void *unvalidated,
                    struct vc4_exec_info *exec);

int
vc4_validate_shader_recs(struct drm_device *dev, struct vc4_exec_info *exec);

struct drm_gem_cma_object *vc4_use_bo(struct vc4_exec_info *exec,
                                      uint32_t hindex);

int vc4_get_rcl(struct drm_device *dev, struct vc4_exec_info *exec);

bool vc4_check_tex_size(struct vc4_exec_info *exec,
                        struct drm_gem_cma_object *fbo,
                        uint32_t offset, uint8_t tiling_format,
                        uint32_t width, uint32_t height, uint8_t cpp);

/* vc4_validate_shader.c */
struct vc4_validated_shader_info *
vc4_validate_shader(struct drm_gem_cma_object *shader_obj);

/* vc4_perfmon.c */
void vc4_perfmon_get(struct vc4_perfmon *perfmon);
void vc4_perfmon_put(struct vc4_perfmon *perfmon);
void vc4_perfmon_start(struct vc4_dev *vc4, struct vc4_perfmon *perfmon);
void vc4_perfmon_stop(struct vc4_dev *vc4, struct vc4_perfmon *perfmon,
                      bool capture);
struct vc4_perfmon *vc4_perfmon_find(struct vc4_file *vc4file, int id);
void vc4_perfmon_open_file(struct vc4_file *vc4file);
void vc4_perfmon_close_file(struct vc4_file *vc4file);
int vc4_perfmon_create_ioctl(struct drm_device *dev, void *data,
                             struct drm_file *file_priv);
int vc4_perfmon_destroy_ioctl(struct drm_device *dev, void *data,
                              struct drm_file *file_priv);
int vc4_perfmon_get_values_ioctl(struct drm_device *dev, void *data,
                                 struct drm_file *file_priv);