linux/drivers/gpu/drm/msm/msm_ringbuffer.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include "msm_ringbuffer.h"
#include "msm_gpu.h"

static uint num_hw_submissions = 8;
MODULE_PARM_DESC(num_hw_submissions, "The max # of jobs to write into ringbuffer (default 8)");
module_param(num_hw_submissions, uint, 0600);

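/*
 * run_job callback for the DRM GPU scheduler: called once all of a
 * submit's dependencies have been resolved.  Writes the submit into the
 * ringbuffer under gpu->lock (still a single per-GPU lock, see the TODO
 * below) and returns a reference to the hw fence that signals when the
 * GPU has completed the submit.
 */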
static struct dma_fence *msm_job_run(struct drm_sched_job *job)
{
        struct msm_gem_submit *submit = to_msm_submit(job);
        struct msm_gpu *gpu = submit->gpu;

        submit->hw_fence = msm_fence_alloc(submit->ring->fctx);

        pm_runtime_get_sync(&gpu->pdev->dev);

        /* TODO move submit path over to using a per-ring lock.. */
        mutex_lock(&gpu->lock);

        msm_gpu_submit(gpu, submit);

        mutex_unlock(&gpu->lock);

        pm_runtime_put(&gpu->pdev->dev);

        return dma_fence_get(submit->hw_fence);
}

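/*
 * free_job callback: the scheduler is done with this job, so drop the
 * scheduler bookkeeping and the reference it held on the submit.
 */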
static void msm_job_free(struct drm_sched_job *job)
{
        struct msm_gem_submit *submit = to_msm_submit(job);

        drm_sched_job_cleanup(job);
        msm_gem_submit_put(submit);
}

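/* Scheduler entry-points; each ring gets its own scheduler instance. */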
const struct drm_sched_backend_ops msm_sched_ops = {
        .run_job = msm_job_run,
        .free_job = msm_job_free
};

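/*
 * Create a ringbuffer: allocate the GPU read-only buffer object that
 * backs the ring, set up the per-ring scheduler, and allocate the fence
 * context used to track completion of submits on this ring.
 */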
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
                void *memptrs, uint64_t memptrs_iova)
{
        struct msm_ringbuffer *ring;
        long sched_timeout;
        char name[32];
        int ret;

        /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
        BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));

        ring = kzalloc(sizeof(*ring), GFP_KERNEL);
        if (!ring) {
                ret = -ENOMEM;
                goto fail;
        }

        ring->gpu = gpu;
        ring->id = id;

        ring->start = msm_gem_kernel_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
                check_apriv(gpu, MSM_BO_WC | MSM_BO_GPU_READONLY),
                gpu->aspace, &ring->bo, &ring->iova);

        if (IS_ERR(ring->start)) {
                ret = PTR_ERR(ring->start);
                ring->start = NULL;
                goto fail;
        }

        msm_gem_object_set_name(ring->bo, "ring%d", id);

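        /* the ring is managed in 32-bit dwords, hence the byte size >> 2: */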
        ring->end   = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
        ring->next  = ring->start;
        ring->cur   = ring->start;

        ring->memptrs = memptrs;
        ring->memptrs_iova = memptrs_iova;

        /* currently managing hangcheck ourselves: */
        sched_timeout = MAX_SCHEDULE_TIMEOUT;

        ret = drm_sched_init(&ring->sched, &msm_sched_ops,
                        num_hw_submissions, 0, sched_timeout,
                        NULL, NULL, to_msm_bo(ring->bo)->name, gpu->dev->dev);
        if (ret)
                goto fail;

        INIT_LIST_HEAD(&ring->submits);
        spin_lock_init(&ring->submit_lock);
        spin_lock_init(&ring->preempt_lock);

        snprintf(name, sizeof(name), "gpu-ring-%d", ring->id);

        ring->fctx = msm_fence_context_alloc(gpu->dev, &ring->memptrs->fence, name);

        return ring;

fail:
        msm_ringbuffer_destroy(ring);
        return ERR_PTR(ret);
}

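/*
 * Tear down a ring; tolerates a NULL or ERR_PTR ring so that it can be
 * used on the error path of msm_ringbuffer_new().
 */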
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
        if (IS_ERR_OR_NULL(ring))
                return;

        drm_sched_fini(&ring->sched);

        msm_fence_context_free(ring->fctx);

        msm_gem_kernel_put(ring->bo, ring->gpu->aspace);

        kfree(ring);
}