linux/drivers/gpu/drm/lima/lima_sched.c
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */

#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>

#include "lima_devfreq.h"
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_gem.h"
#include "lima_trace.h"

struct lima_fence {
        struct dma_fence base;
        struct lima_sched_pipe *pipe;
};

static struct kmem_cache *lima_fence_slab;
static int lima_fence_slab_refcnt;

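/* The fence slab is shared by all lima devices, so its creation and
 * destruction are refcounted across lima_sched_slab_init/fini calls.
 */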
int lima_sched_slab_init(void)
{
        if (!lima_fence_slab) {
                lima_fence_slab = kmem_cache_create(
                        "lima_fence", sizeof(struct lima_fence), 0,
                        SLAB_HWCACHE_ALIGN, NULL);
                if (!lima_fence_slab)
                        return -ENOMEM;
        }

        lima_fence_slab_refcnt++;
        return 0;
}

void lima_sched_slab_fini(void)
{
        if (!--lima_fence_slab_refcnt) {
                kmem_cache_destroy(lima_fence_slab);
                lima_fence_slab = NULL;
        }
}

static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
        return container_of(fence, struct lima_fence, base);
}

static const char *lima_fence_get_driver_name(struct dma_fence *fence)
{
        return "lima";
}

static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
        struct lima_fence *f = to_lima_fence(fence);

        return f->pipe->base.name;
}

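/* A dma_fence may still be accessed by RCU readers (such as
 * dma_fence_get_rcu_safe()) after its refcount drops to zero, so the
 * slab object is only freed after an RCU grace period has elapsed.
 */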
static void lima_fence_release_rcu(struct rcu_head *rcu)
{
        struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
        struct lima_fence *fence = to_lima_fence(f);

        kmem_cache_free(lima_fence_slab, fence);
}

static void lima_fence_release(struct dma_fence *fence)
{
        struct lima_fence *f = to_lima_fence(fence);

        call_rcu(&f->base.rcu, lima_fence_release_rcu);
}

static const struct dma_fence_ops lima_fence_ops = {
        .get_driver_name = lima_fence_get_driver_name,
        .get_timeline_name = lima_fence_get_timeline_name,
        .release = lima_fence_release,
};

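/* Allocate the hardware fence for a task on this pipe; each pipe owns
 * a fence context and a monotonically increasing seqno.
 */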
static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
        struct lima_fence *fence;

        fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
        if (!fence)
                return NULL;

        fence->pipe = pipe;
        dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
                       pipe->fence_context, ++pipe->fence_seqno);

        return fence;
}

static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
{
        return container_of(job, struct lima_sched_task, base);
}

static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
{
        return container_of(sched, struct lima_sched_pipe, base);
}

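/* Duplicate the BO array and take a reference on each BO so the buffers
 * stay alive until lima_sched_task_fini() releases them. Dependency
 * fences are collected in task->deps and handed back to the scheduler
 * one by one through lima_sched_dependency().
 */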
int lima_sched_task_init(struct lima_sched_task *task,
                         struct lima_sched_context *context,
                         struct lima_bo **bos, int num_bos,
                         struct lima_vm *vm)
{
        int err, i;

        task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
        if (!task->bos)
                return -ENOMEM;

        for (i = 0; i < num_bos; i++)
                drm_gem_object_get(&bos[i]->base.base);

        err = drm_sched_job_init(&task->base, &context->base, vm);
        if (err) {
                kfree(task->bos);
                return err;
        }

        task->num_bos = num_bos;
        task->vm = lima_vm_get(vm);

        xa_init_flags(&task->deps, XA_FLAGS_ALLOC);

        return 0;
}

void lima_sched_task_fini(struct lima_sched_task *task)
{
        struct dma_fence *fence;
        unsigned long index;
        int i;

        drm_sched_job_cleanup(&task->base);

        xa_for_each(&task->deps, index, fence) {
                dma_fence_put(fence);
        }
        xa_destroy(&task->deps);

        if (task->bos) {
                for (i = 0; i < task->num_bos; i++)
                        drm_gem_object_put(&task->bos[i]->base.base);
                kfree(task->bos);
        }

        lima_vm_put(task->vm);
}

int lima_sched_context_init(struct lima_sched_pipe *pipe,
                            struct lima_sched_context *context,
                            atomic_t *guilty)
{
        struct drm_gpu_scheduler *sched = &pipe->base;

        return drm_sched_entity_init(&context->base, DRM_SCHED_PRIORITY_NORMAL,
                                     &sched, 1, guilty);
}

void lima_sched_context_fini(struct lima_sched_pipe *pipe,
                             struct lima_sched_context *context)
{
        drm_sched_entity_fini(&context->base);
}

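/* Push a fully initialized task to the scheduler entity and return the
 * scheduler's "finished" fence for the caller to wait on or export.
 */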
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
                                                struct lima_sched_task *task)
{
        struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);

        trace_lima_task_submit(task);
        drm_sched_entity_push_job(&task->base, &context->base);
        return fence;
}

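/* Called by the scheduler before running a job: return the next
 * dependency fence to wait on, or NULL once all of them are consumed.
 * task->last_dep walks the xarray in the order entries were allocated.
 */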
static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
                                               struct drm_sched_entity *entity)
{
        struct lima_sched_task *task = to_lima_task(job);

        if (!xa_empty(&task->deps))
                return xa_erase(&task->deps, task->last_dep++);

        return NULL;
}

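/* Power up the GPU through runtime PM and mark it busy for devfreq;
 * lima_pm_idle() is the matching teardown on completion or timeout.
 */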
static int lima_pm_busy(struct lima_device *ldev)
{
        int ret;

        /* resume GPU if it has been suspended by runtime PM */
        ret = pm_runtime_get_sync(ldev->dev);
        if (ret < 0)
                return ret;

        lima_devfreq_record_busy(&ldev->devfreq);
        return 0;
}

static void lima_pm_idle(struct lima_device *ldev)
{
        lima_devfreq_record_idle(&ldev->devfreq);

        /* let the GPU runtime suspend automatically after the autosuspend delay */
        pm_runtime_mark_last_busy(ldev->dev);
        pm_runtime_put_autosuspend(ldev->dev);
}

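/* Hand one task to the hardware: power up, flush the L2 caches, switch
 * the MMU to the task's VM and kick off pipe->task_run(). Returns the
 * hardware fence that lima_sched_pipe_task_done() signals when the
 * task finishes, or NULL on error.
 */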
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
        struct lima_device *ldev = pipe->ldev;
        struct lima_fence *fence;
        struct dma_fence *ret;
        int i, err;

        /* the job was flagged as errored by a previous GPU reset, don't run it */
        if (job->s_fence->finished.error < 0)
                return NULL;

        fence = lima_fence_create(pipe);
        if (!fence)
                return NULL;

        err = lima_pm_busy(ldev);
        if (err < 0) {
                dma_fence_put(&fence->base);
                return NULL;
        }

        task->fence = &fence->base;

        /* take an extra reference for the caller, otherwise the IRQ
         * handler may release the fence before the caller has used it
         */
        ret = dma_fence_get(task->fence);

        pipe->current_task = task;

        /* The L2 cache flush is needed for the MMU to work correctly;
         * otherwise GP/PP will hang or page fault for no apparent
         * reason after running for a while.
         *
         * Still to investigate:
         * 1. is this related to the TLB?
         * 2. how much performance is lost to the L2 cache flush?
         * 3. can we flush less often, since all GP/PP share the same
         *    L2 cache on Mali-400?
         *
         * TODO:
         * 1. move this to task fini to save some wait time?
         * 2. when GP/PP use different L2 caches, does PP need to wait
         *    for the GP L2 cache flush?
         */
        for (i = 0; i < pipe->num_l2_cache; i++)
                lima_l2_cache_flush(pipe->l2_cache[i]);

        lima_vm_put(pipe->current_vm);
        pipe->current_vm = lima_vm_get(task->vm);

        if (pipe->bcast_mmu)
                lima_mmu_switch_vm(pipe->bcast_mmu, pipe->current_vm);
        else {
                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_switch_vm(pipe->mmu[i], pipe->current_vm);
        }

        trace_lima_task_run(task);

        pipe->error = false;
        pipe->task_run(pipe, task);

        return task->fence;
}

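/* Capture the state of a timed-out task onto dev->error_task_list so
 * it can be dumped to userspace later. The dump is a lima_dump_task
 * header followed by chunks: the frame registers, the process name,
 * the pid, and one chunk per buffer object copied through a temporary
 * kernel mapping.
 */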
static void lima_sched_build_error_task_list(struct lima_sched_task *task)
{
        struct lima_sched_error_task *et;
        struct lima_sched_pipe *pipe = to_lima_pipe(task->base.sched);
        struct lima_ip *ip = pipe->processor[0];
        int pipe_id = ip->id == lima_ip_gp ? lima_pipe_gp : lima_pipe_pp;
        struct lima_device *dev = ip->dev;
        struct lima_sched_context *sched_ctx =
                container_of(task->base.entity,
                             struct lima_sched_context, base);
        struct lima_ctx *ctx =
                container_of(sched_ctx, struct lima_ctx, context[pipe_id]);
        struct lima_dump_task *dt;
        struct lima_dump_chunk *chunk;
        struct lima_dump_chunk_pid *pid_chunk;
        struct lima_dump_chunk_buffer *buffer_chunk;
        u32 size, task_size, mem_size;
        int i;

        mutex_lock(&dev->error_task_list_lock);

        if (dev->dump.num_tasks >= lima_max_error_tasks) {
                dev_info(dev->dev,
                         "failed to save task state from %s pid %d: error task list is full\n",
                         ctx->pname, ctx->pid);
                goto out;
        }

        /* frame chunk */
        size = sizeof(struct lima_dump_chunk) + pipe->frame_size;
        /* process name chunk */
        size += sizeof(struct lima_dump_chunk) + sizeof(ctx->pname);
        /* pid chunk */
        size += sizeof(struct lima_dump_chunk);
        /* buffer chunks */
        for (i = 0; i < task->num_bos; i++) {
                struct lima_bo *bo = task->bos[i];

                size += sizeof(struct lima_dump_chunk);
                size += bo->heap_size ? bo->heap_size : lima_bo_size(bo);
        }

        task_size = size + sizeof(struct lima_dump_task);
        mem_size = task_size + sizeof(*et);
        et = kvmalloc(mem_size, GFP_KERNEL);
        if (!et) {
                dev_err(dev->dev, "failed to allocate task dump buffer of size %x\n",
                        mem_size);
                goto out;
        }

        et->data = et + 1;
        et->size = task_size;

        dt = et->data;
        memset(dt, 0, sizeof(*dt));
        dt->id = pipe_id;
        dt->size = size;

        chunk = (struct lima_dump_chunk *)(dt + 1);
        memset(chunk, 0, sizeof(*chunk));
        chunk->id = LIMA_DUMP_CHUNK_FRAME;
        chunk->size = pipe->frame_size;
        memcpy(chunk + 1, task->frame, pipe->frame_size);
        dt->num_chunks++;

        chunk = (void *)(chunk + 1) + chunk->size;
        memset(chunk, 0, sizeof(*chunk));
        chunk->id = LIMA_DUMP_CHUNK_PROCESS_NAME;
        chunk->size = sizeof(ctx->pname);
        memcpy(chunk + 1, ctx->pname, sizeof(ctx->pname));
        dt->num_chunks++;

        pid_chunk = (void *)(chunk + 1) + chunk->size;
        memset(pid_chunk, 0, sizeof(*pid_chunk));
        pid_chunk->id = LIMA_DUMP_CHUNK_PROCESS_ID;
        pid_chunk->pid = ctx->pid;
        dt->num_chunks++;

        buffer_chunk = (void *)(pid_chunk + 1) + pid_chunk->size;
        for (i = 0; i < task->num_bos; i++) {
                struct lima_bo *bo = task->bos[i];
                void *data;

                memset(buffer_chunk, 0, sizeof(*buffer_chunk));
                buffer_chunk->id = LIMA_DUMP_CHUNK_BUFFER;
                buffer_chunk->va = lima_vm_get_va(task->vm, bo);

                if (bo->heap_size) {
                        buffer_chunk->size = bo->heap_size;

                        data = vmap(bo->base.pages, bo->heap_size >> PAGE_SHIFT,
                                    VM_MAP, pgprot_writecombine(PAGE_KERNEL));
                        if (!data) {
                                kvfree(et);
                                goto out;
                        }

                        memcpy(buffer_chunk + 1, data, buffer_chunk->size);

                        vunmap(data);
                } else {
                        buffer_chunk->size = lima_bo_size(bo);

                        data = drm_gem_shmem_vmap(&bo->base.base);
                        if (IS_ERR_OR_NULL(data)) {
                                kvfree(et);
                                goto out;
                        }

                        memcpy(buffer_chunk + 1, data, buffer_chunk->size);

                        drm_gem_shmem_vunmap(&bo->base.base, data);
                }

                buffer_chunk = (void *)(buffer_chunk + 1) + buffer_chunk->size;
                dt->num_chunks++;
        }

        list_add(&et->list, &dev->error_task_list);
        dev->dump.size += et->size;
        dev->dump.num_tasks++;

        dev_info(dev->dev, "error task state saved successfully\n");

out:
        mutex_unlock(&dev->error_task_list_lock);
}

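/* Timeout handling: stop the scheduler, bump the guilty job's karma,
 * save an error dump, reset the processors and MMU state, then
 * resubmit the remaining jobs and restart the scheduler.
 */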
static void lima_sched_timedout_job(struct drm_sched_job *job)
{
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_device *ldev = pipe->ldev;

        if (!pipe->error)
                DRM_ERROR("lima job timeout\n");

        drm_sched_stop(&pipe->base, &task->base);

        drm_sched_increase_karma(&task->base);

        lima_sched_build_error_task_list(task);

        pipe->task_error(pipe);

        if (pipe->bcast_mmu)
                lima_mmu_page_fault_resume(pipe->bcast_mmu);
        else {
                int i;

                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_page_fault_resume(pipe->mmu[i]);
        }

        lima_vm_put(pipe->current_vm);
        pipe->current_vm = NULL;
        pipe->current_task = NULL;

        lima_pm_idle(ldev);

        drm_sched_resubmit_jobs(&pipe->base);
        drm_sched_start(&pipe->base, true);
}

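/* Final cleanup once the job's fences have been released: drop the
 * hardware fence, remove the BOs from the task's VM and free the task.
 */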
static void lima_sched_free_job(struct drm_sched_job *job)
{
        struct lima_sched_task *task = to_lima_task(job);
        struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
        struct lima_vm *vm = task->vm;
        struct lima_bo **bos = task->bos;
        int i;

        dma_fence_put(task->fence);

        for (i = 0; i < task->num_bos; i++)
                lima_vm_bo_del(vm, bos[i]);

        lima_sched_task_fini(task);
        kmem_cache_free(pipe->task_slab, task);
}

static const struct drm_sched_backend_ops lima_sched_ops = {
        .dependency = lima_sched_dependency,
        .run_job = lima_sched_run_job,
        .timedout_job = lima_sched_timedout_job,
        .free_job = lima_sched_free_job,
};

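/* Recovery path for tasks flagged as recoverable (a PP task whose heap
 * buffer can be enlarged): flush caches and TLBs, then let
 * pipe->task_recover() restart the task, falling back to a full reset
 * via drm_sched_fault() on failure.
 */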
static void lima_sched_recover_work(struct work_struct *work)
{
        struct lima_sched_pipe *pipe =
                container_of(work, struct lima_sched_pipe, recover_work);
        int i;

        for (i = 0; i < pipe->num_l2_cache; i++)
                lima_l2_cache_flush(pipe->l2_cache[i]);

        if (pipe->bcast_mmu) {
                lima_mmu_flush_tlb(pipe->bcast_mmu);
        } else {
                for (i = 0; i < pipe->num_mmu; i++)
                        lima_mmu_flush_tlb(pipe->mmu[i]);
        }

        if (pipe->task_recover(pipe))
                drm_sched_fault(&pipe->base);
}

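/* One scheduler instance per pipe (GP or PP). The timeout defaults to
 * 500 ms when the lima_sched_timeout_ms module parameter is left at 0.
 */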
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
        unsigned int timeout = lima_sched_timeout_ms > 0 ?
                               lima_sched_timeout_ms : 500;

        pipe->fence_context = dma_fence_context_alloc(1);
        spin_lock_init(&pipe->fence_lock);

        INIT_WORK(&pipe->recover_work, lima_sched_recover_work);

        return drm_sched_init(&pipe->base, &lima_sched_ops, 1,
                              lima_job_hang_limit, msecs_to_jiffies(timeout),
                              name);
}

void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
{
        drm_sched_fini(&pipe->base);
}

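/* Called from the pipe's IRQ handler when the hardware finishes or
 * faults a task: either schedule recovery / report the fault, or
 * signal the task's fence and drop the runtime PM reference.
 */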
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
        struct lima_sched_task *task = pipe->current_task;
        struct lima_device *ldev = pipe->ldev;

        if (pipe->error) {
                if (task && task->recoverable)
                        schedule_work(&pipe->recover_work);
                else
                        drm_sched_fault(&pipe->base);
        } else {
                pipe->task_fini(pipe);
                dma_fence_signal(task->fence);

                lima_pm_idle(ldev);
        }
}