linux/drivers/gpu/drm/msm/msm_gpu.c
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
#include "msm_fence.h"

/*
 * Power Management:
 */

#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
#include <mach/board.h>
static void bs_init(struct msm_gpu *gpu)
{
        if (gpu->bus_scale_table) {
                gpu->bsc = msm_bus_scale_register_client(gpu->bus_scale_table);
                DBG("bus scale client: %08x", gpu->bsc);
        }
}

static void bs_fini(struct msm_gpu *gpu)
{
        if (gpu->bsc) {
                msm_bus_scale_unregister_client(gpu->bsc);
                gpu->bsc = 0;
        }
}

static void bs_set(struct msm_gpu *gpu, int idx)
{
        if (gpu->bsc) {
                DBG("set bus scaling: %d", idx);
                msm_bus_scale_client_update_request(gpu->bsc, idx);
        }
}
#else
static void bs_init(struct msm_gpu *gpu) {}
static void bs_fini(struct msm_gpu *gpu) {}
static void bs_set(struct msm_gpu *gpu, int idx) {}
#endif

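/* enable the "vdd" and "vddcx" rails (both are optional): */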
static int enable_pwrrail(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;
        int ret = 0;

        if (gpu->gpu_reg) {
                ret = regulator_enable(gpu->gpu_reg);
                if (ret) {
                        dev_err(dev->dev, "failed to enable 'gpu_reg': %d\n", ret);
                        return ret;
                }
        }

        if (gpu->gpu_cx) {
                ret = regulator_enable(gpu->gpu_cx);
                if (ret) {
                        dev_err(dev->dev, "failed to enable 'gpu_cx': %d\n", ret);
                        return ret;
                }
        }

        return 0;
}

static int disable_pwrrail(struct msm_gpu *gpu)
{
        if (gpu->gpu_cx)
                regulator_disable(gpu->gpu_cx);
        if (gpu->gpu_reg)
                regulator_disable(gpu->gpu_reg);
        return 0;
}

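/* set initial clock rates and prepare+enable all of the GPU's clocks: */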
static int enable_clk(struct msm_gpu *gpu)
{
        int i;

        if (gpu->core_clk && gpu->fast_rate)
                clk_set_rate(gpu->core_clk, gpu->fast_rate);

        /* Set the RBBM timer rate to 19.2MHz */
        if (gpu->rbbmtimer_clk)
                clk_set_rate(gpu->rbbmtimer_clk, 19200000);

        for (i = gpu->nr_clocks - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_prepare(gpu->grp_clks[i]);

        for (i = gpu->nr_clocks - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_enable(gpu->grp_clks[i]);

        return 0;
}

static int disable_clk(struct msm_gpu *gpu)
{
        int i;

        for (i = gpu->nr_clocks - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_disable(gpu->grp_clks[i]);

        for (i = gpu->nr_clocks - 1; i >= 0; i--)
                if (gpu->grp_clks[i])
                        clk_unprepare(gpu->grp_clks[i]);

        /*
         * Set the clock to a deliberately low rate. On older targets the clock
         * speed had to be non-zero to avoid problems. On newer targets this
         * will be rounded down to zero anyway so it all works out.
         */
        if (gpu->core_clk)
                clk_set_rate(gpu->core_clk, 27000000);

        if (gpu->rbbmtimer_clk)
                clk_set_rate(gpu->rbbmtimer_clk, 0);

        return 0;
}

static int enable_axi(struct msm_gpu *gpu)
{
        if (gpu->ebi1_clk)
                clk_prepare_enable(gpu->ebi1_clk);
        if (gpu->bus_freq)
                bs_set(gpu, gpu->bus_freq);
        return 0;
}

static int disable_axi(struct msm_gpu *gpu)
{
        if (gpu->ebi1_clk)
                clk_disable_unprepare(gpu->ebi1_clk);
        if (gpu->bus_freq)
                bs_set(gpu, 0);
        return 0;
}

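/* power up in order: power rails first, then clocks, then the AXI bus: */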
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
        int ret;

        DBG("%s", gpu->name);

        ret = enable_pwrrail(gpu);
        if (ret)
                return ret;

        ret = enable_clk(gpu);
        if (ret)
                return ret;

        ret = enable_axi(gpu);
        if (ret)
                return ret;

        gpu->needs_hw_init = true;

        return 0;
}

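/* power down in the reverse order: AXI bus, clocks, then power rails: */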
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
        int ret;

        DBG("%s", gpu->name);

        ret = disable_axi(gpu);
        if (ret)
                return ret;

        ret = disable_clk(gpu);
        if (ret)
                return ret;

        ret = disable_pwrrail(gpu);
        if (ret)
                return ret;

        return 0;
}

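/* lazily (re)initialize the hw, with the irq disabled so the handler
 * cannot run against partially initialized hardware:
 */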
int msm_gpu_hw_init(struct msm_gpu *gpu)
{
        int ret;

        WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));

        if (!gpu->needs_hw_init)
                return 0;

        disable_irq(gpu->irq);
        ret = gpu->funcs->hw_init(gpu);
        if (!ret)
                gpu->needs_hw_init = false;
        enable_irq(gpu->irq);

        return ret;
}

/*
 * Hangcheck detection for locked gpu:
 */

static void retire_submits(struct msm_gpu *gpu);

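/* recovery: signal the hung fence, report the offending task, reset the
 * gpu, and re-submit the work that was queued after the hung submit:
 */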
static void recover_worker(struct work_struct *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
        struct drm_device *dev = gpu->dev;
        struct msm_gem_submit *submit;
        uint32_t fence = gpu->funcs->last_fence(gpu);

        msm_update_fence(gpu->fctx, fence + 1);

        mutex_lock(&dev->struct_mutex);

        dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
        list_for_each_entry(submit, &gpu->submit_list, node) {
                if (submit->fence->seqno == (fence + 1)) {
                        struct task_struct *task;

                        rcu_read_lock();
                        task = pid_task(submit->pid, PIDTYPE_PID);
                        if (task) {
                                dev_err(dev->dev, "%s: offending task: %s\n",
                                                gpu->name, task->comm);
                        }
                        rcu_read_unlock();
                        break;
                }
        }

        if (msm_gpu_active(gpu)) {
                /* retire completed submits, plus the one that hung: */
                retire_submits(gpu);

                pm_runtime_get_sync(&gpu->pdev->dev);
                gpu->funcs->recover(gpu);
                pm_runtime_put_sync(&gpu->pdev->dev);

                /* replay the remaining submits after the one that hung: */
                list_for_each_entry(submit, &gpu->submit_list, node) {
                        gpu->funcs->submit(gpu, submit, NULL);
                }
        }

        mutex_unlock(&dev->struct_mutex);

        msm_gpu_retire(gpu);
}

static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);
        mod_timer(&gpu->hangcheck_timer,
                        round_jiffies_up(jiffies + DRM_MSM_HANGCHECK_JIFFIES));
}

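/* timer handler: check whether the completed fence has advanced; if not
 * and work is still outstanding, schedule recovery:
 */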
static void hangcheck_handler(unsigned long data)
{
        struct msm_gpu *gpu = (struct msm_gpu *)data;
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        uint32_t fence = gpu->funcs->last_fence(gpu);

        if (fence != gpu->hangcheck_fence) {
                /* some progress has been made.. ya! */
                gpu->hangcheck_fence = fence;
        } else if (fence < gpu->fctx->last_fence) {
                /* no progress and not done.. hung! */
                gpu->hangcheck_fence = fence;
                dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
                                gpu->name);
                dev_err(dev->dev, "%s:     completed fence: %u\n",
                                gpu->name, fence);
                dev_err(dev->dev, "%s:     submitted fence: %u\n",
                                gpu->name, gpu->fctx->last_fence);
                queue_work(priv->wq, &gpu->recover_work);
        }

        /* if still more pending work, reset the hangcheck timer: */
        if (gpu->fctx->last_fence > gpu->hangcheck_fence)
                hangcheck_timer_reset(gpu);

        /* workaround for missing irq: */
        queue_work(priv->wq, &gpu->retire_work);
}

/*
 * Performance Counters:
 */

/* called under perf_lock */
static int update_hw_cntrs(struct msm_gpu *gpu, uint32_t ncntrs, uint32_t *cntrs)
{
        uint32_t current_cntrs[ARRAY_SIZE(gpu->last_cntrs)];
        int i, n = min(ncntrs, gpu->num_perfcntrs);

        /* read current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                current_cntrs[i] = gpu_read(gpu, gpu->perfcntrs[i].sample_reg);

        /* update cntrs: */
        for (i = 0; i < n; i++)
                cntrs[i] = current_cntrs[i] - gpu->last_cntrs[i];

        /* save current values: */
        for (i = 0; i < gpu->num_perfcntrs; i++)
                gpu->last_cntrs[i] = current_cntrs[i];

        return n;
}

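/* accumulate active vs. total time for the current sampling period: */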
static void update_sw_cntrs(struct msm_gpu *gpu)
{
        ktime_t time;
        uint32_t elapsed;
        unsigned long flags;

        spin_lock_irqsave(&gpu->perf_lock, flags);
        if (!gpu->perfcntr_active)
                goto out;

        time = ktime_get();
        elapsed = ktime_to_us(ktime_sub(time, gpu->last_sample.time));

        gpu->totaltime += elapsed;
        if (gpu->last_sample.active)
                gpu->activetime += elapsed;

        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = time;

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

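/* start a sampling period; the gpu is kept powered until
 * msm_gpu_perfcntr_stop():
 */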
void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
        unsigned long flags;

        pm_runtime_get_sync(&gpu->pdev->dev);

        spin_lock_irqsave(&gpu->perf_lock, flags);
        /* we could dynamically enable/disable perfcntr registers too.. */
        gpu->last_sample.active = msm_gpu_active(gpu);
        gpu->last_sample.time = ktime_get();
        gpu->activetime = gpu->totaltime = 0;
        gpu->perfcntr_active = true;
        update_hw_cntrs(gpu, 0, NULL);
        spin_unlock_irqrestore(&gpu->perf_lock, flags);
}

void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
        gpu->perfcntr_active = false;
        pm_runtime_put_sync(&gpu->pdev->dev);
}

/* returns -errno or # of cntrs sampled */
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
                uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&gpu->perf_lock, flags);

        if (!gpu->perfcntr_active) {
                ret = -EINVAL;
                goto out;
        }

        *activetime = gpu->activetime;
        *totaltime = gpu->totaltime;

        gpu->activetime = gpu->totaltime = 0;

        ret = update_hw_cntrs(gpu, ncntrs, cntrs);

out:
        spin_unlock_irqrestore(&gpu->perf_lock, flags);

        return ret;
}

/*
 * Cmdstream submission/retirement:
 */

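/* drop the bo and iova references taken at submit time, and free the
 * submit:
 */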
static void retire_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
        int i;

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                /* move to inactive: */
                msm_gem_move_to_inactive(&msm_obj->base);
                msm_gem_put_iova(&msm_obj->base, gpu->aspace);
                drm_gem_object_unreference(&msm_obj->base);
        }

        pm_runtime_mark_last_busy(&gpu->pdev->dev);
        pm_runtime_put_autosuspend(&gpu->pdev->dev);
        msm_gem_submit_free(submit);
}

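/* retire submits in fifo order, stopping at the first whose fence has
 * not yet signaled:
 */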
static void retire_submits(struct msm_gpu *gpu)
{
        struct drm_device *dev = gpu->dev;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        while (!list_empty(&gpu->submit_list)) {
                struct msm_gem_submit *submit;

                submit = list_first_entry(&gpu->submit_list,
                                struct msm_gem_submit, node);

                if (dma_fence_is_signaled(submit->fence)) {
                        retire_submit(gpu, submit);
                } else {
                        break;
                }
        }
}

static void retire_worker(struct work_struct *work)
{
        struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
        struct drm_device *dev = gpu->dev;
        uint32_t fence = gpu->funcs->last_fence(gpu);

        msm_update_fence(gpu->fctx, fence);

        mutex_lock(&dev->struct_mutex);
        retire_submits(gpu);
        mutex_unlock(&dev->struct_mutex);
}

/* call from irq handler to schedule work to retire bo's */
void msm_gpu_retire(struct msm_gpu *gpu)
{
        struct msm_drm_private *priv = gpu->dev->dev_private;
        queue_work(priv->wq, &gpu->retire_work);
        update_sw_cntrs(gpu);
}

/* add bo's to gpu's ring, and kick gpu: */
void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
{
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        int i;

        WARN_ON(!mutex_is_locked(&dev->struct_mutex));

        pm_runtime_get_sync(&gpu->pdev->dev);

        msm_gpu_hw_init(gpu);

        list_add_tail(&submit->node, &gpu->submit_list);

        msm_rd_dump_submit(submit);

        update_sw_cntrs(gpu);

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;
                uint64_t iova;

                /* can't happen yet.. but when we add 2d support we'll have
                 * to deal w/ cross-ring synchronization:
                 */
                WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

                /* submit takes a reference to the bo and iova until retired: */
                drm_gem_object_reference(&msm_obj->base);
                msm_gem_get_iova(&msm_obj->base,
                                submit->gpu->aspace, &iova);

                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
                        msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
                else if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
                        msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
        }

        gpu->funcs->submit(gpu, submit, ctx);
        priv->lastctx = ctx;

        hangcheck_timer_reset(gpu);
}

/*
 * Init/Cleanup:
 */

static irqreturn_t irq_handler(int irq, void *data)
{
        struct msm_gpu *gpu = data;
        return gpu->funcs->irq(gpu);
}

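/* clocks are optional: a clock that cannot be found is treated as NULL: */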
static struct clk *get_clock(struct device *dev, const char *name)
{
        struct clk *clk = devm_clk_get(dev, name);

        return IS_ERR(clk) ? NULL : clk;
}

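/* grab all of the clocks named in the "clock-names" DT property: */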
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
        struct device *dev = &pdev->dev;
        struct property *prop;
        const char *name;
        int i = 0;

        gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
        if (gpu->nr_clocks < 1) {
                gpu->nr_clocks = 0;
                return 0;
        }

        gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
                GFP_KERNEL);
        if (!gpu->grp_clks)
                return -ENOMEM;

        of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
                gpu->grp_clks[i] = get_clock(dev, name);

                /* Remember the key clocks that we need to control later */
                if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
                        gpu->core_clk = gpu->grp_clks[i];
                else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
                        gpu->rbbmtimer_clk = gpu->grp_clks[i];

                ++i;
        }

        return 0;
}

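/*
 * One-time setup for a newly allocated gpu: map the registers, hook up
 * the irq, and acquire the clocks, regulators, iommu and ringbuffer:
 */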
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, struct msm_gpu_config *config)
{
        struct iommu_domain *iommu;
        int ret;

        if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
                gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);

        gpu->dev = drm;
        gpu->funcs = funcs;
        gpu->name = name;
        gpu->fctx = msm_fence_context_alloc(drm, name);
        if (IS_ERR(gpu->fctx)) {
                ret = PTR_ERR(gpu->fctx);
                gpu->fctx = NULL;
                goto fail;
        }

        INIT_LIST_HEAD(&gpu->active_list);
        INIT_WORK(&gpu->retire_work, retire_worker);
        INIT_WORK(&gpu->recover_work, recover_worker);

        INIT_LIST_HEAD(&gpu->submit_list);

        setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
                        (unsigned long)gpu);

        spin_lock_init(&gpu->perf_lock);

        /* Map registers: */
        gpu->mmio = msm_ioremap(pdev, config->ioname, name);
        if (IS_ERR(gpu->mmio)) {
                ret = PTR_ERR(gpu->mmio);
                goto fail;
        }

        /* Get Interrupt: */
        gpu->irq = platform_get_irq_byname(pdev, config->irqname);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                dev_err(drm->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }

        ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
                        IRQF_TRIGGER_HIGH, gpu->name, gpu);
        if (ret) {
                dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
                goto fail;
        }

        ret = get_clocks(pdev, gpu);
        if (ret)
                goto fail;

        gpu->ebi1_clk = msm_clk_get(pdev, "bus");
        DBG("ebi1_clk: %p", gpu->ebi1_clk);
        if (IS_ERR(gpu->ebi1_clk))
                gpu->ebi1_clk = NULL;

        /* Acquire regulators: */
        gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
        DBG("gpu_reg: %p", gpu->gpu_reg);
        if (IS_ERR(gpu->gpu_reg))
                gpu->gpu_reg = NULL;

        gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
        DBG("gpu_cx: %p", gpu->gpu_cx);
        if (IS_ERR(gpu->gpu_cx))
                gpu->gpu_cx = NULL;

        /* Setup IOMMU.. eventually we will (I think) do this once per context
         * and have separate page tables per context.  For now, to keep things
         * simple and to get something working, just use a single address space:
         */
        iommu = iommu_domain_alloc(&platform_bus_type);
        if (iommu) {
                iommu->geometry.aperture_start = config->va_start;
                iommu->geometry.aperture_end = config->va_end;

                dev_info(drm->dev, "%s: using IOMMU\n", name);
                gpu->aspace = msm_gem_address_space_create(&pdev->dev,
                                iommu, "gpu");
                if (IS_ERR(gpu->aspace)) {
                        ret = PTR_ERR(gpu->aspace);
                        dev_err(drm->dev, "failed to init iommu: %d\n", ret);
                        gpu->aspace = NULL;
                        iommu_domain_free(iommu);
                        goto fail;
                }
        } else {
                dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
        }

        /* Create ringbuffer: */
        gpu->rb = msm_ringbuffer_new(gpu, config->ringsz);
        if (IS_ERR(gpu->rb)) {
                ret = PTR_ERR(gpu->rb);
                gpu->rb = NULL;
                dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
                goto fail;
        }

        gpu->pdev = pdev;
        platform_set_drvdata(pdev, gpu);

        bs_init(gpu);

        return 0;

fail:
        return ret;
}

void msm_gpu_cleanup(struct msm_gpu *gpu)
{
        DBG("%s", gpu->name);

        WARN_ON(!list_empty(&gpu->active_list));

        bs_fini(gpu);

        if (gpu->rb) {
                if (gpu->rb_iova)
                        msm_gem_put_iova(gpu->rb->bo, gpu->aspace);
                msm_ringbuffer_destroy(gpu->rb);
        }

        if (gpu->fctx)
                msm_fence_context_free(gpu->fctx);
}