linux/drivers/gpu/host1x/job.c
/*
 * Tegra host1x Job
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/host1x.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/kref.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <trace/events/host1x.h>

#include "channel.h"
#include "dev.h"
#include "job.h"
#include "syncpt.h"

#define HOST1X_WAIT_SYNCPT_OFFSET 0x8

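/*
 * Allocate a job and all of its variably-sized arrays (relocs, unpin data,
 * wait checks, gathers and DMA addresses) as a single kzalloc() allocation,
 * so that a single kfree() in job_free() releases everything.
 *
 * A minimal usage sketch (assuming the caller already owns a channel and a
 * command buffer BO; names and error handling abbreviated):
 *
 *	job = host1x_job_alloc(channel, 1, 0, 0);
 *	if (!job)
 *		return -ENOMEM;
 *
 *	host1x_job_add_gather(job, cmdbuf_bo, num_words, 0);
 *
 *	err = host1x_job_pin(job, dev);
 *	...
 *	host1x_job_unpin(job);
 *	host1x_job_put(job);
 */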
struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs,
				    u32 num_waitchks)
{
	struct host1x_job *job = NULL;
	unsigned int num_unpins = num_cmdbufs + num_relocs;
	u64 total;
	void *mem;

	/* Check that we're not going to overflow */
	total = sizeof(struct host1x_job) +
		(u64)num_relocs * sizeof(struct host1x_reloc) +
		(u64)num_unpins * sizeof(struct host1x_job_unpin_data) +
		(u64)num_waitchks * sizeof(struct host1x_waitchk) +
		(u64)num_cmdbufs * sizeof(struct host1x_job_gather) +
		(u64)num_unpins * sizeof(dma_addr_t) +
		(u64)num_unpins * sizeof(u32 *);
	if (total > ULONG_MAX)
		return NULL;

	mem = job = kzalloc(total, GFP_KERNEL);
	if (!job)
		return NULL;

	kref_init(&job->ref);
	job->channel = ch;

	/* Redistribute memory to the structs */
	mem += sizeof(struct host1x_job);
	job->relocarray = num_relocs ? mem : NULL;
	mem += num_relocs * sizeof(struct host1x_reloc);
	job->unpins = num_unpins ? mem : NULL;
	mem += num_unpins * sizeof(struct host1x_job_unpin_data);
	job->waitchk = num_waitchks ? mem : NULL;
	mem += num_waitchks * sizeof(struct host1x_waitchk);
	job->gathers = num_cmdbufs ? mem : NULL;
	mem += num_cmdbufs * sizeof(struct host1x_job_gather);
	job->addr_phys = num_unpins ? mem : NULL;

	job->reloc_addr_phys = job->addr_phys;
	job->gather_addr_phys = &job->addr_phys[num_relocs];

	return job;
}
EXPORT_SYMBOL(host1x_job_alloc);

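/* Grab a reference to the job so that it isn't freed while still in use. */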
struct host1x_job *host1x_job_get(struct host1x_job *job)
{
	kref_get(&job->ref);
	return job;
}
EXPORT_SYMBOL(host1x_job_get);

static void job_free(struct kref *ref)
{
	struct host1x_job *job = container_of(ref, struct host1x_job, ref);

	kfree(job);
}

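/* Drop a reference; the job is freed once the last reference is gone. */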
void host1x_job_put(struct host1x_job *job)
{
	kref_put(&job->ref, job_free);
}
EXPORT_SYMBOL(host1x_job_put);

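/*
 * Append one gather (a command buffer BO plus word count and offset) to the
 * job. The caller must not add more gathers than the num_cmdbufs it passed
 * to host1x_job_alloc(), since the gathers array was sized at allocation
 * time and this function performs no bounds check.
 */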
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   u32 words, u32 offset)
{
	struct host1x_job_gather *cur_gather = &job->gathers[job->num_gathers];

	cur_gather->words = words;
	cur_gather->bo = bo;
	cur_gather->offset = offset;
	job->num_gathers++;
}
EXPORT_SYMBOL(host1x_job_add_gather);

/*
 * NULL an already satisfied WAIT_SYNCPT host method by patching its
 * arguments in the command stream. The method data is changed to reference
 * a reserved (never given out or incremented) HOST1X_SYNCPT_RESERVED
 * syncpoint with a matching threshold value of 0, so it is guaranteed to
 * be popped by the host HW.
 */
static void host1x_syncpt_patch_offset(struct host1x_syncpt *sp,
				       struct host1x_bo *h, u32 offset)
{
	void *patch_addr = NULL;

	/* patch the wait */
	patch_addr = host1x_bo_kmap(h, offset >> PAGE_SHIFT);
	if (patch_addr) {
		host1x_syncpt_patch_wait(sp,
					 patch_addr + (offset & ~PAGE_MASK));
		host1x_bo_kunmap(h, offset >> PAGE_SHIFT, patch_addr);
	} else {
		pr_err("Could not map cmdbuf for wait check\n");
	}
}

/*
 * Check driver-supplied waitchk structs for syncpt thresholds
 * that have already been satisfied and NULL the comparison (to
 * avoid a wrap condition in the HW).
 */
static int do_waitchks(struct host1x_job *job, struct host1x *host,
		       struct host1x_job_gather *g)
{
	struct host1x_bo *patch = g->bo;
	int i;

	/* compare syncpt vs wait threshold */
	for (i = 0; i < job->num_waitchk; i++) {
		struct host1x_waitchk *wait = &job->waitchk[i];
		struct host1x_syncpt *sp;

		/* validate syncpt id */
		if (wait->syncpt_id >= host1x_syncpt_nb_pts(host))
			continue;

		/* skip all other gathers */
		if (patch != wait->bo)
			continue;

		sp = host1x_syncpt_get(host, wait->syncpt_id);

		trace_host1x_syncpt_wait_check(wait->bo, wait->offset,
					       wait->syncpt_id, wait->thresh,
					       host1x_syncpt_read_min(sp));

		if (host1x_syncpt_is_expired(sp, wait->thresh)) {
			dev_dbg(host->dev,
				"drop WAIT id %u (%s) thresh 0x%x, min 0x%x\n",
				wait->syncpt_id, sp->name, wait->thresh,
				host1x_syncpt_read_min(sp));

			host1x_syncpt_patch_offset(sp, patch,
						   g->offset + wait->offset);
		}

		wait->bo = NULL;
	}

	return 0;
}

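/*
 * Pin every BO referenced by the job (relocation targets first, then the
 * gather command buffers) and record each pin in job->unpins so that it
 * can be undone later. When an IOMMU domain is available and the firewall
 * is disabled, gathers are additionally mapped into the host1x IOVA space
 * so the hardware sees a contiguous view of each buffer.
 */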
static int pin_job(struct host1x *host, struct host1x_job *job)
{
	unsigned int i;
	int err;

	job->num_unpins = 0;

	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		struct sg_table *sgt;
		dma_addr_t phys_addr;

		reloc->target.bo = host1x_bo_get(reloc->target.bo);
		if (!reloc->target.bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(reloc->target.bo, &sgt);
		if (!phys_addr) {
			err = -EINVAL;
			goto unpin;
		}

		job->addr_phys[job->num_unpins] = phys_addr;
		job->unpins[job->num_unpins].bo = reloc->target.bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		size_t gather_size = 0;
		struct scatterlist *sg;
		struct sg_table *sgt;
		dma_addr_t phys_addr;
		unsigned long shift;
		struct iova *alloc;
		unsigned int j;

		g->bo = host1x_bo_get(g->bo);
		if (!g->bo) {
			err = -EINVAL;
			goto unpin;
		}

		phys_addr = host1x_bo_pin(g->bo, &sgt);
		if (!phys_addr) {
			err = -EINVAL;
			goto unpin;
		}

		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) && host->domain) {
			for_each_sg(sgt->sgl, sg, sgt->nents, j)
				gather_size += sg->length;
			gather_size = iova_align(&host->iova, gather_size);

			shift = iova_shift(&host->iova);
			alloc = alloc_iova(&host->iova, gather_size >> shift,
					   host->iova_end >> shift, true);
			if (!alloc) {
				err = -ENOMEM;
				goto unpin;
			}

			err = iommu_map_sg(host->domain,
					iova_dma_addr(&host->iova, alloc),
					sgt->sgl, sgt->nents, IOMMU_READ);
			if (err == 0) {
				__free_iova(&host->iova, alloc);
				err = -EINVAL;
				goto unpin;
			}

			job->addr_phys[job->num_unpins] =
				iova_dma_addr(&host->iova, alloc);
			job->unpins[job->num_unpins].size = gather_size;
		} else {
			job->addr_phys[job->num_unpins] = phys_addr;
		}

		job->gather_addr_phys[i] = job->addr_phys[job->num_unpins];

		job->unpins[job->num_unpins].bo = g->bo;
		job->unpins[job->num_unpins].sgt = sgt;
		job->num_unpins++;
	}

	return 0;

unpin:
	host1x_job_unpin(job);
	return err;
}

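/*
 * Patch the relocations for a single gather: for every reloc that targets
 * this command buffer, write the pinned address (shifted as requested) into
 * the corresponding word. With the firewall enabled, the patch is applied
 * to the gather copy instead of the original BO.
 */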
static int do_relocs(struct host1x_job *job, struct host1x_job_gather *g)
{
	int i = 0;
	u32 last_page = ~0;
	void *cmdbuf_page_addr = NULL;
	struct host1x_bo *cmdbuf = g->bo;

	/* pin & patch the relocs for one gather */
	for (i = 0; i < job->num_relocs; i++) {
		struct host1x_reloc *reloc = &job->relocarray[i];
		u32 reloc_addr = (job->reloc_addr_phys[i] +
				  reloc->target.offset) >> reloc->shift;
		u32 *target;

		/* skip all other gathers */
		if (cmdbuf != reloc->cmdbuf.bo)
			continue;

		if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
			target = (u32 *)job->gather_copy_mapped +
					reloc->cmdbuf.offset / sizeof(u32) +
						g->offset / sizeof(u32);
			goto patch_reloc;
		}

		if (last_page != reloc->cmdbuf.offset >> PAGE_SHIFT) {
			if (cmdbuf_page_addr)
				host1x_bo_kunmap(cmdbuf, last_page,
						 cmdbuf_page_addr);

			cmdbuf_page_addr = host1x_bo_kmap(cmdbuf,
					reloc->cmdbuf.offset >> PAGE_SHIFT);
			last_page = reloc->cmdbuf.offset >> PAGE_SHIFT;

			if (unlikely(!cmdbuf_page_addr)) {
				pr_err("Could not map cmdbuf for relocation\n");
				return -ENOMEM;
			}
		}

		target = cmdbuf_page_addr + (reloc->cmdbuf.offset & ~PAGE_MASK);
patch_reloc:
		*target = reloc_addr;
	}

	if (cmdbuf_page_addr)
		host1x_bo_kunmap(cmdbuf, last_page, cmdbuf_page_addr);

	return 0;
}

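/* Check that the next reloc table entry matches the word being patched. */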
static bool check_reloc(struct host1x_reloc *reloc, struct host1x_bo *cmdbuf,
			unsigned int offset)
{
	offset *= sizeof(u32);

	if (reloc->cmdbuf.bo != cmdbuf || reloc->cmdbuf.offset != offset)
		return false;

	/* relocation shift value validation isn't implemented yet */
	if (reloc->shift)
		return false;

	return true;
}

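/* Check that the next waitchk entry matches the WAIT_SYNCPT word being checked. */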
static bool check_wait(struct host1x_waitchk *wait, struct host1x_bo *cmdbuf,
		       unsigned int offset)
{
	offset *= sizeof(u32);

	if (wait->bo != cmdbuf || wait->offset != offset)
		return false;

	return true;
}

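/*
 * State tracked by the command stream firewall while it walks one gather:
 * the remaining reloc/waitchk tables, the current class and the decoded
 * fields of the opcode being validated.
 */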
struct host1x_firewall {
	struct host1x_job *job;
	struct device *dev;

	unsigned int num_relocs;
	struct host1x_reloc *reloc;

	unsigned int num_waitchks;
	struct host1x_waitchk *waitchk;

	struct host1x_bo *cmdbuf;
	unsigned int offset;

	u32 words;
	u32 class;
	u32 reg;
	u32 mask;
	u32 count;
};

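/*
 * Validate a single register write: writes to address registers must be
 * backed by a matching relocation, and writes to the host1x wait-syncpoint
 * register must be backed by a matching wait check.
 */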
static int check_register(struct host1x_firewall *fw, unsigned long offset)
{
	if (!fw->job->is_addr_reg)
		return 0;

	if (fw->job->is_addr_reg(fw->dev, fw->class, offset)) {
		if (!fw->num_relocs)
			return -EINVAL;

		if (!check_reloc(fw->reloc, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_relocs--;
		fw->reloc++;
	}

	if (offset == HOST1X_WAIT_SYNCPT_OFFSET) {
		if (fw->class != HOST1X_CLASS_HOST1X)
			return -EINVAL;

		if (!fw->num_waitchks)
			return -EINVAL;

		if (!check_wait(fw->waitchk, fw->cmdbuf, fw->offset))
			return -EINVAL;

		fw->num_waitchks--;
		fw->waitchk++;
	}

	return 0;
}

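/*
 * A class switch is allowed only if the driver's is_valid_class() callback
 * accepts the new class or, lacking a callback, if it switches back to the
 * job's own class.
 */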
static int check_class(struct host1x_firewall *fw, u32 class)
{
	if (!fw->job->is_valid_class) {
		if (fw->class != class)
			return -EINVAL;
	} else {
		if (!fw->job->is_valid_class(fw->class))
			return -EINVAL;
	}

	return 0;
}

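/*
 * Validate a mask write: each set bit in the mask consumes one data word
 * and writes the register at the corresponding offset, e.g. mask 0b101
 * touches reg and reg + 2.
 */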
static int check_mask(struct host1x_firewall *fw)
{
	u32 mask = fw->mask;
	u32 reg = fw->reg;
	int ret;

	while (mask) {
		if (fw->words == 0)
			return -EINVAL;

		if (mask & 1) {
			ret = check_register(fw, reg);
			if (ret < 0)
				return ret;

			fw->words--;
			fw->offset++;
		}
		mask >>= 1;
		reg++;
	}

	return 0;
}

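/* Validate an incrementing write: count words to consecutive registers. */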
static int check_incr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	u32 reg = fw->reg;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, reg);
		if (ret < 0)
			return ret;

		reg++;
		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

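/* Validate a non-incrementing write: count words to the same register. */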
static int check_nonincr(struct host1x_firewall *fw)
{
	u32 count = fw->count;
	int ret;

	while (count) {
		if (fw->words == 0)
			return -EINVAL;

		ret = check_register(fw, fw->reg);
		if (ret < 0)
			return ret;

		fw->words--;
		fw->offset++;
		count--;
	}

	return 0;
}

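/*
 * Walk the copied gather word by word, decoding the opcode from the top
 * four bits of each word, and verify every register write against the
 * reloc and waitchk tables. Opcodes 4 and 14 (IMM and EXTEND in the
 * host1x encoding) carry no extra payload words and are accepted as-is.
 */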
static int validate(struct host1x_firewall *fw, struct host1x_job_gather *g)
{
	u32 *cmdbuf_base = (u32 *)fw->job->gather_copy_mapped +
		(g->offset / sizeof(u32));
	u32 job_class = fw->class;
	int err = 0;

	fw->words = g->words;
	fw->cmdbuf = g->bo;
	fw->offset = 0;

	while (fw->words && !err) {
		u32 word = cmdbuf_base[fw->offset];
		u32 opcode = (word & 0xf0000000) >> 28;

		fw->mask = 0;
		fw->reg = 0;
		fw->count = 0;
		fw->words--;
		fw->offset++;

		switch (opcode) {
		case 0:
			fw->class = word >> 6 & 0x3ff;
			fw->mask = word & 0x3f;
			fw->reg = word >> 16 & 0xfff;
			err = check_class(fw, job_class);
			if (!err)
				err = check_mask(fw);
			if (err)
				goto out;
			break;

		case 1:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_incr(fw);
			if (err)
				goto out;
			break;

		case 2:
			fw->reg = word >> 16 & 0xfff;
			fw->count = word & 0xffff;
			err = check_nonincr(fw);
			if (err)
				goto out;
			break;

		case 3:
			fw->mask = word & 0xffff;
			fw->reg = word >> 16 & 0xfff;
			err = check_mask(fw);
			if (err)
				goto out;
			break;

		case 4:
		case 14:
			break;

		default:
			err = -EINVAL;
			break;
		}
	}

out:
	return err;
}

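/*
 * With the firewall enabled, copy all gathers into one contiguous DMA
 * buffer and validate them there. The hardware then executes the copy,
 * so userspace cannot rewrite a gather after it has been checked.
 */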
static inline int copy_gathers(struct host1x_job *job, struct device *dev)
{
	struct host1x_firewall fw;
	size_t size = 0;
	size_t offset = 0;
	int i;

	fw.job = job;
	fw.dev = dev;
	fw.reloc = job->relocarray;
	fw.num_relocs = job->num_relocs;
	fw.waitchk = job->waitchk;
	fw.num_waitchks = job->num_waitchk;
	fw.class = job->class;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		size += g->words * sizeof(u32);
	}

	/*
	 * Try a non-blocking allocation from the higher-priority pools
	 * first, since waiting for an allocation here is a major
	 * performance hit.
	 */
	job->gather_copy_mapped = dma_alloc_wc(dev, size, &job->gather_copy,
					       GFP_NOWAIT);

	/* the higher-priority allocation failed, try the generic blocking one */
	if (!job->gather_copy_mapped)
		job->gather_copy_mapped = dma_alloc_wc(dev, size,
						       &job->gather_copy,
						       GFP_KERNEL);
	if (!job->gather_copy_mapped)
		return -ENOMEM;

	job->gather_copy_size = size;

	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];
		void *gather;

		/* Copy the gather */
		gather = host1x_bo_mmap(g->bo);
		memcpy(job->gather_copy_mapped + offset, gather + g->offset,
		       g->words * sizeof(u32));
		host1x_bo_munmap(g->bo, gather);

		/* Store the location in the buffer */
		g->base = job->gather_copy;
		g->offset = offset;

		/* Validate the job */
		if (validate(&fw, g))
			return -EINVAL;

		offset += g->words * sizeof(u32);
	}

	/* No relocs or waitchks should remain at this point */
	if (fw.num_relocs || fw.num_waitchks)
		return -EINVAL;

	return 0;
}

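/*
 * Prepare a job for submission: snapshot the syncpoint values needed by
 * the wait checks, pin all buffers, optionally copy and validate the
 * gathers, and finally patch relocations and already-expired waits.
 */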
int host1x_job_pin(struct host1x_job *job, struct device *dev)
{
	int err;
	unsigned int i, j;
	struct host1x *host = dev_get_drvdata(dev->parent);
	DECLARE_BITMAP(waitchk_mask, host1x_syncpt_nb_pts(host));

	bitmap_zero(waitchk_mask, host1x_syncpt_nb_pts(host));
	for (i = 0; i < job->num_waitchk; i++) {
		u32 syncpt_id = job->waitchk[i].syncpt_id;

		if (syncpt_id < host1x_syncpt_nb_pts(host))
			set_bit(syncpt_id, waitchk_mask);
	}

	/* get current syncpt values for waitchk */
	for_each_set_bit(i, waitchk_mask, host1x_syncpt_nb_pts(host))
		host1x_syncpt_load(host->syncpt + i);

	/* pin memory */
	err = pin_job(host, job);
	if (err)
		goto out;

	if (IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL)) {
		err = copy_gathers(job, dev);
		if (err)
			goto out;
	}

	/* patch gathers */
	for (i = 0; i < job->num_gathers; i++) {
		struct host1x_job_gather *g = &job->gathers[i];

		/* process each gather mem only once */
		if (g->handled)
			continue;

		/* copy_gathers() sets the gather base if the firewall is enabled */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL))
			g->base = job->gather_addr_phys[i];

		for (j = i + 1; j < job->num_gathers; j++) {
			if (job->gathers[j].bo == g->bo) {
				job->gathers[j].handled = true;
				job->gathers[j].base = g->base;
			}
		}

		err = do_relocs(job, g);
		if (err)
			break;

		err = do_waitchks(job, host, g);
		if (err)
			break;
	}

out:
	if (err)
		host1x_job_unpin(job);
	wmb();

	return err;
}
EXPORT_SYMBOL(host1x_job_pin);

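/*
 * Undo pin_job(): tear down any IOMMU mappings, unpin and release every
 * BO, and free the firewall's gather copy if one was allocated.
 */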
void host1x_job_unpin(struct host1x_job *job)
{
	struct host1x *host = dev_get_drvdata(job->channel->dev->parent);
	unsigned int i;

	for (i = 0; i < job->num_unpins; i++) {
		struct host1x_job_unpin_data *unpin = &job->unpins[i];

		/* only gathers were IOMMU-mapped, and only they have a size */
		if (!IS_ENABLED(CONFIG_TEGRA_HOST1X_FIREWALL) &&
		    host->domain && unpin->size) {
			iommu_unmap(host->domain, job->addr_phys[i],
				    unpin->size);
			free_iova(&host->iova,
				iova_pfn(&host->iova, job->addr_phys[i]));
		}

		host1x_bo_unpin(unpin->bo, unpin->sgt);
		host1x_bo_put(unpin->bo);
	}

	job->num_unpins = 0;

	if (job->gather_copy_size)
		dma_free_wc(job->channel->dev, job->gather_copy_size,
			    job->gather_copy_mapped, job->gather_copy);
}
EXPORT_SYMBOL(host1x_job_unpin);

/*
 * Debug routine used to dump job entries
 */
void host1x_job_dump(struct device *dev, struct host1x_job *job)
{
	dev_dbg(dev, "    SYNCPT_ID   %d\n", job->syncpt_id);
	dev_dbg(dev, "    SYNCPT_VAL  %d\n", job->syncpt_end);
	dev_dbg(dev, "    FIRST_GET   0x%x\n", job->first_get);
	dev_dbg(dev, "    TIMEOUT     %d\n", job->timeout);
	dev_dbg(dev, "    NUM_SLOTS   %d\n", job->num_slots);
	dev_dbg(dev, "    NUM_HANDLES %d\n", job->num_unpins);
}