linux/drivers/gpu/host1x/syncpt.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Tegra host1x Syncpoints
 *
 * Copyright (c) 2010-2015, NVIDIA Corporation.
 */

#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>

#include <trace/events/host1x.h>

#include "syncpt.h"
#include "dev.h"
#include "intr.h"
#include "debug.h"

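/*
 * Waits re-check the syncpoint value every SYNCPT_CHECK_PERIOD jiffies;
 * MAX_STUCK_CHECK_COUNT bounds how long host1x_syncpt_wait() keeps dumping
 * debug information about a seemingly stuck wait.
 */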
#define SYNCPT_CHECK_PERIOD (2 * HZ)
#define MAX_STUCK_CHECK_COUNT 15

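/*
 * Reserve the first unused wait base, or return NULL if they are all in use.
 * Access to the ->requested flags is serialized by the caller holding
 * host->syncpt_mutex.
 */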
static struct host1x_syncpt_base *
host1x_syncpt_base_request(struct host1x *host)
{
        struct host1x_syncpt_base *bases = host->bases;
        unsigned int i;

        for (i = 0; i < host->info->nb_bases; i++)
                if (!bases[i].requested)
                        break;

        if (i >= host->info->nb_bases)
                return NULL;

        bases[i].requested = true;
        return &bases[i];
}

static void host1x_syncpt_base_free(struct host1x_syncpt_base *base)
{
        if (base)
                base->requested = false;
}

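/*
 * Reserve the first unused syncpoint, attach a wait base if requested via
 * HOST1X_SYNCPT_HAS_BASE, name it "<id>-<client device name>" and record
 * whether it is client-managed. Returns NULL if no free syncpoint (or wait
 * base) is available or if the name allocation fails.
 */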
static struct host1x_syncpt *host1x_syncpt_alloc(struct host1x *host,
                                                 struct host1x_client *client,
                                                 unsigned long flags)
{
        struct host1x_syncpt *sp = host->syncpt;
        unsigned int i;
        char *name;

        mutex_lock(&host->syncpt_mutex);

        for (i = 0; i < host->info->nb_pts && sp->name; i++, sp++)
                ;

        if (i >= host->info->nb_pts)
                goto unlock;

        if (flags & HOST1X_SYNCPT_HAS_BASE) {
                sp->base = host1x_syncpt_base_request(host);
                if (!sp->base)
                        goto unlock;
        }

        name = kasprintf(GFP_KERNEL, "%02u-%s", sp->id,
                         client ? dev_name(client->dev) : NULL);
        if (!name)
                goto free_base;

        sp->client = client;
        sp->name = name;

        if (flags & HOST1X_SYNCPT_CLIENT_MANAGED)
                sp->client_managed = true;
        else
                sp->client_managed = false;

        mutex_unlock(&host->syncpt_mutex);
        return sp;

free_base:
        host1x_syncpt_base_free(sp->base);
        sp->base = NULL;
unlock:
        mutex_unlock(&host->syncpt_mutex);
        return NULL;
}

/**
 * host1x_syncpt_id() - retrieve syncpoint ID
 * @sp: host1x syncpoint
 *
 * Given a pointer to a struct host1x_syncpt, retrieves its ID. This ID is
 * often used as a value to program into registers that control how hardware
 * blocks interact with syncpoints.
 */
u32 host1x_syncpt_id(struct host1x_syncpt *sp)
{
        return sp->id;
}
EXPORT_SYMBOL(host1x_syncpt_id);

/**
 * host1x_syncpt_incr_max() - update the value sent to hardware
 * @sp: host1x syncpoint
 * @incrs: number of increments
 *
 * Adds @incrs to the cached maximum value of the syncpoint and returns the
 * new maximum, which can be used as a threshold to wait on.
 */
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs)
{
        return (u32)atomic_add_return(incrs, &sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_incr_max);

/*
 * Write cached syncpoint and waitbase values to hardware.
 */
void host1x_syncpt_restore(struct host1x *host)
{
        struct host1x_syncpt *sp_base = host->syncpt;
        unsigned int i;

        for (i = 0; i < host1x_syncpt_nb_pts(host); i++)
                host1x_hw_syncpt_restore(host, sp_base + i);

        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_restore_wait_base(host, sp_base + i);

        wmb();
}

/*
 * Update the cached syncpoint and waitbase values by reading them
 * from the registers.
 */
void host1x_syncpt_save(struct host1x *host)
{
        struct host1x_syncpt *sp_base = host->syncpt;
        unsigned int i;

        for (i = 0; i < host1x_syncpt_nb_pts(host); i++) {
                if (host1x_syncpt_client_managed(sp_base + i))
                        host1x_hw_syncpt_load(host, sp_base + i);
                else
                        WARN_ON(!host1x_syncpt_idle(sp_base + i));
        }

        for (i = 0; i < host1x_syncpt_nb_bases(host); i++)
                host1x_hw_syncpt_load_wait_base(host, sp_base + i);
}

/*
 * Update the cached syncpoint value by reading a new value from the hardware
 * register.
 */
u32 host1x_syncpt_load(struct host1x_syncpt *sp)
{
        u32 val;

        val = host1x_hw_syncpt_load(sp->host, sp);
        trace_host1x_syncpt_load_min(sp->id, val);

        return val;
}

/*
 * Get the current syncpoint base
 */
u32 host1x_syncpt_load_wait_base(struct host1x_syncpt *sp)
{
        host1x_hw_syncpt_load_wait_base(sp->host, sp);

        return sp->base_val;
}

/**
 * host1x_syncpt_incr() - increment syncpoint value from CPU, updating cache
 * @sp: host1x syncpoint
 */
int host1x_syncpt_incr(struct host1x_syncpt *sp)
{
        return host1x_hw_syncpt_cpu_incr(sp->host, sp);
}
EXPORT_SYMBOL(host1x_syncpt_incr);

/*
 * Update the cached syncpoint value from hardware, and return true if the
 * syncpoint has expired, false if we may need to wait.
 */
static bool syncpt_load_min_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
        host1x_hw_syncpt_load(sp->host, sp);

        return host1x_syncpt_is_expired(sp, thresh);
}

/**
 * host1x_syncpt_wait() - wait for a syncpoint to reach a given value
 * @sp: host1x syncpoint
 * @thresh: threshold
 * @timeout: maximum time to wait for the syncpoint to reach the given value,
 *           in jiffies (a negative value waits indefinitely)
 * @value: return location for the syncpoint value
 *
 * Returns 0 once the syncpoint has reached @thresh, -EAGAIN if the wait timed
 * out (or @timeout was zero), -ENOMEM if no waiter could be allocated, or a
 * negative error code if the wait was interrupted by a signal.
 */
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
                       u32 *value)
{
        DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
        void *ref;
        struct host1x_waitlist *waiter;
        int err = 0, check_count = 0;
        u32 val;

        if (value)
                *value = 0;

        /* first check cache */
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = host1x_syncpt_load(sp);

                return 0;
        }

        /* try to read from register */
        val = host1x_hw_syncpt_load(sp->host, sp);
        if (host1x_syncpt_is_expired(sp, thresh)) {
                if (value)
                        *value = val;

                goto done;
        }

        if (!timeout) {
                err = -EAGAIN;
                goto done;
        }

        /* allocate a waiter */
        waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
        if (!waiter) {
                err = -ENOMEM;
                goto done;
        }

        /* schedule a wakeup when the syncpoint value is reached */
        err = host1x_intr_add_action(sp->host, sp, thresh,
                                     HOST1X_INTR_ACTION_WAKEUP_INTERRUPTIBLE,
                                     &wq, waiter, &ref);
        if (err)
                goto done;

        err = -EAGAIN;
        /* Caller-specified timeout may be impractically low */
        if (timeout < 0)
                timeout = LONG_MAX;

        /* wait for the syncpoint, or timeout, or signal */
        while (timeout) {
                long check = min_t(long, SYNCPT_CHECK_PERIOD, timeout);
                int remain;

                remain = wait_event_interruptible_timeout(wq,
                                syncpt_load_min_is_expired(sp, thresh),
                                check);
                if (remain > 0 || host1x_syncpt_is_expired(sp, thresh)) {
                        if (value)
                                *value = host1x_syncpt_load(sp);

                        err = 0;

                        break;
                }

                if (remain < 0) {
                        err = remain;
                        break;
                }

                timeout -= check;

                if (timeout && check_count <= MAX_STUCK_CHECK_COUNT) {
                        dev_warn(sp->host->dev,
                                 "%s: syncpoint id %u (%s) stuck waiting %d, timeout=%ld\n",
                                 current->comm, sp->id, sp->name,
                                 thresh, timeout);

                        host1x_debug_dump_syncpts(sp->host);

                        if (check_count == MAX_STUCK_CHECK_COUNT)
                                host1x_debug_dump(sp->host);

                        check_count++;
                }
        }

        host1x_intr_put_ref(sp->host, sp->id, ref);

done:
        return err;
}
EXPORT_SYMBOL(host1x_syncpt_wait);
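
/*
 * Illustrative example of how a client might use this API (not taken from an
 * actual driver): reserve one increment, submit the work that performs it and
 * wait up to 100 ms for completion.
 *
 *      u32 value;
 *      u32 thresh = host1x_syncpt_incr_max(sp, 1);
 *
 *      ... submit a job that increments the syncpoint once ...
 *
 *      err = host1x_syncpt_wait(sp, thresh, msecs_to_jiffies(100), &value);
 *      if (err == -EAGAIN)
 *              ... the job did not complete in time ...
 */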

/*
 * Returns true if the syncpoint has expired, false if we may need to wait.
 */
bool host1x_syncpt_is_expired(struct host1x_syncpt *sp, u32 thresh)
{
        u32 current_val;
        u32 future_val;

        smp_rmb();

        current_val = (u32)atomic_read(&sp->min_val);
        future_val = (u32)atomic_read(&sp->max_val);

        /* Note the use of unsigned arithmetic here (mod 1<<32).
         *
         * c = current_val = min_val    = the current value of the syncpoint.
         * t = thresh                   = the value we are checking
         * f = future_val  = max_val    = the value c will reach when all
         *                                outstanding increments have completed.
         *
         * Note that c always chases f until it reaches f.
         *
         * Dtf = (f - t)
         * Dtc = (c - t)
         *
         *  Consider all cases:
         *
         *      A) .....c..t..f.....    Dtf < Dtc       need to wait
         *      B) .....c.....f..t..    Dtf > Dtc       expired
         *      C) ..t..c.....f.....    Dtf > Dtc       expired    (Dct very large)
         *
         *  Any case where f==c: always expired (for any t).    Dtf == Dtc
         *  Any case where t==c: always expired (for any f).    Dtf >= Dtc (because Dtc==0)
         *  Any case where t==f!=c: always wait.                Dtf <  Dtc (because Dtf==0,
         *                                                      Dtc!=0)
         *
         *  Other cases:
         *
         *      D) .....t..f..c.....    Dtf < Dtc       need to wait
         *      E) .....f..c..t.....    Dtf < Dtc       need to wait
         *      F) .....f..t..c.....    Dtf > Dtc       expired
         *
         *   So:
         *         Dtf >= Dtc implies EXPIRED   (return true)
         *         Dtf <  Dtc implies WAIT      (return false)
         *
         * Note: If t is expired then we *cannot* wait on it. We would wait
         * forever (hang the system).
         *
         * Note: do NOT get clever and remove the -thresh from both sides. It
         * is NOT the same.
         *
         * Client-managed syncpoints do not have their future value (max_val)
         * tracked by host1x, so for those we fall back to a direct signed
         * comparison of the current value against the threshold.
         */
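        /*
         * Worked example with assumed values: c = 0xfffffffe, t = 0x00000001,
         * f = 0x00000003. Then Dtf = f - t = 2 and Dtc = c - t = 0xfffffffd,
         * so Dtf < Dtc and the caller must wait. Once c wraps around to
         * 0x00000002, Dtc becomes 1 <= Dtf and the threshold reads as expired.
         */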
        if (!host1x_syncpt_client_managed(sp))
                return future_val - thresh >= current_val - thresh;
        else
                return (s32)(current_val - thresh) >= 0;
}

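/*
 * Allocate the syncpoint and wait base state for @host, detach all syncpoints
 * from channels, restore the hardware state and reserve one syncpoint for
 * host1x-internal use.
 */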
int host1x_syncpt_init(struct host1x *host)
{
        struct host1x_syncpt_base *bases;
        struct host1x_syncpt *syncpt;
        unsigned int i;

        syncpt = devm_kcalloc(host->dev, host->info->nb_pts, sizeof(*syncpt),
                              GFP_KERNEL);
        if (!syncpt)
                return -ENOMEM;

        bases = devm_kcalloc(host->dev, host->info->nb_bases, sizeof(*bases),
                             GFP_KERNEL);
        if (!bases)
                return -ENOMEM;

        for (i = 0; i < host->info->nb_pts; i++) {
                syncpt[i].id = i;
                syncpt[i].host = host;

                /*
                 * Unassign syncpt from channels for purposes of Tegra186
                 * syncpoint protection. This prevents any channel from
                 * accessing it until it is reassigned.
                 */
                host1x_hw_syncpt_assign_to_channel(host, &syncpt[i], NULL);
        }

        for (i = 0; i < host->info->nb_bases; i++)
                bases[i].id = i;

        mutex_init(&host->syncpt_mutex);
        host->syncpt = syncpt;
        host->bases = bases;

        host1x_syncpt_restore(host);
        host1x_hw_syncpt_enable_protection(host);

        /* Allocate sync point to use for clearing waits for expired fences */
        host->nop_sp = host1x_syncpt_alloc(host, NULL, 0);
        if (!host->nop_sp)
                return -ENOMEM;

        return 0;
}

/**
 * host1x_syncpt_request() - request a syncpoint
 * @client: client requesting the syncpoint
 * @flags: flags
 *
 * host1x client drivers can use this function to allocate a syncpoint for
 * subsequent use. A syncpoint returned by this function will be reserved for
 * use by the client exclusively. When no longer using a syncpoint, a host1x
 * client driver needs to release it using host1x_syncpt_free().
 */
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
                                            unsigned long flags)
{
        struct host1x *host = dev_get_drvdata(client->parent->parent);

        return host1x_syncpt_alloc(host, client, flags);
}
EXPORT_SYMBOL(host1x_syncpt_request);

/**
 * host1x_syncpt_free() - free a requested syncpoint
 * @sp: host1x syncpoint
 *
 * Release a syncpoint previously allocated using host1x_syncpt_request(). A
 * host1x client driver should call this when the syncpoint is no longer in
 * use. Note that client drivers must ensure that the syncpoint doesn't remain
 * under the control of hardware after calling this function, otherwise two
 * clients may end up trying to access the same syncpoint concurrently.
 */
void host1x_syncpt_free(struct host1x_syncpt *sp)
{
        if (!sp)
                return;

        mutex_lock(&sp->host->syncpt_mutex);

        host1x_syncpt_base_free(sp->base);
        kfree(sp->name);
        sp->base = NULL;
        sp->client = NULL;
        sp->name = NULL;
        sp->client_managed = false;

        mutex_unlock(&sp->host->syncpt_mutex);
}
EXPORT_SYMBOL(host1x_syncpt_free);

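/*
 * Free the names of all syncpoints; the backing arrays are device-managed and
 * are released together with the host1x device.
 */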
void host1x_syncpt_deinit(struct host1x *host)
{
        struct host1x_syncpt *sp = host->syncpt;
        unsigned int i;

        for (i = 0; i < host->info->nb_pts; i++, sp++)
                kfree(sp->name);
}

/**
 * host1x_syncpt_read_max() - read maximum syncpoint value
 * @sp: host1x syncpoint
 *
 * The maximum syncpoint value indicates how many operations are queued for
 * the syncpoint, either in a channel or in a software thread.
 */
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp)
{
        smp_rmb();

        return (u32)atomic_read(&sp->max_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_max);

/**
 * host1x_syncpt_read_min() - read minimum syncpoint value
 * @sp: host1x syncpoint
 *
 * The minimum syncpoint value is a shadow of the current sync point value in
 * hardware.
 */
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp)
{
        smp_rmb();

        return (u32)atomic_read(&sp->min_val);
}
EXPORT_SYMBOL(host1x_syncpt_read_min);

/**
 * host1x_syncpt_read() - read the current syncpoint value
 * @sp: host1x syncpoint
 */
u32 host1x_syncpt_read(struct host1x_syncpt *sp)
{
        return host1x_syncpt_load(sp);
}
EXPORT_SYMBOL(host1x_syncpt_read);

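/*
 * Accessors for the number of syncpoints, wait bases and mlocks supported by
 * this host1x instance (as described by host->info).
 */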
unsigned int host1x_syncpt_nb_pts(struct host1x *host)
{
        return host->info->nb_pts;
}

unsigned int host1x_syncpt_nb_bases(struct host1x *host)
{
        return host->info->nb_bases;
}

unsigned int host1x_syncpt_nb_mlocks(struct host1x *host)
{
        return host->info->nb_mlocks;
}

/**
 * host1x_syncpt_get() - obtain a syncpoint by ID
 * @host: host1x controller
 * @id: syncpoint ID
 */
struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, unsigned int id)
{
        if (id >= host->info->nb_pts)
                return NULL;

        return host->syncpt + id;
}
EXPORT_SYMBOL(host1x_syncpt_get);

/**
 * host1x_syncpt_get_base() - obtain the wait base associated with a syncpoint
 * @sp: host1x syncpoint
 */
struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp)
{
        return sp ? sp->base : NULL;
}
EXPORT_SYMBOL(host1x_syncpt_get_base);

/**
 * host1x_syncpt_base_id() - retrieve the ID of a syncpoint wait base
 * @base: host1x syncpoint wait base
 */
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base)
{
        return base->id;
}
EXPORT_SYMBOL(host1x_syncpt_base_id);