linux/drivers/gpu/drm/i915/gt/sysfs_engines.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

struct kobj_engine {
        struct kobject base;
        struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
        return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

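/*
 * A minimal sketch of the resulting sysfs layout for the read-only
 * attributes above, assuming a DRM primary node named card0 and a render
 * engine named rcs0 (the names and values are illustrative and vary per
 * system):
 *
 *      /sys/class/drm/card0/engine/rcs0/name        "rcs0"
 *      /sys/class/drm/card0/engine/rcs0/class       "0" (I915_ENGINE_CLASS_RENDER)
 *      /sys/class/drm/card0/engine/rcs0/instance    "0"
 *      /sys/class/drm/card0/engine/rcs0/mmio_base   "0x2000"
 */
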
static const char * const vcs_caps[] = {
        [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
        [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
        [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
        /* Trim off the trailing space and replace with a newline */
        if (len > PAGE_SIZE)
                len = PAGE_SIZE;
        if (len > 0)
                buf[len - 1] = '\n';

        return len;
}

static ssize_t
__caps_show(struct intel_engine_cs *engine,
            unsigned long caps, char *buf, bool show_unknown)
{
        const char * const *repr;
        int count, n;
        ssize_t len;

        switch (engine->class) {
        case VIDEO_DECODE_CLASS:
                repr = vcs_caps;
                count = ARRAY_SIZE(vcs_caps);
                break;

        case VIDEO_ENHANCEMENT_CLASS:
                repr = vecs_caps;
                count = ARRAY_SIZE(vecs_caps);
                break;

        default:
                repr = NULL;
                count = 0;
                break;
        }
        GEM_BUG_ON(count > BITS_PER_LONG);

        len = 0;
        for_each_set_bit(n, &caps, show_unknown ? BITS_PER_LONG : count) {
                if (n >= count || !repr[n]) {
                        if (GEM_WARN_ON(show_unknown))
                                len += snprintf(buf + len, PAGE_SIZE - len,
                                                "[%x] ", n);
                } else {
                        len += snprintf(buf + len, PAGE_SIZE - len,
                                        "%s ", repr[n]);
                }
                if (GEM_WARN_ON(len >= PAGE_SIZE))
                        break;
        }
        return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);

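/*
 * Example output, assuming a video decode engine with both class
 * capability bits set:
 *
 *      $ cat .../engine/vcs0/capabilities
 *      hevc sfc
 *      $ cat .../engine/vcs0/known_capabilities
 *      hevc sfc
 *
 * "capabilities" reports the bits set for this engine (any unknown bit is
 * printed as "[bit]"), while "known_capabilities" lists every name the
 * driver can report for the engine's class.
 */
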
static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
               const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration;
        int err;

        /*
         * When waiting for a request, if it is currently being executed
         * on the GPU, we busywait for a short while before sleeping. The
         * premise is that most requests are short, and if one is already
         * executing then there is a good chance that it will complete
         * before we can set up the interrupt handler and go to sleep.
         * We try to offset the cost of going to sleep by first spinning
         * on the request -- if it completes in less time than it would
         * take to go to sleep, process the interrupt and return back to
         * the client, then we have saved the client some latency, albeit
         * at the cost of spinning on an expensive CPU core.
         *
         * While we try to avoid waiting at all for a request that is unlikely
         * to complete, deciding how long it is worth spinning for is an
         * arbitrary decision: a trade-off between power and latency.
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        if (duration > jiffies_to_nsecs(2))
                return -EINVAL;

        WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

        return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);

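/*
 * Hypothetical usage sketch (the card/engine names are examples):
 *
 *      # Disable busywaiting entirely; always sleep on a busy request.
 *      $ echo 0 > /sys/class/drm/card0/engine/rcs0/max_busywait_duration_ns
 *
 * Values above roughly two scheduler ticks (jiffies_to_nsecs(2)) are
 * rejected with -EINVAL.
 */
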
static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration;
        int err;

        /*
         * Execlists uses a scheduling quantum (a timeslice) to alternate
         * execution between ready-to-run contexts of equal priority. This
         * ensures that all users (though only if they are of equal
         * importance) have the opportunity to run and prevents livelocks
         * where contexts may have implicit ordering due to userspace
         * semaphores.
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;

        WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

        if (execlists_active(&engine->execlists))
                set_timer_ms(&engine->execlists.timer, duration);

        return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

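/*
 * Hypothetical usage sketch (names are examples): lengthen the timeslice
 * to reduce context switching on a deliberately oversubscribed engine.
 * If the engine is currently active, the new duration is also applied to
 * the running timeslice timer.
 *
 *      $ echo 100 > /sys/class/drm/card0/engine/rcs0/timeslice_duration_ms
 */
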
static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
           const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration;
        int err;

        /*
         * Allowing ourselves to sleep before a GPU reset after disabling
         * submission, even for a few milliseconds, gives an innocent context
         * the opportunity to clear the GPU before the reset occurs. However,
         * how long to sleep depends on the typical non-preemptible duration
         * (a similar problem to determining the ideal preempt-reset timeout
         * or even the heartbeat interval).
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;

        WRITE_ONCE(engine->props.stop_timeout_ms, duration);
        return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

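/*
 * Hypothetical usage sketch (names are examples): give an innocent
 * context a little longer to flush out before the reset is performed.
 *
 *      $ echo 200 > /sys/class/drm/card0/engine/rcs0/stop_timeout_ms
 */
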
static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
                      const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long timeout;
        int err;

        /*
         * After initialising a preemption request, we give the current
         * resident a small amount of time to vacate the GPU. The preemption
         * request is for a higher priority context and should be immediate to
         * maintain high quality of service (and avoid priority inversion).
         * However, the preemption granularity of the GPU can be quite coarse
         * and so we need a compromise.
         */

        err = kstrtoull(buf, 0, &timeout);
        if (err)
                return err;

        if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;

        WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

        if (READ_ONCE(engine->execlists.pending[0]))
                set_timer_ms(&engine->execlists.preempt, timeout);

        return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
                     char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

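/*
 * Hypothetical usage sketch (names are examples). If a preemption is
 * already outstanding when the timeout is written, the new value is
 * applied to the in-flight preempt timer as well:
 *
 *      $ echo 50 > /sys/class/drm/card0/engine/rcs0/preempt_timeout_ms
 */
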
static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long delay;
        int err;

        /*
         * We monitor the health of the system via periodic heartbeat pulses.
         * The pulses also provide the opportunity to perform garbage
         * collection. However, we interpret an incomplete pulse (a missed
         * heartbeat) as an indication that the system is no longer responsive,
         * i.e. hung, and perform an engine or full GPU reset. Given that the
         * preemption granularity can be very coarse on a system, the optimal
         * value for any workload is unknowable!
         */

        err = kstrtoull(buf, 0, &delay);
        if (err)
                return err;

        if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;

        err = intel_engine_set_heartbeat(engine, delay);
        if (err)
                return err;

        return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

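/*
 * Hypothetical usage sketch (names are examples). Writing 0 is expected
 * to disable the heartbeat, and with it hang detection on this engine,
 * so it is mainly useful for debug and test harnesses:
 *
 *      $ echo 0 > /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 */
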
static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

static void kobj_engine_release(struct kobject *kobj)
{
        kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
        .release = kobj_engine_release,
        .sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
        struct kobj_engine *ke;

        ke = kzalloc(sizeof(*ke), GFP_KERNEL);
        if (!ke)
                return NULL;

        kobject_init(&ke->base, &kobj_engine_type);
        ke->engine = engine;

        if (kobject_add(&ke->base, dir, "%s", engine->name)) {
                kobject_put(&ke->base);
                return NULL;
        }

        /* xfer ownership to sysfs tree */
        return &ke->base;
}

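/*
 * The ".defaults" subdirectory exposes the original, read-only values of
 * the tunables above, so that userspace (e.g. a test harness) can restore
 * an engine to its default configuration after tweaking it.
 */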
static void add_defaults(struct kobj_engine *parent)
{
        static const struct attribute *files[] = {
                &max_spin_def.attr,
                &stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
                &heartbeat_interval_def.attr,
#endif
                NULL
        };
        struct kobj_engine *ke;

        ke = kzalloc(sizeof(*ke), GFP_KERNEL);
        if (!ke)
                return;

        kobject_init(&ke->base, &kobj_engine_type);
        ke->engine = parent->engine;

        if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
                kobject_put(&ke->base);
                return;
        }

        if (sysfs_create_files(&ke->base, files))
                return;

        if (intel_engine_has_timeslices(ke->engine) &&
            sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
                return;

        if (intel_engine_has_preempt_reset(ke->engine) &&
            sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
                return;
}

void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
        static const struct attribute *files[] = {
                &name_attr.attr,
                &class_attr.attr,
                &inst_attr.attr,
                &mmio_attr.attr,
                &caps_attr.attr,
                &all_caps_attr.attr,
                &max_spin_attr.attr,
                &stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
                &heartbeat_interval_attr.attr,
#endif
                NULL
        };

        struct device *kdev = i915->drm.primary->kdev;
        struct intel_engine_cs *engine;
        struct kobject *dir;

        dir = kobject_create_and_add("engine", &kdev->kobj);
        if (!dir)
                return;

        for_each_uabi_engine(engine, i915) {
                struct kobject *kobj;

                kobj = kobj_engine(dir, engine);
                if (!kobj)
                        goto err_engine;

                if (sysfs_create_files(kobj, files))
                        goto err_object;

                if (intel_engine_has_timeslices(engine) &&
                    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
                        goto err_engine;

                if (intel_engine_has_preempt_reset(engine) &&
                    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
                        goto err_engine;

                add_defaults(container_of(kobj, struct kobj_engine, base));

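                /*
                 * Error unwind: the labels below are reached only via the
                 * gotos above; the if (0) keeps them out of the normal
                 * flow through the loop body.
                 */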
                if (0) {
err_object:
                        kobject_put(kobj);
err_engine:
                        dev_err(kdev, "Failed to add sysfs engine '%s'\n",
                                engine->name);
                        break;
                }
        }
}