linux/drivers/gpu/drm/i915/gt/sysfs_engines.c
// SPDX-License-Identifier: MIT
/*
 * Copyright © 2019 Intel Corporation
 */

#include <linux/kobject.h>
#include <linux/sysfs.h>

#include "i915_drv.h"
#include "intel_engine.h"
#include "intel_engine_heartbeat.h"
#include "sysfs_engines.h"

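/*
 * Wrapper that embeds a kobject in front of the engine pointer so that the
 * sysfs attribute callbacks below can recover their engine from the kobject
 * handed to them.
 */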
struct kobj_engine {
        struct kobject base;
        struct intel_engine_cs *engine;
};

static struct intel_engine_cs *kobj_to_engine(struct kobject *kobj)
{
        return container_of(kobj, struct kobj_engine, base)->engine;
}

static ssize_t
name_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", kobj_to_engine(kobj)->name);
}

static struct kobj_attribute name_attr =
__ATTR(name, 0444, name_show, NULL);

static ssize_t
class_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_class);
}

static struct kobj_attribute class_attr =
__ATTR(class, 0444, class_show, NULL);

static ssize_t
inst_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", kobj_to_engine(kobj)->uabi_instance);
}

static struct kobj_attribute inst_attr =
__ATTR(instance, 0444, inst_show, NULL);

static ssize_t
mmio_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "0x%x\n", kobj_to_engine(kobj)->mmio_base);
}

static struct kobj_attribute mmio_attr =
__ATTR(mmio_base, 0444, mmio_show, NULL);

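/*
 * Capability bits are reported by name; the index of each string matches the
 * bit position of the corresponding I915_*_CLASS_CAPABILITY_* flag.
 */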
static const char * const vcs_caps[] = {
        [ilog2(I915_VIDEO_CLASS_CAPABILITY_HEVC)] = "hevc",
        [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static const char * const vecs_caps[] = {
        [ilog2(I915_VIDEO_AND_ENHANCE_CLASS_CAPABILITY_SFC)] = "sfc",
};

static ssize_t repr_trim(char *buf, ssize_t len)
{
        /* Trim off the trailing space and replace with a newline */
        if (len > PAGE_SIZE)
                len = PAGE_SIZE;
        if (len > 0)
                buf[len - 1] = '\n';

        return len;
}

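/*
 * Print the set of capability names for @caps. With show_unknown, set bits
 * that have no name are printed as raw "[bit]" indices (and warn, since
 * userspace has no way to interpret them); otherwise only the named, known
 * bits are considered.
 */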
static ssize_t
__caps_show(struct intel_engine_cs *engine,
            u32 caps, char *buf, bool show_unknown)
{
        const char * const *repr;
        int count, n;
        ssize_t len;

        BUILD_BUG_ON(!typecheck(typeof(caps), engine->uabi_capabilities));

        switch (engine->class) {
        case VIDEO_DECODE_CLASS:
                repr = vcs_caps;
                count = ARRAY_SIZE(vcs_caps);
                break;

        case VIDEO_ENHANCEMENT_CLASS:
                repr = vecs_caps;
                count = ARRAY_SIZE(vecs_caps);
                break;

        default:
                repr = NULL;
                count = 0;
                break;
        }
        GEM_BUG_ON(count > BITS_PER_TYPE(typeof(caps)));

        len = 0;
        for_each_set_bit(n,
                         (unsigned long *)&caps,
                         show_unknown ? BITS_PER_TYPE(typeof(caps)) : count) {
                if (n >= count || !repr[n]) {
                        if (GEM_WARN_ON(show_unknown))
                                len += snprintf(buf + len, PAGE_SIZE - len,
                                                "[%x] ", n);
                } else {
                        len += snprintf(buf + len, PAGE_SIZE - len,
                                        "%s ", repr[n]);
                }
                if (GEM_WARN_ON(len >= PAGE_SIZE))
                        break;
        }
        return repr_trim(buf, len);
}

static ssize_t
caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return __caps_show(engine, engine->uabi_capabilities, buf, true);
}

static struct kobj_attribute caps_attr =
__ATTR(capabilities, 0444, caps_show, NULL);

static ssize_t
all_caps_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        return __caps_show(kobj_to_engine(kobj), -1, buf, false);
}

static struct kobj_attribute all_caps_attr =
__ATTR(known_capabilities, 0444, all_caps_show, NULL);

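/*
 * The writable tunables below update the live engine->props values; the
 * matching *_def attributes read back engine->defaults and are published
 * under the ".defaults" subdirectory by add_defaults().
 */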
static ssize_t
max_spin_store(struct kobject *kobj, struct kobj_attribute *attr,
               const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration;
        int err;

        /*
         * When waiting for a request, if it is currently being executed
         * on the GPU, we busywait for a short while before sleeping. The
         * premise is that most requests are short, and if it is already
         * executing then there is a good chance that it will complete
         * before we can set up the interrupt handler and go to sleep.
         * We try to offset the cost of going to sleep by first spinning
         * on the request -- if it completed in less time than it would take
         * to go to sleep, process the interrupt and return to the client,
         * then we have saved the client some latency, albeit at the cost
         * of spinning on an expensive CPU core.
         *
         * While we try to avoid waiting at all for a request that is unlikely
         * to complete, deciding how long it is worth spinning for is an
         * arbitrary decision: trading off power vs latency.
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        if (duration > jiffies_to_nsecs(2))
                return -EINVAL;

        WRITE_ONCE(engine->props.max_busywait_duration_ns, duration);

        return count;
}

static ssize_t
max_spin_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_attr =
__ATTR(max_busywait_duration_ns, 0644, max_spin_show, max_spin_store);

static ssize_t
max_spin_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.max_busywait_duration_ns);
}

static struct kobj_attribute max_spin_def =
__ATTR(max_busywait_duration_ns, 0444, max_spin_default, NULL);

static ssize_t
timeslice_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration;
        int err;

        /*
         * Execlists uses a scheduling quantum (a timeslice) to alternate
         * execution between ready-to-run contexts of equal priority. This
         * ensures that all users (though only those of equal importance)
         * have the opportunity to run and prevents livelocks where contexts
         * may have implicit ordering due to userspace semaphores.
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;

        WRITE_ONCE(engine->props.timeslice_duration_ms, duration);

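        /* Restart an active timeslice timer so the new quantum applies now */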
        if (execlists_active(&engine->execlists))
                set_timer_ms(&engine->execlists.timer, duration);

        return count;
}

static ssize_t
timeslice_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_attr =
__ATTR(timeslice_duration_ms, 0644, timeslice_show, timeslice_store);

static ssize_t
timeslice_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.timeslice_duration_ms);
}

static struct kobj_attribute timeslice_duration_def =
__ATTR(timeslice_duration_ms, 0444, timeslice_default, NULL);

static ssize_t
stop_store(struct kobject *kobj, struct kobj_attribute *attr,
           const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long duration;
        int err;

        /*
         * Allowing ourselves to sleep before a GPU reset after disabling
         * submission, even for a few milliseconds, gives an innocent context
         * the opportunity to clear the GPU before the reset occurs. However,
         * how long to sleep depends on the typical non-preemptible duration
         * (a similar problem to determining the ideal preempt-reset timeout
         * or even the heartbeat interval).
         */

        err = kstrtoull(buf, 0, &duration);
        if (err)
                return err;

        if (duration > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;

        WRITE_ONCE(engine->props.stop_timeout_ms, duration);
        return count;
}

static ssize_t
stop_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_attr =
__ATTR(stop_timeout_ms, 0644, stop_show, stop_store);

static ssize_t
stop_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.stop_timeout_ms);
}

static struct kobj_attribute stop_timeout_def =
__ATTR(stop_timeout_ms, 0444, stop_default, NULL);

static ssize_t
preempt_timeout_store(struct kobject *kobj, struct kobj_attribute *attr,
                      const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long timeout;
        int err;

        /*
         * After initialising a preemption request, we give the current
         * resident a small amount of time to vacate the GPU. The preemption
         * request is for a higher priority context and should be immediate to
         * maintain high quality of service (and avoid priority inversion).
         * However, the preemption granularity of the GPU can be quite coarse
         * and so we need a compromise.
         */

        err = kstrtoull(buf, 0, &timeout);
        if (err)
                return err;

        if (timeout > jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;

        WRITE_ONCE(engine->props.preempt_timeout_ms, timeout);

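        /* Apply the new timeout to any preemption attempt already in flight */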
        if (READ_ONCE(engine->execlists.pending[0]))
                set_timer_ms(&engine->execlists.preempt, timeout);

        return count;
}

static ssize_t
preempt_timeout_show(struct kobject *kobj, struct kobj_attribute *attr,
                     char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_attr =
__ATTR(preempt_timeout_ms, 0644, preempt_timeout_show, preempt_timeout_store);

static ssize_t
preempt_timeout_default(struct kobject *kobj, struct kobj_attribute *attr,
                        char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.preempt_timeout_ms);
}

static struct kobj_attribute preempt_timeout_def =
__ATTR(preempt_timeout_ms, 0444, preempt_timeout_default, NULL);

static ssize_t
heartbeat_store(struct kobject *kobj, struct kobj_attribute *attr,
                const char *buf, size_t count)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);
        unsigned long long delay;
        int err;

        /*
         * We monitor the health of the system via periodic heartbeat pulses.
         * The pulses also provide the opportunity to perform garbage
         * collection. However, we interpret an incomplete pulse (a missed
         * heartbeat) as an indication that the system is no longer responsive,
         * i.e. hung, and perform an engine or full GPU reset. Given that the
         * preemption granularity can be very coarse on a system, the optimal
         * value for any workload is unknowable!
         */

        err = kstrtoull(buf, 0, &delay);
        if (err)
                return err;

        if (delay >= jiffies_to_msecs(MAX_SCHEDULE_TIMEOUT))
                return -EINVAL;

        err = intel_engine_set_heartbeat(engine, delay);
        if (err)
                return err;

        return count;
}

static ssize_t
heartbeat_show(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->props.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_attr =
__ATTR(heartbeat_interval_ms, 0644, heartbeat_show, heartbeat_store);

static ssize_t
heartbeat_default(struct kobject *kobj, struct kobj_attribute *attr, char *buf)
{
        struct intel_engine_cs *engine = kobj_to_engine(kobj);

        return sprintf(buf, "%lu\n", engine->defaults.heartbeat_interval_ms);
}

static struct kobj_attribute heartbeat_interval_def =
__ATTR(heartbeat_interval_ms, 0444, heartbeat_default, NULL);

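/* Free the wrapper once the last reference to the kobject is dropped */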
static void kobj_engine_release(struct kobject *kobj)
{
        kfree(kobj);
}

static struct kobj_type kobj_engine_type = {
        .release = kobj_engine_release,
        .sysfs_ops = &kobj_sysfs_ops
};

static struct kobject *
kobj_engine(struct kobject *dir, struct intel_engine_cs *engine)
{
        struct kobj_engine *ke;

        ke = kzalloc(sizeof(*ke), GFP_KERNEL);
        if (!ke)
                return NULL;

        kobject_init(&ke->base, &kobj_engine_type);
        ke->engine = engine;

        if (kobject_add(&ke->base, dir, "%s", engine->name)) {
                kobject_put(&ke->base);
                return NULL;
        }

        /* xfer ownership to sysfs tree */
        return &ke->base;
}

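/*
 * Publish the original default values under a hidden ".defaults"
 * subdirectory next to the writable tunables, so that the out-of-the-box
 * configuration remains discoverable (and restorable) after tuning.
 */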
static void add_defaults(struct kobj_engine *parent)
{
        static const struct attribute *files[] = {
                &max_spin_def.attr,
                &stop_timeout_def.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
                &heartbeat_interval_def.attr,
#endif
                NULL
        };
        struct kobj_engine *ke;

        ke = kzalloc(sizeof(*ke), GFP_KERNEL);
        if (!ke)
                return;

        kobject_init(&ke->base, &kobj_engine_type);
        ke->engine = parent->engine;

        if (kobject_add(&ke->base, &parent->base, "%s", ".defaults")) {
                kobject_put(&ke->base);
                return;
        }

        if (sysfs_create_files(&ke->base, files))
                return;

        if (intel_engine_has_timeslices(ke->engine) &&
            sysfs_create_file(&ke->base, &timeslice_duration_def.attr))
                return;

        if (intel_engine_has_preempt_reset(ke->engine) &&
            sysfs_create_file(&ke->base, &preempt_timeout_def.attr))
                return;
}

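/*
 * Expose each uabi engine under the device's "engine" directory, e.g.
 * (assuming the device is card0):
 *
 *   /sys/class/drm/card0/engine/rcs0/name
 *   /sys/class/drm/card0/engine/rcs0/heartbeat_interval_ms
 *   /sys/class/drm/card0/engine/rcs0/.defaults/heartbeat_interval_ms
 */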
void intel_engines_add_sysfs(struct drm_i915_private *i915)
{
        static const struct attribute *files[] = {
                &name_attr.attr,
                &class_attr.attr,
                &inst_attr.attr,
                &mmio_attr.attr,
                &caps_attr.attr,
                &all_caps_attr.attr,
                &max_spin_attr.attr,
                &stop_timeout_attr.attr,
#if CONFIG_DRM_I915_HEARTBEAT_INTERVAL
                &heartbeat_interval_attr.attr,
#endif
                NULL
        };

        struct device *kdev = i915->drm.primary->kdev;
        struct intel_engine_cs *engine;
        struct kobject *dir;

        dir = kobject_create_and_add("engine", &kdev->kobj);
        if (!dir)
                return;

        for_each_uabi_engine(engine, i915) {
                struct kobject *kobj;

                kobj = kobj_engine(dir, engine);
                if (!kobj)
                        goto err_engine;

                if (sysfs_create_files(kobj, files))
                        goto err_object;

                if (intel_engine_has_timeslices(engine) &&
                    sysfs_create_file(kobj, &timeslice_duration_attr.attr))
                        goto err_engine;

                if (intel_engine_has_preempt_reset(engine) &&
                    sysfs_create_file(kobj, &preempt_timeout_attr.attr))
                        goto err_engine;

                add_defaults(container_of(kobj, struct kobj_engine, base));

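                /*
                 * The error labels live inside an unreachable if (0) block
                 * so that the happy path reads straight through while all
                 * failures share a single unwind-and-report path.
                 */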
                if (0) {
err_object:
                        kobject_put(kobj);
err_engine:
                        dev_err(kdev, "Failed to add sysfs engine '%s'\n",
                                engine->name);
                        break;
                }
        }
}