linux/drivers/gpu/drm/i915/i915_sysfs.c
/*
 * Copyright © 2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Ben Widawsky <ben@bwidawsk.net>
 *
 */

#include <linux/device.h>
#include <linux/module.h>
#include <linux/stat.h>
#include <linux/sysfs.h>
#include "intel_drv.h"
#include "i915_drv.h"

#define dev_to_drm_minor(d) dev_get_drvdata((d))

#ifdef CONFIG_PM
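/*
 * Read an RC6 residency counter and convert it to milliseconds.  On most
 * platforms the counter ticks in 1.28us units; VLV/CHV count in CZ clock
 * cycles and BXT in 833.33ns units, so the units/div pair is adjusted
 * accordingly before the division.
 */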
static u32 calc_residency(struct drm_device *dev,
                          i915_reg_t reg)
{
        struct drm_i915_private *dev_priv = dev->dev_private;
        u64 raw_time; /* 32b value may overflow during fixed point math */
        u64 units = 128ULL, div = 100000ULL;
        u32 ret;

        if (!intel_enable_rc6(dev))
                return 0;

        intel_runtime_pm_get(dev_priv);

        /* On VLV and CHV, residency time is in CZ units rather than 1.28us */
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                units = 1;
                div = dev_priv->czclk_freq;

                if (I915_READ(VLV_COUNTER_CONTROL) & VLV_COUNT_RANGE_HIGH)
                        units <<= 8;
        } else if (IS_BROXTON(dev)) {
                units = 1;
                div = 1200;             /* 833.33ns */
        }

        raw_time = I915_READ(reg) * units;
        ret = DIV_ROUND_UP_ULL(raw_time, div);

        intel_runtime_pm_put(dev_priv);
        return ret;
}

static ssize_t
show_rc6_mask(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        return snprintf(buf, PAGE_SIZE, "%x\n", intel_enable_rc6(dminor->dev));
}

static ssize_t
show_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_get_drvdata(kdev);
        u32 rc6_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static ssize_t
show_rc6p_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6p_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6p);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6p_residency);
}

static ssize_t
show_rc6pp_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_to_drm_minor(kdev);
        u32 rc6pp_residency = calc_residency(dminor->dev, GEN6_GT_GFX_RC6pp);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6pp_residency);
}

static ssize_t
show_media_rc6_ms(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *dminor = dev_get_drvdata(kdev);
        u32 rc6_residency = calc_residency(dminor->dev, VLV_GT_MEDIA_RC6);
        return snprintf(buf, PAGE_SIZE, "%u\n", rc6_residency);
}

static DEVICE_ATTR(rc6_enable, S_IRUGO, show_rc6_mask, NULL);
static DEVICE_ATTR(rc6_residency_ms, S_IRUGO, show_rc6_ms, NULL);
static DEVICE_ATTR(rc6p_residency_ms, S_IRUGO, show_rc6p_ms, NULL);
static DEVICE_ATTR(rc6pp_residency_ms, S_IRUGO, show_rc6pp_ms, NULL);
static DEVICE_ATTR(media_rc6_residency_ms, S_IRUGO, show_media_rc6_ms, NULL);

static struct attribute *rc6_attrs[] = {
        &dev_attr_rc6_enable.attr,
        &dev_attr_rc6_residency_ms.attr,
        NULL
};

static struct attribute_group rc6_attr_group = {
        .name = power_group_name,
        .attrs = rc6_attrs
};

static struct attribute *rc6p_attrs[] = {
        &dev_attr_rc6p_residency_ms.attr,
        &dev_attr_rc6pp_residency_ms.attr,
        NULL
};

static struct attribute_group rc6p_attr_group = {
        .name = power_group_name,
        .attrs = rc6p_attrs
};

static struct attribute *media_rc6_attrs[] = {
        &dev_attr_media_rc6_residency_ms.attr,
        NULL
};

static struct attribute_group media_rc6_attr_group = {
        .name = power_group_name,
        .attrs = media_rc6_attrs
};
#endif

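/*
 * Validate an offset into the L3 parity remap table: the platform must
 * support L3 DPF, the offset must be dword aligned and must fall within
 * GEN7_L3LOG_SIZE.
 */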
static int l3_access_valid(struct drm_device *dev, loff_t offset)
{
        if (!HAS_L3_DPF(dev))
                return -EPERM;

        if (offset % 4 != 0)
                return -EINVAL;

        if (offset >= GEN7_L3LOG_SIZE)
                return -ENXIO;

        return 0;
}

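/*
 * Return the cached L3 remap information for the requested slice, or zeroes
 * if no remapping has been programmed yet.  Reads are truncated to dword
 * granularity and clamped to the size of the log.
 */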
static ssize_t
i915_l3_read(struct file *filp, struct kobject *kobj,
             struct bin_attribute *attr, char *buf,
             loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        int slice = (int)(uintptr_t)attr->private;
        int ret;

        count = round_down(count, 4);

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        count = min_t(size_t, GEN7_L3LOG_SIZE - offset, count);

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        if (dev_priv->l3_parity.remap_info[slice])
                memcpy(buf,
                       dev_priv->l3_parity.remap_info[slice] + (offset/4),
                       count);
        else
                memset(buf, 0, count);

        mutex_unlock(&drm_dev->struct_mutex);

        return count;
}

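/*
 * Store new L3 remap information for the requested slice.  The remap table
 * is allocated on first use, the GPU is idled so no context is still using
 * the old mapping, and every context is flagged so the new mapping is
 * written out on its next context switch.
 */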
static ssize_t
i915_l3_write(struct file *filp, struct kobject *kobj,
              struct bin_attribute *attr, char *buf,
              loff_t offset, size_t count)
{
        struct device *dev = kobj_to_dev(kobj);
        struct drm_minor *dminor = dev_to_drm_minor(dev);
        struct drm_device *drm_dev = dminor->dev;
        struct drm_i915_private *dev_priv = drm_dev->dev_private;
        struct intel_context *ctx;
        u32 *temp = NULL; /* Just here to make handling failures easy */
        int slice = (int)(uintptr_t)attr->private;
        int ret;

        if (!HAS_HW_CONTEXTS(drm_dev))
                return -ENXIO;

        ret = l3_access_valid(drm_dev, offset);
        if (ret)
                return ret;

        ret = i915_mutex_lock_interruptible(drm_dev);
        if (ret)
                return ret;

        if (!dev_priv->l3_parity.remap_info[slice]) {
                temp = kzalloc(GEN7_L3LOG_SIZE, GFP_KERNEL);
                if (!temp) {
                        mutex_unlock(&drm_dev->struct_mutex);
                        return -ENOMEM;
                }
        }

        ret = i915_gpu_idle(drm_dev);
        if (ret) {
                kfree(temp);
                mutex_unlock(&drm_dev->struct_mutex);
                return ret;
        }

        /* TODO: Ideally we really want a GPU reset here to make sure errors
         * aren't propagated. Since I cannot find a stable way to reset the GPU
         * at this point it is left as a TODO.
         */
        if (temp)
                dev_priv->l3_parity.remap_info[slice] = temp;

        memcpy(dev_priv->l3_parity.remap_info[slice] + (offset/4), buf, count);

        /* NB: We defer the remapping until we switch to the context */
        list_for_each_entry(ctx, &dev_priv->context_list, link)
                ctx->remap_slice |= (1<<slice);

        mutex_unlock(&drm_dev->struct_mutex);

        return count;
}

static struct bin_attribute dpf_attrs = {
        .attr = {.name = "l3_parity", .mode = (S_IRUSR | S_IWUSR)},
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
        .mmap = NULL,
        .private = (void *)0
};

static struct bin_attribute dpf_attrs_1 = {
        .attr = {.name = "l3_parity_slice_1", .mode = (S_IRUSR | S_IWUSR)},
        .size = GEN7_L3LOG_SIZE,
        .read = i915_l3_read,
        .write = i915_l3_write,
        .mmap = NULL,
        .private = (void *)1
};

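/*
 * Report the frequency the GPU is actually running at right now, read back
 * from the hardware (punit on VLV/CHV, RPSTAT1 elsewhere), as opposed to
 * the last software-requested frequency reported by gt_cur_freq_mhz.
 */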
static ssize_t gt_act_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);
        if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) {
                u32 freq;
                freq = vlv_punit_read(dev_priv, PUNIT_REG_GPU_FREQ_STS);
                ret = intel_gpu_freq(dev_priv, (freq >> 8) & 0xff);
        } else {
                u32 rpstat = I915_READ(GEN6_RPSTAT1);
                if (IS_GEN9(dev_priv))
                        ret = (rpstat & GEN9_CAGF_MASK) >> GEN9_CAGF_SHIFT;
                else if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv))
                        ret = (rpstat & HSW_CAGF_MASK) >> HSW_CAGF_SHIFT;
                else
                        ret = (rpstat & GEN6_CAGF_MASK) >> GEN6_CAGF_SHIFT;
                ret = intel_gpu_freq(dev_priv, ret);
        }
        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t gt_cur_freq_mhz_show(struct device *kdev,
                                    struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        intel_runtime_pm_get(dev_priv);

        mutex_lock(&dev_priv->rps.hw_lock);
        ret = intel_gpu_freq(dev_priv, dev_priv->rps.cur_freq);
        mutex_unlock(&dev_priv->rps.hw_lock);

        intel_runtime_pm_put(dev_priv);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

static ssize_t vlv_rpe_freq_mhz_show(struct device *kdev,
                                     struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;

        return snprintf(buf, PAGE_SIZE,
                        "%d\n",
                        intel_gpu_freq(dev_priv, dev_priv->rps.efficient_freq));
}

static ssize_t gt_max_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        mutex_lock(&dev_priv->rps.hw_lock);
        ret = intel_gpu_freq(dev_priv, dev_priv->rps.max_freq_softlimit);
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

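/*
 * Set a new software limit on the maximum RPS frequency.  The value is
 * parsed in MHz, converted to the platform's frequency opcode, range
 * checked against the hardware limits and the current minimum softlimit,
 * and then pushed to the hardware so that interrupt limits and PMINTRMSK
 * are updated even if the effective frequency request does not change.
 */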
static ssize_t gt_max_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        mutex_lock(&dev_priv->rps.hw_lock);

        val = intel_freq_opcode(dev_priv, val);

        if (val < dev_priv->rps.min_freq ||
            val > dev_priv->rps.max_freq ||
            val < dev_priv->rps.min_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }

        if (val > dev_priv->rps.rp0_freq)
                DRM_DEBUG("User requested overclocking to %d\n",
                          intel_gpu_freq(dev_priv, val));

        dev_priv->rps.max_freq_softlimit = val;

        val = clamp_t(int, dev_priv->rps.cur_freq,
                      dev_priv->rps.min_freq_softlimit,
                      dev_priv->rps.max_freq_softlimit);

        /* We still need *_set_rps to process the new max_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
        intel_set_rps(dev, val);

        mutex_unlock(&dev_priv->rps.hw_lock);

        return count;
}

static ssize_t gt_min_freq_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        int ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        mutex_lock(&dev_priv->rps.hw_lock);
        ret = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq_softlimit);
        mutex_unlock(&dev_priv->rps.hw_lock);

        return snprintf(buf, PAGE_SIZE, "%d\n", ret);
}

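/*
 * Set a new software limit on the minimum RPS frequency.  Mirrors
 * gt_max_freq_mhz_store(): parse, convert to a frequency opcode, range
 * check against the hardware limits and the current maximum softlimit,
 * then let intel_set_rps() update the interrupt limits.
 */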
static ssize_t gt_min_freq_mhz_store(struct device *kdev,
                                     struct device_attribute *attr,
                                     const char *buf, size_t count)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;
        ssize_t ret;

        ret = kstrtou32(buf, 0, &val);
        if (ret)
                return ret;

        flush_delayed_work(&dev_priv->rps.delayed_resume_work);

        mutex_lock(&dev_priv->rps.hw_lock);

        val = intel_freq_opcode(dev_priv, val);

        if (val < dev_priv->rps.min_freq ||
            val > dev_priv->rps.max_freq ||
            val > dev_priv->rps.max_freq_softlimit) {
                mutex_unlock(&dev_priv->rps.hw_lock);
                return -EINVAL;
        }

        dev_priv->rps.min_freq_softlimit = val;

        val = clamp_t(int, dev_priv->rps.cur_freq,
                      dev_priv->rps.min_freq_softlimit,
                      dev_priv->rps.max_freq_softlimit);

        /* We still need *_set_rps to process the new min_delay and
         * update the interrupt limits and PMINTRMSK even though
         * frequency request may be unchanged. */
        intel_set_rps(dev, val);

        mutex_unlock(&dev_priv->rps.hw_lock);

        return count;
}

static DEVICE_ATTR(gt_act_freq_mhz, S_IRUGO, gt_act_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_cur_freq_mhz, S_IRUGO, gt_cur_freq_mhz_show, NULL);
static DEVICE_ATTR(gt_max_freq_mhz, S_IRUGO | S_IWUSR, gt_max_freq_mhz_show, gt_max_freq_mhz_store);
static DEVICE_ATTR(gt_min_freq_mhz, S_IRUGO | S_IWUSR, gt_min_freq_mhz_show, gt_min_freq_mhz_store);

static DEVICE_ATTR(vlv_rpe_freq_mhz, S_IRUGO, vlv_rpe_freq_mhz_show, NULL);

static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf);
static DEVICE_ATTR(gt_RP0_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RP1_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);
static DEVICE_ATTR(gt_RPn_freq_mhz, S_IRUGO, gt_rp_mhz_show, NULL);

/* For now we have a static number of RP states */
static ssize_t gt_rp_mhz_show(struct device *kdev, struct device_attribute *attr, char *buf)
{
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct drm_i915_private *dev_priv = dev->dev_private;
        u32 val;

        if (attr == &dev_attr_gt_RP0_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.rp0_freq);
        else if (attr == &dev_attr_gt_RP1_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.rp1_freq);
        else if (attr == &dev_attr_gt_RPn_freq_mhz)
                val = intel_gpu_freq(dev_priv, dev_priv->rps.min_freq);
        else
                BUG();

        return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

static const struct attribute *gen6_attrs[] = {
        &dev_attr_gt_act_freq_mhz.attr,
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
        &dev_attr_gt_RP1_freq_mhz.attr,
        &dev_attr_gt_RPn_freq_mhz.attr,
        NULL,
};

static const struct attribute *vlv_attrs[] = {
        &dev_attr_gt_act_freq_mhz.attr,
        &dev_attr_gt_cur_freq_mhz.attr,
        &dev_attr_gt_max_freq_mhz.attr,
        &dev_attr_gt_min_freq_mhz.attr,
        &dev_attr_gt_RP0_freq_mhz.attr,
        &dev_attr_gt_RP1_freq_mhz.attr,
        &dev_attr_gt_RPn_freq_mhz.attr,
        &dev_attr_vlv_rpe_freq_mhz.attr,
        NULL,
};

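/*
 * Dump the current GPU error state, if any, through sysfs.  The error state
 * is formatted into a string buffer sized and offset to match the sysfs
 * request, so large dumps can be read out in chunks.
 */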
static ssize_t error_state_read(struct file *filp, struct kobject *kobj,
                                struct bin_attribute *attr, char *buf,
                                loff_t off, size_t count)
{
        struct device *kdev = kobj_to_dev(kobj);
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        struct i915_error_state_file_priv error_priv;
        struct drm_i915_error_state_buf error_str;
        ssize_t ret_count = 0;
        int ret;

        memset(&error_priv, 0, sizeof(error_priv));

        ret = i915_error_state_buf_init(&error_str, to_i915(dev), count, off);
        if (ret)
                return ret;

        error_priv.dev = dev;
        i915_error_state_get(dev, &error_priv);

        ret = i915_error_state_to_str(&error_str, &error_priv);
        if (ret)
                goto out;

        ret_count = count < error_str.bytes ? count : error_str.bytes;

        memcpy(buf, error_str.buf, ret_count);
out:
        i915_error_state_put(&error_priv);
        i915_error_state_buf_release(&error_str);

        return ret ?: ret_count;
}

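/*
 * Any write to the error sysfs file clears the captured error state so a
 * subsequent hang can be recorded.
 */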
static ssize_t error_state_write(struct file *file, struct kobject *kobj,
                                 struct bin_attribute *attr, char *buf,
                                 loff_t off, size_t count)
{
        struct device *kdev = kobj_to_dev(kobj);
        struct drm_minor *minor = dev_to_drm_minor(kdev);
        struct drm_device *dev = minor->dev;
        int ret;

        DRM_DEBUG_DRIVER("Resetting error state\n");

        ret = mutex_lock_interruptible(&dev->struct_mutex);
        if (ret)
                return ret;

        i915_destroy_error_state(dev);
        mutex_unlock(&dev->struct_mutex);

        return count;
}

static struct bin_attribute error_state_attr = {
        .attr.name = "error",
        .attr.mode = S_IRUSR | S_IWUSR,
        .size = 0,
        .read = error_state_read,
        .write = error_state_write,
};

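/*
 * Register all i915 sysfs files on the primary DRM minor's device: RC6
 * residency counters (merged into the standard "power" group), the L3
 * parity remap files, the RPS frequency controls and the error state node.
 * Failures are logged but are not fatal to driver load.
 */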
void i915_setup_sysfs(struct drm_device *dev)
{
        int ret;

#ifdef CONFIG_PM
        if (HAS_RC6(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &rc6_attr_group);
                if (ret)
                        DRM_ERROR("RC6 residency sysfs setup failed\n");
        }
        if (HAS_RC6p(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &rc6p_attr_group);
                if (ret)
                        DRM_ERROR("RC6p residency sysfs setup failed\n");
        }
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
                ret = sysfs_merge_group(&dev->primary->kdev->kobj,
                                        &media_rc6_attr_group);
                if (ret)
                        DRM_ERROR("Media RC6 residency sysfs setup failed\n");
        }
#endif
        if (HAS_L3_DPF(dev)) {
                ret = device_create_bin_file(dev->primary->kdev, &dpf_attrs);
                if (ret)
                        DRM_ERROR("l3 parity sysfs setup failed\n");

                if (NUM_L3_SLICES(dev) > 1) {
                        ret = device_create_bin_file(dev->primary->kdev,
                                                     &dpf_attrs_1);
                        if (ret)
                                DRM_ERROR("l3 parity slice 1 setup failed\n");
                }
        }

        ret = 0;
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                ret = sysfs_create_files(&dev->primary->kdev->kobj, vlv_attrs);
        else if (INTEL_INFO(dev)->gen >= 6)
                ret = sysfs_create_files(&dev->primary->kdev->kobj, gen6_attrs);
        if (ret)
                DRM_ERROR("RPS sysfs setup failed\n");

        ret = sysfs_create_bin_file(&dev->primary->kdev->kobj,
                                    &error_state_attr);
        if (ret)
                DRM_ERROR("error_state sysfs setup failed\n");
}

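/*
 * Remove everything registered by i915_setup_sysfs().  The feature checks
 * from setup are not repeated here; removing a file that was never created
 * simply has no effect.
 */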
void i915_teardown_sysfs(struct drm_device *dev)
{
        sysfs_remove_bin_file(&dev->primary->kdev->kobj, &error_state_attr);
        if (IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev))
                sysfs_remove_files(&dev->primary->kdev->kobj, vlv_attrs);
        else
                sysfs_remove_files(&dev->primary->kdev->kobj, gen6_attrs);
        device_remove_bin_file(dev->primary->kdev, &dpf_attrs_1);
        device_remove_bin_file(dev->primary->kdev, &dpf_attrs);
#ifdef CONFIG_PM
        sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6_attr_group);
        sysfs_unmerge_group(&dev->primary->kdev->kobj, &rc6p_attr_group);
#endif
}