linux/drivers/gpu/drm/i915/intel_runtime_pm.c
/*
 * Copyright © 2012-2014 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 *
 * Authors:
 *    Eugeni Dodonov <eugeni.dodonov@intel.com>
 *    Daniel Vetter <daniel.vetter@ffwll.ch>
 *
 */

#include <linux/pm_runtime.h>

#include <drm/drm_print.h>

#include "i915_drv.h"
#include "i915_trace.h"

/**
 * DOC: runtime pm
 *
 * The i915 driver supports dynamic enabling and disabling of entire hardware
 * blocks at runtime. This is especially important on the display side where
 * software is supposed to control many power gates manually on recent hardware,
 * since on the GT side a lot of the power management is done by the hardware.
 * But even there some manual control at the device level is required.
 *
 * Since i915 supports a diverse set of platforms with a unified codebase and
 * hardware engineers just love to shuffle functionality around between power
 * domains there's a sizeable amount of indirection required. This file provides
 * generic functions to the driver for grabbing and releasing references for
 * abstract power domains. It then maps those to the actual power wells
 * present for a given platform.
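 *
 * As an illustrative sketch only (not lifted from any particular caller),
 * the usual pattern is to bracket hardware access with a get/put pair and
 * keep the returned cookie for the release:
 *
 *      intel_wakeref_t wakeref;
 *
 *      wakeref = intel_runtime_pm_get(&i915->runtime_pm);
 *      ... access the hardware while it is guaranteed to be awake ...
 *      intel_runtime_pm_put(&i915->runtime_pm, wakeref);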
 */

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)

#include <linux/sort.h>

#define STACKDEPTH 8

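/*
 * Capture the caller's stack trace (skipping this helper's own frame) and
 * stash it in the stack depot, returning a compact handle that can be kept
 * per wakeref and expanded again for debug output.
 */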
static noinline depot_stack_handle_t __save_depot_stack(void)
{
        unsigned long entries[STACKDEPTH];
        unsigned int n;

        n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);
        return stack_depot_save(entries, n, GFP_NOWAIT | __GFP_NOWARN);
}

static void __print_depot_stack(depot_stack_handle_t stack,
                                char *buf, int sz, int indent)
{
        unsigned long *entries;
        unsigned int nr_entries;

        nr_entries = stack_depot_fetch(stack, &entries);
        stack_trace_snprint(buf, sz, entries, nr_entries, indent);
}

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
        spin_lock_init(&rpm->debug.lock);
}

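/*
 * Record the current call stack as the owner of a newly acquired wakeref.
 * The handle is appended to the debug.owners array under debug.lock and
 * doubles as the wakeref cookie; -1 is returned (and nothing is tracked)
 * if runtime pm is not available on the platform or an allocation fails.
 */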
static noinline depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
        depot_stack_handle_t stack, *stacks;
        unsigned long flags;

        if (!rpm->available)
                return -1;

        stack = __save_depot_stack();
        if (!stack)
                return -1;

        spin_lock_irqsave(&rpm->debug.lock, flags);

        if (!rpm->debug.count)
                rpm->debug.last_acquire = stack;

        stacks = krealloc(rpm->debug.owners,
                          (rpm->debug.count + 1) * sizeof(*stacks),
                          GFP_NOWAIT | __GFP_NOWARN);
        if (stacks) {
                stacks[rpm->debug.count++] = stack;
                rpm->debug.owners = stacks;
        } else {
                stack = -1;
        }

        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        return stack;
}

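/*
 * Drop the tracking entry matching the cookie handed out by
 * track_intel_runtime_pm_wakeref(). If no matching entry is found the
 * wakeref is unbalanced, so warn and dump both the offending stack and
 * the stack of the last release to help debugging.
 */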
static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                                             depot_stack_handle_t stack)
{
        struct drm_i915_private *i915 = container_of(rpm,
                                                     struct drm_i915_private,
                                                     runtime_pm);
        unsigned long flags, n;
        bool found = false;

        if (unlikely(stack == -1))
                return;

        spin_lock_irqsave(&rpm->debug.lock, flags);
        for (n = rpm->debug.count; n--; ) {
                if (rpm->debug.owners[n] == stack) {
                        memmove(rpm->debug.owners + n,
                                rpm->debug.owners + n + 1,
                                (--rpm->debug.count - n) * sizeof(stack));
                        found = true;
                        break;
                }
        }
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        if (drm_WARN(&i915->drm, !found,
                     "Unmatched wakeref (tracking %lu), count %u\n",
                     rpm->debug.count, atomic_read(&rpm->wakeref_count))) {
                char *buf;

                buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
                if (!buf)
                        return;

                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                DRM_DEBUG_DRIVER("wakeref %x from\n%s", stack, buf);

                stack = READ_ONCE(rpm->debug.last_release);
                if (stack) {
                        __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                        DRM_DEBUG_DRIVER("wakeref last released at\n%s", buf);
                }

                kfree(buf);
        }
}

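/*
 * sort() comparator: order the handles numerically so identical stacks end
 * up adjacent and can be coalesced when printed below.
 */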
static int cmphandle(const void *_a, const void *_b)
{
        const depot_stack_handle_t * const a = _a, * const b = _b;

        if (*a < *b)
                return -1;
        else if (*a > *b)
                return 1;
        else
                return 0;
}

static void
__print_intel_runtime_pm_wakeref(struct drm_printer *p,
                                 const struct intel_runtime_pm_debug *dbg)
{
        unsigned long i;
        char *buf;

        buf = kmalloc(PAGE_SIZE, GFP_NOWAIT | __GFP_NOWARN);
        if (!buf)
                return;

        if (dbg->last_acquire) {
                __print_depot_stack(dbg->last_acquire, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref last acquired:\n%s", buf);
        }

        if (dbg->last_release) {
                __print_depot_stack(dbg->last_release, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref last released:\n%s", buf);
        }

        drm_printf(p, "Wakeref count: %lu\n", dbg->count);

        sort(dbg->owners, dbg->count, sizeof(*dbg->owners), cmphandle, NULL);

        for (i = 0; i < dbg->count; i++) {
                depot_stack_handle_t stack = dbg->owners[i];
                unsigned long rep;

                rep = 1;
                while (i + 1 < dbg->count && dbg->owners[i + 1] == stack)
                        rep++, i++;
                __print_depot_stack(stack, buf, PAGE_SIZE, 2);
                drm_printf(p, "Wakeref x%lu taken at:\n%s", rep, buf);
        }

        kfree(buf);
}

static noinline void
__untrack_all_wakerefs(struct intel_runtime_pm_debug *debug,
                       struct intel_runtime_pm_debug *saved)
{
        *saved = *debug;

        debug->owners = NULL;
        debug->count = 0;
        debug->last_release = __save_depot_stack();
}

static void
dump_and_free_wakeref_tracking(struct intel_runtime_pm_debug *debug)
{
        if (debug->count) {
                struct drm_printer p = drm_debug_printer("i915");

                __print_intel_runtime_pm_wakeref(&p, debug);
        }

        kfree(debug->owners);
}

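/*
 * Called on every release: once the wakeref count drops to zero, steal the
 * whole tracking state under the lock and dump/free it outside of it.
 */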
static noinline void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
        struct intel_runtime_pm_debug dbg = {};
        unsigned long flags;

        if (!atomic_dec_and_lock_irqsave(&rpm->wakeref_count,
                                         &rpm->debug.lock,
                                         flags))
                return;

        __untrack_all_wakerefs(&rpm->debug, &dbg);
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        dump_and_free_wakeref_tracking(&dbg);
}

static noinline void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
        struct intel_runtime_pm_debug dbg = {};
        unsigned long flags;

        spin_lock_irqsave(&rpm->debug.lock, flags);
        __untrack_all_wakerefs(&rpm->debug, &dbg);
        spin_unlock_irqrestore(&rpm->debug.lock, flags);

        dump_and_free_wakeref_tracking(&dbg);
}

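/*
 * Snapshot the currently tracked wakerefs into a local copy, retrying with
 * a larger buffer (grown outside the lock) until everything fits, and then
 * pretty-print the result to @p.
 */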
void print_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                                    struct drm_printer *p)
{
        struct intel_runtime_pm_debug dbg = {};

        do {
                unsigned long alloc = dbg.count;
                depot_stack_handle_t *s;

                spin_lock_irq(&rpm->debug.lock);
                dbg.count = rpm->debug.count;
                if (dbg.count <= alloc) {
                        memcpy(dbg.owners,
                               rpm->debug.owners,
                               dbg.count * sizeof(*s));
                }
                dbg.last_acquire = rpm->debug.last_acquire;
                dbg.last_release = rpm->debug.last_release;
                spin_unlock_irq(&rpm->debug.lock);
                if (dbg.count <= alloc)
                        break;

                s = krealloc(dbg.owners,
                             dbg.count * sizeof(*s),
                             GFP_NOWAIT | __GFP_NOWARN);
                if (!s)
                        goto out;

                dbg.owners = s;
        } while (1);

        __print_intel_runtime_pm_wakeref(p, &dbg);

out:
        kfree(dbg.owners);
}

#else

static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
}

static depot_stack_handle_t
track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm)
{
        return -1;
}

static void untrack_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm,
                                             intel_wakeref_t wref)
{
}

static void
__intel_wakeref_dec_and_check_tracking(struct intel_runtime_pm *rpm)
{
        atomic_dec(&rpm->wakeref_count);
}

static void
untrack_all_intel_runtime_pm_wakerefs(struct intel_runtime_pm *rpm)
{
}

#endif

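/*
 * Both flavours of reference share a single atomic: a raw wakeref bumps the
 * count by one, while a wakelock additionally adds INTEL_RPM_WAKELOCK_BIAS,
 * letting intel_rpm_raw_wakeref_count() and intel_rpm_wakelock_count() (see
 * intel_runtime_pm_driver_release()) recover both totals from the same value.
 */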
static void
intel_runtime_pm_acquire(struct intel_runtime_pm *rpm, bool wakelock)
{
        if (wakelock) {
                atomic_add(1 + INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
                assert_rpm_wakelock_held(rpm);
        } else {
                atomic_inc(&rpm->wakeref_count);
                assert_rpm_raw_wakeref_held(rpm);
        }
}

static void
intel_runtime_pm_release(struct intel_runtime_pm *rpm, int wakelock)
{
        if (wakelock) {
                assert_rpm_wakelock_held(rpm);
                atomic_sub(INTEL_RPM_WAKELOCK_BIAS, &rpm->wakeref_count);
        } else {
                assert_rpm_raw_wakeref_held(rpm);
        }

        __intel_wakeref_dec_and_check_tracking(rpm);
}

static intel_wakeref_t __intel_runtime_pm_get(struct intel_runtime_pm *rpm,
                                              bool wakelock)
{
        struct drm_i915_private *i915 = container_of(rpm,
                                                     struct drm_i915_private,
                                                     runtime_pm);
        int ret;

        ret = pm_runtime_get_sync(rpm->kdev);
        drm_WARN_ONCE(&i915->drm, ret < 0,
                      "pm_runtime_get_sync() failed: %d\n", ret);

        intel_runtime_pm_acquire(rpm, wakelock);

        return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_raw - grab a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for
 * asynchronous PM management from display code) and ensures that it is powered
 * up. Raw references are not considered during wakelock assert checks.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put_raw() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put_raw(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
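 *
 * A minimal usage sketch (purely illustrative):
 *
 *      wakeref = intel_runtime_pm_get_raw(rpm);
 *      ... the hardware is now guaranteed to be powered ...
 *      intel_runtime_pm_put_raw(rpm, wakeref);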
 */
intel_wakeref_t intel_runtime_pm_get_raw(struct intel_runtime_pm *rpm)
{
        return __intel_runtime_pm_get(rpm, false);
}

/**
 * intel_runtime_pm_get - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on) and ensures that it is powered up.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
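 *
 * Note: where this tree also provides the with_intel_runtime_pm() helper
 * macro, it wraps exactly this get/put pairing for simple scoped sections,
 * e.g. (sketch):
 *
 *      with_intel_runtime_pm(rpm, wakeref)
 *              ... access the hardware ...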
 */
intel_wakeref_t intel_runtime_pm_get(struct intel_runtime_pm *rpm)
{
        return __intel_runtime_pm_get(rpm, true);
}

/**
 * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference if the device is
 * already in use and ensures that it is powered up. It is illegal to try
 * and access the HW should intel_runtime_pm_get_if_in_use() report failure.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put(), evaluates
 * as True if the wakeref was acquired, or False otherwise.
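 *
 * A typical (illustrative) pattern checks the returned cookie before
 * touching the hardware:
 *
 *      wakeref = intel_runtime_pm_get_if_in_use(rpm);
 *      if (!wakeref)
 *              return;         (device is suspended, do not touch the HW)
 *
 *      ... access the hardware ...
 *      intel_runtime_pm_put(rpm, wakeref);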
 */
intel_wakeref_t intel_runtime_pm_get_if_in_use(struct intel_runtime_pm *rpm)
{
        if (IS_ENABLED(CONFIG_PM)) {
                /*
                 * In case runtime PM is disabled by the RPM core and we get
                 * an -EINVAL return value, we are not supposed to call this
                 * function, since the power state is undefined. This applies
                 * at the moment to the late/early system suspend/resume handlers.
                 */
                if (pm_runtime_get_if_in_use(rpm->kdev) <= 0)
                        return 0;
        }

        intel_runtime_pm_acquire(rpm, true);

        return track_intel_runtime_pm_wakeref(rpm);
}

/**
 * intel_runtime_pm_get_noresume - grab a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function grabs a device-level runtime pm reference (mostly used for GEM
 * code to ensure the GTT or GT is on).
 *
 * It will _not_ power up the device but instead only check that it's powered
 * on.  Therefore it is only valid to call this function from contexts where
 * the device is known to be powered up and where trying to power it up would
 * result in hilarity and deadlocks. That pretty much means only the system
 * suspend/resume code where this is used to grab runtime pm references for
 * delayed setup down in work items.
 *
 * Any runtime pm reference obtained by this function must have a symmetric
 * call to intel_runtime_pm_put() to release the reference again.
 *
 * Returns: the wakeref cookie to pass to intel_runtime_pm_put()
 */
intel_wakeref_t intel_runtime_pm_get_noresume(struct intel_runtime_pm *rpm)
{
        assert_rpm_wakelock_held(rpm);
        pm_runtime_get_noresume(rpm->kdev);

        intel_runtime_pm_acquire(rpm, true);

        return track_intel_runtime_pm_wakeref(rpm);
}

static void __intel_runtime_pm_put(struct intel_runtime_pm *rpm,
                                   intel_wakeref_t wref,
                                   bool wakelock)
{
        struct device *kdev = rpm->kdev;

        untrack_intel_runtime_pm_wakeref(rpm, wref);

        intel_runtime_pm_release(rpm, wakelock);

        pm_runtime_mark_last_busy(kdev);
        pm_runtime_put_autosuspend(kdev);
}

/**
 * intel_runtime_pm_put_raw - release a raw runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get_raw() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void
intel_runtime_pm_put_raw(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
        __intel_runtime_pm_put(rpm, wref, false);
}

/**
 * intel_runtime_pm_put_unchecked - release an unchecked runtime pm reference
 * @rpm: the intel_runtime_pm structure
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 *
 * This function exists only for historical reasons and should be avoided in
 * new code, as the correctness of its use cannot be checked. Always use
 * intel_runtime_pm_put() instead.
 */
void intel_runtime_pm_put_unchecked(struct intel_runtime_pm *rpm)
{
        __intel_runtime_pm_put(rpm, -1, true);
}

#if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM)
/**
 * intel_runtime_pm_put - release a runtime pm reference
 * @rpm: the intel_runtime_pm structure
 * @wref: wakeref acquired for the reference that is being released
 *
 * This function drops the device-level runtime pm reference obtained by
 * intel_runtime_pm_get() and might power down the corresponding
 * hardware block right away if this is the last reference.
 */
void intel_runtime_pm_put(struct intel_runtime_pm *rpm, intel_wakeref_t wref)
{
        __intel_runtime_pm_put(rpm, wref, true);
}
#endif

/**
 * intel_runtime_pm_enable - enable runtime pm
 * @rpm: the intel_runtime_pm structure
 *
 * This function enables runtime pm at the end of the driver load sequence.
 *
 * Note that this function does not currently enable runtime pm for the
 * subordinate display power domains. That is done by
 * intel_power_domains_enable().
 */
void intel_runtime_pm_enable(struct intel_runtime_pm *rpm)
{
        struct drm_i915_private *i915 = container_of(rpm,
                                                     struct drm_i915_private,
                                                     runtime_pm);
        struct device *kdev = rpm->kdev;

        /*
         * Disable the system suspend direct complete optimization, which can
         * leave the device suspended skipping the driver's suspend handlers
         * if the device was already runtime suspended. This is needed due to
         * the difference in our runtime and system suspend sequence and
         * because the HDA driver may require us to enable the audio power
         * domain during system suspend.
         */
        dev_pm_set_driver_flags(kdev, DPM_FLAG_NO_DIRECT_COMPLETE);

        pm_runtime_set_autosuspend_delay(kdev, 10000); /* 10s */
        pm_runtime_mark_last_busy(kdev);

        /*
         * Take a permanent reference to disable the RPM functionality and drop
         * it only when unloading the driver. Use the low level get/put helpers,
         * so the driver's own RPM reference tracking asserts also work on
         * platforms without RPM support.
         */
        if (!rpm->available) {
                int ret;

                pm_runtime_dont_use_autosuspend(kdev);
                ret = pm_runtime_get_sync(kdev);
                drm_WARN(&i915->drm, ret < 0,
                         "pm_runtime_get_sync() failed: %d\n", ret);
        } else {
                pm_runtime_use_autosuspend(kdev);
        }

        /*
         * The core calls the driver load handler with an RPM reference held.
         * We drop that here and will reacquire it during unloading in
         * intel_runtime_pm_disable().
         */
        pm_runtime_put_autosuspend(kdev);
}

void intel_runtime_pm_disable(struct intel_runtime_pm *rpm)
{
        struct drm_i915_private *i915 = container_of(rpm,
                                                     struct drm_i915_private,
                                                     runtime_pm);
        struct device *kdev = rpm->kdev;

        /* Transfer rpm ownership back to core */
        drm_WARN(&i915->drm, pm_runtime_get_sync(kdev) < 0,
                 "Failed to pass rpm ownership back to core\n");

        pm_runtime_dont_use_autosuspend(kdev);

        if (!rpm->available)
                pm_runtime_put(kdev);
}

void intel_runtime_pm_driver_release(struct intel_runtime_pm *rpm)
{
        struct drm_i915_private *i915 = container_of(rpm,
                                                     struct drm_i915_private,
                                                     runtime_pm);
        int count = atomic_read(&rpm->wakeref_count);

        drm_WARN(&i915->drm, count,
                 "i915 raw-wakerefs=%d wakelocks=%d on cleanup\n",
                 intel_rpm_raw_wakeref_count(count),
                 intel_rpm_wakelock_count(count));

        untrack_all_intel_runtime_pm_wakerefs(rpm);
}

void intel_runtime_pm_init_early(struct intel_runtime_pm *rpm)
{
        struct drm_i915_private *i915 =
                        container_of(rpm, struct drm_i915_private, runtime_pm);
        struct pci_dev *pdev = i915->drm.pdev;
        struct device *kdev = &pdev->dev;

        rpm->kdev = kdev;
        rpm->available = HAS_RUNTIME_PM(i915);

        init_intel_runtime_pm_wakeref(rpm);
}