linux/arch/arm/common/bL_switcher.c
/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
        unsigned int id;
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
        return id & MPIDR_HWID_BITMASK;
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
        unsigned ib_mpidr, ib_cpu, ib_cluster;
        long volatile handshake, **handshake_ptr = _arg;

        pr_debug("%s\n", __func__);

        ib_mpidr = cpu_logical_map(smp_processor_id());
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        /* Advertise our handshake location */
        if (handshake_ptr) {
                handshake = 0;
                *handshake_ptr = &handshake;
        } else
                handshake = -1;

        /*
         * Our state has been saved at this point.  Let's release our
         * inbound CPU.
         */
        mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
        sev();

        /*
         * From this point, we must assume that our counterpart CPU might
         * have taken over in its parallel world already, as if execution
         * just returned from cpu_suspend().  It is therefore important to
         * be very careful not to make any change the other guy is not
         * expecting.  This is why we need stack isolation.
         *
         * Fancy under cover tasks could be performed here.  For now
         * we have none.
         */

        /*
         * Let's wait until our inbound is alive.
         */
        while (!handshake) {
                wfe();
                smp_mb();
        }

        /* Let's put ourself down. */
        mcpm_cpu_power_down();

        /* should never get here */
        BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
        unsigned int mpidr = read_mpidr();
        unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        void *stack = current_thread_info() + 1;
        stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
        stack += clusterid * STACK_SIZE + STACK_SIZE;
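        /*
         * Layout note (a sketch inferred from the code above, not from the
         * original comments): the sp handed to call_with_stack() is the
         * *top* of the area since ARM stacks grow downwards, hence the
         * trailing "+ STACK_SIZE".  Cluster 0 thus uses the 512 bytes just
         * above the cache-aligned end of thread_info, and cluster 1 the
         * next 512 bytes, so the two switch paths never share stack space.
         */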
        call_with_stack(bL_do_switch, (void *)_arg, stack);
        BUG();
}

/*
 * Generic switcher interface
 */

static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
        unsigned int mpidr, this_cpu, that_cpu;
        unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
        struct completion inbound_alive;
        long volatile *handshake_ptr;
        int ipi_nr, ret;

        this_cpu = smp_processor_id();
        ob_mpidr = read_mpidr();
        ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
        ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
        BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

        if (new_cluster_id == ob_cluster)
                return 0;

        that_cpu = bL_switcher_cpu_pairing[this_cpu];
        ib_mpidr = cpu_logical_map(that_cpu);
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
                 this_cpu, ob_mpidr, ib_mpidr);

        this_cpu = smp_processor_id();

        /* Close the gate for our entry vectors */
        mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
        mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

        /* Install our "inbound alive" notifier. */
        init_completion(&inbound_alive);
        ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
        ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
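        /*
         * Encoding note (an inferred sketch, not from this file's own
         * comments): the value poked into GICD_SGIR below carries the SGI
         * number from register_ipi_completion() in its low bits, while
         * "(1 << 16) << gic_id" sets the outbound CPU's bit in the
         * CPUTargetList field (bits [23:16]), so the inbound CPU's early
         * boot code pings us back as soon as it is alive.
         */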
        mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

        /*
         * Let's wake up the inbound CPU now in case it requires some delay
         * to come online, but leave it gated in our entry vector code.
         */
        ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
        if (ret) {
                pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
                return ret;
        }

        /*
         * Raise an SGI on the inbound CPU to make sure it doesn't stall
         * in a possible WFI, such as in bL_power_down().
         */
        gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

        /*
         * Wait for the inbound to come up.  This allows for other
         * tasks to be scheduled in the meantime.
         */
        wait_for_completion(&inbound_alive);
        mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

        /*
         * From this point we are entering the switch critical zone
         * and can't take any interrupts anymore.
         */
        local_irq_disable();
        local_fiq_disable();
        trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);

        /* redirect GIC's SGIs to our counterpart */
        gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

        tick_suspend_local();

        ret = cpu_pm_enter();

        /* we cannot tolerate errors at this point */
        if (ret)
                panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

        /* Swap the physical CPUs in the logical map for this logical CPU. */
        cpu_logical_map(this_cpu) = ib_mpidr;
        cpu_logical_map(that_cpu) = ob_mpidr;

        /* Let's do the actual CPU switch. */
        ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
        if (ret > 0)
                panic("%s: cpu_suspend() returned %d\n", __func__, ret);

        /* We are executing on the inbound CPU at this point */
        mpidr = read_mpidr();
        pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
        BUG_ON(mpidr != ib_mpidr);

        mcpm_cpu_powered_up();

        ret = cpu_pm_exit();

        tick_resume_local();

        trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
        local_fiq_enable();
        local_irq_enable();

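        /*
         * Writing the handshake releases the outbound CPU, which is still
         * spinning in bL_do_switch() on its isolated stack, so it can
         * proceed to mcpm_cpu_power_down(); dsb_sev() makes the store
         * visible and wakes it from wfe().
         */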
        *handshake_ptr = 1;
        dsb_sev();

        if (ret)
                pr_err("%s exiting with error %d\n", __func__, ret);
        return ret;
}

struct bL_thread {
        spinlock_t lock;
        struct task_struct *task;
        wait_queue_head_t wq;
        int wanted_cluster;
        struct completion started;
        bL_switch_completion_handler completer;
        void *completer_cookie;
};

static struct bL_thread bL_threads[NR_CPUS];

static int bL_switcher_thread(void *arg)
{
        struct bL_thread *t = arg;
        struct sched_param param = { .sched_priority = 1 };
        int cluster;
        bL_switch_completion_handler completer;
        void *completer_cookie;

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
        complete(&t->started);

        do {
                if (signal_pending(current))
                        flush_signals(current);
                wait_event_interruptible(t->wq,
                                t->wanted_cluster != -1 ||
                                kthread_should_stop());

                spin_lock(&t->lock);
                cluster = t->wanted_cluster;
                completer = t->completer;
                completer_cookie = t->completer_cookie;
                t->wanted_cluster = -1;
                t->completer = NULL;
                spin_unlock(&t->lock);

                if (cluster != -1) {
                        bL_switch_to(cluster);

                        if (completer)
                                completer(completer_cookie);
                }
        } while (!kthread_should_stop());

        return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
        struct task_struct *task;

        task = kthread_create_on_node(bL_switcher_thread, arg,
                                      cpu_to_node(cpu), "kswitcher_%d", cpu);
        if (!IS_ERR(task)) {
                kthread_bind(task, cpu);
                wake_up_process(task);
        } else
                pr_err("%s failed for CPU %d\n", __func__, cpu);
        return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *      with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback.  If non-NULL,
 *      @completer(@completer_cookie) will be called on completion of
 *      the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete.  This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns.  When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete, and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
                         bL_switch_completion_handler completer,
                         void *completer_cookie)
{
        struct bL_thread *t;

        if (cpu >= ARRAY_SIZE(bL_threads)) {
                pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
                return -EINVAL;
        }

        t = &bL_threads[cpu];

        if (IS_ERR(t->task))
                return PTR_ERR(t->task);
        if (!t->task)
                return -ESRCH;

        spin_lock(&t->lock);
        if (t->completer) {
                spin_unlock(&t->lock);
                return -EBUSY;
        }
        t->completer = completer;
        t->completer_cookie = completer_cookie;
        t->wanted_cluster = new_cluster_id;
        spin_unlock(&t->lock);
        wake_up(&t->wq);
        return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);
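
/*
 * Minimal usage sketch (hypothetical caller, not part of this file): a
 * driver wanting CPU 0 moved to cluster 1 and a notification once the
 * switch has finished could do something like
 *
 *      static void my_switch_done(void *cookie)
 *      {
 *              complete(cookie);
 *      }
 *      ...
 *      ret = bL_switch_request_cb(0, 1, my_switch_done, &my_completion);
 *
 * where my_switch_done() is invoked in non-atomic context once the switch
 * completes.  bL_switch_request() (see asm/bL_switcher.h) is the same
 * request without the completion callback.
 */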

/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);

static int bL_activation_notify(unsigned long val)
{
        int ret;

        ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
        if (ret & NOTIFY_STOP_MASK)
                pr_err("%s: notifier chain failed with status 0x%x\n",
                        __func__, ret);
        return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
        int i;

        for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
                struct device *cpu_dev = get_cpu_device(i);
                int ret = device_online(cpu_dev);
                if (ret)
                        dev_err(cpu_dev, "switcher: unable to restore CPU\n");
        }
}

static int bL_switcher_halve_cpus(void)
{
        int i, j, cluster_0, gic_id, ret;
        unsigned int cpu, cluster, mask;
        cpumask_t available_cpus;

        /* First pass to validate what we have */
        mask = 0;
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster >= 2) {
                        pr_err("%s: only dual cluster systems are supported\n", __func__);
                        return -EINVAL;
                }
                if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
                        return -EINVAL;
                mask |= (1 << cluster);
        }
        if (mask != 3) {
                pr_err("%s: no CPU pairing possible\n", __func__);
                return -EINVAL;
        }

        /*
         * Now let's do the pairing.  We match each CPU with another CPU
         * from a different cluster.  To get a uniform scheduling behavior
         * without fiddling with CPU topology and compute capacity data,
         * we'll use logical CPUs initially belonging to the same cluster.
         */
        memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
        cpumask_copy(&available_cpus, cpu_online_mask);
        cluster_0 = -1;
        for_each_cpu(i, &available_cpus) {
                int match = -1;
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster_0 == -1)
                        cluster_0 = cluster;
                if (cluster != cluster_0)
                        continue;
                cpumask_clear_cpu(i, &available_cpus);
                for_each_cpu(j, &available_cpus) {
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
                        /*
                         * Let's remember the last match to create "odd"
                         * pairings on purpose in order for other code not
                         * to assume any relation between physical and
                         * logical CPU numbers.
                         */
                        if (cluster != cluster_0)
                                match = j;
                }
                if (match != -1) {
                        bL_switcher_cpu_pairing[i] = match;
                        cpumask_clear_cpu(match, &available_cpus);
                        pr_info("CPU%d paired with CPU%d\n", i, match);
                }
        }
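
        /*
         * Worked example (hypothetical topology): with logical CPUs 0-1 in
         * one cluster and 2-3 in the other, the "last match" rule above
         * pairs 0 with 3 and then 1 with 2; CPUs 2 and 3 keep a pairing of
         * -1 and are taken offline below.
         */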

        /*
         * Now we disable the unwanted CPUs i.e. everything that has no
         * pairing information (that includes the pairing counterparts).
         */
        cpumask_clear(&bL_switcher_removed_logical_cpus);
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

                /* Let's take note of the GIC ID for this CPU */
                gic_id = gic_get_cpu_id(i);
                if (gic_id < 0) {
                        pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
                        bL_switcher_restore_cpus();
                        return -EINVAL;
                }
                bL_gic_id[cpu][cluster] = gic_id;
                pr_info("GIC ID for CPU %u cluster %u is %u\n",
                        cpu, cluster, gic_id);

                if (bL_switcher_cpu_pairing[i] != -1) {
                        bL_switcher_cpu_original_cluster[i] = cluster;
                        continue;
                }

                ret = device_offline(get_cpu_device(i));
                if (ret) {
                        bL_switcher_restore_cpus();
                        return ret;
                }
                cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
        }

        return 0;
}

/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
        int cpu;

        if (!bL_switcher_active)
                return -EUNATCH;

        mpidr &= MPIDR_HWID_BITMASK;
        for_each_online_cpu(cpu) {
                int pairing = bL_switcher_cpu_pairing[cpu];
                if (pairing == -1)
                        continue;
                if ((mpidr == cpu_logical_map(cpu)) ||
                    (mpidr == cpu_logical_map(pairing)))
                        return cpu;
        }
        return -EINVAL;
}

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
        trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}

int bL_switcher_trace_trigger(void)
{
        int ret;

        preempt_disable();

        bL_switcher_trace_trigger_cpu(NULL);
        ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

        preempt_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

static int bL_switcher_enable(void)
{
        int cpu, ret;

        mutex_lock(&bL_switcher_activation_lock);
        lock_device_hotplug();
        if (bL_switcher_active) {
                unlock_device_hotplug();
                mutex_unlock(&bL_switcher_activation_lock);
                return 0;
        }

        pr_info("big.LITTLE switcher initializing\n");

        ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
        if (ret)
                goto error;

        ret = bL_switcher_halve_cpus();
        if (ret)
                goto error;

        bL_switcher_trace_trigger();

        for_each_online_cpu(cpu) {
                struct bL_thread *t = &bL_threads[cpu];
                spin_lock_init(&t->lock);
                init_waitqueue_head(&t->wq);
                init_completion(&t->started);
                t->wanted_cluster = -1;
                t->task = bL_switcher_thread_create(cpu, t);
        }

        bL_switcher_active = 1;
        bL_activation_notify(BL_NOTIFY_POST_ENABLE);
        pr_info("big.LITTLE switcher initialized\n");
        goto out;

error:
        pr_warn("big.LITTLE switcher initialization failed\n");
        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        unlock_device_hotplug();
        mutex_unlock(&bL_switcher_activation_lock);
        return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
        unsigned int cpu, cluster;
        struct bL_thread *t;
        struct task_struct *task;

        mutex_lock(&bL_switcher_activation_lock);
        lock_device_hotplug();

        if (!bL_switcher_active)
                goto out;

        if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
                bL_activation_notify(BL_NOTIFY_POST_ENABLE);
                goto out;
        }

        bL_switcher_active = 0;

        /*
         * To deactivate the switcher, we must shut down the switcher
         * threads to prevent any other requests from being accepted.
         * Then, if the final cluster for a given logical CPU is not the
         * same as the original one, we'll recreate a switcher thread
         * just for the purpose of switching the CPU back without any
         * possibility for interference from external requests.
         */
        for_each_online_cpu(cpu) {
                t = &bL_threads[cpu];
                task = t->task;
                t->task = NULL;
                if (!task || IS_ERR(task))
                        continue;
                kthread_stop(task);
                /* no more switch may happen on this CPU at this point */
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                if (cluster == bL_switcher_cpu_original_cluster[cpu])
                        continue;
                init_completion(&t->started);
                t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
                task = bL_switcher_thread_create(cpu, t);
                if (!IS_ERR(task)) {
                        wait_for_completion(&t->started);
                        kthread_stop(task);
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                        if (cluster == bL_switcher_cpu_original_cluster[cpu])
                                continue;
                }
                /* If execution gets here, we're in trouble. */
                pr_crit("%s: unable to restore original cluster for CPU %d\n",
                        __func__, cpu);
                pr_crit("%s: CPU %d can't be restored\n",
                        __func__, bL_switcher_cpu_pairing[cpu]);
                cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
                                  &bL_switcher_removed_logical_cpus);
        }

        bL_switcher_restore_cpus();
        bL_switcher_trace_trigger();

        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        unlock_device_hotplug();
        mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret;

        switch (buf[0]) {
        case '0':
                bL_switcher_disable();
                ret = 0;
                break;
        case '1':
                ret = bL_switcher_enable();
                break;
        default:
                ret = -EINVAL;
        }

        return (ret >= 0) ? count : ret;
}

static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret = bL_switcher_trace_trigger();

        return ret ? ret : count;
}

static struct kobj_attribute bL_switcher_active_attr =
        __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct kobj_attribute bL_switcher_trace_trigger_attr =
        __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);

static struct attribute *bL_switcher_attrs[] = {
        &bL_switcher_active_attr.attr,
        &bL_switcher_trace_trigger_attr.attr,
        NULL,
};

static struct attribute_group bL_switcher_attr_group = {
        .attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

static int __init bL_switcher_sysfs_init(void)
{
        int ret;

        bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
        if (!bL_switcher_kobj)
                return -ENOMEM;
        ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
        if (ret)
                kobject_put(bL_switcher_kobj);
        return ret;
}
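
/*
 * With CONFIG_SYSFS the attributes above land under /sys/kernel/bL_switcher/
 * (kernel_kobj), so from a shell one would expect something like:
 *
 *      echo 0 > /sys/kernel/bL_switcher/active           disable the switcher
 *      echo 1 > /sys/kernel/bL_switcher/active           re-enable it
 *      echo 1 > /sys/kernel/bL_switcher/trace_trigger    emit a trace snapshot
 *
 * The exact paths are inferred from kobject_create_and_add() above rather
 * than documented in this file.
 */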

#endif  /* CONFIG_SYSFS */

bool bL_switcher_get_enabled(void)
{
        mutex_lock(&bL_switcher_activation_lock);

        return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
        mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_hotplug_callback(struct notifier_block *nfb,
                                        unsigned long action, void *hcpu)
{
        if (bL_switcher_active) {
                int pairing = bL_switcher_cpu_pairing[(unsigned long)hcpu];
                switch (action & 0xf) {
                case CPU_UP_PREPARE:
                case CPU_DOWN_PREPARE:
                        if (pairing == -1)
                                return NOTIFY_BAD;
                }
        }
        return NOTIFY_DONE;
}

static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);

static int __init bL_switcher_init(void)
{
        int ret;

        if (!mcpm_is_available())
                return -ENODEV;

        cpu_notifier(bL_switcher_hotplug_callback, 0);

        if (!no_bL_switcher) {
                ret = bL_switcher_enable();
                if (ret)
                        return ret;
        }

#ifdef CONFIG_SYSFS
        ret = bL_switcher_sysfs_init();
        if (ret)
                pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

        return 0;
}

late_initcall(bL_switcher_init);