/*
 * arch/arm/common/bL_switcher.c -- big.LITTLE cluster switcher core driver
 *
 * Created by:  Nicolas Pitre, March 2012
 * Copyright:   (C) 2012-2013  Linaro Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/atomic.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched/signal.h>
#include <uapi/linux/sched/types.h>
#include <linux/interrupt.h>
#include <linux/cpu_pm.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/kthread.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/clockchips.h>
#include <linux/hrtimer.h>
#include <linux/tick.h>
#include <linux/notifier.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/sysfs.h>
#include <linux/irqchip/arm-gic.h>
#include <linux/moduleparam.h>

#include <asm/smp_plat.h>
#include <asm/cputype.h>
#include <asm/suspend.h>
#include <asm/mcpm.h>
#include <asm/bL_switcher.h>

#define CREATE_TRACE_POINTS
#include <trace/events/power_cpu_migrate.h>


/*
 * Use our own MPIDR accessors as the generic ones in asm/cputype.h have
 * __attribute_const__ and we don't want the compiler to assume any
 * constness here as the value _does_ change along some code paths.
 */

static int read_mpidr(void)
{
        unsigned int id;
        asm volatile ("mrc p15, 0, %0, c0, c0, 5" : "=r" (id));
        return id & MPIDR_HWID_BITMASK;
}

/*
 * bL switcher core code.
 */

static void bL_do_switch(void *_arg)
{
        unsigned ib_mpidr, ib_cpu, ib_cluster;
        long volatile handshake, **handshake_ptr = _arg;

        pr_debug("%s\n", __func__);

        ib_mpidr = cpu_logical_map(smp_processor_id());
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        /* Advertise our handshake location */
        if (handshake_ptr) {
                handshake = 0;
                *handshake_ptr = &handshake;
        } else
                handshake = -1;

        /*
         * Our state has been saved at this point.  Let's release our
         * inbound CPU.
         */
        mcpm_set_entry_vector(ib_cpu, ib_cluster, cpu_resume);
        sev();

        /*
         * From this point, we must assume that our counterpart CPU might
         * have taken over in its parallel world already, as if execution
         * just returned from cpu_suspend().  It is therefore important to
         * be very careful not to make any change the other guy is not
         * expecting.  This is why we need stack isolation.
         *
         * Fancy under cover tasks could be performed here.  For now
         * we have none.
         */

        /*
         * Let's wait until our inbound is alive.
         */
        while (!handshake) {
                wfe();
                smp_mb();
        }

        /* Let's put ourself down. */
        mcpm_cpu_power_down();

        /* should never get here */
        BUG();
}

/*
 * Stack isolation.  To ensure 'current' remains valid, we just use another
 * piece of our thread's stack space which should be fairly lightly used.
 * The selected area starts just above the thread_info structure located
 * at the very bottom of the stack, aligned to a cache line, and indexed
 * with the cluster number.
 */
#define STACK_SIZE 512
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
static int bL_switchpoint(unsigned long _arg)
{
        unsigned int mpidr = read_mpidr();
        unsigned int clusterid = MPIDR_AFFINITY_LEVEL(mpidr, 1);
        void *stack = current_thread_info() + 1;
        stack = PTR_ALIGN(stack, L1_CACHE_BYTES);
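        /*
         * The ARM stack grows downwards, so point at the high end of the
         * STACK_SIZE slot selected for this cluster.
         */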
        stack += clusterid * STACK_SIZE + STACK_SIZE;
        call_with_stack(bL_do_switch, (void *)_arg, stack);
        BUG();
}

/*
 * Generic switcher interface
 */

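/*
 * bL_gic_id[cpu][cluster] caches the GIC interface ID of each physical CPU,
 * and bL_switcher_cpu_pairing[] maps each logical CPU to its counterpart
 * in the other cluster (-1 when unpaired).
 */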
static unsigned int bL_gic_id[MAX_CPUS_PER_CLUSTER][MAX_NR_CLUSTERS];
static int bL_switcher_cpu_pairing[NR_CPUS];

/*
 * bL_switch_to - Switch to a specific cluster for the current CPU
 * @new_cluster_id: the ID of the cluster to switch to.
 *
 * This function must be called on the CPU to be switched.
 * Returns 0 on success, else a negative status code.
 */
static int bL_switch_to(unsigned int new_cluster_id)
{
        unsigned int mpidr, this_cpu, that_cpu;
        unsigned int ob_mpidr, ob_cpu, ob_cluster, ib_mpidr, ib_cpu, ib_cluster;
        struct completion inbound_alive;
        long volatile *handshake_ptr;
        int ipi_nr, ret;

        this_cpu = smp_processor_id();
        ob_mpidr = read_mpidr();
        ob_cpu = MPIDR_AFFINITY_LEVEL(ob_mpidr, 0);
        ob_cluster = MPIDR_AFFINITY_LEVEL(ob_mpidr, 1);
        BUG_ON(cpu_logical_map(this_cpu) != ob_mpidr);

        if (new_cluster_id == ob_cluster)
                return 0;

        that_cpu = bL_switcher_cpu_pairing[this_cpu];
        ib_mpidr = cpu_logical_map(that_cpu);
        ib_cpu = MPIDR_AFFINITY_LEVEL(ib_mpidr, 0);
        ib_cluster = MPIDR_AFFINITY_LEVEL(ib_mpidr, 1);

        pr_debug("before switch: CPU %d MPIDR %#x -> %#x\n",
                 this_cpu, ob_mpidr, ib_mpidr);

        this_cpu = smp_processor_id();

        /* Close the gate for our entry vectors */
        mcpm_set_entry_vector(ob_cpu, ob_cluster, NULL);
        mcpm_set_entry_vector(ib_cpu, ib_cluster, NULL);

        /* Install our "inbound alive" notifier. */
        init_completion(&inbound_alive);
        ipi_nr = register_ipi_completion(&inbound_alive, this_cpu);
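        /*
         * Fold the outbound CPU's GIC interface into the SGI target list
         * field (GICD_SGIR bits [23:16]) so that the early poke issued by
         * the inbound CPU is delivered back to us.
         */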
        ipi_nr |= ((1 << 16) << bL_gic_id[ob_cpu][ob_cluster]);
        mcpm_set_early_poke(ib_cpu, ib_cluster, gic_get_sgir_physaddr(), ipi_nr);

        /*
         * Let's wake up the inbound CPU now in case it requires some delay
         * to come online, but leave it gated in our entry vector code.
         */
        ret = mcpm_cpu_power_up(ib_cpu, ib_cluster);
        if (ret) {
                pr_err("%s: mcpm_cpu_power_up() returned %d\n", __func__, ret);
                return ret;
        }

        /*
         * Raise a SGI on the inbound CPU to make sure it doesn't stall
         * in a possible WFI, such as in bL_power_down().
         */
        gic_send_sgi(bL_gic_id[ib_cpu][ib_cluster], 0);

        /*
         * Wait for the inbound to come up.  This allows for other
         * tasks to be scheduled in the mean time.
         */
        wait_for_completion(&inbound_alive);
        mcpm_set_early_poke(ib_cpu, ib_cluster, 0, 0);

        /*
         * From this point we are entering the switch critical zone
         * and can't take any interrupts anymore.
         */
        local_irq_disable();
        local_fiq_disable();
        trace_cpu_migrate_begin(ktime_get_real_ns(), ob_mpidr);

        /* redirect GIC's SGIs to our counterpart */
        gic_migrate_target(bL_gic_id[ib_cpu][ib_cluster]);

        tick_suspend_local();

        ret = cpu_pm_enter();

        /* we can not tolerate errors at this point */
        if (ret)
                panic("%s: cpu_pm_enter() returned %d\n", __func__, ret);

        /* Swap the physical CPUs in the logical map for this logical CPU. */
        cpu_logical_map(this_cpu) = ib_mpidr;
        cpu_logical_map(that_cpu) = ob_mpidr;

        /* Let's do the actual CPU switch. */
        ret = cpu_suspend((unsigned long)&handshake_ptr, bL_switchpoint);
        if (ret > 0)
                panic("%s: cpu_suspend() returned %d\n", __func__, ret);

        /* We are executing on the inbound CPU at this point */
        mpidr = read_mpidr();
        pr_debug("after switch: CPU %d MPIDR %#x\n", this_cpu, mpidr);
        BUG_ON(mpidr != ib_mpidr);

        mcpm_cpu_powered_up();

        ret = cpu_pm_exit();

        tick_resume_local();

        trace_cpu_migrate_finish(ktime_get_real_ns(), ib_mpidr);
        local_fiq_enable();
        local_irq_enable();

        *handshake_ptr = 1;
        dsb_sev();

        if (ret)
                pr_err("%s exiting with error %d\n", __func__, ret);
        return ret;
}

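/*
 * Per-CPU switcher thread state.  The lock protects wanted_cluster,
 * completer and completer_cookie against concurrent switch requests.
 */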
struct bL_thread {
        spinlock_t lock;
        struct task_struct *task;
        wait_queue_head_t wq;
        int wanted_cluster;
        struct completion started;
        bL_switch_completion_handler completer;
        void *completer_cookie;
};

static struct bL_thread bL_threads[NR_CPUS];

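/*
 * Per-CPU kthread that carries out the switches.  It runs with the lowest
 * SCHED_FIFO priority so a pending switch request is not starved by
 * ordinary tasks on the same CPU.
 */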
static int bL_switcher_thread(void *arg)
{
        struct bL_thread *t = arg;
        struct sched_param param = { .sched_priority = 1 };
        int cluster;
        bL_switch_completion_handler completer;
        void *completer_cookie;

        sched_setscheduler_nocheck(current, SCHED_FIFO, &param);
        complete(&t->started);

        do {
                if (signal_pending(current))
                        flush_signals(current);
                wait_event_interruptible(t->wq,
                                t->wanted_cluster != -1 ||
                                kthread_should_stop());

                spin_lock(&t->lock);
                cluster = t->wanted_cluster;
                completer = t->completer;
                completer_cookie = t->completer_cookie;
                t->wanted_cluster = -1;
                t->completer = NULL;
                spin_unlock(&t->lock);

                if (cluster != -1) {
                        bL_switch_to(cluster);

                        if (completer)
                                completer(completer_cookie);
                }
        } while (!kthread_should_stop());

        return 0;
}

static struct task_struct *bL_switcher_thread_create(int cpu, void *arg)
{
        struct task_struct *task;

        task = kthread_create_on_node(bL_switcher_thread, arg,
                                      cpu_to_node(cpu), "kswitcher_%d", cpu);
        if (!IS_ERR(task)) {
                kthread_bind(task, cpu);
                wake_up_process(task);
        } else
                pr_err("%s failed for CPU %d\n", __func__, cpu);
        return task;
}

/*
 * bL_switch_request_cb - Switch to a specific cluster for the given CPU,
 *      with completion notification via a callback
 *
 * @cpu: the CPU to switch
 * @new_cluster_id: the ID of the cluster to switch to.
 * @completer: switch completion callback.  if non-NULL,
 *      @completer(@completer_cookie) will be called on completion of
 *      the switch, in non-atomic context.
 * @completer_cookie: opaque context argument for @completer.
 *
 * This function causes a cluster switch on the given CPU by waking up
 * the appropriate switcher thread.  This function may or may not return
 * before the switch has occurred.
 *
 * If a @completer callback function is supplied, it will be called when
 * the switch is complete.  This can be used to determine asynchronously
 * when the switch is complete, regardless of when bL_switch_request()
 * returns.  When @completer is supplied, no new switch request is permitted
 * for the affected CPU until after the switch is complete, and @completer
 * has returned.
 */
int bL_switch_request_cb(unsigned int cpu, unsigned int new_cluster_id,
                         bL_switch_completion_handler completer,
                         void *completer_cookie)
{
        struct bL_thread *t;

        if (cpu >= ARRAY_SIZE(bL_threads)) {
                pr_err("%s: cpu %d out of bounds\n", __func__, cpu);
                return -EINVAL;
        }

        t = &bL_threads[cpu];

        if (IS_ERR(t->task))
                return PTR_ERR(t->task);
        if (!t->task)
                return -ESRCH;

        spin_lock(&t->lock);
        if (t->completer) {
                spin_unlock(&t->lock);
                return -EBUSY;
        }
        t->completer = completer;
        t->completer_cookie = completer_cookie;
        t->wanted_cluster = new_cluster_id;
        spin_unlock(&t->lock);
        wake_up(&t->wq);
        return 0;
}
EXPORT_SYMBOL_GPL(bL_switch_request_cb);

/*
 * Activation and configuration code.
 */

static DEFINE_MUTEX(bL_switcher_activation_lock);
static BLOCKING_NOTIFIER_HEAD(bL_activation_notifier);
static unsigned int bL_switcher_active;
static unsigned int bL_switcher_cpu_original_cluster[NR_CPUS];
static cpumask_t bL_switcher_removed_logical_cpus;

int bL_switcher_register_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_register(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_register_notifier);

int bL_switcher_unregister_notifier(struct notifier_block *nb)
{
        return blocking_notifier_chain_unregister(&bL_activation_notifier, nb);
}
EXPORT_SYMBOL_GPL(bL_switcher_unregister_notifier);

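/*
 * Run the activation notifier chain and turn the result into a normal
 * error code.
 */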
static int bL_activation_notify(unsigned long val)
{
        int ret;

        ret = blocking_notifier_call_chain(&bL_activation_notifier, val, NULL);
        if (ret & NOTIFY_STOP_MASK)
                pr_err("%s: notifier chain failed with status 0x%x\n",
                        __func__, ret);
        return notifier_to_errno(ret);
}

static void bL_switcher_restore_cpus(void)
{
        int i;

        for_each_cpu(i, &bL_switcher_removed_logical_cpus) {
                struct device *cpu_dev = get_cpu_device(i);
                int ret = device_online(cpu_dev);
                if (ret)
                        dev_err(cpu_dev, "switcher: unable to restore CPU\n");
        }
}

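/*
 * Pair each logical CPU in one cluster with a counterpart in the other
 * cluster, then hot-unplug every CPU left without pairing information so
 * the scheduler only ever sees one logical CPU per pair.
 */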
static int bL_switcher_halve_cpus(void)
{
        int i, j, cluster_0, gic_id, ret;
        unsigned int cpu, cluster, mask;
        cpumask_t available_cpus;

        /* First pass to validate what we have */
        mask = 0;
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster >= 2) {
                        pr_err("%s: only dual cluster systems are supported\n", __func__);
                        return -EINVAL;
                }
                if (WARN_ON(cpu >= MAX_CPUS_PER_CLUSTER))
                        return -EINVAL;
                mask |= (1 << cluster);
        }
        if (mask != 3) {
                pr_err("%s: no CPU pairing possible\n", __func__);
                return -EINVAL;
        }

        /*
         * Now let's do the pairing.  We match each CPU with another CPU
         * from a different cluster.  To get a uniform scheduling behavior
         * without fiddling with CPU topology and compute capacity data,
         * we'll use logical CPUs initially belonging to the same cluster.
         */
        memset(bL_switcher_cpu_pairing, -1, sizeof(bL_switcher_cpu_pairing));
        cpumask_copy(&available_cpus, cpu_online_mask);
        cluster_0 = -1;
        for_each_cpu(i, &available_cpus) {
                int match = -1;
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);
                if (cluster_0 == -1)
                        cluster_0 = cluster;
                if (cluster != cluster_0)
                        continue;
                cpumask_clear_cpu(i, &available_cpus);
                for_each_cpu(j, &available_cpus) {
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(j), 1);
                        /*
                         * Let's remember the last match to create "odd"
                         * pairings on purpose in order for other code not
                         * to assume any relation between physical and
                         * logical CPU numbers.
                         */
                        if (cluster != cluster_0)
                                match = j;
                }
                if (match != -1) {
                        bL_switcher_cpu_pairing[i] = match;
                        cpumask_clear_cpu(match, &available_cpus);
                        pr_info("CPU%d paired with CPU%d\n", i, match);
                }
        }

        /*
         * Now we disable the unwanted CPUs i.e. everything that has no
         * pairing information (that includes the pairing counterparts).
         */
        cpumask_clear(&bL_switcher_removed_logical_cpus);
        for_each_online_cpu(i) {
                cpu = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 0);
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(i), 1);

                /* Let's take note of the GIC ID for this CPU */
                gic_id = gic_get_cpu_id(i);
                if (gic_id < 0) {
                        pr_err("%s: bad GIC ID for CPU %d\n", __func__, i);
                        bL_switcher_restore_cpus();
                        return -EINVAL;
                }
                bL_gic_id[cpu][cluster] = gic_id;
                pr_info("GIC ID for CPU %u cluster %u is %u\n",
                        cpu, cluster, gic_id);

                if (bL_switcher_cpu_pairing[i] != -1) {
                        bL_switcher_cpu_original_cluster[i] = cluster;
                        continue;
                }

                ret = device_offline(get_cpu_device(i));
                if (ret) {
                        bL_switcher_restore_cpus();
                        return ret;
                }
                cpumask_set_cpu(i, &bL_switcher_removed_logical_cpus);
        }

        return 0;
}

/* Determine the logical CPU a given physical CPU is grouped on. */
int bL_switcher_get_logical_index(u32 mpidr)
{
        int cpu;

        if (!bL_switcher_active)
                return -EUNATCH;

        mpidr &= MPIDR_HWID_BITMASK;
        for_each_online_cpu(cpu) {
                int pairing = bL_switcher_cpu_pairing[cpu];
                if (pairing == -1)
                        continue;
                if ((mpidr == cpu_logical_map(cpu)) ||
                    (mpidr == cpu_logical_map(pairing)))
                        return cpu;
        }
        return -EINVAL;
}

static void bL_switcher_trace_trigger_cpu(void *__always_unused info)
{
        trace_cpu_migrate_current(ktime_get_real_ns(), read_mpidr());
}

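/*
 * Emit a cpu_migrate_current trace event on every online CPU so that trace
 * consumers can resynchronise their view of the current CPU/cluster
 * assignments.
 */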
int bL_switcher_trace_trigger(void)
{
        int ret;

        preempt_disable();

        bL_switcher_trace_trigger_cpu(NULL);
        ret = smp_call_function(bL_switcher_trace_trigger_cpu, NULL, true);

        preempt_enable();

        return ret;
}
EXPORT_SYMBOL_GPL(bL_switcher_trace_trigger);

static int bL_switcher_enable(void)
{
        int cpu, ret;

        mutex_lock(&bL_switcher_activation_lock);
        lock_device_hotplug();
        if (bL_switcher_active) {
                unlock_device_hotplug();
                mutex_unlock(&bL_switcher_activation_lock);
                return 0;
        }

        pr_info("big.LITTLE switcher initializing\n");

        ret = bL_activation_notify(BL_NOTIFY_PRE_ENABLE);
        if (ret)
                goto error;

        ret = bL_switcher_halve_cpus();
        if (ret)
                goto error;

        bL_switcher_trace_trigger();

        for_each_online_cpu(cpu) {
                struct bL_thread *t = &bL_threads[cpu];
                spin_lock_init(&t->lock);
                init_waitqueue_head(&t->wq);
                init_completion(&t->started);
                t->wanted_cluster = -1;
                t->task = bL_switcher_thread_create(cpu, t);
        }

        bL_switcher_active = 1;
        bL_activation_notify(BL_NOTIFY_POST_ENABLE);
        pr_info("big.LITTLE switcher initialized\n");
        goto out;

error:
        pr_warn("big.LITTLE switcher initialization failed\n");
        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        unlock_device_hotplug();
        mutex_unlock(&bL_switcher_activation_lock);
        return ret;
}

#ifdef CONFIG_SYSFS

static void bL_switcher_disable(void)
{
        unsigned int cpu, cluster;
        struct bL_thread *t;
        struct task_struct *task;

        mutex_lock(&bL_switcher_activation_lock);
        lock_device_hotplug();

        if (!bL_switcher_active)
                goto out;

        if (bL_activation_notify(BL_NOTIFY_PRE_DISABLE) != 0) {
                bL_activation_notify(BL_NOTIFY_POST_ENABLE);
                goto out;
        }

        bL_switcher_active = 0;

        /*
         * To deactivate the switcher, we must shut down the switcher
         * threads to prevent any other requests from being accepted.
         * Then, if the final cluster for given logical CPU is not the
         * same as the original one, we'll recreate a switcher thread
         * just for the purpose of switching the CPU back without any
         * possibility for interference from external requests.
         */
        for_each_online_cpu(cpu) {
                t = &bL_threads[cpu];
                task = t->task;
                t->task = NULL;
                if (!task || IS_ERR(task))
                        continue;
                kthread_stop(task);
                /* no more switch may happen on this CPU at this point */
                cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                if (cluster == bL_switcher_cpu_original_cluster[cpu])
                        continue;
                init_completion(&t->started);
                t->wanted_cluster = bL_switcher_cpu_original_cluster[cpu];
                task = bL_switcher_thread_create(cpu, t);
                if (!IS_ERR(task)) {
                        wait_for_completion(&t->started);
                        kthread_stop(task);
                        cluster = MPIDR_AFFINITY_LEVEL(cpu_logical_map(cpu), 1);
                        if (cluster == bL_switcher_cpu_original_cluster[cpu])
                                continue;
                }
                /* If execution gets here, we're in trouble. */
                pr_crit("%s: unable to restore original cluster for CPU %d\n",
                        __func__, cpu);
                pr_crit("%s: CPU %d can't be restored\n",
                        __func__, bL_switcher_cpu_pairing[cpu]);
                cpumask_clear_cpu(bL_switcher_cpu_pairing[cpu],
                                  &bL_switcher_removed_logical_cpus);
        }

        bL_switcher_restore_cpus();
        bL_switcher_trace_trigger();

        bL_activation_notify(BL_NOTIFY_POST_DISABLE);

out:
        unlock_device_hotplug();
        mutex_unlock(&bL_switcher_activation_lock);
}

static ssize_t bL_switcher_active_show(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%u\n", bL_switcher_active);
}

static ssize_t bL_switcher_active_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret;

        switch (buf[0]) {
        case '0':
                bL_switcher_disable();
                ret = 0;
                break;
        case '1':
                ret = bL_switcher_enable();
                break;
        default:
                ret = -EINVAL;
        }

        return (ret >= 0) ? count : ret;
}

static ssize_t bL_switcher_trace_trigger_store(struct kobject *kobj,
                struct kobj_attribute *attr, const char *buf, size_t count)
{
        int ret = bL_switcher_trace_trigger();

        return ret ? ret : count;
}

static struct kobj_attribute bL_switcher_active_attr =
        __ATTR(active, 0644, bL_switcher_active_show, bL_switcher_active_store);

static struct kobj_attribute bL_switcher_trace_trigger_attr =
        __ATTR(trace_trigger, 0200, NULL, bL_switcher_trace_trigger_store);

static struct attribute *bL_switcher_attrs[] = {
        &bL_switcher_active_attr.attr,
        &bL_switcher_trace_trigger_attr.attr,
        NULL,
};

static struct attribute_group bL_switcher_attr_group = {
        .attrs = bL_switcher_attrs,
};

static struct kobject *bL_switcher_kobj;

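/*
 * The attributes above are exposed under /sys/kernel/bL_switcher/.
 * For example, "echo 0 > /sys/kernel/bL_switcher/active" disables the
 * switcher and brings the previously removed CPUs back online.
 */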
static int __init bL_switcher_sysfs_init(void)
{
        int ret;

        bL_switcher_kobj = kobject_create_and_add("bL_switcher", kernel_kobj);
        if (!bL_switcher_kobj)
                return -ENOMEM;
        ret = sysfs_create_group(bL_switcher_kobj, &bL_switcher_attr_group);
        if (ret)
                kobject_put(bL_switcher_kobj);
        return ret;
}

#endif  /* CONFIG_SYSFS */

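/*
 * bL_switcher_get_enabled() returns the activation state with the
 * activation lock held; callers must pair it with bL_switcher_put_enabled()
 * to release the lock, so the state cannot change in between.
 */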
bool bL_switcher_get_enabled(void)
{
        mutex_lock(&bL_switcher_activation_lock);

        return bL_switcher_active;
}
EXPORT_SYMBOL_GPL(bL_switcher_get_enabled);

void bL_switcher_put_enabled(void)
{
        mutex_unlock(&bL_switcher_activation_lock);
}
EXPORT_SYMBOL_GPL(bL_switcher_put_enabled);

/*
 * Veto any CPU hotplug operation on those CPUs we've removed
 * while the switcher is active.
 * We're just not ready to deal with that given the trickery involved.
 */
static int bL_switcher_cpu_pre(unsigned int cpu)
{
        int pairing;

        if (!bL_switcher_active)
                return 0;

        pairing = bL_switcher_cpu_pairing[cpu];

        if (pairing == -1)
                return -EINVAL;
        return 0;
}

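/*
 * Booting with no_bL_switcher=1 on the kernel command line keeps the
 * switcher disabled at boot; it can still be enabled later through the
 * sysfs "active" attribute.
 */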
static bool no_bL_switcher;
core_param(no_bL_switcher, no_bL_switcher, bool, 0644);

static int __init bL_switcher_init(void)
{
        int ret;

        if (!mcpm_is_available())
                return -ENODEV;

        cpuhp_setup_state_nocalls(CPUHP_ARM_BL_PREPARE, "arm/bl:prepare",
                                  bL_switcher_cpu_pre, NULL);
        ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "arm/bl:predown",
                                        NULL, bL_switcher_cpu_pre);
        if (ret < 0) {
                cpuhp_remove_state_nocalls(CPUHP_ARM_BL_PREPARE);
                pr_err("bL_switcher: Failed to allocate a hotplug state\n");
                return ret;
        }
        if (!no_bL_switcher) {
                ret = bL_switcher_enable();
                if (ret)
                        return ret;
        }

#ifdef CONFIG_SYSFS
        ret = bL_switcher_sysfs_init();
        if (ret)
                pr_err("%s: unable to create sysfs entry\n", __func__);
#endif

        return 0;
}

late_initcall(bL_switcher_init);