linux/arch/mips/kernel/sync-r4k.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 */
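
/*
 * Illustrative calling pattern, based on how arch/mips/kernel/smp.c
 * used this interface around the time of this code (a sketch, not a
 * contract): the boot CPU calls synchronise_count_master(cpu) from the
 * __cpu_up() path while the freshly started secondary calls
 * synchronise_count_slave(cpu) from start_secondary(), so exactly one
 * master/slave pair runs the rendezvous at a time.
 */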

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

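/*
 * Shared state for the rendezvous: initcount carries the Count value
 * sampled by the master over to the slave, while the two atomic
 * counters implement a two-phase barrier between exactly one master
 * and one slave.
 */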
static unsigned int initcount;
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);

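/*
 * COUNTON: how far ahead of the freshly written Count the Compare
 * register is programmed, so a timer interrupt arrives shortly after
 * synchronisation finishes.  NR_LOOPS: rendezvous passes; the early
 * passes prime the instruction cache, only the last one writes Count.
 */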
#define COUNTON 100
#define NR_LOOPS 3

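/*
 * Master side, run on the boot CPU: disable local interrupts, then
 * rendezvous with the one just-started secondary ('cpu') so that both
 * load the same value into their Count registers on the final pass.
 */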
void synchronise_count_master(int cpu)
{
	int i;
	unsigned long flags;

	pr_info("Synchronize counters for CPU %d: ", cpu);

	local_irq_save(flags);

	/*
	 * We loop a few times to get a primed instruction cache,
	 * then the last pass is more or less synchronised and
	 * the master and slaves each set their cycle counters to a known
	 * value all at once. This reduces the chance of having random offsets
	 * between the processors, and guarantees that the maximum
	 * delay between the cycle counters is never bigger than
	 * the latency of information-passing (cachelines) between
	 * two CPUs.
	 */

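	/*
	 * Per-pass handshake, driven by the two atomic counters
	 * (master M, slave S):
	 *
	 *   S: bumps count_count_start to 1, spins until it reads 2
	 *   M: sees 1, clears count_count_stop, bumps count_count_start to 2
	 *   both: write Count on the final pass
	 *   S: bumps count_count_stop to 1, spins until it reads 2
	 *   M: sees 1, clears count_count_start, bumps count_count_stop to 2
	 */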
	for (i = 0; i < NR_LOOPS; i++) {
		/* Wait for the slave to check in; it spins on '!= 2' */
		while (atomic_read(&count_count_start) != 1)
			mb();
		atomic_set(&count_count_stop, 0);
		smp_wmb();

		/* Let the slave write its count register */
		atomic_inc(&count_count_start);

		/* The value everyone will load into Count on the last pass */
		if (i == 1)
			initcount = read_c0_count();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		/*
		 * Wait for the slave to leave the synchronisation point:
		 */
		while (atomic_read(&count_count_stop) != 1)
			mb();
		atomic_set(&count_count_start, 0);
		smp_wmb();
		atomic_inc(&count_count_stop);
	}
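
	/*
	 * Count was just rewritten, so a previously programmed Compare
	 * value may now lie in the past; re-arm the timer relative to
	 * the new Count so the next tick still arrives (an inferred
	 * rationale, not stated in the original).
	 */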
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);

	local_irq_restore(flags);

	/*
	 * i386 code reported the skew here, but the
	 * count registers were almost certainly out of sync
	 * so no point in alarming people.
	 */
	pr_cont("done.\n");
}

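/*
 * Slave side, run on the secondary CPU being brought up: the mirror
 * image of the master's loop above, with the slave making the odd
 * increments (to 1) and the master the even ones (to 2).
 */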
void synchronise_count_slave(int cpu)
{
	int i;

	/*
	 * Not every CPU is online at the time this gets called,
	 * so we first wait for the master to say everyone is ready.
	 */

	for (i = 0; i < NR_LOOPS; i++) {
		atomic_inc(&count_count_start);
		while (atomic_read(&count_count_start) != 2)
			mb();

		/*
		 * Everyone initialises count in the last loop:
		 */
		if (i == NR_LOOPS - 1)
			write_c0_count(initcount);

		atomic_inc(&count_count_stop);
		while (atomic_read(&count_count_stop) != 2)
			mb();
	}
	/* Arrange for an interrupt in a short while */
	write_c0_compare(read_c0_count() + COUNTON);
}
#undef NR_LOOPS