linux/arch/powerpc/platforms/cell/spufs/sched.c
/* sched.c - SPU scheduler.
 *
 * Copyright (C) IBM 2005
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * 2006-03-31   NUMA domains added.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#undef DEBUG

#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define CREATE_TRACE_POINTS
#include "sputrace.h"

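/*
 * One runqueue per priority level, plus a bitmap of the non-empty
 * levels so the highest-priority waiter can be found with a single
 * find_first_bit().  nr_waiting counts contexts sitting on any of the
 * queues; the spu scheduler tick is only armed while it is non-zero.
 */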
struct spu_prio_array {
        DECLARE_BITMAP(bitmap, MAX_PRIO);
        struct list_head runq[MAX_PRIO];
        spinlock_t runq_lock;
        int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (aka nice level 0).
 */
#define NORMAL_PRIO             120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK           (10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs, maximum timeslice is 800 msecs.
 */
#define MIN_SPU_TIMESLICE       max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE       (100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO           (MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
        max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

/*
 * scale user-nice values [ -20 ... 0 ... 19 ] to time slice values:
 * [800ms ... 100ms ... 5ms]
 *
 * The higher a thread's priority, the bigger timeslices
 * it gets during one round of execution. But even the lowest
 * priority thread gets MIN_SPU_TIMESLICE worth of execution time.
 */
void spu_set_timeslice(struct spu_context *ctx)
{
        if (ctx->prio < NORMAL_PRIO)
                ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
        else
                ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}
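
/*
 * Worked example (illustration only, assuming HZ == 1000 and the
 * MAX_PRIO == 140 / MAX_RT_PRIO == 100 values of kernels of this
 * vintage, so one spu scheduler tick is 10 jiffies and
 * DEF_SPU_TIMESLICE is 10 ticks):
 *
 *      nice -20 (prio 100):  SCALE_PRIO(40, 100) = 80 ticks (800 ms)
 *      nice   0 (prio 120):  SCALE_PRIO(10, 120) = 10 ticks (100 ms)
 *      nice +19 (prio 139):  SCALE_PRIO(10, 139) =  1 tick  ( 10 ms)
 */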

/*
 * Update scheduling information from the owning thread.
 */
void __spu_update_sched_info(struct spu_context *ctx)
{
        /*
         * assert that the context is not on the runqueue, so it is safe
         * to change its scheduling parameters.
         */
        BUG_ON(!list_empty(&ctx->rq));

        /*
         * 32-bit assignments are atomic on powerpc, and we don't care about
         * memory ordering here because retrieving the controlling thread is
         * by definition racy.
         */
        ctx->tid = current->pid;

        /*
         * We do our own priority calculations, so we normally want
         * ->static_prio to start with. Unfortunately this field
         * contains junk for threads with a realtime scheduling
         * policy so we have to look at ->prio in this case.
         */
        if (rt_prio(current->prio))
                ctx->prio = current->prio;
        else
                ctx->prio = current->static_prio;
        ctx->policy = current->policy;

        /*
         * TODO: the context may be loaded, so we may need to activate
         * it again on a different node. But it shouldn't hurt anything
         * to update its parameters, because we know that the scheduler
         * is not actively looking at this field, since it is not on the
         * runqueue. The context will be rescheduled on the proper node
         * if it is timesliced or preempted.
         */
        cpumask_copy(&ctx->cpus_allowed, tsk_cpus_allowed(current));

        /* Save the current cpu id for spu interrupt routing. */
        ctx->last_ran = raw_smp_processor_id();
}

void spu_update_sched_info(struct spu_context *ctx)
{
        int node;

        if (ctx->state == SPU_STATE_RUNNABLE) {
                node = ctx->spu->node;

                /*
                 * Take list_mutex to sync with find_victim().
                 */
                mutex_lock(&cbe_spu_info[node].list_mutex);
                __spu_update_sched_info(ctx);
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        } else {
                __spu_update_sched_info(ctx);
        }
}

static int __node_allowed(struct spu_context *ctx, int node)
{
        if (nr_cpus_node(node)) {
                const struct cpumask *mask = cpumask_of_node(node);

                if (cpumask_intersects(mask, &ctx->cpus_allowed))
                        return 1;
        }

        return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
        int rval;

        spin_lock(&spu_prio->runq_lock);
        rval = __node_allowed(ctx, node);
        spin_unlock(&spu_prio->runq_lock);

        return rval;
}

void do_notify_spus_active(void)
{
        int node;

        /*
         * Wake up the active spu_contexts.
         *
         * When the awakened processes see that their "notify_active"
         * flag is set, they will call spu_switch_notify().
         */
        for_each_online_node(node) {
                struct spu *spu;

                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        if (spu->alloc_state != SPU_FREE) {
                                struct spu_context *ctx = spu->ctx;
                                set_bit(SPU_SCHED_NOTIFY_ACTIVE,
                                        &ctx->sched_flags);
                                mb();
                                wake_up_all(&ctx->stop_wq);
                        }
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
}

/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:        physical spu to bind to
 * @ctx:        context to bind
 */
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
        spu_context_trace(spu_bind_context__enter, ctx, spu);

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        if (ctx->flags & SPU_CREATE_NOSCHED)
                atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

        ctx->stats.slb_flt_base = spu->stats.slb_flt;
        ctx->stats.class2_intr_base = spu->stats.class2_intr;

        spu_associate_mm(spu, ctx->owner);

        spin_lock_irq(&spu->register_lock);
        spu->ctx = ctx;
        spu->flags = 0;
        ctx->spu = spu;
        ctx->ops = &spu_hw_ops;
        spu->pid = current->pid;
        spu->tgid = current->tgid;
        spu->ibox_callback = spufs_ibox_callback;
        spu->wbox_callback = spufs_wbox_callback;
        spu->stop_callback = spufs_stop_callback;
        spu->mfc_callback = spufs_mfc_callback;
        spin_unlock_irq(&spu->register_lock);

        spu_unmap_mappings(ctx);

        spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
        spu_restore(&ctx->csa, spu);
        spu->timestamp = jiffies;
        spu_switch_notify(spu, ctx);
        ctx->state = SPU_STATE_RUNNABLE;

        spuctx_switch_state(ctx, SPU_UTIL_USER);
}

/*
 * Must be used with the list_mutex held.
 */
static inline int sched_spu(struct spu *spu)
{
        BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

        return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

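/*
 * Gang members that were created without an explicit affinity link are
 * appended to the gang's affinity list here, so that every context in
 * the gang ends up with an aff_offset relative to the reference
 * context.
 */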
static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
        struct spu_context *ctx;

        list_for_each_entry(ctx, &gang->list, gang_list) {
                if (list_empty(&ctx->aff_list))
                        list_add(&ctx->aff_list, &gang->aff_list_head);
        }
        gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
        struct spu_context *ctx;
        int offset;

        offset = -1;
        list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
                                                                aff_list) {
                if (&ctx->aff_list == &gang->aff_list_head)
                        break;
                ctx->aff_offset = offset--;
        }

        offset = 0;
        list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
                if (&ctx->aff_list == &gang->aff_list_head)
                        break;
                ctx->aff_offset = offset++;
        }

        gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
                 int group_size, int lowest_offset)
{
        struct spu *spu;
        int node, n;

        /*
         * TODO: A better algorithm could be used to find a good spu to be
         *       used as reference location for the ctxs chain.
         */
        node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                /*
                 * "available_spus" counts how many spus are not potentially
                 * going to be used by other affinity gangs whose reference
                 * context is already in place. Although this code seeks to
                 * avoid having affinity gangs with a summed amount of
                 * contexts bigger than the amount of spus in the node,
                 * this may happen sporadically. In this case, available_spus
                 * becomes negative, which is harmless.
                 */
                int available_spus;

                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(ctx, node))
                        continue;

                available_spus = 0;
                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
                                        && spu->ctx->gang->aff_ref_spu)
                                available_spus -= spu->ctx->gang->contexts;
                        available_spus++;
                }
                if (available_spus < ctx->gang->contexts) {
                        mutex_unlock(&cbe_spu_info[node].list_mutex);
                        continue;
                }

                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        if ((!mem_aff || spu->has_mem_affinity) &&
                                                        sched_spu(spu)) {
                                mutex_unlock(&cbe_spu_info[node].list_mutex);
                                return spu;
                        }
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
        return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
        int mem_aff, gs, lowest_offset;
        struct spu_context *ctx;
        struct spu *tmp;

        mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
        lowest_offset = 0;
        gs = 0;

        list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
                gs++;

        list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
                                                                aff_list) {
                if (&ctx->aff_list == &gang->aff_list_head)
                        break;
                lowest_offset = ctx->aff_offset;
        }

        gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
                                                        lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
        struct spu *spu;

        spu = NULL;
        if (offset >= 0) {
                list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
                        BUG_ON(spu->node != node);
                        if (offset == 0)
                                break;
                        if (sched_spu(spu))
                                offset--;
                }
        } else {
                list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
                        BUG_ON(spu->node != node);
                        if (offset == 0)
                                break;
                        if (sched_spu(spu))
                                offset++;
                }
        }

        return spu;
}
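
/*
 * Example: a context with aff_offset == 2 is placed two schedulable
 * SPUs after the gang's reference SPU, and aff_offset == -1 one
 * schedulable SPU before it.  NOSCHED SPUs are skipped while counting
 * (see sched_spu() above).
 */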

/*
 * has_affinity is called each time a context is going to be scheduled.
 * It returns nonzero if the gang has a reference SPU from which the
 * context's placement can be derived.
 */
static int has_affinity(struct spu_context *ctx)
{
        struct spu_gang *gang = ctx->gang;

        if (list_empty(&ctx->aff_list))
                return 0;

        if (atomic_read(&ctx->gang->aff_sched_count) == 0)
                ctx->gang->aff_ref_spu = NULL;

        if (!gang->aff_ref_spu) {
                if (!(gang->aff_flags & AFF_MERGED))
                        aff_merge_remaining_ctxs(gang);
                if (!(gang->aff_flags & AFF_OFFSETS_SET))
                        aff_set_offsets(gang);
                aff_set_ref_point_location(gang);
        }

        return gang->aff_ref_spu != NULL;
}

/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:        physical spu to unbind from
 * @ctx:        context to unbind
 */
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
        u32 status;

        spu_context_trace(spu_unbind_context__enter, ctx, spu);

        spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

        if (spu->ctx->flags & SPU_CREATE_NOSCHED)
                atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

        if (ctx->gang)
                /*
                 * If ctx->gang->aff_sched_count is positive, SPU affinity is
                 * being considered in this gang. Using atomic_dec_if_positive
                 * allows us to skip an explicit check for affinity in this
                 * gang.
                 */
                atomic_dec_if_positive(&ctx->gang->aff_sched_count);

        spu_switch_notify(spu, NULL);
        spu_unmap_mappings(ctx);
        spu_save(&ctx->csa, spu);
        spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

        spin_lock_irq(&spu->register_lock);
        spu->timestamp = jiffies;
        ctx->state = SPU_STATE_SAVED;
        spu->ibox_callback = NULL;
        spu->wbox_callback = NULL;
        spu->stop_callback = NULL;
        spu->mfc_callback = NULL;
        spu->pid = 0;
        spu->tgid = 0;
        ctx->ops = &spu_backing_ops;
        spu->flags = 0;
        spu->ctx = NULL;
        spin_unlock_irq(&spu->register_lock);

        spu_associate_mm(spu, NULL);

        ctx->stats.slb_flt +=
                (spu->stats.slb_flt - ctx->stats.slb_flt_base);
        ctx->stats.class2_intr +=
                (spu->stats.class2_intr - ctx->stats.class2_intr_base);

        /* This maps the underlying spu state to idle */
        spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
        ctx->spu = NULL;

        if (spu_stopped(ctx, &status))
                wake_up_all(&ctx->stop_wq);
}

/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:       context to add
 */
static void __spu_add_to_rq(struct spu_context *ctx)
{
        /*
         * Unfortunately this code path can be called from multiple threads
         * on behalf of a single context due to the way the problem state
         * mmap support works.
         *
         * Fortunately we need to wake up all these threads at the same time
         * and can simply skip the runqueue addition for all but the first
         * thread getting into this codepath.
         *
         * It's still quite hacky, and long-term we should proxy all other
         * threads through the owner thread so that spu_run is in control
         * of all the scheduling activity for a given context.
         */
        if (list_empty(&ctx->rq)) {
                list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
                set_bit(ctx->prio, spu_prio->bitmap);
                if (!spu_prio->nr_waiting++)
                        mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
        }
}

static void spu_add_to_rq(struct spu_context *ctx)
{
        spin_lock(&spu_prio->runq_lock);
        __spu_add_to_rq(ctx);
        spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
        int prio = ctx->prio;

        if (!list_empty(&ctx->rq)) {
                if (!--spu_prio->nr_waiting)
                        del_timer(&spusched_timer);
                list_del_init(&ctx->rq);

                if (list_empty(&spu_prio->runq[prio]))
                        clear_bit(prio, spu_prio->bitmap);
        }
}

void spu_del_from_rq(struct spu_context *ctx)
{
        spin_lock(&spu_prio->runq_lock);
        __spu_del_from_rq(ctx);
        spin_unlock(&spu_prio->runq_lock);
}

static void spu_prio_wait(struct spu_context *ctx)
{
        DEFINE_WAIT(wait);

        /*
         * The caller must explicitly wait for a context to be loaded
         * if the nosched flag is set.  If NOSCHED is not set, the caller
         * queues the context and waits for an spu event or error.
         */
        BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

        spin_lock(&spu_prio->runq_lock);
        prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
        if (!signal_pending(current)) {
                __spu_add_to_rq(ctx);
                spin_unlock(&spu_prio->runq_lock);
                mutex_unlock(&ctx->state_mutex);
                schedule();
                mutex_lock(&ctx->state_mutex);
                spin_lock(&spu_prio->runq_lock);
                __spu_del_from_rq(ctx);
        }
        spin_unlock(&spu_prio->runq_lock);
        __set_current_state(TASK_RUNNING);
        remove_wait_queue(&ctx->stop_wq, &wait);
}

static struct spu *spu_get_idle(struct spu_context *ctx)
{
        struct spu *spu, *aff_ref_spu;
        int node, n;

        spu_context_nospu_trace(spu_get_idle__enter, ctx);

        if (ctx->gang) {
                mutex_lock(&ctx->gang->aff_mutex);
                if (has_affinity(ctx)) {
                        aff_ref_spu = ctx->gang->aff_ref_spu;
                        atomic_inc(&ctx->gang->aff_sched_count);
                        mutex_unlock(&ctx->gang->aff_mutex);
                        node = aff_ref_spu->node;

                        mutex_lock(&cbe_spu_info[node].list_mutex);
                        spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
                        if (spu && spu->alloc_state == SPU_FREE)
                                goto found;
                        mutex_unlock(&cbe_spu_info[node].list_mutex);

                        atomic_dec(&ctx->gang->aff_sched_count);
                        goto not_found;
                }
                mutex_unlock(&ctx->gang->aff_mutex);
        }
        node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(ctx, node))
                        continue;

                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        if (spu->alloc_state == SPU_FREE)
                                goto found;
                }
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }

 not_found:
        spu_context_nospu_trace(spu_get_idle__not_found, ctx);
        return NULL;

 found:
        spu->alloc_state = SPU_USED;
        mutex_unlock(&cbe_spu_info[node].list_mutex);
        spu_context_trace(spu_get_idle__found, ctx, spu);
        spu_init_channels(spu);
        return spu;
}

/**
 * find_victim - find a lower priority context to preempt
 * @ctx:        candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */
static struct spu *find_victim(struct spu_context *ctx)
{
        struct spu_context *victim = NULL;
        struct spu *spu;
        int node, n;

        spu_context_nospu_trace(spu_find_victim__enter, ctx);

        /*
         * Look for a possible preemption candidate on the local node first.
         * If there is no candidate look at the other nodes.  This isn't
         * exactly fair, but so far the whole spu scheduler tries to keep
         * a strong node affinity.  We might want to fine-tune this in
         * the future.
         */
 restart:
        node = cpu_to_node(raw_smp_processor_id());
        for (n = 0; n < MAX_NUMNODES; n++, node++) {
                node = (node < MAX_NUMNODES) ? node : 0;
                if (!node_allowed(ctx, node))
                        continue;

                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
                        struct spu_context *tmp = spu->ctx;

                        if (tmp && tmp->prio > ctx->prio &&
                            !(tmp->flags & SPU_CREATE_NOSCHED) &&
                            (!victim || tmp->prio > victim->prio)) {
                                victim = spu->ctx;
                        }
                }
                if (victim)
                        get_spu_context(victim);
                mutex_unlock(&cbe_spu_info[node].list_mutex);

                if (victim) {
                        /*
                         * This nests ctx->state_mutex, but we always lock
                         * higher priority contexts before lower priority
                         * ones, so this is safe until we introduce
                         * priority inheritance schemes.
                         *
                         * XXX if the highest priority context is locked,
                         * this can loop a long time.  Might be better to
                         * look at another context or give up after X retries.
                         */
                        if (!mutex_trylock(&victim->state_mutex)) {
                                put_spu_context(victim);
                                victim = NULL;
                                goto restart;
                        }

                        spu = victim->spu;
                        if (!spu || victim->prio <= ctx->prio) {
                                /*
                                 * This race can happen because we've dropped
                                 * the active list mutex.  Not a problem, just
                                 * restart the search.
                                 */
                                mutex_unlock(&victim->state_mutex);
                                put_spu_context(victim);
                                victim = NULL;
                                goto restart;
                        }

                        spu_context_trace(__spu_deactivate__unload, ctx, spu);

                        mutex_lock(&cbe_spu_info[node].list_mutex);
                        cbe_spu_info[node].nr_active--;
                        spu_unbind_context(spu, victim);
                        mutex_unlock(&cbe_spu_info[node].list_mutex);

                        victim->stats.invol_ctx_switch++;
                        spu->stats.invol_ctx_switch++;
                        if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
                                spu_add_to_rq(victim);

                        mutex_unlock(&victim->state_mutex);
                        put_spu_context(victim);

                        return spu;
                }
        }

        return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
        int node = spu->node;
        int success = 0;

        spu_set_timeslice(ctx);

        mutex_lock(&cbe_spu_info[node].list_mutex);
        if (spu->ctx == NULL) {
                spu_bind_context(spu, ctx);
                cbe_spu_info[node].nr_active++;
                spu->alloc_state = SPU_USED;
                success = 1;
        }
        mutex_unlock(&cbe_spu_info[node].list_mutex);

        if (success)
                wake_up_all(&ctx->run_wq);
        else
                spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
        /* not a candidate for interruptible because it's called either
           from the scheduler thread or from spu_deactivate */
        mutex_lock(&ctx->state_mutex);
        if (ctx->state == SPU_STATE_SAVED)
                __spu_schedule(spu, ctx);
        spu_release(ctx);
}

/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu:        The SPU to unschedule from
 * @ctx:        The context currently scheduled on the SPU
 * @free_spu:   Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu. If @free_spu is non-zero, the
 * SPU is made available for other contexts (ie, may be returned by
 * spu_get_idle). If this is zero, the caller is expected to schedule another
 * context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
                int free_spu)
{
        int node = spu->node;

        mutex_lock(&cbe_spu_info[node].list_mutex);
        cbe_spu_info[node].nr_active--;
        if (free_spu)
                spu->alloc_state = SPU_FREE;
        spu_unbind_context(spu, ctx);
        ctx->stats.invol_ctx_switch++;
        spu->stats.invol_ctx_switch++;
        mutex_unlock(&cbe_spu_info[node].list_mutex);
}

/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:        spu context to schedule
 * @flags:      flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
        struct spu *spu;

        /*
         * If there are multiple threads waiting for a single context
         * only one actually binds the context while the others will
         * only be able to acquire the state_mutex once the context
         * already is in runnable state.
         */
        if (ctx->spu)
                return 0;

spu_activate_top:
        if (signal_pending(current))
                return -ERESTARTSYS;

        spu = spu_get_idle(ctx);
        /*
         * If this is a realtime thread we try to get it running by
         * preempting a lower priority thread.
         */
        if (!spu && rt_prio(ctx->prio))
                spu = find_victim(ctx);
        if (spu) {
                unsigned long runcntl;

                runcntl = ctx->ops->runcntl_read(ctx);
                __spu_schedule(spu, ctx);
                if (runcntl & SPU_RUNCNTL_RUNNABLE)
                        spuctx_switch_state(ctx, SPU_UTIL_USER);

                return 0;
        }

        if (ctx->flags & SPU_CREATE_NOSCHED) {
                spu_prio_wait(ctx);
                goto spu_activate_top;
        }

        spu_add_to_rq(ctx);

        return 0;
}

/**
 * grab_runnable_context - try to find a runnable context
 * @prio:       search limit; only contexts with a better (numerically
 *              lower) priority than this are considered
 * @node:       node on which the returned context must be allowed to run
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */
static struct spu_context *grab_runnable_context(int prio, int node)
{
        struct spu_context *ctx;
        int best;

        spin_lock(&spu_prio->runq_lock);
        best = find_first_bit(spu_prio->bitmap, prio);
        while (best < prio) {
                struct list_head *rq = &spu_prio->runq[best];

                list_for_each_entry(ctx, rq, rq) {
                        /* XXX(hch): check for affinity here as well */
                        if (__node_allowed(ctx, node)) {
                                __spu_del_from_rq(ctx);
                                goto found;
                        }
                }
                best++;
        }
        ctx = NULL;
 found:
        spin_unlock(&spu_prio->runq_lock);
        return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
        struct spu *spu = ctx->spu;
        struct spu_context *new = NULL;

        if (spu) {
                new = grab_runnable_context(max_prio, spu->node);
                if (new || force) {
                        spu_unschedule(spu, ctx, new == NULL);
                        if (new) {
                                if (new->flags & SPU_CREATE_NOSCHED)
                                        wake_up(&new->stop_wq);
                                else {
                                        spu_release(ctx);
                                        spu_schedule(spu, new);
                                        /* this one can't easily be made
                                           interruptible */
                                        mutex_lock(&ctx->state_mutex);
                                }
                        }
                }
        }

        return new != NULL;
}

/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:        spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */
void spu_deactivate(struct spu_context *ctx)
{
        spu_context_nospu_trace(spu_deactivate__enter, ctx);
        __spu_deactivate(ctx, 1, MAX_PRIO);
}

/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:        spu context to yield
 *
 * Check if there is a higher priority context waiting and if yes
 * unbind @ctx from the physical spu and schedule the highest
 * priority context to run on the freed physical spu instead.
 */
void spu_yield(struct spu_context *ctx)
{
        spu_context_nospu_trace(spu_yield__enter, ctx);
        if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
                mutex_lock(&ctx->state_mutex);
                __spu_deactivate(ctx, 0, MAX_PRIO);
                mutex_unlock(&ctx->state_mutex);
        }
}

static noinline void spusched_tick(struct spu_context *ctx)
{
        struct spu_context *new = NULL;
        struct spu *spu = NULL;

        if (spu_acquire(ctx))
                BUG();  /* a kernel thread never has signals pending */

        if (ctx->state != SPU_STATE_RUNNABLE)
                goto out;
        if (ctx->flags & SPU_CREATE_NOSCHED)
                goto out;
        if (ctx->policy == SCHED_FIFO)
                goto out;

        if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
                goto out;

        spu = ctx->spu;

        spu_context_trace(spusched_tick__preempt, ctx, spu);

        new = grab_runnable_context(ctx->prio + 1, spu->node);
        if (new) {
                spu_unschedule(spu, ctx, 0);
                if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
                        spu_add_to_rq(ctx);
        } else {
                spu_context_nospu_trace(spusched_tick__newslice, ctx);
                if (!ctx->time_slice)
                        ctx->time_slice++;
        }
out:
        spu_release(ctx);

        if (new)
                spu_schedule(spu, new);
}

/**
 * count_active_contexts - count the number of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32-bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */
static unsigned long count_active_contexts(void)
{
        int nr_active = 0, node;

        for (node = 0; node < MAX_NUMNODES; node++)
                nr_active += cbe_spu_info[node].nr_active;
        nr_active += spu_prio->nr_waiting;

        return nr_active;
}

/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */
static void spu_calc_load(void)
{
        unsigned long active_tasks; /* fixed-point */

        active_tasks = count_active_contexts() * FIXED_1;
        CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
        CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
        CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
}
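
/*
 * CALC_LOAD, FIXED_1, EXP_* and LOAD_FREQ come from <linux/sched.h>.
 * For illustration, the generic macro is the usual fixed-point
 * exponential decay (sketch, assuming the classic FSHIFT == 11
 * definitions, i.e. FIXED_1 == 2048):
 *
 *      load = (load * exp + n * (FIXED_1 - exp)) >> FSHIFT;
 *
 * where EXP_1, EXP_5 and EXP_15 approximate FIXED_1 * exp(-5s/1min),
 * exp(-5s/5min) and exp(-5s/15min), sampled every LOAD_FREQ ticks
 * (5 seconds).
 */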

static void spusched_wake(unsigned long data)
{
        mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
        wake_up_process(spusched_task);
}

static void spuloadavg_wake(unsigned long data)
{
        mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
        spu_calc_load();
}

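/*
 * The scheduler thread: woken once per scheduler tick by
 * spusched_wake(), it walks every loaded context on every node and
 * applies spusched_tick() to it.  get_spu_context()/put_spu_context()
 * pin the context so that it cannot go away while list_mutex is
 * dropped across the tick (spusched_tick() needs ctx->state_mutex).
 */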
static int spusched_thread(void *unused)
{
        struct spu *spu;
        int node;

        while (!kthread_should_stop()) {
                set_current_state(TASK_INTERRUPTIBLE);
                schedule();
                for (node = 0; node < MAX_NUMNODES; node++) {
                        struct mutex *mtx = &cbe_spu_info[node].list_mutex;

                        mutex_lock(mtx);
                        list_for_each_entry(spu, &cbe_spu_info[node].spus,
                                        cbe_list) {
                                struct spu_context *ctx = spu->ctx;

                                if (ctx) {
                                        get_spu_context(ctx);
                                        mutex_unlock(mtx);
                                        spusched_tick(ctx);
                                        mutex_lock(mtx);
                                        put_spu_context(ctx);
                                }
                        }
                        mutex_unlock(mtx);
                }
        }

        return 0;
}

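/*
 * Record a utilization state change for @ctx and, while the context is
 * loaded, charge the elapsed time to both the context's and the
 * physical SPU's statistics.  Must be called with ctx->state_mutex
 * held.
 */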
void spuctx_switch_state(struct spu_context *ctx,
                enum spu_utilization_state new_state)
{
        unsigned long long curtime;
        signed long long delta;
        struct timespec ts;
        struct spu *spu;
        enum spu_utilization_state old_state;
        int node;

        ktime_get_ts(&ts);
        curtime = timespec_to_ns(&ts);
        delta = curtime - ctx->stats.tstamp;

        WARN_ON(!mutex_is_locked(&ctx->state_mutex));
        WARN_ON(delta < 0);

        spu = ctx->spu;
        old_state = ctx->stats.util_state;
        ctx->stats.util_state = new_state;
        ctx->stats.tstamp = curtime;

        /*
         * Update the physical SPU utilization statistics.
         */
        if (spu) {
                ctx->stats.times[old_state] += delta;
                spu->stats.times[old_state] += delta;
                spu->stats.util_state = new_state;
                spu->stats.tstamp = curtime;
                node = spu->node;
                if (old_state == SPU_UTIL_USER)
                        atomic_dec(&cbe_spu_info[node].busy_spus);
                if (new_state == SPU_UTIL_USER)
                        atomic_inc(&cbe_spu_info[node].busy_spus);
        }
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

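/*
 * Example (assuming FSHIFT == 11, so FIXED_1 == 2048): an avenrun
 * value of 2150 prints as "1.04" -- LOAD_INT(2150) == 1 and
 * LOAD_FRAC(2150) == (102 * 100) >> 11 == 4.  The FIXED_1/200 added
 * below rounds to the nearest hundredth.
 */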
static int show_spu_loadavg(struct seq_file *s, void *private)
{
        int a, b, c;

        a = spu_avenrun[0] + (FIXED_1/200);
        b = spu_avenrun[1] + (FIXED_1/200);
        c = spu_avenrun[2] + (FIXED_1/200);

        /*
         * Note that last_pid doesn't really make much sense for the
         * SPU loadavg (it even seems very odd on the CPU side...),
         * but we include it here to have a 100% compatible interface.
         */
        seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
                LOAD_INT(a), LOAD_FRAC(a),
                LOAD_INT(b), LOAD_FRAC(b),
                LOAD_INT(c), LOAD_FRAC(c),
                count_active_contexts(),
                atomic_read(&nr_spu_contexts),
                current->nsproxy->pid_ns->last_pid);
        return 0;
}
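
/*
 * The output mirrors /proc/loadavg; a hypothetical example:
 *
 *      0.08 0.03 0.01 1/12 1193
 *
 * i.e. 1/5/15 minute load averages, active over total spu contexts,
 * and the pid namespace's last_pid.
 */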

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
        return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
        .open           = spu_loadavg_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

int __init spu_sched_init(void)
{
        struct proc_dir_entry *entry;
        int err = -ENOMEM, i;

        spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
        if (!spu_prio)
                goto out;

        for (i = 0; i < MAX_PRIO; i++) {
                INIT_LIST_HEAD(&spu_prio->runq[i]);
                __clear_bit(i, spu_prio->bitmap);
        }
        spin_lock_init(&spu_prio->runq_lock);

        setup_timer(&spusched_timer, spusched_wake, 0);
        setup_timer(&spuloadavg_timer, spuloadavg_wake, 0);

        spusched_task = kthread_run(spusched_thread, NULL, "spusched");
        if (IS_ERR(spusched_task)) {
                err = PTR_ERR(spusched_task);
                goto out_free_spu_prio;
        }

        mod_timer(&spuloadavg_timer, 0);

        entry = proc_create("spu_loadavg", 0, NULL, &spu_loadavg_fops);
        if (!entry)
                goto out_stop_kthread;

        pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
                        SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
        return 0;

 out_stop_kthread:
        kthread_stop(spusched_task);
 out_free_spu_prio:
        kfree(spu_prio);
 out:
        return err;
}

void spu_sched_exit(void)
{
        struct spu *spu;
        int node;

        remove_proc_entry("spu_loadavg", NULL);

        del_timer_sync(&spusched_timer);
        del_timer_sync(&spuloadavg_timer);
        kthread_stop(spusched_task);

        for (node = 0; node < MAX_NUMNODES; node++) {
                mutex_lock(&cbe_spu_info[node].list_mutex);
                list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
                        if (spu->alloc_state != SPU_FREE)
                                spu->alloc_state = SPU_FREE;
                mutex_unlock(&cbe_spu_info[node].list_mutex);
        }
        kfree(spu_prio);
}