/*
 * sched.c - SPU scheduler.
 */

#undef DEBUG

#include <linux/errno.h>
#include <linux/sched/signal.h>
#include <linux/sched/loadavg.h>
#include <linux/sched/rt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"
#define CREATE_TRACE_POINTS
#include "sputrace.h"

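/*
 * Run queue of contexts waiting for an SPU: one list per priority level,
 * plus a bitmap of non-empty levels for fast lookup.
 */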
struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;
static struct timer_list spuloadavg_timer;

/*
 * Priority of a normal, non-rt, non-niced process (i.e. nice level 0).
 */
#define NORMAL_PRIO		120

/*
 * Frequency of the spu scheduler tick.  By default we do one SPU scheduler
 * tick for every 10 CPU scheduler ticks.
 */
#define SPUSCHED_TICK		(10)

/*
 * These are the 'tuning knobs' of the scheduler:
 *
 * Minimum timeslice is 5 msecs (or 1 spu scheduler tick, whichever is
 * larger), default timeslice is 100 msecs.
 */
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

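/*
 * spu_set_timeslice - scale a context's time slice by its priority.
 *
 * Contexts with better-than-normal priority (prio < NORMAL_PRIO) start from
 * four default timeslices, all others from one; SCALE_PRIO then scales by
 * priority but never goes below MIN_SPU_TIMESLICE.
 */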
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

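/*
 * Update scheduling information from the owning thread.  The caller must
 * guarantee that the context is not on the run queue.
 */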
void __spu_update_sched_info(struct spu_context *ctx)
{
	/*
	 * Assert that the context is not on the runqueue, so it is safe
	 * to change its scheduling parameters.
	 */
	BUG_ON(!list_empty(&ctx->rq));

	/*
	 * 32-bit assignments are atomic on powerpc, and we don't care about
	 * memory ordering here because retrieving the controlling thread is
	 * per definition racy.
	 */
	ctx->tid = current->pid;

	/*
	 * We do our own priority calculations, so we normally want
	 * ->static_prio to start with.  Unfortunately this field
	 * contains junk for threads with a realtime scheduling
	 * policy so we have to look at ->prio in this case.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * The context may already be loaded on an SPU, but it does not hurt
	 * to update these parameters: the scheduler is not looking at them
	 * while the context is off the runqueue, and the context will be
	 * placed on a suitable node again when it is timesliced or preempted.
	 */
	cpumask_copy(&ctx->cpus_allowed, current->cpus_ptr);

	/* Save the current cpu id for spu interrupt routing. */
	ctx->last_ran = raw_smp_processor_id();
}

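/*
 * spu_update_sched_info - locked wrapper around __spu_update_sched_info().
 */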
void spu_update_sched_info(struct spu_context *ctx)
{
	int node;

	if (ctx->state == SPU_STATE_RUNNABLE) {
		node = ctx->spu->node;

		/*
		 * Take list_mutex to sync with find_victim().
		 */
		mutex_lock(&cbe_spu_info[node].list_mutex);
		__spu_update_sched_info(ctx);
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	} else {
		__spu_update_sched_info(ctx);
	}
}

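/*
 * node_allowed - check whether the context's cpus_allowed mask permits it
 * to run on an SPU of the given NUMA node.
 */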
static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		const struct cpumask *mask = cpumask_of_node(node);

		if (cpumask_intersects(mask, &ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

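/*
 * do_notify_spus_active - wake up every loaded context so it notices that
 * switch notification has been requested (SPU_SCHED_NOTIFY_ACTIVE is set
 * before the wakeup).
 */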
void do_notify_spus_active(void)
{
	int node;

	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

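/**
 * spu_bind_context - bind spu context to physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 */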
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	spu_context_trace(spu_bind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu_associate_mm(spu, ctx->owner);

	spin_lock_irq(&spu->register_lock);
	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spin_unlock_irq(&spu->register_lock);

	spu_unmap_mappings(ctx);

	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_USER);
}

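/*
 * Must be used with the list_mutex held.
 */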
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

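/*
 * aff_ref_location - search for a node with enough free SPUs to hold the
 * whole gang and return the first schedulable SPU on it (honouring memory
 * affinity if requested), to serve as the gang's affinity reference
 * location.  Returns NULL if no node fits.
 */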
static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * TODO: A better algorithm could be used to find a good spu to be
	 *       used as reference location for the ctxs chain.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		/*
		 * "available_spus" counts how many spus are not potentially
		 * going to be used by other affinity gangs whose reference
		 * context is already in place.  Although this code seeks to
		 * avoid having affinity gangs with a summed amount of
		 * contexts bigger than the amount of spus in the node,
		 * this may happen sporadically.  In this case, available_spus
		 * becomes negative, which is harmless.
		 */
		int available_spus;

		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		available_spus = 0;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->ctx && spu->ctx->gang && !spu->ctx->aff_offset
					&& spu->ctx->gang->aff_ref_spu)
				available_spus -= spu->ctx->gang->contexts;
			available_spus++;
		}
		if (available_spus < ctx->gang->contexts) {
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			continue;
		}

		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
							sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
								aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
							lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

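/*
 * has_affinity - check whether a context participates in SPU affinity.
 *
 * Called each time a context is about to be scheduled.  If the context is
 * on an affinity list and the gang's reference SPU has not been chosen yet,
 * merge the remaining contexts, compute their offsets and pick a reference
 * SPU.  Returns true when a reference SPU is set.
 */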
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (atomic_read(&ctx->gang->aff_sched_count) == 0)
		ctx->gang->aff_ref_spu = NULL;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

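/**
 * spu_unbind_context - unbind spu context from physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */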
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	u32 status;

	spu_context_trace(spu_unbind_context__enter, ctx, spu);

	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang)
		/*
		 * If ctx->gang->aff_sched_count is positive, SPU affinity is
		 * being considered in this gang.  Using atomic_dec_if_positive
		 * allows us to skip an explicit check for affinity in this
		 * gang.
		 */
		atomic_dec_if_positive(&ctx->gang->aff_sched_count);

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu_switch_log_notify(spu, ctx, SWITCH_LOG_STOP, 0);

	spin_lock_irq(&spu->register_lock);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;
	spin_unlock_irq(&spu->register_lock);

	spu_associate_mm(spu, NULL);

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	/* This maps the underlying spu state to idle. */
	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;

	if (spu_stopped(ctx, &status))
		wake_up_all(&ctx->stop_wq);
}

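/**
 * spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 */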
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * Unfortunately this code path can be called from multiple threads
	 * on behalf of a single context due to the way the problem state
	 * mmap support works.  Only the first caller actually adds the
	 * context to the run queue; later callers find it already queued
	 * and do nothing, which is fine because all waiters are woken
	 * together anyway.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void spu_add_to_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_add_to_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

void spu_del_from_rq(struct spu_context *ctx)
{
	spin_lock(&spu_prio->runq_lock);
	__spu_del_from_rq(ctx);
	spin_unlock(&spu_prio->runq_lock);
}

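/*
 * spu_prio_wait - put a NOSCHED context on the run queue and sleep until
 * an SPU becomes available or a signal is pending; the context is taken
 * off the run queue again before returning.
 */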
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	/*
	 * Only NOSCHED contexts wait here; all other contexts are simply
	 * queued by spu_activate() and woken from the run queue once an
	 * SPU frees up.
	 */
	BUG_ON(!(ctx->flags & SPU_CREATE_NOSCHED));

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

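/*
 * spu_get_idle - find an idle spu for this context, honouring gang
 * affinity when the context has any.  Returns the spu marked SPU_USED
 * and with its channels initialized, or NULL if none is free.
 */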
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	spu_context_nospu_trace(spu_get_idle__enter, ctx);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			atomic_dec(&ctx->gang->aff_sched_count);
			goto not_found;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

 not_found:
	spu_context_nospu_trace(spu_get_idle__not_found, ctx);
	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	spu_context_trace(spu_get_idle__found, ctx, spu);
	spu_init_channels(spu);
	return spu;
}

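/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Returns the freed physical spu to run the new context on.
 */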
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	spu_context_nospu_trace(spu_find_victim__enter, ctx);

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly fair, but so far the whole spu scheduler tries to keep
	 * a strong node affinity.  We might want to fine-tune this in
	 * the future, but until that time this is good enough.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    !(tmp->flags & SPU_CREATE_NOSCHED) &&
			    (!victim || tmp->prio > victim->prio)) {
				victim = spu->ctx;
			}
		}
		if (victim)
			get_spu_context(victim);
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * This nests ctx->state_mutex, but we always lock
			 * higher priority contexts before lower priority
			 * ones, so this is safe until we introduce
			 * priority inheritance schemes.
			 *
			 * XXX if the highest priority context is locked,
			 * this can loop for a long time.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu || victim->prio <= ctx->prio) {
				/*
				 * This race can happen because we've dropped
				 * the active list mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				put_spu_context(victim);
				victim = NULL;
				goto restart;
			}

			spu_context_trace(__spu_deactivate__unload, ctx, spu);

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
				spu_add_to_rq(victim);

			mutex_unlock(&victim->state_mutex);
			put_spu_context(victim);

			return spu;
		}
	}

	return NULL;
}

static void __spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	int node = spu->node;
	int success = 0;

	spu_set_timeslice(ctx);

	mutex_lock(&cbe_spu_info[node].list_mutex);
	if (spu->ctx == NULL) {
		spu_bind_context(spu, ctx);
		cbe_spu_info[node].nr_active++;
		spu->alloc_state = SPU_USED;
		success = 1;
	}
	mutex_unlock(&cbe_spu_info[node].list_mutex);

	if (success)
		wake_up_all(&ctx->run_wq);
	else
		spu_add_to_rq(ctx);
}

static void spu_schedule(struct spu *spu, struct spu_context *ctx)
{
	/* not a candidate for interruptible because it's called either
	   from the scheduler thread or from spu_deactivate */
	mutex_lock(&ctx->state_mutex);
	if (ctx->state == SPU_STATE_SAVED)
		__spu_schedule(spu, ctx);
	spu_release(ctx);
}

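/**
 * spu_unschedule - remove a context from a spu, and possibly release it.
 * @spu:	The SPU to unschedule from
 * @ctx:	The context to unschedule
 * @free_spu:	Whether to free the SPU for other contexts
 *
 * Unbinds the context @ctx from the SPU @spu.  If @free_spu is non-zero, the
 * SPU is made available for other contexts (i.e. may be returned by
 * spu_get_idle()).  If this is zero, the caller is expected to schedule
 * another context to this spu.
 *
 * Should be called with ctx->state_mutex held.
 */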
static void spu_unschedule(struct spu *spu, struct spu_context *ctx,
		int free_spu)
{
	int node = spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	cbe_spu_info[node].nr_active--;
	if (free_spu)
		spu->alloc_state = SPU_FREE;
	spu_unbind_context(spu, ctx);
	ctx->stats.invol_ctx_switch++;
	spu->stats.invol_ctx_switch++;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

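/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently ignored)
 *
 * Tries to find a free spu to run @ctx.  If no free spu is available
 * add the context to the runqueue so it gets woken up once an spu
 * is available.
 */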
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	struct spu *spu;

	/*
	 * If there are multiple threads waiting for a single context
	 * only one actually binds the context while the others will
	 * only be able to acquire the state_mutex once the context
	 * already is in runnable state.
	 */
	if (ctx->spu)
		return 0;

spu_activate_top:
	if (signal_pending(current))
		return -ERESTARTSYS;

	spu = spu_get_idle(ctx);
	/*
	 * If this is a realtime thread we try to get it running by
	 * preempting a lower priority thread.
	 */
	if (!spu && rt_prio(ctx->prio))
		spu = find_victim(ctx);
	if (spu) {
		unsigned long runcntl;

		runcntl = ctx->ops->runcntl_read(ctx);
		__spu_schedule(spu, ctx);
		if (runcntl & SPU_RUNCNTL_RUNNABLE)
			spuctx_switch_state(ctx, SPU_UTIL_USER);

		return 0;
	}

	if (ctx->flags & SPU_CREATE_NOSCHED) {
		spu_prio_wait(ctx);
		goto spu_activate_top;
	}

	spu_add_to_rq(ctx);

	return 0;
}

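/**
 * grab_runnable_context - try to find a runnable context
 *
 * Remove the highest priority context on the runqueue and return it
 * to the caller.  Returns %NULL if no runnable context was found.
 */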
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			spu_unschedule(spu, ctx, new == NULL);
			if (new) {
				if (new->flags & SPU_CREATE_NOSCHED)
					wake_up(&new->stop_wq);
				else {
					spu_release(ctx);
					spu_schedule(spu, new);
					/* this one can't easily be made
					   interruptible */
					mutex_lock(&ctx->state_mutex);
				}
			}
		}
	}

	return new != NULL;
}

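/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */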
void spu_deactivate(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_deactivate__enter, ctx);
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

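/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * Check if there is a runnable context waiting and if yes
 * unbind @ctx from the physical spu and schedule the waiting
 * context to run on the freed physical spu instead.
 */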
void spu_yield(struct spu_context *ctx)
{
	spu_context_nospu_trace(spu_yield__enter, ctx);
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

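/*
 * spusched_tick - timeslice accounting for one loaded context.
 *
 * Called from the spusched kernel thread for every loaded context.  NOSCHED
 * and SCHED_FIFO contexts are never timesliced; everyone else is preempted
 * once their time slice is used up and a context of equal or higher
 * priority is waiting on the run queue.
 */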
static noinline void spusched_tick(struct spu_context *ctx)
{
	struct spu_context *new = NULL;
	struct spu *spu = NULL;

	if (spu_acquire(ctx))
		BUG();

	if (ctx->state != SPU_STATE_RUNNABLE)
		goto out;
	if (ctx->flags & SPU_CREATE_NOSCHED)
		goto out;
	if (ctx->policy == SCHED_FIFO)
		goto out;

	if (--ctx->time_slice && test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
		goto out;

	spu = ctx->spu;

	spu_context_trace(spusched_tick__preempt, ctx, spu);

	new = grab_runnable_context(ctx->prio + 1, spu->node);
	if (new) {
		spu_unschedule(spu, ctx, 0);
		if (test_bit(SPU_SCHED_SPU_RUN, &ctx->sched_flags))
			spu_add_to_rq(ctx);
	} else {
		spu_context_nospu_trace(spusched_tick__newslice, ctx);
		if (!ctx->time_slice)
			ctx->time_slice++;
	}
out:
	spu_release(ctx);

	if (new)
		spu_schedule(spu, new);
}

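/**
 * count_active_contexts - count nr of active tasks
 *
 * Return the number of tasks currently running or waiting to run.
 *
 * Note that we don't take runq_lock / list_mutex here.  Reading
 * a single 32bit value is atomic on powerpc, and we don't care
 * about memory ordering issues here.
 */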
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

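/**
 * spu_calc_load - update the avenrun load estimates.
 *
 * No locking against reading these values from userspace, as for
 * the CPU loadavg code.
 */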
static void spu_calc_load(void)
{
	unsigned long active_tasks;

	active_tasks = count_active_contexts() * FIXED_1;
	spu_avenrun[0] = calc_load(spu_avenrun[0], EXP_1, active_tasks);
	spu_avenrun[1] = calc_load(spu_avenrun[1], EXP_5, active_tasks);
	spu_avenrun[2] = calc_load(spu_avenrun[2], EXP_15, active_tasks);
}

static void spusched_wake(struct timer_list *unused)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
}

static void spuloadavg_wake(struct timer_list *unused)
{
	mod_timer(&spuloadavg_timer, jiffies + LOAD_FREQ);
	spu_calc_load();
}

static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			struct mutex *mtx = &cbe_spu_info[node].list_mutex;

			mutex_lock(mtx);
			list_for_each_entry(spu, &cbe_spu_info[node].spus,
					cbe_list) {
				struct spu_context *ctx = spu->ctx;

				if (ctx) {
					get_spu_context(ctx);
					mutex_unlock(mtx);
					spusched_tick(ctx);
					mutex_lock(mtx);
					put_spu_context(ctx);
				}
			}
			mutex_unlock(mtx);
		}
	}

	return 0;
}

void spuctx_switch_state(struct spu_context *ctx,
		enum spu_utilization_state new_state)
{
	unsigned long long curtime;
	signed long long delta;
	struct spu *spu;
	enum spu_utilization_state old_state;
	int node;

	curtime = ktime_get_ns();
	delta = curtime - ctx->stats.tstamp;

	WARN_ON(!mutex_is_locked(&ctx->state_mutex));
	WARN_ON(delta < 0);

	spu = ctx->spu;
	old_state = ctx->stats.util_state;
	ctx->stats.util_state = new_state;
	ctx->stats.tstamp = curtime;

	/*
	 * Update the physical SPU utilization statistics.
	 */
	if (spu) {
		ctx->stats.times[old_state] += delta;
		spu->stats.times[old_state] += delta;
		spu->stats.util_state = new_state;
		spu->stats.tstamp = curtime;
		node = spu->node;
		if (old_state == SPU_UTIL_USER)
			atomic_dec(&cbe_spu_info[node].busy_spus);
		if (new_state == SPU_UTIL_USER)
			atomic_inc(&cbe_spu_info[node].busy_spus);
	}
}

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Note that last_pid doesn't really make much sense for the
	 * SPU loadavg (it even seems very odd on the CPU side...),
	 * but we include it to have a comparable format to /proc/loadavg.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		idr_get_cursor(&task_active_pid_ns(current)->idr) - 1);
	return 0;
}

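/*
 * spu_sched_init - allocate the run queue, start the spusched kernel
 * thread and the load average timer, and create /proc/spu_loadavg.
 */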
int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	timer_setup(&spusched_timer, spusched_wake, 0);
	timer_setup(&spuloadavg_timer, spuloadavg_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	mod_timer(&spuloadavg_timer, 0);

	entry = proc_create_single("spu_loadavg", 0, NULL, show_spu_loadavg);
	if (!entry)
		goto out_stop_kthread;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

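/*
 * spu_sched_exit - tear down the scheduler: remove the proc file, stop the
 * timers and the spusched thread, mark all SPUs free and free the run queue.
 */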
void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	del_timer_sync(&spuloadavg_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}