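/*
 * SPU scheduler for spufs (Cell Broadband Engine).
 *
 * Binds runnable spu contexts to physical SPUs, preempts them again on a
 * periodic scheduler tick, and exports load averages via /proc/spu_loadavg.
 */
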
#undef DEBUG

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/completion.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/numa.h>
#include <linux/mutex.h>
#include <linux/notifier.h>
#include <linux/kthread.h>
#include <linux/pid_namespace.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_priv1.h>
#include "spufs.h"

struct spu_prio_array {
	DECLARE_BITMAP(bitmap, MAX_PRIO);
	struct list_head runq[MAX_PRIO];
	spinlock_t runq_lock;
	int nr_waiting;
};

static unsigned long spu_avenrun[3];
static struct spu_prio_array *spu_prio;
static struct task_struct *spusched_task;
static struct timer_list spusched_timer;

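/*
 * Static priority of a normal (nice 0) task.  Contexts whose priority
 * value is below this get a larger base timeslice in spu_set_timeslice().
 */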
#define NORMAL_PRIO		120

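/*
 * Period of the SPU scheduler tick in CPU timer ticks (jiffies): one SPU
 * scheduler tick for every 10 CPU scheduler ticks.
 */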
#define SPUSCHED_TICK		(10)

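/*
 * Scheduler timeslices are expressed in SPU scheduler ticks: at least
 * 5 msecs (or one tick, whichever is larger), 100 msecs by default.
 */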
#define MIN_SPU_TIMESLICE	max(5 * HZ / (1000 * SPUSCHED_TICK), 1)
#define DEF_SPU_TIMESLICE	(100 * HZ / (1000 * SPUSCHED_TICK))

#define MAX_USER_PRIO		(MAX_PRIO - MAX_RT_PRIO)
#define SCALE_PRIO(x, prio) \
	max(x * (MAX_PRIO - prio) / (MAX_USER_PRIO / 2), MIN_SPU_TIMESLICE)

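/*
 * Scale a context's priority to a timeslice: better (numerically lower)
 * priorities get longer timeslices, with a base of four default
 * timeslices for priorities below NORMAL_PRIO, and never less than
 * MIN_SPU_TIMESLICE.
 */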
void spu_set_timeslice(struct spu_context *ctx)
{
	if (ctx->prio < NORMAL_PRIO)
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE * 4, ctx->prio);
	else
		ctx->time_slice = SCALE_PRIO(DEF_SPU_TIMESLICE, ctx->prio);
}

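/*
 * Copy scheduling information (tid, priority, policy and allowed CPUs)
 * from the calling thread into the context.
 */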
void __spu_update_sched_info(struct spu_context *ctx)
{
	ctx->tid = current->pid;

	/*
	 * Run the context at the caller's static priority, unless the
	 * caller is a realtime task, in which case its (dynamic) realtime
	 * priority is used directly.
	 */
	if (rt_prio(current->prio))
		ctx->prio = current->prio;
	else
		ctx->prio = current->static_prio;
	ctx->policy = current->policy;

	/*
	 * ctx->cpus_allowed is read under runq_lock in __node_allowed(),
	 * so take the same lock when updating it.
	 */
	spin_lock(&spu_prio->runq_lock);
	ctx->cpus_allowed = current->cpus_allowed;
	spin_unlock(&spu_prio->runq_lock);
}

void spu_update_sched_info(struct spu_context *ctx)
{
	int node = ctx->spu->node;

	mutex_lock(&cbe_spu_info[node].list_mutex);
	__spu_update_sched_info(ctx);
	mutex_unlock(&cbe_spu_info[node].list_mutex);
}

static int __node_allowed(struct spu_context *ctx, int node)
{
	if (nr_cpus_node(node)) {
		cpumask_t mask = node_to_cpumask(node);

		if (cpus_intersects(mask, ctx->cpus_allowed))
			return 1;
	}

	return 0;
}

static int node_allowed(struct spu_context *ctx, int node)
{
	int rval;

	spin_lock(&spu_prio->runq_lock);
	rval = __node_allowed(ctx, node);
	spin_unlock(&spu_prio->runq_lock);

	return rval;
}

void do_notify_spus_active(void)
{
	int node;

	/*
	 * Wake up every context that is currently bound to an spu so it
	 * can notice that SPU_SCHED_NOTIFY_ACTIVE has been set.
	 */
	for_each_online_node(node) {
		struct spu *spu;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state != SPU_FREE) {
				struct spu_context *ctx = spu->ctx;
				set_bit(SPU_SCHED_NOTIFY_ACTIVE,
					&ctx->sched_flags);
				mb();
				wake_up_all(&ctx->stop_wq);
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
}

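/**
 * spu_bind_context - bind an spu context to a physical spu
 * @spu:	physical spu to bind to
 * @ctx:	context to bind
 *
 * Called with the list_mutex of @spu's node held (see spu_activate()).
 */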
static void spu_bind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: pid=%d SPU=%d NODE=%d\n", __FUNCTION__, current->pid,
		 spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (ctx->flags & SPU_CREATE_NOSCHED)
		atomic_inc(&cbe_spu_info[spu->node].reserved_spus);

	ctx->stats.slb_flt_base = spu->stats.slb_flt;
	ctx->stats.class2_intr_base = spu->stats.class2_intr;

	spu->ctx = ctx;
	spu->flags = 0;
	ctx->spu = spu;
	ctx->ops = &spu_hw_ops;
	spu->pid = current->pid;
	spu->tgid = current->tgid;
	spu_associate_mm(spu, ctx->owner);
	spu->ibox_callback = spufs_ibox_callback;
	spu->wbox_callback = spufs_wbox_callback;
	spu->stop_callback = spufs_stop_callback;
	spu->mfc_callback = spufs_mfc_callback;
	spu->dma_callback = spufs_dma_callback;
	mb();
	spu_unmap_mappings(ctx);
	spu_restore(&ctx->csa, spu);
	spu->timestamp = jiffies;
	spu_cpu_affinity_set(spu, raw_smp_processor_id());
	spu_switch_notify(spu, ctx);
	ctx->state = SPU_STATE_RUNNABLE;

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
}

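/*
 * Must be called with the list_mutex of the spu's node held.  Returns
 * true if the spu is free or if its current context may be rescheduled.
 */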
static inline int sched_spu(struct spu *spu)
{
	BUG_ON(!mutex_is_locked(&cbe_spu_info[spu->node].list_mutex));

	return (!spu->ctx || !(spu->ctx->flags & SPU_CREATE_NOSCHED));
}

static void aff_merge_remaining_ctxs(struct spu_gang *gang)
{
	struct spu_context *ctx;

	list_for_each_entry(ctx, &gang->aff_list_head, aff_list) {
		if (list_empty(&ctx->aff_list))
			list_add(&ctx->aff_list, &gang->aff_list_head);
	}
	gang->aff_flags |= AFF_MERGED;
}

static void aff_set_offsets(struct spu_gang *gang)
{
	struct spu_context *ctx;
	int offset;

	/*
	 * Number the gang's contexts relative to the affinity reference
	 * context: contexts before it get negative offsets, the reference
	 * context and those after it get 0, 1, 2, ...
	 */
	offset = -1;
	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
				    aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset--;
	}

	offset = 0;
	list_for_each_entry(ctx, gang->aff_ref_ctx->aff_list.prev, aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		ctx->aff_offset = offset++;
	}

	gang->aff_flags |= AFF_OFFSETS_SET;
}

static struct spu *aff_ref_location(struct spu_context *ctx, int mem_aff,
		int group_size, int lowest_offset)
{
	struct spu *spu;
	int node, n;

	/*
	 * Starting from the node we are running on, look for the first
	 * allowed node that has a schedulable SPU satisfying the memory
	 * affinity requirement, and use it as the reference location.
	 */
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if ((!mem_aff || spu->has_mem_affinity) &&
			    sched_spu(spu)) {
				mutex_unlock(&cbe_spu_info[node].list_mutex);
				return spu;
			}
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	return NULL;
}

static void aff_set_ref_point_location(struct spu_gang *gang)
{
	int mem_aff, gs, lowest_offset;
	struct spu_context *ctx;
	struct spu *tmp;

	mem_aff = gang->aff_ref_ctx->flags & SPU_CREATE_AFFINITY_MEM;
	lowest_offset = 0;
	gs = 0;

	list_for_each_entry(tmp, &gang->aff_list_head, aff_list)
		gs++;

	list_for_each_entry_reverse(ctx, &gang->aff_ref_ctx->aff_list,
				    aff_list) {
		if (&ctx->aff_list == &gang->aff_list_head)
			break;
		lowest_offset = ctx->aff_offset;
	}

	gang->aff_ref_spu = aff_ref_location(gang->aff_ref_ctx, mem_aff, gs,
					     lowest_offset);
}

static struct spu *ctx_location(struct spu *ref, int offset, int node)
{
	struct spu *spu;

	/*
	 * Step |offset| schedulable SPUs forwards (offset >= 0) or
	 * backwards (offset < 0) from the reference SPU along the
	 * affinity list, staying on the same node.
	 */
	spu = NULL;
	if (offset >= 0) {
		list_for_each_entry(spu, ref->aff_list.prev, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset--;
		}
	} else {
		list_for_each_entry_reverse(spu, ref->aff_list.next, aff_list) {
			BUG_ON(spu->node != node);
			if (offset == 0)
				break;
			if (sched_spu(spu))
				offset++;
		}
	}

	return spu;
}

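/*
 * Check whether @ctx has gang affinity requirements, lazily setting up
 * the gang's affinity offsets and reference SPU on first use.  Called
 * with the gang's aff_mutex held.
 */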
static int has_affinity(struct spu_context *ctx)
{
	struct spu_gang *gang = ctx->gang;

	if (list_empty(&ctx->aff_list))
		return 0;

	if (!gang->aff_ref_spu) {
		if (!(gang->aff_flags & AFF_MERGED))
			aff_merge_remaining_ctxs(gang);
		if (!(gang->aff_flags & AFF_OFFSETS_SET))
			aff_set_offsets(gang);
		aff_set_ref_point_location(gang);
	}

	return gang->aff_ref_spu != NULL;
}

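/**
 * spu_unbind_context - unbind an spu context from a physical spu
 * @spu:	physical spu to unbind from
 * @ctx:	context to unbind
 */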
static void spu_unbind_context(struct spu *spu, struct spu_context *ctx)
{
	pr_debug("%s: unbind pid=%d SPU=%d NODE=%d\n", __FUNCTION__,
		 spu->pid, spu->number, spu->node);
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);

	if (spu->ctx->flags & SPU_CREATE_NOSCHED)
		atomic_dec(&cbe_spu_info[spu->node].reserved_spus);

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}

	spu_switch_notify(spu, NULL);
	spu_unmap_mappings(ctx);
	spu_save(&ctx->csa, spu);
	spu->timestamp = jiffies;
	ctx->state = SPU_STATE_SAVED;
	spu->ibox_callback = NULL;
	spu->wbox_callback = NULL;
	spu->stop_callback = NULL;
	spu->mfc_callback = NULL;
	spu->dma_callback = NULL;
	spu_associate_mm(spu, NULL);
	spu->pid = 0;
	spu->tgid = 0;
	ctx->ops = &spu_backing_ops;
	spu->flags = 0;
	spu->ctx = NULL;

	ctx->stats.slb_flt +=
		(spu->stats.slb_flt - ctx->stats.slb_flt_base);
	ctx->stats.class2_intr +=
		(spu->stats.class2_intr - ctx->stats.class2_intr_base);

	spuctx_switch_state(ctx, SPU_UTIL_IDLE_LOADED);
	ctx->spu = NULL;
}

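/**
 * __spu_add_to_rq - add a context to the runqueue
 * @ctx:	context to add
 *
 * Must be called with spu_prio->runq_lock held.  Arms the scheduler tick
 * timer when the first waiter is added.
 */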
static void __spu_add_to_rq(struct spu_context *ctx)
{
	/*
	 * A context can be waited on by multiple threads, but it is only
	 * put on the runqueue (and counted in nr_waiting) once.
	 */
	if (list_empty(&ctx->rq)) {
		list_add_tail(&ctx->rq, &spu_prio->runq[ctx->prio]);
		set_bit(ctx->prio, spu_prio->bitmap);
		if (!spu_prio->nr_waiting++)
			__mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	}
}

static void __spu_del_from_rq(struct spu_context *ctx)
{
	int prio = ctx->prio;

	if (!list_empty(&ctx->rq)) {
		if (!--spu_prio->nr_waiting)
			del_timer(&spusched_timer);
		list_del_init(&ctx->rq);

		if (list_empty(&spu_prio->runq[prio]))
			clear_bit(prio, spu_prio->bitmap);
	}
}

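/*
 * Put the context on the runqueue and sleep until it is woken up again,
 * either because it has been handed an spu or because a signal is
 * pending.  The state_mutex is dropped across the sleep.
 */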
static void spu_prio_wait(struct spu_context *ctx)
{
	DEFINE_WAIT(wait);

	spin_lock(&spu_prio->runq_lock);
	prepare_to_wait_exclusive(&ctx->stop_wq, &wait, TASK_INTERRUPTIBLE);
	if (!signal_pending(current)) {
		__spu_add_to_rq(ctx);
		spin_unlock(&spu_prio->runq_lock);
		mutex_unlock(&ctx->state_mutex);
		schedule();
		mutex_lock(&ctx->state_mutex);
		spin_lock(&spu_prio->runq_lock);
		__spu_del_from_rq(ctx);
	}
	spin_unlock(&spu_prio->runq_lock);
	__set_current_state(TASK_RUNNING);
	remove_wait_queue(&ctx->stop_wq, &wait);
}

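/**
 * spu_get_idle - find and reserve an idle spu for a context
 * @ctx:	spu context to schedule
 *
 * For gang members with affinity the slot relative to the gang's
 * reference SPU is tried first.  Returns the reserved spu, or %NULL if
 * none is available.
 */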
static struct spu *spu_get_idle(struct spu_context *ctx)
{
	struct spu *spu, *aff_ref_spu;
	int node, n;

	if (ctx->gang) {
		mutex_lock(&ctx->gang->aff_mutex);
		if (has_affinity(ctx)) {
			aff_ref_spu = ctx->gang->aff_ref_spu;
			atomic_inc(&ctx->gang->aff_sched_count);
			mutex_unlock(&ctx->gang->aff_mutex);
			node = aff_ref_spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu = ctx_location(aff_ref_spu, ctx->aff_offset, node);
			if (spu && spu->alloc_state == SPU_FREE)
				goto found;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			mutex_lock(&ctx->gang->aff_mutex);
			if (atomic_dec_and_test(&ctx->gang->aff_sched_count))
				ctx->gang->aff_ref_spu = NULL;
			mutex_unlock(&ctx->gang->aff_mutex);

			return NULL;
		}
		mutex_unlock(&ctx->gang->aff_mutex);
	}
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			if (spu->alloc_state == SPU_FREE)
				goto found;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}

	return NULL;

 found:
	spu->alloc_state = SPU_USED;
	mutex_unlock(&cbe_spu_info[node].list_mutex);
	pr_debug("Got SPU %d %d\n", spu->number, spu->node);
	spu_init_channels(spu);
	return spu;
}

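/**
 * find_victim - find a lower priority context to preempt
 * @ctx:	candidate context for running
 *
 * Scans the nodes @ctx may run on for a bound context with a worse
 * priority, unbinds it and returns the freed physical spu.  Returns
 * %NULL if no suitable victim exists.
 */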
static struct spu *find_victim(struct spu_context *ctx)
{
	struct spu_context *victim = NULL;
	struct spu *spu;
	int node, n;

	/*
	 * Look for a possible preemption candidate on the local node first.
	 * If there is no candidate look at the other nodes.  This isn't
	 * exactly a NUMA-aware scheduler, but a decent approximation.
	 */
 restart:
	node = cpu_to_node(raw_smp_processor_id());
	for (n = 0; n < MAX_NUMNODES; n++, node++) {
		node = (node < MAX_NUMNODES) ? node : 0;
		if (!node_allowed(ctx, node))
			continue;

		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
			struct spu_context *tmp = spu->ctx;

			if (tmp && tmp->prio > ctx->prio &&
			    (!victim || tmp->prio > victim->prio))
				victim = spu->ctx;
		}
		mutex_unlock(&cbe_spu_info[node].list_mutex);

		if (victim) {
			/*
			 * If we cannot lock the victim's state_mutex
			 * without blocking, give up on it and rescan.
			 */
			if (!mutex_trylock(&victim->state_mutex)) {
				victim = NULL;
				goto restart;
			}

			spu = victim->spu;
			if (!spu) {
				/*
				 * The victim lost its spu while we dropped
				 * the list_mutex.  Not a problem, just
				 * restart the search.
				 */
				mutex_unlock(&victim->state_mutex);
				victim = NULL;
				goto restart;
			}

			mutex_lock(&cbe_spu_info[node].list_mutex);
			cbe_spu_info[node].nr_active--;
			spu_unbind_context(spu, victim);
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			victim->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			mutex_unlock(&victim->state_mutex);

			/*
			 * Wake up the victim so it notices it lost its spu
			 * and can put itself back on the runqueue.
			 */
			wake_up(&victim->stop_wq);
			return spu;
		}
	}

	return NULL;
}

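/**
 * spu_activate - find a free spu for a context and execute it
 * @ctx:	spu context to schedule
 * @flags:	flags (currently unused here)
 *
 * Tries to find a free spu to run @ctx.  If none is available the
 * context is put on the runqueue and the caller sleeps until an spu is
 * handed to it or a signal becomes pending.
 */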
int spu_activate(struct spu_context *ctx, unsigned long flags)
{
	do {
		struct spu *spu;

		/*
		 * If several threads are waiting for the same context,
		 * another one may already have bound it; in that case
		 * there is nothing left to do.
		 */
		if (ctx->spu)
			return 0;

		spu = spu_get_idle(ctx);
		/*
		 * If this is a realtime context we may preempt a lower
		 * priority context that currently owns an spu.
		 */
		if (!spu && rt_prio(ctx->prio))
			spu = find_victim(ctx);
		if (spu) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_bind_context(spu, ctx);
			cbe_spu_info[node].nr_active++;
			mutex_unlock(&cbe_spu_info[node].list_mutex);
			return 0;
		}

		spu_prio_wait(ctx);
	} while (!signal_pending(current));

	return -ERESTARTSYS;
}

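/**
 * grab_runnable_context - pick the best waiting context for a freed spu
 * @prio:	only consider contexts with a better (lower) priority value
 * @node:	node the freed spu belongs to
 *
 * Removes the highest priority context that may run on @node from the
 * runqueue and returns it, or %NULL if there is none.
 */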
static struct spu_context *grab_runnable_context(int prio, int node)
{
	struct spu_context *ctx;
	int best;

	spin_lock(&spu_prio->runq_lock);
	best = find_first_bit(spu_prio->bitmap, prio);
	while (best < prio) {
		struct list_head *rq = &spu_prio->runq[best];

		list_for_each_entry(ctx, rq, rq) {
			if (__node_allowed(ctx, node)) {
				__spu_del_from_rq(ctx);
				goto found;
			}
		}
		best++;
	}
	ctx = NULL;
 found:
	spin_unlock(&spu_prio->runq_lock);
	return ctx;
}

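/**
 * __spu_deactivate - unbind a context, possibly handing its spu on
 * @ctx:	spu context to unbind
 * @force:	also unbind when no other context is waiting
 * @max_prio:	only hand the spu to a context with a better priority value
 *
 * Returns nonzero if another context was woken up to take over the spu.
 */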
static int __spu_deactivate(struct spu_context *ctx, int force, int max_prio)
{
	struct spu *spu = ctx->spu;
	struct spu_context *new = NULL;

	if (spu) {
		new = grab_runnable_context(max_prio, spu->node);
		if (new || force) {
			int node = spu->node;

			mutex_lock(&cbe_spu_info[node].list_mutex);
			spu_unbind_context(spu, ctx);
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[node].nr_active--;
			mutex_unlock(&cbe_spu_info[node].list_mutex);

			ctx->stats.vol_ctx_switch++;
			spu->stats.vol_ctx_switch++;

			if (new)
				wake_up(&new->stop_wq);
		}
	}

	return new != NULL;
}

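/**
 * spu_deactivate - unbind a context from its physical spu
 * @ctx:	spu context to unbind
 *
 * Unbind @ctx from the physical spu it is running on and schedule
 * the highest priority context to run on the freed physical spu.
 */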
void spu_deactivate(struct spu_context *ctx)
{
	__spu_deactivate(ctx, 1, MAX_PRIO);
}

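/**
 * spu_yield - yield a physical spu if others are waiting
 * @ctx:	spu context to yield
 *
 * If another context is waiting for an spu, unbind @ctx from its
 * physical spu (unless the context was created with SPU_CREATE_NOSCHED)
 * and hand the spu to the highest priority waiter.
 */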
void spu_yield(struct spu_context *ctx)
{
	if (!(ctx->flags & SPU_CREATE_NOSCHED)) {
		mutex_lock(&ctx->state_mutex);
		__spu_deactivate(ctx, 0, MAX_PRIO);
		mutex_unlock(&ctx->state_mutex);
	}
}

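/*
 * Called from the scheduler thread for every bound context: charge the
 * context one tick and, once its timeslice is used up, try to hand its
 * spu to a waiting context of equal or better priority.
 */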
static noinline void spusched_tick(struct spu_context *ctx)
{
	if (ctx->flags & SPU_CREATE_NOSCHED)
		return;
	if (ctx->policy == SCHED_FIFO)
		return;

	if (--ctx->time_slice)
		return;

	/*
	 * The caller holds the node's list_mutex; taking state_mutex
	 * unconditionally here could deadlock against paths that take the
	 * two locks in the opposite order, so only trylock.  If that
	 * fails, the context simply gets another tick.
	 */
	if (mutex_trylock(&ctx->state_mutex)) {
		struct spu *spu = ctx->spu;
		struct spu_context *new;

		new = grab_runnable_context(ctx->prio + 1, spu->node);
		if (new) {
			spu_unbind_context(spu, ctx);
			ctx->stats.invol_ctx_switch++;
			spu->stats.invol_ctx_switch++;
			spu->alloc_state = SPU_FREE;
			cbe_spu_info[spu->node].nr_active--;
			wake_up(&new->stop_wq);
			/*
			 * Wake up the preempted context as well so that
			 * it puts itself back on the runqueue.
			 */
			wake_up(&ctx->stop_wq);
		}
		spu_set_timeslice(ctx);
		mutex_unlock(&ctx->state_mutex);
	} else {
		ctx->time_slice++;
	}
}

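/**
 * count_active_contexts - count contexts that are running or waiting
 *
 * Returns the number of contexts currently bound to an spu plus the
 * number waiting on the runqueue.  The counters are read without
 * locking; a snapshot is good enough for load accounting.
 */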
static unsigned long count_active_contexts(void)
{
	int nr_active = 0, node;

	for (node = 0; node < MAX_NUMNODES; node++)
		nr_active += cbe_spu_info[node].nr_active;
	nr_active += spu_prio->nr_waiting;

	return nr_active;
}

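/**
 * spu_calc_load - update the spu load averages
 * @ticks:	number of spu scheduler ticks that have elapsed
 *
 * Works like the CPU loadavg calculation: every LOAD_FREQ ticks the
 * one, five and fifteen minute averages in spu_avenrun are decayed
 * towards the current number of active contexts.
 */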
static void spu_calc_load(unsigned long ticks)
{
	unsigned long active_tasks;
	static int count = LOAD_FREQ;

	count -= ticks;

	if (unlikely(count < 0)) {
		active_tasks = count_active_contexts() * FIXED_1;
		do {
			CALC_LOAD(spu_avenrun[0], EXP_1, active_tasks);
			CALC_LOAD(spu_avenrun[1], EXP_5, active_tasks);
			CALC_LOAD(spu_avenrun[2], EXP_15, active_tasks);
			count += LOAD_FREQ;
		} while (count < 0);
	}
}

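/*
 * Scheduler tick timer handler: re-arms the timer, wakes the spusched
 * thread and feeds the load average calculation.
 */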
static void spusched_wake(unsigned long data)
{
	mod_timer(&spusched_timer, jiffies + SPUSCHED_TICK);
	wake_up_process(spusched_task);
	spu_calc_load(SPUSCHED_TICK);
}

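/*
 * Main loop of the scheduler thread: woken once per scheduler tick, it
 * walks all bound contexts on every node and charges each a tick.
 */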
static int spusched_thread(void *unused)
{
	struct spu *spu;
	int node;

	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
		for (node = 0; node < MAX_NUMNODES; node++) {
			mutex_lock(&cbe_spu_info[node].list_mutex);
			list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
				if (spu->ctx)
					spusched_tick(spu->ctx);
			mutex_unlock(&cbe_spu_info[node].list_mutex);
		}
	}

	return 0;
}

#define LOAD_INT(x) ((x) >> FSHIFT)
#define LOAD_FRAC(x) LOAD_INT(((x) & (FIXED_1-1)) * 100)

static int show_spu_loadavg(struct seq_file *s, void *private)
{
	int a, b, c;

	a = spu_avenrun[0] + (FIXED_1/200);
	b = spu_avenrun[1] + (FIXED_1/200);
	c = spu_avenrun[2] + (FIXED_1/200);

	/*
	 * Mirror the format of /proc/loadavg: the last field (last_pid)
	 * is of little use for SPU contexts, but it keeps the output
	 * layout identical.
	 */
	seq_printf(s, "%d.%02d %d.%02d %d.%02d %ld/%d %d\n",
		LOAD_INT(a), LOAD_FRAC(a),
		LOAD_INT(b), LOAD_FRAC(b),
		LOAD_INT(c), LOAD_FRAC(c),
		count_active_contexts(),
		atomic_read(&nr_spu_contexts),
		current->nsproxy->pid_ns->last_pid);
	return 0;
}

static int spu_loadavg_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_spu_loadavg, NULL);
}

static const struct file_operations spu_loadavg_fops = {
	.open		= spu_loadavg_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

int __init spu_sched_init(void)
{
	struct proc_dir_entry *entry;
	int err = -ENOMEM, i;

	spu_prio = kzalloc(sizeof(struct spu_prio_array), GFP_KERNEL);
	if (!spu_prio)
		goto out;

	for (i = 0; i < MAX_PRIO; i++) {
		INIT_LIST_HEAD(&spu_prio->runq[i]);
		__clear_bit(i, spu_prio->bitmap);
	}
	spin_lock_init(&spu_prio->runq_lock);

	setup_timer(&spusched_timer, spusched_wake, 0);

	spusched_task = kthread_run(spusched_thread, NULL, "spusched");
	if (IS_ERR(spusched_task)) {
		err = PTR_ERR(spusched_task);
		goto out_free_spu_prio;
	}

	entry = create_proc_entry("spu_loadavg", 0, NULL);
	if (!entry)
		goto out_stop_kthread;
	entry->proc_fops = &spu_loadavg_fops;

	pr_debug("spusched: tick: %d, min ticks: %d, default ticks: %d\n",
			SPUSCHED_TICK, MIN_SPU_TIMESLICE, DEF_SPU_TIMESLICE);
	return 0;

 out_stop_kthread:
	kthread_stop(spusched_task);
 out_free_spu_prio:
	kfree(spu_prio);
 out:
	return err;
}

void spu_sched_exit(void)
{
	struct spu *spu;
	int node;

	remove_proc_entry("spu_loadavg", NULL);

	del_timer_sync(&spusched_timer);
	kthread_stop(spusched_task);

	for (node = 0; node < MAX_NUMNODES; node++) {
		mutex_lock(&cbe_spu_info[node].list_mutex);
		list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list)
			if (spu->alloc_state != SPU_FREE)
				spu->alloc_state = SPU_FREE;
		mutex_unlock(&cbe_spu_info[node].list_mutex);
	}
	kfree(spu_prio);
}