// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of the SPU context switch sequence outlined in
 * Synergistic Processor Element, Book IV.
 *
 * A fully preemptive switch of an SPE is very expensive in terms
 * of time and system resources.  The sequence outlined in CBE
 * Book IV includes nearly 100 steps.
 */

#include <linux/export.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spufs.h"

#include "spu_save_dump.h"
#include "spu_restore_dump.h"

/*
 * The #if 0 variant of these polling macros spins unconditionally;
 * the production variant spins up to RELAX_SPIN_COUNT iterations
 * with cpu_relax() and then yields the CPU while the condition
 * still holds.
 */
#if 0
#define POLL_WHILE_TRUE(_c) { \
	do { \
	} while (_c); \
	}
#else
#define RELAX_SPIN_COUNT	1000
#define POLL_WHILE_TRUE(_c) { \
	do { \
		int _i; \
		for (_i = 0; _i < RELAX_SPIN_COUNT && (_c); _i++) { \
			cpu_relax(); \
		} \
		if (unlikely(_c)) \
			yield(); \
		else \
			break; \
	} while (_c); \
	}
#endif

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))

static inline void acquire_spu_lock(struct spu *spu)
{
	/*
	 * Acquire an SPU-specific mutual exclusion lock.
	 * Not implemented (TBD).
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/*
	 * Release the SPU-specific mutual exclusion lock.
	 * Not implemented (TBD).
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/*
	 * If any of SPU_Status[E,L,IS] is '1', the SPU is in
	 * isolate state and cannot be context saved at this time.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Save the class 0, 1 and 2 interrupt masks in the CSA
	 * (if one was supplied), then disable all of them.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);

	/*
	 * Mark a context switch as pending so the fault handlers
	 * leave this context alone, then make sure no interrupt
	 * handler is still running against this SPU.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
	synchronize_irq(spu->irqs[0]);
	synchronize_irq(spu->irqs[1]);
	synchronize_irq(spu->irqs[2]);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Set a software watchdog timer to catch an SPU that hangs
	 * during the switch sequence.  Not implemented (TBD).
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Inhibit user-space access to this SPU's problem-state
	 * MMIO area for the duration of the switch.
	 * Not implemented (TBD).
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Set a software context-switch-pending flag.
	 * Done above in disable_interrupts().
	 */
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Suspend DMA and save MFC_CNTL: wait out a suspension
	 * already in progress, or initiate one if the queue is
	 * still in normal operation.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		/* fall through */
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) |
				MFC_CNTL_SUSPEND_DMA_QUEUE;
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) &
				~MFC_CNTL_SUSPEND_DMA_QUEUE &
				~MFC_CNTL_SUSPEND_MASK;
		break;
	}
}

static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save SPU_RunCntl in the CSA.  This value contains
	 * the "application desired state".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save MFC_SR1 in the CSA. */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save SPU_Status in the CSA.  If the SPU is still running,
	 * stop it first; if it then shows no stop condition, the
	 * context was logically running, so record that instead.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}

static inline void save_mfc_stopped_status(struct spu_state *csa,
					   struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
			MFC_CNTL_DMA_QUEUES_EMPTY;

	/*
	 * Re-read the decrementer-running and DMA-queues-empty bits
	 * now that the MFC has been suspended, and merge them into
	 * the MFC_CNTL value saved earlier; they are only valid
	 * while the MFC is stopped.
	 */
	csa->priv2.mfc_control_RW &= ~mask;
	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Halt the MFC decrementer while keeping the DMA queue
	 * suspended.
	 */
	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Record the current timebase so the decrementer can be
	 * adjusted for the time spent switched out; see setup_decr().
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/*
	 * Remove other SPUs' access to this SPU.
	 * Not implemented (TBD).
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Write SPU_MSSync and poll until MFC multisource
	 * synchronization completes (MS_SYNC_PENDING clears).
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Invalidate this SPU's TLB entries and order the
	 * invalidation with a memory barrier.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/*
	 * Handle any interrupts still pending for this SPU.  They
	 * are deferred by the context-switch-pending flag set in
	 * disable_interrupts(), so nothing more is done here.
	 */
}

static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/*
	 * If the MFC DMA queues are not empty, save the 8 PU queue
	 * entries and the 16 SPU queue entries in the CSA.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}

static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the PPU_QueryMask register in the CSA. */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the PPU_QueryType register in the CSA. */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Save the Prxy_TagStatus register in the CSA.  It need
	 * not be restored, but saving it keeps the value readable
	 * by user space while the context is switched out.
	 */
	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save the MFC tag status query register in the CSA. */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save the MFC command buffers in the CSA. */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save the MFC atomic status register in the CSA. */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save the MFC_TCLASS_ID register in the CSA. */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Write MFC_TCLASS_ID with a known value (0x10000000) for
	 * the duration of the switch.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Initiate a purge of the MFC DMA queue while keeping the
	 * queue suspended.
	 */
	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_PURGE_DMA_REQUEST |
		 MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Poll MFC_CNTL until the purge completes. */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Write MFC_SR1 with master run control, relocate and bus
	 * TLBIE enabled.  The SPU-side save/restore code runs
	 * privileged, so the problem-state bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save SPU_NPC in the CSA. */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save SPU_PrivCntl in the CSA. */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Reset SPU_PrivCntl to zero. */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save SPU_LSLR in the CSA. */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Reset SPU_LSLR to its maximum value so the entire local
	 * store is addressable during the switch.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save SPU_Cfg in the CSA. */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Save PM_Trace_Tag_Wait_Mask in the CSA.
	 * Not performed by this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Save the resource allocation group ID and enable
	 * registers in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
		spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
		spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the PU mailbox status register in the CSA. */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the PU mailbox data register in the CSA. */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save the PU interrupting mailbox data register in the CSA. */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save CH 1, without channel count. */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27], data and counts. */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save the SPU mailbox channel (CH 29): count and data. */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save the MFC command channel (CH 21) count in the CSA. */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Reset the following CH: [21,23,28,30] to their default counts. */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restart the MFC command queue (MFC_CNTL[Sc] = 0). */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
				  unsigned int *code, int code_size)
{
	/*
	 * Invalidate all SLB entries for this SPU, then install
	 * kernel SLB entries covering the SPU-side save/restore
	 * program and the local store context save area (LSCSA),
	 * so the MFC can translate accesses to both.
	 */
	spu_invalidate_slbs(spu);
	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Flip from "switch pending" to "switch active".  If a
	 * fault was pending, arrange for the faulting DMA command
	 * to be restarted when MFC_CNTL is restored, then clear
	 * the pending flag.
	 */
	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
		CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/*
	 * Clear any stale interrupt status, then enable only the
	 * class 1 (translation) interrupts needed while the
	 * SPU-side save/restore program runs.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}

static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
			       unsigned int ls_offset, unsigned int size,
			       unsigned int tag, unsigned int rclass,
			       unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

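	/*
	 * Break the transfer into MFC_MAX_DMA_SIZE chunks and
	 * enqueue each one through the problem-state MFC command
	 * registers, retrying while the returned command status
	 * indicates the enqueue did not succeed.
	 */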
	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}

static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/*
	 * Issue a DMA command to copy the first 16kb of local
	 * store to the CSA, making room for the save program.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Write SPU_NPC with zero: the SPU-side save/restore
	 * programs used by this implementation have their entry
	 * points at local store address 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/*
	 * Write SPU_Sig_Notify_1 with the upper 32 bits of the
	 * LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
	eieio();
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/*
	 * Write SPU_Sig_Notify_2 with the lower 32 bits of the
	 * LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
	eieio();
}

static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/*
	 * Issue a DMA command (GETFS) to copy the context-save
	 * program into local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * Enable tag group 0 in the PPU_QueryMask register, so
	 * completion of the DMAs issued above can be detected.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/*
	 * Poll the Prxy_TagStatus register until tag group 0
	 * completes, then clear the class 0 and class 2 interrupt
	 * status that completion may have latched.
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/*
	 * Poll SPU_Status until the SPU stops, then clear the
	 * class 0 and class 2 interrupt status that stopping may
	 * have latched.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/*
	 * The save program exits with a "save complete"
	 * stop-and-signal code; any other SPU_Status value means
	 * the SPU-side save failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/*
	 * If required, notify the "using application" that the
	 * SPU task has been terminated.  Not implemented (TBD).
	 */
}

static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/*
	 * Suspend the MFC DMA queue and halt the decrementer in
	 * a single MFC_CNTL write.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Poll MFC_CNTL until the DMA queue suspension completes. */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * If the SPU is running, stop it in a way that is safe
	 * for whichever state it is in, handling the isolate
	 * cases explicitly.  Returns 1 if the SPE was running,
	 * otherwise 0.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
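		/* Wait out an isolated exit already in progress. */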
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
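		/*
		 * Stop a context caught in isolated load or isolate
		 * state, then nudge it with run-control 0x2 and wait
		 * for it to stop again.
		 */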
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
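		/* Release a context stalled waiting on a channel. */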
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * If the SPU is already stopped, clear any lingering
	 * isolate state before the context is saved or reset.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
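		/* Let a pending isolated exit run to completion. */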
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
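		/*
		 * Kick a context stuck in isolated load or isolate
		 * state with run-control 0x2 and wait for it to stop.
		 */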
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Reset CH 1 (data only, no channel count). */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27], data and counts. */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Reset the following CH: [21,23,28,29,30] to their default counts. */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/*
	 * Determine how the SPU stopped and record a matching
	 * "stopped status" code, plus the 16-bit stop code where
	 * applicable, in the LSCSA, so the SPU-side restore
	 * program can reproduce the same exit condition.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1: stop-and-signal with invalid instruction. */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1: stop-and-signal with halt. */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1: single step with stop-and-signal. */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1: single step with invalid instruction. */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1: stop-and-signal. */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1: halt. */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1: single step. */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1: invalid instruction. */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}

static inline void setup_spu_status_part2(struct spu_state *csa,
					  struct spu *spu)
{
	u32 mask;

	/*
	 * If none of the stop, halt, single-step, invalid-
	 * instruction or running bits were set, the context was
	 * plain running; record SPU_STOPPED_STATUS_R so the
	 * restore program restarts it.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Restore the resource allocation group ID and enable
	 * registers from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/*
	 * Issue a DMA command (GETFS) to copy the context-restore
	 * program into local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/*
	 * If the decrementer was running when the context was
	 * saved, account for the time spent switched out: mark it
	 * running, flag a wrap if more time elapsed than remained,
	 * and subtract the elapsed cycles from the saved value.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}

		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}

static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Copy the saved PU mailbox data into the LSCSA. */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Copy the saved PU interrupting mailbox data into the LSCSA. */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/*
	 * The restore program exits with a "restore complete"
	 * stop-and-signal code; any other SPU_Status value means
	 * the SPU-side restore failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore SPU_PrivCntl from the CSA. */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/*
	 * If the saved context stopped via halt, stop-and-signal,
	 * single step or an invalid instruction, run the restore
	 * program so it reproduces that exit status, then wait
	 * for the SPU to stop.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/*
	 * If no stop condition was saved, the context was plain
	 * running: start the SPU, wait until it enters the run
	 * state, then issue a stop so the remainder of the
	 * restore sequence runs with the SPU stopped.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/*
	 * Issue a DMA command to copy the first 16kb of local
	 * store back from the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Suspend the MFC DMA queue. */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
	eieio();
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Mask all SPU interrupt classes and clear any status
	 * still latched from the previous context, under the
	 * register lock.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/*
	 * If the saved MFC queues were not empty, restore the 8 PU
	 * queue entries and the 16 SPU queue entries from the CSA.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}

static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore the PPU_QueryMask register from the CSA. */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore the PPU_QueryType register from the CSA. */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore the MFC tag status query register from the CSA. */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore the MFC command buffers from the CSA. */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore the MFC atomic status register from the CSA. */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore the MFC_TCLASS_ID register from the CSA. */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}

static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/*
	 * If a lock-line reservation was lost while the context
	 * was switched out, raise the MFC "LLR lost" event on
	 * channel 0 so the context observes it on resume: set the
	 * event bit and, if the event was enabled and not already
	 * pending, make the channel count non-zero.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/*
	 * If the decrementer wrapped while the context was
	 * switched out, raise the decrementer event (bit 0x20) on
	 * channel 0, making the channel count non-zero if the
	 * event was enabled and not already pending.
	 */
	if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
		return;

	if ((csa->spu_chnlcnt_RW[0] == 0) &&
	    (csa->spu_chnldata_RW[1] & 0x20) &&
	    !(csa->spu_chnldata_RW[0] & 0x20))
		csa->spu_chnlcnt_RW[0] = 1;

	csa->spu_chnldata_RW[0] |= 0x20;
}

static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore the following CH: [0,3,4,24,25,27], data and counts. */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/*
	 * Restore the counts for CH 9, 21 and 23; CH 21 (the MFC
	 * command channel) gets its saved count back.
	 */
	ch_counts[0] = 1UL;
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	ch_counts[2] = 1UL;
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore SPU_LSLR from the CSA. */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore SPU_Cfg from the CSA. */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Restore PM_Trace_Tag_Wait_Mask from the CSA.
	 * Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore SPU_NPC from the CSA. */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore the SPU mailbox channel (CH 29): count and data. */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/*
	 * If the saved mailbox status shows no PU mailbox data
	 * outstanding, drain the PU mailbox with a dummy read.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/*
	 * If the saved status shows no interrupting mailbox data
	 * outstanding, drain it with a dummy read and clear the
	 * latched class 2 mailbox interrupt.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		eieio();
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore MFC_SR1 from the CSA. */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

static inline void set_int_route(struct spu_state *csa, struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

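	/* Route this SPU's interrupts to the CPU that last ran the context. */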
	spu_cpu_affinity_set(spu, ctx->last_ran);
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/*
	 * Restore other SPUs' access to this SPU, if it was
	 * previously removed.  Not implemented (TBD).
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/*
	 * If the context was running at save time, write
	 * SPU_RunCntl[R]=1 so it resumes execution.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore MFC_CNTL from the CSA. */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();

	/*
	 * The MFC command queue is put back into the state that
	 * was evident prior to the context switch; the suspend
	 * flag set during the switch is cleared, so queue
	 * processing can continue normally.
	 */
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Re-enable user-space access to this SPU's problem-state
	 * MMIO area, i.e. undo inhibit_user_access().
	 * Not implemented (TBD).
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/*
	 * Reset the software context-switch-active flag.
	 * Not performed by this implementation.
	 */
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Re-enable the interrupt masks saved in the CSA. */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}

static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Quiesce the SPU: disable execution, suspend the MFC
	 * command queues and decrementer, and mask SPU interrupts.
	 *
	 * Returns      0 on success.
	 *              2 or 6 if the SPU is found in isolate state
	 *              at the corresponding check below.
	 */
	if (check_spu_isolate(prev, spu)) {
		return 2;
	}
	disable_interrupts(prev, spu);
	set_watchdog_timer(prev, spu);
	inhibit_user_access(prev, spu);
	if (check_spu_isolate(prev, spu)) {
		return 6;
	}
	set_switch_pending(prev, spu);
	save_mfc_cntl(prev, spu);
	save_spu_runcntl(prev, spu);
	save_mfc_sr1(prev, spu);
	save_spu_status(prev, spu);
	save_mfc_stopped_status(prev, spu);
	halt_mfc_decr(prev, spu);
	save_timebase(prev, spu);
	remove_other_spu_access(prev, spu);
	do_mfc_mssync(prev, spu);
	issue_mfc_tlbie(prev, spu);
	handle_pending_interrupts(prev, spu);

	return 0;
}

static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Save regions of the privileged and problem-state areas
	 * into the CSA; performed entirely by the PPE.
	 */
	save_mfc_queues(prev, spu);
	save_ppu_querymask(prev, spu);
	save_ppu_querytype(prev, spu);
	save_ppu_tagstatus(prev, spu);
	save_mfc_csr_tsq(prev, spu);
	save_mfc_csr_cmd(prev, spu);
	save_mfc_csr_ato(prev, spu);
	save_mfc_tclass_id(prev, spu);
	set_mfc_tclass_id(prev, spu);
	save_mfc_cmd(prev, spu);
	purge_mfc_queue(prev, spu);
	wait_purge_complete(prev, spu);
	setup_mfc_sr1(prev, spu);
	save_spu_npc(prev, spu);
	save_spu_privcntl(prev, spu);
	reset_spu_privcntl(prev, spu);
	save_spu_lslr(prev, spu);
	reset_spu_lslr(prev, spu);
	save_spu_cfg(prev, spu);
	save_pm_trace(prev, spu);
	save_mfc_rag(prev, spu);
	save_ppu_mb_stat(prev, spu);
	save_ppu_mb(prev, spu);
	save_ppuint_mb(prev, spu);
	save_ch_part1(prev, spu);
	save_spu_mb(prev, spu);
	reset_ch(prev, spu);
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Save regions of the local store and register file;
	 * mostly performed by the SPU-side save program.
	 */
	resume_mfc_queue(prev, spu);

	setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
	set_switch_active(prev, spu);
	enable_interrupts(prev, spu);
	save_ls_16kb(prev, spu);
	set_spu_npc(prev, spu);
	set_signot1(prev, spu);
	set_signot2(prev, spu);
	send_save_code(prev, spu);
	set_ppu_querymask(prev, spu);
	wait_tag_complete(prev, spu);
	wait_spu_stopped(prev, spu);
}

static void force_spu_isolate_exit(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Stop SPE execution and wait for completion. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	iobarrier_rw();
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Restart SPE master run control. */
	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
	iobarrier_w();

	/* Initiate isolate exit request and wait for completion. */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	iobarrier_w();
	out_be32(&prob->spu_runcntl_RW, 2);
	iobarrier_rw();
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
			& SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	iobarrier_w();
}

/**
 * stop_spu_isolate
 *	Check SPU run-control state and force isolated
 *	exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolated
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}

static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Reset an SPU to a known quiescent state, either after a
	 * failed save or before restoring onto an SPU whose
	 * hardware state is unknown.
	 */
	disable_interrupts(prev, spu);
	inhibit_user_access(prev, spu);
	terminate_spu_app(prev, spu);
	set_switch_pending(prev, spu);
	stop_spu_isolate(spu);
	remove_other_spu_access(prev, spu);
	suspend_mfc_and_halt_decr(prev, spu);
	wait_suspend_mfc_complete(prev, spu);
	if (!suspend_spe(prev, spu))
		clear_spu_status(prev, spu);
	do_mfc_mssync(prev, spu);
	issue_mfc_tlbie(prev, spu);
	handle_pending_interrupts(prev, spu);
	purge_mfc_queue(prev, spu);
	wait_purge_complete(prev, spu);
	reset_spu_privcntl(prev, spu);
	reset_spu_lslr(prev, spu);
	setup_mfc_sr1(prev, spu);
	spu_invalidate_slbs(spu);
	reset_ch_part1(prev, spu);
	reset_ch_part2(prev, spu);
	enable_interrupts(prev, spu);
	set_switch_active(prev, spu);
	set_mfc_tclass_id(prev, spu);
	resume_mfc_queue(prev, spu);
}

static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Restore regions of the local store and register file;
	 * mostly performed by the SPU-side restore program.
	 */
	set_watchdog_timer(next, spu);
	setup_spu_status_part1(next, spu);
	setup_spu_status_part2(next, spu);
	restore_mfc_rag(next, spu);

	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
	set_spu_npc(next, spu);
	set_signot1(next, spu);
	set_signot2(next, spu);
	setup_decr(next, spu);
	setup_ppu_mb(next, spu);
	setup_ppuint_mb(next, spu);
	send_restore_code(next, spu);
	set_ppu_querymask(next, spu);
	wait_tag_complete(next, spu);
	wait_spu_stopped(next, spu);
}

static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Restore regions of the privileged and problem-state
	 * areas; performed by the PPE.
	 */
	restore_spu_privcntl(next, spu);
	restore_status_part1(next, spu);
	restore_status_part2(next, spu);
	restore_ls_16kb(next, spu);
	wait_tag_complete(next, spu);
	suspend_mfc(next, spu);
	wait_suspend_mfc_complete(next, spu);
	issue_mfc_tlbie(next, spu);
	clear_interrupts(next, spu);
	restore_mfc_queues(next, spu);
	restore_ppu_querymask(next, spu);
	restore_ppu_querytype(next, spu);
	restore_mfc_csr_tsq(next, spu);
	restore_mfc_csr_cmd(next, spu);
	restore_mfc_csr_ato(next, spu);
	restore_mfc_tclass_id(next, spu);
	set_llr_event(next, spu);
	restore_decr_wrapped(next, spu);
	restore_ch_part1(next, spu);
	restore_ch_part2(next, spu);
	restore_spu_lslr(next, spu);
	restore_spu_cfg(next, spu);
	restore_pm_trace(next, spu);
	restore_spu_npc(next, spu);
	restore_spu_mb(next, spu);
	check_ppu_mb_stat(next, spu);
	check_ppuint_mb_stat(next, spu);
	spu_invalidate_slbs(spu);
	restore_mfc_sr1(next, spu);
	set_int_route(next, spu);
	restore_other_spu_access(next, spu);
	restore_spu_runcntl(next, spu);
	restore_mfc_cntl(next, spu);
	enable_user_access(next, spu);
	reset_switch_active(next, spu);
	reenable_interrupts(next, spu);
}

static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce the SPU,
	 *     (b) save the CSA, performed by the PPE,
	 *     (c) save the LSCSA, mostly performed by the SPU.
	 *
	 * Returns 0 on success, 2 or 6 if the SPU could not be
	 * quiesced, or 1 if the SPU-side save failed.  On a
	 * quiesce failure the SPU is harvested (reset) so it is
	 * left in a usable state.
	 */
	rc = quiece_spu(prev, spu);
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);
	save_lscsa(prev, spu);
	return check_save_status(prev, spu);
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *     (a) harvest (reset) the SPU, done by the caller,
	 *     (b) restore the LSCSA, mostly performed by the SPU,
	 *     (c) restore the CSA, performed by the PPE.
	 *
	 * Returns 0 on success, or the nonzero result of
	 * check_restore_status() if the SPU-side restore failed.
	 */
	restore_lscsa(next, spu);
	rc = check_restore_status(next, spu);
	switch (rc) {
	default:
		/* Failed.  Return now, CSA is intact. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);

	return 0;
}

/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	rc = __do_spu_save(prev, spu);
	release_spu_lock(spu);
	if (rc != 0 && rc != 2 && rc != 6) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(spu_save);

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);

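/*
 * Default channel counts, run-control and mailbox status used to
 * initialize the problem-state area of a new context.
 */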
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master run control. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
		MFC_STATE1_MASTER_RUN_CONTROL_MASK |
		MFC_STATE1_PROBLEM_STATE_MASK |
		MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
		CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
		CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
		CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
		CLASS2_ENABLE_SPU_HALT_INTR |
		CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
		MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
		MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}

/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as needed for the given spu.  It also provides a placeholder
 * to return an error code.
 */
int spu_init_csa(struct spu_state *csa)
{
	int rc;

	if (!csa)
		return -EINVAL;
	memset(csa, 0, sizeof(struct spu_state));

	rc = spu_alloc_lscsa(csa);
	if (rc)
		return rc;

	spin_lock_init(&csa->register_lock);

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);

	return 0;
}

void spu_fini_csa(struct spu_state *csa)
{
	spu_free_lscsa(csa);
}