// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * spu_switch.c
 *
 * (C) Copyright IBM Corp. 2005
 *
 * Author: Mark Nutter <mnutter@us.ibm.com>
 *
 * Host-side part of SPU context switch sequence outlined in
 * Chapter 55 of the "Cell Broadband Engine Architecture",
 * Version 1.0.1.
 *
 * A fully preemptive switch of an SPU is very expensive in terms
 * of time and system resources.  SPU Book IV indicates that SPU
 * allocation should be considered a hard assignment only when
 * absolutely necessary.
 */
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/hardirq.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>

#include "spufs.h"

#include "spu_save_dump.h"
#include "spu_restore_dump.h"

#if 0
#define POLL_WHILE_TRUE(_c) { \
		do { \
		} while (_c); \
	}
#else
#define RELAX_SPIN_COUNT	1000
#define POLL_WHILE_TRUE(_c) { \
		do { \
			int _i; \
			for (_i = 0; _i < RELAX_SPIN_COUNT && (_c); _i++) { \
				cpu_relax(); \
			} \
			if (unlikely(_c)) yield(); \
			else break; \
		} while (_c); \
	}
#endif

#define POLL_WHILE_FALSE(_c)	POLL_WHILE_TRUE(!(_c))
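
/*
 * Note: POLL_WHILE_TRUE(cond) spins on cond for up to RELAX_SPIN_COUNT
 * iterations of cpu_relax(), then yields the processor for as long as
 * the condition still holds.  The "#if 0" variant above is a pure
 * busy-wait, kept only for reference and debugging.
 */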

static inline void acquire_spu_lock(struct spu *spu)
{
	/* Save, Step 1:
	 * Restore, Step 1:
	 *     Acquire SPU-specific mutual exclusion lock.
	 *     TBD.
	 */
}

static inline void release_spu_lock(struct spu *spu)
{
	/* Restore, Step 76:
	 *     Release SPU-specific mutual exclusion lock.
	 *     TBD.
	 */
}

static inline int check_spu_isolate(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 isolate_state;

	/* Check SPU run-control state:
	 *     Save, Step 2:
	 *     Save, Step 6:
	 *     Restore, Step 4:
	 *         If SPU_Status[E,L,IS] any field is '1', the context
	 *         is in an isolation state and cannot be switched by
	 *         the normal sequence; return non-zero so the caller
	 *         harvests the SPU instead.
	 */
	isolate_state = SPU_STATUS_ISOLATED_STATE |
	    SPU_STATUS_ISOLATED_LOAD_STATUS | SPU_STATUS_ISOLATED_EXIT_STATUS;
	return (in_be32(&prob->spu_status_R) & isolate_state) ? 1 : 0;
}

static inline void disable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 3:
	 * Restore, Step 2:
	 *     Save INT_Mask_class0 in CSA.
	 *     Write INT_MASK_class0 with value of 0.
	 *     Save INT_Mask_class1 in CSA.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Save INT_Mask_class2 in CSA.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Synchronize all three interrupts to be sure
	 *     we no longer execute a handler on another CPU.
	 */
	spin_lock_irq(&spu->register_lock);
	if (csa) {
		csa->priv1.int_mask_class0_RW = spu_int_mask_get(spu, 0);
		csa->priv1.int_mask_class1_RW = spu_int_mask_get(spu, 1);
		csa->priv1.int_mask_class2_RW = spu_int_mask_get(spu, 2);
	}
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	eieio();
	spin_unlock_irq(&spu->register_lock);

	/*
	 * This flag needs to be set before calling synchronize_irq so
	 * that the update will be visible to the relevant IRQ handlers.
	 */
	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
	synchronize_irq(spu->irqs[0]);
	synchronize_irq(spu->irqs[1]);
	synchronize_irq(spu->irqs[2]);
}

static inline void set_watchdog_timer(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 4:
	 * Restore, Step 26.
	 *     Set a software watchdog timer, which specifies the
	 *     maximum allowable time for a context save sequence.
	 *
	 *     For the present, this implementation will not set a
	 *     global watchdog timer, as virtualization and variable
	 *     system load may cause unpredictable execution times.
	 */
}

static inline void inhibit_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 5:
	 * Restore, Step 3:
	 *     Inhibit user-space access (if provided) to this
	 *     SPU by unmapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void set_switch_pending(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 7:
	 * Restore, Step 5:
	 *     Set a software context switch pending flag.
	 *     Done above in Step 3 - disable_interrupts().
	 */
}

static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 8:
	 *     Suspend DMA and save MFC_CNTL.
	 */
	switch (in_be64(&priv2->mfc_control_RW) &
	       MFC_CNTL_SUSPEND_DMA_STATUS_MASK) {
	case MFC_CNTL_SUSPEND_IN_PROGRESS:
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		fallthrough;
	case MFC_CNTL_SUSPEND_COMPLETE:
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) |
				MFC_CNTL_SUSPEND_DMA_QUEUE;
		break;
	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
				 MFC_CNTL_SUSPEND_COMPLETE);
		if (csa)
			csa->priv2.mfc_control_RW =
				in_be64(&priv2->mfc_control_RW) &
				~MFC_CNTL_SUSPEND_DMA_QUEUE &
				~MFC_CNTL_SUSPEND_MASK;
		break;
	}
}

static inline void save_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 9:
	 *     Save SPU_Runcntl in the CSA.  This value contains
	 *     the "Application Desired State".
	 */
	csa->prob.spu_runcntl_RW = in_be32(&prob->spu_runcntl_RW);
}

static inline void save_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 10:
	 *     Save MFC_SR1 in the CSA.
	 */
	csa->priv1.mfc_sr1_RW = spu_mfc_sr1_get(spu);
}

static inline void save_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 11:
	 *     Read SPU_Status[R], and save to CSA.
	 *     If the SPU is still running, stop it and poll until
	 *     SPU_Status[R]=0; a context that was running with no
	 *     stop condition is recorded as SPU_STATUS_RUNNING so
	 *     that the restore sequence can resume it.
	 */
	if ((in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) == 0) {
		csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	} else {
		u32 stopped;

		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
		stopped =
		    SPU_STATUS_INVALID_INSTR | SPU_STATUS_SINGLE_STEP |
		    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
		if ((in_be32(&prob->spu_status_R) & stopped) == 0)
			csa->prob.spu_status_R = SPU_STATUS_RUNNING;
		else
			csa->prob.spu_status_R = in_be32(&prob->spu_status_R);
	}
}

static inline void save_mfc_stopped_status(struct spu_state *csa,
		struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
			MFC_CNTL_DMA_QUEUES_EMPTY;

	/* Save, Step 12:
	 *     Read MFC_CNTL[Ds] (decrementer running) and
	 *     MFC_CNTL[Q] (DMA queues empty), and fold them
	 *     into the saved copy of MFC_CNTL in the CSA.
	 */
	csa->priv2.mfc_control_RW &= ~mask;
	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
}

static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 13:
	 *     Write MFC_CNTL[Dh] set to a '1' to halt
	 *     the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW,
		 MFC_CNTL_DECREMENTER_HALTED | MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void save_timebase(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 14:
	 *     Record the current timebase in CSA.suspend_time;
	 *     setup_decr() uses it on restore to account for the
	 *     time that elapsed while the context was switched out.
	 */
	csa->suspend_time = get_cycles();
}

static inline void remove_other_spu_access(struct spu_state *csa,
					   struct spu *spu)
{
	/* Save, Step 15:
	 *     Remove other SPU access to this SPU by unmapping
	 *     this SPU's pages from their address space.  TBD.
	 */
}

static inline void do_mfc_mssync(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 16:
	 * Restore, Step 11.
	 *     Write SPU_MSSync register.  Poll SPU_MSSync[P]
	 *     for a value of 0.
	 */
	out_be64(&prob->spc_mssync_RW, 1UL);
	POLL_WHILE_TRUE(in_be64(&prob->spc_mssync_RW) & MS_SYNC_PENDING);
}

static inline void issue_mfc_tlbie(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 17:
	 * Restore, Step 12.
	 * Restore, Step 48.
	 *     Write TLB_Invalidate_Entry[IS,VPN,L,Ra]=0 register,
	 *     then issue a heavyweight sync to flush out the
	 *     request.
	 */
	spu_tlb_invalidate(spu);
	mb();
}

static inline void handle_pending_interrupts(struct spu_state *csa,
					     struct spu *spu)
{
	/* Save, Step 18:
	 *     Handle any pending interrupts from this SPU
	 *     here.  This is OS or hypervisor specific.  One
	 *     option is to re-enable interrupts to handle any
	 *     pending interrupts, with the interrupt handlers
	 *     recognizing the software Context Switch Pending
	 *     flag, to ensure the SPU execution or MFC command
	 *     queue is not restarted.  TBD.
	 */
}

static inline void save_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 19:
	 *     If MFC_Cntl[Se]=0 then save
	 *     MFC command queues.
	 */
	if ((in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DMA_QUEUES_EMPTY) == 0) {
		for (i = 0; i < 8; i++) {
			csa->priv2.puq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data0_RW);
			csa->priv2.puq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data1_RW);
			csa->priv2.puq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data2_RW);
			csa->priv2.puq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			csa->priv2.spuq[i].mfc_cq_data0_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data0_RW);
			csa->priv2.spuq[i].mfc_cq_data1_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data1_RW);
			csa->priv2.spuq[i].mfc_cq_data2_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data2_RW);
			csa->priv2.spuq[i].mfc_cq_data3_RW =
			    in_be64(&priv2->spuq[i].mfc_cq_data3_RW);
		}
	}
}

static inline void save_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 20:
	 *     Save the PPU_QueryMask register
	 *     in the CSA.
	 */
	csa->prob.dma_querymask_RW = in_be32(&prob->dma_querymask_RW);
}

static inline void save_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 21:
	 *     Save the PPU_QueryType register
	 *     in the CSA.
	 */
	csa->prob.dma_querytype_RW = in_be32(&prob->dma_querytype_RW);
}

static inline void save_ppu_tagstatus(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save the Prxy_TagStatus register in the CSA.
	 *
	 *     It is unnecessary to restore dma_tagstatus_R, however,
	 *     dma_tagstatus_R in the CSA is accessed via backing_ops,
	 *     so we must save it.
	 */
	csa->prob.dma_tagstatus_R = in_be32(&prob->dma_tagstatus_R);
}

static inline void save_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 22:
	 *     Save the MFC_CSR_TSQ register
	 *     in the CSA.
	 */
	csa->priv2.spu_tag_status_query_RW =
	    in_be64(&priv2->spu_tag_status_query_RW);
}

static inline void save_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 23:
	 *     Save the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers in the CSA.
	 */
	csa->priv2.spu_cmd_buf1_RW = in_be64(&priv2->spu_cmd_buf1_RW);
	csa->priv2.spu_cmd_buf2_RW = in_be64(&priv2->spu_cmd_buf2_RW);
}

static inline void save_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 24:
	 *     Save the MFC_CSR_ATO register in
	 *     the CSA.
	 */
	csa->priv2.spu_atomic_status_RW = in_be64(&priv2->spu_atomic_status_RW);
}

static inline void save_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 25:
	 *     Save the MFC_TCLASS_ID register in
	 *     the CSA.
	 */
	csa->priv1.mfc_tclass_id_RW = spu_mfc_tclass_id_get(spu);
}

static inline void set_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 26:
	 * Restore, Step 23.
	 *     Write the MFC_TCLASS_ID register with
	 *     the value 0x10000000.
	 */
	spu_mfc_tclass_id_set(spu, 0x10000000);
	eieio();
}

static inline void purge_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 27:
	 * Restore, Step 14.
	 *     Write MFC_CNTL[Pc]=1 (purge queue).
	 */
	out_be64(&priv2->mfc_control_RW,
			MFC_CNTL_PURGE_DMA_REQUEST |
			MFC_CNTL_SUSPEND_MASK);
	eieio();
}

static inline void wait_purge_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 28:
	 *     Poll MFC_CNTL[Ps] until value '11' is read
	 *     (purge complete).
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_PURGE_DMA_STATUS_MASK) ==
			 MFC_CNTL_PURGE_DMA_COMPLETE);
}

static inline void setup_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 30:
	 * Restore, Step 18:
	 *     Write MFC_SR1 with MFC_SR1[D=0,S=1] and
	 *     MFC_SR1[TL,R,Pr,T] set correctly for the
	 *     OS specific environment.
	 *
	 *     Implementation note: The SPU-side code
	 *     for save/restore is privileged, so the
	 *     MFC_SR1[Pr] bit is not set.
	 */
	spu_mfc_sr1_set(spu, (MFC_STATE1_MASTER_RUN_CONTROL_MASK |
			      MFC_STATE1_RELOCATE_MASK |
			      MFC_STATE1_BUS_TLBIE_MASK));
}

static inline void save_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 31:
	 *     Save SPU_NPC in the CSA.
	 */
	csa->prob.spu_npc_RW = in_be32(&prob->spu_npc_RW);
}

static inline void save_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 32:
	 *     Save SPU_PrivCntl in the CSA.
	 */
	csa->priv2.spu_privcntl_RW = in_be64(&priv2->spu_privcntl_RW);
}

static inline void reset_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 33:
	 * Restore, Step 16:
	 *     Write SPU_PrivCntl[S,Le,A] fields reset to 0.
	 */
	out_be64(&priv2->spu_privcntl_RW, 0UL);
	eieio();
}

static inline void save_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 34:
	 *     Save SPU_LSLR in the CSA.
	 */
	csa->priv2.spu_lslr_RW = in_be64(&priv2->spu_lslr_RW);
}

static inline void reset_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 35:
	 * Restore, Step 17.
	 *     Reset SPU_LSLR to permit access to the
	 *     entire local store.
	 */
	out_be64(&priv2->spu_lslr_RW, LS_ADDR_MASK);
	eieio();
}

static inline void save_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 36:
	 *     Save SPU_Cfg in the CSA.
	 */
	csa->priv2.spu_cfg_RW = in_be64(&priv2->spu_cfg_RW);
}

static inline void save_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 37:
	 *     Save PM_Trace_Tag_Wait_Mask in the CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void save_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 38:
	 *     Save RA_GROUP_ID register and the
	 *     RA_ENABLE register in the CSA.
	 */
	csa->priv1.resource_allocation_groupID_RW =
		spu_resource_allocation_groupID_get(spu);
	csa->priv1.resource_allocation_enable_RW =
		spu_resource_allocation_enable_get(spu);
}

static inline void save_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 39:
	 *     Save MB_Stat register in the CSA.
	 */
	csa->prob.mb_stat_R = in_be32(&prob->mb_stat_R);
}

static inline void save_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 40:
	 *     Save the PPU_MB register in the CSA.
	 */
	csa->prob.pu_mb_R = in_be32(&prob->pu_mb_R);
}

static inline void save_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 41:
	 *     Save the PPUINT_MB register in the CSA.
	 */
	csa->priv2.puint_mb_R = in_be64(&priv2->puint_mb_R);
}

static inline void save_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Save, Step 42:
	 *     Save the following channels in the CSA:
	 *     CH 1 (data only), then [0,3,4,24,25,27].
	 */

	/* Save CH 1, without channel count */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	csa->spu_chnldata_RW[1] = in_be64(&priv2->spu_chnldata_RW);

	/* Save the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		csa->spu_chnldata_RW[idx] = in_be64(&priv2->spu_chnldata_RW);
		csa->spu_chnlcnt_RW[idx] = in_be64(&priv2->spu_chnlcnt_RW);
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void save_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Save, Step 43:
	 *     Save SPU Read Mailbox Channel (29): the
	 *     channel count and all four data slots.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	csa->spu_chnlcnt_RW[29] = in_be64(&priv2->spu_chnlcnt_RW);
	for (i = 0; i < 4; i++) {
		csa->spu_mailbox_data[i] = in_be64(&priv2->spu_chnldata_RW);
	}
	out_be64(&priv2->spu_chnlcnt_RW, 0UL);
	eieio();
}

static inline void save_mfc_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 44:
	 *     Save MFC_CMD Channel (21) count.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 21UL);
	eieio();
	csa->spu_chnlcnt_RW[21] = in_be64(&priv2->spu_chnlcnt_RW);
	eieio();
}

static inline void reset_ch(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[4] = { 21UL, 23UL, 28UL, 30UL };
	u64 ch_counts[4] = { 16UL, 1UL, 1UL, 1UL };
	u64 idx;
	int i;

	/* Save, Step 45:
	 *     Reset the following CH: [21, 23, 28, 30]
	 */
	for (i = 0; i < 4; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void resume_mfc_queue(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Save, Step 46:
	 * Restore, Step 25.
	 *     Write MFC_CNTL[Sc]=0 (resume queue processing).
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESUME_DMA_QUEUE);
}

static inline void setup_mfc_slbs(struct spu_state *csa, struct spu *spu,
		unsigned int *code, int code_size)
{
	/* Save, Step 47:
	 * Restore, Step 30.
	 *     If MFC_SR1[R]=1, write 0 to SLB_Invalidate_All
	 *     register, then initialize SLB_VSID and SLB_ESID
	 *     registers for access to the LSCSA and the SPU
	 *     save/restore code.
	 *
	 *     Implementation note: the kernel services SPE SLB
	 *     misses, so all that is needed here is to invalidate
	 *     the SPU's SLB and pre-load entries for the kernel
	 *     context, the LSCSA, and the save or restore program
	 *     passed in via 'code'.
	 */
	spu_invalidate_slbs(spu);
	spu_setup_kernel_slbs(spu, csa->lscsa, code, code_size);
}

static inline void set_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Save, Step 48:
	 * Restore, Step 23.
	 *     Change the software context switch pending flag
	 *     to context switch active.  This implementation
	 *     does not use a watchdog timer.
	 *
	 *     If a context fault was pending, arrange for the
	 *     interrupted DMA command to be restarted when the
	 *     saved MFC_CNTL image is written back.
	 */
	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
	mb();
}
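
/*
 * Note: the MFC_CNTL_RESTART_DMA_COMMAND bit accumulated above lands in
 * the saved mfc_control_RW image, which restore_mfc_cntl() later writes
 * back to the hardware; a DMA command that was interrupted by a pending
 * context fault is thereby re-issued when the context is next loaded.
 */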

static inline void enable_interrupts(struct spu_state *csa, struct spu *spu)
{
	unsigned long class1_mask = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;

	/* Save, Step 49:
	 * Restore, Step 22:
	 *     Reset and then enable interrupts, as
	 *     needed by this implementation.
	 *
	 *     The class1 (translation) interrupts are enabled so
	 *     that page faults taken by the save/restore DMA
	 *     transfers can be serviced while the switch is in
	 *     progress.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, class1_mask);
	spu_int_mask_set(spu, 2, 0ul);
	spin_unlock_irq(&spu->register_lock);
}

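/*
 * send_mfc_dma() enqueues proxy DMA commands via the problem-state MFC
 * command registers, splitting large transfers into MFC_MAX_DMA_SIZE
 * chunks.  The command-status word read back after each enqueue is zero
 * on success; a non-zero value in the low two bits indicates a failed
 * enqueue (0x2 is typically a full command queue, in which case we
 * cpu_relax() before re-issuing the command).
 */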
static inline int send_mfc_dma(struct spu *spu, unsigned long ea,
		unsigned int ls_offset, unsigned int size,
		unsigned int tag, unsigned int rclass,
		unsigned int cmd)
{
	struct spu_problem __iomem *prob = spu->problem;
	union mfc_tag_size_class_cmd command;
	unsigned int transfer_size;
	volatile unsigned int status = 0x0;

	while (size > 0) {
		transfer_size =
		    (size > MFC_MAX_DMA_SIZE) ? MFC_MAX_DMA_SIZE : size;
		command.u.mfc_size = transfer_size;
		command.u.mfc_tag = tag;
		command.u.mfc_rclassid = rclass;
		command.u.mfc_cmd = cmd;
		do {
			out_be32(&prob->mfc_lsa_W, ls_offset);
			out_be64(&prob->mfc_ea_W, ea);
			out_be64(&prob->mfc_union_W.all64, command.all64);
			status =
			    in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
			if (unlikely(status & 0x2)) {
				cpu_relax();
			}
		} while (status & 0x3);
		size -= transfer_size;
		ea += transfer_size;
		ls_offset += transfer_size;
	}
	return 0;
}

static inline void save_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_PUT_CMD;

	/* Save, Step 50:
	 *     Issue a DMA command to copy the first 16K bytes
	 *     of local storage to the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 51:
	 * Restore, Step 31.
	 *     Write SPU_NPC[IE]=0 and SPU_NPC[LSA] to the entry
	 *     point of the context save/restore code in local
	 *     storage.
	 *
	 *     This implementation uses SPU-side save/restore
	 *     programs with entry points at LSA of 0.
	 */
	out_be32(&prob->spu_npc_RW, 0);
	eieio();
}

static inline void set_signot1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 52:
	 * Restore, Step 32:
	 *    Write SPU_Sig_Notify_1 register with upper 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify1, addr64.ui[0]);
	eieio();
}

static inline void set_signot2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	union {
		u64 ull;
		u32 ui[2];
	} addr64;

	/* Save, Step 53:
	 * Restore, Step 33:
	 *    Write SPU_Sig_Notify_2 register with lower 32-bits
	 *    of the CSA.LSCSA effective address.
	 */
	addr64.ull = (u64) csa->lscsa;
	out_be32(&prob->signal_notify2, addr64.ui[1]);
	eieio();
}

static inline void send_save_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_save_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_save_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Save, Step 54:
	 *     Issue a DMA command to transfer the SPU save
	 *     code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void set_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Save, Step 55:
	 * Restore, Step 38.
	 *     Write PPU_QueryMask=1 (enable Tag Group 0)
	 *     so that completion of the transfer can be
	 *     detected via the tag status register.
	 */
	out_be32(&prob->dma_querymask_RW, MFC_TAGID_TO_TAGMASK(0));
	eieio();
}

static inline void wait_tag_complete(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask = MFC_TAGID_TO_TAGMASK(0);
	unsigned long flags;

	/* Save, Step 56:
	 * Restore, Step 39.
	 * Restore, Step 46.
	 *     Poll PPU_TagStatus[gn] until 01 (Tag group 0
	 *     complete).
	 */
	POLL_WHILE_FALSE(in_be32(&prob->dma_tagstatus_R) & mask);

	/* Clear interrupt status raised while polling for completion. */
	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline void wait_spu_stopped(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	unsigned long flags;

	/* Save, Step 57:
	 * Restore, Step 40.
	 *     Poll until SPU_Status[R]=0.
	 */
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Clear interrupt status raised while the SPU program ran. */
	local_irq_save(flags);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	local_irq_restore(flags);
}

static inline int check_save_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/*
	 * If SPU_Status[P]=1 and the stop code is SPU_SAVE_COMPLETE,
	 * the SPU-side context save succeeded; otherwise it failed.
	 */
	complete = ((SPU_SAVE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void terminate_spu_app(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 1:
	 *     If required, notify the "using application" that
	 *     the SPU task has been terminated.  TBD.
	 */
}

static inline void suspend_mfc_and_halt_decr(struct spu_state *csa,
		struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 7:
	 *     Write MFC_Cntl[Dh,Sc,Sm]='1','1','0' to suspend
	 *     the MFC command queue and halt the decrementer.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE |
		 MFC_CNTL_DECREMENTER_HALTED);
	eieio();
}

static inline void wait_suspend_mfc_complete(struct spu_state *csa,
					     struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 8:
	 * Restore, Step 47.
	 *     Poll MFC_CNTL[Ss] until 11 is returned
	 *     (suspend complete).
	 */
	POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
			 MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
			 MFC_CNTL_SUSPEND_COMPLETE);
}

static inline int suspend_spe(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 9:
	 *    If SPU_Status[R]=1, stop SPU execution and
	 *    wait for the stop to complete, handling the
	 *    isolation states along the way.
	 *
	 *    Returns       1 if SPU_Status[R]=1 on entry.
	 *                  0 otherwise.
	 */
	if (in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_WAITING_FOR_CHANNEL) {
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		return 1;
	}
	return 0;
}

static inline void clear_spu_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 10:
	 *    If SPU_Status[R]=0 and SPU_Status[E,L,IS]=1,
	 *    release the SPU from its isolation state.
	 */
	if (!(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING)) {
		if (in_be32(&prob->spu_status_R) &
		    SPU_STATUS_ISOLATED_EXIT_STATUS) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
		if ((in_be32(&prob->spu_status_R) &
		     SPU_STATUS_ISOLATED_LOAD_STATUS)
		    || (in_be32(&prob->spu_status_R) &
			SPU_STATUS_ISOLATED_STATE)) {
			spu_mfc_sr1_set(spu,
					MFC_STATE1_MASTER_RUN_CONTROL_MASK);
			eieio();
			out_be32(&prob->spu_runcntl_RW, 0x2);
			eieio();
			POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
					SPU_STATUS_RUNNING);
		}
	}
}

static inline void reset_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	u64 idx;
	int i;

	/* Restore, Step 20:
	 *     Reset the following CH: [0,3,4,24,25,27],
	 *     plus the data of CH 1.
	 */

	/* Reset CH 1 */
	out_be64(&priv2->spu_chnlcntptr_RW, 1);
	out_be64(&priv2->spu_chnldata_RW, 0UL);

	/* Reset the following CH: [0,3,4,24,25,27] */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, 0UL);
		out_be64(&priv2->spu_chnlcnt_RW, 0UL);
		eieio();
	}
}

static inline void reset_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[5] = { 21UL, 23UL, 28UL, 29UL, 30UL };
	u64 ch_counts[5] = { 16UL, 1UL, 1UL, 0UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 21:
	 *     Reset the following CH: [21, 23, 28, 29, 30]
	 */
	for (i = 0; i < 5; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void setup_spu_status_part1(struct spu_state *csa,
					  struct spu *spu)
{
	u32 status_P = SPU_STATUS_STOPPED_BY_STOP;
	u32 status_I = SPU_STATUS_INVALID_INSTR;
	u32 status_H = SPU_STATUS_STOPPED_BY_HALT;
	u32 status_S = SPU_STATUS_SINGLE_STEP;
	u32 status_S_I = SPU_STATUS_SINGLE_STEP | SPU_STATUS_INVALID_INSTR;
	u32 status_S_P = SPU_STATUS_SINGLE_STEP | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_H = SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	u32 status_P_I = SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_INVALID_INSTR;
	u32 status_code;

	/* Restore, Step 27:
	 *     If CSA.SPU_Status[I,S,H,P]=1, record the stop
	 *     condition in the LSCSA so that the SPU-based restore
	 *     code can replay the correct instruction sequence
	 *     (after the "context restored" stop and signal) and
	 *     thereby restore the specific SPU Status bits.
	 */
	status_code =
	    (csa->prob.spu_status_R >> SPU_STOP_STATUS_SHIFT) & 0xFFFF;
	if ((csa->prob.spu_status_R & status_P_I) == status_P_I) {

		/* SPU_Status[P,I]=1 - Illegal Instruction followed
		 * by Stop and Signal instruction, followed by SPU
		 * Status Code.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P_H) == status_P_H) {

		/* SPU_Status[P,H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction, followed by SPU
		 * Status Code.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P_H;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_P) == status_S_P) {

		/* SPU_Status[S,P]=1 - Stop and Signal instruction
		 * followed by SPU Status Code, in single-step mode.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_S_I) == status_S_I) {

		/* SPU_Status[S,I]=1 - Illegal instruction followed
		 * by SPU Status Code, in single-step mode.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S_I;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_P) == status_P) {

		/* SPU_Status[P]=1 - Stop and Signal instruction
		 * followed by SPU Status Code.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_P;
		csa->lscsa->stopped_status.slot[1] = status_code;

	} else if ((csa->prob.spu_status_R & status_H) == status_H) {

		/* SPU_Status[H]=1 - Halt Conditional, followed
		 * by Stop and Signal instruction.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_H;

	} else if ((csa->prob.spu_status_R & status_S) == status_S) {

		/* SPU_Status[S]=1 - Single step; no status code
		 * is recorded.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_S;

	} else if ((csa->prob.spu_status_R & status_I) == status_I) {

		/* SPU_Status[I]=1 - Illegal instruction; no status
		 * code is recorded.
		 */
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_I;

	}
}
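
/*
 * stopped_status.slot[0] and slot[1], set above, are read by the
 * SPU-side restore program: after its "context restored" stop and
 * signal, it replays the recorded stop condition (stop-and-signal,
 * halt, illegal instruction, or single step) so that the reloaded
 * program observes the same SPU_Status it was saved with.
 */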

static inline void setup_spu_status_part2(struct spu_state *csa,
		struct spu *spu)
{
	u32 mask;

	/* Restore, Step 28:
	 *     If CSA.SPU_Status[I,S,H,P,R]=0, the context was
	 *     simply running when it was saved; record the
	 *     "running" stopped-status so the SPU-based restore
	 *     code resumes it with a branch rather than replaying
	 *     a stop condition.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		csa->lscsa->stopped_status.slot[0] = SPU_STOPPED_STATUS_R;
	}
}

static inline void restore_mfc_rag(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 29:
	 *     Restore RA_GROUP_ID register and the
	 *     RA_ENABLE register from the CSA.
	 */
	spu_resource_allocation_groupID_set(spu,
			csa->priv1.resource_allocation_groupID_RW);
	spu_resource_allocation_enable_set(spu,
			csa->priv1.resource_allocation_enable_RW);
}

static inline void send_restore_code(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&spu_restore_code[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = sizeof(spu_restore_code);
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GETFS_CMD;

	/* Restore, Step 37:
	 *     Issue a DMA command to transfer the SPU restore
	 *     code to local storage.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void setup_decr(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 34:
	 *     If CSA.MFC_CNTL[Ds]=1 (decrementer was running),
	 *     adjust the decrementer for the time that elapsed
	 *     while the context was switched out, set the
	 *     decrementer-running status in the LSCSA, and flag
	 *     a wrap if the decrementer passed zero.
	 */
	if (csa->priv2.mfc_control_RW & MFC_CNTL_DECREMENTER_RUNNING) {
		cycles_t resume_time = get_cycles();
		cycles_t delta_time = resume_time - csa->suspend_time;

		csa->lscsa->decr_status.slot[0] = SPU_DECR_STATUS_RUNNING;
		if (csa->lscsa->decr.slot[0] < delta_time) {
			csa->lscsa->decr_status.slot[0] |=
				 SPU_DECR_STATUS_WRAPPED;
		}

		csa->lscsa->decr.slot[0] -= delta_time;
	} else {
		csa->lscsa->decr_status.slot[0] = 0;
	}
}
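
/*
 * A worked example of the adjustment above, assuming (as the code does)
 * that the SPU decrementer ticks at the timebase rate: if the context
 * was saved with decr = 1000 and 1500 timebase cycles elapse before
 * restore, decr wraps past zero, so SPU_DECR_STATUS_WRAPPED is set and
 * restore_decr_wrapped() later injects a decrementer event.
 */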

static inline void setup_ppu_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 35:
	 *     Copy the CSA.PU_MB data to the LSCSA.
	 */
	csa->lscsa->ppu_mb.slot[0] = csa->prob.pu_mb_R;
}

static inline void setup_ppuint_mb(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 36:
	 *     Copy the CSA.PUINT_MB data to the LSCSA.
	 */
	csa->lscsa->ppuint_mb.slot[0] = csa->priv2.puint_mb_R;
}

static inline int check_restore_status(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 complete;

	/* Restore, Step 40:
	 *     If SPU_Status[P]=1 and the stop code is
	 *     SPU_RESTORE_COMPLETE, the SPU-side context restore
	 *     succeeded; otherwise it failed.
	 */
	complete = ((SPU_RESTORE_COMPLETE << SPU_STOP_STATUS_SHIFT) |
		    SPU_STATUS_STOPPED_BY_STOP);
	return (in_be32(&prob->spu_status_R) != complete) ? 1 : 0;
}

static inline void restore_spu_privcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 41:
	 *     Restore SPU_PrivCntl from the CSA.
	 */
	out_be64(&priv2->spu_privcntl_RW, csa->priv2.spu_privcntl_RW);
	eieio();
}

static inline void restore_status_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 42:
	 *     If any CSA.SPU_Status[I,S,H,P]=1, run the SPU so that
	 *     the restore code replays the recorded error or
	 *     single-step state, then wait for it to stop again.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT | SPU_STATUS_STOPPED_BY_STOP;
	if (csa->prob.spu_status_R & mask) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_status_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 mask;

	/* Restore, Step 43:
	 *     If all CSA.SPU_Status[I,S,H,P,R]=0, then write
	 *     SPU_RunCntl[R0R1]='01', wait for SPU_Status[R]=1,
	 *     then write '00' to SPU_RunCntl[R0R1] and wait
	 *     for SPU_Status[R]=0.
	 */
	mask = SPU_STATUS_INVALID_INSTR |
	    SPU_STATUS_SINGLE_STEP |
	    SPU_STATUS_STOPPED_BY_HALT |
	    SPU_STATUS_STOPPED_BY_STOP | SPU_STATUS_RUNNING;
	if (!(csa->prob.spu_status_R & mask)) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
		POLL_WHILE_FALSE(in_be32(&prob->spu_status_R) &
				 SPU_STATUS_RUNNING);
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
		eieio();
		POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) &
				SPU_STATUS_RUNNING);
	}
}

static inline void restore_ls_16kb(struct spu_state *csa, struct spu *spu)
{
	unsigned long addr = (unsigned long)&csa->lscsa->ls[0];
	unsigned int ls_offset = 0x0;
	unsigned int size = 16384;
	unsigned int tag = 0;
	unsigned int rclass = 0;
	unsigned int cmd = MFC_GET_CMD;

	/* Restore, Step 44:
	 *     Issue a DMA command to restore the first
	 *     16K bytes of local storage from the CSA.
	 */
	send_mfc_dma(spu, addr, ls_offset, size, tag, rclass, cmd);
}

static inline void suspend_mfc(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 46:
	 *     Write MFC_Cntl[Sc,Sm]='1','0' to suspend
	 *     the MFC command queue.
	 */
	out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
	eieio();
}

static inline void clear_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 49:
	 *     Write INT_MASK_class0 with value of 0.
	 *     Write INT_MASK_class1 with value of 0.
	 *     Write INT_MASK_class2 with value of 0.
	 *     Write INT_STAT_class0 with all bits set, to clear
	 *     any pending class 0 interrupts, and likewise for
	 *     classes 1 and 2.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, 0ul);
	spu_int_mask_set(spu, 1, 0ul);
	spu_int_mask_set(spu, 2, 0ul);
	spu_int_stat_clear(spu, 0, CLASS0_INTR_MASK);
	spu_int_stat_clear(spu, 1, CLASS1_INTR_MASK);
	spu_int_stat_clear(spu, 2, CLASS2_INTR_MASK);
	spin_unlock_irq(&spu->register_lock);
}

static inline void restore_mfc_queues(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 50:
	 *     If MFC_Cntl[Qs]=0 (the queues were not empty at
	 *     save time), restore the MFC command queues.
	 */
	if ((csa->priv2.mfc_control_RW & MFC_CNTL_DMA_QUEUES_EMPTY_MASK) == 0) {
		for (i = 0; i < 8; i++) {
			out_be64(&priv2->puq[i].mfc_cq_data0_RW,
				 csa->priv2.puq[i].mfc_cq_data0_RW);
			out_be64(&priv2->puq[i].mfc_cq_data1_RW,
				 csa->priv2.puq[i].mfc_cq_data1_RW);
			out_be64(&priv2->puq[i].mfc_cq_data2_RW,
				 csa->priv2.puq[i].mfc_cq_data2_RW);
			out_be64(&priv2->puq[i].mfc_cq_data3_RW,
				 csa->priv2.puq[i].mfc_cq_data3_RW);
		}
		for (i = 0; i < 16; i++) {
			out_be64(&priv2->spuq[i].mfc_cq_data0_RW,
				 csa->priv2.spuq[i].mfc_cq_data0_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data1_RW,
				 csa->priv2.spuq[i].mfc_cq_data1_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data2_RW,
				 csa->priv2.spuq[i].mfc_cq_data2_RW);
			out_be64(&priv2->spuq[i].mfc_cq_data3_RW,
				 csa->priv2.spuq[i].mfc_cq_data3_RW);
		}
	}
	eieio();
}

static inline void restore_ppu_querymask(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 51:
	 *     Restore the PPU_QueryMask register from the CSA.
	 */
	out_be32(&prob->dma_querymask_RW, csa->prob.dma_querymask_RW);
	eieio();
}

static inline void restore_ppu_querytype(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 52:
	 *     Restore the PPU_QueryType register from the CSA.
	 */
	out_be32(&prob->dma_querytype_RW, csa->prob.dma_querytype_RW);
	eieio();
}

static inline void restore_mfc_csr_tsq(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 53:
	 *     Restore the MFC_CSR_TSQ register from the CSA.
	 */
	out_be64(&priv2->spu_tag_status_query_RW,
		 csa->priv2.spu_tag_status_query_RW);
	eieio();
}

static inline void restore_mfc_csr_cmd(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 54:
	 *     Restore the MFC_CSR_CMD1 and MFC_CSR_CMD2
	 *     registers from the CSA.
	 */
	out_be64(&priv2->spu_cmd_buf1_RW, csa->priv2.spu_cmd_buf1_RW);
	out_be64(&priv2->spu_cmd_buf2_RW, csa->priv2.spu_cmd_buf2_RW);
	eieio();
}

static inline void restore_mfc_csr_ato(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 55:
	 *     Restore the MFC_CSR_ATO register from the CSA.
	 */
	out_be64(&priv2->spu_atomic_status_RW, csa->priv2.spu_atomic_status_RW);
}

static inline void restore_mfc_tclass_id(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 56:
	 *     Restore the MFC_TCLASS_ID register from the CSA.
	 */
	spu_mfc_tclass_id_set(spu, csa->priv1.mfc_tclass_id_RW);
	eieio();
}

static inline void set_llr_event(struct spu_state *csa, struct spu *spu)
{
	u64 ch0_cnt, ch0_data;
	u64 ch1_data;

	/* Restore, Step 57:
	 *    Set the Lock Line Reservation Lost Event by:
	 *      1. OR CSA.SPU_Event_Status with bit 21 (Lr) set to 1.
	 *      2. If CSA.SPU_Channel_0_Count=0 and
	 *         CSA.SPU_Wr_Event_Mask[Lr]=1 and
	 *         CSA.SPU_Event_Status[Lr]=0 then set
	 *         CSA.SPU_Event_Status_Count=1.
	 */
	ch0_cnt = csa->spu_chnlcnt_RW[0];
	ch0_data = csa->spu_chnldata_RW[0];
	ch1_data = csa->spu_chnldata_RW[1];
	csa->spu_chnldata_RW[0] |= MFC_LLR_LOST_EVENT;
	if ((ch0_cnt == 0) && !(ch0_data & MFC_LLR_LOST_EVENT) &&
	    (ch1_data & MFC_LLR_LOST_EVENT)) {
		csa->spu_chnlcnt_RW[0] = 1;
	}
}

static inline void restore_decr_wrapped(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 58:
	 *     If the "wrapped" flag is set in the software
	 *     decrementer status, pend a decrementer event by
	 *     OR-ing in the Tm bit (0x20) of the saved
	 *     SPU_RdEventStat image, raising the channel count
	 *     if the event is unmasked and not already pending.
	 */
	if (!(csa->lscsa->decr_status.slot[0] & SPU_DECR_STATUS_WRAPPED))
		return;

	if ((csa->spu_chnlcnt_RW[0] == 0) &&
	    (csa->spu_chnldata_RW[1] & 0x20) &&
	    !(csa->spu_chnldata_RW[0] & 0x20))
		csa->spu_chnlcnt_RW[0] = 1;

	csa->spu_chnldata_RW[0] |= 0x20;
}

static inline void restore_ch_part1(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 idx, ch_indices[] = { 0UL, 3UL, 4UL, 24UL, 25UL, 27UL };
	int i;

	/* Restore, Step 59:
	 *	Restore the following CH: [0,3,4,24,25,27]
	 */
	for (i = 0; i < ARRAY_SIZE(ch_indices); i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnldata_RW, csa->spu_chnldata_RW[idx]);
		out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[idx]);
		eieio();
	}
}

static inline void restore_ch_part2(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 ch_indices[3] = { 9UL, 21UL, 23UL };
	u64 ch_counts[3] = { 1UL, 16UL, 1UL };
	u64 idx;
	int i;

	/* Restore, Step 60:
	 *     Restore the following CH: [9,21,23], using
	 *     the channel count saved for CH 21.
	 */
	ch_counts[0] = 1UL;
	ch_counts[1] = csa->spu_chnlcnt_RW[21];
	ch_counts[2] = 1UL;
	for (i = 0; i < 3; i++) {
		idx = ch_indices[i];
		out_be64(&priv2->spu_chnlcntptr_RW, idx);
		eieio();
		out_be64(&priv2->spu_chnlcnt_RW, ch_counts[i]);
		eieio();
	}
}

static inline void restore_spu_lslr(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 61:
	 *     Restore the SPU_LSLR register from the CSA.
	 */
	out_be64(&priv2->spu_lslr_RW, csa->priv2.spu_lslr_RW);
	eieio();
}

static inline void restore_spu_cfg(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 62:
	 *     Restore the SPU_Cfg register from the CSA.
	 */
	out_be64(&priv2->spu_cfg_RW, csa->priv2.spu_cfg_RW);
	eieio();
}

static inline void restore_pm_trace(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 63:
	 *     Restore PM_Trace_Tag_Wait_Mask from the CSA.
	 *     Not performed by this implementation.
	 */
}

static inline void restore_spu_npc(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 64:
	 *     Restore SPU_NPC from the CSA.
	 */
	out_be32(&prob->spu_npc_RW, csa->prob.spu_npc_RW);
	eieio();
}

static inline void restore_spu_mb(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int i;

	/* Restore, Step 65:
	 *     Restore the SPU Read Mailbox Channel (29)
	 *     count and data from the CSA.
	 */
	out_be64(&priv2->spu_chnlcntptr_RW, 29UL);
	eieio();
	out_be64(&priv2->spu_chnlcnt_RW, csa->spu_chnlcnt_RW[29]);
	for (i = 0; i < 4; i++) {
		out_be64(&priv2->spu_chnldata_RW, csa->spu_mailbox_data[i]);
	}
	eieio();
}

static inline void check_ppu_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	u32 dummy = 0;

	/* Restore, Step 66:
	 *     If CSA.MB_Stat[P]=0 (mailbox empty) then
	 *     read from the PPU_MB register to drain it.
	 */
	if ((csa->prob.mb_stat_R & 0xFF) == 0) {
		dummy = in_be32(&prob->pu_mb_R);
		eieio();
	}
}

static inline void check_ppuint_mb_stat(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 dummy = 0UL;

	/* Restore, Step 67:
	 *     If CSA.MB_Stat[I]=0 (mailbox empty) then read
	 *     from the PPUINT_MB register and clear the
	 *     class 2 mailbox interrupt.
	 */
	if ((csa->prob.mb_stat_R & 0xFF0000) == 0) {
		dummy = in_be64(&priv2->puint_mb_R);
		eieio();
		spu_int_stat_clear(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		eieio();
	}
}

static inline void restore_mfc_sr1(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 69:
	 *     Restore the MFC_SR1 register from the CSA.
	 */
	spu_mfc_sr1_set(spu, csa->priv1.mfc_sr1_RW);
	eieio();
}

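/*
 * set_int_route() is not part of the architected save/restore sequence:
 * it routes this SPU's interrupts to the CPU on which the owning
 * context last ran, presumably so that handlers execute close to where
 * the context is scheduled.
 */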
static inline void set_int_route(struct spu_state *csa, struct spu *spu)
{
	struct spu_context *ctx = spu->ctx;

	spu_cpu_affinity_set(spu, ctx->last_ran);
}

static inline void restore_other_spu_access(struct spu_state *csa,
					    struct spu *spu)
{
	/* Restore, Step 70:
	 *     Restore other SPU mappings to this SPU.  TBD.
	 */
}

static inline void restore_spu_runcntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	/* Restore, Step 71:
	 *     If CSA.SPU_Status[R]=1 then write
	 *     SPU_RunCntl[R0R1]='01' to resume the SPU.
	 */
	if (csa->prob.spu_status_R & SPU_STATUS_RUNNING) {
		out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_RUNNABLE);
		eieio();
	}
}

static inline void restore_mfc_cntl(struct spu_state *csa, struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Restore, Step 72:
	 *     Restore the MFC_CNTL register for the CSA.
	 */
	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
	eieio();

	/*
	 * The queue is put back into the same state that was evident
	 * prior to the context save.  In particular, if the queue was
	 * suspended (or a suspend was in progress) when the context
	 * was saved, the suspend bits written back here leave it
	 * suspended.
	 */
}

static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 73:
	 *     Enable user-space access (if provided) to this
	 *     SPU by mapping the virtual pages assigned to
	 *     the SPU memory-mapped I/O (MMIO) for problem
	 *     state.  TBD.
	 */
}

static inline void reset_switch_active(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 74:
	 *     Reset the "context switch active" flag.
	 *     Not performed by this implementation.
	 */
}

static inline void reenable_interrupts(struct spu_state *csa, struct spu *spu)
{
	/* Restore, Step 75:
	 *     Re-enable SPU interrupts with the masks
	 *     saved in the CSA.
	 */
	spin_lock_irq(&spu->register_lock);
	spu_int_mask_set(spu, 0, csa->priv1.int_mask_class0_RW);
	spu_int_mask_set(spu, 1, csa->priv1.int_mask_class1_RW);
	spu_int_mask_set(spu, 2, csa->priv1.int_mask_class2_RW);
	spin_unlock_irq(&spu->register_lock);
}

static int quiece_spu(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combined steps 2-18 of SPU context save sequence, which
	 * quiesce the SPU.  Returns 0 on success, 2 if step 2
	 * failed, and 6 if step 6 failed.
	 */

	if (check_spu_isolate(prev, spu)) {	/* Step 2. */
		return 2;
	}
	disable_interrupts(prev, spu);		/* Step 3. */
	set_watchdog_timer(prev, spu);		/* Step 4. */
	inhibit_user_access(prev, spu);		/* Step 5. */
	if (check_spu_isolate(prev, spu)) {	/* Step 6. */
		return 6;
	}
	set_switch_pending(prev, spu);		/* Step 7. */
	save_mfc_cntl(prev, spu);		/* Step 8. */
	save_spu_runcntl(prev, spu);		/* Step 9. */
	save_mfc_sr1(prev, spu);		/* Step 10. */
	save_spu_status(prev, spu);		/* Step 11. */
	save_mfc_stopped_status(prev, spu);	/* Step 12. */
	halt_mfc_decr(prev, spu);		/* Step 13. */
	save_timebase(prev, spu);		/* Step 14. */
	remove_other_spu_access(prev, spu);	/* Step 15. */
	do_mfc_mssync(prev, spu);		/* Step 16. */
	issue_mfc_tlbie(prev, spu);		/* Step 17. */
	handle_pending_interrupts(prev, spu);	/* Step 18. */

	return 0;
}

static void save_csa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Combine steps 19-44 of SPU context save sequence, which
	 * save regions of the privileged & problem state areas.
	 */

	save_mfc_queues(prev, spu);	/* Step 19. */
	save_ppu_querymask(prev, spu);	/* Step 20. */
	save_ppu_querytype(prev, spu);	/* Step 21. */
	save_ppu_tagstatus(prev, spu);	/* NEW.     */
	save_mfc_csr_tsq(prev, spu);	/* Step 22. */
	save_mfc_csr_cmd(prev, spu);	/* Step 23. */
	save_mfc_csr_ato(prev, spu);	/* Step 24. */
	save_mfc_tclass_id(prev, spu);	/* Step 25. */
	set_mfc_tclass_id(prev, spu);	/* Step 26. */
	save_mfc_cmd(prev, spu);	/* Step 26a - moved from 44. */
	purge_mfc_queue(prev, spu);	/* Step 27. */
	wait_purge_complete(prev, spu);	/* Step 28. */
	setup_mfc_sr1(prev, spu);	/* Step 30. */
	save_spu_npc(prev, spu);	/* Step 31. */
	save_spu_privcntl(prev, spu);	/* Step 32. */
	reset_spu_privcntl(prev, spu);	/* Step 33. */
	save_spu_lslr(prev, spu);	/* Step 34. */
	reset_spu_lslr(prev, spu);	/* Step 35. */
	save_spu_cfg(prev, spu);	/* Step 36. */
	save_pm_trace(prev, spu);	/* Step 37. */
	save_mfc_rag(prev, spu);	/* Step 38. */
	save_ppu_mb_stat(prev, spu);	/* Step 39. */
	save_ppu_mb(prev, spu);		/* Step 40. */
	save_ppuint_mb(prev, spu);	/* Step 41. */
	save_ch_part1(prev, spu);	/* Step 42. */
	save_spu_mb(prev, spu);		/* Step 43. */
	reset_ch(prev, spu);		/* Step 45. */
}

static void save_lscsa(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 45-57 of SPU context save sequence,
	 * which save regions of the local store and register
	 * file.
	 */

	resume_mfc_queue(prev, spu);	/* Step 46. */
	/* Step 47. */
	setup_mfc_slbs(prev, spu, spu_save_code, sizeof(spu_save_code));
	set_switch_active(prev, spu);	/* Step 48. */
	enable_interrupts(prev, spu);	/* Step 49. */
	save_ls_16kb(prev, spu);	/* Step 50. */
	set_spu_npc(prev, spu);		/* Step 51. */
	set_signot1(prev, spu);		/* Step 52. */
	set_signot2(prev, spu);		/* Step 53. */
	send_save_code(prev, spu);	/* Step 54. */
	set_ppu_querymask(prev, spu);	/* Step 55. */
	wait_tag_complete(prev, spu);	/* Step 56. */
	wait_spu_stopped(prev, spu);	/* Step 57. */
}
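
/*
 * Note on the split above: the PPE side only DMAs the save program into
 * local store and points the signal-notify registers at the LSCSA; the
 * SPU-side program then streams the remaining local store and the
 * register file out to the LSCSA and stops with the SPU_SAVE_COMPLETE
 * stop code that check_save_status() looks for.
 */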

static void force_spu_isolate_exit(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	/* Stop SPE execution and wait for completion. */
	out_be32(&prob->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	iobarrier_rw();
	POLL_WHILE_TRUE(in_be32(&prob->spu_status_R) & SPU_STATUS_RUNNING);

	/* Restart SPE master runcntl. */
	spu_mfc_sr1_set(spu, MFC_STATE1_MASTER_RUN_CONTROL_MASK);
	iobarrier_w();

	/* Initiate isolate exit request and wait for completion. */
	out_be64(&priv2->spu_privcntl_RW, 4LL);
	iobarrier_w();
	out_be32(&prob->spu_runcntl_RW, 2);
	iobarrier_rw();
	POLL_WHILE_FALSE((in_be32(&prob->spu_status_R)
				& SPU_STATUS_STOPPED_BY_STOP));

	/* Reset load request to normal. */
	out_be64(&priv2->spu_privcntl_RW, SPU_PRIVCNT_LOAD_REQUEST_NORMAL);
	iobarrier_w();
}

/**
 * stop_spu_isolate
 *	Check SPU run-control state and force isolated
 *	exit function as necessary.
 */
static void stop_spu_isolate(struct spu *spu)
{
	struct spu_problem __iomem *prob = spu->problem;

	if (in_be32(&prob->spu_status_R) & SPU_STATUS_ISOLATED_STATE) {
		/* The SPU is in isolated state; the only way
		 * to get it out is to perform an isolated
		 * exit (clean) operation.
		 */
		force_spu_isolate_exit(spu);
	}
}

static void harvest(struct spu_state *prev, struct spu *spu)
{
	/*
	 * Perform steps 2-25 of SPU context restore sequence,
	 * which resets an SPU either after a failed save, or
	 * when using an SPU for the first time.
	 */

	disable_interrupts(prev, spu);		/* Step 2.  */
	inhibit_user_access(prev, spu);		/* Step 3.  */
	terminate_spu_app(prev, spu);		/* Step 4.  */
	set_switch_pending(prev, spu);		/* Step 5.  */
	stop_spu_isolate(spu);			/* NEW.     */
	remove_other_spu_access(prev, spu);	/* Step 6.  */
	suspend_mfc_and_halt_decr(prev, spu);	/* Step 7.  */
	wait_suspend_mfc_complete(prev, spu);	/* Step 8.  */
	if (!suspend_spe(prev, spu))		/* Step 9.  */
		clear_spu_status(prev, spu);	/* Step 10. */
	do_mfc_mssync(prev, spu);		/* Step 11. */
	issue_mfc_tlbie(prev, spu);		/* Step 12. */
	handle_pending_interrupts(prev, spu);	/* Step 13. */
	purge_mfc_queue(prev, spu);		/* Step 14. */
	wait_purge_complete(prev, spu);		/* Step 15. */
	reset_spu_privcntl(prev, spu);		/* Step 16. */
	reset_spu_lslr(prev, spu);		/* Step 17. */
	setup_mfc_sr1(prev, spu);		/* Step 18. */
	spu_invalidate_slbs(spu);		/* Step 19. */
	reset_ch_part1(prev, spu);		/* Step 20. */
	reset_ch_part2(prev, spu);		/* Step 21. */
	enable_interrupts(prev, spu);		/* Step 22. */
	set_switch_active(prev, spu);		/* Step 23. */
	set_mfc_tclass_id(prev, spu);		/* Step 24. */
	resume_mfc_queue(prev, spu);		/* Step 25. */
}

static void restore_lscsa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Perform steps 26-40 of SPU context restore sequence,
	 * which restores regions of the local store and register
	 * file.
	 */

	set_watchdog_timer(next, spu);		/* Step 26. */
	setup_spu_status_part1(next, spu);	/* Step 27. */
	setup_spu_status_part2(next, spu);	/* Step 28. */
	restore_mfc_rag(next, spu);		/* Step 29. */
	/* Step 30. */
	setup_mfc_slbs(next, spu, spu_restore_code, sizeof(spu_restore_code));
	set_spu_npc(next, spu);			/* Step 31. */
	set_signot1(next, spu);			/* Step 32. */
	set_signot2(next, spu);			/* Step 33. */
	setup_decr(next, spu);			/* Step 34. */
	setup_ppu_mb(next, spu);		/* Step 35. */
	setup_ppuint_mb(next, spu);		/* Step 36. */
	send_restore_code(next, spu);		/* Step 37. */
	set_ppu_querymask(next, spu);		/* Step 38. */
	wait_tag_complete(next, spu);		/* Step 39. */
	wait_spu_stopped(next, spu);		/* Step 40. */
}

static void restore_csa(struct spu_state *next, struct spu *spu)
{
	/*
	 * Combine steps 41-76 of SPU context restore sequence, which
	 * restore regions of the privileged & problem state areas.
	 */

	restore_spu_privcntl(next, spu);	/* Step 41. */
	restore_status_part1(next, spu);	/* Step 42. */
	restore_status_part2(next, spu);	/* Step 43. */
	restore_ls_16kb(next, spu);		/* Step 44. */
	wait_tag_complete(next, spu);		/* Step 45. */
	suspend_mfc(next, spu);			/* Step 46. */
	wait_suspend_mfc_complete(next, spu);	/* Step 47. */
	issue_mfc_tlbie(next, spu);		/* Step 48. */
	clear_interrupts(next, spu);		/* Step 49. */
	restore_mfc_queues(next, spu);		/* Step 50. */
	restore_ppu_querymask(next, spu);	/* Step 51. */
	restore_ppu_querytype(next, spu);	/* Step 52. */
	restore_mfc_csr_tsq(next, spu);		/* Step 53. */
	restore_mfc_csr_cmd(next, spu);		/* Step 54. */
	restore_mfc_csr_ato(next, spu);		/* Step 55. */
	restore_mfc_tclass_id(next, spu);	/* Step 56. */
	set_llr_event(next, spu);		/* Step 57. */
	restore_decr_wrapped(next, spu);	/* Step 58. */
	restore_ch_part1(next, spu);		/* Step 59. */
	restore_ch_part2(next, spu);		/* Step 60. */
	restore_spu_lslr(next, spu);		/* Step 61. */
	restore_spu_cfg(next, spu);		/* Step 62. */
	restore_pm_trace(next, spu);		/* Step 63. */
	restore_spu_npc(next, spu);		/* Step 64. */
	restore_spu_mb(next, spu);		/* Step 65. */
	check_ppu_mb_stat(next, spu);		/* Step 66. */
	check_ppuint_mb_stat(next, spu);	/* Step 67. */
	spu_invalidate_slbs(spu);		/* Modified Step 68. */
	restore_mfc_sr1(next, spu);		/* Step 69. */
	set_int_route(next, spu);		/* NEW      */
	restore_other_spu_access(next, spu);	/* Step 70. */
	restore_spu_runcntl(next, spu);		/* Step 71. */
	restore_mfc_cntl(next, spu);		/* Step 72. */
	enable_user_access(next, spu);		/* Step 73. */
	reset_switch_active(next, spu);		/* Step 74. */
	reenable_interrupts(next, spu);		/* Step 75. */
}

static int __do_spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	/*
	 * SPU context save can be broken into three phases:
	 *
	 *     (a) quiesce [steps 2-16].
	 *     (b) save of CSA, performed by PPE [steps 17-42].
	 *     (c) save of LSCSA, mostly performed by SPU [steps 43-52].
	 *
	 * Returns      0 on success.
	 *              2 or 6 if failed to quiesce the SPU.
	 *              Non-zero if the SPU-side save failed.
	 */

	rc = quiece_spu(prev, spu);		/* Steps 2-16. */
	switch (rc) {
	default:
	case 2:
	case 6:
		harvest(prev, spu);
		return rc;
	case 0:
		break;
	}
	save_csa(prev, spu);			/* Steps 17-42. */
	save_lscsa(prev, spu);			/* Steps 43-52. */
	return check_save_status(prev, spu);	/* Step 53. */
}

static int __do_spu_restore(struct spu_state *next, struct spu *spu)
{
	int rc;

	/*
	 * SPU context restore can be broken into three phases:
	 *
	 *     (a) harvest (or reset) SPU [steps 2-25].
	 *     (b) restore LSCSA [steps 26-40], mostly performed by SPU.
	 *     (c) restore CSA [steps 41-76], performed by PPE.
	 *
	 * The 'harvest' step is not performed here, but rather
	 * as needed by the caller.
	 */

	restore_lscsa(next, spu);		/* Steps 26-40. */
	rc = check_restore_status(next, spu);	/* Step 40. */
	switch (rc) {
	default:
		/* Failed. Return now. */
		return rc;
	case 0:
		/* Fall through to next step. */
		break;
	}
	restore_csa(next, spu);			/* Steps 41-76. */

	return 0;
}

/**
 * spu_save - SPU context save, with locking.
 * @prev: pointer to SPU context save area, to be saved.
 * @spu: pointer to SPU iomem structure.
 *
 * Acquire locks, perform the save operation then return.
 */
int spu_save(struct spu_state *prev, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);		/* Step 1.     */
	rc = __do_spu_save(prev, spu);	/* Steps 2-53. */
	release_spu_lock(spu);
	if (rc != 0 && rc != 2 && rc != 6) {
		panic("%s failed on SPU[%d], rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return 0;
}
EXPORT_SYMBOL_GPL(spu_save);

/**
 * spu_restore - SPU context restore, with harvest and locking.
 * @new: pointer to SPU context save area, to be restored.
 * @spu: pointer to SPU iomem structure.
 *
 * Perform harvest + restore, as we may not be coming
 * from a previous successful save operation, and the
 * hardware state is unknown.
 */
int spu_restore(struct spu_state *new, struct spu *spu)
{
	int rc;

	acquire_spu_lock(spu);
	harvest(NULL, spu);
	spu->slb_replace = 0;
	rc = __do_spu_restore(new, spu);
	release_spu_lock(spu);
	if (rc) {
		panic("%s failed on SPU[%d] rc=%d.\n",
		      __func__, spu->number, rc);
	}
	return rc;
}
EXPORT_SYMBOL_GPL(spu_restore);

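/*
 * Initial problem-state image for a new context: the channel counts
 * below seed the readable/writable channels with their free capacity
 * (e.g. 16 slots in the MFC command channel 21), run control is parked
 * at STOP, and mb_stat_R = 0x000400 reads as an empty mailbox set with
 * all four inbound (PPU to SPU) mailbox slots free -- that reading of
 * the mailbox-status layout is our interpretation, not something this
 * file defines.
 */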
static void init_prob(struct spu_state *csa)
{
	csa->spu_chnlcnt_RW[9] = 1;
	csa->spu_chnlcnt_RW[21] = 16;
	csa->spu_chnlcnt_RW[23] = 1;
	csa->spu_chnlcnt_RW[28] = 1;
	csa->spu_chnlcnt_RW[30] = 1;
	csa->prob.spu_runcntl_RW = SPU_RUNCNTL_STOP;
	csa->prob.mb_stat_R = 0x000400;
}

static void init_priv1(struct spu_state *csa)
{
	/* Enable decode, relocate, tlbie response, master runcntl. */
	csa->priv1.mfc_sr1_RW = MFC_STATE1_LOCAL_STORAGE_DECODE_MASK |
	    MFC_STATE1_MASTER_RUN_CONTROL_MASK |
	    MFC_STATE1_PROBLEM_STATE_MASK |
	    MFC_STATE1_RELOCATE_MASK | MFC_STATE1_BUS_TLBIE_MASK;

	/* Enable OS-specific set of interrupts. */
	csa->priv1.int_mask_class0_RW = CLASS0_ENABLE_DMA_ALIGNMENT_INTR |
	    CLASS0_ENABLE_INVALID_DMA_COMMAND_INTR |
	    CLASS0_ENABLE_SPU_ERROR_INTR;
	csa->priv1.int_mask_class1_RW = CLASS1_ENABLE_SEGMENT_FAULT_INTR |
	    CLASS1_ENABLE_STORAGE_FAULT_INTR;
	csa->priv1.int_mask_class2_RW = CLASS2_ENABLE_SPU_STOP_INTR |
	    CLASS2_ENABLE_SPU_HALT_INTR |
	    CLASS2_ENABLE_SPU_DMA_TAG_GROUP_COMPLETE_INTR;
}

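/*
 * The priv2 image below leaves the Local Store Limit Register wide open
 * (LS_ADDR_MASK) and describes an MFC with empty, normally operating
 * DMA queues; restore_mfc_cntl() writes this image to the hardware the
 * first time the context is loaded.
 */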
static void init_priv2(struct spu_state *csa)
{
	csa->priv2.spu_lslr_RW = LS_ADDR_MASK;
	csa->priv2.mfc_control_RW = MFC_CNTL_RESUME_DMA_QUEUE |
	    MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION |
	    MFC_CNTL_DMA_QUEUES_EMPTY_MASK;
}

/**
 * spu_init_csa - allocate and initialize an SPU context save area.
 *
 * Allocate and initialize the contents of an SPU context save area.
 * This includes enabling address translation, interrupt masks, etc.,
 * as appropriate for the given OS environment.
 *
 * Note that storage for the 'lscsa' is allocated separately,
 * as it is by far the largest of the context save regions,
 * and may need to be pinned or otherwise specially aligned.
 */
int spu_init_csa(struct spu_state *csa)
{
	int rc;

	if (!csa)
		return -EINVAL;
	memset(csa, 0, sizeof(struct spu_state));

	rc = spu_alloc_lscsa(csa);
	if (rc)
		return rc;

	spin_lock_init(&csa->register_lock);

	init_prob(csa);
	init_priv1(csa);
	init_priv2(csa);

	return 0;
}

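/*
 * spu_fini_csa() releases the LSCSA storage allocated by
 * spu_init_csa().  A context's typical lifecycle, for reference: set
 * up once with spu_init_csa(), moved between physical SPUs with
 * spu_save() and spu_restore(), then torn down here.
 */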
void spu_fini_csa(struct spu_state *csa)
{
	spu_free_lscsa(csa);
}