1
2
3
4
5
6
7
8
9#include <linux/sched/signal.h>
10#include <linux/mm.h>
11
12#include <asm/spu.h>
13#include <asm/spu_csa.h>
14
15#include "spufs.h"
16
17
18
19
20
21
22
23static void spufs_handle_event(struct spu_context *ctx,
24 unsigned long ea, int type)
25{
26 if (ctx->flags & SPU_CREATE_EVENTS_ENABLED) {
27 ctx->event_return |= type;
28 wake_up_all(&ctx->stop_wq);
29 return;
30 }
31
32 switch (type) {
33 case SPE_EVENT_INVALID_DMA:
34 force_sig_fault(SIGBUS, BUS_OBJERR, NULL, current);
35 break;
36 case SPE_EVENT_SPE_DATA_STORAGE:
37 ctx->ops->restart_dma(ctx);
38 force_sig_fault(SIGSEGV, SEGV_ACCERR, (void __user *)ea,
39 current);
40 break;
41 case SPE_EVENT_DMA_ALIGNMENT:
42
43 force_sig_fault(SIGBUS, BUS_ADRALN, NULL, current);
44 break;
45 case SPE_EVENT_SPE_ERROR:
46 force_sig_fault(
47 SIGILL, ILL_ILLOPC,
48 (void __user *)(unsigned long)
49 ctx->ops->npc_read(ctx) - 4, current);
50 break;
51 }
52}
53
54int spufs_handle_class0(struct spu_context *ctx)
55{
56 unsigned long stat = ctx->csa.class_0_pending & CLASS0_INTR_MASK;
57
58 if (likely(!stat))
59 return 0;
60
61 if (stat & CLASS0_DMA_ALIGNMENT_INTR)
62 spufs_handle_event(ctx, ctx->csa.class_0_dar,
63 SPE_EVENT_DMA_ALIGNMENT);
64
65 if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
66 spufs_handle_event(ctx, ctx->csa.class_0_dar,
67 SPE_EVENT_INVALID_DMA);
68
69 if (stat & CLASS0_SPU_ERROR_INTR)
70 spufs_handle_event(ctx, ctx->csa.class_0_dar,
71 SPE_EVENT_SPE_ERROR);
72
73 ctx->csa.class_0_pending = 0;
74
75 return -EIO;
76}
77
78
79
80
81
82
83
84
85
86
/*
 * Handle a class 1 (translation) interrupt for an SPU context.
 *
 * The fault address (dar) and status (dsisr) were saved into the context
 * save area before we get here, so they stay valid even if the context
 * gets scheduled off the physical SPU while we fault the page in.
 *
 * Must be called with ctx->state_mutex held; the lock is dropped around
 * the page-fault path and re-acquired before returning.
 *
 * Returns 0 on success (fault resolved, DMA restarted if the context is
 * still runnable), or a negative error code after reporting the failure
 * to userspace via spufs_handle_event().
 */
int spufs_handle_class1(struct spu_context *ctx)
{
	u64 ea, dsisr, access;
	unsigned long flags;
	vm_fault_t flt = 0;
	int ret;

	/* Read the saved fault state; nothing to do unless it is a
	 * translation miss or an access-permission fault. */
	ea = ctx->csa.class_1_dar;
	dsisr = ctx->csa.class_1_dsisr;

	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
		return 0;

	/* account the time spent resolving the fault as I/O wait */
	spuctx_switch_state(ctx, SPU_UTIL_IOWAIT);

	pr_debug("ctx %p: ea %016llx, dsisr %016llx state %d\n", ctx, ea,
		dsisr, ctx->state);

	ctx->stats.hash_flt++;
	if (ctx->state == SPU_STATE_RUNNABLE)
		ctx->spu->stats.hash_flt++;

	/* We must not hold the context lock while faulting the page in:
	 * the mm fault path may sleep and the context may be switched. */
	spu_release(ctx);

	/* Build the access mask from the DMA direction and try the fast
	 * path first: insert the translation directly via hash_page(). */
	access = (_PAGE_PRESENT | _PAGE_READ);
	access |= (dsisr & MFC_DSISR_ACCESS_PUT) ? _PAGE_WRITE : 0UL;
	local_irq_save(flags);
	ret = hash_page(ea, access, 0x300, dsisr);
	local_irq_restore(flags);

	/* hashing failed, so fall back to the full coprocessor fault
	 * handler, which may fault the page in from the backing store */
	if (ret)
		ret = copro_handle_mm_fault(current->mm, ea, dsisr, &flt);

	/* Re-acquire the state mutex unconditionally: the bookkeeping
	 * below must happen even if we were interrupted by a signal. */
	mutex_lock(&ctx->state_mutex);

	/* Clear the saved fault state under the context lock, after the
	 * fault has been handled, so the scheduler does not see a stale
	 * pending fault for this context. */
	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;

	/*
	 * If the fault was resolved, account it as a major/minor fault
	 * and restart the DMA if the context still owns a physical SPU.
	 * Otherwise report the failure to userspace as a data-storage
	 * event (signal or event bit, see spufs_handle_event()).
	 */
	if (!ret) {
		if (flt & VM_FAULT_MAJOR)
			ctx->stats.maj_flt++;
		else
			ctx->stats.min_flt++;
		if (ctx->state == SPU_STATE_RUNNABLE) {
			if (flt & VM_FAULT_MAJOR)
				ctx->spu->stats.maj_flt++;
			else
				ctx->spu->stats.min_flt++;
		}

		if (ctx->spu)
			ctx->ops->restart_dma(ctx);
	} else
		spufs_handle_event(ctx, ea, SPE_EVENT_SPE_DATA_STORAGE);

	/* back to accounting system time now that the fault is done */
	spuctx_switch_state(ctx, SPU_UTIL_SYSTEM);
	return ret;
}
169