/*
 * backing_ops.c - query/set operations on saved SPU context.
 *
 * These register operations act on the context save area (csa) of an
 * SPU context whose state is held in memory rather than loaded on
 * hardware, mirroring the hardware register operations.
 */

#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/poll.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_csa.h>
#include <asm/spu_info.h>
#include <asm/mmu_context.h>
#include "spufs.h"

/*
 * Reads and writes of the saved problem-state and priv2 registers may
 * require further state changes: raising SPU events, adjusting channel
 * counts, and so on.  gen_spu_event() records an MFC event in the saved
 * event-status channel (channel 0) and, if the event is newly raised
 * and enabled in the saved event mask (channel 1), makes it readable by
 * setting the channel count.
 */
static void gen_spu_event(struct spu_context *ctx, u32 event)
{
	u64 ch0_cnt;
	u64 ch0_data;
	u64 ch1_data;

	ch0_cnt = ctx->csa.spu_chnlcnt_RW[0];
	ch0_data = ctx->csa.spu_chnldata_RW[0];
	ch1_data = ctx->csa.spu_chnldata_RW[1];
	ctx->csa.spu_chnldata_RW[0] |= event;
	if ((ch0_cnt == 0) && !(ch0_data & event) && (ch1_data & event)) {
		ctx->csa.spu_chnlcnt_RW[0] = 1;
	}
}

static int spu_backing_mbox_read(struct spu_context *ctx, u32 *data)
{
	u32 mbox_stat;
	int ret = 0;

	spin_lock(&ctx->csa.register_lock);
	mbox_stat = ctx->csa.prob.mb_stat_R;
	if (mbox_stat & 0x0000ff) {
		/*
		 * Read the available word from the saved PU mailbox,
		 * mark the mailbox empty in mb_stat_R, let the SPU
		 * side write another outbound word (channel 28 count)
		 * and raise the mailbox-available event.
		 */
		*data = ctx->csa.prob.pu_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0x0000ff);
		ctx->csa.spu_chnlcnt_RW[28] = 1;
		gen_spu_event(ctx, MFC_PU_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static u32 spu_backing_mbox_stat_read(struct spu_context *ctx)
{
	return ctx->csa.prob.mb_stat_R;
}

static unsigned int spu_backing_mbox_stat_poll(struct spu_context *ctx,
					       unsigned int events)
{
	int ret;
	u32 stat;

	ret = 0;
	spin_lock_irq(&ctx->csa.register_lock);
	stat = ctx->csa.prob.mb_stat_R;

	/*
	 * If the requested mailbox condition is not yet met, clear any
	 * stale class 2 interrupt status and re-enable the corresponding
	 * interrupt in the saved mask, so the poller is woken once data
	 * (or space) becomes available.
	 */
	if (events & (POLLIN | POLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= POLLIN | POLLRDNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
						~CLASS2_MAILBOX_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
						CLASS2_ENABLE_MAILBOX_INTR;
		}
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
		else {
			ctx->csa.priv1.int_stat_class2_RW &=
					~CLASS2_MAILBOX_THRESHOLD_INTR;
			ctx->csa.priv1.int_mask_class2_RW |=
					CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		}
	}
	spin_unlock_irq(&ctx->csa.register_lock);
	return ret;
}

static int spu_backing_ibox_read(struct spu_context *ctx, u32 *data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.prob.mb_stat_R & 0xff0000) {
		/*
		 * Read the available word from the saved interrupt
		 * mailbox, mark it empty in mb_stat_R, let the SPU
		 * side write again (channel 30 count) and raise the
		 * interrupt-mailbox-available event.
		 */
		*data = ctx->csa.priv2.puint_mb_R;
		ctx->csa.prob.mb_stat_R &= ~(0xff0000);
		ctx->csa.spu_chnlcnt_RW[30] = 1;
		gen_spu_event(ctx, MFC_PU_INT_MAILBOX_AVAILABLE_EVENT);
		ret = 4;
	} else {
		/* Make sure we get woken up by the interrupt. */
		ctx->csa.priv1.int_mask_class2_RW |= CLASS2_ENABLE_MAILBOX_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

static int spu_backing_wbox_write(struct spu_context *ctx, u32 data)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	if ((ctx->csa.prob.mb_stat_R) & 0x00ff00) {
		int slot = ctx->csa.spu_chnlcnt_RW[29];
		int avail = (ctx->csa.prob.mb_stat_R & 0x00ff00) >> 8;

		/*
		 * There is space in the saved SPU inbound mailbox
		 * (4 entries): store the word in the next free slot,
		 * update the channel count and the free-entry count
		 * in mb_stat_R, and raise the written event.
		 */
		BUG_ON(avail != (4 - slot));
		ctx->csa.spu_mailbox_data[slot] = data;
		ctx->csa.spu_chnlcnt_RW[29] = ++slot;
		ctx->csa.prob.mb_stat_R &= ~(0x00ff00);
		ctx->csa.prob.mb_stat_R |= (((4 - slot) & 0xff) << 8);
		gen_spu_event(ctx, MFC_SPU_MAILBOX_WRITTEN_EVENT);
		ret = 4;
	} else {
		/*
		 * The mailbox is full: make sure we get woken up by
		 * the interrupt when space becomes available.
		 */
		ctx->csa.priv1.int_mask_class2_RW |=
			CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR;
		ret = 0;
	}
	spin_unlock(&ctx->csa.register_lock);
	return ret;
}

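/*
 * Signal notification registers 1 and 2.  A write goes to the saved
 * channel data for channel 3 or 4; if the corresponding OR-mode bit is
 * set in the saved SPU configuration register the new value is OR'ed
 * into the old one, otherwise it replaces it.  The channel count is
 * then set so the SPU can read the signal, and a signal event is
 * raised.
 */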
static u32 spu_backing_signal1_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[3];
}

static void spu_backing_signal1_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x1)
		ctx->csa.spu_chnldata_RW[3] |= data;
	else
		ctx->csa.spu_chnldata_RW[3] = data;
	ctx->csa.spu_chnlcnt_RW[3] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_1_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

static u32 spu_backing_signal2_read(struct spu_context *ctx)
{
	return ctx->csa.spu_chnldata_RW[4];
}

static void spu_backing_signal2_write(struct spu_context *ctx, u32 data)
{
	spin_lock(&ctx->csa.register_lock);
	if (ctx->csa.priv2.spu_cfg_RW & 0x2)
		ctx->csa.spu_chnldata_RW[4] |= data;
	else
		ctx->csa.spu_chnldata_RW[4] = data;
	ctx->csa.spu_chnlcnt_RW[4] = 1;
	gen_spu_event(ctx, MFC_SIGNAL_2_EVENT);
	spin_unlock(&ctx->csa.register_lock);
}

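/*
 * The signal type accessors toggle and report the OR-mode bits in the
 * saved SPU configuration register: bit 0 for signal notification 1,
 * bit 1 for signal notification 2.
 */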
static void spu_backing_signal1_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal1_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 1) != 0);
}

static void spu_backing_signal2_type_set(struct spu_context *ctx, u64 val)
{
	u64 tmp;

	spin_lock(&ctx->csa.register_lock);
	tmp = ctx->csa.priv2.spu_cfg_RW;
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	ctx->csa.priv2.spu_cfg_RW = tmp;
	spin_unlock(&ctx->csa.register_lock);
}

static u64 spu_backing_signal2_type_get(struct spu_context *ctx)
{
	return ((ctx->csa.priv2.spu_cfg_RW & 2) != 0);
}

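/*
 * Simple accessors: these read or write a single field of the saved
 * context state directly, with no side effects.
 */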
static u32 spu_backing_npc_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_npc_RW;
}

static void spu_backing_npc_write(struct spu_context *ctx, u32 val)
{
	ctx->csa.prob.spu_npc_RW = val;
}

static u32 spu_backing_status_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_status_R;
}

static char *spu_backing_get_ls(struct spu_context *ctx)
{
	return ctx->csa.lscsa->ls;
}

static void spu_backing_privcntl_write(struct spu_context *ctx, u64 val)
{
	ctx->csa.priv2.spu_privcntl_RW = val;
}

static u32 spu_backing_runcntl_read(struct spu_context *ctx)
{
	return ctx->csa.prob.spu_runcntl_RW;
}

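/*
 * Writing the run control register in the saved state also emulates the
 * resulting status register update: when the SPU is made runnable the
 * stop, halt, single-step and invalid-instruction/channel bits are
 * cleared and SPU_STATUS_RUNNING is set; otherwise SPU_STATUS_RUNNING
 * is cleared.
 */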
static void spu_backing_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock(&ctx->csa.register_lock);
	ctx->csa.prob.spu_runcntl_RW = val;
	if (val & SPU_RUNCNTL_RUNNABLE) {
		ctx->csa.prob.spu_status_R &=
			~SPU_STATUS_STOPPED_BY_STOP &
			~SPU_STATUS_STOPPED_BY_HALT &
			~SPU_STATUS_SINGLE_STEP &
			~SPU_STATUS_INVALID_INSTR &
			~SPU_STATUS_INVALID_CH;
		ctx->csa.prob.spu_status_R |= SPU_STATUS_RUNNING;
	} else {
		ctx->csa.prob.spu_status_R &= ~SPU_STATUS_RUNNING;
	}
	spin_unlock(&ctx->csa.register_lock);
}

static void spu_backing_runcntl_stop(struct spu_context *ctx)
{
	spu_backing_runcntl_write(ctx, SPU_RUNCNTL_STOP);
}

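/*
 * master_start/master_stop set or clear the master run control bit in
 * the saved MFC state register 1 (SR1).
 */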
static void spu_backing_master_start(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

static void spu_backing_master_stop(struct spu_context *ctx)
{
	struct spu_state *csa = &ctx->csa;
	u64 sr1;

	spin_lock(&csa->register_lock);
	sr1 = csa->priv1.mfc_sr1_RW & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	csa->priv1.mfc_sr1_RW = sr1;
	spin_unlock(&csa->register_lock);
}

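/*
 * set_mfc_query emulates a write to the MFC tag-group query registers:
 * it fails with -EAGAIN while a previous query is still pending,
 * otherwise it records the query mask and mode in the saved problem
 * state.
 */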
static int spu_backing_set_mfc_query(struct spu_context *ctx, u32 mask,
				     u32 mode)
{
	struct spu_problem_collapsed *prob = &ctx->csa.prob;
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	if (prob->dma_querytype_RW)
		goto out;
	ret = 0;

	prob->dma_querymask_RW = mask;
	prob->dma_querytype_RW = mode;
	/*
	 * Only the tag groups selected by the query mask are of
	 * interest; clear the remaining bits in the saved tag status.
	 */
	ctx->csa.prob.dma_tagstatus_R &= mask;
out:
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

static u32 spu_backing_read_mfc_tagstatus(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_tagstatus_R;
}

static u32 spu_backing_get_mfc_free_elements(struct spu_context *ctx)
{
	return ctx->csa.prob.dma_qstatus_R;
}

static int spu_backing_send_mfc_command(struct spu_context *ctx,
					struct mfc_dma_command *cmd)
{
	int ret;

	spin_lock(&ctx->csa.register_lock);
	ret = -EAGAIN;
	/*
	 * Queueing MFC commands into the saved state is not
	 * implemented, so the caller always gets -EAGAIN here.
	 */
	spin_unlock(&ctx->csa.register_lock);

	return ret;
}

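/*
 * Set the restart-DMA-command bit in the saved MFC control register so
 * that the stalled DMA command is restarted.
 */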
static void spu_backing_restart_dma(struct spu_context *ctx)
{
	ctx->csa.priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
}

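/*
 * Context operations used while an SPU context is saved in memory
 * (backed by the csa) rather than loaded onto a physical SPU.
 */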
struct spu_context_ops spu_backing_ops = {
	.mbox_read = spu_backing_mbox_read,
	.mbox_stat_read = spu_backing_mbox_stat_read,
	.mbox_stat_poll = spu_backing_mbox_stat_poll,
	.ibox_read = spu_backing_ibox_read,
	.wbox_write = spu_backing_wbox_write,
	.signal1_read = spu_backing_signal1_read,
	.signal1_write = spu_backing_signal1_write,
	.signal2_read = spu_backing_signal2_read,
	.signal2_write = spu_backing_signal2_write,
	.signal1_type_set = spu_backing_signal1_type_set,
	.signal1_type_get = spu_backing_signal1_type_get,
	.signal2_type_set = spu_backing_signal2_type_set,
	.signal2_type_get = spu_backing_signal2_type_get,
	.npc_read = spu_backing_npc_read,
	.npc_write = spu_backing_npc_write,
	.status_read = spu_backing_status_read,
	.get_ls = spu_backing_get_ls,
	.privcntl_write = spu_backing_privcntl_write,
	.runcntl_read = spu_backing_runcntl_read,
	.runcntl_write = spu_backing_runcntl_write,
	.runcntl_stop = spu_backing_runcntl_stop,
	.master_start = spu_backing_master_start,
	.master_stop = spu_backing_master_stop,
	.set_mfc_query = spu_backing_set_mfc_query,
	.read_mfc_tagstatus = spu_backing_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_backing_get_mfc_free_elements,
	.send_mfc_command = spu_backing_send_mfc_command,
	.restart_dma = spu_backing_restart_dma,
};