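/*
 * Hardware operations on active SPU contexts.
 *
 * Every callback in this file acts directly on the problem-state and
 * privileged register areas of the physical SPU backing the context,
 * so ctx->spu must point at real hardware when these are called.
 *
 * Several routines below test the mailbox status register mb_stat_R:
 * bits 0-7 count entries waiting in the SPU outbound mailbox, bits
 * 8-15 count free slots in the SPU inbound (write) mailbox, and bits
 * 16-23 count entries in the outbound interrupt mailbox.
 */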
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/poll.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>

#include <asm/io.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/mmu_context.h>
#include "spufs.h"

static int spu_hw_mbox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	u32 mbox_stat;
	int ret = 0;

	spin_lock_irq(&spu->register_lock);
	mbox_stat = in_be32(&prob->mb_stat_R);
	if (mbox_stat & 0x0000ff) {
		/* at least one word waiting in the outbound mailbox */
		*data = in_be32(&prob->pu_mb_R);
		ret = 4;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static u32 spu_hw_mbox_stat_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->mb_stat_R);
}

static unsigned int spu_hw_mbox_stat_poll(struct spu_context *ctx,
					  unsigned int events)
{
	struct spu *spu = ctx->spu;
	int ret = 0;
	u32 stat;

	spin_lock_irq(&spu->register_lock);
	stat = in_be32(&spu->problem->mb_stat_R);

	/*
	 * If a requested event is already pending, set the corresponding
	 * bits in the poll mask.  Otherwise enable the class 2 interrupt
	 * so we get woken up when the event arrives, but first clear any
	 * stale pending interrupt so we are not woken up spuriously.
	 */

	if (events & (POLLIN | POLLRDNORM)) {
		if (stat & 0xff0000)
			ret |= POLLIN | POLLRDNORM;
		else {
			spu_int_stat_clear(spu, 2, CLASS2_MAILBOX_INTR);
			spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		}
	}
	if (events & (POLLOUT | POLLWRNORM)) {
		if (stat & 0x00ff00)
			ret |= POLLOUT | POLLWRNORM;
		else {
			spu_int_stat_clear(spu, 2,
					CLASS2_MAILBOX_THRESHOLD_INTR);
			spu_int_mask_or(spu, 2,
					CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
		}
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static int spu_hw_ibox_read(struct spu_context *ctx, u32 *data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0xff0000) {
		/* read the first available word */
		*data = in_be64(&priv2->puint_mb_R);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt */
		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_INTR);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

static int spu_hw_wbox_write(struct spu_context *ctx, u32 data)
{
	struct spu *spu = ctx->spu;
	struct spu_problem __iomem *prob = spu->problem;
	int ret;

	spin_lock_irq(&spu->register_lock);
	if (in_be32(&prob->mb_stat_R) & 0x00ff00) {
		/* space available in the inbound mailbox: write the word */
		out_be32(&prob->spu_mb_W, data);
		ret = 4;
	} else {
		/* make sure we get woken up by the interrupt when space
		   becomes available */
		spu_int_mask_or(spu, 2, CLASS2_ENABLE_MAILBOX_THRESHOLD_INTR);
		ret = 0;
	}
	spin_unlock_irq(&spu->register_lock);
	return ret;
}

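/*
 * Signal notification support.  Writes go straight to the problem-state
 * signal_notify registers; the "type" accessors flip bits 0 and 1 of
 * spu_cfg_RW, where a set bit puts the corresponding signal register
 * into logical-OR mode and a clear bit selects overwrite mode.
 */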
static void spu_hw_signal1_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify1, data);
}

static void spu_hw_signal2_write(struct spu_context *ctx, u32 data)
{
	out_be32(&ctx->spu->problem->signal_notify2, data);
}

static void spu_hw_signal1_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 1;
	else
		tmp &= ~1;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal1_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 1) != 0);
}

static void spu_hw_signal2_type_set(struct spu_context *ctx, u64 val)
{
	struct spu *spu = ctx->spu;
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	u64 tmp;

	spin_lock_irq(&spu->register_lock);
	tmp = in_be64(&priv2->spu_cfg_RW);
	if (val)
		tmp |= 2;
	else
		tmp &= ~2;
	out_be64(&priv2->spu_cfg_RW, tmp);
	spin_unlock_irq(&spu->register_lock);
}

static u64 spu_hw_signal2_type_get(struct spu_context *ctx)
{
	return ((in_be64(&ctx->spu->priv2->spu_cfg_RW) & 2) != 0);
}

static u32 spu_hw_npc_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_npc_RW);
}

static void spu_hw_npc_write(struct spu_context *ctx, u32 val)
{
	out_be32(&ctx->spu->problem->spu_npc_RW, val);
}

static u32 spu_hw_status_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_status_R);
}

static char *spu_hw_get_ls(struct spu_context *ctx)
{
	return ctx->spu->local_store;
}

static void spu_hw_privcntl_write(struct spu_context *ctx, u64 val)
{
	out_be64(&ctx->spu->priv2->spu_privcntl_RW, val);
}

static u32 spu_hw_runcntl_read(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->spu_runcntl_RW);
}

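/*
 * Starting an isolated-mode context additionally requires the
 * load-request enable bit in the privileged control register to be
 * set before the run control register is written.
 */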
static void spu_hw_runcntl_write(struct spu_context *ctx, u32 val)
{
	spin_lock_irq(&ctx->spu->register_lock);
	if (val & SPU_RUNCNTL_ISOLATE)
		spu_hw_privcntl_write(ctx,
			SPU_PRIVCNT_LOAD_REQUEST_ENABLE_MASK);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, val);
	spin_unlock_irq(&ctx->spu->register_lock);
}

static void spu_hw_runcntl_stop(struct spu_context *ctx)
{
	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&ctx->spu->problem->spu_runcntl_RW, SPU_RUNCNTL_STOP);
	while (in_be32(&ctx->spu->problem->spu_status_R) & SPU_STATUS_RUNNING)
		cpu_relax();
	spin_unlock_irq(&ctx->spu->register_lock);
}

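/*
 * Setting or clearing the master run control bit in MFC state register
 * one enables or disables execution of the SPU as a whole, independent
 * of the per-context run control register.
 */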
static void spu_hw_master_start(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 sr1;

	spin_lock_irq(&spu->register_lock);
	sr1 = spu_mfc_sr1_get(spu) | MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	spu_mfc_sr1_set(spu, sr1);
	spin_unlock_irq(&spu->register_lock);
}

static void spu_hw_master_stop(struct spu_context *ctx)
{
	struct spu *spu = ctx->spu;
	u64 sr1;

	spin_lock_irq(&spu->register_lock);
	sr1 = spu_mfc_sr1_get(spu) & ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
	spu_mfc_sr1_set(spu, sr1);
	spin_unlock_irq(&spu->register_lock);
}

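/*
 * Set up a tag-group query: write the tag mask and query type to the
 * problem-state DMA query registers.  Fails with -EAGAIN if a previous
 * query is still outstanding.
 */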
static int spu_hw_set_mfc_query(struct spu_context *ctx, u32 mask, u32 mode)
{
	struct spu_problem __iomem *prob = ctx->spu->problem;
	int ret;

	spin_lock_irq(&ctx->spu->register_lock);
	ret = -EAGAIN;
	if (in_be32(&prob->dma_querytype_RW))
		goto out;
	ret = 0;
	out_be32(&prob->dma_querymask_RW, mask);
	out_be32(&prob->dma_querytype_RW, mode);
out:
	spin_unlock_irq(&ctx->spu->register_lock);
	return ret;
}

static u32 spu_hw_read_mfc_tagstatus(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->dma_tagstatus_R);
}

static u32 spu_hw_get_mfc_free_elements(struct spu_context *ctx)
{
	return in_be32(&ctx->spu->problem->dma_qstatus_R);
}

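/*
 * Queue an MFC DMA command through the problem-state command registers.
 * The low 16 bits of the command status read back after the final write
 * report whether the command was accepted: 0 means success, 2 means the
 * command could not be queued yet (the caller may retry), anything else
 * is treated as an invalid command.
 */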
static int spu_hw_send_mfc_command(struct spu_context *ctx,
				   struct mfc_dma_command *cmd)
{
	u32 status;
	struct spu_problem __iomem *prob = ctx->spu->problem;

	spin_lock_irq(&ctx->spu->register_lock);
	out_be32(&prob->mfc_lsa_W, cmd->lsa);
	out_be64(&prob->mfc_ea_W, cmd->ea);
	out_be32(&prob->mfc_union_W.by32.mfc_size_tag32,
		 cmd->size << 16 | cmd->tag);
	out_be32(&prob->mfc_union_W.by32.mfc_class_cmd32,
		 cmd->class << 16 | cmd->cmd);
	status = in_be32(&prob->mfc_union_W.by32.mfc_class_cmd32);
	spin_unlock_irq(&ctx->spu->register_lock);

	switch (status & 0xffff) {
	case 0:
		return 0;
	case 2:
		return -EAGAIN;
	default:
		return -EINVAL;
	}
}

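/*
 * Kick the MFC to resume processing its command queue, unless a context
 * switch is already pending, in which case the save/restore code takes
 * care of the queue itself.
 */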
static void spu_hw_restart_dma(struct spu_context *ctx)
{
	struct spu_priv2 __iomem *priv2 = ctx->spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &ctx->spu->flags))
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
}

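/*
 * Operation table used while a context owns a physical SPU; when the
 * context is scheduled out, spufs switches to a parallel set of
 * operations that work on the saved context image instead.
 */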
struct spu_context_ops spu_hw_ops = {
	.mbox_read = spu_hw_mbox_read,
	.mbox_stat_read = spu_hw_mbox_stat_read,
	.mbox_stat_poll = spu_hw_mbox_stat_poll,
	.ibox_read = spu_hw_ibox_read,
	.wbox_write = spu_hw_wbox_write,
	.signal1_write = spu_hw_signal1_write,
	.signal2_write = spu_hw_signal2_write,
	.signal1_type_set = spu_hw_signal1_type_set,
	.signal1_type_get = spu_hw_signal1_type_get,
	.signal2_type_set = spu_hw_signal2_type_set,
	.signal2_type_get = spu_hw_signal2_type_get,
	.npc_read = spu_hw_npc_read,
	.npc_write = spu_hw_npc_write,
	.status_read = spu_hw_status_read,
	.get_ls = spu_hw_get_ls,
	.privcntl_write = spu_hw_privcntl_write,
	.runcntl_read = spu_hw_runcntl_read,
	.runcntl_write = spu_hw_runcntl_write,
	.runcntl_stop = spu_hw_runcntl_stop,
	.master_start = spu_hw_master_start,
	.master_stop = spu_hw_master_stop,
	.set_mfc_query = spu_hw_set_mfc_query,
	.read_mfc_tagstatus = spu_hw_read_mfc_tagstatus,
	.get_mfc_free_elements = spu_hw_get_mfc_free_elements,
	.send_mfc_command = spu_hw_send_mfc_command,
	.restart_dma = spu_hw_restart_dma,
};