1
2
3
4
5
6
7#include <linux/kernel.h>
8#include <linux/types.h>
9#include <linux/mm.h>
10#include <linux/sched.h>
11#include <linux/pci.h>
12#include <linux/init.h>
13#include <linux/bitops.h>
14
15#include <asm/ptrace.h>
16#include <asm/system.h>
17#include <asm/dma.h>
18#include <asm/irq.h>
19#include <asm/mmu_context.h>
20#include <asm/io.h>
21#include <asm/pgtable.h>
22#include <asm/core_marvel.h>
23#include <asm/hwrpb.h>
24#include <asm/tlbflush.h>
25#include <asm/vga.h>
26#include <asm/rtc.h>
27
28#include "proto.h"
29#include "err_impl.h"
30#include "irq_impl.h"
31#include "pci_impl.h"
32#include "machvec_impl.h"
33
34#if NR_IRQS < MARVEL_NR_IRQS
35# error NR_IRQS < MARVEL_NR_IRQS !!!
36#endif
37
38
39
40
41
42static void
43io7_device_interrupt(unsigned long vector)
44{
45 unsigned int pid;
46 unsigned int irq;
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62 pid = vector >> 16;
63 irq = ((vector & 0xffff) - 0x800) >> 4;
64
65 irq += 16;
66 irq &= MARVEL_IRQ_VEC_IRQ_MASK;
67 irq |= pid << MARVEL_IRQ_VEC_PE_SHIFT;
68
69 handle_irq(irq);
70}
71
72static volatile unsigned long *
73io7_get_irq_ctl(unsigned int irq, struct io7 **pio7)
74{
75 volatile unsigned long *ctl;
76 unsigned int pid;
77 struct io7 *io7;
78
79 pid = irq >> MARVEL_IRQ_VEC_PE_SHIFT;
80
81 if (!(io7 = marvel_find_io7(pid))) {
82 printk(KERN_ERR
83 "%s for nonexistent io7 -- vec %x, pid %d\n",
84 __func__, irq, pid);
85 return NULL;
86 }
87
88 irq &= MARVEL_IRQ_VEC_IRQ_MASK;
89 irq -= 16;
90
91 if (irq >= 0x180) {
92 printk(KERN_ERR
93 "%s for invalid irq -- pid %d adjusted irq %x\n",
94 __func__, pid, irq);
95 return NULL;
96 }
97
98 ctl = &io7->csrs->PO7_LSI_CTL[irq & 0xff].csr;
99 if (irq >= 0x80)
100 ctl = &io7->csrs->PO7_MSI_CTL[((irq - 0x80) >> 5) & 0x0f].csr;
101
102 if (pio7) *pio7 = io7;
103 return ctl;
104}
105
106static void
107io7_enable_irq(unsigned int irq)
108{
109 volatile unsigned long *ctl;
110 struct io7 *io7;
111
112 ctl = io7_get_irq_ctl(irq, &io7);
113 if (!ctl || !io7) {
114 printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
115 __func__, irq);
116 return;
117 }
118
119 spin_lock(&io7->irq_lock);
120 *ctl |= 1UL << 24;
121 mb();
122 *ctl;
123 spin_unlock(&io7->irq_lock);
124}
125
126static void
127io7_disable_irq(unsigned int irq)
128{
129 volatile unsigned long *ctl;
130 struct io7 *io7;
131
132 ctl = io7_get_irq_ctl(irq, &io7);
133 if (!ctl || !io7) {
134 printk(KERN_ERR "%s: get_ctl failed for irq %x\n",
135 __func__, irq);
136 return;
137 }
138
139 spin_lock(&io7->irq_lock);
140 *ctl &= ~(1UL << 24);
141 mb();
142 *ctl;
143 spin_unlock(&io7->irq_lock);
144}
145
/* irq_chip startup hook: starting an IO7 irq is just enabling it. */
static unsigned int
io7_startup_irq(unsigned int irq)
{
	io7_enable_irq(irq);
	return 0;	/* never pending at startup */
}
152
153static void
154io7_end_irq(unsigned int irq)
155{
156 if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
157 io7_enable_irq(irq);
158}
159
/* Placeholder for irq_chip hooks that require no action. */
static void
marvel_irq_noop(unsigned int irq)
{
}
165
/* As marvel_irq_noop, but for hooks that must return a value. */
static unsigned int
marvel_irq_noop_return(unsigned int irq)
{
	return 0;
}
171
/*
 * irq_chip for the 16 legacy irqs: every hook is a no-op, so the
 * generic irq layer performs no hardware manipulation for them.
 */
static struct irq_chip marvel_legacy_irq_type = {
	.name = "LEGACY",
	.startup = marvel_irq_noop_return,
	.shutdown = marvel_irq_noop,
	.enable = marvel_irq_noop,
	.disable = marvel_irq_noop,
	.ack = marvel_irq_noop,
	.end = marvel_irq_noop,
};
181
/*
 * irq_chip for IO7 LSI (level-sensitive) interrupts.  .ack disables
 * the source while it is serviced; .end (via io7_end_irq) re-enables
 * it when servicing completes.
 */
static struct irq_chip io7_lsi_irq_type = {
	.name = "LSI",
	.startup = io7_startup_irq,
	.shutdown = io7_disable_irq,
	.enable = io7_enable_irq,
	.disable = io7_disable_irq,
	.ack = io7_disable_irq,
	.end = io7_end_irq,
};
191
/*
 * irq_chip for IO7 MSI interrupts.  Unlike LSIs, .ack is a no-op —
 * presumably MSIs need no mask-during-service; confirm against the
 * IO7 hardware documentation.
 */
static struct irq_chip io7_msi_irq_type = {
	.name = "MSI",
	.startup = io7_startup_irq,
	.shutdown = io7_disable_irq,
	.enable = io7_enable_irq,
	.disable = io7_disable_irq,
	.ack = marvel_irq_noop,
	.end = io7_end_irq,
};
201
/*
 * Retarget one of the IO7's miscellaneous interrupt CSRs (HLT/HPI/
 * CRD/STV/HEI) at CPU 'where' by rewriting the 9-bit target field
 * at bit 24, preserving all other bits.
 */
static void
io7_redirect_irq(struct io7 *io7,
		 volatile unsigned long *csr,
		 unsigned int where)
{
	unsigned long reg = *csr;

	reg = (reg & ~(0x1ffUL << 24)) | ((unsigned long)where << 24);

	*csr = reg;
	mb();
	*csr;			/* read back to push the write out */
}
217
218static void
219io7_redirect_one_lsi(struct io7 *io7, unsigned int which, unsigned int where)
220{
221 unsigned long val;
222
223
224
225
226 val = io7->csrs->PO7_LSI_CTL[which].csr;
227 val &= ~(0x1ffUL << 14);
228 val |= ((unsigned long)where << 14);
229
230 io7->csrs->PO7_LSI_CTL[which].csr = val;
231 mb();
232 io7->csrs->PO7_LSI_CTL[which].csr;
233}
234
235static void
236io7_redirect_one_msi(struct io7 *io7, unsigned int which, unsigned int where)
237{
238 unsigned long val;
239
240
241
242
243 val = io7->csrs->PO7_MSI_CTL[which].csr;
244 val &= ~(0x1ffUL << 14);
245 val |= ((unsigned long)where << 14);
246
247 io7->csrs->PO7_MSI_CTL[which].csr = val;
248 mb();
249 io7->csrs->PO7_MSI_CTL[which].csr;
250}
251
252static void __init
253init_one_io7_lsi(struct io7 *io7, unsigned int which, unsigned int where)
254{
255
256
257
258 io7->csrs->PO7_LSI_CTL[which].csr = ((unsigned long)where << 14);
259 mb();
260 io7->csrs->PO7_LSI_CTL[which].csr;
261}
262
263static void __init
264init_one_io7_msi(struct io7 *io7, unsigned int which, unsigned int where)
265{
266
267
268
269 io7->csrs->PO7_MSI_CTL[which].csr = ((unsigned long)where << 14);
270 mb();
271 io7->csrs->PO7_MSI_CTL[which].csr;
272}
273
/*
 * Wire up all irqs belonging to one IO7: register the LSI and MSI
 * irq_chip ops in irq_desc[] and point every interrupt source at the
 * boot CPU.  Secondaries retarget their local IO7 later in
 * marvel_smp_callin().
 */
static void __init
init_io7_irqs(struct io7 *io7,
	      struct irq_chip *lsi_ops,
	      struct irq_chip *msi_ops)
{
	/* Base of this IO7's global irq range: PE number in the high
	   bits plus 16 to step over the legacy irqs. */
	long base = (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT) + 16;
	long i;

	printk("Initializing interrupts for IO7 at PE %u - base %lx\n",
	       io7->pe, base);

	printk(" Interrupts reported to CPU at PE %u\n", boot_cpuid);

	spin_lock(&io7->irq_lock);

	/* Direct the IO7's miscellaneous (halt/error/etc.) CSR
	   interrupts to the boot CPU. */
	io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, boot_cpuid);
	io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, boot_cpuid);

	/* LSIs occupy [base, base+128): level-triggered, start disabled. */
	for (i = 0; i < 128; ++i) {
		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[base + i].chip = lsi_ops;
	}

	/* Initialize the implemented LSIs: 0x00-0x5f plus the two
	   special slots 0x74/0x75. */
	for (i = 0; i < 0x60; ++i)
		init_one_io7_lsi(io7, i, boot_cpuid);

	init_one_io7_lsi(io7, 0x74, boot_cpuid);
	init_one_io7_lsi(io7, 0x75, boot_cpuid);

	/* MSIs occupy [base+128, base+128+512). */
	for (i = 128; i < (128 + 512); ++i) {
		irq_desc[base + i].status = IRQ_DISABLED | IRQ_LEVEL;
		irq_desc[base + i].chip = msi_ops;
	}

	/* The 16 MSI control registers (each covers a block of MSIs,
	   see io7_get_irq_ctl). */
	for (i = 0; i < 16; ++i)
		init_one_io7_msi(io7, i, boot_cpuid);

	spin_unlock(&io7->irq_lock);
}
330
331static void __init
332marvel_init_irq(void)
333{
334 int i;
335 struct io7 *io7 = NULL;
336
337
338 for (i = 0; i < 16; ++i) {
339 irq_desc[i].status = IRQ_DISABLED;
340 irq_desc[i].chip = &marvel_legacy_irq_type;
341 }
342
343
344 for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
345 init_io7_irqs(io7, &io7_lsi_irq_type, &io7_msi_irq_type);
346}
347
/*
 * Map a PCI device interrupt to a global irq number.
 *
 * Starts from the device's PCI_INTERRUPT_LINE (an LSI number); if the
 * device has MSI enabled, the irq is taken from the MSI data register
 * instead (biased by 0x80 into the MSI range).  Either way the result
 * is offset past the 16 legacy irqs and merged with the owning IO7's
 * PE number — the same encoding io7_get_irq_ctl() decodes.
 */
static int
marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
{
	struct pci_controller *hose = dev->sysdata;
	struct io7_port *io7_port = hose->sysdata;
	struct io7 *io7 = io7_port->io7;
	int msi_loc, msi_data_off;
	u16 msg_ctl;
	u16 msg_dat;
	u8 intline;
	int irq;

	/* Default: the console-assigned LSI from config space. */
	pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
	irq = intline;

	msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
	msg_ctl = 0;
	if (msi_loc)
		pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);

	if (msg_ctl & PCI_MSI_FLAGS_ENABLE) {
		/* MSI is enabled: the irq comes from the MSI data
		   register (32- or 64-bit capability layout). */
		msi_data_off = PCI_MSI_DATA_32;
		if (msg_ctl & PCI_MSI_FLAGS_64BIT)
			msi_data_off = PCI_MSI_DATA_64;
		pci_read_config_word(dev, msi_loc + msi_data_off, &msg_dat);

		irq = msg_dat & 0x1ff;	/* 9-bit MSI vector */
		irq += 0x80;		/* bias into the MSI irq range */

#if 1
		/* Debug chatter: always on for now. */
		printk("PCI:%d:%d:%d (hose %d) is using MSI\n",
		       dev->bus->number,
		       PCI_SLOT(dev->devfn),
		       PCI_FUNC(dev->devfn),
		       hose->index);
		printk(" %d message(s) from 0x%04x\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       msg_dat);
		printk(" reporting on %d IRQ(s) from %d (0x%x)\n",
		       1 << ((msg_ctl & PCI_MSI_FLAGS_QSIZE) >> 4),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT),
		       (irq + 16) | (io7->pe << MARVEL_IRQ_VEC_PE_SHIFT));
#endif

#if 0
		/* Disabled escape hatch: turn MSI off and fall back to
		   the LSI from config space. */
		pci_write_config_word(dev, msi_loc + PCI_MSI_FLAGS,
				      msg_ctl & ~PCI_MSI_FLAGS_ENABLE);
		pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
		irq = intline;

		printk(" forcing LSI interrupt on irq %d [0x%x]\n", irq, irq);
#endif
	}

	/* Offset past the legacy irqs and merge in the PE number. */
	irq += 16;
	irq |= io7->pe << MARVEL_IRQ_VEC_PE_SHIFT;

	return irq;
}
407
408static void __init
409marvel_init_pci(void)
410{
411 struct io7 *io7;
412
413 marvel_register_error_handlers();
414
415 pci_probe_only = 1;
416 common_init_pci();
417 locate_and_init_vga(NULL);
418
419
420 for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; )
421 io7_clear_errors(io7);
422}
423
/* Platform RTC initialization: just hook up the RTC interrupt. */
static void __init
marvel_init_rtc(void)
{
	init_rtc_irq();
}
429
/*
 * Argument block for forwarding an RTC get/set request to the boot
 * CPU via smp_call_function_single().
 */
struct marvel_rtc_time {
	struct rtc_time *time;	/* time buffer read/written on the boot CPU */
	int retval;		/* result of __get_rtc_time/__set_rtc_time */
};
434
#ifdef CONFIG_SMP
/* Executed on the boot CPU: read the RTC into rt->time. */
static void
smp_get_rtc_time(void *data)
{
	struct marvel_rtc_time *rt = data;

	rt->retval = __get_rtc_time(rt->time);
}

/* Executed on the boot CPU: write rt->time to the RTC. */
static void
smp_set_rtc_time(void *data)
{
	struct marvel_rtc_time *rt = data;

	rt->retval = __set_rtc_time(rt->time);
}
#endif
450
/*
 * Read the RTC.  On SMP, only the boot CPU can reach the RTC
 * hardware, so other CPUs bounce the request there.
 */
static unsigned int
marvel_get_rtc_time(struct rtc_time *time)
{
#ifdef CONFIG_SMP
	if (smp_processor_id() != boot_cpuid) {
		struct marvel_rtc_time mrt = { .time = time };

		smp_call_function_single(boot_cpuid,
					 smp_get_rtc_time, &mrt, 1);
		return mrt.retval;
	}
#endif
	return __get_rtc_time(time);
}
465
/*
 * Set the RTC.  As with reading, only the boot CPU can reach the
 * hardware; other CPUs bounce the request there.
 */
static int
marvel_set_rtc_time(struct rtc_time *time)
{
#ifdef CONFIG_SMP
	if (smp_processor_id() != boot_cpuid) {
		struct marvel_rtc_time mrt = { .time = time };

		smp_call_function_single(boot_cpuid,
					 smp_set_rtc_time, &mrt, 1);
		return mrt.retval;
	}
#endif
	return __set_rtc_time(time);
}
480
/*
 * Secondary-CPU callin hook.  If this CPU has a local IO7 (one whose
 * PE matches the CPU id), redirect all of that IO7's interrupts from
 * the boot CPU (where init_io7_irqs pointed them) to this CPU.
 */
static void
marvel_smp_callin(void)
{
	int cpuid = hard_smp_processor_id();
	struct io7 *io7 = marvel_find_io7(cpuid);
	unsigned int i;

	/* No local IO7 — nothing to retarget. */
	if (!io7)
		return;

	printk("Redirecting IO7 interrupts to local CPU at PE %u\n", cpuid);

	/* Retarget the miscellaneous CSR interrupts. */
	io7_redirect_irq(io7, &io7->csrs->HLT_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->HPI_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->CRD_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->STV_CTL.csr, cpuid);
	io7_redirect_irq(io7, &io7->csrs->HEI_CTL.csr, cpuid);

	/* Retarget the implemented LSIs: 0x00-0x5f plus 0x74/0x75
	   (same set initialized in init_io7_irqs). */
	for (i = 0; i < 0x60; ++i)
		io7_redirect_one_lsi(io7, i, cpuid);

	io7_redirect_one_lsi(io7, 0x74, cpuid);
	io7_redirect_one_lsi(io7, 0x75, cpuid);

	/* Retarget the 16 MSI control registers. */
	for (i = 0; i < 16; ++i)
		io7_redirect_one_msi(io7, i, cpuid);
}
514
515
516
517
/*
 * The machine vector for Marvel (EV7) systems: ties the platform
 * hooks defined above into the generic Alpha machine-vector scheme.
 */
struct alpha_machine_vector marvel_ev7_mv __initmv = {
	.vector_name = "MARVEL/EV7",
	DO_EV7_MMU,
	.rtc_port = 0x70,
	.rtc_get_time = marvel_get_rtc_time,	/* SMP-safe RTC access */
	.rtc_set_time = marvel_set_rtc_time,
	DO_MARVEL_IO,
	.machine_check = marvel_machine_check,
	.max_isa_dma_address = ALPHA_MAX_ISA_DMA_ADDRESS,
	.min_io_address = DEFAULT_IO_BASE,
	.min_mem_address = DEFAULT_MEM_BASE,
	.pci_dac_offset = IO7_DAC_OFFSET,

	.nr_irqs = MARVEL_NR_IRQS,
	.device_interrupt = io7_device_interrupt,

	.agp_info = marvel_agp_info,

	.smp_callin = marvel_smp_callin,	/* retargets local IO7 irqs */
	.init_arch = marvel_init_arch,
	.init_irq = marvel_init_irq,
	.init_rtc = marvel_init_rtc,
	.init_pci = marvel_init_pci,
	.kill_arch = marvel_kill_arch,
	.pci_map_irq = marvel_map_irq,
	.pci_swizzle = common_swizzle,

	/* NUMA topology helpers. */
	.pa_to_nid = marvel_pa_to_nid,
	.cpuid_to_nid = marvel_cpuid_to_nid,
	.node_mem_start = marvel_node_mem_start,
	.node_mem_size = marvel_node_mem_size,
};
ALIAS_MV(marvel_ev7)
551