/*
 * Driver for OHCI 1394 controllers
 *
 * Copyright (C) 2003-2006 Kristian Hoegsberg <krh@bitplanet.net>
 */
#include <linux/bitops.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>
#include <asm/page.h>

#ifdef CONFIG_PPC_PMAC
#include <asm/pmac_feature.h>
#endif

#include "core.h"
#include "ohci.h"
#define ohci_info(ohci, f, args...)	dev_info(ohci->card.device, f, ##args)
#define ohci_notice(ohci, f, args...)	dev_notice(ohci->card.device, f, ##args)
#define ohci_err(ohci, f, args...)	dev_err(ohci->card.device, f, ##args)

#define DESCRIPTOR_OUTPUT_MORE		0
#define DESCRIPTOR_OUTPUT_LAST		(1 << 12)
#define DESCRIPTOR_INPUT_MORE		(2 << 12)
#define DESCRIPTOR_INPUT_LAST		(3 << 12)
#define DESCRIPTOR_STATUS		(1 << 11)
#define DESCRIPTOR_KEY_IMMEDIATE	(2 << 8)
#define DESCRIPTOR_PING			(1 << 7)
#define DESCRIPTOR_YY			(1 << 6)
#define DESCRIPTOR_NO_IRQ		(0 << 4)
#define DESCRIPTOR_IRQ_ERROR		(1 << 4)
#define DESCRIPTOR_IRQ_ALWAYS		(3 << 4)
#define DESCRIPTOR_BRANCH_ALWAYS	(3 << 2)
#define DESCRIPTOR_WAIT			(3 << 0)

#define DESCRIPTOR_CMD			(0xf << 12)

struct descriptor {
	__le16 req_count;
	__le16 control;
	__le32 data_address;
	__le32 branch_address;
	__le16 res_count;
	__le16 transfer_status;
} __attribute__((aligned(16)));
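
/*
 * Note: struct descriptor mirrors the 16-byte DMA descriptor layout of the
 * OHCI 1394 specification: req_count/control form the first quadlet,
 * data_address and branch_address the next two (the low four bits of
 * branch_address carry the block size Z), and res_count/transfer_status make
 * up the status quadlet that the controller writes back.
 */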

#define CONTROL_SET(regs)	(regs)
#define CONTROL_CLEAR(regs)	((regs) + 4)
#define COMMAND_PTR(regs)	((regs) + 12)
#define CONTEXT_MATCH(regs)	((regs) + 16)

#define AR_BUFFER_SIZE	(32*1024)
#define AR_BUFFERS_MIN	DIV_ROUND_UP(AR_BUFFER_SIZE, PAGE_SIZE)
/* we need at least two pages for proper list management */
#define AR_BUFFERS	(AR_BUFFERS_MIN >= 2 ? AR_BUFFERS_MIN : 2)

#define MAX_ASYNC_PAYLOAD	4096
#define MAX_AR_PACKET_SIZE	(16 + MAX_ASYNC_PAYLOAD + 4)
#define AR_WRAPAROUND_PAGES	DIV_ROUND_UP(MAX_AR_PACKET_SIZE, PAGE_SIZE)
struct ar_context {
	struct fw_ohci *ohci;
	struct page *pages[AR_BUFFERS];
	void *buffer;
	struct descriptor *descriptors;
	dma_addr_t descriptors_bus;
	void *pointer;
	unsigned int last_buffer_index;
	u32 regs;
	struct tasklet_struct tasklet;
};

struct context;

typedef int (*descriptor_callback_t)(struct context *ctx,
				     struct descriptor *d,
				     struct descriptor *last);

/*
 * A buffer that contains a block of DMA-able coherent memory used for
 * storing a portion of a DMA descriptor program.
 */
struct descriptor_buffer {
	struct list_head list;
	dma_addr_t buffer_bus;
	size_t buffer_size;
	size_t used;
	struct descriptor buffer[0];
};

struct context {
	struct fw_ohci *ohci;
	u32 regs;
	int total_allocation;
	u32 current_bus;
	bool running;
	bool flushing;

	/*
	 * List of page-sized buffers for storing DMA descriptors.
	 * Navigated using struct list_head.
	 */
	struct list_head buffer_list;

	/*
	 * Pointer to a buffer inside buffer_list that contains the tail
	 * end of the current DMA program.
	 */
	struct descriptor_buffer *buffer_tail;

	/*
	 * The descriptor containing the branch address of the first
	 * descriptor that has not yet been filled by the card.
	 */
	struct descriptor *last;

	/*
	 * The last descriptor block in the DMA program; it contains the
	 * branch address that must be updated upon appending a new
	 * descriptor.
	 */
	struct descriptor *prev;
	int prev_z;

	descriptor_callback_t callback;

	struct tasklet_struct tasklet;
};

#define IT_HEADER_SY(v)          ((v) << 0)
#define IT_HEADER_TCODE(v)       ((v) << 4)
#define IT_HEADER_CHANNEL(v)     ((v) << 8)
#define IT_HEADER_TAG(v)         ((v) << 14)
#define IT_HEADER_SPEED(v)       ((v) << 16)
#define IT_HEADER_DATA_LENGTH(v) ((v) << 16)
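
/*
 * Note: IT_HEADER_SPEED and IT_HEADER_DATA_LENGTH use the same shift because
 * they occupy different quadlets of the isochronous transmit header:
 * sy/tcode/channel/tag/speed are packed into the first immediate quadlet,
 * while the data length sits in the upper half of the second one.
 */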

struct iso_context {
	struct fw_iso_context base;
	struct context context;
	void *header;
	size_t header_length;
	unsigned long flushing_completions;
	u32 mc_buffer_bus;
	u16 mc_completed;
	u16 last_timestamp;
	u8 sync;
	u8 tags;
};

#define CONFIG_ROM_SIZE 1024

struct fw_ohci {
	struct fw_card card;

	__iomem char *registers;
	int node_id;
	int generation;
	int request_generation;	/* for timestamping incoming requests only */
	unsigned quirks;
	unsigned int pri_req_max;
	u32 bus_time;
	bool bus_time_running;
	bool is_root;
	bool csr_state_setclear_abdicate;
	int n_ir;
	int n_it;
	/*
	 * Spinlock for accessing fw_ohci data.  Never call out of
	 * this driver with this lock held.
	 */
	spinlock_t lock;

	struct mutex phy_reg_mutex;

	void *misc_buffer;
	dma_addr_t misc_buffer_bus;

	struct ar_context ar_request_ctx;
	struct ar_context ar_response_ctx;
	struct context at_request_ctx;
	struct context at_response_ctx;

	u32 it_context_support;
	u32 it_context_mask;
	struct iso_context *it_context_list;
	u64 ir_context_channels;
	u32 ir_context_support;
	u32 ir_context_mask;
	struct iso_context *ir_context_list;
	u64 mc_channels;
	bool mc_allocated;

	__be32 *config_rom;
	dma_addr_t config_rom_bus;
	__be32 *next_config_rom;
	dma_addr_t next_config_rom_bus;
	__be32 next_header;

	__le32 *self_id_cpu;
	dma_addr_t self_id_bus;
	struct work_struct bus_reset_work;

	u32 self_id_buffer[512];
};

static inline struct fw_ohci *fw_ohci(struct fw_card *card)
{
	return container_of(card, struct fw_ohci, card);
}

#define IT_CONTEXT_CYCLE_MATCH_ENABLE	0x80000000
#define IR_CONTEXT_BUFFER_FILL		0x80000000
#define IR_CONTEXT_ISOCH_HEADER		0x40000000
#define IR_CONTEXT_CYCLE_MATCH_ENABLE	0x20000000
#define IR_CONTEXT_MULTI_CHANNEL_MODE	0x10000000
#define IR_CONTEXT_DUAL_BUFFER_MODE	0x08000000

#define CONTEXT_RUN	0x8000
#define CONTEXT_WAKE	0x1000
#define CONTEXT_DEAD	0x0800
#define CONTEXT_ACTIVE	0x0400

#define OHCI1394_MAX_AT_REQ_RETRIES	0xf
#define OHCI1394_MAX_AT_RESP_RETRIES	0x2
#define OHCI1394_MAX_PHYS_RESP_RETRIES	0x8

#define OHCI1394_REGISTER_SIZE		0x800
#define OHCI1394_PCI_HCI_Control	0x40
#define SELF_ID_BUF_SIZE		0x800
#define OHCI_TCODE_PHY_PACKET		0x0e
#define OHCI_VERSION_1_1		0x010010

static char ohci_driver_name[] = KBUILD_MODNAME;

#define PCI_DEVICE_ID_AGERE_FW643	0x5901
#define PCI_DEVICE_ID_CREATIVE_SB1394	0x4001
#define PCI_DEVICE_ID_JMICRON_JMB38X_FW	0x2380
#define PCI_DEVICE_ID_TI_TSB12LV22	0x8009
#define PCI_DEVICE_ID_TI_TSB12LV26	0x8020
#define PCI_DEVICE_ID_TI_TSB82AA2	0x8025
#define PCI_DEVICE_ID_VIA_VT630X	0x3044
#define PCI_VENDOR_ID_PINNACLE_SYSTEMS	0x11bd
#define PCI_REV_ID_VIA_VT6306		0x46

#define QUIRK_CYCLE_TIMER		1
#define QUIRK_RESET_PACKET		2
#define QUIRK_BE_HEADERS		4
#define QUIRK_NO_1394A			8
#define QUIRK_NO_MSI			16
#define QUIRK_TI_SLLZ059		32
#define QUIRK_IR_WAKE			64
#define QUIRK_PHY_LCTRL_TIMEOUT		128
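
/*
 * The QUIRK_* values combine as a bitmask.  For example, loading the module
 * with "quirks=17" (QUIRK_CYCLE_TIMER | QUIRK_NO_MSI) forces both the
 * non-atomic cycle timer workaround and the MSI disable on all controllers,
 * overriding the table below.
 */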

/* In case of multiple matches in ohci_quirks[], only the first one is used. */
static const struct {
	unsigned short vendor, device, revision, flags;
} ohci_quirks[] = {
	{PCI_VENDOR_ID_AL, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_FW, PCI_ANY_ID,
		QUIRK_BE_HEADERS},

	{PCI_VENDOR_ID_ATT, PCI_DEVICE_ID_AGERE_FW643, 6,
		QUIRK_PHY_LCTRL_TIMEOUT | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_ATT, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_PHY_LCTRL_TIMEOUT},

	{PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_SB1394, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_JMICRON, PCI_DEVICE_ID_JMICRON_JMB38X_FW, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_NEC, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER},

	{PCI_VENDOR_ID_O2, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_NO_MSI},

	{PCI_VENDOR_ID_RICOH, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV22, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_RESET_PACKET | QUIRK_NO_1394A},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB12LV26, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_DEVICE_ID_TI_TSB82AA2, PCI_ANY_ID,
		QUIRK_RESET_PACKET | QUIRK_TI_SLLZ059},

	{PCI_VENDOR_ID_TI, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_RESET_PACKET},

	{PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_VT630X, PCI_REV_ID_VIA_VT6306,
		QUIRK_CYCLE_TIMER | QUIRK_IR_WAKE},

	{PCI_VENDOR_ID_VIA, PCI_ANY_ID, PCI_ANY_ID,
		QUIRK_CYCLE_TIMER | QUIRK_NO_MSI},
};

/* This overrides anything that was found in ohci_quirks[]. */
static int param_quirks;
module_param_named(quirks, param_quirks, int, 0644);
MODULE_PARM_DESC(quirks, "Chip quirks (default = 0"
	", nonatomic cycle timer = "	__stringify(QUIRK_CYCLE_TIMER)
	", reset packet generation = "	__stringify(QUIRK_RESET_PACKET)
	", AR/selfID endianness = "	__stringify(QUIRK_BE_HEADERS)
	", no 1394a enhancements = "	__stringify(QUIRK_NO_1394A)
	", disable MSI = "		__stringify(QUIRK_NO_MSI)
	", TI SLLZ059 erratum = "	__stringify(QUIRK_TI_SLLZ059)
	", IR wake unreliable = "	__stringify(QUIRK_IR_WAKE)
	", phy LCtrl timeout = "	__stringify(QUIRK_PHY_LCTRL_TIMEOUT)
	")");

#define OHCI_PARAM_DEBUG_AT_AR		1
#define OHCI_PARAM_DEBUG_SELFIDS	2
#define OHCI_PARAM_DEBUG_IRQS		4
#define OHCI_PARAM_DEBUG_BUSRESETS	8

static int param_debug;
module_param_named(debug, param_debug, int, 0644);
MODULE_PARM_DESC(debug, "Verbose logging (default = 0"
	", AT/AR events = "	__stringify(OHCI_PARAM_DEBUG_AT_AR)
	", self-IDs = "		__stringify(OHCI_PARAM_DEBUG_SELFIDS)
	", IRQs = "		__stringify(OHCI_PARAM_DEBUG_IRQS)
	", busReset events = "	__stringify(OHCI_PARAM_DEBUG_BUSRESETS)
	", or a combination, or all = -1)");

static void log_irqs(struct fw_ohci *ohci, u32 evt)
{
	if (likely(!(param_debug &
			(OHCI_PARAM_DEBUG_IRQS | OHCI_PARAM_DEBUG_BUSRESETS))))
		return;

	if (!(param_debug & OHCI_PARAM_DEBUG_IRQS) &&
	    !(evt & OHCI1394_busReset))
		return;

	ohci_notice(ohci, "IRQ %08x%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s\n", evt,
	    evt & OHCI1394_selfIDComplete	? " selfID"		: "",
	    evt & OHCI1394_RQPkt		? " AR_req"		: "",
	    evt & OHCI1394_RSPkt		? " AR_resp"		: "",
	    evt & OHCI1394_reqTxComplete	? " AT_req"		: "",
	    evt & OHCI1394_respTxComplete	? " AT_resp"		: "",
	    evt & OHCI1394_isochRx		? " IR"			: "",
	    evt & OHCI1394_isochTx		? " IT"			: "",
	    evt & OHCI1394_postedWriteErr	? " postedWriteErr"	: "",
	    evt & OHCI1394_cycleTooLong		? " cycleTooLong"	: "",
	    evt & OHCI1394_cycle64Seconds	? " cycle64Seconds"	: "",
	    evt & OHCI1394_cycleInconsistent	? " cycleInconsistent"	: "",
	    evt & OHCI1394_regAccessFail	? " regAccessFail"	: "",
	    evt & OHCI1394_unrecoverableError	? " unrecoverableError"	: "",
	    evt & OHCI1394_busReset		? " busReset"		: "",
	    evt & ~(OHCI1394_selfIDComplete | OHCI1394_RQPkt |
		    OHCI1394_RSPkt | OHCI1394_reqTxComplete |
		    OHCI1394_respTxComplete | OHCI1394_isochRx |
		    OHCI1394_isochTx | OHCI1394_postedWriteErr |
		    OHCI1394_cycleTooLong | OHCI1394_cycle64Seconds |
		    OHCI1394_cycleInconsistent |
		    OHCI1394_regAccessFail | OHCI1394_busReset)
						? " ?"			: "");
}

static const char *speed[] = {
	[0] = "S100", [1] = "S200", [2] = "S400",    [3] = "beta",
};
static const char *power[] = {
	[0] = "+0W",  [1] = "+15W", [2] = "+30W",    [3] = "+45W",
	[4] = "-3W",  [5] = " ?W",  [6] = "-3..-6W", [7] = "-3..-10W",
};
static const char port[] = { '.', '-', 'p', 'c', };

static char _p(u32 *s, int shift)
{
	return port[*s >> shift & 3];
}

static void log_selfids(struct fw_ohci *ohci, int generation, int self_id_count)
{
	u32 *s;

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_SELFIDS)))
		return;

	ohci_notice(ohci, "%d selfIDs, generation %d, local node ID %04x\n",
		    self_id_count, generation, ohci->node_id);

	for (s = ohci->self_id_buffer; self_id_count--; ++s)
		if ((*s & 1 << 23) == 0)
			ohci_notice(ohci,
			    "selfID 0: %08x, phy %d [%c%c%c] %s gc=%d %s %s%s%s\n",
			    *s, *s >> 24 & 63, _p(s, 6), _p(s, 4), _p(s, 2),
			    speed[*s >> 14 & 3], *s >> 16 & 63,
			    power[*s >> 8 & 7], *s >> 22 & 1 ? "L" : "",
			    *s >> 11 & 1 ? "c" : "", *s & 2 ? "i" : "");
		else
			ohci_notice(ohci,
			    "selfID n: %08x, phy %d [%c%c%c%c%c%c%c%c]\n",
			    *s, *s >> 24 & 63,
			    _p(s, 16), _p(s, 14), _p(s, 12), _p(s, 10),
			    _p(s,  8), _p(s,  6), _p(s,  4), _p(s,  2));
}

static const char *evts[] = {
	[0x00] = "evt_no_status",	[0x01] = "-reserved-",
	[0x02] = "evt_long_packet",	[0x03] = "evt_missing_ack",
	[0x04] = "evt_underrun",	[0x05] = "evt_overrun",
	[0x06] = "evt_descriptor_read",	[0x07] = "evt_data_read",
	[0x08] = "evt_data_write",	[0x09] = "evt_bus_reset",
	[0x0a] = "evt_timeout",		[0x0b] = "evt_tcode_err",
	[0x0c] = "-reserved-",		[0x0d] = "-reserved-",
	[0x0e] = "evt_unknown",		[0x0f] = "evt_flushed",
	[0x10] = "-reserved-",		[0x11] = "ack_complete",
	[0x12] = "ack_pending ",	[0x13] = "-reserved-",
	[0x14] = "ack_busy_X",		[0x15] = "ack_busy_A",
	[0x16] = "ack_busy_B",		[0x17] = "-reserved-",
	[0x18] = "-reserved-",		[0x19] = "-reserved-",
	[0x1a] = "-reserved-",		[0x1b] = "ack_tardy",
	[0x1c] = "-reserved-",		[0x1d] = "ack_data_error",
	[0x1e] = "ack_type_error",	[0x1f] = "-reserved-",
	[0x20] = "pending/cancelled",
};
static const char *tcodes[] = {
	[0x0] = "QW req",		[0x1] = "BW req",
	[0x2] = "W resp",		[0x3] = "-reserved-",
	[0x4] = "QR req",		[0x5] = "BR req",
	[0x6] = "QR resp",		[0x7] = "BR resp",
	[0x8] = "cycle start",		[0x9] = "Lk req",
	[0xa] = "async stream packet",	[0xb] = "Lk resp",
	[0xc] = "-reserved-",		[0xd] = "-reserved-",
	[0xe] = "link internal",	[0xf] = "-reserved-",
};

static void log_ar_at_event(struct fw_ohci *ohci,
			    char dir, int speed, u32 *header, int evt)
{
	int tcode = header[0] >> 4 & 0xf;
	char specific[12];

	if (likely(!(param_debug & OHCI_PARAM_DEBUG_AT_AR)))
		return;

	if (unlikely(evt >= ARRAY_SIZE(evts)))
		evt = 0x1f;

	if (evt == OHCI1394_evt_bus_reset) {
		ohci_notice(ohci, "A%c evt_bus_reset, generation %d\n",
			    dir, (header[2] >> 16) & 0xff);
		return;
	}

	switch (tcode) {
	case 0x0: case 0x6: case 0x8:
		snprintf(specific, sizeof(specific), " = %08x",
			 be32_to_cpu((__force __be32)header[3]));
		break;
	case 0x1: case 0x5: case 0x7: case 0x9: case 0xb:
		snprintf(specific, sizeof(specific), " %x,%x",
			 header[3] >> 16, header[3] & 0xffff);
		break;
	default:
		specific[0] = '\0';
	}

	switch (tcode) {
	case 0xa:
		ohci_notice(ohci, "A%c %s, %s\n",
			    dir, evts[evt], tcodes[tcode]);
		break;
	case 0xe:
		ohci_notice(ohci, "A%c %s, PHY %08x %08x\n",
			    dir, evts[evt], header[1], header[2]);
		break;
	case 0x0: case 0x1: case 0x4: case 0x5: case 0x9:
		ohci_notice(ohci,
		    "A%c spd %x tl %02x, %04x -> %04x, %s, %s, %04x%08x%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], header[1] & 0xffff, header[2], specific);
		break;
	default:
		ohci_notice(ohci,
		    "A%c spd %x tl %02x, %04x -> %04x, %s, %s%s\n",
		    dir, speed, header[0] >> 10 & 0x3f,
		    header[1] >> 16, header[0] >> 16, evts[evt],
		    tcodes[tcode], specific);
	}
}

static inline void reg_write(const struct fw_ohci *ohci, int offset, u32 data)
{
	writel(data, ohci->registers + offset);
}

static inline u32 reg_read(const struct fw_ohci *ohci, int offset)
{
	return readl(ohci->registers + offset);
}

static inline void flush_writes(const struct fw_ohci *ohci)
{
	/* Do a dummy read to flush writes. */
	reg_read(ohci, OHCI1394_Version);
}

/*
 * Beware!  read_phy_reg(), write_phy_reg(), update_phy_reg(), and
 * read_paged_phy_reg() require the caller to hold ohci->phy_reg_mutex.
 * In other words, only use ohci_read_phy_reg() and ohci_update_phy_reg()
 * directly.  Exceptions are intrinsically serialized contexts like pci_probe.
 */
static int read_phy_reg(struct fw_ohci *ohci, int addr)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_PhyControl, OHCI1394_PhyControl_Read(addr));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (val & OHCI1394_PhyControl_ReadDone)
			return OHCI1394_PhyControl_ReadData(val);

		/*
		 * Try a few times without waiting.  Sleeping is necessary
		 * only when the link/PHY interface is busy.
		 */
		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to read phy reg %d\n", addr);
	dump_stack();

	return -EBUSY;
}

static int write_phy_reg(const struct fw_ohci *ohci, int addr, u32 val)
{
	int i;

	reg_write(ohci, OHCI1394_PhyControl,
		  OHCI1394_PhyControl_Write(addr, val));
	for (i = 0; i < 3 + 100; i++) {
		val = reg_read(ohci, OHCI1394_PhyControl);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_PhyControl_WritePending))
			return 0;

		if (i >= 3)
			msleep(1);
	}
	ohci_err(ohci, "failed to write phy reg %d, val %u\n", addr, val);
	dump_stack();

	return -EBUSY;
}

static int update_phy_reg(struct fw_ohci *ohci, int addr,
			  int clear_bits, int set_bits)
{
	int ret = read_phy_reg(ohci, addr);
	if (ret < 0)
		return ret;

	/*
	 * The interrupt status bits are cleared by writing a one bit.
	 * Avoid clearing them unless explicitly requested in set_bits.
	 */
	if (addr == 5)
		clear_bits |= PHY_INT_STATUS_BITS;

	return write_phy_reg(ohci, addr, (ret & ~clear_bits) | set_bits);
}

static int read_paged_phy_reg(struct fw_ohci *ohci, int page, int addr)
{
	int ret;

	ret = update_phy_reg(ohci, 7, PHY_PAGE_SELECT, page << 5);
	if (ret < 0)
		return ret;

	return read_phy_reg(ohci, addr);
}

static int ohci_read_phy_reg(struct fw_card *card, int addr)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = read_phy_reg(ohci, addr);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static int ohci_update_phy_reg(struct fw_card *card, int addr,
			       int clear_bits, int set_bits)
{
	struct fw_ohci *ohci = fw_ohci(card);
	int ret;

	mutex_lock(&ohci->phy_reg_mutex);
	ret = update_phy_reg(ohci, addr, clear_bits, set_bits);
	mutex_unlock(&ohci->phy_reg_mutex);

	return ret;
}

static inline dma_addr_t ar_buffer_bus(struct ar_context *ctx, unsigned int i)
{
	return page_private(ctx->pages[i]);
}

static void ar_context_link_page(struct ar_context *ctx, unsigned int index)
{
	struct descriptor *d;

	d = &ctx->descriptors[index];
	d->branch_address  &= cpu_to_le32(~0xf);
	d->res_count       =  cpu_to_le16(PAGE_SIZE);
	d->transfer_status =  0;

	wmb(); /* finish init of new descriptors before branch_address update */
	d = &ctx->descriptors[ctx->last_buffer_index];
	d->branch_address  |= cpu_to_le32(1);

	ctx->last_buffer_index = index;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

static void ar_context_release(struct ar_context *ctx)
{
	unsigned int i;

	if (ctx->buffer)
		vm_unmap_ram(ctx->buffer, AR_BUFFERS + AR_WRAPAROUND_PAGES);

	for (i = 0; i < AR_BUFFERS; i++)
		if (ctx->pages[i]) {
			dma_unmap_page(ctx->ohci->card.device,
				       ar_buffer_bus(ctx, i),
				       PAGE_SIZE, DMA_FROM_DEVICE);
			__free_page(ctx->pages[i]);
		}
}

static void ar_context_abort(struct ar_context *ctx, const char *error_msg)
{
	struct fw_ohci *ohci = ctx->ohci;

	if (reg_read(ohci, CONTROL_CLEAR(ctx->regs)) & CONTEXT_RUN) {
		reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
		flush_writes(ohci);

		ohci_err(ohci, "AR error: %s; DMA stopped\n", error_msg);
	}
	/* FIXME: restart? */
}

static inline unsigned int ar_next_buffer_index(unsigned int index)
{
	return (index + 1) % AR_BUFFERS;
}

static inline unsigned int ar_prev_buffer_index(unsigned int index)
{
	return (index - 1 + AR_BUFFERS) % AR_BUFFERS;
}

static inline unsigned int ar_first_buffer_index(struct ar_context *ctx)
{
	return ar_next_buffer_index(ctx->last_buffer_index);
}

/*
 * We search for the buffer that contains the last AR packet DMA data written
 * by the controller.
 */
static unsigned int ar_search_last_active_buffer(struct ar_context *ctx,
						 unsigned int *buffer_offset)
{
	unsigned int i, next_i, last = ctx->last_buffer_index;
	__le16 res_count, next_res_count;

	i = ar_first_buffer_index(ctx);
	res_count = ACCESS_ONCE(ctx->descriptors[i].res_count);

	/* A buffer that is not yet completely filled must be the last one. */
	while (i != last && res_count == 0) {

		/* Peek at the next descriptor. */
		next_i = ar_next_buffer_index(i);
		rmb(); /* read descriptors in order */
		next_res_count = ACCESS_ONCE(
				ctx->descriptors[next_i].res_count);
		/*
		 * If the next descriptor is still empty, we must stop at
		 * this descriptor.
		 */
		if (next_res_count == cpu_to_le16(PAGE_SIZE)) {
			/*
			 * The exception is when the DMA data for one packet is
			 * split over three buffers; in this case, the middle
			 * buffer's descriptor might be never updated by the
			 * controller and look still empty, and we have to peek
			 * at the third one.
			 */
			if (MAX_AR_PACKET_SIZE > PAGE_SIZE && i != last) {
				next_i = ar_next_buffer_index(next_i);
				rmb();
				next_res_count = ACCESS_ONCE(
					ctx->descriptors[next_i].res_count);
				if (next_res_count != cpu_to_le16(PAGE_SIZE))
					goto next_buffer_is_active;
			}

			break;
		}

next_buffer_is_active:
		i = next_i;
		res_count = next_res_count;
	}

	rmb(); /* read res_count before the DMA data */

	*buffer_offset = PAGE_SIZE - le16_to_cpu(res_count);
	if (*buffer_offset > PAGE_SIZE) {
		*buffer_offset = 0;
		ar_context_abort(ctx, "corrupted descriptor");
	}

	return i;
}

static void ar_sync_buffers_for_cpu(struct ar_context *ctx,
				    unsigned int end_buffer_index,
				    unsigned int end_buffer_offset)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer_index) {
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					PAGE_SIZE, DMA_FROM_DEVICE);
		i = ar_next_buffer_index(i);
	}
	if (end_buffer_offset > 0)
		dma_sync_single_for_cpu(ctx->ohci->card.device,
					ar_buffer_bus(ctx, i),
					end_buffer_offset, DMA_FROM_DEVICE);
}

#if defined(CONFIG_PPC_PMAC) && defined(CONFIG_PPC32)
#define cond_le32_to_cpu(v) \
	(ohci->quirks & QUIRK_BE_HEADERS ? (__force __u32)(v) : le32_to_cpu(v))
#else
#define cond_le32_to_cpu(v) le32_to_cpu(v)
#endif

static __le32 *handle_ar_packet(struct ar_context *ctx, __le32 *buffer)
{
	struct fw_ohci *ohci = ctx->ohci;
	struct fw_packet p;
	u32 status, length, tcode;
	int evt;

	p.header[0] = cond_le32_to_cpu(buffer[0]);
	p.header[1] = cond_le32_to_cpu(buffer[1]);
	p.header[2] = cond_le32_to_cpu(buffer[2]);

	tcode = (p.header[0] >> 4) & 0x0f;
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
		p.header[3] = (__force __u32) buffer[3];
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = 0;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		p.header[3] = cond_le32_to_cpu(buffer[3]);
		p.header_length = 16;
		p.payload_length = p.header[3] >> 16;
		if (p.payload_length > MAX_ASYNC_PAYLOAD) {
			ar_context_abort(ctx, "invalid packet length");
			return NULL;
		}
		break;

	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case OHCI_TCODE_PHY_PACKET:
		p.header_length = 12;
		p.payload_length = 0;
		break;

	default:
		ar_context_abort(ctx, "invalid tcode");
		return NULL;
	}

	p.payload = (void *) buffer + p.header_length;

	/* FIXME: What to do about evt_* errors more than ACK_BUSY_X? */
	length = (p.header_length + p.payload_length + 3) / 4;
	status = cond_le32_to_cpu(buffer[length]);
	evt    = (status >> 16) & 0x1f;

	p.ack        = evt - 16;
	p.speed      = (status >> 21) & 0x7;
	p.timestamp  = status & 0xffff;
	p.generation = ohci->request_generation;

	log_ar_at_event(ohci, 'R', p.speed, p.header, evt);

	/*
	 * Several controllers, notably from NEC and VIA, forget to
	 * write ack_complete status at PHY packet reception.
	 */
	if (evt == OHCI1394_evt_no_status &&
	    (p.header[0] & 0xff) == (OHCI1394_phy_tcode << 4))
		p.ack = ACK_COMPLETE;

	/*
	 * The OHCI bus reset handler synthesizes a PHY packet with
	 * the new generation number when a bus reset happens (see
	 * OHCI 1.1 section 8.4.2.3).  This helps us determine when a
	 * request was received and make sure we send the response in
	 * the same generation.  We only need this for requests; for
	 * responses we use the unique tlabel for finding the matching
	 * request.
	 *
	 * Alas some chips sometimes emit bus reset packets with a
	 * wrong generation.  We set the correct generation for these
	 * at a slightly incorrect time (in bus_reset_work).
	 */
	if (evt == OHCI1394_evt_bus_reset) {
		if (!(ohci->quirks & QUIRK_RESET_PACKET))
			ohci->request_generation = (p.header[2] >> 16) & 0xff;
	} else if (ctx == &ohci->ar_request_ctx) {
		fw_core_handle_request(&ohci->card, &p);
	} else {
		fw_core_handle_response(&ohci->card, &p);
	}

	return buffer + length + 1;
}

static void *handle_ar_packets(struct ar_context *ctx, void *p, void *end)
{
	void *next;

	while (p < end) {
		next = handle_ar_packet(ctx, p);
		if (!next)
			return p;
		p = next;
	}

	return p;
}

static void ar_recycle_buffers(struct ar_context *ctx, unsigned int end_buffer)
{
	unsigned int i;

	i = ar_first_buffer_index(ctx);
	while (i != end_buffer) {
		dma_sync_single_for_device(ctx->ohci->card.device,
					   ar_buffer_bus(ctx, i),
					   PAGE_SIZE, DMA_FROM_DEVICE);
		ar_context_link_page(ctx, i);
		i = ar_next_buffer_index(i);
	}
}

static void ar_context_tasklet(unsigned long data)
{
	struct ar_context *ctx = (struct ar_context *)data;
	unsigned int end_buffer_index, end_buffer_offset;
	void *p, *end;

	p = ctx->pointer;
	if (!p)
		return;

	end_buffer_index = ar_search_last_active_buffer(ctx,
							&end_buffer_offset);
	ar_sync_buffers_for_cpu(ctx, end_buffer_index, end_buffer_offset);
	end = ctx->buffer + end_buffer_index * PAGE_SIZE + end_buffer_offset;

	if (end_buffer_index < ar_first_buffer_index(ctx)) {
		/*
		 * The filled part of the overall buffer wraps around; handle
		 * all packets up to the buffer end here.  If the last packet
		 * wraps around, its tail data is visible past buffer_end
		 * thanks to the wrap-around pages mapped behind the ring.
		 */
		void *buffer_end = ctx->buffer + AR_BUFFERS * PAGE_SIZE;
		p = handle_ar_packets(ctx, p, buffer_end);
		if (p < buffer_end)
			goto error;
		/* adjust p to point back into the actual buffer */
		p -= AR_BUFFERS * PAGE_SIZE;
	}

	p = handle_ar_packets(ctx, p, end);
	if (p != end) {
		if (p > end)
			ar_context_abort(ctx, "inconsistent descriptor");
		goto error;
	}

	ctx->pointer = p;
	ar_recycle_buffers(ctx, end_buffer_index);

	return;

error:
	ctx->pointer = NULL;
}

static int ar_context_init(struct ar_context *ctx, struct fw_ohci *ohci,
			   unsigned int descriptors_offset, u32 regs)
{
	unsigned int i;
	dma_addr_t dma_addr;
	struct page *pages[AR_BUFFERS + AR_WRAPAROUND_PAGES];
	struct descriptor *d;

	ctx->regs = regs;
	ctx->ohci = ohci;
	tasklet_init(&ctx->tasklet, ar_context_tasklet, (unsigned long)ctx);

	for (i = 0; i < AR_BUFFERS; i++) {
		ctx->pages[i] = alloc_page(GFP_KERNEL | GFP_DMA32);
		if (!ctx->pages[i])
			goto out_of_memory;
		dma_addr = dma_map_page(ohci->card.device, ctx->pages[i],
					0, PAGE_SIZE, DMA_FROM_DEVICE);
		if (dma_mapping_error(ohci->card.device, dma_addr)) {
			__free_page(ctx->pages[i]);
			ctx->pages[i] = NULL;
			goto out_of_memory;
		}
		set_page_private(ctx->pages[i], dma_addr);
	}

	for (i = 0; i < AR_BUFFERS; i++)
		pages[i] = ctx->pages[i];
	for (i = 0; i < AR_WRAPAROUND_PAGES; i++)
		pages[AR_BUFFERS + i] = ctx->pages[i];
	ctx->buffer = vm_map_ram(pages, AR_BUFFERS + AR_WRAPAROUND_PAGES,
				 -1, PAGE_KERNEL);
	if (!ctx->buffer)
		goto out_of_memory;

	ctx->descriptors     = ohci->misc_buffer     + descriptors_offset;
	ctx->descriptors_bus = ohci->misc_buffer_bus + descriptors_offset;

	for (i = 0; i < AR_BUFFERS; i++) {
		d = &ctx->descriptors[i];
		d->req_count      = cpu_to_le16(PAGE_SIZE);
		d->control        = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
						DESCRIPTOR_STATUS |
						DESCRIPTOR_BRANCH_ALWAYS);
		d->data_address   = cpu_to_le32(ar_buffer_bus(ctx, i));
		d->branch_address = cpu_to_le32(ctx->descriptors_bus +
			ar_next_buffer_index(i) * sizeof(struct descriptor));
	}

	return 0;

out_of_memory:
	ar_context_release(ctx);

	return -ENOMEM;
}

static void ar_context_run(struct ar_context *ctx)
{
	unsigned int i;

	for (i = 0; i < AR_BUFFERS; i++)
		ar_context_link_page(ctx, i);

	ctx->pointer = ctx->buffer;

	reg_write(ctx->ohci, COMMAND_PTR(ctx->regs), ctx->descriptors_bus | 1);
	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN);
}

static struct descriptor *find_branch_descriptor(struct descriptor *d, int z)
{
	__le16 branch;

	branch = d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS);

	/* figure out which descriptor the branch address goes in */
	if (z == 2 && branch == cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return d;
	else
		return d + z - 1;
}
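
/*
 * Note: z is the number of 16-byte descriptors in a block.  The branch
 * address normally lives in the last descriptor of the block; the exception
 * is a two-descriptor block whose first descriptor carries immediate data
 * and the branch itself (DESCRIPTOR_BRANCH_ALWAYS set on the first
 * descriptor), which is what the z == 2 test above detects.
 */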

static void context_tasklet(unsigned long data)
{
	struct context *ctx = (struct context *) data;
	struct descriptor *d, *last;
	u32 address;
	int z;
	struct descriptor_buffer *desc;

	desc = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);
	last = ctx->last;
	while (last->branch_address != 0) {
		struct descriptor_buffer *old_desc = desc;
		address = le32_to_cpu(last->branch_address);
		z = address & 0xf;
		address &= ~0xf;
		ctx->current_bus = address;

		/*
		 * If the branch address points to a buffer outside of the
		 * current buffer, advance to the next buffer.
		 */
		if (address < desc->buffer_bus ||
				address >= desc->buffer_bus + desc->used)
			desc = list_entry(desc->list.next,
					struct descriptor_buffer, list);
		d = desc->buffer + (address - desc->buffer_bus) / sizeof(*d);
		last = find_branch_descriptor(d, z);

		if (!ctx->callback(ctx, d, last))
			break;

		if (old_desc != desc) {
			/* If we've advanced to the next buffer, move the
			 * previous buffer to the free list. */
			unsigned long flags;
			old_desc->used = 0;
			spin_lock_irqsave(&ctx->ohci->lock, flags);
			list_move_tail(&old_desc->list, &ctx->buffer_list);
			spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		}
		ctx->last = last;
	}
}

/*
 * Allocate a new buffer and add it to the list of free buffers for this
 * context.  Must be called with ohci->lock held.
 */
static int context_add_buffer(struct context *ctx)
{
	struct descriptor_buffer *desc;
	dma_addr_t uninitialized_var(bus_addr);
	int offset;

	/*
	 * 16MB of descriptors should be far more than enough for any DMA
	 * program.  This will catch run-away userspace or DoS attacks.
	 */
	if (ctx->total_allocation >= 16*1024*1024)
		return -ENOMEM;

	desc = dma_alloc_coherent(ctx->ohci->card.device, PAGE_SIZE,
			&bus_addr, GFP_ATOMIC);
	if (!desc)
		return -ENOMEM;

	offset = (void *)&desc->buffer - (void *)desc;
	desc->buffer_size = PAGE_SIZE - offset;
	desc->buffer_bus = bus_addr + offset;
	desc->used = 0;

	list_add_tail(&desc->list, &ctx->buffer_list);
	ctx->total_allocation += PAGE_SIZE;

	return 0;
}

static int context_init(struct context *ctx, struct fw_ohci *ohci,
			u32 regs, descriptor_callback_t callback)
{
	ctx->ohci = ohci;
	ctx->regs = regs;
	ctx->total_allocation = 0;

	INIT_LIST_HEAD(&ctx->buffer_list);
	if (context_add_buffer(ctx) < 0)
		return -ENOMEM;

	ctx->buffer_tail = list_entry(ctx->buffer_list.next,
			struct descriptor_buffer, list);

	tasklet_init(&ctx->tasklet, context_tasklet, (unsigned long)ctx);
	ctx->callback = callback;

	/*
	 * We put a dummy descriptor in the buffer that has a NULL branch
	 * address and looks like it's been sent.  That way we have a
	 * descriptor to append DMA programs to.
	 */
	memset(ctx->buffer_tail->buffer, 0, sizeof(*ctx->buffer_tail->buffer));
	ctx->buffer_tail->buffer->control = cpu_to_le16(DESCRIPTOR_OUTPUT_LAST);
	ctx->buffer_tail->buffer->transfer_status = cpu_to_le16(0x8011);
	ctx->buffer_tail->used += sizeof(*ctx->buffer_tail->buffer);
	ctx->last = ctx->buffer_tail->buffer;
	ctx->prev = ctx->buffer_tail->buffer;
	ctx->prev_z = 1;

	return 0;
}

static void context_release(struct context *ctx)
{
	struct fw_card *card = &ctx->ohci->card;
	struct descriptor_buffer *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, &ctx->buffer_list, list)
		dma_free_coherent(card->device, PAGE_SIZE, desc,
			desc->buffer_bus -
			((void *)&desc->buffer - (void *)desc));
}

/* Must be called with ohci->lock held */
static struct descriptor *context_get_descriptors(struct context *ctx,
						  int z, dma_addr_t *d_bus)
{
	struct descriptor *d = NULL;
	struct descriptor_buffer *desc = ctx->buffer_tail;

	if (z * sizeof(*d) > desc->buffer_size)
		return NULL;

	if (z * sizeof(*d) > desc->buffer_size - desc->used) {
		/*
		 * No room for the descriptor in this buffer, so advance to
		 * the next one.
		 */
		if (desc->list.next == &ctx->buffer_list) {
			/*
			 * If there is no free buffer next in the list,
			 * allocate one.
			 */
			if (context_add_buffer(ctx) < 0)
				return NULL;
		}
		desc = list_entry(desc->list.next,
				struct descriptor_buffer, list);
		ctx->buffer_tail = desc;
	}

	d = desc->buffer + desc->used / sizeof(*d);
	memset(d, 0, z * sizeof(*d));
	*d_bus = desc->buffer_bus + desc->used;

	return d;
}

static void context_run(struct context *ctx, u32 extra)
{
	struct fw_ohci *ohci = ctx->ohci;

	reg_write(ohci, COMMAND_PTR(ctx->regs),
		  le32_to_cpu(ctx->last->branch_address));
	reg_write(ohci, CONTROL_CLEAR(ctx->regs), ~0);
	reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_RUN | extra);
	ctx->running = true;
	flush_writes(ohci);
}

static void context_append(struct context *ctx,
			   struct descriptor *d, int z, int extra)
{
	dma_addr_t d_bus;
	struct descriptor_buffer *desc = ctx->buffer_tail;
	struct descriptor *d_branch;

	d_bus = desc->buffer_bus + (d - desc->buffer) * sizeof(*d);

	desc->used += (z + extra) * sizeof(*d);

	wmb(); /* finish init of new descriptors before branch_address update */

	d_branch = find_branch_descriptor(ctx->prev, ctx->prev_z);
	d_branch->branch_address = cpu_to_le32(d_bus | z);

	/*
	 * VT6306 incorrectly checks only the single descriptor at the
	 * CommandPtr when the wake bit is written, so if it's a
	 * multi-descriptor block starting with an INPUT_MORE, put a copy of
	 * the branch address in the first descriptor.
	 *
	 * Not doing this for transmit contexts since not sure how it
	 * interacts with skip addresses.
	 */
	if (unlikely(ctx->ohci->quirks & QUIRK_IR_WAKE) &&
	    d_branch != ctx->prev &&
	    (ctx->prev->control & cpu_to_le16(DESCRIPTOR_CMD)) ==
	     cpu_to_le16(DESCRIPTOR_INPUT_MORE)) {
		ctx->prev->branch_address = cpu_to_le32(d_bus | z);
	}

	ctx->prev = d;
	ctx->prev_z = z;
}

static void context_stop(struct context *ctx)
{
	struct fw_ohci *ohci = ctx->ohci;
	u32 reg;
	int i;

	reg_write(ohci, CONTROL_CLEAR(ctx->regs), CONTEXT_RUN);
	ctx->running = false;

	for (i = 0; i < 1000; i++) {
		reg = reg_read(ohci, CONTROL_SET(ctx->regs));
		if ((reg & CONTEXT_ACTIVE) == 0)
			return;

		if (i)
			udelay(10);
	}
	ohci_err(ohci, "DMA context still active (0x%08x)\n", reg);
}

struct driver_data {
	u8 inline_data[8];
	struct fw_packet *packet;
};

/*
 * Must always be called with the ohci->lock held to ensure proper
 * generation handling and locking around packet queue manipulation.
 */
static int at_context_queue_packet(struct context *ctx,
				   struct fw_packet *packet)
{
	struct fw_ohci *ohci = ctx->ohci;
	dma_addr_t d_bus, uninitialized_var(payload_bus);
	struct driver_data *driver_data;
	struct descriptor *d, *last;
	__le32 *header;
	int z, tcode;

	d = context_get_descriptors(ctx, 4, &d_bus);
	if (d == NULL) {
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
	d[0].res_count = cpu_to_le16(packet->timestamp);

	/*
	 * The DMA format for asynchronous link packets is different
	 * from the IEEE 1394 layout, so shift the fields around
	 * accordingly.
	 */
	tcode = (packet->header[0] >> 4) & 0x0f;
	header = (__le32 *) &d[1];
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_WRITE_RESPONSE:
	case TCODE_READ_QUADLET_REQUEST:
	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_READ_QUADLET_RESPONSE:
	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_REQUEST:
	case TCODE_LOCK_RESPONSE:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32((packet->header[1] & 0xffff) |
					(packet->header[0] & 0xffff0000));
		header[2] = cpu_to_le32(packet->header[2]);

		if (TCODE_IS_BLOCK_PACKET(tcode))
			header[3] = cpu_to_le32(packet->header[3]);
		else
			header[3] = (__force __le32) packet->header[3];

		d[0].req_count = cpu_to_le16(packet->header_length);
		break;

	case TCODE_LINK_INTERNAL:
		header[0] = cpu_to_le32((OHCI1394_phy_tcode << 4) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[1]);
		header[2] = cpu_to_le32(packet->header[2]);
		d[0].req_count = cpu_to_le16(12);

		if (is_ping_packet(&packet->header[1]))
			d[0].control |= cpu_to_le16(DESCRIPTOR_PING);
		break;

	case TCODE_STREAM_DATA:
		header[0] = cpu_to_le32((packet->header[0] & 0xffff) |
					(packet->speed << 16));
		header[1] = cpu_to_le32(packet->header[0] & 0xffff0000);
		d[0].req_count = cpu_to_le16(8);
		break;

	default:
		/* BUG(); */
		packet->ack = RCODE_SEND_ERROR;
		return -1;
	}

	BUILD_BUG_ON(sizeof(struct driver_data) > sizeof(struct descriptor));
	driver_data = (struct driver_data *) &d[3];
	driver_data->packet = packet;
	packet->driver_data = driver_data;

	if (packet->payload_length > 0) {
		if (packet->payload_length > sizeof(driver_data->inline_data)) {
			payload_bus = dma_map_single(ohci->card.device,
						     packet->payload,
						     packet->payload_length,
						     DMA_TO_DEVICE);
			if (dma_mapping_error(ohci->card.device, payload_bus)) {
				packet->ack = RCODE_SEND_ERROR;
				return -1;
			}
			packet->payload_bus	= payload_bus;
			packet->payload_mapped	= true;
		} else {
			memcpy(driver_data->inline_data, packet->payload,
			       packet->payload_length);
			payload_bus = d_bus + 3 * sizeof(*d);
		}

		d[2].req_count    = cpu_to_le16(packet->payload_length);
		d[2].data_address = cpu_to_le32(payload_bus);
		last = &d[2];
		z = 3;
	} else {
		last = &d[0];
		z = 2;
	}

	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_IRQ_ALWAYS |
				     DESCRIPTOR_BRANCH_ALWAYS);

	/* FIXME: Document how the locking works. */
	if (ohci->generation != packet->generation) {
		if (packet->payload_mapped)
			dma_unmap_single(ohci->card.device, payload_bus,
					 packet->payload_length, DMA_TO_DEVICE);
		packet->ack = RCODE_GENERATION;
		return -1;
	}

	context_append(ctx, d, z, 4 - z);

	if (ctx->running)
		reg_write(ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
	else
		context_run(ctx, 0);

	return 0;
}

static void at_context_flush(struct context *ctx)
{
	tasklet_disable(&ctx->tasklet);

	ctx->flushing = true;
	context_tasklet((unsigned long)ctx);
	ctx->flushing = false;

	tasklet_enable(&ctx->tasklet);
}

static int handle_at_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct driver_data *driver_data;
	struct fw_packet *packet;
	struct fw_ohci *ohci = context->ohci;
	int evt;

	if (last->transfer_status == 0 && !context->flushing)
		/* This descriptor isn't done yet, stop iteration. */
		return 0;

	driver_data = (struct driver_data *) &d[3];
	packet = driver_data->packet;
	if (packet == NULL)
		/* This packet was cancelled, just continue. */
		return 1;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	evt = le16_to_cpu(last->transfer_status) & 0x1f;
	packet->timestamp = le16_to_cpu(last->res_count);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, evt);

	switch (evt) {
	case OHCI1394_evt_timeout:
		/* Async response transmit timed out. */
		packet->ack = RCODE_CANCELLED;
		break;

	case OHCI1394_evt_flushed:
		/*
		 * The packet was flushed; report the same error as when
		 * we deliberately flush packets.
		 */
		packet->ack = RCODE_GENERATION;
		break;

	case OHCI1394_evt_missing_ack:
		if (context->flushing)
			packet->ack = RCODE_GENERATION;
		else {
			/*
			 * Using a valid (current) generation count, but the
			 * node is not on the bus anymore.
			 */
			packet->ack = RCODE_NO_ACK;
		}
		break;

	case ACK_COMPLETE + 0x10:
	case ACK_PENDING + 0x10:
	case ACK_BUSY_X + 0x10:
	case ACK_BUSY_A + 0x10:
	case ACK_BUSY_B + 0x10:
	case ACK_DATA_ERROR + 0x10:
	case ACK_TYPE_ERROR + 0x10:
		packet->ack = evt - 0x10;
		break;

	case OHCI1394_evt_no_status:
		if (context->flushing) {
			packet->ack = RCODE_GENERATION;
			break;
		}
		/* fall through */

	default:
		packet->ack = RCODE_SEND_ERROR;
		break;
	}

	packet->callback(packet, &ohci->card, packet->ack);

	return 1;
}

#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)
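
/*
 * These helpers decode fields of the IEEE 1394 transaction header quadlets:
 * header[0] carries destination_ID (bits 16-31) and tcode (bits 4-7),
 * header[1] carries the upper 16 bits of the 48-bit destination offset, and
 * header[3] of block packets carries data_length (bits 16-31) and the
 * extended tcode (bits 0-15).
 */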

static void handle_local_rom(struct fw_ohci *ohci,
			     struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, i;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	if (TCODE_IS_BLOCK_PACKET(tcode))
		length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	else
		length = 4;

	i = csr - CSR_CONFIG_ROM;
	if (i + length > CONFIG_ROM_SIZE) {
		fw_fill_response(&response, packet->header,
				 RCODE_ADDRESS_ERROR, NULL, 0);
	} else if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
	} else {
		fw_fill_response(&response, packet->header, RCODE_COMPLETE,
				 (void *) ohci->config_rom + i, length);
	}

	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_lock(struct fw_ohci *ohci,
			      struct fw_packet *packet, u32 csr)
{
	struct fw_packet response;
	int tcode, length, ext_tcode, sel, try;
	__be32 *payload, lock_old;
	u32 lock_arg, lock_data;

	tcode = HEADER_GET_TCODE(packet->header[0]);
	length = HEADER_GET_DATA_LENGTH(packet->header[3]);
	payload = packet->payload;
	ext_tcode = HEADER_GET_EXTENDED_TCODE(packet->header[3]);

	if (tcode == TCODE_LOCK_REQUEST &&
	    ext_tcode == EXTCODE_COMPARE_SWAP && length == 8) {
		lock_arg = be32_to_cpu(payload[0]);
		lock_data = be32_to_cpu(payload[1]);
	} else if (tcode == TCODE_READ_QUADLET_REQUEST) {
		lock_arg = 0;
		lock_data = 0;
	} else {
		fw_fill_response(&response, packet->header,
				 RCODE_TYPE_ERROR, NULL, 0);
		goto out;
	}

	sel = (csr - CSR_BUS_MANAGER_ID) / 4;
	reg_write(ohci, OHCI1394_CSRData, lock_data);
	reg_write(ohci, OHCI1394_CSRCompareData, lock_arg);
	reg_write(ohci, OHCI1394_CSRControl, sel);

	for (try = 0; try < 20; try++)
		if (reg_read(ohci, OHCI1394_CSRControl) & 0x80000000) {
			lock_old = cpu_to_be32(reg_read(ohci,
							OHCI1394_CSRData));
			fw_fill_response(&response, packet->header,
					 RCODE_COMPLETE,
					 &lock_old, sizeof(lock_old));
			goto out;
		}

	ohci_err(ohci, "swap not done (CSR lock timeout)\n");
	fw_fill_response(&response, packet->header, RCODE_BUSY, NULL, 0);

 out:
	fw_core_handle_response(&ohci->card, &response);
}

static void handle_local_request(struct context *ctx, struct fw_packet *packet)
{
	u64 offset, csr;

	if (ctx == &ctx->ohci->at_request_ctx) {
		packet->ack = ACK_PENDING;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}

	offset =
		((unsigned long long)
		 HEADER_GET_OFFSET_HIGH(packet->header[1]) << 32) |
		packet->header[2];
	csr = offset - CSR_REGISTER_BASE;

	/* Handle config rom reads. */
	if (csr >= CSR_CONFIG_ROM && csr < CSR_CONFIG_ROM_END)
		handle_local_rom(ctx->ohci, packet, csr);
	else switch (csr) {
	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		handle_local_lock(ctx->ohci, packet, csr);
		break;
	default:
		if (ctx == &ctx->ohci->at_request_ctx)
			fw_core_handle_request(&ctx->ohci->card, packet);
		else
			fw_core_handle_response(&ctx->ohci->card, packet);
		break;
	}

	if (ctx == &ctx->ohci->at_response_ctx) {
		packet->ack = ACK_COMPLETE;
		packet->callback(packet, &ctx->ohci->card, packet->ack);
	}
}

static void at_context_transmit(struct context *ctx, struct fw_packet *packet)
{
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&ctx->ohci->lock, flags);

	if (HEADER_GET_DESTINATION(packet->header[0]) == ctx->ohci->node_id &&
	    ctx->ohci->generation == packet->generation) {
		spin_unlock_irqrestore(&ctx->ohci->lock, flags);
		handle_local_request(ctx, packet);
		return;
	}

	ret = at_context_queue_packet(ctx, packet);
	spin_unlock_irqrestore(&ctx->ohci->lock, flags);

	if (ret < 0)
		packet->callback(packet, &ctx->ohci->card, packet->ack);
}

static void detect_dead_context(struct fw_ohci *ohci,
				const char *name, unsigned int regs)
{
	u32 ctl;

	ctl = reg_read(ohci, CONTROL_SET(regs));
	if (ctl & CONTEXT_DEAD)
		ohci_err(ohci, "DMA context %s has stopped, error code: %s\n",
			 name, evts[ctl & 0x1f]);
}

static void handle_dead_contexts(struct fw_ohci *ohci)
{
	unsigned int i;
	char name[8];

	detect_dead_context(ohci, "ATReq", OHCI1394_AsReqTrContextBase);
	detect_dead_context(ohci, "ATRsp", OHCI1394_AsRspTrContextBase);
	detect_dead_context(ohci, "ARReq", OHCI1394_AsReqRcvContextBase);
	detect_dead_context(ohci, "ARRsp", OHCI1394_AsRspRcvContextBase);
	for (i = 0; i < 32; ++i) {
		if (!(ohci->it_context_support & (1 << i)))
			continue;
		sprintf(name, "IT%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoXmitContextBase(i));
	}
	for (i = 0; i < 32; ++i) {
		if (!(ohci->ir_context_support & (1 << i)))
			continue;
		sprintf(name, "IR%u", i);
		detect_dead_context(ohci, name, OHCI1394_IsoRcvContextBase(i));
	}
	/* TODO: maybe try to flush and restart the dead contexts */
}

static u32 cycle_timer_ticks(u32 cycle_timer)
{
	u32 ticks;

	ticks  = cycle_timer & 0xfff;
	ticks += 3072 * ((cycle_timer >> 12) & 0x1fff);
	ticks += (3072 * 8000) * (cycle_timer >> 25);

	return ticks;
}
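
/*
 * The isochronous cycle timer register packs three fields: cycleOffset in
 * bits 0-11 (0..3071, one tick of the 24.576 MHz clock), cycleCount in bits
 * 12-24 (0..7999 cycles of 125 us each), and cycleSeconds in bits 25-31.
 * The total tick count is therefore
 * offset + 3072 * count + 3072 * 8000 * seconds, which is exactly what
 * cycle_timer_ticks() computes.
 */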

/*
 * Some controllers exhibit one or more of the following bugs when updating
 * the iso cycle timer register:
 *  - When the lowest six bits are wrapping around to zero, a read that
 *    happens at the same time will return garbage in the lowest ten bits.
 *  - When the cycleOffset field wraps around to zero, the cycleCount field
 *    is not incremented for about 60 ns.
 *  - Occasionally, the entire register reads zero.
 *
 * To catch these, we read the register three times and ensure that the
 * difference between each two consecutive reads is approximately the same,
 * i.e., less than twice the other.  Furthermore, any negative difference
 * indicates an error.  (A PCI read should take at least 20 ticks of the
 * 24.576 MHz timer to execute, so we have enough precision to compute the
 * ratio of the differences.)
 */
static u32 get_cycle_time(struct fw_ohci *ohci)
{
	u32 c0, c1, c2;
	u32 t0, t1, t2;
	s32 diff01, diff12;
	int i;

	c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);

	if (ohci->quirks & QUIRK_CYCLE_TIMER) {
		i = 0;
		c1 = c2;
		c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
		do {
			c0 = c1;
			c1 = c2;
			c2 = reg_read(ohci, OHCI1394_IsochronousCycleTimer);
			t0 = cycle_timer_ticks(c0);
			t1 = cycle_timer_ticks(c1);
			t2 = cycle_timer_ticks(c2);
			diff01 = t1 - t0;
			diff12 = t2 - t1;
		} while ((diff01 <= 0 || diff12 <= 0 ||
			  diff01 / diff12 >= 2 || diff12 / diff01 >= 2)
			 && i++ < 20);
	}

	return c2;
}

/*
 * This function has to be called at least every 64 seconds.  The bus_time
 * field stores not only the upper 25 bits of the BUS_TIME register but also
 * the most significant bit of the cycle timer in bit 6 so that we can detect
 * changes in this bit.
 */
static u32 update_bus_time(struct fw_ohci *ohci)
{
	u32 cycle_time_seconds = get_cycle_time(ohci) >> 25;

	if (unlikely(!ohci->bus_time_running)) {
		reg_write(ohci, OHCI1394_IntMaskSet, OHCI1394_cycle64Seconds);
		ohci->bus_time = (lower_32_bits(get_seconds()) & ~0x7f) |
		                 (cycle_time_seconds & 0x40);
		ohci->bus_time_running = true;
	}

	if ((ohci->bus_time & 0x40) != (cycle_time_seconds & 0x40))
		ohci->bus_time += 0x40;

	return ohci->bus_time | cycle_time_seconds;
}

static int get_status_for_port(struct fw_ohci *ohci, int port_index)
{
	int reg;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, port_index);
	if (reg >= 0)
		reg = read_phy_reg(ohci, 8);
	mutex_unlock(&ohci->phy_reg_mutex);
	if (reg < 0)
		return reg;

	switch (reg & 0x0f) {
	case 0x06:
		return 2;	/* is child node (connected to parent node) */
	case 0x0e:
		return 3;	/* is parent node (connected to child node) */
	}
	return 1;		/* not connected */
}

static int get_self_id_pos(struct fw_ohci *ohci, u32 self_id,
	int self_id_count)
{
	int i;
	u32 entry;

	for (i = 0; i < self_id_count; i++) {
		entry = ohci->self_id_buffer[i];
		if ((self_id & 0xff000000) == (entry & 0xff000000))
			return -1;
		if ((self_id & 0xff000000) < (entry & 0xff000000))
			return i;
	}
	return i;
}
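
/*
 * Note: get_self_id_pos() keeps ohci->self_id_buffer sorted by phy ID (the
 * top byte of each self ID quadlet).  It returns the insertion index for a
 * new self ID, or -1 if an entry with the same phy ID is already present,
 * so a locally constructed self ID is never duplicated.
 */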

static int initiated_reset(struct fw_ohci *ohci)
{
	int reg;
	int ret = 0;

	mutex_lock(&ohci->phy_reg_mutex);
	reg = write_phy_reg(ohci, 7, 0xe0); /* Select page 7 */
	if (reg >= 0) {
		reg = read_phy_reg(ohci, 8);
		reg |= 0x40;
		reg = write_phy_reg(ohci, 8, reg); /* set PMODE bit */
		if (reg >= 0) {
			reg = read_phy_reg(ohci, 12); /* read register 12 */
			if (reg >= 0) {
				if ((reg & 0x08) == 0x08) {
					/* bit 3 indicates "initiated reset" */
					ret = 0x2;
				}
			}
		}
	}
	mutex_unlock(&ohci->phy_reg_mutex);
	return ret;
}

/*
 * TI TSB82AA2B and TSB12LV26 do not receive the selfID of a locally
 * attached TSB41BA3D phy; see http://www.ti.com/litv/pdf/sllz059.
 * Construct the selfID from phy register contents.
 */
static int find_and_insert_self_id(struct fw_ohci *ohci, int self_id_count)
{
	int reg, i, pos, status;
	/* link active 1, speed 3, bridge 0, contender 1, more packets 0 */
	u32 self_id = 0x8040c800;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return -EBUSY;
	}
	self_id |= ((reg & 0x3f) << 24); /* phy ID */

	reg = ohci_read_phy_reg(&ohci->card, 4);
	if (reg < 0)
		return reg;
	self_id |= ((reg & 0x07) << 8); /* power class */

	reg = ohci_read_phy_reg(&ohci->card, 1);
	if (reg < 0)
		return reg;
	self_id |= ((reg & 0x3f) << 16); /* gap count */

	for (i = 0; i < 3; i++) {
		status = get_status_for_port(ohci, i);
		if (status < 0)
			return status;
		self_id |= ((status & 0x3) << (6 - (i * 2)));
	}

	self_id |= initiated_reset(ohci);

	pos = get_self_id_pos(ohci, self_id, self_id_count);
	if (pos >= 0) {
		memmove(&(ohci->self_id_buffer[pos+1]),
			&(ohci->self_id_buffer[pos]),
			(self_id_count - pos) * sizeof(*ohci->self_id_buffer));
		ohci->self_id_buffer[pos] = self_id;
		self_id_count++;
	}
	return self_id_count;
}

static void bus_reset_work(struct work_struct *work)
{
	struct fw_ohci *ohci =
		container_of(work, struct fw_ohci, bus_reset_work);
	int self_id_count, generation, new_generation, i, j;
	u32 reg;
	void *free_rom = NULL;
	dma_addr_t free_rom_bus = 0;
	bool is_new_root;

	reg = reg_read(ohci, OHCI1394_NodeID);
	if (!(reg & OHCI1394_NodeID_idValid)) {
		ohci_notice(ohci,
			    "node ID not valid, new bus reset in progress\n");
		return;
	}
	if ((reg & OHCI1394_NodeID_nodeNumber) == 63) {
		ohci_notice(ohci, "misconfigured bus\n");
		return;
	}
	ohci->node_id = reg & (OHCI1394_NodeID_busNumber |
			       OHCI1394_NodeID_nodeNumber);

	is_new_root = (reg & OHCI1394_NodeID_root) != 0;
	if (!(ohci->is_root && is_new_root))
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	ohci->is_root = is_new_root;

	reg = reg_read(ohci, OHCI1394_SelfIDCount);
	if (reg & OHCI1394_SelfIDCount_selfIDError) {
		ohci_notice(ohci, "self ID receive error\n");
		return;
	}
	/*
	 * The count in the SelfIDCount register is the number of
	 * bytes in the self ID receive buffer.  Since we also receive
	 * the inverted quadlets and a header quadlet, we shift one
	 * bit extra to get the actual number of self IDs.
	 */
	self_id_count = (reg >> 3) & 0xff;

	if (self_id_count > 252) {
		ohci_notice(ohci, "bad selfIDSize (%08x)\n", reg);
		return;
	}

	generation = (cond_le32_to_cpu(ohci->self_id_cpu[0]) >> 16) & 0xff;
	rmb();

	for (i = 1, j = 0; j < self_id_count; i += 2, j++) {
		u32 id  = cond_le32_to_cpu(ohci->self_id_cpu[i]);
		u32 id2 = cond_le32_to_cpu(ohci->self_id_cpu[i + 1]);

		if (id != ~id2) {
			/*
			 * If the invalid data looks like a cycle start packet,
			 * it's likely to be the result of the cycle master
			 * having a wrong gap count.  In this case, the self
			 * IDs so far are valid and should be processed so
			 * that the bus manager can then correct the gap count.
			 */
			if (id == 0xffff008f) {
				ohci_notice(ohci,
					    "ignoring spurious self IDs\n");
				self_id_count = j;
				break;
			}

			ohci_notice(ohci, "bad self ID %d/%d (%08x != ~%08x)\n",
				    j, self_id_count, id, id2);
			return;
		}
		ohci->self_id_buffer[j] = id;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		self_id_count = find_and_insert_self_id(ohci, self_id_count);
		if (self_id_count < 0) {
			ohci_notice(ohci,
				    "could not construct local self ID\n");
			return;
		}
	}

	if (self_id_count == 0) {
		ohci_notice(ohci, "no self IDs\n");
		return;
	}
	rmb();

	/*
	 * Check the consistency of the self IDs we just read.  The
	 * problem we face is that a new bus reset can start while we
	 * read out the self IDs from the DMA buffer.  If this happens,
	 * the DMA buffer will be overwritten with new self IDs and we
	 * will read out inconsistent data.  The OHCI specification
	 * (section 11.2) recommends a technique similar to
	 * linux/seqlock.h, where we remember the generation of the
	 * self IDs in the buffer before reading them out and compare
	 * it to the current generation after reading them out.  If
	 * the two generations match, we know we have a consistent set
	 * of self IDs.
	 */
	new_generation = (reg_read(ohci, OHCI1394_SelfIDCount) >> 16) & 0xff;
	if (new_generation != generation) {
		ohci_notice(ohci, "new bus reset, discarding self IDs\n");
		return;
	}

	/* FIXME: Document how the locking works. */
	spin_lock_irq(&ohci->lock);

	ohci->generation = -1; /* prevent AT packet queueing */
	context_stop(&ohci->at_request_ctx);
	context_stop(&ohci->at_response_ctx);

	spin_unlock_irq(&ohci->lock);

	/*
	 * Per OHCI 1.2 draft, clause 7.2.3.3, hardware may leave the
	 * packets in the AT queues and software needs to drain them.
	 * Some OHCI 1.1 controllers need this as well.
	 */
	at_context_flush(&ohci->at_request_ctx);
	at_context_flush(&ohci->at_response_ctx);

	spin_lock_irq(&ohci->lock);

	ohci->generation = generation;
	reg_write(ohci, OHCI1394_IntEventClear, OHCI1394_busReset);

	if (ohci->quirks & QUIRK_RESET_PACKET)
		ohci->request_generation = generation;

	/*
	 * This next bit is unrelated to the AT context stuff but we
	 * have to do it under the spinlock also.  If a new config rom
	 * was set up before this reset, the old one is now no longer
	 * in use and we can free it.  Update the config rom pointers
	 * to point to the current config rom and clear the
	 * next_config_rom pointer so a new update can take place.
	 */
	if (ohci->next_config_rom != NULL) {
		if (ohci->next_config_rom != ohci->config_rom) {
			free_rom     = ohci->config_rom;
			free_rom_bus = ohci->config_rom_bus;
		}
		ohci->config_rom      = ohci->next_config_rom;
		ohci->config_rom_bus  = ohci->next_config_rom_bus;
		ohci->next_config_rom = NULL;

		/*
		 * Restore config_rom image and manually update
		 * config_rom registers.  Writing the header quadlet
		 * will indicate that the config rom is ready, so we
		 * do that last.
		 */
		reg_write(ohci, OHCI1394_BusOptions,
			  be32_to_cpu(ohci->config_rom[2]));
		ohci->config_rom[0] = ohci->next_header;
		reg_write(ohci, OHCI1394_ConfigROMhdr,
			  be32_to_cpu(ohci->next_header));
	}

#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	reg_write(ohci, OHCI1394_PhyReqFilterHiSet, ~0);
	reg_write(ohci, OHCI1394_PhyReqFilterLoSet, ~0);
#endif

	spin_unlock_irq(&ohci->lock);

	if (free_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  free_rom, free_rom_bus);

	log_selfids(ohci, generation, self_id_count);

	fw_core_handle_bus_reset(&ohci->card, ohci->node_id, generation,
				 self_id_count, ohci->self_id_buffer,
				 ohci->csr_state_setclear_abdicate);
	ohci->csr_state_setclear_abdicate = false;
}

static irqreturn_t irq_handler(int irq, void *data)
{
	struct fw_ohci *ohci = data;
	u32 event, iso_event;
	int i;

	event = reg_read(ohci, OHCI1394_IntEventClear);

	if (!event || !~event)
		return IRQ_NONE;

	/*
	 * busReset and postedWriteErr must not be cleared yet
	 * (OHCI 1.1 clauses 7.2.3.2 and 13.2.8.1)
	 */
	reg_write(ohci, OHCI1394_IntEventClear,
		  event & ~(OHCI1394_busReset | OHCI1394_postedWriteErr));
	log_irqs(ohci, event);

	if (event & OHCI1394_selfIDComplete)
		queue_work(fw_workqueue, &ohci->bus_reset_work);

	if (event & OHCI1394_RQPkt)
		tasklet_schedule(&ohci->ar_request_ctx.tasklet);

	if (event & OHCI1394_RSPkt)
		tasklet_schedule(&ohci->ar_response_ctx.tasklet);

	if (event & OHCI1394_reqTxComplete)
		tasklet_schedule(&ohci->at_request_ctx.tasklet);

	if (event & OHCI1394_respTxComplete)
		tasklet_schedule(&ohci->at_response_ctx.tasklet);

	if (event & OHCI1394_isochRx) {
		iso_event = reg_read(ohci, OHCI1394_IsoRecvIntEventClear);
		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			tasklet_schedule(
				&ohci->ir_context_list[i].context.tasklet);
			iso_event &= ~(1 << i);
		}
	}

	if (event & OHCI1394_isochTx) {
		iso_event = reg_read(ohci, OHCI1394_IsoXmitIntEventClear);
		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, iso_event);

		while (iso_event) {
			i = ffs(iso_event) - 1;
			tasklet_schedule(
				&ohci->it_context_list[i].context.tasklet);
			iso_event &= ~(1 << i);
		}
	}

	if (unlikely(event & OHCI1394_regAccessFail))
		ohci_err(ohci, "register access failure\n");

	if (unlikely(event & OHCI1394_postedWriteErr)) {
		reg_read(ohci, OHCI1394_PostedWriteAddressHi);
		reg_read(ohci, OHCI1394_PostedWriteAddressLo);
		reg_write(ohci, OHCI1394_IntEventClear,
			  OHCI1394_postedWriteErr);
		if (printk_ratelimit())
			ohci_err(ohci, "PCI posted write error\n");
	}

	if (unlikely(event & OHCI1394_cycleTooLong)) {
		if (printk_ratelimit())
			ohci_notice(ohci, "isochronous cycle too long\n");
		reg_write(ohci, OHCI1394_LinkControlSet,
			  OHCI1394_LinkControl_cycleMaster);
	}

	if (unlikely(event & OHCI1394_cycleInconsistent)) {
		/*
		 * We need to clear this event bit in order to make
		 * cycleMatch isochronous I/O work.  In theory we should
		 * stop active cycleMatch iso contexts now and restart
		 * them at least two cycles later.  (FIXME?)
		 */
		if (printk_ratelimit())
			ohci_notice(ohci, "isochronous cycle inconsistent\n");
	}

	if (unlikely(event & OHCI1394_unrecoverableError))
		handle_dead_contexts(ohci);

	if (event & OHCI1394_cycle64Seconds) {
		spin_lock(&ohci->lock);
		update_bus_time(ohci);
		spin_unlock(&ohci->lock);
	} else
		flush_writes(ohci);

	return IRQ_HANDLED;
}

static int software_reset(struct fw_ohci *ohci)
{
	u32 val;
	int i;

	reg_write(ohci, OHCI1394_HCControlSet, OHCI1394_HCControl_softReset);
	for (i = 0; i < 500; i++) {
		val = reg_read(ohci, OHCI1394_HCControlSet);
		if (!~val)
			return -ENODEV; /* Card was ejected. */

		if (!(val & OHCI1394_HCControl_softReset))
			return 0;

		msleep(1);
	}

	return -EBUSY;
}

static void copy_config_rom(__be32 *dest, const __be32 *src, size_t length)
{
	size_t size = length * 4;

	memcpy(dest, src, size);
	if (size < CONFIG_ROM_SIZE)
		memset(&dest[length], 0, CONFIG_ROM_SIZE - size);
}
2199
static int configure_1394a_enhancements(struct fw_ohci *ohci)
{
	bool enable_1394a;
	int ret, clear, set, offset;

	/* Check if the driver should configure link and PHY. */
	if (!(reg_read(ohci, OHCI1394_HCControlSet) &
	      OHCI1394_HCControl_programPhyEnable))
		return 0;

	/* Paranoia: check whether the PHY supports 1394a, too. */
	enable_1394a = false;
	ret = read_phy_reg(ohci, 2);
	if (ret < 0)
		return ret;
	if ((ret & PHY_EXTENDED_REGISTERS) == PHY_EXTENDED_REGISTERS) {
		ret = read_paged_phy_reg(ohci, 1, 8);
		if (ret < 0)
			return ret;
		if (ret >= 1)
			enable_1394a = true;
	}

	if (ohci->quirks & QUIRK_NO_1394A)
		enable_1394a = false;

	/* Configure PHY. */
	if (enable_1394a) {
		clear = 0;
		set = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
	} else {
		clear = PHY_ENABLE_ACCEL | PHY_ENABLE_MULTI;
		set = 0;
	}
	ret = update_phy_reg(ohci, 5, clear, set);
	if (ret < 0)
		return ret;

	if (enable_1394a)
		offset = OHCI1394_HCControlSet;
	else
		offset = OHCI1394_HCControlClear;
	reg_write(ohci, offset, OHCI1394_HCControl_aPhyEnhanceEnable);

	/* Clean up: configuration has been taken care of. */
	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_programPhyEnable);

	return 0;
}

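/*
 * Returns 1 if the local PHY identifies itself as a TI TSB41BA3D, 0 if it
 * does not (or lacks extended registers), or a negative error code.
 */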
static int probe_tsb41ba3d(struct fw_ohci *ohci)
{
	/* TI vendor ID = 0x080028, TSB41BA3D product ID = 0x833005 (sic) */
	static const u8 id[] = { 0x08, 0x00, 0x28, 0x83, 0x30, 0x05, };
	int reg, i;

	reg = read_phy_reg(ohci, 2);
	if (reg < 0)
		return reg;
	if ((reg & PHY_EXTENDED_REGISTERS) != PHY_EXTENDED_REGISTERS)
		return 0;

	for (i = ARRAY_SIZE(id) - 1; i >= 0; i--) {
		reg = read_paged_phy_reg(ohci, 1, i + 10);
		if (reg < 0)
			return reg;
		if (reg != id[i])
			return 0;
	}
	return 1;
}

static int ohci_enable(struct fw_card *card,
		       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci = fw_ohci(card);
	u32 lps, version, irqs;
	int i, ret;

	if (software_reset(ohci)) {
		ohci_err(ohci, "failed to reset ohci card\n");
		return -EBUSY;
	}

	/*
	 * Now enable LPS, which we need in order to start accessing
	 * most of the registers.  In fact, on some cards (ALI M5251),
	 * accessing registers in the SClk domain without LPS enabled
	 * will lock up the machine.  Wait 50msec to make sure we have
	 * full link enabled.  However, with some cards (well, at least
	 * a JMicron PCIe card), we have to try again sometimes.
	 *
	 * TI TSB82AA2 + TSB81BA3(A) cards signal LPS enabled early but
	 * cannot actually use the phy at that time.  These need tens of
	 * milliseconds pause between LPS write and first phy access too.
	 *
	 * But do not wait for 50 msec on Agere/LSI cards.  Their phy
	 * arbitration state machine may time out during such a long wait.
	 */
	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_LPS |
		  OHCI1394_HCControl_postedWriteEnable);
	flush_writes(ohci);

	if (!(ohci->quirks & QUIRK_PHY_LCTRL_TIMEOUT))
		msleep(50);

	for (lps = 0, i = 0; !lps && i < 150; i++) {
		msleep(1);
		lps = reg_read(ohci, OHCI1394_HCControlSet) &
		      OHCI1394_HCControl_LPS;
	}

	if (!lps) {
		ohci_err(ohci, "failed to set Link Power Status\n");
		return -EIO;
	}

	if (ohci->quirks & QUIRK_TI_SLLZ059) {
		ret = probe_tsb41ba3d(ohci);
		if (ret < 0)
			return ret;
		if (ret)
			ohci_notice(ohci, "local TSB41BA3D phy\n");
		else
			ohci->quirks &= ~QUIRK_TI_SLLZ059;
	}

	reg_write(ohci, OHCI1394_HCControlClear,
		  OHCI1394_HCControl_noByteSwapData);

	reg_write(ohci, OHCI1394_SelfIDBuffer, ohci->self_id_bus);
	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_cycleTimerEnable |
		  OHCI1394_LinkControl_cycleMaster);

	reg_write(ohci, OHCI1394_ATRetries,
		  OHCI1394_MAX_AT_REQ_RETRIES |
		  (OHCI1394_MAX_AT_RESP_RETRIES << 4) |
		  (OHCI1394_MAX_PHYS_RESP_RETRIES << 8) |
		  (200 << 16));

	ohci->bus_time_running = false;

	for (i = 0; i < 32; i++)
		if (ohci->ir_context_support & (1 << i))
			reg_write(ohci, OHCI1394_IsoRcvContextControlClear(i),
				  IR_CONTEXT_MULTI_CHANNEL_MODE);

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	if (version >= OHCI_VERSION_1_1) {
		reg_write(ohci, OHCI1394_InitialChannelsAvailableHi,
			  0xfffffffe);
		card->broadcast_channel_auto_allocated = true;
	}

	/* Get implemented bits of the priority arbitration request counter. */
	reg_write(ohci, OHCI1394_FairnessControl, 0x3f);
	ohci->pri_req_max = reg_read(ohci, OHCI1394_FairnessControl) & 0x3f;
	reg_write(ohci, OHCI1394_FairnessControl, 0);
	card->priority_budget_implemented = ohci->pri_req_max != 0;

	reg_write(ohci, OHCI1394_PhyUpperBound, 0x00010000);
	reg_write(ohci, OHCI1394_IntEventClear, ~0);
	reg_write(ohci, OHCI1394_IntMaskClear, ~0);

	ret = configure_1394a_enhancements(ohci);
	if (ret < 0)
		return ret;

	/* Activate link_on bit and contender bit in our self ID packets. */
	ret = ohci_update_phy_reg(card, 4, 0, PHY_LINK_ACTIVE | PHY_CONTENDER);
	if (ret < 0)
		return ret;

	/*
	 * When the link is not yet enabled, the atomic config rom
	 * update mechanism described below in ohci_set_config_rom()
	 * is not active.  We have to update ConfigRomHeader and
	 * BusOptions manually, and the write to ConfigROMmap takes
	 * effect immediately.  We tie this to the enabling of the
	 * link, so we have a valid config rom before enabling - the
	 * OHCI requires that ConfigROMhdr and BusOptions have valid
	 * values before enabling.
	 *
	 * However, when the ConfigROMmap is written, some controllers
	 * always read back quadlets 0 and 2 of the config rom to the
	 * ConfigROMhdr and BusOptions registers on bus reset.  They
	 * shouldn't do that in this initial case where the link isn't
	 * enabled.  That means we have to use the same workaround here,
	 * setting the bus header to 0 and then writing the right values
	 * in the bus reset work item.
	 */
	if (config_rom) {
		ohci->next_config_rom =
			dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
					   &ohci->next_config_rom_bus,
					   GFP_KERNEL);
		if (ohci->next_config_rom == NULL)
			return -ENOMEM;

		copy_config_rom(ohci->next_config_rom, config_rom, length);
	} else {
		/*
		 * In the suspend case, config_rom is NULL, which
		 * means that we just reuse the old config rom.
		 */
		ohci->next_config_rom = ohci->config_rom;
		ohci->next_config_rom_bus = ohci->config_rom_bus;
	}

	ohci->next_header = ohci->next_config_rom[0];
	ohci->next_config_rom[0] = 0;
	reg_write(ohci, OHCI1394_ConfigROMhdr, 0);
	reg_write(ohci, OHCI1394_BusOptions,
		  be32_to_cpu(ohci->next_config_rom[2]));
	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	reg_write(ohci, OHCI1394_AsReqFilterHiSet, 0x80000000);

	irqs = OHCI1394_reqTxComplete | OHCI1394_respTxComplete |
		OHCI1394_RQPkt | OHCI1394_RSPkt |
		OHCI1394_isochTx | OHCI1394_isochRx |
		OHCI1394_postedWriteErr |
		OHCI1394_selfIDComplete |
		OHCI1394_regAccessFail |
		OHCI1394_cycleInconsistent |
		OHCI1394_unrecoverableError |
		OHCI1394_cycleTooLong |
		OHCI1394_masterIntEnable;
	if (param_debug & OHCI_PARAM_DEBUG_BUSRESETS)
		irqs |= OHCI1394_busReset;
	reg_write(ohci, OHCI1394_IntMaskSet, irqs);

	reg_write(ohci, OHCI1394_HCControlSet,
		  OHCI1394_HCControl_linkEnable |
		  OHCI1394_HCControl_BIBimageValid);

	reg_write(ohci, OHCI1394_LinkControlSet,
		  OHCI1394_LinkControl_rcvSelfID |
		  OHCI1394_LinkControl_rcvPhyPkt);

	ar_context_run(&ohci->ar_request_ctx);
	ar_context_run(&ohci->ar_response_ctx);

	flush_writes(ohci);

	/* We are ready to go, reset bus to finish initialization. */
	fw_schedule_bus_reset(&ohci->card, false, true);

	return 0;
}

static int ohci_set_config_rom(struct fw_card *card,
			       const __be32 *config_rom, size_t length)
{
	struct fw_ohci *ohci;
	__be32 *next_config_rom;
	dma_addr_t uninitialized_var(next_config_rom_bus);

	ohci = fw_ohci(card);

	/*
	 * When the OHCI controller is enabled, the config rom update
	 * mechanism is a bit tricky, but easy enough to use.  See
	 * section 5.5.6 in the OHCI specification.
	 *
	 * The OHCI controller caches the new config rom address in a
	 * shadow register (ConfigROMmapNext) and needs a bus reset
	 * for the changes to take place.  When the bus reset is
	 * detected, the controller loads the new values for the
	 * ConfigRomHeader and BusOptions registers from the specified
	 * config rom and loads ConfigROMmap from the ConfigROMmapNext
	 * register.
	 *
	 * We use ohci->lock to avoid racing with the code that sets
	 * ohci->next_config_rom to NULL (see bus_reset_work).
	 */

	next_config_rom =
		dma_alloc_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				   &next_config_rom_bus, GFP_KERNEL);
	if (next_config_rom == NULL)
		return -ENOMEM;

	spin_lock_irq(&ohci->lock);

	/*
	 * If there is not an already pending config_rom update,
	 * push our new allocation into the ohci->next_config_rom
	 * and then mark the local variable as null so that we
	 * won't deallocate the new buffer.
	 *
	 * OTOH, if there is a pending config_rom update, the new
	 * config rom data is copied into that already mapped buffer,
	 * and our fresh allocation is freed once we drop the lock.
	 */
	if (ohci->next_config_rom == NULL) {
		ohci->next_config_rom = next_config_rom;
		ohci->next_config_rom_bus = next_config_rom_bus;
		next_config_rom = NULL;
	}

	copy_config_rom(ohci->next_config_rom, config_rom, length);

	ohci->next_header = config_rom[0];
	ohci->next_config_rom[0] = 0;

	reg_write(ohci, OHCI1394_ConfigROMmap, ohci->next_config_rom_bus);

	spin_unlock_irq(&ohci->lock);

	/* If we didn't use the DMA allocation, delete it. */
	if (next_config_rom != NULL)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  next_config_rom, next_config_rom_bus);

	/*
	 * Now initiate a bus reset to have the changes take
	 * effect.  We clean up the old config rom memory and DMA
	 * mappings in the bus reset work item, since the OHCI
	 * controller could need to access it before the bus reset
	 * takes effect.
	 */
	fw_schedule_bus_reset(&ohci->card, true, true);

	return 0;
}

static void ohci_send_request(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_request_ctx, packet);
}

static void ohci_send_response(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);

	at_context_transmit(&ohci->at_response_ctx, packet);
}

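/*
 * Cancellation is only possible while the packet is still queued in the AT
 * context, i.e. while packet->ack is still 0; once the controller has acked
 * it, -ENOENT is returned and the normal completion path runs instead.
 */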
static int ohci_cancel_packet(struct fw_card *card, struct fw_packet *packet)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct context *ctx = &ohci->at_request_ctx;
	struct driver_data *driver_data = packet->driver_data;
	int ret = -ENOENT;

	tasklet_disable(&ctx->tasklet);

	if (packet->ack != 0)
		goto out;

	if (packet->payload_mapped)
		dma_unmap_single(ohci->card.device, packet->payload_bus,
				 packet->payload_length, DMA_TO_DEVICE);

	log_ar_at_event(ohci, 'T', packet->speed, packet->header, 0x20);
	driver_data->packet = NULL;
	packet->ack = RCODE_CANCELLED;
	packet->callback(packet, &ohci->card, packet->ack);
	ret = 0;
 out:
	tasklet_enable(&ctx->tasklet);

	return ret;
}


static int ohci_enable_phys_dma(struct fw_card *card,
				int node_id, int generation)
{
#ifdef CONFIG_FIREWIRE_OHCI_REMOTE_DMA
	return 0;
#else
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	int n, ret = 0;

	/*
	 * The physical request filter registers are cleared by the
	 * controller on bus reset, so take ohci->lock and recheck the
	 * generation to avoid setting filter bits for a stale node ID.
	 */
	spin_lock_irqsave(&ohci->lock, flags);

	if (ohci->generation != generation) {
		ret = -ESTALE;
		goto out;
	}

	/*
	 * Note, if the node ID contains a non-local bus ID, physical DMA is
	 * enabled for _all_ nodes on remote buses.
	 */
	n = (node_id & 0xffc0) == LOCAL_BUS ? node_id & 0x3f : 63;
	if (n < 32)
		reg_write(ohci, OHCI1394_PhyReqFilterLoSet, 1 << n);
	else
		reg_write(ohci, OHCI1394_PhyReqFilterHiSet, 1 << (n - 32));

	flush_writes(ohci);
 out:
	spin_unlock_irqrestore(&ohci->lock, flags);

	return ret;
#endif /* CONFIG_FIREWIRE_OHCI_REMOTE_DMA */
}

static u32 ohci_read_csr(struct fw_card *card, int csr_offset)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;
	u32 value;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
		if (ohci->is_root &&
		    (reg_read(ohci, OHCI1394_LinkControlSet) &
		     OHCI1394_LinkControl_cycleMaster))
			value = CSR_STATE_BIT_CMSTR;
		else
			value = 0;
		if (ohci->csr_state_setclear_abdicate)
			value |= CSR_STATE_BIT_ABDICATE;

		return value;

	case CSR_NODE_IDS:
		return reg_read(ohci, OHCI1394_NodeID) << 16;

	case CSR_CYCLE_TIME:
		return get_cycle_time(ohci);

	case CSR_BUS_TIME:
		/*
		 * We might be called just after the cycle timer has wrapped
		 * around but just before the cycle64Seconds handler, so we
		 * better check here, too, if the bus time needs to be updated.
		 */
		spin_lock_irqsave(&ohci->lock, flags);
		value = update_bus_time(ohci);
		spin_unlock_irqrestore(&ohci->lock, flags);
		return value;

	case CSR_BUSY_TIMEOUT:
		value = reg_read(ohci, OHCI1394_ATRetries);
		return (value >> 4) & 0x0ffff00f;

	case CSR_PRIORITY_BUDGET:
		return (reg_read(ohci, OHCI1394_FairnessControl) & 0x3f) |
			(ohci->pri_req_max << 8);

	default:
		WARN_ON(1);
		return 0;
	}
}

static void ohci_write_csr(struct fw_card *card, int csr_offset, u32 value)
{
	struct fw_ohci *ohci = fw_ohci(card);
	unsigned long flags;

	switch (csr_offset) {
	case CSR_STATE_CLEAR:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlClear,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = false;
		break;

	case CSR_STATE_SET:
		if ((value & CSR_STATE_BIT_CMSTR) && ohci->is_root) {
			reg_write(ohci, OHCI1394_LinkControlSet,
				  OHCI1394_LinkControl_cycleMaster);
			flush_writes(ohci);
		}
		if (value & CSR_STATE_BIT_ABDICATE)
			ohci->csr_state_setclear_abdicate = true;
		break;

	case CSR_NODE_IDS:
		reg_write(ohci, OHCI1394_NodeID, value >> 16);
		flush_writes(ohci);
		break;

	case CSR_CYCLE_TIME:
		reg_write(ohci, OHCI1394_IsochronousCycleTimer, value);
		reg_write(ohci, OHCI1394_IntEventSet,
			  OHCI1394_cycleInconsistent);
		flush_writes(ohci);
		break;

	case CSR_BUS_TIME:
		spin_lock_irqsave(&ohci->lock, flags);
		ohci->bus_time = (update_bus_time(ohci) & 0x40) |
				 (value & ~0x7f);
		spin_unlock_irqrestore(&ohci->lock, flags);
		break;

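	/*
	 * The single CSR retry-limit nibble is replicated into all three
	 * 4-bit retry fields of ATRetries (AT request, AT response, and
	 * physical response), and the cycle-limit field moves up by four
	 * bits; ohci_read_csr() above applies the inverse mapping.
	 */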
	case CSR_BUSY_TIMEOUT:
		value = (value & 0xf) | ((value & 0xf) << 4) |
			((value & 0xf) << 8) | ((value & 0x0ffff000) << 4);
		reg_write(ohci, OHCI1394_ATRetries, value);
		flush_writes(ohci);
		break;

	case CSR_PRIORITY_BUDGET:
		reg_write(ohci, OHCI1394_FairnessControl, value & 0x3f);
		flush_writes(ohci);
		break;

	default:
		WARN_ON(1);
		break;
	}
}

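/*
 * Hand the headers accumulated so far to the client's single-channel
 * callback and reset the accumulator.
 */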
static void flush_iso_completions(struct iso_context *ctx)
{
	ctx->base.callback.sc(&ctx->base, ctx->last_timestamp,
			      ctx->header_length, ctx->header,
			      ctx->base.callback_data);
	ctx->header_length = 0;
}

static void copy_iso_headers(struct iso_context *ctx, const u32 *dma_hdr)
{
	u32 *ctx_hdr;

	if (ctx->header_length + ctx->base.header_size > PAGE_SIZE)
		flush_iso_completions(ctx);

	ctx_hdr = ctx->header + ctx->header_length;
	ctx->last_timestamp = (u16)le32_to_cpu((__force __le32)dma_hdr[0]);

	/*
	 * The two iso header quadlets are byteswapped to little
	 * endian by the controller, but we want to present them
	 * as big endian for consistency with the bus endianness.
	 */
	if (ctx->base.header_size > 0)
		ctx_hdr[0] = swab32(dma_hdr[1]);
	if (ctx->base.header_size > 4)
		ctx_hdr[1] = swab32(dma_hdr[0]);
	if (ctx->base.header_size > 8)
		memcpy(&ctx_hdr[2], &dma_hdr[2], ctx->base.header_size - 8);
	ctx->header_length += ctx->base.header_size;
}

static int handle_ir_packet_per_buffer(struct context *context,
				       struct descriptor *d,
				       struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	u32 buffer_dma;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	while (!(d->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))) {
		d++;
		buffer_dma = le32_to_cpu(d->data_address);
		dma_sync_single_range_for_cpu(context->ohci->card.device,
					      buffer_dma & PAGE_MASK,
					      buffer_dma & ~PAGE_MASK,
					      le16_to_cpu(d->req_count),
					      DMA_FROM_DEVICE);
	}

	copy_iso_headers(ctx, (u32 *) (last + 1));

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
		flush_iso_completions(ctx);

	return 1;
}

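/*
 * In buffer-fill mode each descriptor block is a single descriptor, so
 * d == last here; completion is detected via the residual count rather
 * than per-packet status.
 */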
static int handle_ir_buffer_fill(struct context *context,
				 struct descriptor *d,
				 struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	unsigned int req_count, res_count, completed;
	u32 buffer_dma;

	req_count = le16_to_cpu(last->req_count);
	res_count = le16_to_cpu(ACCESS_ONCE(last->res_count));
	completed = req_count - res_count;
	buffer_dma = le32_to_cpu(last->data_address);

	if (completed > 0) {
		ctx->mc_buffer_bus = buffer_dma;
		ctx->mc_completed = completed;
	}

	if (res_count != 0)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	dma_sync_single_range_for_cpu(context->ohci->card.device,
				      buffer_dma & PAGE_MASK,
				      buffer_dma & ~PAGE_MASK,
				      completed, DMA_FROM_DEVICE);

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS)) {
		ctx->base.callback.mc(&ctx->base,
				      buffer_dma + completed,
				      ctx->base.callback_data);
		ctx->mc_completed = 0;
	}

	return 1;
}

static void flush_ir_buffer_fill(struct iso_context *ctx)
{
	dma_sync_single_range_for_cpu(ctx->context.ohci->card.device,
				      ctx->mc_buffer_bus & PAGE_MASK,
				      ctx->mc_buffer_bus & ~PAGE_MASK,
				      ctx->mc_completed, DMA_FROM_DEVICE);

	ctx->base.callback.mc(&ctx->base,
			      ctx->mc_buffer_bus + ctx->mc_completed,
			      ctx->base.callback_data);
	ctx->mc_completed = 0;
}

static inline void sync_it_packet_for_cpu(struct context *context,
					  struct descriptor *pd)
{
	__le16 control;
	u32 buffer_dma;

	/* only packets beginning with OUTPUT_MORE* have data buffers */
	if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
		return;

	/* skip over the OUTPUT_MORE_IMMEDIATE descriptor */
	pd += 2;

	/*
	 * If the packet has a header, the first OUTPUT_MORE/LAST descriptor's
	 * data buffer is in the context program's coherent page and must not
	 * be synced.
	 */
	if ((le32_to_cpu(pd->data_address) & PAGE_MASK) ==
	    (context->current_bus & PAGE_MASK)) {
		if (pd->control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS))
			return;
		pd++;
	}

	do {
		buffer_dma = le32_to_cpu(pd->data_address);
		dma_sync_single_range_for_cpu(context->ohci->card.device,
					      buffer_dma & PAGE_MASK,
					      buffer_dma & ~PAGE_MASK,
					      le16_to_cpu(pd->req_count),
					      DMA_TO_DEVICE);
		control = pd->control;
		pd++;
	} while (!(control & cpu_to_le16(DESCRIPTOR_BRANCH_ALWAYS)));
}

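/*
 * For each completed IT packet, append one trailer quadlet to the header
 * accumulator: transfer_status in the upper 16 bits and the cycle timestamp
 * (taken from res_count of the completed descriptor) in the lower 16 bits.
 */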
static int handle_it_packet(struct context *context,
			    struct descriptor *d,
			    struct descriptor *last)
{
	struct iso_context *ctx =
		container_of(context, struct iso_context, context);
	struct descriptor *pd;
	__be32 *ctx_hdr;

	for (pd = d; pd <= last; pd++)
		if (pd->transfer_status)
			break;
	if (pd > last)
		/* Descriptor(s) not done yet, stop iteration */
		return 0;

	sync_it_packet_for_cpu(context, d);

	if (ctx->header_length + 4 > PAGE_SIZE)
		flush_iso_completions(ctx);

	ctx_hdr = ctx->header + ctx->header_length;
	ctx->last_timestamp = le16_to_cpu(last->res_count);

	*ctx_hdr = cpu_to_be32((le16_to_cpu(pd->transfer_status) << 16) |
			       le16_to_cpu(pd->res_count));
	ctx->header_length += 4;

	if (last->control & cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS))
		flush_iso_completions(ctx);

	return 1;
}

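/*
 * Program the IR multichannel mask registers.  Clearing the complement
 * before setting the new bits keeps channels that stay enabled receiving
 * throughout the update.
 */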
static void set_multichannel_mask(struct fw_ohci *ohci, u64 channels)
{
	u32 hi = channels >> 32, lo = channels;

	reg_write(ohci, OHCI1394_IRMultiChanMaskHiClear, ~hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoClear, ~lo);
	reg_write(ohci, OHCI1394_IRMultiChanMaskHiSet, hi);
	reg_write(ohci, OHCI1394_IRMultiChanMaskLoSet, lo);
	mmiowb();
	ohci->mc_channels = channels;
}

static struct fw_iso_context *ohci_allocate_iso_context(struct fw_card *card,
				int type, int channel, size_t header_size)
{
	struct fw_ohci *ohci = fw_ohci(card);
	struct iso_context *uninitialized_var(ctx);
	descriptor_callback_t uninitialized_var(callback);
	u64 *uninitialized_var(channels);
	u32 *uninitialized_var(mask), uninitialized_var(regs);
	int index, ret = -EBUSY;

	spin_lock_irq(&ohci->lock);

	switch (type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		mask     = &ohci->it_context_mask;
		callback = handle_it_packet;
		index    = ffs(*mask) - 1;
		if (index >= 0) {
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoXmitContextBase(index);
			ctx  = &ohci->it_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		channels = &ohci->ir_context_channels;
		mask     = &ohci->ir_context_mask;
		callback = handle_ir_packet_per_buffer;
		index    = *channels & 1ULL << channel ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			*channels &= ~(1ULL << channel);
			*mask     &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx  = &ohci->ir_context_list[index];
		}
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		mask     = &ohci->ir_context_mask;
		callback = handle_ir_buffer_fill;
		index    = !ohci->mc_allocated ? ffs(*mask) - 1 : -1;
		if (index >= 0) {
			ohci->mc_allocated = true;
			*mask &= ~(1 << index);
			regs = OHCI1394_IsoRcvContextBase(index);
			ctx  = &ohci->ir_context_list[index];
		}
		break;

	default:
		index = -1;
		ret = -ENOSYS;
	}

	spin_unlock_irq(&ohci->lock);

	if (index < 0)
		return ERR_PTR(ret);

	memset(ctx, 0, sizeof(*ctx));
	ctx->header_length = 0;
	ctx->header = (void *) __get_free_page(GFP_KERNEL);
	if (ctx->header == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	ret = context_init(&ctx->context, ohci, regs, callback);
	if (ret < 0)
		goto out_with_header;

	if (type == FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL) {
		set_multichannel_mask(ohci, 0);
		ctx->mc_completed = 0;
	}

	return &ctx->base;

 out_with_header:
	free_page((unsigned long)ctx->header);
 out:
	spin_lock_irq(&ohci->lock);

	switch (type) {
	case FW_ISO_CONTEXT_RECEIVE:
		*channels |= 1ULL << channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ohci->mc_allocated = false;
		break;
	}
	*mask |= 1 << index;

	spin_unlock_irq(&ohci->lock);

	return ERR_PTR(ret);
}

static int ohci_start_iso(struct fw_iso_context *base,
			  s32 cycle, u32 sync, u32 tags)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	struct fw_ohci *ohci = ctx->context.ohci;
	u32 control = IR_CONTEXT_ISOCH_HEADER, match;
	int index;

	/* the controller cannot start without any queued packets */
	if (ctx->context.last->branch_address == 0)
		return -ENODATA;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		match = 0;
		if (cycle >= 0)
			match = IT_CONTEXT_CYCLE_MATCH_ENABLE |
				(cycle & 0x7fff) << 16;

		reg_write(ohci, OHCI1394_IsoXmitIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, 1 << index);
		context_run(&ctx->context, match);
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		control |= IR_CONTEXT_BUFFER_FILL |
			   IR_CONTEXT_MULTI_CHANNEL_MODE;
		/* fall through */
	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		match = (tags << 28) | (sync << 8) | ctx->base.channel;
		if (cycle >= 0) {
			match |= (cycle & 0x07fff) << 12;
			control |= IR_CONTEXT_CYCLE_MATCH_ENABLE;
		}

		reg_write(ohci, OHCI1394_IsoRecvIntEventClear, 1 << index);
		reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, 1 << index);
		reg_write(ohci, CONTEXT_MATCH(ctx->context.regs), match);
		context_run(&ctx->context, control);

		ctx->sync = sync;
		ctx->tags = tags;

		break;
	}

	return 0;
}

static int ohci_stop_iso(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int index;

	switch (ctx->base.type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, 1 << index);
		break;

	case FW_ISO_CONTEXT_RECEIVE:
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, 1 << index);
		break;
	}
	flush_writes(ohci);
	context_stop(&ctx->context);
	tasklet_kill(&ctx->context.tasklet);

	return 0;
}

static void ohci_free_iso_context(struct fw_iso_context *base)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int index;

	ohci_stop_iso(base);
	context_release(&ctx->context);
	free_page((unsigned long)ctx->header);

	spin_lock_irqsave(&ohci->lock, flags);

	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		index = ctx - ohci->it_context_list;
		ohci->it_context_mask |= 1 << index;
		break;

	case FW_ISO_CONTEXT_RECEIVE:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= 1ULL << base->channel;
		break;

	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		index = ctx - ohci->ir_context_list;
		ohci->ir_context_mask |= 1 << index;
		ohci->ir_context_channels |= ohci->mc_channels;
		ohci->mc_channels = 0;
		ohci->mc_allocated = false;
		break;
	}

	spin_unlock_irqrestore(&ohci->lock, flags);
}

static int ohci_set_iso_channels(struct fw_iso_context *base, u64 *channels)
{
	struct fw_ohci *ohci = fw_ohci(base->card);
	unsigned long flags;
	int ret;

	switch (base->type) {
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:

		spin_lock_irqsave(&ohci->lock, flags);

		/* Don't allow multichannel to grab other contexts' channels. */
		if (~ohci->ir_context_channels & ~ohci->mc_channels & *channels) {
			*channels = ohci->ir_context_channels;
			ret = -EBUSY;
		} else {
			set_multichannel_mask(ohci, *channels);
			ret = 0;
		}

		spin_unlock_irqrestore(&ohci->lock, flags);

		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

#ifdef CONFIG_PM
static void ohci_resume_iso_dma(struct fw_ohci *ohci)
{
	int i;
	struct iso_context *ctx;

	for (i = 0; i < ohci->n_ir; i++) {
		ctx = &ohci->ir_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}

	for (i = 0; i < ohci->n_it; i++) {
		ctx = &ohci->it_context_list[i];
		if (ctx->context.running)
			ohci_start_iso(&ctx->base, 0, ctx->sync, ctx->tags);
	}
}
#endif

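/*
 * An IT packet needs z descriptors: two for the OUTPUT_MORE_IMMEDIATE block
 * carrying the eight isochronous header bytes (only one when skipping a
 * cycle), one more if user-supplied header data follows, plus one
 * OUTPUT_MORE/LAST per page spanned by the payload.  The user header itself
 * is stored in header_z extra descriptor slots behind the program.
 */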
static int queue_iso_transmit(struct iso_context *ctx,
			      struct fw_iso_packet *packet,
			      struct fw_iso_buffer *buffer,
			      unsigned long payload)
{
	struct descriptor *d, *last, *pd;
	struct fw_iso_packet *p;
	__le32 *header;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, payload_z, irq;
	u32 payload_index, payload_end_index, next_page_index;
	int page, end_page, i, length, offset;

	p = packet;
	payload_index = payload;

	if (p->skip)
		z = 1;
	else
		z = 2;
	if (p->header_length > 0)
		z++;

	/* Determine the first page the payload isn't contained in. */
	end_page = PAGE_ALIGN(payload_index + p->payload_length) >> PAGE_SHIFT;
	if (p->payload_length > 0)
		payload_z = end_page - (payload_index >> PAGE_SHIFT);
	else
		payload_z = 0;

	z += payload_z;

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(p->header_length, sizeof(*d));

	d = context_get_descriptors(&ctx->context, z + header_z, &d_bus);
	if (d == NULL)
		return -ENOMEM;

	if (!p->skip) {
		d[0].control   = cpu_to_le16(DESCRIPTOR_KEY_IMMEDIATE);
		d[0].req_count = cpu_to_le16(8);
		/*
		 * Link the skip address to this descriptor itself.  This causes
		 * a context to skip a cycle whenever lost cycles or FIFO
		 * overruns occur, without dropping the data.  The application
		 * should then decide whether this is an error condition or not.
		 * FIXME:  Make the context's cycle-lost behaviour configurable?
		 */
		d[0].branch_address = cpu_to_le32(d_bus | z);

		header = (__le32 *) &d[1];
		header[0] = cpu_to_le32(IT_HEADER_SY(p->sy) |
					IT_HEADER_TAG(p->tag) |
					IT_HEADER_TCODE(TCODE_STREAM_DATA) |
					IT_HEADER_CHANNEL(ctx->base.channel) |
					IT_HEADER_SPEED(ctx->base.speed));
		header[1] =
			cpu_to_le32(IT_HEADER_DATA_LENGTH(p->header_length +
							  p->payload_length));
	}

	if (p->header_length > 0) {
		d[2].req_count    = cpu_to_le16(p->header_length);
		d[2].data_address = cpu_to_le32(d_bus + z * sizeof(*d));
		memcpy(&d[z], p->header, p->header_length);
	}

	pd = d + z - payload_z;
	payload_end_index = payload_index + p->payload_length;
	for (i = 0; i < payload_z; i++) {
		page            = payload_index >> PAGE_SHIFT;
		offset          = payload_index & ~PAGE_MASK;
		next_page_index = (page + 1) << PAGE_SHIFT;
		length          =
			min(next_page_index, payload_end_index) - payload_index;
		pd[i].req_count = cpu_to_le16(length);

		page_bus = page_private(buffer->pages[page]);
		pd[i].data_address = cpu_to_le32(page_bus + offset);

		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
						 page_bus, offset, length,
						 DMA_TO_DEVICE);

		payload_index += length;
	}

	if (p->interrupt)
		irq = DESCRIPTOR_IRQ_ALWAYS;
	else
		irq = DESCRIPTOR_NO_IRQ;

	last = z == 2 ? d : d + z - 1;
	last->control |= cpu_to_le16(DESCRIPTOR_OUTPUT_LAST |
				     DESCRIPTOR_STATUS |
				     DESCRIPTOR_BRANCH_ALWAYS |
				     irq);

	context_append(&ctx->context, d, z, header_z);

	return 0;
}

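/*
 * In packet-per-buffer mode one fw_iso_packet may describe several packets:
 * packet->header_length is a multiple of base.header_size and determines the
 * packet count, and packet->payload_length is split evenly among them.
 */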
static int queue_iso_packet_per_buffer(struct iso_context *ctx,
				       struct fw_iso_packet *packet,
				       struct fw_iso_buffer *buffer,
				       unsigned long payload)
{
	struct device *device = ctx->context.ohci->card.device;
	struct descriptor *d, *pd;
	dma_addr_t d_bus, page_bus;
	u32 z, header_z, rest;
	int i, j, length;
	int page, offset, packet_count, header_size, payload_per_buffer;

	/*
	 * The OHCI controller puts the isochronous header and trailer in the
	 * buffer, so we need at least 8 bytes.
	 */
	packet_count = packet->header_length / ctx->base.header_size;
	header_size  = max(ctx->base.header_size, (size_t)8);

	/* Get header size in number of descriptors. */
	header_z = DIV_ROUND_UP(header_size, sizeof(*d));
	page     = payload >> PAGE_SHIFT;
	offset   = payload & ~PAGE_MASK;
	payload_per_buffer = packet->payload_length / packet_count;

	for (i = 0; i < packet_count; i++) {
		/* d points to the header descriptor */
		z = DIV_ROUND_UP(payload_per_buffer + offset, PAGE_SIZE) + 1;
		d = context_get_descriptors(&ctx->context,
				z + header_z, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control      = cpu_to_le16(DESCRIPTOR_STATUS |
					      DESCRIPTOR_INPUT_MORE);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		d->req_count    = cpu_to_le16(header_size);
		d->res_count    = d->req_count;
		d->transfer_status = 0;
		d->data_address = cpu_to_le32(d_bus + (z * sizeof(*d)));

		rest = payload_per_buffer;
		pd = d;
		for (j = 1; j < z; j++) {
			pd++;
			pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
						  DESCRIPTOR_INPUT_MORE);

			if (offset + rest < PAGE_SIZE)
				length = rest;
			else
				length = PAGE_SIZE - offset;
			pd->req_count = cpu_to_le16(length);
			pd->res_count = pd->req_count;
			pd->transfer_status = 0;

			page_bus = page_private(buffer->pages[page]);
			pd->data_address = cpu_to_le32(page_bus + offset);

			dma_sync_single_range_for_device(device, page_bus,
							 offset, length,
							 DMA_FROM_DEVICE);

			offset = (offset + length) & ~PAGE_MASK;
			rest -= length;
			if (offset == 0)
				page++;
		}
		pd->control = cpu_to_le16(DESCRIPTOR_STATUS |
					  DESCRIPTOR_INPUT_LAST |
					  DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->interrupt && i == packet_count - 1)
			pd->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		context_append(&ctx->context, d, z, header_z);
	}

	return 0;
}

static int queue_iso_buffer_fill(struct iso_context *ctx,
				 struct fw_iso_packet *packet,
				 struct fw_iso_buffer *buffer,
				 unsigned long payload)
{
	struct descriptor *d;
	dma_addr_t d_bus, page_bus;
	int page, offset, rest, z, i, length;

	page   = payload >> PAGE_SHIFT;
	offset = payload & ~PAGE_MASK;
	rest   = packet->payload_length;

	/* We need one descriptor for each page in the buffer. */
	z = DIV_ROUND_UP(offset + rest, PAGE_SIZE);

	if (WARN_ON(offset & 3 || rest & 3 || page + z > buffer->page_count))
		return -EFAULT;

	for (i = 0; i < z; i++) {
		d = context_get_descriptors(&ctx->context, 1, &d_bus);
		if (d == NULL)
			return -ENOMEM;

		d->control = cpu_to_le16(DESCRIPTOR_INPUT_MORE |
					 DESCRIPTOR_BRANCH_ALWAYS);
		if (packet->skip && i == 0)
			d->control |= cpu_to_le16(DESCRIPTOR_WAIT);
		if (packet->interrupt && i == z - 1)
			d->control |= cpu_to_le16(DESCRIPTOR_IRQ_ALWAYS);

		if (offset + rest < PAGE_SIZE)
			length = rest;
		else
			length = PAGE_SIZE - offset;
		d->req_count = cpu_to_le16(length);
		d->res_count = d->req_count;
		d->transfer_status = 0;

		page_bus = page_private(buffer->pages[page]);
		d->data_address = cpu_to_le32(page_bus + offset);

		dma_sync_single_range_for_device(ctx->context.ohci->card.device,
						 page_bus, offset, length,
						 DMA_FROM_DEVICE);

		rest -= length;
		offset = 0;
		page++;

		context_append(&ctx->context, d, 1, 0);
	}

	return 0;
}

static int ohci_queue_iso(struct fw_iso_context *base,
			  struct fw_iso_packet *packet,
			  struct fw_iso_buffer *buffer,
			  unsigned long payload)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	unsigned long flags;
	int ret = -ENOSYS;

	spin_lock_irqsave(&ctx->context.ohci->lock, flags);
	switch (base->type) {
	case FW_ISO_CONTEXT_TRANSMIT:
		ret = queue_iso_transmit(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE:
		ret = queue_iso_packet_per_buffer(ctx, packet, buffer, payload);
		break;
	case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
		ret = queue_iso_buffer_fill(ctx, packet, buffer, payload);
		break;
	}
	spin_unlock_irqrestore(&ctx->context.ohci->lock, flags);

	return ret;
}

static void ohci_flush_queue_iso(struct fw_iso_context *base)
{
	struct context *ctx =
		&container_of(base, struct iso_context, base)->context;

	reg_write(ctx->ohci, CONTROL_SET(ctx->regs), CONTEXT_WAKE);
}

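/*
 * The flushing_completions bit serializes concurrent flushers: whoever wins
 * test_and_set_bit_lock() runs the context tasklet synchronously and flushes
 * any partially accumulated completions; losers simply return 0.
 */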
static int ohci_flush_iso_completions(struct fw_iso_context *base)
{
	struct iso_context *ctx = container_of(base, struct iso_context, base);
	int ret = 0;

	tasklet_disable(&ctx->context.tasklet);

	if (!test_and_set_bit_lock(0, &ctx->flushing_completions)) {
		context_tasklet((unsigned long)&ctx->context);

		switch (base->type) {
		case FW_ISO_CONTEXT_TRANSMIT:
		case FW_ISO_CONTEXT_RECEIVE:
			if (ctx->header_length != 0)
				flush_iso_completions(ctx);
			break;
		case FW_ISO_CONTEXT_RECEIVE_MULTICHANNEL:
			if (ctx->mc_completed != 0)
				flush_ir_buffer_fill(ctx);
			break;
		default:
			ret = -ENOSYS;
		}

		clear_bit_unlock(0, &ctx->flushing_completions);
		smp_mb__after_clear_bit();
	}

	tasklet_enable(&ctx->context.tasklet);

	return ret;
}

static const struct fw_card_driver ohci_driver = {
	.enable			= ohci_enable,
	.read_phy_reg		= ohci_read_phy_reg,
	.update_phy_reg		= ohci_update_phy_reg,
	.set_config_rom		= ohci_set_config_rom,
	.send_request		= ohci_send_request,
	.send_response		= ohci_send_response,
	.cancel_packet		= ohci_cancel_packet,
	.enable_phys_dma	= ohci_enable_phys_dma,
	.read_csr		= ohci_read_csr,
	.write_csr		= ohci_write_csr,

	.allocate_iso_context	= ohci_allocate_iso_context,
	.free_iso_context	= ohci_free_iso_context,
	.set_iso_channels	= ohci_set_iso_channels,
	.queue_iso		= ohci_queue_iso,
	.flush_queue_iso	= ohci_flush_queue_iso,
	.flush_iso_completions	= ohci_flush_iso_completions,
	.start_iso		= ohci_start_iso,
	.stop_iso		= ohci_stop_iso,
};

#ifdef CONFIG_PPC_PMAC
static void pmac_ohci_on(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 1);
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 1);
		}
	}
}

static void pmac_ohci_off(struct pci_dev *dev)
{
	if (machine_is(powermac)) {
		struct device_node *ofn = pci_device_to_OF_node(dev);

		if (ofn) {
			pmac_call_feature(PMAC_FTR_1394_ENABLE, ofn, 0, 0);
			pmac_call_feature(PMAC_FTR_1394_CABLE_POWER, ofn, 0, 0);
		}
	}
}
#else
static inline void pmac_ohci_on(struct pci_dev *dev) {}
static inline void pmac_ohci_off(struct pci_dev *dev) {}
#endif

static int pci_probe(struct pci_dev *dev,
		     const struct pci_device_id *ent)
{
	struct fw_ohci *ohci;
	u32 bus_options, max_receive, link_speed, version;
	u64 guid;
	int i, err;
	size_t size;

	if (dev->vendor == PCI_VENDOR_ID_PINNACLE_SYSTEMS) {
		dev_err(&dev->dev, "Pinnacle MovieBoard is not yet supported\n");
		return -ENOSYS;
	}

	ohci = kzalloc(sizeof(*ohci), GFP_KERNEL);
	if (ohci == NULL) {
		err = -ENOMEM;
		goto fail;
	}

	fw_card_initialize(&ohci->card, &ohci_driver, &dev->dev);

	pmac_ohci_on(dev);

	err = pci_enable_device(dev);
	if (err) {
		dev_err(&dev->dev, "failed to enable OHCI hardware\n");
		goto fail_free;
	}

	pci_set_master(dev);
	pci_write_config_dword(dev, OHCI1394_PCI_HCI_Control, 0);
	pci_set_drvdata(dev, ohci);

	spin_lock_init(&ohci->lock);
	mutex_init(&ohci->phy_reg_mutex);

	INIT_WORK(&ohci->bus_reset_work, bus_reset_work);

	if (!(pci_resource_flags(dev, 0) & IORESOURCE_MEM) ||
	    pci_resource_len(dev, 0) < OHCI1394_REGISTER_SIZE) {
		ohci_err(ohci, "invalid MMIO resource\n");
		err = -ENXIO;
		goto fail_disable;
	}

	err = pci_request_region(dev, 0, ohci_driver_name);
	if (err) {
		ohci_err(ohci, "MMIO resource unavailable\n");
		goto fail_disable;
	}

	ohci->registers = pci_iomap(dev, 0, OHCI1394_REGISTER_SIZE);
	if (ohci->registers == NULL) {
		ohci_err(ohci, "failed to remap registers\n");
		err = -ENXIO;
		goto fail_iomem;
	}

	for (i = 0; i < ARRAY_SIZE(ohci_quirks); i++)
		if ((ohci_quirks[i].vendor == dev->vendor) &&
		    (ohci_quirks[i].device == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].device == dev->device) &&
		    (ohci_quirks[i].revision == (unsigned short)PCI_ANY_ID ||
		     ohci_quirks[i].revision >= dev->revision)) {
			ohci->quirks = ohci_quirks[i].flags;
			break;
		}
	if (param_quirks)
		ohci->quirks = param_quirks;

	/*
	 * Because dma_alloc_coherent() allocates at least one page,
	 * we save space by using a common buffer for the AR request/
	 * response descriptors and the self IDs buffer.
	 */
	BUILD_BUG_ON(AR_BUFFERS * sizeof(struct descriptor) > PAGE_SIZE/4);
	BUILD_BUG_ON(SELF_ID_BUF_SIZE > PAGE_SIZE/2);
	ohci->misc_buffer = dma_alloc_coherent(ohci->card.device,
					       PAGE_SIZE,
					       &ohci->misc_buffer_bus,
					       GFP_KERNEL);
	if (!ohci->misc_buffer) {
		err = -ENOMEM;
		goto fail_iounmap;
	}

	err = ar_context_init(&ohci->ar_request_ctx, ohci, 0,
			      OHCI1394_AsReqRcvContextControlSet);
	if (err < 0)
		goto fail_misc_buf;

	err = ar_context_init(&ohci->ar_response_ctx, ohci, PAGE_SIZE/4,
			      OHCI1394_AsRspRcvContextControlSet);
	if (err < 0)
		goto fail_arreq_ctx;

	err = context_init(&ohci->at_request_ctx, ohci,
			   OHCI1394_AsReqTrContextControlSet, handle_at_packet);
	if (err < 0)
		goto fail_arrsp_ctx;

	err = context_init(&ohci->at_response_ctx, ohci,
			   OHCI1394_AsRspTrContextControlSet, handle_at_packet);
	if (err < 0)
		goto fail_atreq_ctx;

	reg_write(ohci, OHCI1394_IsoRecvIntMaskSet, ~0);
	ohci->ir_context_channels = ~0ULL;
	ohci->ir_context_support = reg_read(ohci, OHCI1394_IsoRecvIntMaskSet);
	reg_write(ohci, OHCI1394_IsoRecvIntMaskClear, ~0);
	ohci->ir_context_mask = ohci->ir_context_support;
	ohci->n_ir = hweight32(ohci->ir_context_mask);
	size = sizeof(struct iso_context) * ohci->n_ir;
	ohci->ir_context_list = kzalloc(size, GFP_KERNEL);

	reg_write(ohci, OHCI1394_IsoXmitIntMaskSet, ~0);
	ohci->it_context_support = reg_read(ohci, OHCI1394_IsoXmitIntMaskSet);
	reg_write(ohci, OHCI1394_IsoXmitIntMaskClear, ~0);
	ohci->it_context_mask = ohci->it_context_support;
	ohci->n_it = hweight32(ohci->it_context_mask);
	size = sizeof(struct iso_context) * ohci->n_it;
	ohci->it_context_list = kzalloc(size, GFP_KERNEL);

	if (ohci->it_context_list == NULL || ohci->ir_context_list == NULL) {
		err = -ENOMEM;
		goto fail_contexts;
	}

	ohci->self_id_cpu = ohci->misc_buffer     + PAGE_SIZE/2;
	ohci->self_id_bus = ohci->misc_buffer_bus + PAGE_SIZE/2;

	bus_options = reg_read(ohci, OHCI1394_BusOptions);
	max_receive = (bus_options >> 12) & 0xf;
	link_speed = bus_options & 0x7;
	guid = ((u64) reg_read(ohci, OHCI1394_GUIDHi) << 32) |
		reg_read(ohci, OHCI1394_GUIDLo);

	if (!(ohci->quirks & QUIRK_NO_MSI))
		pci_enable_msi(dev);
	if (request_irq(dev->irq, irq_handler,
			pci_dev_msi_enabled(dev) ? 0 : IRQF_SHARED,
			ohci_driver_name, ohci)) {
		ohci_err(ohci, "failed to allocate interrupt %d\n", dev->irq);
		err = -EIO;
		goto fail_msi;
	}

	err = fw_card_add(&ohci->card, max_receive, link_speed, guid);
	if (err)
		goto fail_irq;

	version = reg_read(ohci, OHCI1394_Version) & 0x00ff00ff;
	ohci_notice(ohci,
		    "added OHCI v%x.%x device as card %d, "
		    "%d IR + %d IT contexts, quirks 0x%x\n",
		    version >> 16, version & 0xff, ohci->card.index,
		    ohci->n_ir, ohci->n_it, ohci->quirks);

	return 0;

 fail_irq:
	free_irq(dev->irq, ohci);
 fail_msi:
	pci_disable_msi(dev);
 fail_contexts:
	kfree(ohci->ir_context_list);
	kfree(ohci->it_context_list);
	context_release(&ohci->at_response_ctx);
 fail_atreq_ctx:
	context_release(&ohci->at_request_ctx);
 fail_arrsp_ctx:
	ar_context_release(&ohci->ar_response_ctx);
 fail_arreq_ctx:
	ar_context_release(&ohci->ar_request_ctx);
 fail_misc_buf:
	dma_free_coherent(ohci->card.device, PAGE_SIZE,
			  ohci->misc_buffer, ohci->misc_buffer_bus);
 fail_iounmap:
	pci_iounmap(dev, ohci->registers);
 fail_iomem:
	pci_release_region(dev, 0);
 fail_disable:
	pci_disable_device(dev);
 fail_free:
	kfree(ohci);
	pmac_ohci_off(dev);
 fail:
	return err;
}

static void pci_remove(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);

	/*
	 * If the removal is happening from the suspend state, LPS won't be
	 * enabled and host registers (eg., IntMaskClear) won't be accessible.
	 */
	if (reg_read(ohci, OHCI1394_HCControlSet) & OHCI1394_HCControl_LPS) {
		reg_write(ohci, OHCI1394_IntMaskClear, ~0);
		flush_writes(ohci);
	}
	cancel_work_sync(&ohci->bus_reset_work);
	fw_core_remove_card(&ohci->card);

	/*
	 * FIXME: Fail all pending packets here, now that the upper
	 * layers can't queue any more.
	 */
	software_reset(ohci);
	free_irq(dev->irq, ohci);

	if (ohci->next_config_rom && ohci->next_config_rom != ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->next_config_rom, ohci->next_config_rom_bus);
	if (ohci->config_rom)
		dma_free_coherent(ohci->card.device, CONFIG_ROM_SIZE,
				  ohci->config_rom, ohci->config_rom_bus);
	ar_context_release(&ohci->ar_request_ctx);
	ar_context_release(&ohci->ar_response_ctx);
	dma_free_coherent(ohci->card.device, PAGE_SIZE,
			  ohci->misc_buffer, ohci->misc_buffer_bus);
	context_release(&ohci->at_request_ctx);
	context_release(&ohci->at_response_ctx);
	kfree(ohci->it_context_list);
	kfree(ohci->ir_context_list);
	pci_disable_msi(dev);
	pci_iounmap(dev, ohci->registers);
	pci_release_region(dev, 0);
	pci_disable_device(dev);
	kfree(ohci);
	pmac_ohci_off(dev);

	dev_notice(&dev->dev, "removed fw-ohci device\n");
}

#ifdef CONFIG_PM
static int pci_suspend(struct pci_dev *dev, pm_message_t state)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	software_reset(ohci);
	err = pci_save_state(dev);
	if (err) {
		ohci_err(ohci, "pci_save_state failed\n");
		return err;
	}
	err = pci_set_power_state(dev, pci_choose_state(dev, state));
	if (err)
		ohci_err(ohci, "pci_set_power_state failed with %d\n", err);
	pmac_ohci_off(dev);

	return 0;
}

static int pci_resume(struct pci_dev *dev)
{
	struct fw_ohci *ohci = pci_get_drvdata(dev);
	int err;

	pmac_ohci_on(dev);
	pci_set_power_state(dev, PCI_D0);
	pci_restore_state(dev);
	err = pci_enable_device(dev);
	if (err) {
		ohci_err(ohci, "pci_enable_device failed\n");
		return err;
	}

	/* Some systems don't setup GUID registers on resume from ram */
	if (!reg_read(ohci, OHCI1394_GUIDLo) &&
	    !reg_read(ohci, OHCI1394_GUIDHi)) {
		reg_write(ohci, OHCI1394_GUIDLo, (u32)ohci->card.guid);
		reg_write(ohci, OHCI1394_GUIDHi, (u32)(ohci->card.guid >> 32));
	}

	err = ohci_enable(&ohci->card, NULL, 0);
	if (err)
		return err;

	ohci_resume_iso_dma(ohci);

	return 0;
}
#endif


static const struct pci_device_id pci_table[] = {
	{ PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_FIREWIRE_OHCI, ~0) },
	{ }
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver fw_ohci_pci_driver = {
	.name		= ohci_driver_name,
	.id_table	= pci_table,
	.probe		= pci_probe,
	.remove		= pci_remove,
#ifdef CONFIG_PM
	.resume		= pci_resume,
	.suspend	= pci_suspend,
#endif
};

module_pci_driver(fw_ohci_pci_driver);

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Driver for PCI OHCI IEEE1394 controllers");
MODULE_LICENSE("GPL");

/* Provide a module alias so root-on-sbp2 initrds don't break. */
MODULE_ALIAS("ohci1394");