// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Core IEEE1394 transaction logic
 *
 * Copyright (C) Kristian Hoegsberg <krh@bitplanet.net>
 */

#include <linux/bug.h>
#include <linux/completion.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/idr.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/rculist.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include <asm/byteorder.h>

#include "core.h"

#define HEADER_PRI(pri)			((pri) << 0)
#define HEADER_TCODE(tcode)		((tcode) << 4)
#define HEADER_RETRY(retry)		((retry) << 8)
#define HEADER_TLABEL(tlabel)		((tlabel) << 10)
#define HEADER_DESTINATION(destination)	((destination) << 16)
#define HEADER_SOURCE(source)		((source) << 16)
#define HEADER_RCODE(rcode)		((rcode) << 12)
#define HEADER_OFFSET_HIGH(offset_high)	((offset_high) << 0)
#define HEADER_DATA_LENGTH(length)	((length) << 16)
#define HEADER_EXTENDED_TCODE(tcode)	((tcode) << 0)

#define HEADER_GET_TCODE(q)		(((q) >> 4) & 0x0f)
#define HEADER_GET_TLABEL(q)		(((q) >> 10) & 0x3f)
#define HEADER_GET_RCODE(q)		(((q) >> 12) & 0x0f)
#define HEADER_GET_DESTINATION(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_SOURCE(q)		(((q) >> 16) & 0xffff)
#define HEADER_GET_OFFSET_HIGH(q)	(((q) >> 0) & 0xffff)
#define HEADER_GET_DATA_LENGTH(q)	(((q) >> 16) & 0xffff)
#define HEADER_GET_EXTENDED_TCODE(q)	(((q) >> 0) & 0xffff)

#define HEADER_DESTINATION_IS_BROADCAST(q) \
	(((q) & HEADER_DESTINATION(0x3f)) == HEADER_DESTINATION(0x3f))

#define PHY_PACKET_CONFIG	0x0
#define PHY_PACKET_LINK_ON	0x1
#define PHY_PACKET_SELF_ID	0x2

#define PHY_CONFIG_GAP_COUNT(gap_count)	(((gap_count) << 16) | (1 << 22))
#define PHY_CONFIG_ROOT_ID(node_id)	((((node_id) & 0x3f) << 24) | (1 << 23))
#define PHY_IDENTIFIER(id)		((id) << 30)

/* returns 0 if the split timeout handler is already running */
static int try_cancel_split_timeout(struct fw_transaction *t)
{
	if (t->is_split_transaction)
		return del_timer(&t->split_timeout_timer);
	else
		return 1;
}

static int close_transaction(struct fw_transaction *transaction,
			     struct fw_card *card, int rcode)
{
	struct fw_transaction *t;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(t, &card->transaction_list, link) {
		if (t == transaction) {
			if (!try_cancel_split_timeout(t)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&t->link);
			card->tlabel_mask &= ~(1ULL << t->tlabel);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&t->link != &card->transaction_list) {
		t->callback(card, rcode, NULL, 0, t->callback_data);
		return 0;
	}

 timed_out:
	return -ENOENT;
}

/*
 * Terminate a transaction that was submitted with fw_send_request().  Returns
 * 0 if the transaction was still outstanding (its callback is then invoked
 * with an appropriate rcode such as RCODE_CANCELLED), or -ENOENT if it had
 * already completed.
 */
int fw_cancel_transaction(struct fw_card *card,
			  struct fw_transaction *transaction)
{
	/*
	 * Cancel the packet transmission if it's still queued.  That
	 * will call the packet transmission callback which cancels
	 * the transaction.
	 */
	if (card->driver->cancel_packet(card, &transaction->packet) == 0)
		return 0;

	/*
	 * If the request packet has already been sent, we need to see
	 * if the transaction is still pending and remove it in that
	 * case.
	 */
	return close_transaction(transaction, card, RCODE_CANCELLED);
}
EXPORT_SYMBOL(fw_cancel_transaction);

static void split_transaction_timeout_callback(struct timer_list *timer)
{
	struct fw_transaction *t = from_timer(t, timer, split_timeout_timer);
	struct fw_card *card = t->card;
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	if (list_empty(&t->link)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}
	list_del(&t->link);
	card->tlabel_mask &= ~(1ULL << t->tlabel);
	spin_unlock_irqrestore(&card->lock, flags);

	t->callback(card, RCODE_CANCELLED, NULL, 0, t->callback_data);
}

static void start_split_transaction_timeout(struct fw_transaction *t,
					    struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);

	if (list_empty(&t->link) || WARN_ON(t->is_split_transaction)) {
		spin_unlock_irqrestore(&card->lock, flags);
		return;
	}

	t->is_split_transaction = true;
	mod_timer(&t->split_timeout_timer,
		  jiffies + card->split_timeout_jiffies);

	spin_unlock_irqrestore(&card->lock, flags);
}

static void transmit_complete_callback(struct fw_packet *packet,
				       struct fw_card *card, int status)
{
	struct fw_transaction *t =
		container_of(packet, struct fw_transaction, packet);

	switch (status) {
	case ACK_COMPLETE:
		close_transaction(t, card, RCODE_COMPLETE);
		break;
	case ACK_PENDING:
		start_split_transaction_timeout(t, card);
		break;
	case ACK_BUSY_X:
	case ACK_BUSY_A:
	case ACK_BUSY_B:
		close_transaction(t, card, RCODE_BUSY);
		break;
	case ACK_DATA_ERROR:
		close_transaction(t, card, RCODE_DATA_ERROR);
		break;
	case ACK_TYPE_ERROR:
		close_transaction(t, card, RCODE_TYPE_ERROR);
		break;
	default:
		/*
		 * Any other status is already an rcode as reported by the
		 * card driver, so forward it to the callback unchanged.
		 */
		close_transaction(t, card, status);
		break;
	}
}

static void fw_fill_request(struct fw_packet *packet, int tcode, int tlabel,
		int destination_id, int source_id, int generation, int speed,
		unsigned long long offset, void *payload, size_t length)
{
	int ext_tcode;

	if (tcode == TCODE_STREAM_DATA) {
		packet->header[0] =
			HEADER_DATA_LENGTH(length) |
			destination_id |
			HEADER_TCODE(TCODE_STREAM_DATA);
		packet->header_length = 4;
		packet->payload = payload;
		packet->payload_length = length;

		goto common;
	}

	if (tcode > 0x10) {
		ext_tcode = tcode & ~0x10;
		tcode = TCODE_LOCK_REQUEST;
	} else
		ext_tcode = 0;

	packet->header[0] =
		HEADER_RETRY(RETRY_X) |
		HEADER_TLABEL(tlabel) |
		HEADER_TCODE(tcode) |
		HEADER_DESTINATION(destination_id);
	packet->header[1] =
		HEADER_OFFSET_HIGH(offset >> 32) | HEADER_SOURCE(source_id);
	packet->header[2] =
		offset;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		packet->header[3] = *(u32 *)payload;
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	case TCODE_LOCK_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload = payload;
		packet->payload_length = length;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		packet->header_length = 12;
		packet->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		packet->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(ext_tcode);
		packet->header_length = 16;
		packet->payload_length = 0;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}
 common:
	packet->speed = speed;
	packet->generation = generation;
	packet->ack = 0;
	packet->payload_mapped = false;
}

static int allocate_tlabel(struct fw_card *card)
{
	int tlabel;

	tlabel = card->current_tlabel;
	while (card->tlabel_mask & (1ULL << tlabel)) {
		tlabel = (tlabel + 1) & 0x3f;
		if (tlabel == card->current_tlabel)
			return -EBUSY;
	}

	card->current_tlabel = (tlabel + 1) & 0x3f;
	card->tlabel_mask |= 1ULL << tlabel;

	return tlabel;
}

/**
 * fw_send_request() - submit a request packet for transmission
 * @card:		interface to send the request at
 * @t:			transaction instance to which the request belongs
 * @tcode:		transaction code
 * @destination_id:	destination node ID, consisting of bus_ID and phy_ID
 * @generation:		bus generation in which request and response are valid
 * @speed:		transmission speed
 * @offset:		48bit wide offset into destination's address space
 * @payload:		data payload for block requests
 * @length:		length of the payload, in bytes
 * @callback:		function to be called when the transaction is completed
 * @callback_data:	data to be passed to the transaction completion callback
 *
 * Submit a request packet into the asynchronous request transmission queue.
 * Can be called from atomic context.  If you prefer a blocking API, use
 * fw_run_transaction() in a context that can sleep.
 *
 * In case of lock requests, specify one of the firewire-core specific %TCODE_
 * constants instead of %TCODE_LOCK_REQUEST in @tcode.
 *
 * Make sure that the value in @destination_id is not older than the one in
 * @generation.  Otherwise the request is in danger to be sent to a wrong node.
 *
 * After the transaction completed successfully or unsuccessfully, @callback
 * will be called.  Among its parameters is the response code, which is either
 * one of the rcodes per IEEE 1394 or one of the firewire-core specific rcodes
 * such as %RCODE_SEND_ERROR or %RCODE_CANCELLED.
 */
void fw_send_request(struct fw_card *card, struct fw_transaction *t, int tcode,
		     int destination_id, int generation, int speed,
		     unsigned long long offset, void *payload, size_t length,
		     fw_transaction_callback_t callback, void *callback_data)
{
	unsigned long flags;
	int tlabel;

	/*
	 * Allocate tlabel from the bitmap and put the transaction on
	 * the list while holding the card spinlock.
	 */

	spin_lock_irqsave(&card->lock, flags);

	tlabel = allocate_tlabel(card);
	if (tlabel < 0) {
		spin_unlock_irqrestore(&card->lock, flags);
		callback(card, RCODE_SEND_ERROR, NULL, 0, callback_data);
		return;
	}

	t->node_id = destination_id;
	t->tlabel = tlabel;
	t->card = card;
	t->is_split_transaction = false;
	timer_setup(&t->split_timeout_timer,
		    split_transaction_timeout_callback, 0);
	t->callback = callback;
	t->callback_data = callback_data;

	fw_fill_request(&t->packet, tcode, t->tlabel,
			destination_id, card->node_id, generation,
			speed, offset, payload, length);
	t->packet.callback = transmit_complete_callback;

	list_add_tail(&t->link, &card->transaction_list);

	spin_unlock_irqrestore(&card->lock, flags);

	card->driver->send_request(card, &t->packet);
}
EXPORT_SYMBOL(fw_send_request);
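
/*
 * Illustrative sketch only (not part of this driver, kept compiled out): how a
 * client might issue an asynchronous quadlet read with fw_send_request().
 * struct my_probe, my_read_done() and my_start_read() are hypothetical names.
 */
#if 0
struct my_probe {
	struct fw_transaction t;
	__be32 buffer;
	struct completion done;
	int rcode;
};

static void my_read_done(struct fw_card *card, int rcode, void *payload,
			 size_t length, void *data)
{
	struct my_probe *probe = data;

	/* for a quadlet read, payload points at the 4-byte response data */
	if (rcode == RCODE_COMPLETE && length >= sizeof(probe->buffer))
		memcpy(&probe->buffer, payload, sizeof(probe->buffer));
	probe->rcode = rcode;
	complete(&probe->done);
}

static void my_start_read(struct fw_card *card, int node_id, int generation,
			  struct my_probe *probe)
{
	init_completion(&probe->done);
	/* read the first quadlet of the remote node's config ROM */
	fw_send_request(card, &probe->t, TCODE_READ_QUADLET_REQUEST, node_id,
			generation, SCODE_100,
			CSR_REGISTER_BASE + CSR_CONFIG_ROM, NULL, 4,
			my_read_done, probe);
}
#endif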

struct transaction_callback_data {
	struct completion done;
	void *payload;
	int rcode;
};

static void transaction_callback(struct fw_card *card, int rcode,
				 void *payload, size_t length, void *data)
{
	struct transaction_callback_data *d = data;

	if (rcode == RCODE_COMPLETE)
		memcpy(d->payload, payload, length);
	d->rcode = rcode;
	complete(&d->done);
}

/**
 * fw_run_transaction() - send request and sleep until transaction is completed
 * @card:		card interface for this request
 * @tcode:		transaction code
 * @destination_id:	destination node ID, consisting of bus_ID and phy_ID
 * @generation:		bus generation in which request and response are valid
 * @speed:		transmission speed
 * @offset:		48bit wide offset into destination's address space
 * @payload:		data payload for block requests
 * @length:		length of the payload, in bytes
 *
 * Returns the RCODE.  See fw_send_request() for parameter documentation.
 * Unlike fw_send_request(), @payload also receives the payload of the
 * response for read and lock requests.  Must be called from a context that
 * may sleep.
 */
int fw_run_transaction(struct fw_card *card, int tcode, int destination_id,
		       int generation, int speed, unsigned long long offset,
		       void *payload, size_t length)
{
	struct transaction_callback_data d;
	struct fw_transaction t;

	timer_setup_on_stack(&t.split_timeout_timer, NULL, 0);
	init_completion(&d.done);
	d.payload = payload;
	fw_send_request(card, &t, tcode, destination_id, generation, speed,
			offset, payload, length, transaction_callback, &d);
	wait_for_completion(&d.done);
	destroy_timer_on_stack(&t.split_timeout_timer);

	return d.rcode;
}
EXPORT_SYMBOL(fw_run_transaction);
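
/*
 * Illustrative sketch only (not part of this driver, compiled out): a blocking
 * quadlet read built on fw_run_transaction().  my_read_rom_quadlet() is a
 * hypothetical helper; note that @payload doubles as the response buffer for
 * read requests.
 */
#if 0
static int my_read_rom_quadlet(struct fw_device *device, int index, u32 *value)
{
	__be32 quadlet;
	int rcode;

	rcode = fw_run_transaction(device->card, TCODE_READ_QUADLET_REQUEST,
				   device->node_id, device->generation,
				   device->max_speed,
				   CSR_REGISTER_BASE + CSR_CONFIG_ROM + 4 * index,
				   &quadlet, 4);
	if (rcode != RCODE_COMPLETE)
		return -EIO;

	*value = be32_to_cpu(quadlet);
	return 0;
}
#endif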

static DEFINE_MUTEX(phy_config_mutex);
static DECLARE_COMPLETION(phy_config_done);

static void transmit_phy_packet_callback(struct fw_packet *packet,
					 struct fw_card *card, int status)
{
	complete(&phy_config_done);
}

static struct fw_packet phy_config_packet = {
	.header_length	= 12,
	.header[0]	= TCODE_LINK_INTERNAL << 4,
	.payload_length	= 0,
	.speed		= SCODE_100,
	.callback	= transmit_phy_packet_callback,
};

void fw_send_phy_config(struct fw_card *card,
			int node_id, int generation, int gap_count)
{
	long timeout = DIV_ROUND_UP(HZ, 10);
	u32 data = PHY_IDENTIFIER(PHY_PACKET_CONFIG);

	if (node_id != FW_PHY_CONFIG_NO_NODE_ID)
		data |= PHY_CONFIG_ROOT_ID(node_id);

	if (gap_count == FW_PHY_CONFIG_CURRENT_GAP_COUNT) {
		gap_count = card->driver->read_phy_reg(card, 1);
		if (gap_count < 0)
			return;

		gap_count &= 63;
		if (gap_count == 63)
			return;
	}
	data |= PHY_CONFIG_GAP_COUNT(gap_count);

	mutex_lock(&phy_config_mutex);

	phy_config_packet.header[1] = data;
	phy_config_packet.header[2] = ~data;
	phy_config_packet.generation = generation;
	reinit_completion(&phy_config_done);

	card->driver->send_request(card, &phy_config_packet);
	wait_for_completion_timeout(&phy_config_done, timeout);

	mutex_unlock(&phy_config_mutex);
}
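
/*
 * Illustrative sketch only (not part of this driver, compiled out): how bus
 * management code might broadcast a PHY configuration packet that only
 * updates the gap count.  my_set_gap_count() and the gap count value 5 are
 * hypothetical.
 */
#if 0
static void my_set_gap_count(struct fw_card *card, int generation)
{
	/* keep the current root node, only announce a new gap count */
	fw_send_phy_config(card, FW_PHY_CONFIG_NO_NODE_ID, generation, 5);
}
#endif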

static struct fw_address_handler *lookup_overlapping_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (handler->offset < offset + length &&
		    offset < handler->offset + handler->length)
			return handler;
	}

	return NULL;
}

static bool is_enclosing_handler(struct fw_address_handler *handler,
				 unsigned long long offset, size_t length)
{
	return handler->offset <= offset &&
		offset + length <= handler->offset + handler->length;
}

static struct fw_address_handler *lookup_enclosing_address_handler(
	struct list_head *list, unsigned long long offset, size_t length)
{
	struct fw_address_handler *handler;

	list_for_each_entry_rcu(handler, list, link) {
		if (is_enclosing_handler(handler, offset, length))
			return handler;
	}

	return NULL;
}

static DEFINE_SPINLOCK(address_handler_list_lock);
static LIST_HEAD(address_handler_list);

const struct fw_address_region fw_high_memory_region =
	{ .start = FW_MAX_PHYSICAL_RANGE, .end = 0xffffe0000000ULL, };
EXPORT_SYMBOL(fw_high_memory_region);

static const struct fw_address_region low_memory_region =
	{ .start = 0x000000000000ULL, .end = FW_MAX_PHYSICAL_RANGE, };

#if 0
const struct fw_address_region fw_private_region =
	{ .start = 0xffffe0000000ULL, .end = 0xfffff0000000ULL, };
const struct fw_address_region fw_csr_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM_END, };
const struct fw_address_region fw_unit_space_region =
	{ .start = 0xfffff0000900ULL, .end = 0x1000000000000ULL, };
#endif

static bool is_in_fcp_region(u64 offset, size_t length)
{
	return offset >= (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	       offset + length <= (CSR_REGISTER_BASE | CSR_FCP_END);
}

/**
 * fw_core_add_address_handler() - register for incoming requests
 * @handler:	callback
 * @region:	region in the IEEE 1212 node space address range
 *
 * region->start, ->end, and handler->length have to be quadlet-aligned.
 *
 * When a request is received that falls within the specified address range,
 * the specified callback is invoked.  The parameters passed to the callback
 * give the details of the particular request.
 *
 * To be called in process context.
 * Return value:  0 on success, non-zero otherwise.
 *
 * The start offset of the handler's address region is determined by
 * fw_core_add_address_handler() and is returned in handler->offset.
 *
 * Address allocations are exclusive, except for the FCP registers.
 */
int fw_core_add_address_handler(struct fw_address_handler *handler,
				const struct fw_address_region *region)
{
	struct fw_address_handler *other;
	int ret = -EBUSY;

	if (region->start & 0xffff000000000003ULL ||
	    region->start >= region->end ||
	    region->end   > 0x0001000000000000ULL ||
	    handler->length & 3 ||
	    handler->length == 0)
		return -EINVAL;

	spin_lock(&address_handler_list_lock);

	handler->offset = region->start;
	while (handler->offset + handler->length <= region->end) {
		if (is_in_fcp_region(handler->offset, handler->length))
			other = NULL;
		else
			other = lookup_overlapping_address_handler
					(&address_handler_list,
					 handler->offset, handler->length);
		if (other != NULL) {
			handler->offset += other->length;
		} else {
			list_add_tail_rcu(&handler->link, &address_handler_list);
			ret = 0;
			break;
		}
	}

	spin_unlock(&address_handler_list_lock);

	return ret;
}
EXPORT_SYMBOL(fw_core_add_address_handler);
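
/*
 * Illustrative sketch only (not part of this driver, compiled out): registering
 * a handler for a quadlet-sized register somewhere in fw_high_memory_region.
 * my_handler, my_callback and my_register are hypothetical; a real handler
 * must answer every non-FCP request with fw_send_response().
 */
#if 0
static void my_callback(struct fw_card *card, struct fw_request *request,
			int tcode, int destination, int source, int generation,
			unsigned long long offset, void *payload, size_t length,
			void *callback_data)
{
	if (tcode == TCODE_READ_QUADLET_REQUEST) {
		/* the quadlet written here becomes the response payload */
		*(__be32 *)payload = cpu_to_be32(0);
		fw_send_response(card, request, RCODE_COMPLETE);
	} else {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
	}
}

static struct fw_address_handler my_handler = {
	.length			= 4,
	.address_callback	= my_callback,
};

static int my_register(void)
{
	/* the allocated start address is returned in my_handler.offset */
	return fw_core_add_address_handler(&my_handler, &fw_high_memory_region);
}
#endif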

/**
 * fw_core_remove_address_handler() - unregister an address handler
 * @handler: callback
 *
 * To be called in process context.
 *
 * When fw_core_remove_address_handler() returns, @handler->callback() is
 * guaranteed to not run on any CPU anymore.
 */
void fw_core_remove_address_handler(struct fw_address_handler *handler)
{
	spin_lock(&address_handler_list_lock);
	list_del_rcu(&handler->link);
	spin_unlock(&address_handler_list_lock);
	synchronize_rcu();
}
EXPORT_SYMBOL(fw_core_remove_address_handler);

struct fw_request {
	struct fw_packet response;
	u32 request_header[4];
	int ack;
	u32 length;
	u32 data[];
};

static void free_response_callback(struct fw_packet *packet,
				   struct fw_card *card, int status)
{
	struct fw_request *request;

	request = container_of(packet, struct fw_request, response);
	kfree(request);
}

int fw_get_response_length(struct fw_request *r)
{
	int tcode, ext_tcode, data_length;

	tcode = HEADER_GET_TCODE(r->request_header[0]);

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		return 0;

	case TCODE_READ_QUADLET_REQUEST:
		return 4;

	case TCODE_READ_BLOCK_REQUEST:
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		return data_length;

	case TCODE_LOCK_REQUEST:
		ext_tcode = HEADER_GET_EXTENDED_TCODE(r->request_header[3]);
		data_length = HEADER_GET_DATA_LENGTH(r->request_header[3]);
		switch (ext_tcode) {
		case EXTCODE_FETCH_ADD:
		case EXTCODE_LITTLE_ADD:
			return data_length;
		default:
			return data_length / 2;
		}

	default:
		WARN(1, "wrong tcode %d\n", tcode);
		return 0;
	}
}

void fw_fill_response(struct fw_packet *response, u32 *request_header,
		      int rcode, void *payload, size_t length)
{
	int tcode, tlabel, extended_tcode, source, destination;

	tcode = HEADER_GET_TCODE(request_header[0]);
	tlabel = HEADER_GET_TLABEL(request_header[0]);
	source = HEADER_GET_DESTINATION(request_header[0]);
	destination = HEADER_GET_SOURCE(request_header[1]);
	extended_tcode = HEADER_GET_EXTENDED_TCODE(request_header[3]);

	response->header[0] =
		HEADER_RETRY(RETRY_1) |
		HEADER_TLABEL(tlabel) |
		HEADER_DESTINATION(destination);
	response->header[1] =
		HEADER_SOURCE(source) |
		HEADER_RCODE(rcode);
	response->header[2] = 0;

	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
	case TCODE_WRITE_BLOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(TCODE_WRITE_RESPONSE);
		response->header_length = 12;
		response->payload_length = 0;
		break;

	case TCODE_READ_QUADLET_REQUEST:
		response->header[0] |=
			HEADER_TCODE(TCODE_READ_QUADLET_RESPONSE);
		if (payload != NULL)
			response->header[3] = *(u32 *)payload;
		else
			response->header[3] = 0;
		response->header_length = 16;
		response->payload_length = 0;
		break;

	case TCODE_READ_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		response->header[0] |= HEADER_TCODE(tcode + 2);
		response->header[3] =
			HEADER_DATA_LENGTH(length) |
			HEADER_EXTENDED_TCODE(extended_tcode);
		response->header_length = 16;
		response->payload = payload;
		response->payload_length = length;
		break;

	default:
		WARN(1, "wrong tcode %d\n", tcode);
	}

	response->payload_mapped = false;
}
EXPORT_SYMBOL(fw_fill_response);

static u32 compute_split_timeout_timestamp(struct fw_card *card,
					   u32 request_timestamp)
{
	unsigned int cycles;
	u32 timestamp;

	cycles = card->split_timeout_cycles;
	cycles += request_timestamp & 0x1fff;

	timestamp = request_timestamp & ~0x1fff;
	timestamp += (cycles / 8000) << 13;
	timestamp |= cycles % 8000;

	return timestamp;
}
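
/*
 * Worked example for compute_split_timeout_timestamp(), with illustrative
 * numbers: if split_timeout_cycles is 800 (100 ms) and the request timestamp
 * has a seconds field of 2 and a cycle field of 7900, then
 * cycles = 800 + 7900 = 8700.  The returned timestamp carries the seconds
 * field incremented by 8700 / 8000 = 1 (giving 3) and a cycle field of
 * 8700 % 8000 = 700, i.e. the response deadline lies 800 cycles after the
 * request was received.
 */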

static struct fw_request *allocate_request(struct fw_card *card,
					   struct fw_packet *p)
{
	struct fw_request *request;
	u32 *data, length;
	int request_tcode;

	request_tcode = HEADER_GET_TCODE(p->header[0]);
	switch (request_tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		data = &p->header[3];
		length = 4;
		break;

	case TCODE_WRITE_BLOCK_REQUEST:
	case TCODE_LOCK_REQUEST:
		data = p->payload;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	case TCODE_READ_QUADLET_REQUEST:
		data = NULL;
		length = 4;
		break;

	case TCODE_READ_BLOCK_REQUEST:
		data = NULL;
		length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		fw_notice(card, "ERROR - corrupt request received - %08x %08x %08x\n",
			  p->header[0], p->header[1], p->header[2]);
		return NULL;
	}

	request = kmalloc(sizeof(*request) + length, GFP_ATOMIC);
	if (request == NULL)
		return NULL;

	request->response.speed = p->speed;
	request->response.timestamp =
			compute_split_timeout_timestamp(card, p->timestamp);
	request->response.generation = p->generation;
	request->response.ack = 0;
	request->response.callback = free_response_callback;
	request->ack = p->ack;
	request->length = length;
	if (data)
		memcpy(request->data, data, length);

	memcpy(request->request_header, p->header, sizeof(p->header));

	return request;
}

void fw_send_response(struct fw_card *card,
		      struct fw_request *request, int rcode)
{
	if (WARN_ONCE(!request, "invalid for FCP address handlers"))
		return;

	/* unified transaction or broadcast transaction: don't respond */
	if (request->ack != ACK_PENDING ||
	    HEADER_DESTINATION_IS_BROADCAST(request->request_header[0])) {
		kfree(request);
		return;
	}

	if (rcode == RCODE_COMPLETE)
		fw_fill_response(&request->response, request->request_header,
				 rcode, request->data,
				 fw_get_response_length(request));
	else
		fw_fill_response(&request->response, request->request_header,
				 rcode, NULL, 0);

	card->driver->send_response(card, &request->response);
}
EXPORT_SYMBOL(fw_send_response);

/**
 * fw_get_request_speed() - returns speed at which the @request was received
 * @request: firewire request data
 */
int fw_get_request_speed(struct fw_request *request)
{
	return request->response.speed;
}
EXPORT_SYMBOL(fw_get_request_speed);

static void handle_exclusive_region_request(struct fw_card *card,
					    struct fw_packet *p,
					    struct fw_request *request,
					    unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	destination = HEADER_GET_DESTINATION(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);
	tcode = HEADER_GET_TCODE(p->header[0]);
	if (tcode == TCODE_LOCK_REQUEST)
		tcode = 0x10 + HEADER_GET_EXTENDED_TCODE(p->header[3]);

	rcu_read_lock();
	handler = lookup_enclosing_address_handler(&address_handler_list,
						   offset, request->length);
	if (handler)
		handler->address_callback(card, request,
					  tcode, destination, source,
					  p->generation, offset,
					  request->data, request->length,
					  handler->callback_data);
	rcu_read_unlock();

	if (!handler)
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
}

static void handle_fcp_region_request(struct fw_card *card,
				      struct fw_packet *p,
				      struct fw_request *request,
				      unsigned long long offset)
{
	struct fw_address_handler *handler;
	int tcode, destination, source;

	if ((offset != (CSR_REGISTER_BASE | CSR_FCP_COMMAND) &&
	     offset != (CSR_REGISTER_BASE | CSR_FCP_RESPONSE)) ||
	    request->length > 0x200) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);

		return;
	}

	tcode = HEADER_GET_TCODE(p->header[0]);
	destination = HEADER_GET_DESTINATION(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);

	if (tcode != TCODE_WRITE_QUADLET_REQUEST &&
	    tcode != TCODE_WRITE_BLOCK_REQUEST) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);

		return;
	}

	rcu_read_lock();
	list_for_each_entry_rcu(handler, &address_handler_list, link) {
		if (is_enclosing_handler(handler, offset, request->length))
			handler->address_callback(card, NULL, tcode,
						  destination, source,
						  p->generation, offset,
						  request->data,
						  request->length,
						  handler->callback_data);
	}
	rcu_read_unlock();

	fw_send_response(card, request, RCODE_COMPLETE);
}

void fw_core_handle_request(struct fw_card *card, struct fw_packet *p)
{
	struct fw_request *request;
	unsigned long long offset;

	if (p->ack != ACK_PENDING && p->ack != ACK_COMPLETE)
		return;

	if (TCODE_IS_LINK_INTERNAL(HEADER_GET_TCODE(p->header[0]))) {
		fw_cdev_handle_phy_packet(card, p);
		return;
	}

	request = allocate_request(card, p);
	if (request == NULL) {
		/* No memory to handle the request; let the requester time out. */
		return;
	}

	offset = ((u64)HEADER_GET_OFFSET_HIGH(p->header[1]) << 32) |
		p->header[2];

	if (!is_in_fcp_region(offset, request->length))
		handle_exclusive_region_request(card, p, request, offset);
	else
		handle_fcp_region_request(card, p, request, offset);

}
EXPORT_SYMBOL(fw_core_handle_request);

void fw_core_handle_response(struct fw_card *card, struct fw_packet *p)
{
	struct fw_transaction *t;
	unsigned long flags;
	u32 *data;
	size_t data_length;
	int tcode, tlabel, source, rcode;

	tcode = HEADER_GET_TCODE(p->header[0]);
	tlabel = HEADER_GET_TLABEL(p->header[0]);
	source = HEADER_GET_SOURCE(p->header[1]);
	rcode = HEADER_GET_RCODE(p->header[1]);

	spin_lock_irqsave(&card->lock, flags);
	list_for_each_entry(t, &card->transaction_list, link) {
		if (t->node_id == source && t->tlabel == tlabel) {
			if (!try_cancel_split_timeout(t)) {
				spin_unlock_irqrestore(&card->lock, flags);
				goto timed_out;
			}
			list_del_init(&t->link);
			card->tlabel_mask &= ~(1ULL << t->tlabel);
			break;
		}
	}
	spin_unlock_irqrestore(&card->lock, flags);

	if (&t->link == &card->transaction_list) {
 timed_out:
		fw_notice(card, "unsolicited response (source %x, tlabel %x)\n",
			  source, tlabel);
		return;
	}

	/*
	 * The header already identified the matching transaction above;
	 * now pick up the returned payload, if the tcode defines one.
	 */
	switch (tcode) {
	case TCODE_READ_QUADLET_RESPONSE:
		data = (u32 *) &p->header[3];
		data_length = 4;
		break;

	case TCODE_WRITE_RESPONSE:
		data = NULL;
		data_length = 0;
		break;

	case TCODE_READ_BLOCK_RESPONSE:
	case TCODE_LOCK_RESPONSE:
		data = p->payload;
		data_length = HEADER_GET_DATA_LENGTH(p->header[3]);
		break;

	default:
		/* No payload is defined for other response tcodes. */
		data = NULL;
		data_length = 0;
		break;
	}

	/*
	 * The response handler may be executed while the request handler
	 * is still pending.  Cancel the request handler.
	 */
	card->driver->cancel_packet(card, &t->packet);

	t->callback(card, rcode, data, data_length, t->callback_data);
}
EXPORT_SYMBOL(fw_core_handle_response);

/**
 * fw_rcode_string - convert a firewire result code to an error description
 * @rcode: the result code
 */
const char *fw_rcode_string(int rcode)
{
	static const char *const names[] = {
		[RCODE_COMPLETE]       = "no error",
		[RCODE_CONFLICT_ERROR] = "conflict error",
		[RCODE_DATA_ERROR]     = "data error",
		[RCODE_TYPE_ERROR]     = "type error",
		[RCODE_ADDRESS_ERROR]  = "address error",
		[RCODE_SEND_ERROR]     = "send error",
		[RCODE_CANCELLED]      = "timeout",
		[RCODE_BUSY]           = "busy",
		[RCODE_GENERATION]     = "bus reset",
		[RCODE_NO_ACK]         = "no ack",
	};

	if ((unsigned int)rcode < ARRAY_SIZE(names) && names[rcode])
		return names[rcode];
	else
		return "unknown";
}
EXPORT_SYMBOL(fw_rcode_string);

static const struct fw_address_region topology_map_region =
	{ .start = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP,
	  .end   = CSR_REGISTER_BASE | CSR_TOPOLOGY_MAP_END, };

static void handle_topology_map(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int start;

	if (!TCODE_IS_READ_REQUEST(tcode)) {
		fw_send_response(card, request, RCODE_TYPE_ERROR);
		return;
	}

	if ((offset & 3) > 0 || (length & 3) > 0) {
		fw_send_response(card, request, RCODE_ADDRESS_ERROR);
		return;
	}

	start = (offset - topology_map_region.start) / 4;
	memcpy(payload, &card->topology_map[start], length);

	fw_send_response(card, request, RCODE_COMPLETE);
}

static struct fw_address_handler topology_map = {
	.length			= 0x400,
	.address_callback	= handle_topology_map,
};

static const struct fw_address_region registers_region =
	{ .start = CSR_REGISTER_BASE,
	  .end   = CSR_REGISTER_BASE | CSR_CONFIG_ROM, };

static void update_split_timeout(struct fw_card *card)
{
	unsigned int cycles;

	cycles = card->split_timeout_hi * 8000 + (card->split_timeout_lo >> 19);

	/* clamp to the range 100 ms .. 3 s allowed for the split timeout */
	cycles = clamp(cycles, 800u, 3u * 8000u);

	card->split_timeout_cycles = cycles;
	card->split_timeout_jiffies = DIV_ROUND_UP(cycles * HZ, 8000);
}
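
/*
 * Worked example for update_split_timeout(), with illustrative numbers: with
 * split_timeout_hi = 0 seconds and split_timeout_lo = 0x19000000 (800 in its
 * upper 13 bits), cycles = 0 * 8000 + (0x19000000 >> 19) = 800.  That is
 * already within the clamp range, so split_timeout_cycles becomes 800 and
 * split_timeout_jiffies becomes DIV_ROUND_UP(800 * HZ, 8000), i.e. 100 ms
 * worth of jiffies.
 */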

static void handle_registers(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	int reg = offset & ~CSR_REGISTER_BASE;
	__be32 *data = payload;
	int rcode = RCODE_COMPLETE;
	unsigned long flags;

	switch (reg) {
	case CSR_PRIORITY_BUDGET:
		if (!card->priority_budget_implemented) {
			rcode = RCODE_ADDRESS_ERROR;
			break;
		}
		fallthrough;

	case CSR_NODE_IDS:
		/*
		 * Reads and writes are forwarded to the card driver's CSR
		 * access, just like the driver-managed registers below.
		 */
		fallthrough;

	case CSR_STATE_CLEAR:
	case CSR_STATE_SET:
	case CSR_CYCLE_TIME:
	case CSR_BUS_TIME:
	case CSR_BUSY_TIMEOUT:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->driver->read_csr(card, reg));
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, reg, be32_to_cpu(*data));
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_RESET_START:
		if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->driver->write_csr(card, CSR_STATE_CLEAR,
						CSR_STATE_BIT_ABDICATE);
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_SPLIT_TIMEOUT_HI:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_hi);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_hi = be32_to_cpu(*data) & 7;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_SPLIT_TIMEOUT_LO:
		if (tcode == TCODE_READ_QUADLET_REQUEST) {
			*data = cpu_to_be32(card->split_timeout_lo);
		} else if (tcode == TCODE_WRITE_QUADLET_REQUEST) {
			spin_lock_irqsave(&card->lock, flags);
			card->split_timeout_lo =
					be32_to_cpu(*data) & 0xfff80000;
			update_split_timeout(card);
			spin_unlock_irqrestore(&card->lock, flags);
		} else {
			rcode = RCODE_TYPE_ERROR;
		}
		break;

	case CSR_MAINT_UTILITY:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = card->maint_utility_register;
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->maint_utility_register = *data;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BROADCAST_CHANNEL:
		if (tcode == TCODE_READ_QUADLET_REQUEST)
			*data = cpu_to_be32(card->broadcast_channel);
		else if (tcode == TCODE_WRITE_QUADLET_REQUEST)
			card->broadcast_channel =
			    (be32_to_cpu(*data) & BROADCAST_CHANNEL_VALID) |
			    BROADCAST_CHANNEL_INITIAL;
		else
			rcode = RCODE_TYPE_ERROR;
		break;

	case CSR_BUS_MANAGER_ID:
	case CSR_BANDWIDTH_AVAILABLE:
	case CSR_CHANNELS_AVAILABLE_HI:
	case CSR_CHANNELS_AVAILABLE_LO:
		/*
		 * These lock registers are serviced by the controller
		 * hardware's compare-swap unit; the stack never sees such
		 * requests, hence the BUG() if one ever gets here.
		 */
		BUG();
		break;

	default:
		rcode = RCODE_ADDRESS_ERROR;
		break;
	}

	fw_send_response(card, request, rcode);
}

static struct fw_address_handler registers = {
	.length			= 0x400,
	.address_callback	= handle_registers,
};

static void handle_low_memory(struct fw_card *card, struct fw_request *request,
		int tcode, int destination, int source, int generation,
		unsigned long long offset, void *payload, size_t length,
		void *callback_data)
{
	/*
	 * This catches requests not handled by the physical DMA unit,
	 * i.e., wrong transaction types or unauthorized source nodes.
	 */
	fw_send_response(card, request, RCODE_TYPE_ERROR);
}

static struct fw_address_handler low_memory = {
	.length			= FW_MAX_PHYSICAL_RANGE,
	.address_callback	= handle_low_memory,
};

MODULE_AUTHOR("Kristian Hoegsberg <krh@bitplanet.net>");
MODULE_DESCRIPTION("Core IEEE1394 transaction logic");
MODULE_LICENSE("GPL");

static const u32 vendor_textual_descriptor[] = {
	/* textual descriptor leaf ("Linux Firewire") */
	0x00060000,
	0x00000000,
	0x00000000,
	0x4c696e75,		/* L i n u */
	0x78204669,		/* x   F i */
	0x72657769,		/* r e w i */
	0x72650000,		/* r e     */
};

static const u32 model_textual_descriptor[] = {
	/* model descriptor leaf ("Juju") */
	0x00030000,
	0x00000000,
	0x00000000,
	0x4a756a75,		/* J u j u */
};

static struct fw_descriptor vendor_id_descriptor = {
	.length		= ARRAY_SIZE(vendor_textual_descriptor),
	.immediate	= 0x03001f11,
	.key		= 0x81000000,
	.data		= vendor_textual_descriptor,
};

static struct fw_descriptor model_id_descriptor = {
	.length		= ARRAY_SIZE(model_textual_descriptor),
	.immediate	= 0x17023901,
	.key		= 0x81000000,
	.data		= model_textual_descriptor,
};

static int __init fw_core_init(void)
{
	int ret;

	fw_workqueue = alloc_workqueue("firewire", WQ_MEM_RECLAIM, 0);
	if (!fw_workqueue)
		return -ENOMEM;

	ret = bus_register(&fw_bus_type);
	if (ret < 0) {
		destroy_workqueue(fw_workqueue);
		return ret;
	}

	fw_cdev_major = register_chrdev(0, "firewire", &fw_device_ops);
	if (fw_cdev_major < 0) {
		bus_unregister(&fw_bus_type);
		destroy_workqueue(fw_workqueue);
		return fw_cdev_major;
	}

	fw_core_add_address_handler(&topology_map, &topology_map_region);
	fw_core_add_address_handler(&registers, &registers_region);
	fw_core_add_address_handler(&low_memory, &low_memory_region);
	fw_core_add_descriptor(&vendor_id_descriptor);
	fw_core_add_descriptor(&model_id_descriptor);

	return 0;
}

static void __exit fw_core_cleanup(void)
{
	unregister_chrdev(fw_cdev_major, "firewire");
	bus_unregister(&fw_bus_type);
	destroy_workqueue(fw_workqueue);
	idr_destroy(&fw_device_idr);
}

module_init(fw_core_init);
module_exit(fw_core_cleanup);