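/*
 * Incremental bus scan, based on bus topology
 *
 * Builds the tree of fw_node structures from the self-ID packets
 * collected after a bus reset, and updates an existing tree in place,
 * emitting FW_NODE_* events for nodes that appeared, disappeared or
 * changed.
 */
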
#include <linux/bug.h>
#include <linux/errno.h>
#include <linux/firewire.h>
#include <linux/firewire-constants.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "core.h"

#define SELF_ID_PHY_ID(q)		(((q) >> 24) & 0x3f)
#define SELF_ID_EXTENDED(q)		(((q) >> 23) & 0x01)
#define SELF_ID_LINK_ON(q)		(((q) >> 22) & 0x01)
#define SELF_ID_GAP_COUNT(q)		(((q) >> 16) & 0x3f)
#define SELF_ID_PHY_SPEED(q)		(((q) >> 14) & 0x03)
#define SELF_ID_CONTENDER(q)		(((q) >> 11) & 0x01)
#define SELF_ID_PHY_INITIATOR(q)	(((q) >> 1) & 0x01)
#define SELF_ID_MORE_PACKETS(q)		(((q) >> 0) & 0x01)

#define SELF_ID_EXT_SEQUENCE(q)		(((q) >> 20) & 0x07)

#define SELFID_PORT_CHILD	0x3
#define SELFID_PORT_PARENT	0x2
#define SELFID_PORT_NCONN	0x1
#define SELFID_PORT_NONE	0x0
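
/*
 * Worked example (illustrative value, not taken from a real bus):
 * decoding the self-ID quadlet 0x814588b0 with the macros above gives
 *
 *	SELF_ID_PHY_ID()	1	(node with PHY ID 1)
 *	SELF_ID_EXTENDED()	0	(first, non-extended quadlet)
 *	SELF_ID_LINK_ON()	1
 *	SELF_ID_GAP_COUNT()	5
 *	SELF_ID_PHY_SPEED()	2	(SCODE_400)
 *	SELF_ID_CONTENDER()	1
 *	SELF_ID_MORE_PACKETS()	0	(no extended quadlets follow)
 *
 * and port fields p0 = SELFID_PORT_PARENT, p1 = SELFID_PORT_CHILD,
 * p2 = SELFID_PORT_NONE (bits 7:6, 5:4 and 3:2, see get_port_type()).
 */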

static u32 *count_ports(u32 *sid, int *total_port_count, int *child_port_count)
{
	u32 q;
	int port_type, shift, seq;

	*total_port_count = 0;
	*child_port_count = 0;

	shift = 6;
	q = *sid;
	seq = 0;

	while (1) {
		port_type = (q >> shift) & 0x03;
		switch (port_type) {
		case SELFID_PORT_CHILD:
			(*child_port_count)++;
			fallthrough;
		case SELFID_PORT_PARENT:
		case SELFID_PORT_NCONN:
			(*total_port_count)++;
			fallthrough;
		case SELFID_PORT_NONE:
			break;
		}

		shift -= 2;
		if (shift == 0) {
			if (!SELF_ID_MORE_PACKETS(q))
				return sid + 1;

			shift = 16;
			sid++;
			q = *sid;

			/*
			 * Check that the extra packets actually are
			 * extended self ID packets and that the
			 * sequence numbers in the extended self ID
			 * packets increase as expected.
			 */
			if (!SELF_ID_EXTENDED(q) ||
			    seq != SELF_ID_EXT_SEQUENCE(q))
				return NULL;

			seq++;
		}
	}
}

/*
 * Self-ID packets encode three port fields in the first quadlet (bits
 * 7:6, 5:4, 3:2) and eight more per extended quadlet (bits 17:16 down
 * to 3:2).  Map a zero-based port index onto the right quadlet and bit
 * position.
 */
static int get_port_type(u32 *sid, int port_index)
{
	int index, shift;

	index = (port_index + 5) / 8;
	shift = 16 - ((port_index + 5) & 7) * 2;
	return (sid[index] >> shift) & 0x03;
}

static struct fw_node *fw_node_create(u32 sid, int port_count, int color)
{
	struct fw_node *node;

	node = kzalloc(struct_size(node, ports, port_count), GFP_ATOMIC);
	if (node == NULL)
		return NULL;

	node->color = color;
	node->node_id = LOCAL_BUS | SELF_ID_PHY_ID(sid);
	node->link_on = SELF_ID_LINK_ON(sid);
	node->phy_speed = SELF_ID_PHY_SPEED(sid);
	node->initiated_reset = SELF_ID_PHY_INITIATOR(sid);
	node->port_count = port_count;

	refcount_set(&node->ref_count, 1);
	INIT_LIST_HEAD(&node->link);

	return node;
}
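
/*
 * Note on allocation and lifetime: fw_node_create() is reached from
 * fw_core_handle_bus_reset() with card->lock held and interrupts
 * disabled, hence GFP_ATOMIC.  Each node starts with a reference count
 * of one; for_each_fw_node() below takes an additional temporary
 * reference while a node sits on its traversal list, and
 * report_lost_node() drops the original reference once a node has
 * disappeared from the topology.
 */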

/*
 * Compute the maximum hop count for this node and its children.  The
 * maximum hop count is the maximum number of connections between any two
 * nodes in the subtree rooted at this node.  We need this for setting the
 * gap count.  As we build the tree bottom up in build_tree() below, this
 * does not require a full tree walk.
 */
static void update_hop_count(struct fw_node *node)
{
	int depths[2] = { -1, -1 };
	int max_child_hops = 0;
	int i;

	for (i = 0; i < node->port_count; i++) {
		if (node->ports[i] == NULL)
			continue;

		if (node->ports[i]->max_hops > max_child_hops)
			max_child_hops = node->ports[i]->max_hops;

		if (node->ports[i]->max_depth > depths[0]) {
			depths[1] = depths[0];
			depths[0] = node->ports[i]->max_depth;
		} else if (node->ports[i]->max_depth > depths[1])
			depths[1] = node->ports[i]->max_depth;
	}

	node->max_depth = depths[0] + 1;
	node->max_hops = max(max_child_hops, depths[0] + depths[1] + 2);
}
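
/*
 * Example: for a leaf node both depths stay -1, so max_depth = 0 and
 * max_hops = 0.  For a node with two leaf children, depths[] = {0, 0},
 * giving max_depth = 1 and max_hops = 0 + 0 + 2 = 2 (leaf - node - leaf).
 * Tracking only the two largest child depths is sufficient because the
 * longest path through a node uses at most its two deepest subtrees.
 */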

static inline struct fw_node *fw_node(struct list_head *l)
{
	return list_entry(l, struct fw_node, link);
}

/*
 * This function builds the tree representation of the topology given
 * by the self IDs from the latest bus reset.  During the construction
 * of the tree, the function checks that the self IDs are valid and
 * internally consistent.  On success this function returns the
 * fw_node corresponding to the local card, otherwise NULL.
 */
static struct fw_node *build_tree(struct fw_card *card,
				  u32 *sid, int self_id_count)
{
	struct fw_node *node, *child, *local_node, *irm_node;
	struct list_head stack, *h;
	u32 *next_sid, *end, q;
	int i, port_count, child_port_count, phy_id, parent_count, stack_depth;
	int gap_count;
	bool beta_repeaters_present;

	local_node = NULL;
	node = NULL;
	INIT_LIST_HEAD(&stack);
	stack_depth = 0;
	end = sid + self_id_count;
	phy_id = 0;
	irm_node = NULL;
	gap_count = SELF_ID_GAP_COUNT(*sid);
	beta_repeaters_present = false;

	while (sid < end) {
		next_sid = count_ports(sid, &port_count, &child_port_count);

		if (next_sid == NULL) {
			fw_err(card, "inconsistent extended self IDs\n");
			return NULL;
		}

		q = *sid;
		if (phy_id != SELF_ID_PHY_ID(q)) {
			fw_err(card, "PHY ID mismatch in self ID: %d != %d\n",
			       phy_id, SELF_ID_PHY_ID(q));
			return NULL;
		}

		if (child_port_count > stack_depth) {
			fw_err(card, "topology stack underflow\n");
			return NULL;
		}

		/*
		 * Seek back from the top of our stack to find the
		 * start of the child nodes for this node.
		 */
		for (i = 0, h = &stack; i < child_port_count; i++)
			h = h->prev;
		/*
		 * When the stack is empty, this yields an invalid value,
		 * but that pointer will never be dereferenced.
		 */
		child = fw_node(h);

		node = fw_node_create(q, port_count, card->color);
		if (node == NULL) {
			fw_err(card, "out of memory while building topology\n");
			return NULL;
		}

		if (phy_id == (card->node_id & 0x3f))
			local_node = node;

		if (SELF_ID_CONTENDER(q))
			irm_node = node;

		parent_count = 0;

		for (i = 0; i < port_count; i++) {
			switch (get_port_type(sid, i)) {
			case SELFID_PORT_PARENT:
				/*
				 * We don't know the parent node at this
				 * time, so we temporarily abuse
				 * node->color for remembering the entry
				 * in the node->ports array where the
				 * parent node should be.  Later, when we
				 * handle the parent node, we fix up the
				 * reference.
				 */
				parent_count++;
				node->color = i;
				break;

			case SELFID_PORT_CHILD:
				node->ports[i] = child;
				/*
				 * Fix up parent reference for this
				 * child node.
				 */
				child->ports[child->color] = node;
				child->color = card->color;
				child = fw_node(child->link.next);
				break;
			}
		}

		/*
		 * Check that the node reports exactly one parent
		 * port, except for the root, which of course should
		 * have no parents.
		 */
		if ((next_sid == end && parent_count != 0) ||
		    (next_sid < end && parent_count != 1)) {
			fw_err(card, "parent port inconsistency for node %d: parent_count=%d\n",
			       phy_id, parent_count);
			return NULL;
		}

		/* Pop the child nodes off the stack and push the new node. */
		__list_del(h->prev, &stack);
		list_add_tail(&node->link, &stack);
		stack_depth += 1 - child_port_count;

		if (node->phy_speed == SCODE_BETA &&
		    parent_count + child_port_count > 1)
			beta_repeaters_present = true;

		/*
		 * If PHYs report different gap counts, set an invalid count
		 * which will force a gap count reconfiguration and a reset.
		 */
		if (SELF_ID_GAP_COUNT(q) != gap_count)
			gap_count = 0;

		update_hop_count(node);

		sid = next_sid;
		phy_id++;
	}

	card->root_node = node;
	card->irm_node = irm_node;
	card->gap_count = gap_count;
	card->beta_repeaters_present = beta_repeaters_present;

	return local_node;
}
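
/*
 * The stack discipline above relies on the self-ID ordering defined by
 * the bus: self IDs arrive in ascending PHY ID order, and a node's
 * children always identify before the node itself, so every child is
 * already on the stack when its parent is processed.  E.g. a three node
 * daisy chain is reported as leaf (PHY 0), middle node (PHY 1, one
 * child port), root (PHY 2, one child port); node 1 pops node 0, node 2
 * pops node 1, and the last node processed becomes card->root_node.
 */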

typedef void (*fw_node_callback_t)(struct fw_card *card,
				   struct fw_node *node,
				   struct fw_node *parent);

/*
 * Breadth-first traversal of the tree rooted at @root.  A neighbour
 * whose color already matches card->color has been visited before and
 * is therefore the parent of the node currently being expanded.
 */
static void for_each_fw_node(struct fw_card *card, struct fw_node *root,
			     fw_node_callback_t callback)
{
	struct list_head list;
	struct fw_node *node, *next, *child, *parent;
	int i;

	INIT_LIST_HEAD(&list);

	fw_node_get(root);
	list_add_tail(&root->link, &list);
	parent = NULL;
	list_for_each_entry(node, &list, link) {
		node->color = card->color;

		for (i = 0; i < node->port_count; i++) {
			child = node->ports[i];
			if (!child)
				continue;
			if (child->color == card->color)
				parent = child;
			else {
				fw_node_get(child);
				list_add_tail(&child->link, &list);
			}
		}

		callback(card, node, parent);
	}

	list_for_each_entry_safe(node, next, &list, link)
		fw_node_put(node);
}

static void report_lost_node(struct fw_card *card,
			     struct fw_node *node, struct fw_node *parent)
{
	fw_node_event(card, node, FW_NODE_DESTROYED);
	fw_node_put(node);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

static void report_found_node(struct fw_card *card,
			      struct fw_node *node, struct fw_node *parent)
{
	int b_path = (node->phy_speed == SCODE_BETA);

	if (parent != NULL) {
		/* The usable speed is limited by the slowest PHY on the
		 * path from the local node to this node. */
		node->max_speed = parent->max_speed < node->phy_speed ?
			parent->max_speed : node->phy_speed;
		node->b_path = parent->b_path && b_path;
	} else {
		node->max_speed = node->phy_speed;
		node->b_path = b_path;
	}

	fw_node_event(card, node, FW_NODE_CREATED);

	/* Topology has changed - reset bus manager retry counter */
	card->bm_retries = 0;
}

void fw_destroy_nodes(struct fw_card *card)
{
	unsigned long flags;

	spin_lock_irqsave(&card->lock, flags);
	card->color++;
	if (card->local_node != NULL)
		for_each_fw_node(card, card->local_node, report_lost_node);
	card->local_node = NULL;
	spin_unlock_irqrestore(&card->lock, flags);
}

/*
 * Move the subtree that hangs off node1's given port over to node0 and
 * fix up the subtree's back pointer so that it points at node0 instead
 * of node1.
 */
static void move_tree(struct fw_node *node0, struct fw_node *node1, int port)
{
	struct fw_node *tree;
	int i;

	tree = node1->ports[port];
	node0->ports[port] = tree;
	for (i = 0; i < tree->port_count; i++) {
		if (tree->ports[i] == node1) {
			tree->ports[i] = node0;
			break;
		}
	}
}

/*
 * Compare the old topology tree for card with the new one specified by root.
 * Queue the nodes and mark them as either found, lost or updated.
 * Update the nodes in the card topology tree as we go.
 */
static void update_tree(struct fw_card *card, struct fw_node *root)
{
	struct list_head list0, list1;
	struct fw_node *node0, *node1, *next1;
	int i, event;

	INIT_LIST_HEAD(&list0);
	list_add_tail(&card->local_node->link, &list0);
	INIT_LIST_HEAD(&list1);
	list_add_tail(&root->link, &list1);

	node0 = fw_node(list0.next);
	node1 = fw_node(list1.next);

	while (&node0->link != &list0) {
		WARN_ON(node0->port_count != node1->port_count);

		if (node0->link_on && !node1->link_on)
			event = FW_NODE_LINK_OFF;
		else if (!node0->link_on && node1->link_on)
			event = FW_NODE_LINK_ON;
		else if (node1->initiated_reset && node1->link_on)
			event = FW_NODE_INITIATED_RESET;
		else
			event = FW_NODE_UPDATED;

		node0->node_id = node1->node_id;
		node0->color = card->color;
		node0->link_on = node1->link_on;
		node0->initiated_reset = node1->initiated_reset;
		node0->max_hops = node1->max_hops;
		node1->color = card->color;
		fw_node_event(card, node0, event);

		if (card->root_node == node1)
			card->root_node = node0;
		if (card->irm_node == node1)
			card->irm_node = node0;

		for (i = 0; i < node0->port_count; i++) {
			if (node0->ports[i] && node1->ports[i]) {
				/*
				 * This port didn't change, queue the
				 * connected node for further
				 * investigation.
				 */
				if (node0->ports[i]->color == card->color)
					continue;
				list_add_tail(&node0->ports[i]->link, &list0);
				list_add_tail(&node1->ports[i]->link, &list1);
			} else if (node0->ports[i]) {
				/*
				 * The nodes connected here were
				 * unplugged; unref the lost nodes and
				 * queue FW_NODE_DESTROYED callbacks for
				 * them.
				 */
				for_each_fw_node(card, node0->ports[i],
						 report_lost_node);
				node0->ports[i] = NULL;
			} else if (node1->ports[i]) {
				/*
				 * One or more nodes were connected to
				 * this port.  Move the new nodes into
				 * the tree and queue FW_NODE_CREATED
				 * callbacks for them.
				 */
				move_tree(node0, node1, i);
				for_each_fw_node(card, node0->ports[i],
						 report_found_node);
			}
		}

		node0 = fw_node(node0->link.next);
		next1 = fw_node(node1->link.next);
		fw_node_put(node1);
		node1 = next1;
	}
}

static void update_topology_map(struct fw_card *card,
				u32 *self_ids, int self_id_count)
{
	int node_count = (card->root_node->node_id & 0x3f) + 1;
	__be32 *map = card->topology_map;

	*map++ = cpu_to_be32((self_id_count + 2) << 16);
	*map++ = cpu_to_be32(be32_to_cpu(card->topology_map[1]) + 1);
	*map++ = cpu_to_be32((node_count << 16) | self_id_count);

	while (self_id_count--)
		*map++ = cpu_to_be32p(self_ids++);

	fw_compute_block_crc(card->topology_map);
}
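
/*
 * Layout of card->topology_map after the update (big endian):
 *
 *	map[0]	length (self_id_count + 2) in the upper 16 bits, CRC
 *		filled in by fw_compute_block_crc() in the lower 16 bits
 *	map[1]	generation counter, incremented on every update
 *	map[2]	node_count in the upper 16 bits, self_id_count in the lower
 *	map[3+]	the raw self-ID quadlets
 */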

void fw_core_handle_bus_reset(struct fw_card *card, int node_id, int generation,
			      int self_id_count, u32 *self_ids, bool bm_abdicate)
{
	struct fw_node *local_node;
	unsigned long flags;

	/*
	 * If the selfID buffer is not the immediate successor of the
	 * previously processed one, we cannot reliably compare the
	 * old and new topologies.
	 */
	if (!is_next_generation(generation, card->generation) &&
	    card->local_node != NULL) {
		fw_destroy_nodes(card);
		card->bm_retries = 0;
	}

	spin_lock_irqsave(&card->lock, flags);

	card->broadcast_channel_allocated = card->broadcast_channel_auto_allocated;
	card->node_id = node_id;
	/*
	 * Update node_id before generation to prevent anybody from using
	 * a stale node_id together with a current generation.
	 */
	smp_wmb();
	card->generation = generation;
	card->reset_jiffies = get_jiffies_64();
	card->bm_node_id = 0xffff;
	card->bm_abdicate = bm_abdicate;
	fw_schedule_bm_work(card, 0);

	local_node = build_tree(card, self_ids, self_id_count);

	update_topology_map(card, self_ids, self_id_count);

	card->color++;

	if (local_node == NULL) {
		fw_err(card, "topology build failed\n");
		/* FIXME: We need to issue a bus reset in this case. */
	} else if (card->local_node == NULL) {
		card->local_node = local_node;
		for_each_fw_node(card, local_node, report_found_node);
	} else {
		update_tree(card, local_node);
	}

	spin_unlock_irqrestore(&card->lock, flags);
}
EXPORT_SYMBOL(fw_core_handle_bus_reset);