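/*
 *	Functions to handle I2O devices
 */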
#include <linux/module.h>
#include <linux/i2o.h>
#include <linux/delay.h>
#include <linux/string.h>
#include <linux/slab.h>
#include "core.h"
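/**
 *	i2o_device_issue_claim - claim or release a device
 *	@dev: I2O device to claim or release
 *	@cmd: claim or release command
 *	@type: type of claim
 *
 *	Issue an I2O UTIL_CLAIM or UTIL_RELEASE message. The message to be
 *	sent is selected by cmd, dev is the I2O device which should be claimed
 *	or released and type is the claim type (see the I2O spec).
 *
 *	Returns 0 on success or a negative error code on failure.
 */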
static inline int i2o_device_issue_claim(struct i2o_device *dev, u32 cmd,
					 u32 type)
{
	struct i2o_message *msg;

	msg = i2o_msg_get_wait(dev->iop, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg))
		return PTR_ERR(msg);

	msg->u.head[0] = cpu_to_le32(FIVE_WORD_MSG_SIZE | SGL_OFFSET_0);
	msg->u.head[1] =
	    cpu_to_le32(cmd << 24 | HOST_TID << 12 | dev->lct_data.tid);
	msg->body[0] = cpu_to_le32(type);

	return i2o_msg_post_wait(dev->iop, msg, 60);
}
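/**
 *	i2o_device_claim - claim a device for use by an OSM
 *	@dev: I2O device to claim
 *
 *	Claim the device as primary owner by sending an UTIL_CLAIM request to
 *	the controller. The device lock is held while the request is issued.
 *
 *	Returns 0 on success or a negative error code on failure.
 */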
int i2o_device_claim(struct i2o_device *dev)
{
	int rc = 0;

	mutex_lock(&dev->lock);

	rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_CLAIM, I2O_CLAIM_PRIMARY);
	if (!rc)
		pr_debug("i2o: claim of device %d succeeded\n",
			 dev->lct_data.tid);
	else
		pr_debug("i2o: claim of device %d failed %d\n",
			 dev->lct_data.tid, rc);

	mutex_unlock(&dev->lock);

	return rc;
}
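/**
 *	i2o_device_claim_release - release a device that the OSM is using
 *	@dev: device to release
 *
 *	Drop a claim by sending an UTIL_RELEASE request to the controller. The
 *	release is retried up to ten times, one second apart, because some
 *	devices refuse to be released while they still have work outstanding.
 *
 *	Returns 0 on success or a negative error code on failure.
 */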
int i2o_device_claim_release(struct i2o_device *dev)
{
	int tries;
	int rc = 0;

	mutex_lock(&dev->lock);

	/* retry the release; the device may still be busy */
	for (tries = 0; tries < 10; tries++) {
		rc = i2o_device_issue_claim(dev, I2O_CMD_UTIL_RELEASE,
					    I2O_CLAIM_PRIMARY);
		if (!rc)
			break;

		ssleep(1);
	}

	if (!rc)
		pr_debug("i2o: claim release of device %d succeeded\n",
			 dev->lct_data.tid);
	else
		pr_debug("i2o: claim release of device %d failed %d\n",
			 dev->lct_data.tid, rc);

	mutex_unlock(&dev->lock);

	return rc;
}
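/**
 *	i2o_device_release - release the memory for an I2O device
 *	@dev: I2O device which should be released
 *
 *	Release the allocated memory. This function is called automatically
 *	when the refcount of the device reaches 0.
 */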
static void i2o_device_release(struct device *dev)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);

	pr_debug("i2o: device %s released\n", dev_name(dev));

	kfree(i2o_dev);
}
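/**
 *	i2o_device_show_class_id - sysfs handler for the "class_id" attribute
 *	@dev: device to which the attribute belongs
 *	@attr: the attribute being read
 *	@buf: output buffer
 *
 *	Print the class ID from the device's LCT entry.
 */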
static ssize_t i2o_device_show_class_id(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);

	sprintf(buf, "0x%03x\n", i2o_dev->lct_data.class_id);
	return strlen(buf) + 1;
}
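/**
 *	i2o_device_show_tid - sysfs handler for the "tid" attribute
 *	@dev: device to which the attribute belongs
 *	@attr: the attribute being read
 *	@buf: output buffer
 *
 *	Print the TID from the device's LCT entry.
 */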
static ssize_t i2o_device_show_tid(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	struct i2o_device *i2o_dev = to_i2o_device(dev);

	sprintf(buf, "0x%03x\n", i2o_dev->lct_data.tid);
	return strlen(buf) + 1;
}
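/* sysfs attributes created for every I2O device */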
struct device_attribute i2o_device_attrs[] = {
	__ATTR(class_id, S_IRUGO, i2o_device_show_class_id, NULL),
	__ATTR(tid, S_IRUGO, i2o_device_show_tid, NULL),
	__ATTR_NULL
};
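/**
 *	i2o_device_alloc - allocate an I2O device and initialize it
 *
 *	Allocate the memory for an I2O device and initialize its lock and
 *	list head.
 *
 *	Returns the allocated I2O device or a negative error pointer if the
 *	device could not be allocated.
 */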
static struct i2o_device *i2o_device_alloc(void)
{
	struct i2o_device *dev;

	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
	if (!dev)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&dev->list);
	mutex_init(&dev->lock);

	dev->device.bus = &i2o_bus_type;
	dev->device.release = &i2o_device_release;

	return dev;
}
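/**
 *	i2o_device_add - allocate a new I2O device and add it to the IOP
 *	@c: I2O controller that the device is on
 *	@entry: LCT entry of the I2O device
 *
 *	Allocate a new I2O device and initialize it with the LCT entry. The
 *	device is appended to the device list of the controller, and sysfs
 *	"user" and "parent" links are created to related devices.
 *
 *	Returns 0 on success or a negative error code on failure.
 */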
static int i2o_device_add(struct i2o_controller *c, i2o_lct_entry *entry)
{
	struct i2o_device *i2o_dev, *tmp;
	int rc;

	i2o_dev = i2o_device_alloc();
	if (IS_ERR(i2o_dev)) {
		printk(KERN_ERR "i2o: unable to allocate i2o device\n");
		return PTR_ERR(i2o_dev);
	}

	i2o_dev->lct_data = *entry;

	dev_set_name(&i2o_dev->device, "%d:%03x", c->unit,
		     i2o_dev->lct_data.tid);

	i2o_dev->iop = c;
	i2o_dev->device.parent = &c->device;

	rc = device_register(&i2o_dev->device);
	if (rc)
		goto err;

	list_add_tail(&i2o_dev->list, &c->devices);

	/* create a "user" link to the device this device uses */
	tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.user_tid);
	if (tmp && (tmp != i2o_dev)) {
		rc = sysfs_create_link(&i2o_dev->device.kobj,
				       &tmp->device.kobj, "user");
		if (rc)
			goto unreg_dev;
	}

	/* create "user" links on devices which use this device */
	list_for_each_entry(tmp, &c->devices, list)
		if ((tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
		    && (tmp != i2o_dev)) {
			rc = sysfs_create_link(&tmp->device.kobj,
					       &i2o_dev->device.kobj, "user");
			if (rc)
				goto rmlink1;
		}

	/* create a "parent" link to the device this device depends on */
	tmp = i2o_iop_find_device(i2o_dev->iop, i2o_dev->lct_data.parent_tid);
	if (tmp && (tmp != i2o_dev)) {
		rc = sysfs_create_link(&i2o_dev->device.kobj,
				       &tmp->device.kobj, "parent");
		if (rc)
			goto rmlink1;
	}

	/* create "parent" links on devices which depend on this device */
	list_for_each_entry(tmp, &c->devices, list)
		if ((tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
		    && (tmp != i2o_dev)) {
			rc = sysfs_create_link(&tmp->device.kobj,
					       &i2o_dev->device.kobj, "parent");
			if (rc)
				goto rmlink2;
		}

	i2o_driver_notify_device_add_all(i2o_dev);

	pr_debug("i2o: device %s added\n", dev_name(&i2o_dev->device));

	return 0;

rmlink2:
	/*
	 *	Undo the partially created sysfs links: drop the "parent" links
	 *	first, then fall through and drop the "user" links.
	 */
	list_for_each_entry(tmp, &c->devices, list) {
		if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
			sysfs_remove_link(&tmp->device.kobj, "parent");
	}
	sysfs_remove_link(&i2o_dev->device.kobj, "parent");
rmlink1:
	list_for_each_entry(tmp, &c->devices, list)
		if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
			sysfs_remove_link(&tmp->device.kobj, "user");
	sysfs_remove_link(&i2o_dev->device.kobj, "user");
unreg_dev:
	list_del(&i2o_dev->list);
	/* the release callback frees i2o_dev, so do not free it again here */
	device_unregister(&i2o_dev->device);
	return rc;
err:
	kfree(i2o_dev);
	return rc;
}
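/**
 *	i2o_device_remove - remove an I2O device from the I2O core
 *	@i2o_dev: I2O device which should be released
 *
 *	Used on I2O controller removal or LCT modification, when the device is
 *	removed from the system. Note that the device could still hang around
 *	until its refcount drops to 0.
 */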
void i2o_device_remove(struct i2o_device *i2o_dev)
{
	struct i2o_device *tmp;
	struct i2o_controller *c = i2o_dev->iop;

	i2o_driver_notify_device_remove_all(i2o_dev);

	sysfs_remove_link(&i2o_dev->device.kobj, "parent");
	sysfs_remove_link(&i2o_dev->device.kobj, "user");

	list_for_each_entry(tmp, &c->devices, list) {
		if (tmp->lct_data.parent_tid == i2o_dev->lct_data.tid)
			sysfs_remove_link(&tmp->device.kobj, "parent");
		if (tmp->lct_data.user_tid == i2o_dev->lct_data.tid)
			sysfs_remove_link(&tmp->device.kobj, "user");
	}
	list_del(&i2o_dev->list);

	device_unregister(&i2o_dev->device);
}
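/**
 *	i2o_device_parse_lct - parse a previously fetched LCT and create devices
 *	@c: I2O controller from which the LCT should be parsed
 *
 *	The Logical Configuration Table tells us what we can talk to on the
 *	board. For every entry we create an I2O device, which is registered in
 *	the I2O core. Devices which are no longer present in the LCT are
 *	removed.
 *
 *	Returns 0 on success or a negative error code on failure.
 */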
int i2o_device_parse_lct(struct i2o_controller *c)
{
	struct i2o_device *dev, *tmp;
	i2o_lct *lct;
	u32 *dlct = c->dlct.virt;
	int max = 0, i = 0;
	u16 table_size;
	u32 buf;

	mutex_lock(&c->lct_lock);

	kfree(c->lct);

	buf = le32_to_cpu(*dlct++);
	table_size = buf & 0xffff;

	lct = c->lct = kmalloc(table_size * 4, GFP_KERNEL);
	if (!lct) {
		mutex_unlock(&c->lct_lock);
		return -ENOMEM;
	}

	lct->lct_ver = buf >> 28;
	lct->boot_tid = buf >> 16 & 0xfff;
	lct->table_size = table_size;
	lct->change_ind = le32_to_cpu(*dlct++);
	lct->iop_flags = le32_to_cpu(*dlct++);

	table_size -= 3;

	/* each remaining LCT entry is 9 32-bit words */
	pr_debug("%s: LCT has %d entries (LCT size: %d)\n", c->name,
		 table_size / 9, lct->table_size);

	while (table_size > 0) {
		i2o_lct_entry *entry = &lct->lct_entry[max];
		int found = 0;

		buf = le32_to_cpu(*dlct++);
		entry->entry_size = buf & 0xffff;
		entry->tid = buf >> 16 & 0xfff;

		entry->change_ind = le32_to_cpu(*dlct++);
		entry->device_flags = le32_to_cpu(*dlct++);

		buf = le32_to_cpu(*dlct++);
		entry->class_id = buf & 0xfff;
		entry->version = buf >> 12 & 0xf;
		entry->vendor_id = buf >> 16;

		entry->sub_class = le32_to_cpu(*dlct++);

		buf = le32_to_cpu(*dlct++);
		entry->user_tid = buf & 0xfff;
		entry->parent_tid = buf >> 12 & 0xfff;
		entry->bios_info = buf >> 24;

		memcpy(&entry->identity_tag, dlct, 8);
		dlct += 2;

		entry->event_capabilities = le32_to_cpu(*dlct++);

		/* add devices which are new in the LCT */
		list_for_each_entry_safe(dev, tmp, &c->devices, list) {
			if (entry->tid == dev->lct_data.tid) {
				found = 1;
				break;
			}
		}

		if (!found)
			i2o_device_add(c, entry);

		table_size -= 9;
		max++;
	}

	/* remove devices which are not in the LCT anymore */
	list_for_each_entry_safe(dev, tmp, &c->devices, list) {
		int found = 0;

		for (i = 0; i < max; i++) {
			if (lct->lct_entry[i].tid == dev->lct_data.tid) {
				found = 1;
				break;
			}
		}

		if (!found)
			i2o_device_remove(dev);
	}

	mutex_unlock(&c->lct_lock);

	return 0;
}
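/*
 *	Run time support routines
 */

/*
 *	i2o_parm_issue - issue an UTIL_PARAMS_GET or UTIL_PARAMS_SET request
 *
 *	This function can be used for all UtilParamsGet/Set operations. The
 *	OperationList is passed in the oplist buffer, and the results are
 *	returned in the reslist buffer. Note that the minimum sized reslist is
 *	8 bytes and contains ResultCount, ErrorInfoSize, BlockStatus and
 *	BlockSize.
 */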
int i2o_parm_issue(struct i2o_device *i2o_dev, int cmd, void *oplist,
		   int oplen, void *reslist, int reslen)
{
	struct i2o_message *msg;
	int i = 0;
	int rc;
	struct i2o_dma res;
	struct i2o_controller *c = i2o_dev->iop;
	struct device *dev = &c->pdev->dev;

	res.virt = NULL;

	if (i2o_dma_alloc(dev, &res, reslen))
		return -ENOMEM;

	msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
	if (IS_ERR(msg)) {
		i2o_dma_free(dev, &res);
		return PTR_ERR(msg);
	}

	msg->u.head[1] =
	    cpu_to_le32(cmd << 24 | HOST_TID << 12 | i2o_dev->lct_data.tid);
	msg->body[i++] = cpu_to_le32(0x00000000);
	msg->body[i++] = cpu_to_le32(0x4C000000 | oplen);	/* OperationList */
	memcpy(&msg->body[i], oplist, oplen);
	i += (oplen / 4 + (oplen % 4 ? 1 : 0));
	msg->body[i++] = cpu_to_le32(0xD0000000 | res.len);	/* ResultList */
	msg->body[i++] = cpu_to_le32(res.phys);

	msg->u.head[0] =
	    cpu_to_le32(I2O_MESSAGE_SIZE(i + sizeof(struct i2o_message) / 4) |
			SGL_OFFSET_5);

	rc = i2o_msg_post_wait_mem(c, msg, 10, &res);

	/*
	 *	On timeout, ownership of the DMA buffer stays with the message
	 *	core, which frees it when the late reply arrives, so it must
	 *	not be freed here. This only looks like a memory leak.
	 */
	if (rc == -ETIMEDOUT)
		return rc;

	memcpy(reslist, res.virt, res.len);
	i2o_dma_free(dev, &res);

	return rc;
}
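/*
 *	Query one field group value or a whole group of values.
 */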
int i2o_parm_field_get(struct i2o_device *i2o_dev, int group, int field,
		       void *buf, int buflen)
{
	u32 opblk[] = { cpu_to_le32(0x00000001),
		cpu_to_le32((u16) group << 16 | I2O_PARAMS_FIELD_GET),
		cpu_to_le32((s16) field << 16 | 0x00000001)
	};
	u8 *resblk;		/* 8 bytes for the result header */
	int rc;

	resblk = kmalloc(buflen + 8, GFP_KERNEL);
	if (!resblk)
		return -ENOMEM;

	rc = i2o_parm_issue(i2o_dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
			    sizeof(opblk), resblk, buflen + 8);

	memcpy(buf, resblk + 8, buflen);	/* strip the result header */

	kfree(resblk);

	return rc;
}
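/*
 *	if oper == I2O_PARAMS_TABLE_GET, get from all rows:
 *		if fieldcount == -1, return all fields
 *			(ibuf and ibuflen are unused, pass NULL and 0)
 *		else return the specific fields
 *			(ibuf contains the field indexes)
 *
 *	if oper == I2O_PARAMS_LIST_GET, get from specific rows:
 *		if fieldcount == -1, return all fields
 *			(ibuf contains rowcount and keyvalues)
 *		else return the specific fields
 *			(fieldcount is the number of field indexes;
 *			 ibuf contains the field indexes, rowcount and keyvalues)
 *
 *	You can also call i2o_parm_issue() directly.
 */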
int i2o_parm_table_get(struct i2o_device *dev, int oper, int group,
		       int fieldcount, void *ibuf, int ibuflen, void *resblk,
		       int reslen)
{
	u16 *opblk;
	int size;

	size = 10 + ibuflen;
	if (size % 4)
		size += 4 - size % 4;	/* round up to a multiple of 4 bytes */

	opblk = kmalloc(size, GFP_KERNEL);
	if (opblk == NULL) {
		printk(KERN_ERR "i2o: no memory for query buffer.\n");
		return -ENOMEM;
	}

	opblk[0] = 1;		/* operation count */
	opblk[1] = 0;		/* pad */
	opblk[2] = oper;
	opblk[3] = group;
	opblk[4] = fieldcount;
	memcpy(opblk + 5, ibuf, ibuflen);	/* other parameters */

	size = i2o_parm_issue(dev, I2O_CMD_UTIL_PARAMS_GET, opblk,
			      size, resblk, reslen);

	kfree(opblk);
	if (size > reslen)
		return reslen;

	return size;
}
EXPORT_SYMBOL(i2o_device_claim);
EXPORT_SYMBOL(i2o_device_claim_release);
EXPORT_SYMBOL(i2o_parm_field_get);
EXPORT_SYMBOL(i2o_parm_table_get);
EXPORT_SYMBOL(i2o_parm_issue);