1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30#include <linux/module.h>
31#include <linux/i2o.h>
32#include <linux/delay.h>
33#include <linux/workqueue.h>
34#include <linux/string.h>
35#include <linux/slab.h>
36#include <linux/sched.h>
37#include <asm/param.h>
38#include "core.h"
39
40#define OSM_NAME "exec-osm"
41
42struct i2o_driver i2o_exec_driver;
43
44
45static LIST_HEAD(i2o_exec_wait_list);
46
47
/* Bookkeeping for one POST WAIT message in flight (see
 * i2o_msg_post_wait_mem() / i2o_msg_post_wait_complete()). */
struct i2o_exec_wait {
	wait_queue_head_t *wq;		/* waiter's on-stack wait queue; NULL once timed out */
	struct i2o_dma dma;		/* DMA buffer to free if the reply arrives late */
	u32 tcntxt;			/* transaction context used to match the reply */
	int complete;			/* set to 1 when the reply has arrived */
	u32 m;				/* message id of the reply frame */
	struct i2o_message *msg;	/* virtual address of the reply message */
	struct list_head list;		/* node in global i2o_exec_wait_list */
	spinlock_t lock;		/* orders reply completion vs. timeout */
};
58
59
/* Work item used to defer LCT-change handling to process context */
struct i2o_exec_lct_notify_work {
	struct work_struct work;	/* work struct */
	struct i2o_controller *c;	/* controller whose LCT changed */
};
65
66
/* Device classes this OSM binds to: Executive class only */
static struct i2o_class_id i2o_exec_class_id[] = {
	{I2O_CLASS_EXECUTIVE},
	{I2O_CLASS_END}
};
71
72
73
74
75
76
77
78
79
80static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
81{
82 struct i2o_exec_wait *wait;
83
84 wait = kzalloc(sizeof(*wait), GFP_KERNEL);
85 if (!wait)
86 return NULL;
87
88 INIT_LIST_HEAD(&wait->list);
89 spin_lock_init(&wait->lock);
90
91 return wait;
92};
93
94
95
96
97
/**
 * i2o_exec_wait_free - Release a POST WAIT record
 * @wait: record to free (may be from either the waiter or the reply path)
 *
 * Thin wrapper around kfree() kept for symmetry with i2o_exec_wait_alloc().
 */
static void i2o_exec_wait_free(struct i2o_exec_wait *wait)
{
	kfree(wait);
}
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
/**
 * i2o_msg_post_wait_mem - Post a message to the IOP and wait for the reply
 * @c: controller to post the message to
 * @msg: message frame to post (always consumed: posted, or nop'ed on
 *	 allocation failure)
 * @timeout: time in seconds to wait for the reply
 * @dma: DMA buffer attached to the message, or NULL; on timeout
 *	 dma->virt is cleared because ownership of the buffer passes to
 *	 the wait record (freed when the late reply eventually arrives)
 *
 * Returns the reply's status byte (>= 0) on success, -ETIMEDOUT when no
 * reply arrived within @timeout seconds, or -ENOMEM when the wait record
 * could not be allocated.
 */
int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
			  unsigned long timeout, struct i2o_dma *dma)
{
	DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq);
	struct i2o_exec_wait *wait;
	static u32 tcntxt = 0x80000000;	/* contexts >= 0x80000000 mark POST WAIT */
	unsigned long flags;
	int rc = 0;

	wait = i2o_exec_wait_alloc();
	if (!wait) {
		/* can't track a reply, so return the frame to the IOP */
		i2o_msg_nop(c, msg);
		return -ENOMEM;
	}

	/* wrap the transaction context before it leaves the POST WAIT range */
	if (tcntxt == 0xffffffff)
		tcntxt = 0x80000000;

	if (dma)
		wait->dma = *dma;

	/*
	 * Fill in initiator and transaction context: the reply is matched
	 * back to this record by tcntxt in i2o_msg_post_wait_complete().
	 */
	msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
	wait->tcntxt = tcntxt++;
	msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);

	wait->wq = &wq;

	/*
	 * NOTE(review): the record is added to the global list without a
	 * lock; presumably POST WAIT submission is serialized elsewhere —
	 * confirm against the callers.
	 */
	list_add(&wait->list, &i2o_exec_wait_list);

	/*
	 * Post the message; wait->complete stays 0 if no reply shows up
	 * before the timeout expires.
	 */
	i2o_msg_post(c, msg);

	wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);

	spin_lock_irqsave(&wait->lock, flags);

	/* the reply path must never wake this soon-dead stack frame */
	wait->wq = NULL;

	if (wait->complete)
		rc = le32_to_cpu(wait->msg->body[0]) >> 24;	/* status byte */
	else {
		/*
		 * Timed out: the IOP may still write into the DMA buffer,
		 * so hand it over to the wait record (freed when the late
		 * reply arrives) and tell the caller it no longer owns it.
		 */
		if (dma)
			dma->virt = NULL;

		rc = -ETIMEDOUT;
	}

	spin_unlock_irqrestore(&wait->lock, flags);

	if (rc != -ETIMEDOUT) {
		/* reply consumed: release frame and wait record now; on
		 * timeout both stay alive for i2o_msg_post_wait_complete() */
		i2o_flush_reply(c, wait->m);
		i2o_exec_wait_free(wait);
	}

	return rc;
};
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
/**
 * i2o_msg_post_wait_complete - Match a reply to its POST WAIT record
 * @c: controller the reply came from
 * @m: message id of the reply frame
 * @msg: virtual address of the reply message
 * @context: transaction context carried by the reply
 *
 * Looks up the wait record with a matching transaction context. If the
 * waiter is still sleeping it is woken (return 0); if it already timed
 * out, the record and its DMA buffer are freed here (return -1). A reply
 * with no matching record also returns -1.
 */
static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
				      struct i2o_message *msg, u32 context)
{
	struct i2o_exec_wait *wait, *tmp;
	unsigned long flags;
	int rc = 1;	/* always overwritten once a matching record is found */

	/*
	 * NOTE(review): the global wait list is traversed without a lock;
	 * presumably serialized with i2o_msg_post_wait_mem() elsewhere —
	 * confirm.
	 */
	list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
		if (wait->tcntxt == context) {
			/* wait->lock orders us against the timeout path */
			spin_lock_irqsave(&wait->lock, flags);

			list_del(&wait->list);

			wait->m = m;
			wait->msg = msg;
			wait->complete = 1;

			/* wq is NULLed by the waiter once it gives up */
			if (wait->wq)
				rc = 0;
			else
				rc = -1;

			spin_unlock_irqrestore(&wait->lock, flags);

			if (rc) {
				/* waiter is gone: clean up on its behalf */
				struct device *dev;

				dev = &c->pdev->dev;

				pr_debug("%s: timedout reply received!\n",
					 c->name);
				i2o_dma_free(dev, &wait->dma);
				i2o_exec_wait_free(wait);
			} else
				wake_up_interruptible(wait->wq);

			return rc;
		}
	}

	osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
		 context);

	return -1;
};
268
269
270
271
272
273
274
275
276
277static ssize_t i2o_exec_show_vendor_id(struct device *d,
278 struct device_attribute *attr, char *buf)
279{
280 struct i2o_device *dev = to_i2o_device(d);
281 u16 id;
282
283 if (!i2o_parm_field_get(dev, 0x0000, 0, &id, 2)) {
284 sprintf(buf, "0x%04x", le16_to_cpu(id));
285 return strlen(buf) + 1;
286 }
287
288 return 0;
289};
290
291
292
293
294
295
296
297
298
299static ssize_t i2o_exec_show_product_id(struct device *d,
300 struct device_attribute *attr,
301 char *buf)
302{
303 struct i2o_device *dev = to_i2o_device(d);
304 u16 id;
305
306 if (!i2o_parm_field_get(dev, 0x0000, 1, &id, 2)) {
307 sprintf(buf, "0x%04x", le16_to_cpu(id));
308 return strlen(buf) + 1;
309 }
310
311 return 0;
312};
313
314
315static DEVICE_ATTR(vendor_id, S_IRUGO, i2o_exec_show_vendor_id, NULL);
316static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
317
318
319
320
321
322
323
324
325
326
327static int i2o_exec_probe(struct device *dev)
328{
329 struct i2o_device *i2o_dev = to_i2o_device(dev);
330 int rc;
331
332 rc = i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
333 if (rc) goto err_out;
334
335 rc = device_create_file(dev, &dev_attr_vendor_id);
336 if (rc) goto err_evtreg;
337 rc = device_create_file(dev, &dev_attr_product_id);
338 if (rc) goto err_vid;
339
340 i2o_dev->iop->exec = i2o_dev;
341
342 return 0;
343
344err_vid:
345 device_remove_file(dev, &dev_attr_vendor_id);
346err_evtreg:
347 i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
348err_out:
349 return rc;
350};
351
352
353
354
355
356
357
358
359
360static int i2o_exec_remove(struct device *dev)
361{
362 device_remove_file(dev, &dev_attr_product_id);
363 device_remove_file(dev, &dev_attr_vendor_id);
364
365 i2o_event_register(to_i2o_device(dev), &i2o_exec_driver, 0, 0);
366
367 return 0;
368};
369
370#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
371
372
373
374
375
376
377
378
379
380
381static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
382{
383 i2o_status_block *sb = c->status_block.virt;
384 struct device *dev;
385 struct i2o_message *msg;
386
387 mutex_lock(&c->lct_lock);
388
389 dev = &c->pdev->dev;
390
391 if (i2o_dma_realloc(dev, &c->dlct,
392 le32_to_cpu(sb->expected_lct_size))) {
393 mutex_unlock(&c->lct_lock);
394 return -ENOMEM;
395 }
396
397 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
398 if (IS_ERR(msg)) {
399 mutex_unlock(&c->lct_lock);
400 return PTR_ERR(msg);
401 }
402
403 msg->u.head[0] = cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
404 msg->u.head[1] = cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
405 ADAPTER_TID);
406 msg->u.s.icntxt = cpu_to_le32(i2o_exec_driver.context);
407 msg->u.s.tcntxt = cpu_to_le32(0x00000000);
408 msg->body[0] = cpu_to_le32(0xffffffff);
409 msg->body[1] = cpu_to_le32(change_ind);
410 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
411 msg->body[3] = cpu_to_le32(c->dlct.phys);
412
413 i2o_msg_post(c, msg);
414
415 mutex_unlock(&c->lct_lock);
416
417 return 0;
418}
419#endif
420
421
422
423
424
425
426
427
428
/**
 * i2o_exec_lct_modified - Deferred handler for a modified LCT
 * @_work: work_struct embedded in a struct i2o_exec_lct_notify_work
 *
 * Re-parses the controller's LCT. If parsing succeeded, the next LCT
 * NOTIFY is armed with change_ind + 1; on -EAGAIN it is re-armed with
 * change indicator 0 instead.
 */
static void i2o_exec_lct_modified(struct work_struct *_work)
{
	struct i2o_exec_lct_notify_work *work =
	    container_of(_work, struct i2o_exec_lct_notify_work, work);
	u32 change_ind = 0;
	struct i2o_controller *c = work->c;

	/* the controller pointer was saved above, so the work item can go */
	kfree(work);

	if (i2o_device_parse_lct(c) != -EAGAIN)
		change_ind = c->lct->change_ind + 1;

#ifdef CONFIG_I2O_LCT_NOTIFY_ON_CHANGES
	i2o_exec_lct_notify(c, change_ind);
#endif
};
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
/**
 * i2o_exec_reply - Reply dispatcher of the Executive OSM
 * @c: controller the reply came from
 * @m: message id of the reply
 * @msg: virtual address of the reply message
 *
 * POST WAIT replies (context bit 31 set) are routed to
 * i2o_msg_post_wait_complete(); LCT NOTIFY replies schedule a deferred
 * LCT rescan. Returns the POST WAIT completion result, 1 when a rescan
 * was queued, -ENOMEM when the work item allocation failed, or -EFAULT
 * for unsolicited replies.
 */
static int i2o_exec_reply(struct i2o_controller *c, u32 m,
			  struct i2o_message *msg)
{
	u32 context;

	if (le32_to_cpu(msg->u.head[0]) & MSG_FAIL) {
		struct i2o_message __iomem *pmsg;
		u32 pm;

		/*
		 * On a FAIL frame the transaction context must be read
		 * from the preserved original frame, which body[3]
		 * points at inside the IOP's inbound space.
		 */
		pm = le32_to_cpu(msg->body[3]);
		pmsg = i2o_msg_in_to_virt(c, pm);
		context = readl(&pmsg->u.s.tcntxt);

		i2o_report_status(KERN_INFO, "i2o_core", msg);

		/* release the preserved frame back to the IOP */
		i2o_msg_nop_mfa(c, pm);
	} else
		context = le32_to_cpu(msg->u.s.tcntxt);

	/* contexts >= 0x80000000 belong to POST WAIT requests */
	if (context & 0x80000000)
		return i2o_msg_post_wait_complete(c, m, msg, context);

	if ((le32_to_cpu(msg->u.head[1]) >> 24) == I2O_CMD_LCT_NOTIFY) {
		struct i2o_exec_lct_notify_work *work;

		pr_debug("%s: LCT notify received\n", c->name);

		/* defer to process context; GFP_ATOMIC because we may be
		 * called from the controller's reply/interrupt path */
		work = kmalloc(sizeof(*work), GFP_ATOMIC);
		if (!work)
			return -ENOMEM;

		work->c = c;

		INIT_WORK(&work->work, i2o_exec_lct_modified);
		queue_work(i2o_exec_driver.event_queue, &work->work);
		return 1;
	}

	/*
	 * Anything else is unexpected here: this OSM only handles POST
	 * WAIT replies and LCT notifications.
	 */
	printk(KERN_WARNING "%s: Unsolicited message reply sent to core!"
	       "Message dumped to syslog\n", c->name);
	i2o_dump_message(msg);

	return -EFAULT;
}
516
517
518
519
520
521
522
523
/**
 * i2o_exec_event - Event handler of the Executive OSM
 * @work: work_struct embedded in the i2o_event to handle
 *
 * Only logs the originating device's TID (when known) and frees the
 * event; the Exec OSM takes no further action on events.
 */
static void i2o_exec_event(struct work_struct *work)
{
	struct i2o_event *evt = container_of(work, struct i2o_event, work);

	if (likely(evt->i2o_dev))
		osm_debug("Event received from device: %d\n",
			  evt->i2o_dev->lct_data.tid);
	kfree(evt);
};
533
534
535
536
537
538
539
540
541
542
543
544int i2o_exec_lct_get(struct i2o_controller *c)
545{
546 struct i2o_message *msg;
547 int i = 0;
548 int rc = -EAGAIN;
549
550 for (i = 1; i <= I2O_LCT_GET_TRIES; i++) {
551 msg = i2o_msg_get_wait(c, I2O_TIMEOUT_MESSAGE_GET);
552 if (IS_ERR(msg))
553 return PTR_ERR(msg);
554
555 msg->u.head[0] =
556 cpu_to_le32(EIGHT_WORD_MSG_SIZE | SGL_OFFSET_6);
557 msg->u.head[1] =
558 cpu_to_le32(I2O_CMD_LCT_NOTIFY << 24 | HOST_TID << 12 |
559 ADAPTER_TID);
560 msg->body[0] = cpu_to_le32(0xffffffff);
561 msg->body[1] = cpu_to_le32(0x00000000);
562 msg->body[2] = cpu_to_le32(0xd0000000 | c->dlct.len);
563 msg->body[3] = cpu_to_le32(c->dlct.phys);
564
565 rc = i2o_msg_post_wait(c, msg, I2O_TIMEOUT_LCT_GET);
566 if (rc < 0)
567 break;
568
569 rc = i2o_device_parse_lct(c);
570 if (rc != -EAGAIN)
571 break;
572 }
573
574 return rc;
575}
576
577
/* Exec OSM driver: handles replies, events, probe and remove for devices
 * of the classes listed in i2o_exec_class_id (Executive class only). */
struct i2o_driver i2o_exec_driver = {
	.name = OSM_NAME,
	.reply = i2o_exec_reply,
	.event = i2o_exec_event,
	.classes = i2o_exec_class_id,
	.driver = {
		   .probe = i2o_exec_probe,
		   .remove = i2o_exec_remove,
		   },
};
588
589
590
591
592
593
594
595
/**
 * i2o_exec_init - Register the Exec OSM with the I2O core
 *
 * Returns 0 on success or a negative error code on failure.
 */
int __init i2o_exec_init(void)
{
	return i2o_driver_register(&i2o_exec_driver);
};
600
601
602
603
604
605
/**
 * i2o_exec_exit - Unregister the Exec OSM from the I2O core
 */
void i2o_exec_exit(void)
{
	i2o_driver_unregister(&i2o_exec_driver);
};
610
611EXPORT_SYMBOL(i2o_msg_post_wait_mem);
612EXPORT_SYMBOL(i2o_exec_lct_get);
613