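/*
 * Character device driver for extended error reporting (EER) on s390
 * DASD ECKD devices. Error records are collected in per-reader ring
 * buffers and handed to user space through the misc device "dasd_eer".
 */
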
#define KMSG_COMPONENT "dasd-eckd"

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif
#define PRINTK_HEADER "dasd(eer):"
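
/*
 * SECTION: the internal buffer
 *
 * The internal buffer is a ring buffer made up of eer_pages pages. Each
 * record stored in it is a plain length word followed by that many bytes
 * of opaque data. One buffer is created per open file descriptor, so
 * several readers can consume events independently; all buffers are kept
 * on bufferlist, protected by bufferlock.
 *
 * A record is written with dasd_eer_start_record (reserves the space and
 * stores the length word, discarding the oldest records if necessary)
 * followed by one or more calls to dasd_eer_write_buffer. It is read back
 * with dasd_eer_read_buffer: first the length word, then the data.
 */
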
static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);

struct eerbuffer {
	struct list_head list;	/* entry on bufferlist */
	char **buffer;		/* array of buffer_page_count pages */
	int buffersize;		/* total size in bytes */
	int buffer_page_count;
	int head;		/* write position */
	int tail;		/* read position */
	int residual;		/* bytes of the current record still to be
				 * delivered, -1 if they were lost */
};

static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
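
/*
 * Return the number of free bytes in the ring buffer. One byte is always
 * kept unused so that a full buffer can be told apart from an empty one.
 */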
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
	if (eerb->head < eerb->tail)
		return eerb->tail - eerb->head - 1;
	return eerb->buffersize - eerb->head + eerb->tail - 1;
}
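
/*
 * Return the number of bytes currently stored in the ring buffer.
 */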
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
	if (eerb->head >= eerb->tail)
		return eerb->head - eerb->tail;
	return eerb->buffersize - eerb->tail + eerb->head;
}
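
/*
 * Copy count bytes to the buffer head, wrapping at the buffer end. The
 * caller must hold bufferlock and must have reserved enough space with
 * dasd_eer_start_record beforehand.
 */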
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
				  char *data, int count)
{
	unsigned long headindex, localhead;
	unsigned long rest, len;
	char *nextdata;

	nextdata = data;
	rest = count;
	while (rest > 0) {
		headindex = eerb->head / PAGE_SIZE;
		localhead = eerb->head % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localhead);
		memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
		nextdata += len;
		rest -= len;
		eerb->head += len;
		if (eerb->head == eerb->buffersize)
			eerb->head = 0; /* wrap around */
		BUG_ON(eerb->head > eerb->buffersize);
	}
}
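
/*
 * Copy at most count bytes from the buffer tail into data and advance the
 * tail. Returns the number of bytes actually copied. The caller must hold
 * bufferlock.
 */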
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
	unsigned long tailindex, localtail;
	unsigned long rest, len, finalcount;
	char *nextdata;

	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
	nextdata = data;
	rest = finalcount;
	while (rest > 0) {
		tailindex = eerb->tail / PAGE_SIZE;
		localtail = eerb->tail % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localtail);
		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
		nextdata += len;
		rest -= len;
		eerb->tail += len;
		if (eerb->tail == eerb->buffersize)
			eerb->tail = 0; /* wrap around */
		BUG_ON(eerb->tail > eerb->buffersize);
	}
	return finalcount;
}
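
/*
 * Reserve space for a record of count data bytes and write the leading
 * length word. If there is not enough room, the oldest records are dropped
 * until the new record fits; a reader that still owned part of a dropped
 * record is flagged with residual = -1 so that the next read reports an
 * error. Returns -ENOMEM if the record can never fit into the buffer.
 */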
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
	int tailcount;

	if (count + sizeof(count) > eerb->buffersize)
		return -ENOMEM;
	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
		if (eerb->residual > 0) {
			/* the remainder of the half-read record is dropped */
			eerb->tail += eerb->residual;
			if (eerb->tail >= eerb->buffersize)
				eerb->tail -= eerb->buffersize;
			eerb->residual = -1;
		}
		dasd_eer_read_buffer(eerb, (char *) &tailcount,
				     sizeof(tailcount));
		eerb->tail += tailcount;
		if (eerb->tail >= eerb->buffersize)
			eerb->tail -= eerb->buffersize;
	}
	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));

	return 0;
}
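
/*
 * Release the pages backing a ring buffer.
 */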
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++)
		free_page((unsigned long) buf[i]);
}
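
/*
 * Allocate a new set of zeroed memory pages; on failure, free whatever was
 * already allocated and return -ENOMEM.
 */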
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++) {
		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
		if (!buf[i]) {
			dasd_eer_free_buffer_pages(buf, i);
			return -ENOMEM;
		}
	}
	return 0;
}
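
/*
 * SECTION: the extended error reporting triggers
 *
 * The DASD driver reports an event by calling dasd_eer_write with one of
 * the DASD_EER_* trigger ids:
 *
 *  DASD_EER_FATALERROR	 unrecoverable I/O error
 *  DASD_EER_PPRCSUSPEND remote copy (PPRC) was suspended
 *  DASD_EER_NOPATH	 no path to the device is left
 *  DASD_EER_STATECHANGE the state of the device has changed
 *
 * For the first three triggers all data is available when the trigger is
 * raised, so a record is written immediately by
 * dasd_eer_write_standard_trigger. For DASD_EER_STATECHANGE a sense
 * subsystem status (SNSS) request has to be run first; dasd_eer_snss
 * queues that request and its callback writes the record via
 * dasd_eer_write_snss_trigger.
 *
 * Each record consists of a struct dasd_eer_header, the trigger specific
 * data (sense data or SNSS data) and the string "EOR".
 */
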
#define SNSS_DATA_SIZE 44

#define DASD_EER_BUSID_SIZE 10
struct dasd_eer_header {
	__u32 total_size;
	__u32 trigger;
	__u64 tv_sec;
	__u64 tv_usec;
	char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));
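
/*
 * Write a record for a trigger for which all data is available when the
 * trigger is raised. If cqr is not NULL, the chain of requests is searched
 * for valid sense data and every 32 byte sense block found is appended to
 * the record.
 */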
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
					    struct dasd_ccw_req *cqr,
					    int trigger)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;
	char *sense;

	/* count how many sense data blocks the record will carry */
	data_size = 0;
	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
		if (dasd_get_sense(&temp_cqr->irb))
			data_size += 32;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strlcpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
			sense = dasd_get_sense(&temp_cqr->irb);
			if (sense)
				dasd_eer_write_buffer(eerb, sense, 32);
		}
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}
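
/*
 * Write a DASD_EER_STATECHANGE record. The payload is the data returned by
 * the SNSS request, or empty if that request failed.
 */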
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
					struct dasd_ccw_req *cqr,
					int trigger)
{
	int data_size;
	int snss_rc;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	if (snss_rc)
		data_size = 0;
	else
		data_size = SNSS_DATA_SIZE;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = DASD_EER_STATECHANGE;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strlcpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		if (!snss_rc)
			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}
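
/*
 * Write a record for the trigger id on behalf of device. This is the entry
 * point used by the DASD driver; it does nothing if extended error
 * reporting is not enabled for the device.
 */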
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
		    unsigned int id)
{
	if (!device->eer_cqr)
		return;
	switch (id) {
	case DASD_EER_FATALERROR:
	case DASD_EER_PPRCSUSPEND:
		dasd_eer_write_standard_trigger(device, cqr, id);
		break;
	case DASD_EER_NOPATH:
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	case DASD_EER_STATECHANGE:
		dasd_eer_write_snss_trigger(device, cqr, id);
		break;
	default: /* unknown trigger, so we write it without any sense data */
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	}
}
EXPORT_SYMBOL(dasd_eer_write);
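
/*
 * Start a sense subsystem status request after a state change interrupt.
 * If the SNSS request is already running, only remember that another run
 * is wanted; the request's callback will restart it.
 */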
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr)	/* Device not eer enabled. */
		return;
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/* SNSS request already in use, remember to run it again */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	/* queue the SNSS request directly and let the bottom half run it */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	dasd_schedule_device_bh(device);
}
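
/*
 * Callback function for use with the SNSS request: writes the state change
 * record and restarts or frees the request as necessary.
 */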
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS was requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error reporting was switched off while the SNSS
		 * request was running. It may even have been switched off
		 * and on again, in which case device->eer_cqr already points
		 * to a new request; clean up the old one.
		 */
		dasd_sfree_request(cqr, device);
}
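
/*
 * Enable extended error reporting on a device: allocate the SNSS request
 * and attach it to the device. Only ECKD devices are supported.
 */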
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr = NULL;
	unsigned long flags;
	struct ccw1 *ccw;
	int rc = 0;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr)
		goto out;
	else if (!device->discipline ||
		 strcmp(device->discipline->name, "ECKD"))
		rc = -EMEDIUMTYPE;
	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
		rc = -EBUSY;

	if (rc)
		goto out;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
				   SNSS_DATA_SIZE, device, NULL);
	if (IS_ERR(cqr)) {
		rc = -ENOMEM;
		cqr = NULL;
		goto out;
	}

	cqr->startdev = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);

	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
	ccw->count = SNSS_DATA_SIZE;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t) cqr->data;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;
	}

out:
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	if (cqr)
		dasd_sfree_request(cqr, device);

	return rc;
}
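
/*
 * Disable error reporting on a device and free the SNSS request, unless it
 * is currently in use, in which case the callback will free it.
 */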
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr && !in_use)
		dasd_sfree_request(cqr, device);
}
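
/*
 * SECTION: the device operations
 *
 * A read() returns one record at a time: a struct dasd_eer_header followed
 * by the trigger data and the string "EOR". If the user buffer is smaller
 * than the record, the remainder is kept in eerb->residual and handed out
 * by subsequent reads. A rough sketch of a user-space reader (illustration
 * only, not part of the driver):
 *
 *	fd = open("/dev/dasd_eer", O_RDONLY);
 *	read(fd, &hdr, sizeof(struct dasd_eer_header));
 *	read(fd, data, hdr.total_size - sizeof(struct dasd_eer_header));
 *
 * copy_to_user may sleep, so it must not be called while bufferlock is
 * held. The data is therefore staged in readbuffer, which is protected by
 * readbuffer_mutex.
 */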
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);

static int dasd_eer_open(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
	if (!eerb)
		return -ENOMEM;
	eerb->buffer_page_count = eer_pages;
	if (eerb->buffer_page_count < 1 ||
	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
		kfree(eerb);
		DBF_EVENT(DBF_WARNING, "can't open device since module "
			  "parameter eer_pages is smaller than 1 or"
			  " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
		return -EINVAL;
	}
	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
	eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
				     GFP_KERNEL);
	if (!eerb->buffer) {
		kfree(eerb);
		return -ENOMEM;
	}
	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
					   eerb->buffer_page_count)) {
		kfree(eerb->buffer);
		kfree(eerb);
		return -ENOMEM;
	}
	filp->private_data = eerb;
	spin_lock_irqsave(&bufferlock, flags);
	list_add(&eerb->list, &bufferlist);
	spin_unlock_irqrestore(&bufferlock, flags);

	return nonseekable_open(inp, filp);
}

static int dasd_eer_close(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = (struct eerbuffer *) filp->private_data;
	spin_lock_irqsave(&bufferlock, flags);
	list_del(&eerb->list);
	spin_unlock_irqrestore(&bufferlock, flags);
	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
	kfree(eerb->buffer);
	kfree(eerb);

	return 0;
}

static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (mutex_lock_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) {
		/* the rest of the record we were reading has been discarded */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		mutex_unlock(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* we still have the second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				mutex_unlock(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				if (mutex_lock_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
		eerb->residual = tailcount - effective_count;
	}

	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);

	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		mutex_unlock(&readbuffer_mutex);
		return -EFAULT;
	}

	mutex_unlock(&readbuffer_mutex);
	return effective_count;
}

static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
	__poll_t mask;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
	spin_lock_irqsave(&bufferlock, flags);
	if (eerb->head != eerb->tail)
		mask = EPOLLIN | EPOLLRDNORM;
	else
		mask = 0;
	spin_unlock_irqrestore(&bufferlock, flags);
	return mask;
}

static const struct file_operations dasd_eer_fops = {
	.open		= &dasd_eer_open,
	.release	= &dasd_eer_close,
	.read		= &dasd_eer_read,
	.poll		= &dasd_eer_poll,
	.owner		= THIS_MODULE,
	.llseek		= noop_llseek,
};

static struct miscdevice *dasd_eer_dev = NULL;

int __init dasd_eer_init(void)
{
	int rc;

	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
	if (!dasd_eer_dev)
		return -ENOMEM;

	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
	dasd_eer_dev->name = "dasd_eer";
	dasd_eer_dev->fops = &dasd_eer_fops;

	rc = misc_register(dasd_eer_dev);
	if (rc) {
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
		DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
			  "register misc device");
		return rc;
	}

	return 0;
}

void dasd_eer_exit(void)
{
	if (dasd_eer_dev) {
		misc_deregister(dasd_eer_dev);
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
	}
}