/*
 * Character device driver for extended error reporting
 * for DASD ECKD devices.
 */

#define KMSG_COMPONENT "dasd-eckd"

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <linux/poll.h>
#include <linux/mutex.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <linux/uaccess.h>
#include <linux/atomic.h>
#include <asm/ebcdic.h>

#include "dasd_int.h"
#include "dasd_eckd.h"

#ifdef PRINTK_HEADER
#undef PRINTK_HEADER
#endif
#define PRINTK_HEADER "dasd(eer):"

/*
 * SECTION: the internal buffer
 *
 * Each reader that opens the misc device gets its own ring buffer,
 * built from eer_pages memory pages. A record is stored as a 4-byte
 * length followed by the data bytes. dasd_eer_start_record() reserves
 * space for a new record (discarding the oldest records if necessary),
 * dasd_eer_write_buffer() appends data and dasd_eer_read_buffer()
 * consumes it. All of these require the caller to hold bufferlock.
 */

static int eer_pages = 5;
module_param(eer_pages, int, S_IRUGO|S_IWUSR);

struct eerbuffer {
	struct list_head list;
	char **buffer;
	int buffersize;
	int buffer_page_count;
	int head;
	int tail;
	int residual;
};

static LIST_HEAD(bufferlist);
static DEFINE_SPINLOCK(bufferlock);
static DECLARE_WAIT_QUEUE_HEAD(dasd_eer_read_wait_queue);
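
/*
 * bufferlock protects bufferlist and the head, tail and residual
 * fields of every eerbuffer on it; readers are woken up via
 * dasd_eer_read_wait_queue whenever new data has been written.
 */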

/*
 * How many free bytes are available in the buffer.
 * One byte is always kept unused to distinguish a full from an
 * empty buffer.
 */
static int dasd_eer_get_free_bytes(struct eerbuffer *eerb)
{
	if (eerb->head < eerb->tail)
		return eerb->tail - eerb->head - 1;
	return eerb->buffersize - eerb->head + eerb->tail - 1;
}

/*
 * How many bytes of new data are available in the buffer.
 */
static int dasd_eer_get_filled_bytes(struct eerbuffer *eerb)
{
	if (eerb->head >= eerb->tail)
		return eerb->head - eerb->tail;
	return eerb->buffersize - eerb->tail + eerb->head;
}

/*
 * Copy count bytes of data to the internal buffer, wrapping around at
 * the end of the ring. The caller must have reserved enough space with
 * dasd_eer_start_record() and must hold bufferlock.
 */
static void dasd_eer_write_buffer(struct eerbuffer *eerb,
				  char *data, int count)
{
	unsigned long headindex, localhead;
	unsigned long rest, len;
	char *nextdata;

	nextdata = data;
	rest = count;
	while (rest > 0) {
		headindex = eerb->head / PAGE_SIZE;
		localhead = eerb->head % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localhead);
		memcpy(eerb->buffer[headindex] + localhead, nextdata, len);
		nextdata += len;
		rest -= len;
		eerb->head += len;
		if (eerb->head == eerb->buffersize)
			eerb->head = 0;
		BUG_ON(eerb->head > eerb->buffersize);
	}
}

/*
 * Copy up to count bytes from the internal buffer to data and return
 * the number of bytes actually copied. The caller must hold bufferlock.
 */
static int dasd_eer_read_buffer(struct eerbuffer *eerb, char *data, int count)
{
	unsigned long tailindex, localtail;
	unsigned long rest, len, finalcount;
	char *nextdata;

	finalcount = min(count, dasd_eer_get_filled_bytes(eerb));
	nextdata = data;
	rest = finalcount;
	while (rest > 0) {
		tailindex = eerb->tail / PAGE_SIZE;
		localtail = eerb->tail % PAGE_SIZE;
		len = min(rest, PAGE_SIZE - localtail);
		memcpy(nextdata, eerb->buffer[tailindex] + localtail, len);
		nextdata += len;
		rest -= len;
		eerb->tail += len;
		if (eerb->tail == eerb->buffersize)
			eerb->tail = 0;
		BUG_ON(eerb->tail > eerb->buffersize);
	}
	return finalcount;
}

/*
 * Start a new record: write the record size to the buffer and reserve
 * the space for the data. If the buffer is too small to ever hold the
 * record, return -ENOMEM; otherwise make room by dropping the oldest
 * records. The caller must hold bufferlock.
 */
static int dasd_eer_start_record(struct eerbuffer *eerb, int count)
{
	int tailcount;

	if (count + sizeof(count) > eerb->buffersize)
		return -ENOMEM;
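	/*
	 * Drop the oldest records until the new record fits. If a reader
	 * was in the middle of a record, invalidate its remainder so the
	 * next read reports -EIO.
	 */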
	while (dasd_eer_get_free_bytes(eerb) < count + sizeof(count)) {
		if (eerb->residual > 0) {
			eerb->tail += eerb->residual;
			if (eerb->tail >= eerb->buffersize)
				eerb->tail -= eerb->buffersize;
			eerb->residual = -1;
		}
		dasd_eer_read_buffer(eerb, (char *) &tailcount,
				     sizeof(tailcount));
		eerb->tail += tailcount;
		if (eerb->tail >= eerb->buffersize)
			eerb->tail -= eerb->buffersize;
	}
	dasd_eer_write_buffer(eerb, (char *) &count, sizeof(count));

	return 0;
}

/*
 * Release pages that are not used anymore.
 */
static void dasd_eer_free_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++)
		free_page((unsigned long) buf[i]);
}

/*
 * Allocate a new set of zeroed memory pages; on failure free the pages
 * allocated so far and return -ENOMEM.
 */
static int dasd_eer_allocate_buffer_pages(char **buf, int no_pages)
{
	int i;

	for (i = 0; i < no_pages; i++) {
		buf[i] = (char *) get_zeroed_page(GFP_KERNEL);
		if (!buf[i]) {
			dasd_eer_free_buffer_pages(buf, i);
			return -ENOMEM;
		}
	}
	return 0;
}

/*
 * SECTION: the extended error reporting triggers
 *
 * Whenever a trigger condition is reported via dasd_eer_write(), a
 * record is appended to every open buffer: a dasd_eer_header, followed
 * by the available sense or SNSS data, terminated by the string "EOR".
 */

#define SNSS_DATA_SIZE 44

#define DASD_EER_BUSID_SIZE 10
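/*
 * Layout of one record as delivered to the reader: this header, then
 * data_size bytes of sense or SNSS data, then the 4-byte terminator
 * "EOR" (including its trailing NUL).
 */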
struct dasd_eer_header {
	__u32 total_size;
	__u32 trigger;
	__u64 tv_sec;
	__u64 tv_usec;
	char busid[DASD_EER_BUSID_SIZE];
} __attribute__ ((packed));

/*
 * Write a standard trigger record. If cqr is not NULL, the chain of
 * requests is searched for valid sense data, and every valid sense
 * data set is appended to the record.
 */
static void dasd_eer_write_standard_trigger(struct dasd_device *device,
					    struct dasd_ccw_req *cqr,
					    int trigger)
{
	struct dasd_ccw_req *temp_cqr;
	int data_size;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;
	char *sense;

	/* go through cqr chain and count the valid sense data sets */
	data_size = 0;
	for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers)
		if (dasd_get_sense(&temp_cqr->irb))
			data_size += 32;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
	header.trigger = trigger;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strlcpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		for (temp_cqr = cqr; temp_cqr; temp_cqr = temp_cqr->refers) {
			sense = dasd_get_sense(&temp_cqr->irb);
			if (sense)
				dasd_eer_write_buffer(eerb, sense, 32);
		}
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * Write a state-change trigger record containing the SNSS data from
 * the given request, if that request completed successfully.
 */
static void dasd_eer_write_snss_trigger(struct dasd_device *device,
					struct dasd_ccw_req *cqr,
					int trigger)
{
	int data_size;
	int snss_rc;
	struct timespec64 ts;
	struct dasd_eer_header header;
	unsigned long flags;
	struct eerbuffer *eerb;

	snss_rc = (cqr->status == DASD_CQR_DONE) ? 0 : -EIO;
	if (snss_rc)
		data_size = 0;
	else
		data_size = SNSS_DATA_SIZE;

	header.total_size = sizeof(header) + data_size + 4; /* "EOR" */
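	/* SNSS records are always reported as state-change triggers */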
	header.trigger = DASD_EER_STATECHANGE;
	ktime_get_real_ts64(&ts);
	header.tv_sec = ts.tv_sec;
	header.tv_usec = ts.tv_nsec / NSEC_PER_USEC;
	strlcpy(header.busid, dev_name(&device->cdev->dev),
		DASD_EER_BUSID_SIZE);

	spin_lock_irqsave(&bufferlock, flags);
	list_for_each_entry(eerb, &bufferlist, list) {
		dasd_eer_start_record(eerb, header.total_size);
		dasd_eer_write_buffer(eerb, (char *) &header, sizeof(header));
		if (!snss_rc)
			dasd_eer_write_buffer(eerb, cqr->data, SNSS_DATA_SIZE);
		dasd_eer_write_buffer(eerb, "EOR", 4);
	}
	spin_unlock_irqrestore(&bufferlock, flags);
	wake_up_interruptible(&dasd_eer_read_wait_queue);
}

/*
 * This function is called for all triggers. It picks the appropriate
 * function that writes the actual trigger records.
 */
void dasd_eer_write(struct dasd_device *device, struct dasd_ccw_req *cqr,
		    unsigned int id)
{
	if (!device->eer_cqr)
		return;
	switch (id) {
	case DASD_EER_FATALERROR:
	case DASD_EER_PPRCSUSPEND:
		dasd_eer_write_standard_trigger(device, cqr, id);
		break;
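	/* these triggers carry no sense data, so no request is passed */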
	case DASD_EER_NOPATH:
	case DASD_EER_NOSPC:
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	case DASD_EER_STATECHANGE:
		dasd_eer_write_snss_trigger(device, cqr, id);
		break;
	default:
		dasd_eer_write_standard_trigger(device, NULL, id);
		break;
	}
}
EXPORT_SYMBOL(dasd_eer_write);

/*
 * Start a sense subsystem status request.
 * Must be called with the ccwdev lock of the device held, since the
 * request is added to the device's ccw_queue directly.
 */
void dasd_eer_snss(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;

	cqr = device->eer_cqr;
	if (!cqr)
		return;		/* Device not eer enabled. */
	if (test_and_set_bit(DASD_FLAG_EER_IN_USE, &device->flags)) {
		/* Sense subsystem status request already in use. */
		set_bit(DASD_FLAG_EER_SNSS, &device->flags);
		return;
	}
	/* the ccwdev lock is already held, so queue the request directly */
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	cqr->status = DASD_CQR_QUEUED;
	list_add(&cqr->devlist, &device->ccw_queue);
	dasd_schedule_device_bh(device);
}

/*
 * Callback function for use with the sense subsystem status request.
 */
static void dasd_eer_snss_cb(struct dasd_ccw_req *cqr, void *data)
{
	struct dasd_device *device = cqr->startdev;
	unsigned long flags;

	dasd_eer_write(device, cqr, DASD_EER_STATECHANGE);
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr == cqr) {
		clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
		if (test_bit(DASD_FLAG_EER_SNSS, &device->flags))
			/* Another SNSS has been requested in the meantime. */
			dasd_eer_snss(device);
		cqr = NULL;
	}
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
	if (cqr)
		/*
		 * Extended error reporting has been disabled while the
		 * SNSS request was running; device->eer_cqr no longer
		 * points to this request, so free it here.
		 */
		dasd_sfree_request(cqr, device);
}

/*
 * Enable error reporting on a given device.
 */
int dasd_eer_enable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr = NULL;
	unsigned long flags;
	struct ccw1 *ccw;
	int rc = 0;

	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	if (device->eer_cqr)
		goto out;
	else if (!device->discipline ||
		 strcmp(device->discipline->name, "ECKD"))
		rc = -EMEDIUMTYPE;
	else if (test_bit(DASD_FLAG_OFFLINE, &device->flags))
		rc = -EBUSY;

	if (rc)
		goto out;

	cqr = dasd_smalloc_request(DASD_ECKD_MAGIC, 1 /* SNSS */,
				   SNSS_DATA_SIZE, device, NULL);
	if (IS_ERR(cqr)) {
		rc = -ENOMEM;
		cqr = NULL;
		goto out;
	}

	cqr->startdev = device;
	cqr->retries = 255;
	cqr->expires = 10 * HZ;
	clear_bit(DASD_CQR_FLAGS_USE_ERP, &cqr->flags);
	set_bit(DASD_CQR_ALLOW_SLOCK, &cqr->flags);

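	/* build the single Sense Subsystem Status CCW */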
	ccw = cqr->cpaddr;
	ccw->cmd_code = DASD_ECKD_CCW_SNSS;
	ccw->count = SNSS_DATA_SIZE;
	ccw->flags = 0;
	ccw->cda = (__u32)(addr_t) cqr->data;

	cqr->buildclk = get_tod_clock();
	cqr->status = DASD_CQR_FILLED;
	cqr->callback = dasd_eer_snss_cb;

	if (!device->eer_cqr) {
		device->eer_cqr = cqr;
		cqr = NULL;
	}

out:
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);

	if (cqr)
		dasd_sfree_request(cqr, device);

	return rc;
}

/*
 * Disable error reporting on a given device.
 */
void dasd_eer_disable(struct dasd_device *device)
{
	struct dasd_ccw_req *cqr;
	unsigned long flags;
	int in_use;

	if (!device->eer_cqr)
		return;
	spin_lock_irqsave(get_ccwdev_lock(device->cdev), flags);
	cqr = device->eer_cqr;
	device->eer_cqr = NULL;
	clear_bit(DASD_FLAG_EER_SNSS, &device->flags);
	in_use = test_and_clear_bit(DASD_FLAG_EER_IN_USE, &device->flags);
	spin_unlock_irqrestore(get_ccwdev_lock(device->cdev), flags);
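	/* if the request is still in use, dasd_eer_snss_cb will free it */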
	if (cqr && !in_use)
		dasd_sfree_request(cqr, device);
}

/*
 * SECTION: the device operations
 *
 * On the one hand we need a lock to access the internal buffer; on the
 * other hand copy_to_user() can sleep, so we first copy the data into
 * readbuffer, which is protected by readbuffer_mutex, and copy it to
 * user space from there.
 */
static char readbuffer[PAGE_SIZE];
static DEFINE_MUTEX(readbuffer_mutex);

static int dasd_eer_open(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = kzalloc(sizeof(struct eerbuffer), GFP_KERNEL);
	if (!eerb)
		return -ENOMEM;
	eerb->buffer_page_count = eer_pages;
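	/* reject eer_pages values below 1 or large enough to overflow buffersize */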
	if (eerb->buffer_page_count < 1 ||
	    eerb->buffer_page_count > INT_MAX / PAGE_SIZE) {
		kfree(eerb);
		DBF_EVENT(DBF_WARNING, "can't open device since module "
			  "parameter eer_pages is smaller than 1 or"
			  " bigger than %d", (int)(INT_MAX / PAGE_SIZE));
		return -EINVAL;
	}
	eerb->buffersize = eerb->buffer_page_count * PAGE_SIZE;
	eerb->buffer = kmalloc_array(eerb->buffer_page_count, sizeof(char *),
				     GFP_KERNEL);
	if (!eerb->buffer) {
		kfree(eerb);
		return -ENOMEM;
	}
	if (dasd_eer_allocate_buffer_pages(eerb->buffer,
					   eerb->buffer_page_count)) {
		kfree(eerb->buffer);
		kfree(eerb);
		return -ENOMEM;
	}
	filp->private_data = eerb;
	spin_lock_irqsave(&bufferlock, flags);
	list_add(&eerb->list, &bufferlist);
	spin_unlock_irqrestore(&bufferlock, flags);

	return nonseekable_open(inp, filp);
}

static int dasd_eer_close(struct inode *inp, struct file *filp)
{
	struct eerbuffer *eerb;
	unsigned long flags;

	eerb = (struct eerbuffer *) filp->private_data;
	spin_lock_irqsave(&bufferlock, flags);
	list_del(&eerb->list);
	spin_unlock_irqrestore(&bufferlock, flags);
	dasd_eer_free_buffer_pages(eerb->buffer, eerb->buffer_page_count);
	kfree(eerb->buffer);
	kfree(eerb);

	return 0;
}

static ssize_t dasd_eer_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int tc, rc;
	int tailcount, effective_count;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	if (mutex_lock_interruptible(&readbuffer_mutex))
		return -ERESTARTSYS;

	spin_lock_irqsave(&bufferlock, flags);

	if (eerb->residual < 0) {
		/* the remainder of this record has been deleted */
		eerb->residual = 0;
		spin_unlock_irqrestore(&bufferlock, flags);
		mutex_unlock(&readbuffer_mutex);
		return -EIO;
	} else if (eerb->residual > 0) {
		/* there is still a second half of a record to deliver */
		effective_count = min(eerb->residual, (int) count);
		eerb->residual -= effective_count;
	} else {
		tc = 0;
		while (!tc) {
			tc = dasd_eer_read_buffer(eerb, (char *) &tailcount,
						  sizeof(tailcount));
			if (!tc) {
				/* no data available */
				spin_unlock_irqrestore(&bufferlock, flags);
				mutex_unlock(&readbuffer_mutex);
				if (filp->f_flags & O_NONBLOCK)
					return -EAGAIN;
				rc = wait_event_interruptible(
					dasd_eer_read_wait_queue,
					eerb->head != eerb->tail);
				if (rc)
					return rc;
				if (mutex_lock_interruptible(&readbuffer_mutex))
					return -ERESTARTSYS;
				spin_lock_irqsave(&bufferlock, flags);
			}
		}
		WARN_ON(tc != sizeof(tailcount));
		effective_count = min(tailcount, (int) count);
		eerb->residual = tailcount - effective_count;
	}

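	/* read the record into the bounce buffer while still holding bufferlock */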
	tc = dasd_eer_read_buffer(eerb, readbuffer, effective_count);
	WARN_ON(tc != effective_count);

	/* copy_to_user() may sleep, so drop bufferlock before calling it */
	spin_unlock_irqrestore(&bufferlock, flags);

	if (copy_to_user(buf, readbuffer, effective_count)) {
		mutex_unlock(&readbuffer_mutex);
		return -EFAULT;
	}

	mutex_unlock(&readbuffer_mutex);
	return effective_count;
}

static __poll_t dasd_eer_poll(struct file *filp, poll_table *ptable)
{
	__poll_t mask;
	unsigned long flags;
	struct eerbuffer *eerb;

	eerb = (struct eerbuffer *) filp->private_data;
	poll_wait(filp, &dasd_eer_read_wait_queue, ptable);
	spin_lock_irqsave(&bufferlock, flags);
	if (eerb->head != eerb->tail)
		mask = EPOLLIN | EPOLLRDNORM;
	else
		mask = 0;
	spin_unlock_irqrestore(&bufferlock, flags);
	return mask;
}

static const struct file_operations dasd_eer_fops = {
	.open = &dasd_eer_open,
	.release = &dasd_eer_close,
	.read = &dasd_eer_read,
	.poll = &dasd_eer_poll,
	.owner = THIS_MODULE,
	.llseek = noop_llseek,
};

static struct miscdevice *dasd_eer_dev = NULL;

int __init dasd_eer_init(void)
{
	int rc;

	dasd_eer_dev = kzalloc(sizeof(*dasd_eer_dev), GFP_KERNEL);
	if (!dasd_eer_dev)
		return -ENOMEM;

	dasd_eer_dev->minor = MISC_DYNAMIC_MINOR;
	dasd_eer_dev->name = "dasd_eer";
	dasd_eer_dev->fops = &dasd_eer_fops;

	rc = misc_register(dasd_eer_dev);
	if (rc) {
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
		DBF_EVENT(DBF_ERR, "%s", "dasd_eer_init could not "
			  "register misc device");
		return rc;
	}

	return 0;
}

void dasd_eer_exit(void)
{
	if (dasd_eer_dev) {
		misc_deregister(dasd_eer_dev);
		kfree(dasd_eer_dev);
		dasd_eer_dev = NULL;
	}
}