/*
 * Block driver for s390 storage class memory.
 */
#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	__scm_free_rq_cluster(scmrq);
	kfree(scmrq->request);
	kfree(aobrq);
}

static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

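/*
 * Allocate one scm_request: the AOB is a zeroed page allocated with
 * GFP_DMA, and scmrq->request holds one block layer request pointer per
 * msb slot. Newly allocated requests are parked on the inactive_requests
 * list.
 */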
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	if (__scm_alloc_rq_cluster(scmrq))
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock(&list_lock);
	return scmrq;
}

static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

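/*
 * Return how many bytes of I/O data can still be described by the aidaw
 * entries between *aidaw and the end of its page (each entry describes
 * PAGE_SIZE bytes of data).
 */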
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

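/*
 * Reuse the aidaw entries left over from the previous msb if they are
 * sufficient for this request; otherwise fetch a fresh, zeroed page from
 * the aidaw mempool.
 */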
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

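/*
 * Fill the next free msb slot of the AOB for one block layer request:
 * set up the operation code and SCM address and build the indirect
 * address list (one aidaw per segment).
 */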
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = 0;
	/* We don't use all msbs - place aidaws at the end of the aob page. */
	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
	scm_request_cluster_init(scmrq);
}

static void scm_ensure_queue_restart(struct scm_blk_dev *bdev)
{
	if (atomic_read(&bdev->queued_reqs)) {
		/* Queue restart is triggered by the next interrupt. */
		return;
	}
	blk_delay_queue(bdev->rq, SCM_QUEUE_DELAY);
}

void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_requeue_request(bdev->rq, scmrq->request[i]);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	scm_ensure_queue_restart(bdev);
}

void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	scm_release_cluster(scmrq);
	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_end_request_all(scmrq->request[i], scmrq->error);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

static int scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int ret;

	atomic_inc(&bdev->queued_reqs);
	if (!scmrq->aob->request.msb_count) {
		scm_request_requeue(scmrq);
		return -EINVAL;
	}

	ret = eadm_start_aob(scmrq->aob);
	if (ret) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
	return ret;
}

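/*
 * Request function of the block queue: gather up to nr_requests_per_io
 * block layer requests into a single AOB and hand it to the EADM layer
 * via scm_request_start().  Resources that are unavailable right now
 * (scm_request, aidaw page) cause the remaining requests to stay queued
 * until the queue is run again.
 */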
static void scm_blk_request(struct request_queue *rq)
{
	struct scm_device *scmdev = rq->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_request *scmrq = NULL;
	struct request *req;

	while ((req = blk_peek_request(rq))) {
		if (req->cmd_type != REQ_TYPE_FS) {
			blk_start_request(req);
			blk_dump_rq_flags(req, KMSG_COMPONENT " bad request");
			__blk_end_request_all(req, -EIO);
			continue;
		}

		if (!scm_permit_request(bdev, req))
			goto out;

		if (!scmrq) {
			scmrq = scm_request_fetch();
			if (!scmrq) {
				SCM_LOG(5, "no request");
				goto out;
			}
			scm_request_init(bdev, scmrq);
		}
		scm_request_set(scmrq, req);

		if (!scm_reserve_cluster(scmrq)) {
			SCM_LOG(5, "cluster busy");
			scm_request_set(scmrq, NULL);
			if (scmrq->aob->request.msb_count)
				goto out;

			scm_request_done(scmrq);
			return;
		}

		if (scm_need_cluster_request(scmrq)) {
			if (scmrq->aob->request.msb_count) {
				/* Start cluster requests separately. */
				scm_request_set(scmrq, NULL);
				if (scm_request_start(scmrq))
					return;
			} else {
				atomic_inc(&bdev->queued_reqs);
				blk_start_request(req);
				scm_initiate_cluster_request(scmrq);
			}
			scmrq = NULL;
			continue;
		}

		if (scm_request_prepare(scmrq)) {
			SCM_LOG(5, "aidaw alloc failed");
			scm_request_set(scmrq, NULL);
			goto out;
		}
		blk_start_request(req);

		if (scmrq->aob->request.msb_count < nr_requests_per_io)
			continue;

		if (scm_request_start(scmrq))
			return;

		scmrq = NULL;
	}
out:
	if (scmrq)
		scm_request_start(scmrq);
	else
		scm_ensure_queue_restart(bdev);
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == -ETIMEDOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

void scm_blk_irq(struct scm_device *scmdev, void *data, int error)
{
	struct scm_request *scmrq = data;
	struct scm_blk_dev *bdev = scmrq->bdev;

	scmrq->error = error;
	if (error)
		__scmrq_log_error(scmrq);

	spin_lock(&bdev->lock);
	list_add_tail(&scmrq->list, &bdev->finished_requests);
	spin_unlock(&bdev->lock);
	tasklet_hi_schedule(&bdev->tasklet);
}

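/*
 * Called from the tasklet for a failed request that still has retries
 * left: either restart the AOB right away or requeue the block layer
 * requests under the queue lock.
 */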
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != -EIO)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	spin_lock_irqsave(&bdev->rq_lock, flags);
	scm_request_requeue(scmrq);
	spin_unlock_irqrestore(&bdev->rq_lock, flags);
}

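/*
 * Tasklet: walk the list of finished requests, retry failed ones and
 * complete the rest, then let the block layer push more work into the
 * queue.
 */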
static void scm_blk_tasklet(struct scm_blk_dev *bdev)
{
	struct scm_request *scmrq;
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	while (!list_empty(&bdev->finished_requests)) {
		scmrq = list_first_entry(&bdev->finished_requests,
					 struct scm_request, list);
		list_del(&scmrq->list);
		spin_unlock_irqrestore(&bdev->lock, flags);

		if (scmrq->error && scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);

			/* Request restarted or requeued, handle next. */
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		if (scm_test_cluster_request(scmrq)) {
			scm_cluster_request_irq(scmrq);
			spin_lock_irqsave(&bdev->lock, flags);
			continue;
		}

		scm_request_finish(scmrq);
		spin_lock_irqsave(&bdev->lock, flags);
	}
	spin_unlock_irqrestore(&bdev->lock, flags);

	blk_run_queue(bdev->rq);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

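/*
 * Set up the block queue and gendisk for one SCM increment.  Disks are
 * named scma, scmb, ... scmz, scmaa, ... scmzz, which limits the driver
 * to 702 devices (hence the devindex > 701 check below).
 */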
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	struct request_queue *rq;
	int len, ret = -ENOMEM;
	unsigned int devindex, nr_max_blk;

	devindex = atomic_inc_return(&nr_devices) - 1;

	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->rq_lock);
	spin_lock_init(&bdev->lock);
	INIT_LIST_HEAD(&bdev->finished_requests);
	atomic_set(&bdev->queued_reqs, 0);
	tasklet_init(&bdev->tasklet,
		     (void (*)(unsigned long)) scm_blk_tasklet,
		     (unsigned long) bdev);

	rq = blk_init_queue(scm_blk_request, &bdev->rq_lock);
	if (!rq)
		goto out;

	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3);
	blk_queue_max_segments(rq, nr_max_blk);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, rq);
	queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, rq);
	scm_blk_dev_cluster_setup(bdev);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk)
		goto out_queue;

	rq->queuedata = scmdev;
	bdev->gendisk->driverfs_dev = &scmdev->dev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	add_disk(bdev->gendisk);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out:
	atomic_dec(&nr_devices);
	return ret;
}

void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	tasklet_kill(&bdev->tasklet);
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	put_disk(bdev->gendisk);
}

void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return scm_cluster_size_valid();
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);