// SPDX-License-Identifier: GPL-2.0
/*
 * Block driver for s390 storage class memory.
 *
 * Copyright IBM Corp. 2012
 * Author(s): Sebastian Ott <sebott@linux.vnet.ibm.com>
 */

#define KMSG_COMPONENT "scm_block"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/blk-mq.h>
#include <linux/genhd.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <asm/eadm.h>
#include "scm_blk.h"

debug_info_t *scm_debug;
static int scm_major;
static mempool_t *aidaw_pool;
static DEFINE_SPINLOCK(list_lock);
static LIST_HEAD(inactive_requests);
static unsigned int nr_requests = 64;
static unsigned int nr_requests_per_io = 8;
static atomic_t nr_devices = ATOMIC_INIT(0);
module_param(nr_requests, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests, "Number of parallel requests.");

module_param(nr_requests_per_io, uint, S_IRUGO);
MODULE_PARM_DESC(nr_requests_per_io, "Number of requests per IO.");

MODULE_DESCRIPTION("Block driver for s390 storage class memory.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("scm:scmdev*");

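/*
 * Release a single preallocated request: the AOB page, the per-I/O
 * request array, and the embedding aob_rq_header.
 */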
static void __scm_free_rq(struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);

	free_page((unsigned long) scmrq->aob);
	kfree(scmrq->request);
	kfree(aobrq);
}

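/*
 * Free all requests on the inactive list and destroy the aidaw mempool.
 * Used on module exit and on the init error path.
 */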
static void scm_free_rqs(void)
{
	struct list_head *iter, *safe;
	struct scm_request *scmrq;

	spin_lock_irq(&list_lock);
	list_for_each_safe(iter, safe, &inactive_requests) {
		scmrq = list_entry(iter, struct scm_request, list);
		list_del(&scmrq->list);
		__scm_free_rq(scmrq);
	}
	spin_unlock_irq(&list_lock);

	mempool_destroy(aidaw_pool);
}

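/*
 * Allocate one scm_request: the embedding aob_rq_header, a zeroed AOB
 * page (GFP_DMA, i.e. 31-bit addressable) and the array mapping each msb
 * slot to its struct request. The new request goes on the inactive list.
 */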
static int __scm_alloc_rq(void)
{
	struct aob_rq_header *aobrq;
	struct scm_request *scmrq;

	aobrq = kzalloc(sizeof(*aobrq) + sizeof(*scmrq), GFP_KERNEL);
	if (!aobrq)
		return -ENOMEM;

	scmrq = (void *) aobrq->data;
	scmrq->aob = (void *) get_zeroed_page(GFP_DMA);
	if (!scmrq->aob)
		goto free;

	scmrq->request = kcalloc(nr_requests_per_io, sizeof(scmrq->request[0]),
				 GFP_KERNEL);
	if (!scmrq->request)
		goto free;

	INIT_LIST_HEAD(&scmrq->list);
	spin_lock_irq(&list_lock);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irq(&list_lock);

	return 0;
free:
	__scm_free_rq(scmrq);
	return -ENOMEM;
}

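/*
 * Create the aidaw mempool and preallocate nrqs requests. On failure a
 * partial allocation is cleaned up by the caller via scm_free_rqs().
 */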
static int scm_alloc_rqs(unsigned int nrqs)
{
	int ret = 0;

	aidaw_pool = mempool_create_page_pool(max(nrqs/8, 1U), 0);
	if (!aidaw_pool)
		return -ENOMEM;

	while (nrqs-- && !ret)
		ret = __scm_alloc_rq();

	return ret;
}

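/* Take a request off the inactive list; returns NULL if none is left. */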
static struct scm_request *scm_request_fetch(void)
{
	struct scm_request *scmrq = NULL;

	spin_lock_irq(&list_lock);
	if (list_empty(&inactive_requests))
		goto out;
	scmrq = list_first_entry(&inactive_requests, struct scm_request, list);
	list_del(&scmrq->list);
out:
	spin_unlock_irq(&list_lock);
	return scmrq;
}

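/*
 * Give back aidaw pages that came from the mempool (recognizable by their
 * page-aligned data address) and put the request back on the inactive list.
 */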
static void scm_request_done(struct scm_request *scmrq)
{
	unsigned long flags;
	struct msb *msb;
	u64 aidaw;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		msb = &scmrq->aob->msb[i];
		aidaw = msb->data_addr;

		if ((msb->flags & MSB_FLAG_IDA) && aidaw &&
		    IS_ALIGNED(aidaw, PAGE_SIZE))
			mempool_free(virt_to_page(aidaw), aidaw_pool);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&scmrq->list, &inactive_requests);
	spin_unlock_irqrestore(&list_lock, flags);
}

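/* Refuse writes while write access to the device is prohibited. */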
static bool scm_permit_request(struct scm_blk_dev *bdev, struct request *req)
{
	return rq_data_dir(req) != WRITE || bdev->state != SCM_WR_PROHIBIT;
}

static inline struct aidaw *scm_aidaw_alloc(void)
{
	struct page *page = mempool_alloc(aidaw_pool, GFP_ATOMIC);

	return page ? page_address(page) : NULL;
}

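/*
 * Calculate how many bytes of request data the rest of the current aidaw
 * page can still describe: every aidaw entry that fits between *aidaw and
 * the next page boundary addresses one page of data.
 */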
static inline unsigned long scm_aidaw_bytes(struct aidaw *aidaw)
{
	unsigned long _aidaw = (unsigned long) aidaw;
	unsigned long bytes = ALIGN(_aidaw, PAGE_SIZE) - _aidaw;

	return (bytes / sizeof(*aidaw)) * PAGE_SIZE;
}

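/*
 * Reuse the current aidaw list if it has enough room left for the request,
 * otherwise start a fresh, zeroed page from the mempool.
 */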
struct aidaw *scm_aidaw_fetch(struct scm_request *scmrq, unsigned int bytes)
{
	struct aidaw *aidaw;

	if (scm_aidaw_bytes(scmrq->next_aidaw) >= bytes)
		return scmrq->next_aidaw;

	aidaw = scm_aidaw_alloc();
	if (aidaw)
		memset(aidaw, 0, PAGE_SIZE);
	return aidaw;
}

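/*
 * Fill in the next msb of the AOB for one block layer request: 4K block
 * size, the storage address derived from the request's sector position,
 * the operation code, and an indirect aidaw list built from the request's
 * bio segments.
 */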
static int scm_request_prepare(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	struct scm_device *scmdev = bdev->gendisk->private_data;
	int pos = scmrq->aob->request.msb_count;
	struct msb *msb = &scmrq->aob->msb[pos];
	struct request *req = scmrq->request[pos];
	struct req_iterator iter;
	struct aidaw *aidaw;
	struct bio_vec bv;

	aidaw = scm_aidaw_fetch(scmrq, blk_rq_bytes(req));
	if (!aidaw)
		return -ENOMEM;

	msb->bs = MSB_BS_4K;
	scmrq->aob->request.msb_count++;
	msb->scm_addr = scmdev->address + ((u64) blk_rq_pos(req) << 9);
	msb->oc = (rq_data_dir(req) == READ) ? MSB_OC_READ : MSB_OC_WRITE;
	msb->flags |= MSB_FLAG_IDA;
	msb->data_addr = (u64) aidaw;

	rq_for_each_segment(bv, req, iter) {
		WARN_ON(bv.bv_offset);
		msb->blk_count += bv.bv_len >> 12;
		aidaw->data_addr = (u64) page_address(bv.bv_page);
		aidaw++;
	}

	scmrq->next_aidaw = aidaw;
	return 0;
}

static inline void scm_request_set(struct scm_request *scmrq,
				   struct request *req)
{
	scmrq->request[scmrq->aob->request.msb_count] = req;
}

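/*
 * (Re)initialize a request for a new AOB: clear the request array and the
 * AOB, and point next_aidaw at the unused area behind the msbs so small
 * requests need no extra aidaw page.
 */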
static inline void scm_request_init(struct scm_blk_dev *bdev,
				    struct scm_request *scmrq)
{
	struct aob_rq_header *aobrq = to_aobrq(scmrq);
	struct aob *aob = scmrq->aob;

	memset(scmrq->request, 0,
	       nr_requests_per_io * sizeof(scmrq->request[0]));
	memset(aob, 0, sizeof(*aob));
	aobrq->scmdev = bdev->scmdev;
	aob->request.cmd_code = ARQB_CMD_MOVE;
	aob->request.data = (u64) aobrq;
	scmrq->bdev = bdev;
	scmrq->retries = 4;
	scmrq->error = BLK_STS_OK;

	scmrq->next_aidaw = (void *) &aob->msb[nr_requests_per_io];
}

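/*
 * Push all block layer requests of a failed scm_request back to the
 * blk-mq requeue list and recycle the scm_request.
 */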
static void scm_request_requeue(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++)
		blk_mq_requeue_request(scmrq->request[i], false);

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
	blk_mq_kick_requeue_list(bdev->rq);
}

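/*
 * Complete all block layer requests of an scm_request, passing the final
 * status on via the per-request PDU, and recycle the scm_request.
 */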
static void scm_request_finish(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	blk_status_t *error;
	int i;

	for (i = 0; i < nr_requests_per_io && scmrq->request[i]; i++) {
		error = blk_mq_rq_to_pdu(scmrq->request[i]);
		*error = scmrq->error;
		if (likely(!blk_should_fake_timeout(scmrq->request[i]->q)))
			blk_mq_complete_request(scmrq->request[i]);
	}

	atomic_dec(&bdev->queued_reqs);
	scm_request_done(scmrq);
}

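/* Hand the AOB to the EADM subchannel; requeue if none is available. */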
static void scm_request_start(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;

	atomic_inc(&bdev->queued_reqs);
	if (eadm_start_aob(scmrq->aob)) {
		SCM_LOG(5, "no subchannel");
		scm_request_requeue(scmrq);
	}
}

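/* Per-hardware-queue state: the scm_request currently being gathered. */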
struct scm_queue {
	struct scm_request *scmrq;
	spinlock_t lock;
};

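/*
 * blk-mq ->queue_rq handler: gather up to nr_requests_per_io block layer
 * requests into a single AOB per hardware queue and start the AOB once it
 * is full or the batch ends (qd->last).
 */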
static blk_status_t scm_blk_request(struct blk_mq_hw_ctx *hctx,
				    const struct blk_mq_queue_data *qd)
{
	struct scm_device *scmdev = hctx->queue->queuedata;
	struct scm_blk_dev *bdev = dev_get_drvdata(&scmdev->dev);
	struct scm_queue *sq = hctx->driver_data;
	struct request *req = qd->rq;
	struct scm_request *scmrq;

	spin_lock(&sq->lock);
	if (!scm_permit_request(bdev, req)) {
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}

	scmrq = sq->scmrq;
	if (!scmrq) {
		scmrq = scm_request_fetch();
		if (!scmrq) {
			SCM_LOG(5, "no request");
			spin_unlock(&sq->lock);
			return BLK_STS_RESOURCE;
		}
		scm_request_init(bdev, scmrq);
		sq->scmrq = scmrq;
	}
	scm_request_set(scmrq, req);

	if (scm_request_prepare(scmrq)) {
		SCM_LOG(5, "aidaw alloc failed");
		scm_request_set(scmrq, NULL);

		if (scmrq->aob->request.msb_count)
			scm_request_start(scmrq);

		sq->scmrq = NULL;
		spin_unlock(&sq->lock);
		return BLK_STS_RESOURCE;
	}
	blk_mq_start_request(req);

	if (qd->last || scmrq->aob->request.msb_count == nr_requests_per_io) {
		scm_request_start(scmrq);
		sq->scmrq = NULL;
	}
	spin_unlock(&sq->lock);
	return BLK_STS_OK;
}

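/* Set up the per-hardware-queue gather state. */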
static int scm_blk_init_hctx(struct blk_mq_hw_ctx *hctx, void *data,
			     unsigned int idx)
{
	struct scm_queue *qd = kzalloc(sizeof(*qd), GFP_KERNEL);

	if (!qd)
		return -ENOMEM;

	spin_lock_init(&qd->lock);
	hctx->driver_data = qd;

	return 0;
}

static void scm_blk_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
	struct scm_queue *qd = hctx->driver_data;

	WARN_ON(qd->scmrq);
	kfree(hctx->driver_data);
	hctx->driver_data = NULL;
}

static void __scmrq_log_error(struct scm_request *scmrq)
{
	struct aob *aob = scmrq->aob;

	if (scmrq->error == BLK_STS_TIMEOUT)
		SCM_LOG(1, "Request timeout");
	else {
		SCM_LOG(1, "Request error");
		SCM_LOG_HEX(1, &aob->response, sizeof(aob->response));
	}
	if (scmrq->retries)
		SCM_LOG(1, "Retry request");
	else
		pr_err("An I/O operation to SCM failed with rc=%d\n",
		       scmrq->error);
}

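/*
 * Retry a failed request: a write-prohibit response suspends write access
 * to the device and requeues the request; other errors restart the AOB
 * directly, falling back to a requeue if that fails.
 */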
static void scm_blk_handle_error(struct scm_request *scmrq)
{
	struct scm_blk_dev *bdev = scmrq->bdev;
	unsigned long flags;

	if (scmrq->error != BLK_STS_IOERR)
		goto restart;

	/* For -EIO the response block is valid. */
	switch (scmrq->aob->response.eqc) {
	case EQC_WR_PROHIBIT:
		spin_lock_irqsave(&bdev->lock, flags);
		if (bdev->state != SCM_WR_PROHIBIT)
			pr_info("%lx: Write access to the SCM increment is suspended\n",
				(unsigned long) bdev->scmdev->address);
		bdev->state = SCM_WR_PROHIBIT;
		spin_unlock_irqrestore(&bdev->lock, flags);
		goto requeue;
	default:
		break;
	}

restart:
	if (!eadm_start_aob(scmrq->aob))
		return;

requeue:
	scm_request_requeue(scmrq);
}

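/*
 * Interruption handler, called when an AOB completes: retry failed
 * requests while retries are left, otherwise finish the request.
 */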
void scm_blk_irq(struct scm_device *scmdev, void *data, blk_status_t error)
{
	struct scm_request *scmrq = data;

	scmrq->error = error;
	if (error) {
		__scmrq_log_error(scmrq);
		if (scmrq->retries-- > 0) {
			scm_blk_handle_error(scmrq);
			return;
		}
	}

	scm_request_finish(scmrq);
}

static void scm_blk_request_done(struct request *req)
{
	blk_status_t *error = blk_mq_rq_to_pdu(req);

	blk_mq_end_request(req, *error);
}

static const struct block_device_operations scm_blk_devops = {
	.owner = THIS_MODULE,
};

static const struct blk_mq_ops scm_mq_ops = {
	.queue_rq = scm_blk_request,
	.complete = scm_blk_request_done,
	.init_hctx = scm_blk_init_hctx,
	.exit_hctx = scm_blk_exit_hctx,
};

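/*
 * Set up the block device for an SCM increment: allocate tag set and
 * request queue, configure the queue for 4K blocks, and register a disk
 * named scma..scmz, scmaa..scmzz (hence at most 702 devices).
 */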
int scm_blk_dev_setup(struct scm_blk_dev *bdev, struct scm_device *scmdev)
{
	unsigned int devindex, nr_max_blk;
	struct request_queue *rq;
	int len, ret;

	devindex = atomic_inc_return(&nr_devices) - 1;
	/* scma..scmz + scmaa..scmzz */
	if (devindex > 701) {
		ret = -ENODEV;
		goto out;
	}

	bdev->scmdev = scmdev;
	bdev->state = SCM_OPER;
	spin_lock_init(&bdev->lock);
	atomic_set(&bdev->queued_reqs, 0);

	bdev->tag_set.ops = &scm_mq_ops;
	bdev->tag_set.cmd_size = sizeof(blk_status_t);
	bdev->tag_set.nr_hw_queues = nr_requests;
	bdev->tag_set.queue_depth = nr_requests_per_io * nr_requests;
	bdev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(&bdev->tag_set);
	if (ret)
		goto out;

	rq = blk_mq_init_queue(&bdev->tag_set);
	if (IS_ERR(rq)) {
		ret = PTR_ERR(rq);
		goto out_tag;
	}
	bdev->rq = rq;
	nr_max_blk = min(scmdev->nr_max_block,
			 (unsigned int) (PAGE_SIZE / sizeof(struct aidaw)));

	blk_queue_logical_block_size(rq, 1 << 12);
	blk_queue_max_hw_sectors(rq, nr_max_blk << 3); /* 8 * 512 = blk_size */
	blk_queue_max_segments(rq, nr_max_blk);
	blk_queue_flag_set(QUEUE_FLAG_NONROT, rq);
	blk_queue_flag_clear(QUEUE_FLAG_ADD_RANDOM, rq);

	bdev->gendisk = alloc_disk(SCM_NR_PARTS);
	if (!bdev->gendisk) {
		ret = -ENOMEM;
		goto out_queue;
	}
	rq->queuedata = scmdev;
	bdev->gendisk->private_data = scmdev;
	bdev->gendisk->fops = &scm_blk_devops;
	bdev->gendisk->queue = rq;
	bdev->gendisk->major = scm_major;
	bdev->gendisk->first_minor = devindex * SCM_NR_PARTS;

	len = snprintf(bdev->gendisk->disk_name, DISK_NAME_LEN, "scm");
	if (devindex > 25) {
		len += snprintf(bdev->gendisk->disk_name + len,
				DISK_NAME_LEN - len, "%c",
				'a' + (devindex / 26) - 1);
		devindex = devindex % 26;
	}
	snprintf(bdev->gendisk->disk_name + len, DISK_NAME_LEN - len, "%c",
		 'a' + devindex);

	/* 512 byte sectors */
	set_capacity(bdev->gendisk, scmdev->size >> 9);
	device_add_disk(&scmdev->dev, bdev->gendisk, NULL);
	return 0;

out_queue:
	blk_cleanup_queue(rq);
out_tag:
	blk_mq_free_tag_set(&bdev->tag_set);
out:
	atomic_dec(&nr_devices);
	return ret;
}

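/* Tear down the block device of an SCM increment. */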
void scm_blk_dev_cleanup(struct scm_blk_dev *bdev)
{
	del_gendisk(bdev->gendisk);
	blk_cleanup_queue(bdev->gendisk->queue);
	blk_mq_free_tag_set(&bdev->tag_set);
	put_disk(bdev->gendisk);
}

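/* Re-enable write access after a write-prohibit condition has been lifted. */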
void scm_blk_set_available(struct scm_blk_dev *bdev)
{
	unsigned long flags;

	spin_lock_irqsave(&bdev->lock, flags);
	if (bdev->state == SCM_WR_PROHIBIT)
		pr_info("%lx: Write access to the SCM increment is restored\n",
			(unsigned long) bdev->scmdev->address);
	bdev->state = SCM_OPER;
	spin_unlock_irqrestore(&bdev->lock, flags);
}

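/* Reject nonsensical module parameters before any resources are set up. */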
static bool __init scm_blk_params_valid(void)
{
	if (!nr_requests_per_io || nr_requests_per_io > 64)
		return false;

	return true;
}

static int __init scm_blk_init(void)
{
	int ret = -EINVAL;

	if (!scm_blk_params_valid())
		goto out;

	ret = register_blkdev(0, "scm");
	if (ret < 0)
		goto out;

	scm_major = ret;
	ret = scm_alloc_rqs(nr_requests);
	if (ret)
		goto out_free;

	scm_debug = debug_register("scm_log", 16, 1, 16);
	if (!scm_debug) {
		ret = -ENOMEM;
		goto out_free;
	}

	debug_register_view(scm_debug, &debug_hex_ascii_view);
	debug_set_level(scm_debug, 2);

	ret = scm_drv_init();
	if (ret)
		goto out_dbf;

	return ret;

out_dbf:
	debug_unregister(scm_debug);
out_free:
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
out:
	return ret;
}
module_init(scm_blk_init);

static void __exit scm_blk_cleanup(void)
{
	scm_drv_cleanup();
	debug_unregister(scm_debug);
	scm_free_rqs();
	unregister_blkdev(scm_major, "scm");
}
module_exit(scm_blk_cleanup);