linux/drivers/mmc/card/queue.c
/*
 *  linux/drivers/mmc/card/queue.c
 *
 *  Copyright (C) 2003 Russell King, All Rights Reserved.
 *  Copyright 2006-2007 Pierre Ossman
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/scatterlist.h>
#include <linux/dma-mapping.h>

#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include "queue.h"

#define MMC_QUEUE_BOUNCESZ      65536

/*
 * Prepare an MMC request. This just filters out odd stuff.
 */
static int mmc_prep_request(struct request_queue *q, struct request *req)
{
        struct mmc_queue *mq = q->queuedata;

        /*
         * We only like normal block requests and discards.
         */
        if (req->cmd_type != REQ_TYPE_FS && !(req->cmd_flags & REQ_DISCARD)) {
                blk_dump_rq_flags(req, "MMC bad request");
                return BLKPREP_KILL;
        }

        if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;

        req->cmd_flags |= REQ_DONTPREP;

        return BLKPREP_OK;
}

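/*
 * Per-card worker thread.  Requests are pulled off the block layer queue
 * and handed to mq->issue_fn(); the current and previous request slots are
 * then swapped so the host can prepare the next request while the previous
 * one completes.  When the queue is empty the thread releases thread_sem
 * and sleeps until mmc_request_fn() wakes it up again.
 */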
static int mmc_queue_thread(void *d)
{
        struct mmc_queue *mq = d;
        struct request_queue *q = mq->queue;

        current->flags |= PF_MEMALLOC;

        down(&mq->thread_sem);
        do {
                struct request *req = NULL;
                struct mmc_queue_req *tmp;
                unsigned int cmd_flags = 0;

                spin_lock_irq(q->queue_lock);
                set_current_state(TASK_INTERRUPTIBLE);
                req = blk_fetch_request(q);
                mq->mqrq_cur->req = req;
                spin_unlock_irq(q->queue_lock);

                if (req || mq->mqrq_prev->req) {
                        set_current_state(TASK_RUNNING);
                        cmd_flags = req ? req->cmd_flags : 0;
                        mq->issue_fn(mq, req);
                        if (mq->flags & MMC_QUEUE_NEW_REQUEST) {
                                mq->flags &= ~MMC_QUEUE_NEW_REQUEST;
                                continue; /* fetch again */
                        }

                        /*
                         * The current request becomes the previous request,
                         * and vice versa.  Special requests have already been
                         * completed at this point, so do not carry them over
                         * as the previous request.
                         */
                        if (cmd_flags & MMC_REQ_SPECIAL_MASK)
                                mq->mqrq_cur->req = NULL;

                        mq->mqrq_prev->brq.mrq.data = NULL;
                        mq->mqrq_prev->req = NULL;
                        tmp = mq->mqrq_prev;
                        mq->mqrq_prev = mq->mqrq_cur;
                        mq->mqrq_cur = tmp;
                } else {
                        if (kthread_should_stop()) {
                                set_current_state(TASK_RUNNING);
                                break;
                        }
                        up(&mq->thread_sem);
                        schedule();
                        down(&mq->thread_sem);
                }
        } while (1);
        up(&mq->thread_sem);

        return 0;
}

/*
 * Generic MMC request handler.  This is called for any queue on a
 * particular host.  When the host is not busy, we look for a request
 * on any queue on this host, and attempt to issue it.  This may
 * not be the queue we were asked to process.
 */
static void mmc_request_fn(struct request_queue *q)
{
        struct mmc_queue *mq = q->queuedata;
        struct request *req;
        unsigned long flags;
        struct mmc_context_info *cntx;

        if (!mq) {
                while ((req = blk_fetch_request(q)) != NULL) {
                        req->cmd_flags |= REQ_QUIET;
                        __blk_end_request_all(req, -EIO);
                }
                return;
        }

        cntx = &mq->card->host->context_info;
        if (!mq->mqrq_cur->req && mq->mqrq_prev->req) {
                /*
                 * A new request arrived while the MMC thread may be blocked
                 * waiting for the previous request to complete, with no
                 * current request fetched.
                 */
                spin_lock_irqsave(&cntx->lock, flags);
                if (cntx->is_waiting_last_req) {
                        cntx->is_new_req = true;
                        wake_up_interruptible(&cntx->wait);
                }
                spin_unlock_irqrestore(&cntx->lock, flags);
        } else if (!mq->mqrq_cur->req && !mq->mqrq_prev->req)
                wake_up_process(mq->thread);
}

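/*
 * Allocate and initialise a scatterlist with sg_len entries.  Returns NULL
 * and sets *err to -ENOMEM if the allocation fails.
 */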
static struct scatterlist *mmc_alloc_sg(int sg_len, int *err)
{
        struct scatterlist *sg;

        sg = kmalloc(sizeof(struct scatterlist)*sg_len, GFP_KERNEL);
        if (!sg)
                *err = -ENOMEM;
        else {
                *err = 0;
                sg_init_table(sg, sg_len);
        }

        return sg;
}

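/*
 * Advertise the card's discard capabilities to the block layer: maximum
 * discard size, discard granularity (the card's preferred erase size) and,
 * where the card supports it, secure discard.
 */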
static void mmc_queue_setup_discard(struct request_queue *q,
                                    struct mmc_card *card)
{
        unsigned max_discard;

        max_discard = mmc_calc_max_discard(card);
        if (!max_discard)
                return;

        queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
        q->limits.max_discard_sectors = max_discard;
        if (card->erased_byte == 0 && !mmc_can_discard(card))
                q->limits.discard_zeroes_data = 1;
        q->limits.discard_granularity = card->pref_erase << 9;
        /* granularity must not be greater than max. discard */
        if (card->pref_erase > max_discard)
                q->limits.discard_granularity = 0;
        if (mmc_can_secure_erase_trim(card))
                queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}

/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 * @subname: partition subname
 *
 * Initialise an MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
                   spinlock_t *lock, const char *subname)
{
        struct mmc_host *host = card->host;
        u64 limit = BLK_BOUNCE_HIGH;
        int ret;
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
                limit = (u64)dma_max_pfn(mmc_dev(host)) << PAGE_SHIFT;

        mq->card = card;
        mq->queue = blk_init_queue(mmc_request_fn, lock);
        if (!mq->queue)
                return -ENOMEM;

        mq->mqrq_cur = mqrq_cur;
        mq->mqrq_prev = mqrq_prev;
        mq->queue->queuedata = mq;

        blk_queue_prep_rq(mq->queue, mmc_prep_request);
        queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);
        queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, mq->queue);
        if (mmc_can_erase(card))
                mmc_queue_setup_discard(mq->queue, card);

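        /*
         * Hosts limited to a single segment benefit from a bounce buffer:
         * requests are gathered into one contiguous buffer (at most
         * MMC_QUEUE_BOUNCESZ bytes, further clamped to the host limits) so
         * that they can still be issued as a single transfer.  If either
         * bounce buffer cannot be allocated, the regular non-bounced limits
         * below are used instead.
         */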
#ifdef CONFIG_MMC_BLOCK_BOUNCE
        if (host->max_segs == 1) {
                unsigned int bouncesz;

                bouncesz = MMC_QUEUE_BOUNCESZ;

                if (bouncesz > host->max_req_size)
                        bouncesz = host->max_req_size;
                if (bouncesz > host->max_seg_size)
                        bouncesz = host->max_seg_size;
                if (bouncesz > (host->max_blk_count * 512))
                        bouncesz = host->max_blk_count * 512;

                if (bouncesz > 512) {
                        mqrq_cur->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
                        if (!mqrq_cur->bounce_buf) {
                                pr_warn("%s: unable to allocate bounce cur buffer\n",
                                        mmc_card_name(card));
                        } else {
                                mqrq_prev->bounce_buf =
                                                kmalloc(bouncesz, GFP_KERNEL);
                                if (!mqrq_prev->bounce_buf) {
                                        pr_warn("%s: unable to allocate bounce prev buffer\n",
                                                mmc_card_name(card));
                                        kfree(mqrq_cur->bounce_buf);
                                        mqrq_cur->bounce_buf = NULL;
                                }
                        }
                }

                if (mqrq_cur->bounce_buf && mqrq_prev->bounce_buf) {
                        blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
                        blk_queue_max_hw_sectors(mq->queue, bouncesz / 512);
                        blk_queue_max_segments(mq->queue, bouncesz / 512);
                        blk_queue_max_segment_size(mq->queue, bouncesz);

                        mqrq_cur->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_cur->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->sg = mmc_alloc_sg(1, &ret);
                        if (ret)
                                goto cleanup_queue;

                        mqrq_prev->bounce_sg =
                                mmc_alloc_sg(bouncesz / 512, &ret);
                        if (ret)
                                goto cleanup_queue;
                }
        }
#endif

        if (!mqrq_cur->bounce_buf && !mqrq_prev->bounce_buf) {
                blk_queue_bounce_limit(mq->queue, limit);
                blk_queue_max_hw_sectors(mq->queue,
                        min(host->max_blk_count, host->max_req_size / 512));
                blk_queue_max_segments(mq->queue, host->max_segs);
                blk_queue_max_segment_size(mq->queue, host->max_seg_size);

                mqrq_cur->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;

                mqrq_prev->sg = mmc_alloc_sg(host->max_segs, &ret);
                if (ret)
                        goto cleanup_queue;
        }

        sema_init(&mq->thread_sem, 1);

        mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd/%d%s",
                host->index, subname ? subname : "");

        if (IS_ERR(mq->thread)) {
                ret = PTR_ERR(mq->thread);
                goto free_bounce_sg;
        }

        return 0;
 free_bounce_sg:
        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;
        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

 cleanup_queue:
        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;
        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;
        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        blk_cleanup_queue(mq->queue);
        return ret;
}

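/*
 * Illustrative usage sketch (not part of the original file): callers pair
 * mmc_init_queue() with mmc_cleanup_queue() around the lifetime of the block
 * device they export, and point mq->issue_fn at their request handler.  The
 * snippet below is loosely modelled on the mmc_blk caller; the names are
 * shown only as an example.
 *
 *      ret = mmc_init_queue(&md->queue, card, &md->lock, subname);
 *      if (ret)
 *              goto err;
 *      md->queue.issue_fn = mmc_blk_issue_rq;
 *      ...
 *      mmc_cleanup_queue(&md->queue);
 */

/**
 * mmc_cleanup_queue - tear down a queue created by mmc_init_queue()
 * @mq: MMC queue to clean up
 *
 * Resume the queue if it was suspended, stop the worker thread, then clear
 * queuedata and restart the queue so mmc_request_fn() fails any remaining
 * requests with -EIO, and finally free the scatterlists and bounce buffers.
 */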
void mmc_cleanup_queue(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;
        struct mmc_queue_req *mqrq_cur = mq->mqrq_cur;
        struct mmc_queue_req *mqrq_prev = mq->mqrq_prev;

        /* Make sure the queue isn't suspended, as that will deadlock */
        mmc_queue_resume(mq);

        /* Then terminate our worker thread */
        kthread_stop(mq->thread);

        /* Empty the queue */
        spin_lock_irqsave(q->queue_lock, flags);
        q->queuedata = NULL;
        blk_start_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

        kfree(mqrq_cur->bounce_sg);
        mqrq_cur->bounce_sg = NULL;

        kfree(mqrq_cur->sg);
        mqrq_cur->sg = NULL;

        kfree(mqrq_cur->bounce_buf);
        mqrq_cur->bounce_buf = NULL;

        kfree(mqrq_prev->bounce_sg);
        mqrq_prev->bounce_sg = NULL;

        kfree(mqrq_prev->sg);
        mqrq_prev->sg = NULL;

        kfree(mqrq_prev->bounce_buf);
        mqrq_prev->bounce_buf = NULL;

        mq->card = NULL;
}
EXPORT_SYMBOL(mmc_cleanup_queue);

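/*
 * Allocate the packed command descriptors used to merge several requests
 * into a single packed read/write.  Both request slots get a descriptor; if
 * either allocation fails, anything already allocated is freed and -ENOMEM
 * is returned.
 */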
int mmc_packed_init(struct mmc_queue *mq, struct mmc_card *card)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];
        int ret = 0;

        mqrq_cur->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_cur->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_cur\n",
                        mmc_card_name(card));
                ret = -ENOMEM;
                goto out;
        }

        mqrq_prev->packed = kzalloc(sizeof(struct mmc_packed), GFP_KERNEL);
        if (!mqrq_prev->packed) {
                pr_warn("%s: unable to allocate packed cmd for mqrq_prev\n",
                        mmc_card_name(card));
                kfree(mqrq_cur->packed);
                mqrq_cur->packed = NULL;
                ret = -ENOMEM;
                goto out;
        }

        INIT_LIST_HEAD(&mqrq_cur->packed->list);
        INIT_LIST_HEAD(&mqrq_prev->packed->list);

out:
        return ret;
}

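/*
 * Free the packed command descriptors allocated by mmc_packed_init().
 */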
void mmc_packed_clean(struct mmc_queue *mq)
{
        struct mmc_queue_req *mqrq_cur = &mq->mqrq[0];
        struct mmc_queue_req *mqrq_prev = &mq->mqrq[1];

        kfree(mqrq_cur->packed);
        mqrq_cur->packed = NULL;
        kfree(mqrq_prev->packed);
        mqrq_prev->packed = NULL;
}

/**
 * mmc_queue_suspend - suspend an MMC request queue
 * @mq: MMC queue to suspend
 *
 * Stop the block request queue, and wait for our thread to
 * complete any outstanding requests.  This ensures that we
 * won't suspend while a request is being processed.
 */
void mmc_queue_suspend(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (!(mq->flags & MMC_QUEUE_SUSPENDED)) {
                mq->flags |= MMC_QUEUE_SUSPENDED;

                spin_lock_irqsave(q->queue_lock, flags);
                blk_stop_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);

                down(&mq->thread_sem);
        }
}

/**
 * mmc_queue_resume - resume a previously suspended MMC request queue
 * @mq: MMC queue to resume
 */
void mmc_queue_resume(struct mmc_queue *mq)
{
        struct request_queue *q = mq->queue;
        unsigned long flags;

        if (mq->flags & MMC_QUEUE_SUSPENDED) {
                mq->flags &= ~MMC_QUEUE_SUSPENDED;

                up(&mq->thread_sem);

                spin_lock_irqsave(q->queue_lock, flags);
                blk_start_queue(q);
                spin_unlock_irqrestore(q->queue_lock, flags);
        }
}

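/*
 * Map a packed command onto a scatterlist.  For packed writes the leading
 * entries cover the packed command header; every request on packed->list is
 * then mapped after it.  The end-of-list marker is cleared on intermediate
 * entries as the list is built, and only the final entry is marked with
 * sg_mark_end().
 */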
static unsigned int mmc_queue_packed_map_sg(struct mmc_queue *mq,
                                            struct mmc_packed *packed,
                                            struct scatterlist *sg,
                                            enum mmc_packed_type cmd_type)
{
        struct scatterlist *__sg = sg;
        unsigned int sg_len = 0;
        struct request *req;

        if (mmc_packed_wr(cmd_type)) {
                unsigned int hdr_sz = mmc_large_sector(mq->card) ? 4096 : 512;
                unsigned int max_seg_sz = queue_max_segment_size(mq->queue);
                unsigned int len, remain, offset = 0;
                u8 *buf = (u8 *)packed->cmd_hdr;

                remain = hdr_sz;
                do {
                        len = min(remain, max_seg_sz);
                        sg_set_buf(__sg, buf + offset, len);
                        offset += len;
                        remain -= len;
                        (__sg++)->page_link &= ~0x02;
                        sg_len++;
                } while (remain);
        }

        list_for_each_entry(req, &packed->list, queuelist) {
                sg_len += blk_rq_map_sg(mq->queue, req, __sg);
                __sg = sg + (sg_len - 1);
                (__sg++)->page_link &= ~0x02;
        }
        sg_mark_end(sg + (sg_len - 1));
        return sg_len;
}

/*
 * Prepare the sg list(s) to be handed off to the host driver
 */
unsigned int mmc_queue_map_sg(struct mmc_queue *mq, struct mmc_queue_req *mqrq)
{
        unsigned int sg_len;
        size_t buflen;
        struct scatterlist *sg;
        enum mmc_packed_type cmd_type;
        int i;

        cmd_type = mqrq->cmd_type;

        if (!mqrq->bounce_buf) {
                if (mmc_packed_cmd(cmd_type))
                        return mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                       mqrq->sg, cmd_type);
                else
                        return blk_rq_map_sg(mq->queue, mqrq->req, mqrq->sg);
        }

        BUG_ON(!mqrq->bounce_sg);

        if (mmc_packed_cmd(cmd_type))
                sg_len = mmc_queue_packed_map_sg(mq, mqrq->packed,
                                                 mqrq->bounce_sg, cmd_type);
        else
                sg_len = blk_rq_map_sg(mq->queue, mqrq->req, mqrq->bounce_sg);

        mqrq->bounce_sg_len = sg_len;

        buflen = 0;
        for_each_sg(mqrq->bounce_sg, sg, sg_len, i)
                buflen += sg->length;

        sg_init_one(mqrq->sg, mqrq->bounce_buf, buflen);

        return 1;
}

/*
 * If writing, bounce the data to the buffer before the request
 * is sent to the host driver
 */
void mmc_queue_bounce_pre(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != WRITE)
                return;

        sg_copy_to_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}

/*
 * If reading, bounce the data from the buffer after the request
 * has been handled by the host driver
 */
void mmc_queue_bounce_post(struct mmc_queue_req *mqrq)
{
        if (!mqrq->bounce_buf)
                return;

        if (rq_data_dir(mqrq->req) != READ)
                return;

        sg_copy_from_buffer(mqrq->bounce_sg, mqrq->bounce_sg_len,
                mqrq->bounce_buf, mqrq->sg[0].length);
}