linux/drivers/mmc/core/mmc_test.c
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright 2007-2008 Pierre Ossman
 */

#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/mmc/mmc.h>
#include <linux/slab.h>

#include <linux/scatterlist.h>
#include <linux/swap.h>         /* For nr_free_buffer_pages() */
#include <linux/list.h>

#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/module.h>

#include "core.h"
#include "card.h"
#include "host.h"
#include "bus.h"
#include "mmc_ops.h"

#define RESULT_OK               0
#define RESULT_FAIL             1
#define RESULT_UNSUP_HOST       2
#define RESULT_UNSUP_CARD       3

#define BUFFER_ORDER            2
#define BUFFER_SIZE             (PAGE_SIZE << BUFFER_ORDER)
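/* i.e. 16 KiB when PAGE_SIZE is 4 KiB */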
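/* The buffer-alignment tests use buffer offsets 1..TEST_ALIGN_END - 1 */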
#define TEST_ALIGN_END          8

/*
 * Limit the test area size to the maximum MMC HC erase group size.  Note that
 * the maximum SD allocation unit size is just 4MiB.
 */
#define TEST_AREA_MAX_SIZE (128 * 1024 * 1024)

/**
 * struct mmc_test_pages - pages allocated by 'alloc_pages()'.
 * @page: first page in the allocation
 * @order: order of the allocation (the allocation is 2^@order pages)
 */
struct mmc_test_pages {
        struct page *page;
        unsigned int order;
};

/**
 * struct mmc_test_mem - allocated memory.
 * @arr: array of allocations
 * @cnt: number of allocations
 */
struct mmc_test_mem {
        struct mmc_test_pages *arr;
        unsigned int cnt;
};

/**
 * struct mmc_test_area - information for performance tests.
 * @max_sz: test area size (in bytes)
 * @dev_addr: address on card at which to do performance tests
 * @max_tfr: maximum transfer size allowed by driver (in bytes)
 * @max_segs: maximum segments allowed by driver in scatterlist @sg
 * @max_seg_sz: maximum segment size allowed by driver
 * @blocks: number of (512 byte) blocks currently mapped by @sg
 * @sg_len: length of currently mapped scatterlist @sg
 * @mem: allocated memory
 * @sg: scatterlist
 * @sg_areq: scatterlist for non-blocking request
 */
struct mmc_test_area {
        unsigned long max_sz;
        unsigned int dev_addr;
        unsigned int max_tfr;
        unsigned int max_segs;
        unsigned int max_seg_sz;
        unsigned int blocks;
        unsigned int sg_len;
        struct mmc_test_mem *mem;
        struct scatterlist *sg;
        struct scatterlist *sg_areq;
};

/**
 * struct mmc_test_transfer_result - transfer results for performance tests.
 * @link: double-linked list
 * @count: number of groups of sectors checked
 * @sectors: number of sectors in each group
 * @ts: time values of transfer
 * @rate: calculated transfer rate
 * @iops: I/O operations per second (times 100)
 */
struct mmc_test_transfer_result {
        struct list_head link;
        unsigned int count;
        unsigned int sectors;
        struct timespec64 ts;
        unsigned int rate;
        unsigned int iops;
};

/**
 * struct mmc_test_general_result - results for tests.
 * @link: double-linked list
 * @card: card under test
 * @testcase: number of test case
 * @result: result of test run
 * @tr_lst: transfer measurements if any as mmc_test_transfer_result
 */
struct mmc_test_general_result {
        struct list_head link;
        struct mmc_card *card;
        int testcase;
        int result;
        struct list_head tr_lst;
};

/**
 * struct mmc_test_dbgfs_file - debugfs related file.
 * @link: double-linked list
 * @card: card under test
 * @file: file created under debugfs
 */
struct mmc_test_dbgfs_file {
        struct list_head link;
        struct mmc_card *card;
        struct dentry *file;
};

/**
 * struct mmc_test_card - test information.
 * @card: card under test
 * @scratch: transfer buffer
 * @buffer: transfer buffer
 * @highmem: buffer for highmem tests
 * @area: information for performance tests
 * @gr: pointer to results of current testcase
 */
struct mmc_test_card {
        struct mmc_card *card;

        u8              scratch[BUFFER_SIZE];
        u8              *buffer;
#ifdef CONFIG_HIGHMEM
        struct page     *highmem;
#endif
        struct mmc_test_area            area;
        struct mmc_test_general_result  *gr;
};

enum mmc_test_prep_media {
        MMC_TEST_PREP_NONE = 0,
        MMC_TEST_PREP_WRITE_FULL = 1 << 0,
        MMC_TEST_PREP_ERASE = 1 << 1,
};

struct mmc_test_multiple_rw {
        unsigned int *sg_len;
        unsigned int *bs;
        unsigned int len;
        unsigned int size;
        bool do_write;
        bool do_nonblock_req;
        enum mmc_test_prep_media prepare;
};

/*******************************************************************/
/*  General helper functions                                       */
/*******************************************************************/

/*
 * Configure correct block size in card
 */
static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size)
{
        return mmc_set_blocklen(test->card, size);
}

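/*
 * CMD23 (SET_BLOCK_COUNT) is usable with all MMC cards, but only with SD
 * cards that advertise support for it in their SCR register.
 */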
static bool mmc_test_card_cmd23(struct mmc_card *card)
{
        return mmc_card_mmc(card) ||
               (mmc_card_sd(card) && card->scr.cmds & SD_SCR_CMD23_SUPPORT);
}

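/*
 * Prepare CMD23 for a multi-block request, or clear mrq->sbc when the host,
 * the card or the request cannot use SET_BLOCK_COUNT.
 */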
static void mmc_test_prepare_sbc(struct mmc_test_card *test,
                                 struct mmc_request *mrq, unsigned int blocks)
{
        struct mmc_card *card = test->card;

        if (!mrq->sbc || !mmc_host_cmd23(card->host) ||
            !mmc_test_card_cmd23(card) || !mmc_op_multi(mrq->cmd->opcode) ||
            (card->quirks & MMC_QUIRK_BLK_NO_CMD23)) {
                mrq->sbc = NULL;
                return;
        }

        mrq->sbc->opcode = MMC_SET_BLOCK_COUNT;
        mrq->sbc->arg = blocks;
        mrq->sbc->flags = MMC_RSP_R1 | MMC_CMD_AC;
}

/*
 * Fill in the mmc_request structure given a set of transfer parameters.
 */
static void mmc_test_prepare_mrq(struct mmc_test_card *test,
        struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len,
        unsigned dev_addr, unsigned blocks, unsigned blksz, int write)
{
        if (WARN_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop))
                return;

        if (blocks > 1) {
                mrq->cmd->opcode = write ?
                        MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK;
        } else {
                mrq->cmd->opcode = write ?
                        MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
        }

        mrq->cmd->arg = dev_addr;
        if (!mmc_card_blockaddr(test->card))
                mrq->cmd->arg <<= 9;

        mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC;

        if (blocks == 1)
                mrq->stop = NULL;
        else {
                mrq->stop->opcode = MMC_STOP_TRANSMISSION;
                mrq->stop->arg = 0;
                mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC;
        }

        mrq->data->blksz = blksz;
        mrq->data->blocks = blocks;
        mrq->data->flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;
        mrq->data->sg = sg;
        mrq->data->sg_len = sg_len;

        mmc_test_prepare_sbc(test, mrq, blocks);

        mmc_set_data_timeout(mrq->data, test->card);
}

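/*
 * The card is busy if it is not ready for data or still in the programming
 * state.
 */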
static int mmc_test_busy(struct mmc_command *cmd)
{
        return !(cmd->resp[0] & R1_READY_FOR_DATA) ||
                (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG);
}

/*
 * Wait for the card to finish the busy state
 */
static int mmc_test_wait_busy(struct mmc_test_card *test)
{
        int ret, busy;
        struct mmc_command cmd = {};

        busy = 0;
        do {
                memset(&cmd, 0, sizeof(struct mmc_command));

                cmd.opcode = MMC_SEND_STATUS;
                cmd.arg = test->card->rca << 16;
                cmd.flags = MMC_RSP_R1 | MMC_CMD_AC;

                ret = mmc_wait_for_cmd(test->card->host, &cmd, 0);
                if (ret)
                        break;

                if (!busy && mmc_test_busy(&cmd)) {
                        busy = 1;
                        if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY)
                                pr_info("%s: Warning: Host did not wait for busy state to end.\n",
                                        mmc_hostname(test->card->host));
                }
        } while (mmc_test_busy(&cmd));

        return ret;
}

/*
 * Transfer a single sector of kernel addressable data
 */
static int mmc_test_buffer_transfer(struct mmc_test_card *test,
        u8 *buffer, unsigned addr, unsigned blksz, int write)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_command stop = {};
        struct mmc_data data = {};

        struct scatterlist sg;

        mrq.cmd = &cmd;
        mrq.data = &data;
        mrq.stop = &stop;

        sg_init_one(&sg, buffer, blksz);

        mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write);

        mmc_wait_for_req(test->card->host, &mrq);

        if (cmd.error)
                return cmd.error;
        if (data.error)
                return data.error;

        return mmc_test_wait_busy(test);
}

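/*
 * Free all pages referenced by a struct mmc_test_mem, then the bookkeeping
 * itself.
 */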
static void mmc_test_free_mem(struct mmc_test_mem *mem)
{
        if (!mem)
                return;
        while (mem->cnt--)
                __free_pages(mem->arr[mem->cnt].page,
                             mem->arr[mem->cnt].order);
        kfree(mem->arr);
        kfree(mem);
}

/*
 * Allocate a lot of memory, preferably max_sz but at least min_sz.  In case
 * there isn't much memory do not exceed 1/16th total lowmem pages.  Also do
 * not exceed a maximum number of segments and try not to make segments much
 * bigger than maximum segment size.
 */
static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz,
                                               unsigned long max_sz,
                                               unsigned int max_segs,
                                               unsigned int max_seg_sz)
{
        unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE);
        unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE);
        unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE);
        unsigned long page_cnt = 0;
        unsigned long limit = nr_free_buffer_pages() >> 4;
        struct mmc_test_mem *mem;

        if (max_page_cnt > limit)
                max_page_cnt = limit;
        if (min_page_cnt > max_page_cnt)
                min_page_cnt = max_page_cnt;

        if (max_seg_page_cnt > max_page_cnt)
                max_seg_page_cnt = max_page_cnt;

        if (max_segs > max_page_cnt)
                max_segs = max_page_cnt;

        mem = kzalloc(sizeof(*mem), GFP_KERNEL);
        if (!mem)
                return NULL;

        mem->arr = kcalloc(max_segs, sizeof(*mem->arr), GFP_KERNEL);
        if (!mem->arr)
                goto out_free;

        while (max_page_cnt) {
                struct page *page;
                unsigned int order;
                gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN |
                                __GFP_NORETRY;

                order = get_order(max_seg_page_cnt << PAGE_SHIFT);
                while (1) {
                        page = alloc_pages(flags, order);
                        if (page || !order)
                                break;
                        order -= 1;
                }
                if (!page) {
                        if (page_cnt < min_page_cnt)
                                goto out_free;
                        break;
                }
                mem->arr[mem->cnt].page = page;
                mem->arr[mem->cnt].order = order;
                mem->cnt += 1;
                if (max_page_cnt <= (1UL << order))
                        break;
                max_page_cnt -= 1UL << order;
                page_cnt += 1UL << order;
                if (mem->cnt >= max_segs) {
                        if (page_cnt < min_page_cnt)
                                goto out_free;
                        break;
                }
        }

        return mem;

out_free:
        mmc_test_free_mem(mem);
        return NULL;
}

/*
 * Map memory into a scatterlist.  Optionally allow the same memory to be
 * mapped more than once.
 */
static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size,
                           struct scatterlist *sglist, int repeat,
                           unsigned int max_segs, unsigned int max_seg_sz,
                           unsigned int *sg_len, int min_sg_len)
{
        struct scatterlist *sg = NULL;
        unsigned int i;
        unsigned long sz = size;

        sg_init_table(sglist, max_segs);
        if (min_sg_len > max_segs)
                min_sg_len = max_segs;

        *sg_len = 0;
        do {
                for (i = 0; i < mem->cnt; i++) {
                        unsigned long len = PAGE_SIZE << mem->arr[i].order;

                        if (min_sg_len && (size / min_sg_len < len))
                                len = ALIGN(size / min_sg_len, 512);
                        if (len > sz)
                                len = sz;
                        if (len > max_seg_sz)
                                len = max_seg_sz;
                        if (sg)
                                sg = sg_next(sg);
                        else
                                sg = sglist;
                        if (!sg)
                                return -EINVAL;
                        sg_set_page(sg, mem->arr[i].page, len, 0);
                        sz -= len;
                        *sg_len += 1;
                        if (!sz)
                                break;
                }
        } while (sz && repeat);

        if (sz)
                return -EINVAL;

        if (sg)
                sg_mark_end(sg);

        return 0;
}

/*
 * Map memory into a scatterlist so that no pages are contiguous.  Allow the
 * same memory to be mapped more than once.
 */
static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem,
                                       unsigned long sz,
                                       struct scatterlist *sglist,
                                       unsigned int max_segs,
                                       unsigned int max_seg_sz,
                                       unsigned int *sg_len)
{
        struct scatterlist *sg = NULL;
        unsigned int i = mem->cnt, cnt;
        unsigned long len;
        void *base, *addr, *last_addr = NULL;

        sg_init_table(sglist, max_segs);

        *sg_len = 0;
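        /*
         * Walk the allocations backwards and map single pages in descending
         * address order, skipping any page that would be physically
         * contiguous with the previously mapped one, so that no two adjacent
         * scatterlist entries are contiguous.
         */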
        while (sz) {
                base = page_address(mem->arr[--i].page);
                cnt = 1 << mem->arr[i].order;
                while (sz && cnt) {
                        addr = base + PAGE_SIZE * --cnt;
                        if (last_addr && last_addr + PAGE_SIZE == addr)
                                continue;
                        last_addr = addr;
                        len = PAGE_SIZE;
                        if (len > max_seg_sz)
                                len = max_seg_sz;
                        if (len > sz)
                                len = sz;
                        if (sg)
                                sg = sg_next(sg);
                        else
                                sg = sglist;
                        if (!sg)
                                return -EINVAL;
                        sg_set_page(sg, virt_to_page(addr), len, 0);
                        sz -= len;
                        *sg_len += 1;
                }
                if (i == 0)
                        i = mem->cnt;
        }

        if (sg)
                sg_mark_end(sg);

        return 0;
}

/*
 * Calculate transfer rate in bytes per second.
 */
static unsigned int mmc_test_rate(uint64_t bytes, struct timespec64 *ts)
{
        uint64_t ns;

        ns = timespec64_to_ns(ts);
        bytes *= 1000000000;

        while (ns > UINT_MAX) {
                bytes >>= 1;
                ns >>= 1;
        }

        if (!ns)
                return 0;

        do_div(bytes, (uint32_t)ns);

        return bytes;
}
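
/*
 * Example: 1 MiB (1048576 bytes) transferred in 0.25 s gives
 * 1048576 * 1000000000 / 250000000 = 4194304 bytes per second.
 */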

/*
 * Save transfer results for future usage
 */
static void mmc_test_save_transfer_result(struct mmc_test_card *test,
        unsigned int count, unsigned int sectors, struct timespec64 ts,
        unsigned int rate, unsigned int iops)
{
        struct mmc_test_transfer_result *tr;

        if (!test->gr)
                return;

        tr = kmalloc(sizeof(*tr), GFP_KERNEL);
        if (!tr)
                return;

        tr->count = count;
        tr->sectors = sectors;
        tr->ts = ts;
        tr->rate = rate;
        tr->iops = iops;

        list_add_tail(&tr->link, &test->gr->tr_lst);
}

/*
 * Print the transfer rate.
 */
static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes,
                                struct timespec64 *ts1, struct timespec64 *ts2)
{
        unsigned int rate, iops, sectors = bytes >> 9;
        struct timespec64 ts;

        ts = timespec64_sub(*ts2, *ts1);

        rate = mmc_test_rate(bytes, &ts);
        iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */

        pr_info("%s: Transfer of %u sectors (%u%s KiB) took %llu.%09u "
                         "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n",
                         mmc_hostname(test->card->host), sectors, sectors >> 1,
                         (sectors & 1 ? ".5" : ""), (u64)ts.tv_sec,
                         (u32)ts.tv_nsec, rate / 1000, rate / 1024,
                         iops / 100, iops % 100);

        mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops);
}

/*
 * Print the average transfer rate.
 */
static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes,
                                    unsigned int count, struct timespec64 *ts1,
                                    struct timespec64 *ts2)
{
        unsigned int rate, iops, sectors = bytes >> 9;
        uint64_t tot = bytes * count;
        struct timespec64 ts;

        ts = timespec64_sub(*ts2, *ts1);

        rate = mmc_test_rate(tot, &ts);
        iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */

        pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took "
                         "%llu.%09u seconds (%u kB/s, %u KiB/s, "
                         "%u.%02u IOPS, sg_len %d)\n",
                         mmc_hostname(test->card->host), count, sectors, count,
                         sectors >> 1, (sectors & 1 ? ".5" : ""),
                         (u64)ts.tv_sec, (u32)ts.tv_nsec,
                         rate / 1000, rate / 1024, iops / 100, iops % 100,
                         test->area.sg_len);

        mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops);
}

/*
 * Return the card size in sectors.
 */
static unsigned int mmc_test_capacity(struct mmc_card *card)
{
        if (!mmc_card_sd(card) && mmc_card_blockaddr(card))
                return card->ext_csd.sectors;
        else
                return card->csd.capacity << (card->csd.read_blkbits - 9);
}

/*******************************************************************/
/*  Test preparation and cleanup                                   */
/*******************************************************************/

/*
 * Fill the first couple of sectors of the card with known data
 * so that bad reads/writes can be detected
 */
static int __mmc_test_prepare(struct mmc_test_card *test, int write, int val)
{
        int ret, i;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
                return ret;

        if (write)
                memset(test->buffer, val, 512);
        else {
                for (i = 0; i < 512; i++)
                        test->buffer[i] = i;
        }

        for (i = 0; i < BUFFER_SIZE / 512; i++) {
                ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_prepare_write(struct mmc_test_card *test)
{
        return __mmc_test_prepare(test, 1, 0xDF);
}

static int mmc_test_prepare_read(struct mmc_test_card *test)
{
        return __mmc_test_prepare(test, 0, 0);
}

static int mmc_test_cleanup(struct mmc_test_card *test)
{
        return __mmc_test_prepare(test, 1, 0);
}

/*******************************************************************/
/*  Test execution helpers                                         */
/*******************************************************************/

/*
 * Modifies the mmc_request to perform the "short transfer" tests
 */
static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test,
        struct mmc_request *mrq, int write)
{
        if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
                return;

        if (mrq->data->blocks > 1) {
                mrq->cmd->opcode = write ?
                        MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK;
                mrq->stop = NULL;
        } else {
                mrq->cmd->opcode = MMC_SEND_STATUS;
                mrq->cmd->arg = test->card->rca << 16;
        }
}

/*
 * Checks that a normal transfer didn't have any errors
 */
static int mmc_test_check_result(struct mmc_test_card *test,
                                 struct mmc_request *mrq)
{
        int ret;

        if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
                return -EINVAL;

        ret = 0;

        if (mrq->sbc && mrq->sbc->error)
                ret = mrq->sbc->error;
        if (!ret && mrq->cmd->error)
                ret = mrq->cmd->error;
        if (!ret && mrq->data->error)
                ret = mrq->data->error;
        if (!ret && mrq->stop && mrq->stop->error)
                ret = mrq->stop->error;
        if (!ret && mrq->data->bytes_xfered !=
                mrq->data->blocks * mrq->data->blksz)
                ret = RESULT_FAIL;

        if (ret == -EINVAL)
                ret = RESULT_UNSUP_HOST;

        return ret;
}

/*
 * Checks that a "short transfer" behaved as expected
 */
static int mmc_test_check_broken_result(struct mmc_test_card *test,
        struct mmc_request *mrq)
{
        int ret;

        if (WARN_ON(!mrq || !mrq->cmd || !mrq->data))
                return -EINVAL;

        ret = 0;

        if (!ret && mrq->cmd->error)
                ret = mrq->cmd->error;
        if (!ret && mrq->data->error == 0)
                ret = RESULT_FAIL;
        if (!ret && mrq->data->error != -ETIMEDOUT)
                ret = mrq->data->error;
        if (!ret && mrq->stop && mrq->stop->error)
                ret = mrq->stop->error;
        if (mrq->data->blocks > 1) {
                if (!ret && mrq->data->bytes_xfered > mrq->data->blksz)
                        ret = RESULT_FAIL;
        } else {
                if (!ret && mrq->data->bytes_xfered > 0)
                        ret = RESULT_FAIL;
        }

        if (ret == -EINVAL)
                ret = RESULT_UNSUP_HOST;

        return ret;
}

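/*
 * All the request structures needed for one transfer, allocated together for
 * the non-blocking tests.
 */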
struct mmc_test_req {
        struct mmc_request mrq;
        struct mmc_command sbc;
        struct mmc_command cmd;
        struct mmc_command stop;
        struct mmc_command status;
        struct mmc_data data;
};

/*
 * Reset a test request and re-attach its embedded command and data structures
 */
static void mmc_test_req_reset(struct mmc_test_req *rq)
{
        memset(rq, 0, sizeof(struct mmc_test_req));

        rq->mrq.cmd = &rq->cmd;
        rq->mrq.data = &rq->data;
        rq->mrq.stop = &rq->stop;
}

static struct mmc_test_req *mmc_test_req_alloc(void)
{
        struct mmc_test_req *rq = kmalloc(sizeof(*rq), GFP_KERNEL);

        if (rq)
                mmc_test_req_reset(rq);

        return rq;
}

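/*
 * Request completion callback used by the non-blocking tests: wake the waiter.
 */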
static void mmc_test_wait_done(struct mmc_request *mrq)
{
        complete(&mrq->completion);
}

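/*
 * Start the next request and, if a previous one is in flight, wait for it to
 * complete and check its result.  mmc_pre_req()/mmc_post_req() let the host
 * prepare one request while another is still being transferred.
 */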
static int mmc_test_start_areq(struct mmc_test_card *test,
                               struct mmc_request *mrq,
                               struct mmc_request *prev_mrq)
{
        struct mmc_host *host = test->card->host;
        int err = 0;

        if (mrq) {
                init_completion(&mrq->completion);
                mrq->done = mmc_test_wait_done;
                mmc_pre_req(host, mrq);
        }

        if (prev_mrq) {
                wait_for_completion(&prev_mrq->completion);
                err = mmc_test_wait_busy(test);
                if (!err)
                        err = mmc_test_check_result(test, prev_mrq);
        }

        if (!err && mrq) {
                err = mmc_start_request(host, mrq);
                if (err)
                        mmc_retune_release(host);
        }

        if (prev_mrq)
                mmc_post_req(host, prev_mrq, 0);

        if (err && mrq)
                mmc_post_req(host, mrq, err);

        return err;
}

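/*
 * Tests non-blocking transfer with certain parameters
 */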
static int mmc_test_nonblock_transfer(struct mmc_test_card *test,
                                      unsigned int dev_addr, int write,
                                      int count)
{
        struct mmc_test_req *rq1, *rq2;
        struct mmc_request *mrq, *prev_mrq;
        int i;
        int ret = RESULT_OK;
        struct mmc_test_area *t = &test->area;
        struct scatterlist *sg = t->sg;
        struct scatterlist *sg_areq = t->sg_areq;

        rq1 = mmc_test_req_alloc();
        rq2 = mmc_test_req_alloc();
        if (!rq1 || !rq2) {
                ret = RESULT_FAIL;
                goto err;
        }

        mrq = &rq1->mrq;
        prev_mrq = NULL;

        for (i = 0; i < count; i++) {
                mmc_test_req_reset(container_of(mrq, struct mmc_test_req, mrq));
                mmc_test_prepare_mrq(test, mrq, sg, t->sg_len, dev_addr,
                                     t->blocks, 512, write);
                ret = mmc_test_start_areq(test, mrq, prev_mrq);
                if (ret)
                        goto err;

                if (!prev_mrq)
                        prev_mrq = &rq2->mrq;

                swap(mrq, prev_mrq);
                swap(sg, sg_areq);
                dev_addr += t->blocks;
        }

        ret = mmc_test_start_areq(test, NULL, prev_mrq);
err:
        kfree(rq1);
        kfree(rq2);
        return ret;
}

/*
 * Tests a basic transfer with certain parameters
 */
static int mmc_test_simple_transfer(struct mmc_test_card *test,
        struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
        unsigned blocks, unsigned blksz, int write)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_command stop = {};
        struct mmc_data data = {};

        mrq.cmd = &cmd;
        mrq.data = &data;
        mrq.stop = &stop;

        mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr,
                blocks, blksz, write);

        mmc_wait_for_req(test->card->host, &mrq);

        mmc_test_wait_busy(test);

        return mmc_test_check_result(test, &mrq);
}

/*
 * Tests a transfer where the card will fail completely or partly
 */
static int mmc_test_broken_transfer(struct mmc_test_card *test,
        unsigned blocks, unsigned blksz, int write)
{
        struct mmc_request mrq = {};
        struct mmc_command cmd = {};
        struct mmc_command stop = {};
        struct mmc_data data = {};

        struct scatterlist sg;

        mrq.cmd = &cmd;
        mrq.data = &data;
        mrq.stop = &stop;

        sg_init_one(&sg, test->buffer, blocks * blksz);

        mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write);
        mmc_test_prepare_broken_mrq(test, &mrq, write);

        mmc_wait_for_req(test->card->host, &mrq);

        mmc_test_wait_busy(test);

        return mmc_test_check_broken_result(test, &mrq);
}

/*
 * Does a complete transfer test where data is also validated
 *
 * Note: mmc_test_prepare() must have been done before this call
 */
static int mmc_test_transfer(struct mmc_test_card *test,
        struct scatterlist *sg, unsigned sg_len, unsigned dev_addr,
        unsigned blocks, unsigned blksz, int write)
{
        int ret, i;
        unsigned long flags;

        if (write) {
                for (i = 0; i < blocks * blksz; i++)
                        test->scratch[i] = i;
        } else {
                memset(test->scratch, 0, BUFFER_SIZE);
        }
        local_irq_save(flags);
        sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
        local_irq_restore(flags);

        ret = mmc_test_set_blksize(test, blksz);
        if (ret)
                return ret;

        ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
                blocks, blksz, write);
        if (ret)
                return ret;

        if (write) {
                int sectors;

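                /*
                 * Read back what was written, including one extra sector when
                 * the transfer was sector-aligned, so the 0xDF fill laid down
                 * by mmc_test_prepare_write() beyond the transfer can be
                 * checked for corruption.
                 */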
                ret = mmc_test_set_blksize(test, 512);
                if (ret)
                        return ret;

                sectors = (blocks * blksz + 511) / 512;
                if ((sectors * 512) == (blocks * blksz))
                        sectors++;

                if ((sectors * 512) > BUFFER_SIZE)
                        return -EINVAL;

                memset(test->buffer, 0, sectors * 512);

                for (i = 0; i < sectors; i++) {
                        ret = mmc_test_buffer_transfer(test,
                                test->buffer + i * 512,
                                dev_addr + i, 512, 0);
                        if (ret)
                                return ret;
                }

                for (i = 0; i < blocks * blksz; i++) {
                        if (test->buffer[i] != (u8)i)
                                return RESULT_FAIL;
                }

                for (; i < sectors * 512; i++) {
                        if (test->buffer[i] != 0xDF)
                                return RESULT_FAIL;
                }
        } else {
                local_irq_save(flags);
                sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
                local_irq_restore(flags);
                for (i = 0; i < blocks * blksz; i++) {
                        if (test->scratch[i] != (u8)i)
                                return RESULT_FAIL;
                }
        }

        return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

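/*
 * A test case: its name plus optional prepare and cleanup callbacks around
 * the run callback.
 */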
struct mmc_test_case {
        const char *name;

        int (*prepare)(struct mmc_test_card *);
        int (*run)(struct mmc_test_card *);
        int (*cleanup)(struct mmc_test_card *);
};

static int mmc_test_basic_write(struct mmc_test_card *test)
{
        int ret;
        struct scatterlist sg;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
                return ret;

        sg_init_one(&sg, test->buffer, 512);

        return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_basic_read(struct mmc_test_card *test)
{
        int ret;
        struct scatterlist sg;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
                return ret;

        sg_init_one(&sg, test->buffer, 512);

        return mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_verify_write(struct mmc_test_card *test)
{
        struct scatterlist sg;

        sg_init_one(&sg, test->buffer, 512);

        return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_verify_read(struct mmc_test_card *test)
{
        struct scatterlist sg;

        sg_init_one(&sg, test->buffer, 512);

        return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write(struct mmc_test_card *test)
{
        unsigned int size;
        struct scatterlist sg;

        if (test->card->host->max_blk_count == 1)
                return RESULT_UNSUP_HOST;

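        /*
         * Try a two-page transfer, capped by the host's maximum request
         * size, segment size and block count.
         */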
        size = PAGE_SIZE * 2;
        size = min(size, test->card->host->max_req_size);
        size = min(size, test->card->host->max_seg_size);
        size = min(size, test->card->host->max_blk_count * 512);

        if (size < 1024)
                return RESULT_UNSUP_HOST;

        sg_init_one(&sg, test->buffer, size);

        return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read(struct mmc_test_card *test)
{
        unsigned int size;
        struct scatterlist sg;

        if (test->card->host->max_blk_count == 1)
                return RESULT_UNSUP_HOST;

        size = PAGE_SIZE * 2;
        size = min(size, test->card->host->max_req_size);
        size = min(size, test->card->host->max_seg_size);
        size = min(size, test->card->host->max_blk_count * 512);

        if (size < 1024)
                return RESULT_UNSUP_HOST;

        sg_init_one(&sg, test->buffer, size);

        return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

static int mmc_test_pow2_write(struct mmc_test_card *test)
{
        int ret, i;
        struct scatterlist sg;

        if (!test->card->csd.write_partial)
                return RESULT_UNSUP_CARD;

        for (i = 1; i < 512; i <<= 1) {
                sg_init_one(&sg, test->buffer, i);
                ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_pow2_read(struct mmc_test_card *test)
{
        int ret, i;
        struct scatterlist sg;

        if (!test->card->csd.read_partial)
                return RESULT_UNSUP_CARD;

        for (i = 1; i < 512; i <<= 1) {
                sg_init_one(&sg, test->buffer, i);
                ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_weird_write(struct mmc_test_card *test)
{
        int ret, i;
        struct scatterlist sg;

        if (!test->card->csd.write_partial)
                return RESULT_UNSUP_CARD;

        for (i = 3; i < 512; i += 7) {
                sg_init_one(&sg, test->buffer, i);
                ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_weird_read(struct mmc_test_card *test)
{
        int ret, i;
        struct scatterlist sg;

        if (!test->card->csd.read_partial)
                return RESULT_UNSUP_CARD;

        for (i = 3; i < 512; i += 7) {
                sg_init_one(&sg, test->buffer, i);
                ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_align_write(struct mmc_test_card *test)
{
        int ret, i;
        struct scatterlist sg;

        for (i = 1; i < TEST_ALIGN_END; i++) {
                sg_init_one(&sg, test->buffer + i, 512);
                ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_align_read(struct mmc_test_card *test)
{
        int ret, i;
        struct scatterlist sg;

        for (i = 1; i < TEST_ALIGN_END; i++) {
                sg_init_one(&sg, test->buffer + i, 512);
                ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
        int ret, i;
        unsigned int size;
        struct scatterlist sg;

        if (test->card->host->max_blk_count == 1)
                return RESULT_UNSUP_HOST;

        size = PAGE_SIZE * 2;
        size = min(size, test->card->host->max_req_size);
        size = min(size, test->card->host->max_seg_size);
        size = min(size, test->card->host->max_blk_count * 512);

        if (size < 1024)
                return RESULT_UNSUP_HOST;

        for (i = 1; i < TEST_ALIGN_END; i++) {
                sg_init_one(&sg, test->buffer + i, size);
                ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
        int ret, i;
        unsigned int size;
        struct scatterlist sg;

        if (test->card->host->max_blk_count == 1)
                return RESULT_UNSUP_HOST;

        size = PAGE_SIZE * 2;
        size = min(size, test->card->host->max_req_size);
        size = min(size, test->card->host->max_seg_size);
        size = min(size, test->card->host->max_blk_count * 512);

        if (size < 1024)
                return RESULT_UNSUP_HOST;

        for (i = 1; i < TEST_ALIGN_END; i++) {
                sg_init_one(&sg, test->buffer + i, size);
                ret = mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
                if (ret)
                        return ret;
        }

        return 0;
}

static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
        int ret;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
                return ret;

        return mmc_test_broken_transfer(test, 1, 512, 1);
}

static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
        int ret;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
                return ret;

        return mmc_test_broken_transfer(test, 1, 512, 0);
}

static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
        int ret;

        if (test->card->host->max_blk_count == 1)
                return RESULT_UNSUP_HOST;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
                return ret;

        return mmc_test_broken_transfer(test, 2, 512, 1);
}

static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
        int ret;

        if (test->card->host->max_blk_count == 1)
                return RESULT_UNSUP_HOST;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
                return ret;

        return mmc_test_broken_transfer(test, 2, 512, 0);
}

#ifdef CONFIG_HIGHMEM

static int mmc_test_write_high(struct mmc_test_card *test)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, test->highmem, 512, 0);

        return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
}

static int mmc_test_read_high(struct mmc_test_card *test)
{
        struct scatterlist sg;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, test->highmem, 512, 0);

        return mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
}

static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
        unsigned int size;
        struct scatterlist sg;

        if (test->card->host->max_blk_count == 1)
                return RESULT_UNSUP_HOST;

        size = PAGE_SIZE * 2;
        size = min(size, test->card->host->max_req_size);
        size = min(size, test->card->host->max_seg_size);
        size = min(size, test->card->host->max_blk_count * 512);

        if (size < 1024)
                return RESULT_UNSUP_HOST;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, test->highmem, size, 0);

        return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 1);
}

static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
        unsigned int size;
        struct scatterlist sg;

        if (test->card->host->max_blk_count == 1)
                return RESULT_UNSUP_HOST;

        size = PAGE_SIZE * 2;
        size = min(size, test->card->host->max_req_size);
        size = min(size, test->card->host->max_seg_size);
        size = min(size, test->card->host->max_blk_count * 512);

        if (size < 1024)
                return RESULT_UNSUP_HOST;

        sg_init_table(&sg, 1);
        sg_set_page(&sg, test->highmem, size, 0);

        return mmc_test_transfer(test, &sg, 1, 0, size / 512, 512, 0);
}

#else

static int mmc_test_no_highmem(struct mmc_test_card *test)
{
        pr_info("%s: Highmem not configured - test skipped\n",
               mmc_hostname(test->card->host));
        return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
                             int max_scatter, int min_sg_len, bool nonblock)
{
        struct mmc_test_area *t = &test->area;
        int err;
        unsigned int sg_len = 0;

        t->blocks = sz >> 9;

        if (max_scatter) {
                err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
                                                  t->max_segs, t->max_seg_sz,
                                                  &t->sg_len);
        } else {
                err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
                                      t->max_seg_sz, &t->sg_len, min_sg_len);
        }

        if (err || !nonblock)
                goto err;

        if (max_scatter) {
                err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg_areq,
                                                  t->max_segs, t->max_seg_sz,
                                                  &sg_len);
        } else {
                err = mmc_test_map_sg(t->mem, sz, t->sg_areq, 1, t->max_segs,
                                      t->max_seg_sz, &sg_len, min_sg_len);
        }
        if (!err && sg_len != t->sg_len)
                err = -EINVAL;

err:
        if (err)
                pr_info("%s: Failed to map sg list\n",
                       mmc_hostname(test->card->host));
        return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
                                  unsigned int dev_addr, int write)
{
        struct mmc_test_area *t = &test->area;

        return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
                                        t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
 */
static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz,
                                unsigned int dev_addr, int write,
                                int max_scatter, int timed, int count,
                                bool nonblock, int min_sg_len)
{
        struct timespec64 ts1, ts2;
        int ret = 0;
        int i;

        /*
         * In the case of a maximally scattered transfer, the maximum transfer
         * size is further limited by using PAGE_SIZE segments.
         */
        if (max_scatter) {
                struct mmc_test_area *t = &test->area;
                unsigned long max_tfr;

                if (t->max_seg_sz >= PAGE_SIZE)
                        max_tfr = t->max_segs * PAGE_SIZE;
                else
                        max_tfr = t->max_segs * t->max_seg_sz;
                if (sz > max_tfr)
                        sz = max_tfr;
        }

        ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len, nonblock);
        if (ret)
                return ret;

        if (timed)
                ktime_get_ts64(&ts1);
        if (nonblock)
                ret = mmc_test_nonblock_transfer(test, dev_addr, write, count);
        else
                for (i = 0; i < count && ret == 0; i++) {
                        ret = mmc_test_area_transfer(test, dev_addr, write);
                        dev_addr += sz >> 9;
                }

        if (ret)
                return ret;

        if (timed)
                ktime_get_ts64(&ts2);

        if (timed)
                mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2);

        return 0;
}

static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz,
                            unsigned int dev_addr, int write, int max_scatter,
                            int timed)
{
        return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter,
                                    timed, 1, false, 0);
}

/*
 * Write the test area entirely.
 */
static int mmc_test_area_fill(struct mmc_test_card *test)
{
        struct mmc_test_area *t = &test->area;

        return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0);
}

/*
 * Erase the test area entirely.
 */
static int mmc_test_area_erase(struct mmc_test_card *test)
{
        struct mmc_test_area *t = &test->area;

        if (!mmc_can_erase(test->card))
                return 0;

        return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9,
                         MMC_ERASE_ARG);
}

/*
 * Cleanup struct mmc_test_area.
 */
static int mmc_test_area_cleanup(struct mmc_test_card *test)
{
        struct mmc_test_area *t = &test->area;

        kfree(t->sg);
        kfree(t->sg_areq);
        mmc_test_free_mem(t->mem);

        return 0;
}

/*
 * Initialize an area for testing large transfers.  The test area is set to the
 * middle of the card because cards may have different characteristics at the
 * front (for FAT file system optimization).  Optionally, the area is erased
 * (if the card supports it) which may improve write performance.  Optionally,
 * the area is filled with data for subsequent read tests.
 */
static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill)
{
        struct mmc_test_area *t = &test->area;
        unsigned long min_sz = 64 * 1024, sz;
        int ret;

        ret = mmc_test_set_blksize(test, 512);
        if (ret)
                return ret;

        /*
         * Size the test area as a multiple of the preferred erase size,
         * at least 4MiB but capped at TEST_AREA_MAX_SIZE.
         */
        sz = (unsigned long)test->card->pref_erase << 9;
        t->max_sz = sz;
        while (t->max_sz < 4 * 1024 * 1024)
                t->max_sz += sz;
        while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz)
                t->max_sz -= sz;

        t->max_segs = test->card->host->max_segs;
        t->max_seg_sz = test->card->host->max_seg_size;
        t->max_seg_sz -= t->max_seg_sz % 512;

        t->max_tfr = t->max_sz;
        if (t->max_tfr >> 9 > test->card->host->max_blk_count)
                t->max_tfr = test->card->host->max_blk_count << 9;
        if (t->max_tfr > test->card->host->max_req_size)
                t->max_tfr = test->card->host->max_req_size;
        if (t->max_tfr / t->max_seg_sz > t->max_segs)
                t->max_tfr = t->max_segs * t->max_seg_sz;

        /*
         * Try to allocate enough memory for a max. sized transfer.  Less is OK
         * because the same memory can be mapped into the scatterlist more than
         * once.  Also, take into account the limits imposed on scatterlist
         * segments by the host driver.
         */
        t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs,
                                    t->max_seg_sz);
        if (!t->mem)
                return -ENOMEM;

        t->sg = kmalloc_array(t->max_segs, sizeof(*t->sg), GFP_KERNEL);
        if (!t->sg) {
                ret = -ENOMEM;
                goto out_free;
        }

        t->sg_areq = kmalloc_array(t->max_segs, sizeof(*t->sg_areq),
                                   GFP_KERNEL);
        if (!t->sg_areq) {
                ret = -ENOMEM;
                goto out_free;
        }

        t->dev_addr = mmc_test_capacity(test->card) / 2;
        t->dev_addr -= t->dev_addr % (t->max_sz >> 9);

        if (erase) {
                ret = mmc_test_area_erase(test);
                if (ret)
                        goto out_free;
        }

        if (fill) {
                ret = mmc_test_area_fill(test);
                if (ret)
                        goto out_free;
        }

        return 0;

out_free:
        mmc_test_area_cleanup(test);
        return ret;
}

/*
 * Prepare for large transfers.  Do not erase the test area.
 */
static int mmc_test_area_prepare(struct mmc_test_card *test)
{
        return mmc_test_area_init(test, 0, 0);
}

/*
 * Prepare for large transfers.  Do erase the test area.
 */
static int mmc_test_area_prepare_erase(struct mmc_test_card *test)
{
        return mmc_test_area_init(test, 1, 0);
}

/*
 * Prepare for large transfers.  Erase and fill the test area.
 */
static int mmc_test_area_prepare_fill(struct mmc_test_card *test)
{
        return mmc_test_area_init(test, 1, 1);
}

/*
 * Test best-case performance.  Best-case performance is expected from
 * a single large transfer.
 *
 * An additional option (max_scatter) allows the measurement of the same
 * transfer but with no contiguous pages in the scatter list.  This tests
 * the efficiency of DMA to handle scattered pages.
 */
static int mmc_test_best_performance(struct mmc_test_card *test, int write,
                                     int max_scatter)
{
        struct mmc_test_area *t = &test->area;

        return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write,
                                max_scatter, 1);
}

/*
 * Best-case read performance.
 */
static int mmc_test_best_read_performance(struct mmc_test_card *test)
{
        return mmc_test_best_performance(test, 0, 0);
}

/*
 * Best-case write performance.
 */
static int mmc_test_best_write_performance(struct mmc_test_card *test)
{
        return mmc_test_best_performance(test, 1, 0);
}

/*
 * Best-case read performance into scattered pages.
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
        return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
        return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
        struct mmc_test_area *t = &test->area;
        unsigned long sz;
        unsigned int dev_addr;
        int ret;

        for (sz = 512; sz < t->max_tfr; sz <<= 1) {
                dev_addr = t->dev_addr + (sz >> 9);
                ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
                if (ret)
                        return ret;
        }
        sz = t->max_tfr;
        dev_addr = t->dev_addr;
        return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
        struct mmc_test_area *t = &test->area;
        unsigned long sz;
        unsigned int dev_addr;
        int ret;

        ret = mmc_test_area_erase(test);
        if (ret)
                return ret;
        for (sz = 512; sz < t->max_tfr; sz <<= 1) {
                dev_addr = t->dev_addr + (sz >> 9);
                ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
                if (ret)
                        return ret;
        }
        ret = mmc_test_area_erase(test);
        if (ret)
                return ret;
        sz = t->max_tfr;
        dev_addr = t->dev_addr;
        return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
        struct mmc_test_area *t = &test->area;
        unsigned long sz;
        unsigned int dev_addr;
        struct timespec64 ts1, ts2;
        int ret;

        if (!mmc_can_trim(test->card))
                return RESULT_UNSUP_CARD;

        if (!mmc_can_erase(test->card))
                return RESULT_UNSUP_HOST;

        for (sz = 512; sz < t->max_sz; sz <<= 1) {
                dev_addr = t->dev_addr + (sz >> 9);
                ktime_get_ts64(&ts1);
                ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
                if (ret)
                        return ret;
                ktime_get_ts64(&ts2);
                mmc_test_print_rate(test, sz, &ts1, &ts2);
        }
        dev_addr = t->dev_addr;
        ktime_get_ts64(&ts1);
        ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
        if (ret)
                return ret;
        ktime_get_ts64(&ts2);
        mmc_test_print_rate(test, sz, &ts1, &ts2);
        return 0;
}

1780static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
1781{
1782        struct mmc_test_area *t = &test->area;
1783        unsigned int dev_addr, i, cnt;
1784        struct timespec64 ts1, ts2;
1785        int ret;
1786
1787        cnt = t->max_sz / sz;
1788        dev_addr = t->dev_addr;
1789        ktime_get_ts64(&ts1);
1790        for (i = 0; i < cnt; i++) {
1791                ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
1792                if (ret)
1793                        return ret;
1794                dev_addr += (sz >> 9);
1795        }
1796        ktime_get_ts64(&ts2);
1797        mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1798        return 0;
1799}
1800
1801/*
1802 * Consecutive read performance by transfer size.
1803 */
1804static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
1805{
1806        struct mmc_test_area *t = &test->area;
1807        unsigned long sz;
1808        int ret;
1809
1810        for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1811                ret = mmc_test_seq_read_perf(test, sz);
1812                if (ret)
1813                        return ret;
1814        }
1815        sz = t->max_tfr;
1816        return mmc_test_seq_read_perf(test, sz);
1817}
1818
1819static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
1820{
1821        struct mmc_test_area *t = &test->area;
1822        unsigned int dev_addr, i, cnt;
1823        struct timespec64 ts1, ts2;
1824        int ret;
1825
1826        ret = mmc_test_area_erase(test);
1827        if (ret)
1828                return ret;
1829        cnt = t->max_sz / sz;
1830        dev_addr = t->dev_addr;
1831        ktime_get_ts64(&ts1);
1832        for (i = 0; i < cnt; i++) {
1833                ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
1834                if (ret)
1835                        return ret;
1836                dev_addr += (sz >> 9);
1837        }
1838        ktime_get_ts64(&ts2);
1839        mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1840        return 0;
1841}
1842
1843/*
1844 * Consecutive write performance by transfer size.
1845 */
1846static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
1847{
1848        struct mmc_test_area *t = &test->area;
1849        unsigned long sz;
1850        int ret;
1851
1852        for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1853                ret = mmc_test_seq_write_perf(test, sz);
1854                if (ret)
1855                        return ret;
1856        }
1857        sz = t->max_tfr;
1858        return mmc_test_seq_write_perf(test, sz);
1859}
1860
1861/*
1862 * Consecutive trim performance by transfer size.
1863 */
1864static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
1865{
1866        struct mmc_test_area *t = &test->area;
1867        unsigned long sz;
1868        unsigned int dev_addr, i, cnt;
1869        struct timespec64 ts1, ts2;
1870        int ret;
1871
1872        if (!mmc_can_trim(test->card))
1873                return RESULT_UNSUP_CARD;
1874
1875        if (!mmc_can_erase(test->card))
1876                return RESULT_UNSUP_HOST;
1877
1878        for (sz = 512; sz <= t->max_sz; sz <<= 1) {
1879                ret = mmc_test_area_erase(test);
1880                if (ret)
1881                        return ret;
1882                ret = mmc_test_area_fill(test);
1883                if (ret)
1884                        return ret;
1885                cnt = t->max_sz / sz;
1886                dev_addr = t->dev_addr;
1887                ktime_get_ts64(&ts1);
1888                for (i = 0; i < cnt; i++) {
1889                        ret = mmc_erase(test->card, dev_addr, sz >> 9,
1890                                        MMC_TRIM_ARG);
1891                        if (ret)
1892                                return ret;
1893                        dev_addr += (sz >> 9);
1894                }
1895                ktime_get_ts64(&ts2);
1896                mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1897        }
1898        return 0;
1899}
1900
1901static unsigned int rnd_next = 1;
1902
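    /*
     * Pseudo-random numbers from the classic linear congruential generator
     * used as the example rand() in the C standard (x = x * 1103515245 +
     * 12345); the top 15 bits are scaled to give a roughly uniform value
     * in [0, rnd_cnt).
     */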
1903static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
1904{
1905        uint64_t r;
1906
1907        rnd_next = rnd_next * 1103515245 + 12345;
1908        r = (rnd_next >> 16) & 0x7fff;
1909        return (r * rnd_cnt) >> 15;
1910}
1911
1912static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
1913                             unsigned long sz)
1914{
1915        unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
1916        unsigned int ssz;
1917        struct timespec64 ts1, ts2, ts;
1918        int ret;
1919
1920        ssz = sz >> 9;
1921
1922        rnd_addr = mmc_test_capacity(test->card) / 4;
1923        range1 = rnd_addr / test->card->pref_erase;
1924        range2 = range1 / ssz;
1925
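            /*
             * Pick random addresses within a window starting one quarter into
             * the card: a random preferred-erase-group (avoiding the same
             * group twice in a row) plus a random transfer-aligned offset.
             * I/O is issued repeatedly until ten seconds have elapsed.
             */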
1926        ktime_get_ts64(&ts1);
1927        for (cnt = 0; cnt < UINT_MAX; cnt++) {
1928                ktime_get_ts64(&ts2);
1929                ts = timespec64_sub(ts2, ts1);
1930                if (ts.tv_sec >= 10)
1931                        break;
1932                ea = mmc_test_rnd_num(range1);
1933                if (ea == last_ea)
1934                        ea -= 1;
1935                last_ea = ea;
1936                dev_addr = rnd_addr + test->card->pref_erase * ea +
1937                           ssz * mmc_test_rnd_num(range2);
1938                ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
1939                if (ret)
1940                        return ret;
1941        }
1942        if (print)
1943                mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
1944        return 0;
1945}
1946
1947static int mmc_test_random_perf(struct mmc_test_card *test, int write)
1948{
1949        struct mmc_test_area *t = &test->area;
1950        unsigned int next;
1951        unsigned long sz;
1952        int ret;
1953
1954        for (sz = 512; sz < t->max_tfr; sz <<= 1) {
1955                /*
1956                 * When writing, try to get more consistent results by running
1957                 * the test twice with exactly the same I/O but outputting the
1958                 * results only for the 2nd run.
1959                 */
1960                if (write) {
1961                        next = rnd_next;
1962                        ret = mmc_test_rnd_perf(test, write, 0, sz);
1963                        if (ret)
1964                                return ret;
1965                        rnd_next = next;
1966                }
1967                ret = mmc_test_rnd_perf(test, write, 1, sz);
1968                if (ret)
1969                        return ret;
1970        }
1971        sz = t->max_tfr;
1972        if (write) {
1973                next = rnd_next;
1974                ret = mmc_test_rnd_perf(test, write, 0, sz);
1975                if (ret)
1976                        return ret;
1977                rnd_next = next;
1978        }
1979        return mmc_test_rnd_perf(test, write, 1, sz);
1980}
1981
1982/*
1983 * Random read performance by transfer size.
1984 */
1985static int mmc_test_random_read_perf(struct mmc_test_card *test)
1986{
1987        return mmc_test_random_perf(test, 0);
1988}
1989
1990/*
1991 * Random write performance by transfer size.
1992 */
1993static int mmc_test_random_write_perf(struct mmc_test_card *test)
1994{
1995        return mmc_test_random_perf(test, 1);
1996}
1997
1998static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
1999                             unsigned int tot_sz, int max_scatter)
2000{
2001        struct mmc_test_area *t = &test->area;
2002        unsigned int dev_addr, i, cnt, sz, ssz;
2003        struct timespec64 ts1, ts2;
2004        int ret;
2005
2006        sz = t->max_tfr;
2007
2008        /*
2009         * In the case of a maximally scattered transfer, the maximum transfer
2010         * size is further limited by using PAGE_SIZE segments.
2011         */
2012        if (max_scatter) {
2013                unsigned long max_tfr;
2014
2015                if (t->max_seg_sz >= PAGE_SIZE)
2016                        max_tfr = t->max_segs * PAGE_SIZE;
2017                else
2018                        max_tfr = t->max_segs * t->max_seg_sz;
2019                if (sz > max_tfr)
2020                        sz = max_tfr;
2021        }
2022
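            /*
             * Transfers start one quarter of the way into the card, and the
             * total is clamped so a sequential run cannot pass the halfway
             * point.  Addresses and ssz are in 512-byte sectors.
             */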
2023        ssz = sz >> 9;
2024        dev_addr = mmc_test_capacity(test->card) / 4;
2025        if (tot_sz > dev_addr << 9)
2026                tot_sz = dev_addr << 9;
2027        cnt = tot_sz / sz;
2028        dev_addr &= 0xffff0000; /* Round down to a 32 MiB (0x10000-sector) boundary */
2029
2030        ktime_get_ts64(&ts1);
2031        for (i = 0; i < cnt; i++) {
2032                ret = mmc_test_area_io(test, sz, dev_addr, write,
2033                                       max_scatter, 0);
2034                if (ret)
2035                        return ret;
2036                dev_addr += ssz;
2037        }
2038        ktime_get_ts64(&ts2);
2039
2040        mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
2041
2042        return 0;
2043}
2044
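    /*
     * Repeat the maximally scattered sequential transfer for three total
     * sizes: ten runs of 10 MiB, five runs of 100 MiB and three runs of
     * 1000 MiB, with the average rate printed for each run.
     */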
2045static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
2046{
2047        int ret, i;
2048
2049        for (i = 0; i < 10; i++) {
2050                ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
2051                if (ret)
2052                        return ret;
2053        }
2054        for (i = 0; i < 5; i++) {
2055                ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
2056                if (ret)
2057                        return ret;
2058        }
2059        for (i = 0; i < 3; i++) {
2060                ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
2061                if (ret)
2062                        return ret;
2063        }
2064
2065        return ret;
2066}
2067
2068/*
2069 * Large sequential read performance.
2070 */
2071static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
2072{
2073        return mmc_test_large_seq_perf(test, 0);
2074}
2075
2076/*
2077 * Large sequential write performance.
2078 */
2079static int mmc_test_large_seq_write_perf(struct mmc_test_card *test)
2080{
2081        return mmc_test_large_seq_perf(test, 1);
2082}
2083
2084static int mmc_test_rw_multiple(struct mmc_test_card *test,
2085                                struct mmc_test_multiple_rw *tdata,
2086                                unsigned int reqsize, unsigned int size,
2087                                int min_sg_len)
2088{
2089        unsigned int dev_addr;
2090        struct mmc_test_area *t = &test->area;
2091        int ret = 0;
2092
2093        /* Set up test area: clamp size and request size, then align dev_addr */
2094        if (size > mmc_test_capacity(test->card) / 2 * 512)
2095                size = mmc_test_capacity(test->card) / 2 * 512;
2096        if (reqsize > t->max_tfr)
2097                reqsize = t->max_tfr;
2098        dev_addr = mmc_test_capacity(test->card) / 4;
2099        if ((dev_addr & 0xffff0000))
2100                dev_addr &= 0xffff0000; /* Round to a 32 MiB (0x10000-sector) boundary */
2101        else
2102                dev_addr &= 0xfffff800; /* Round to 1MiB boundary */
2103        if (!dev_addr)
2104                goto err;
2105
2106        if (reqsize > size)
2107                return 0;
2108
2109        /* Prepare the test area, preferring the card's own erase argument */
2110        if (mmc_can_erase(test->card) &&
2111            tdata->prepare & MMC_TEST_PREP_ERASE) {
2112                ret = mmc_erase(test->card, dev_addr,
2113                                size / 512, test->card->erase_arg);
2114                if (ret)
2115                        ret = mmc_erase(test->card, dev_addr,
2116                                        size / 512, MMC_ERASE_ARG);
2117                if (ret)
2118                        goto err;
2119        }
2120
2121        /* Run test */
2122        ret = mmc_test_area_io_seq(test, reqsize, dev_addr,
2123                                   tdata->do_write, 0, 1, size / reqsize,
2124                                   tdata->do_nonblock_req, min_sg_len);
2125        if (ret)
2126                goto err;
2127
2128        return ret;
2129 err:
2130        pr_info("[%s] error\n", __func__);
2131        return ret;
2132}
2133
2134static int mmc_test_rw_multiple_size(struct mmc_test_card *test,
2135                                     struct mmc_test_multiple_rw *rw)
2136{
2137        int ret = 0;
2138        int i;
2139        void *pre_req = test->card->host->ops->pre_req;
2140        void *post_req = test->card->host->ops->post_req;
2141
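            /*
             * Non-blocking requests need both the ->pre_req() and
             * ->post_req() host ops; implementing only one of them is
             * inconsistent, so reject that combination outright.
             */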
2142        if (rw->do_nonblock_req &&
2143            ((!pre_req && post_req) || (pre_req && !post_req))) {
2144                pr_info("error: only one of pre/post is defined\n");
2145                return -EINVAL;
2146        }
2147
2148        for (i = 0; i < rw->len && ret == 0; i++) {
2149                ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0);
2150                if (ret)
2151                        break;
2152        }
2153        return ret;
2154}
2155
2156static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test,
2157                                       struct mmc_test_multiple_rw *rw)
2158{
2159        int ret = 0;
2160        int i;
2161
2162        for (i = 0; i < rw->len && ret == 0; i++) {
2163                ret = mmc_test_rw_multiple(test, rw, 512 * 1024, rw->size,
2164                                           rw->sg_len[i]);
2165                if (ret)
2166                        break;
2167        }
2168        return ret;
2169}
2170
2171/*
2172 * Multiple blocking write 4k to 4 MB chunks
2173 */
2174static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test)
2175{
2176        unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2177                             1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2178        struct mmc_test_multiple_rw test_data = {
2179                .bs = bs,
2180                .size = TEST_AREA_MAX_SIZE,
2181                .len = ARRAY_SIZE(bs),
2182                .do_write = true,
2183                .do_nonblock_req = false,
2184                .prepare = MMC_TEST_PREP_ERASE,
2185        };
2186
2187        return mmc_test_rw_multiple_size(test, &test_data);
2188}
2189
2190/*
2191 * Multiple non-blocking write 4k to 4 MB chunks
2192 */
2193static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test)
2194{
2195        unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2196                             1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2197        struct mmc_test_multiple_rw test_data = {
2198                .bs = bs,
2199                .size = TEST_AREA_MAX_SIZE,
2200                .len = ARRAY_SIZE(bs),
2201                .do_write = true,
2202                .do_nonblock_req = true,
2203                .prepare = MMC_TEST_PREP_ERASE,
2204        };
2205
2206        return mmc_test_rw_multiple_size(test, &test_data);
2207}
2208
2209/*
2210 * Multiple blocking read 4k to 4 MB chunks
2211 */
2212static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test)
2213{
2214        unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2215                             1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2216        struct mmc_test_multiple_rw test_data = {
2217                .bs = bs,
2218                .size = TEST_AREA_MAX_SIZE,
2219                .len = ARRAY_SIZE(bs),
2220                .do_write = false,
2221                .do_nonblock_req = false,
2222                .prepare = MMC_TEST_PREP_NONE,
2223        };
2224
2225        return mmc_test_rw_multiple_size(test, &test_data);
2226}
2227
2228/*
2229 * Multiple non-blocking read 4k to 4 MB chunks
2230 */
2231static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test)
2232{
2233        unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16,
2234                             1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22};
2235        struct mmc_test_multiple_rw test_data = {
2236                .bs = bs,
2237                .size = TEST_AREA_MAX_SIZE,
2238                .len = ARRAY_SIZE(bs),
2239                .do_write = false,
2240                .do_nonblock_req = true,
2241                .prepare = MMC_TEST_PREP_NONE,
2242        };
2243
2244        return mmc_test_rw_multiple_size(test, &test_data);
2245}
2246
2247/*
2248 * Multiple blocking write 1 to 512 sg elements
2249 */
2250static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test)
2251{
2252        unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2253                                 1 << 7, 1 << 8, 1 << 9};
2254        struct mmc_test_multiple_rw test_data = {
2255                .sg_len = sg_len,
2256                .size = TEST_AREA_MAX_SIZE,
2257                .len = ARRAY_SIZE(sg_len),
2258                .do_write = true,
2259                .do_nonblock_req = false,
2260                .prepare = MMC_TEST_PREP_ERASE,
2261        };
2262
2263        return mmc_test_rw_multiple_sg_len(test, &test_data);
2264}
2265
2266/*
2267 * Multiple non-blocking write 1 to 512 sg elements
2268 */
2269static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test)
2270{
2271        unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2272                                 1 << 7, 1 << 8, 1 << 9};
2273        struct mmc_test_multiple_rw test_data = {
2274                .sg_len = sg_len,
2275                .size = TEST_AREA_MAX_SIZE,
2276                .len = ARRAY_SIZE(sg_len),
2277                .do_write = true,
2278                .do_nonblock_req = true,
2279                .prepare = MMC_TEST_PREP_ERASE,
2280        };
2281
2282        return mmc_test_rw_multiple_sg_len(test, &test_data);
2283}
2284
2285/*
2286 * Multiple blocking read 1 to 512 sg elements
2287 */
2288static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test)
2289{
2290        unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2291                                 1 << 7, 1 << 8, 1 << 9};
2292        struct mmc_test_multiple_rw test_data = {
2293                .sg_len = sg_len,
2294                .size = TEST_AREA_MAX_SIZE,
2295                .len = ARRAY_SIZE(sg_len),
2296                .do_write = false,
2297                .do_nonblock_req = false,
2298                .prepare = MMC_TEST_PREP_NONE,
2299        };
2300
2301        return mmc_test_rw_multiple_sg_len(test, &test_data);
2302}
2303
2304/*
2305 * Multiple non-blocking read 1 to 512 sg elements
2306 */
2307static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test)
2308{
2309        unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6,
2310                                 1 << 7, 1 << 8, 1 << 9};
2311        struct mmc_test_multiple_rw test_data = {
2312                .sg_len = sg_len,
2313                .size = TEST_AREA_MAX_SIZE,
2314                .len = ARRAY_SIZE(sg_len),
2315                .do_write = false,
2316                .do_nonblock_req = true,
2317                .prepare = MMC_TEST_PREP_NONE,
2318        };
2319
2320        return mmc_test_rw_multiple_sg_len(test, &test_data);
2321}
2322
2323/*
2324 * eMMC hardware reset.
2325 */
2326static int mmc_test_reset(struct mmc_test_card *test)
2327{
2328        struct mmc_card *card = test->card;
2329        struct mmc_host *host = card->host;
2330        int err;
2331
2332        err = mmc_hw_reset(host);
2333        if (!err) {
2334                /*
2335                 * Reset will re-enable the card's command queue, but tests
2336                 * expect it to be disabled.
2337                 */
2338                if (card->ext_csd.cmdq_en)
2339                        mmc_cmdq_disable(card);
2340                return RESULT_OK;
2341        } else if (err == -EOPNOTSUPP) {
2342                return RESULT_UNSUP_HOST;
2343        }
2344
2345        return RESULT_FAIL;
2346}
2347
2348static int mmc_test_send_status(struct mmc_test_card *test,
2349                                struct mmc_command *cmd)
2350{
2351        memset(cmd, 0, sizeof(*cmd));
2352
2353        cmd->opcode = MMC_SEND_STATUS;
2354        if (!mmc_host_is_spi(test->card->host))
2355                cmd->arg = test->card->rca << 16;
2356        cmd->flags = MMC_RSP_SPI_R2 | MMC_RSP_R1 | MMC_CMD_AC;
2357
2358        return mmc_wait_for_cmd(test->card->host, cmd, 0);
2359}
2360
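    /*
     * Start an "ongoing" (cap_cmd_during_tfr) data request and send
     * SEND_STATUS (CMD13) while the transfer is in flight.  With repeat_cmd
     * set, polling continues until the card returns to the Tran state, the
     * request completes, an error is seen, or the 3 second timeout expires.
     */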
2361static int mmc_test_ongoing_transfer(struct mmc_test_card *test,
2362                                     unsigned int dev_addr, int use_sbc,
2363                                     int repeat_cmd, int write, int use_areq)
2364{
2365        struct mmc_test_req *rq = mmc_test_req_alloc();
2366        struct mmc_host *host = test->card->host;
2367        struct mmc_test_area *t = &test->area;
2368        struct mmc_request *mrq;
2369        unsigned long timeout;
2370        bool expired = false;
2371        int ret = 0, cmd_ret;
2372        u32 status = 0;
2373        int count = 0;
2374
2375        if (!rq)
2376                return -ENOMEM;
2377
2378        mrq = &rq->mrq;
2379        if (use_sbc)
2380                mrq->sbc = &rq->sbc;
2381        mrq->cap_cmd_during_tfr = true;
2382
2383        mmc_test_prepare_mrq(test, mrq, t->sg, t->sg_len, dev_addr, t->blocks,
2384                             512, write);
2385
2386        if (use_sbc && t->blocks > 1 && !mrq->sbc) {
2387                ret = mmc_host_cmd23(host) ?
2388                       RESULT_UNSUP_CARD :
2389                       RESULT_UNSUP_HOST;
2390                goto out_free;
2391        }
2392
2393        /* Start ongoing data request */
2394        if (use_areq) {
2395                ret = mmc_test_start_areq(test, mrq, NULL);
2396                if (ret)
2397                        goto out_free;
2398        } else {
2399                mmc_wait_for_req(host, mrq);
2400        }
2401
2402        timeout = jiffies + msecs_to_jiffies(3000);
2403        do {
2404                count += 1;
2405
2406                /* Send status command while data transfer in progress */
2407                cmd_ret = mmc_test_send_status(test, &rq->status);
2408                if (cmd_ret)
2409                        break;
2410
2411                status = rq->status.resp[0];
2412                if (status & R1_ERROR) {
2413                        cmd_ret = -EIO;
2414                        break;
2415                }
2416
2417                if (mmc_is_req_done(host, mrq))
2418                        break;
2419
2420                expired = time_after(jiffies, timeout);
2421                if (expired) {
2422                        pr_info("%s: timeout waiting for Tran state status %#x\n",
2423                                mmc_hostname(host), status);
2424                        cmd_ret = -ETIMEDOUT;
2425                        break;
2426                }
2427        } while (repeat_cmd && R1_CURRENT_STATE(status) != R1_STATE_TRAN);
2428
2429        /* Wait for data request to complete */
2430        if (use_areq) {
2431                ret = mmc_test_start_areq(test, NULL, mrq);
2432        } else {
2433                mmc_wait_for_req_done(test->card->host, mrq);
2434        }
2435
2436        /*
2437         * For a cap_cmd_during_tfr request, the upper layer must send the
2438         * stop command itself if one is required.
2439         */
2440        if (mrq->data->stop && (mrq->data->error || !mrq->sbc)) {
2441                if (ret)
2442                        mmc_wait_for_cmd(host, mrq->data->stop, 0);
2443                else
2444                        ret = mmc_wait_for_cmd(host, mrq->data->stop, 0);
2445        }
2446
2447        if (ret)
2448                goto out_free;
2449
2450        if (cmd_ret) {
2451                pr_info("%s: Send Status failed: status %#x, error %d\n",
2452                        mmc_hostname(test->card->host), status, cmd_ret);
2453        }
2454
2455        ret = mmc_test_check_result(test, mrq);
2456        if (ret)
2457                goto out_free;
2458
2459        ret = mmc_test_wait_busy(test);
2460        if (ret)
2461                goto out_free;
2462
2463        if (repeat_cmd && (t->blocks + 1) << 9 > t->max_tfr)
2464                pr_info("%s: %d commands completed during transfer of %u blocks\n",
2465                        mmc_hostname(test->card->host), count, t->blocks);
2466
2467        if (cmd_ret)
2468                ret = cmd_ret;
2469out_free:
2470        kfree(rq);
2471
2472        return ret;
2473}
2474
2475static int __mmc_test_cmds_during_tfr(struct mmc_test_card *test,
2476                                      unsigned long sz, int use_sbc, int write,
2477                                      int use_areq)
2478{
2479        struct mmc_test_area *t = &test->area;
2480        int ret;
2481
2482        if (!(test->card->host->caps & MMC_CAP_CMD_DURING_TFR))
2483                return RESULT_UNSUP_HOST;
2484
2485        ret = mmc_test_area_map(test, sz, 0, 0, use_areq);
2486        if (ret)
2487                return ret;
2488
2489        ret = mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 0, write,
2490                                        use_areq);
2491        if (ret)
2492                return ret;
2493
2494        return mmc_test_ongoing_transfer(test, t->dev_addr, use_sbc, 1, write,
2495                                         use_areq);
2496}
2497
2498static int mmc_test_cmds_during_tfr(struct mmc_test_card *test, int use_sbc,
2499                                    int write, int use_areq)
2500{
2501        struct mmc_test_area *t = &test->area;
2502        unsigned long sz;
2503        int ret;
2504
2505        for (sz = 512; sz <= t->max_tfr; sz += 512) {
2506                ret = __mmc_test_cmds_during_tfr(test, sz, use_sbc, write,
2507                                                 use_areq);
2508                if (ret)
2509                        return ret;
2510        }
2511        return 0;
2512}
2513
2514/*
2515 * Commands during read - no Set Block Count (CMD23).
2516 */
2517static int mmc_test_cmds_during_read(struct mmc_test_card *test)
2518{
2519        return mmc_test_cmds_during_tfr(test, 0, 0, 0);
2520}
2521
2522/*
2523 * Commands during write - no Set Block Count (CMD23).
2524 */
2525static int mmc_test_cmds_during_write(struct mmc_test_card *test)
2526{
2527        return mmc_test_cmds_during_tfr(test, 0, 1, 0);
2528}
2529
2530/*
2531 * Commands during read - use Set Block Count (CMD23).
2532 */
2533static int mmc_test_cmds_during_read_cmd23(struct mmc_test_card *test)
2534{
2535        return mmc_test_cmds_during_tfr(test, 1, 0, 0);
2536}
2537
2538/*
2539 * Commands during write - use Set Block Count (CMD23).
2540 */
2541static int mmc_test_cmds_during_write_cmd23(struct mmc_test_card *test)
2542{
2543        return mmc_test_cmds_during_tfr(test, 1, 1, 0);
2544}
2545
2546/*
2547 * Commands during non-blocking read - use Set Block Count (CMD23).
2548 */
2549static int mmc_test_cmds_during_read_cmd23_nonblock(struct mmc_test_card *test)
2550{
2551        return mmc_test_cmds_during_tfr(test, 1, 0, 1);
2552}
2553
2554/*
2555 * Commands during non-blocking write - use Set Block Count (CMD23).
2556 */
2557static int mmc_test_cmds_during_write_cmd23_nonblock(struct mmc_test_card *test)
2558{
2559        return mmc_test_cmds_during_tfr(test, 1, 1, 1);
2560}
2561
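    /*
     * Table of test cases.  A test's number, as shown by the debugfs
     * "testlist" file and accepted by the "test" file, is its index in this
     * array plus one; writing 0 runs every test in order.
     */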
2562static const struct mmc_test_case mmc_test_cases[] = {
2563        {
2564                .name = "Basic write (no data verification)",
2565                .run = mmc_test_basic_write,
2566        },
2567
2568        {
2569                .name = "Basic read (no data verification)",
2570                .run = mmc_test_basic_read,
2571        },
2572
2573        {
2574                .name = "Basic write (with data verification)",
2575                .prepare = mmc_test_prepare_write,
2576                .run = mmc_test_verify_write,
2577                .cleanup = mmc_test_cleanup,
2578        },
2579
2580        {
2581                .name = "Basic read (with data verification)",
2582                .prepare = mmc_test_prepare_read,
2583                .run = mmc_test_verify_read,
2584                .cleanup = mmc_test_cleanup,
2585        },
2586
2587        {
2588                .name = "Multi-block write",
2589                .prepare = mmc_test_prepare_write,
2590                .run = mmc_test_multi_write,
2591                .cleanup = mmc_test_cleanup,
2592        },
2593
2594        {
2595                .name = "Multi-block read",
2596                .prepare = mmc_test_prepare_read,
2597                .run = mmc_test_multi_read,
2598                .cleanup = mmc_test_cleanup,
2599        },
2600
2601        {
2602                .name = "Power of two block writes",
2603                .prepare = mmc_test_prepare_write,
2604                .run = mmc_test_pow2_write,
2605                .cleanup = mmc_test_cleanup,
2606        },
2607
2608        {
2609                .name = "Power of two block reads",
2610                .prepare = mmc_test_prepare_read,
2611                .run = mmc_test_pow2_read,
2612                .cleanup = mmc_test_cleanup,
2613        },
2614
2615        {
2616                .name = "Weird sized block writes",
2617                .prepare = mmc_test_prepare_write,
2618                .run = mmc_test_weird_write,
2619                .cleanup = mmc_test_cleanup,
2620        },
2621
2622        {
2623                .name = "Weird sized block reads",
2624                .prepare = mmc_test_prepare_read,
2625                .run = mmc_test_weird_read,
2626                .cleanup = mmc_test_cleanup,
2627        },
2628
2629        {
2630                .name = "Badly aligned write",
2631                .prepare = mmc_test_prepare_write,
2632                .run = mmc_test_align_write,
2633                .cleanup = mmc_test_cleanup,
2634        },
2635
2636        {
2637                .name = "Badly aligned read",
2638                .prepare = mmc_test_prepare_read,
2639                .run = mmc_test_align_read,
2640                .cleanup = mmc_test_cleanup,
2641        },
2642
2643        {
2644                .name = "Badly aligned multi-block write",
2645                .prepare = mmc_test_prepare_write,
2646                .run = mmc_test_align_multi_write,
2647                .cleanup = mmc_test_cleanup,
2648        },
2649
2650        {
2651                .name = "Badly aligned multi-block read",
2652                .prepare = mmc_test_prepare_read,
2653                .run = mmc_test_align_multi_read,
2654                .cleanup = mmc_test_cleanup,
2655        },
2656
2657        {
2658                .name = "Proper xfer_size at write (start failure)",
2659                .run = mmc_test_xfersize_write,
2660        },
2661
2662        {
2663                .name = "Proper xfer_size at read (start failure)",
2664                .run = mmc_test_xfersize_read,
2665        },
2666
2667        {
2668                .name = "Proper xfer_size at write (midway failure)",
2669                .run = mmc_test_multi_xfersize_write,
2670        },
2671
2672        {
2673                .name = "Proper xfer_size at read (midway failure)",
2674                .run = mmc_test_multi_xfersize_read,
2675        },
2676
2677#ifdef CONFIG_HIGHMEM
2678
2679        {
2680                .name = "Highmem write",
2681                .prepare = mmc_test_prepare_write,
2682                .run = mmc_test_write_high,
2683                .cleanup = mmc_test_cleanup,
2684        },
2685
2686        {
2687                .name = "Highmem read",
2688                .prepare = mmc_test_prepare_read,
2689                .run = mmc_test_read_high,
2690                .cleanup = mmc_test_cleanup,
2691        },
2692
2693        {
2694                .name = "Multi-block highmem write",
2695                .prepare = mmc_test_prepare_write,
2696                .run = mmc_test_multi_write_high,
2697                .cleanup = mmc_test_cleanup,
2698        },
2699
2700        {
2701                .name = "Multi-block highmem read",
2702                .prepare = mmc_test_prepare_read,
2703                .run = mmc_test_multi_read_high,
2704                .cleanup = mmc_test_cleanup,
2705        },
2706
2707#else
2708
2709        {
2710                .name = "Highmem write",
2711                .run = mmc_test_no_highmem,
2712        },
2713
2714        {
2715                .name = "Highmem read",
2716                .run = mmc_test_no_highmem,
2717        },
2718
2719        {
2720                .name = "Multi-block highmem write",
2721                .run = mmc_test_no_highmem,
2722        },
2723
2724        {
2725                .name = "Multi-block highmem read",
2726                .run = mmc_test_no_highmem,
2727        },
2728
2729#endif /* CONFIG_HIGHMEM */
2730
2731        {
2732                .name = "Best-case read performance",
2733                .prepare = mmc_test_area_prepare_fill,
2734                .run = mmc_test_best_read_performance,
2735                .cleanup = mmc_test_area_cleanup,
2736        },
2737
2738        {
2739                .name = "Best-case write performance",
2740                .prepare = mmc_test_area_prepare_erase,
2741                .run = mmc_test_best_write_performance,
2742                .cleanup = mmc_test_area_cleanup,
2743        },
2744
2745        {
2746                .name = "Best-case read performance into scattered pages",
2747                .prepare = mmc_test_area_prepare_fill,
2748                .run = mmc_test_best_read_perf_max_scatter,
2749                .cleanup = mmc_test_area_cleanup,
2750        },
2751
2752        {
2753                .name = "Best-case write performance from scattered pages",
2754                .prepare = mmc_test_area_prepare_erase,
2755                .run = mmc_test_best_write_perf_max_scatter,
2756                .cleanup = mmc_test_area_cleanup,
2757        },
2758
2759        {
2760                .name = "Single read performance by transfer size",
2761                .prepare = mmc_test_area_prepare_fill,
2762                .run = mmc_test_profile_read_perf,
2763                .cleanup = mmc_test_area_cleanup,
2764        },
2765
2766        {
2767                .name = "Single write performance by transfer size",
2768                .prepare = mmc_test_area_prepare,
2769                .run = mmc_test_profile_write_perf,
2770                .cleanup = mmc_test_area_cleanup,
2771        },
2772
2773        {
2774                .name = "Single trim performance by transfer size",
2775                .prepare = mmc_test_area_prepare_fill,
2776                .run = mmc_test_profile_trim_perf,
2777                .cleanup = mmc_test_area_cleanup,
2778        },
2779
2780        {
2781                .name = "Consecutive read performance by transfer size",
2782                .prepare = mmc_test_area_prepare_fill,
2783                .run = mmc_test_profile_seq_read_perf,
2784                .cleanup = mmc_test_area_cleanup,
2785        },
2786
2787        {
2788                .name = "Consecutive write performance by transfer size",
2789                .prepare = mmc_test_area_prepare,
2790                .run = mmc_test_profile_seq_write_perf,
2791                .cleanup = mmc_test_area_cleanup,
2792        },
2793
2794        {
2795                .name = "Consecutive trim performance by transfer size",
2796                .prepare = mmc_test_area_prepare,
2797                .run = mmc_test_profile_seq_trim_perf,
2798                .cleanup = mmc_test_area_cleanup,
2799        },
2800
2801        {
2802                .name = "Random read performance by transfer size",
2803                .prepare = mmc_test_area_prepare,
2804                .run = mmc_test_random_read_perf,
2805                .cleanup = mmc_test_area_cleanup,
2806        },
2807
2808        {
2809                .name = "Random write performance by transfer size",
2810                .prepare = mmc_test_area_prepare,
2811                .run = mmc_test_random_write_perf,
2812                .cleanup = mmc_test_area_cleanup,
2813        },
2814
2815        {
2816                .name = "Large sequential read into scattered pages",
2817                .prepare = mmc_test_area_prepare,
2818                .run = mmc_test_large_seq_read_perf,
2819                .cleanup = mmc_test_area_cleanup,
2820        },
2821
2822        {
2823                .name = "Large sequential write from scattered pages",
2824                .prepare = mmc_test_area_prepare,
2825                .run = mmc_test_large_seq_write_perf,
2826                .cleanup = mmc_test_area_cleanup,
2827        },
2828
2829        {
2830                .name = "Write performance with blocking req 4k to 4MB",
2831                .prepare = mmc_test_area_prepare,
2832                .run = mmc_test_profile_mult_write_blocking_perf,
2833                .cleanup = mmc_test_area_cleanup,
2834        },
2835
2836        {
2837                .name = "Write performance with non-blocking req 4k to 4MB",
2838                .prepare = mmc_test_area_prepare,
2839                .run = mmc_test_profile_mult_write_nonblock_perf,
2840                .cleanup = mmc_test_area_cleanup,
2841        },
2842
2843        {
2844                .name = "Read performance with blocking req 4k to 4MB",
2845                .prepare = mmc_test_area_prepare,
2846                .run = mmc_test_profile_mult_read_blocking_perf,
2847                .cleanup = mmc_test_area_cleanup,
2848        },
2849
2850        {
2851                .name = "Read performance with non-blocking req 4k to 4MB",
2852                .prepare = mmc_test_area_prepare,
2853                .run = mmc_test_profile_mult_read_nonblock_perf,
2854                .cleanup = mmc_test_area_cleanup,
2855        },
2856
2857        {
2858                .name = "Write performance blocking req 1 to 512 sg elems",
2859                .prepare = mmc_test_area_prepare,
2860                .run = mmc_test_profile_sglen_wr_blocking_perf,
2861                .cleanup = mmc_test_area_cleanup,
2862        },
2863
2864        {
2865                .name = "Write performance non-blocking req 1 to 512 sg elems",
2866                .prepare = mmc_test_area_prepare,
2867                .run = mmc_test_profile_sglen_wr_nonblock_perf,
2868                .cleanup = mmc_test_area_cleanup,
2869        },
2870
2871        {
2872                .name = "Read performance blocking req 1 to 512 sg elems",
2873                .prepare = mmc_test_area_prepare,
2874                .run = mmc_test_profile_sglen_r_blocking_perf,
2875                .cleanup = mmc_test_area_cleanup,
2876        },
2877
2878        {
2879                .name = "Read performance non-blocking req 1 to 512 sg elems",
2880                .prepare = mmc_test_area_prepare,
2881                .run = mmc_test_profile_sglen_r_nonblock_perf,
2882                .cleanup = mmc_test_area_cleanup,
2883        },
2884
2885        {
2886                .name = "Reset test",
2887                .run = mmc_test_reset,
2888        },
2889
2890        {
2891                .name = "Commands during read - no Set Block Count (CMD23)",
2892                .prepare = mmc_test_area_prepare,
2893                .run = mmc_test_cmds_during_read,
2894                .cleanup = mmc_test_area_cleanup,
2895        },
2896
2897        {
2898                .name = "Commands during write - no Set Block Count (CMD23)",
2899                .prepare = mmc_test_area_prepare,
2900                .run = mmc_test_cmds_during_write,
2901                .cleanup = mmc_test_area_cleanup,
2902        },
2903
2904        {
2905                .name = "Commands during read - use Set Block Count (CMD23)",
2906                .prepare = mmc_test_area_prepare,
2907                .run = mmc_test_cmds_during_read_cmd23,
2908                .cleanup = mmc_test_area_cleanup,
2909        },
2910
2911        {
2912                .name = "Commands during write - use Set Block Count (CMD23)",
2913                .prepare = mmc_test_area_prepare,
2914                .run = mmc_test_cmds_during_write_cmd23,
2915                .cleanup = mmc_test_area_cleanup,
2916        },
2917
2918        {
2919                .name = "Commands during non-blocking read - use Set Block Count (CMD23)",
2920                .prepare = mmc_test_area_prepare,
2921                .run = mmc_test_cmds_during_read_cmd23_nonblock,
2922                .cleanup = mmc_test_area_cleanup,
2923        },
2924
2925        {
2926                .name = "Commands during non-blocking write - use Set Block Count (CMD23)",
2927                .prepare = mmc_test_area_prepare,
2928                .run = mmc_test_cmds_during_write_cmd23_nonblock,
2929                .cleanup = mmc_test_area_cleanup,
2930        },
2931};
2932
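    /* Serializes test runs and guards the global result and debugfs file lists. */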
2933static DEFINE_MUTEX(mmc_test_lock);
2934
2935static LIST_HEAD(mmc_test_result);
2936
2937static void mmc_test_run(struct mmc_test_card *test, int testcase)
2938{
2939        int i, ret;
2940
2941        pr_info("%s: Starting tests of card %s...\n",
2942                mmc_hostname(test->card->host), mmc_card_id(test->card));
2943
2944        mmc_claim_host(test->card->host);
2945
2946        for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) {
2947                struct mmc_test_general_result *gr;
2948
2949                if (testcase && ((i + 1) != testcase))
2950                        continue;
2951
2952                pr_info("%s: Test case %d. %s...\n",
2953                        mmc_hostname(test->card->host), i + 1,
2954                        mmc_test_cases[i].name);
2955
2956                if (mmc_test_cases[i].prepare) {
2957                        ret = mmc_test_cases[i].prepare(test);
2958                        if (ret) {
2959                                pr_info("%s: Result: Prepare stage failed! (%d)\n",
2960                                        mmc_hostname(test->card->host),
2961                                        ret);
2962                                continue;
2963                        }
2964                }
2965
2966                gr = kzalloc(sizeof(*gr), GFP_KERNEL);
2967                if (gr) {
2968                        INIT_LIST_HEAD(&gr->tr_lst);
2969
2970                        /* Assign the data we already know */
2971                        gr->card = test->card;
2972                        gr->testcase = i;
2973
2974                        /* Append the container to the global result list */
2975                        list_add_tail(&gr->link, &mmc_test_result);
2976
2977                        /*
2978                         * Save the pointer to the created container in our
2979                         * private structure.
2980                         */
2981                        test->gr = gr;
2982                }
2983
2984                ret = mmc_test_cases[i].run(test);
2985                switch (ret) {
2986                case RESULT_OK:
2987                        pr_info("%s: Result: OK\n",
2988                                mmc_hostname(test->card->host));
2989                        break;
2990                case RESULT_FAIL:
2991                        pr_info("%s: Result: FAILED\n",
2992                                mmc_hostname(test->card->host));
2993                        break;
2994                case RESULT_UNSUP_HOST:
2995                        pr_info("%s: Result: UNSUPPORTED (by host)\n",
2996                                mmc_hostname(test->card->host));
2997                        break;
2998                case RESULT_UNSUP_CARD:
2999                        pr_info("%s: Result: UNSUPPORTED (by card)\n",
3000                                mmc_hostname(test->card->host));
3001                        break;
3002                default:
3003                        pr_info("%s: Result: ERROR (%d)\n",
3004                                mmc_hostname(test->card->host), ret);
3005                }
3006
3007                /* Save the result */
3008                if (gr)
3009                        gr->result = ret;
3010
3011                if (mmc_test_cases[i].cleanup) {
3012                        ret = mmc_test_cases[i].cleanup(test);
3013                        if (ret) {
3014                                pr_info("%s: Warning: Cleanup stage failed! (%d)\n",
3015                                        mmc_hostname(test->card->host),
3016                                        ret);
3017                        }
3018                }
3019        }
3020
3021        mmc_release_host(test->card->host);
3022
3023        pr_info("%s: Tests completed.\n",
3024                mmc_hostname(test->card->host));
3025}
3026
3027static void mmc_test_free_result(struct mmc_card *card)
3028{
3029        struct mmc_test_general_result *gr, *grs;
3030
3031        mutex_lock(&mmc_test_lock);
3032
3033        list_for_each_entry_safe(gr, grs, &mmc_test_result, link) {
3034                struct mmc_test_transfer_result *tr, *trs;
3035
3036                if (card && gr->card != card)
3037                        continue;
3038
3039                list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) {
3040                        list_del(&tr->link);
3041                        kfree(tr);
3042                }
3043
3044                list_del(&gr->link);
3045                kfree(gr);
3046        }
3047
3048        mutex_unlock(&mmc_test_lock);
3049}
3050
3051static LIST_HEAD(mmc_test_file_test);
3052
3053static int mtf_test_show(struct seq_file *sf, void *data)
3054{
3055        struct mmc_card *card = (struct mmc_card *)sf->private;
3056        struct mmc_test_general_result *gr;
3057
3058        mutex_lock(&mmc_test_lock);
3059
3060        list_for_each_entry(gr, &mmc_test_result, link) {
3061                struct mmc_test_transfer_result *tr;
3062
3063                if (gr->card != card)
3064                        continue;
3065
3066                seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result);
3067
3068                list_for_each_entry(tr, &gr->tr_lst, link) {
3069                        seq_printf(sf, "%u %d %llu.%09u %u %u.%02u\n",
3070                                tr->count, tr->sectors,
3071                                (u64)tr->ts.tv_sec, (u32)tr->ts.tv_nsec,
3072                                tr->rate, tr->iops / 100, tr->iops % 100);
3073                }
3074        }
3075
3076        mutex_unlock(&mmc_test_lock);
3077
3078        return 0;
3079}
3080
3081static int mtf_test_open(struct inode *inode, struct file *file)
3082{
3083        return single_open(file, mtf_test_show, inode->i_private);
3084}
3085
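    /*
     * Writing a test number to the card's debugfs "test" file runs that test
     * (0 runs them all); reading the file back reports the saved results.
     * As an example - the exact debugfs path depends on the host and card:
     *
     *        echo 4 > /sys/kernel/debug/mmc0/mmc0:0001/test
     *        cat /sys/kernel/debug/mmc0/mmc0:0001/test
     */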
3086static ssize_t mtf_test_write(struct file *file, const char __user *buf,
3087        size_t count, loff_t *pos)
3088{
3089        struct seq_file *sf = (struct seq_file *)file->private_data;
3090        struct mmc_card *card = (struct mmc_card *)sf->private;
3091        struct mmc_test_card *test;
3092        long testcase;
3093        int ret;
3094
3095        ret = kstrtol_from_user(buf, count, 10, &testcase);
3096        if (ret)
3097                return ret;
3098
3099        test = kzalloc(sizeof(*test), GFP_KERNEL);
3100        if (!test)
3101                return -ENOMEM;
3102
3103        /*
3104         * Remove all results associated with the given card, so that only
3105         * data from the last run is kept.
3106         */
3107        mmc_test_free_result(card);
3108
3109        test->card = card;
3110
3111        test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL);
3112#ifdef CONFIG_HIGHMEM
3113        test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER);
3114#endif
3115
3116#ifdef CONFIG_HIGHMEM
3117        if (test->buffer && test->highmem) {
3118#else
3119        if (test->buffer) {
3120#endif
3121                mutex_lock(&mmc_test_lock);
3122                mmc_test_run(test, testcase);
3123                mutex_unlock(&mmc_test_lock);
3124        }
3125
3126#ifdef CONFIG_HIGHMEM
3127        if (test->highmem)      /* alloc_pages() may have failed */
                    __free_pages(test->highmem, BUFFER_ORDER);
3128#endif
3129        kfree(test->buffer);
3130        kfree(test);
3131
3132        return count;
3133}
3134
3135static const struct file_operations mmc_test_fops_test = {
3136        .open           = mtf_test_open,
3137        .read           = seq_read,
3138        .write          = mtf_test_write,
3139        .llseek         = seq_lseek,
3140        .release        = single_release,
3141};
3142
3143static int mtf_testlist_show(struct seq_file *sf, void *data)
3144{
3145        int i;
3146
3147        mutex_lock(&mmc_test_lock);
3148
3149        seq_puts(sf, "0:\tRun all tests\n");
3150        for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++)
3151                seq_printf(sf, "%d:\t%s\n", i + 1, mmc_test_cases[i].name);
3152
3153        mutex_unlock(&mmc_test_lock);
3154
3155        return 0;
3156}
3157
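    /*
     * DEFINE_SHOW_ATTRIBUTE() generates mtf_testlist_open() and
     * mtf_testlist_fops around mtf_testlist_show(), which are registered
     * with debugfs below.
     */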
3158DEFINE_SHOW_ATTRIBUTE(mtf_testlist);
3159
3160static void mmc_test_free_dbgfs_file(struct mmc_card *card)
3161{
3162        struct mmc_test_dbgfs_file *df, *dfs;
3163
3164        mutex_lock(&mmc_test_lock);
3165
3166        list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) {
3167                if (card && df->card != card)
3168                        continue;
3169                debugfs_remove(df->file);
3170                list_del(&df->link);
3171                kfree(df);
3172        }
3173
3174        mutex_unlock(&mmc_test_lock);
3175}
3176
3177static int __mmc_test_register_dbgfs_file(struct mmc_card *card,
3178        const char *name, umode_t mode, const struct file_operations *fops)
3179{
3180        struct dentry *file = NULL;
3181        struct mmc_test_dbgfs_file *df;
3182
3183        if (card->debugfs_root)
3184                file = debugfs_create_file(name, mode, card->debugfs_root, card, fops);
3185
3186        df = kmalloc(sizeof(*df), GFP_KERNEL);
3187        if (!df) {
3188                debugfs_remove(file);
3189                return -ENOMEM;
3190        }
3191
3192        df->card = card;
3193        df->file = file;
3194
3195        list_add(&df->link, &mmc_test_file_test);
3196        return 0;
3197}
3198
3199static int mmc_test_register_dbgfs_file(struct mmc_card *card)
3200{
3201        int ret;
3202
3203        mutex_lock(&mmc_test_lock);
3204
3205        ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO,
3206                &mmc_test_fops_test);
3207        if (ret)
3208                goto err;
3209
3210        ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO,
3211                &mtf_testlist_fops);
3212        if (ret)
3213                goto err;
3214
3215err:
3216        mutex_unlock(&mmc_test_lock);
3217
3218        return ret;
3219}
3220
3221static int mmc_test_probe(struct mmc_card *card)
3222{
3223        int ret;
3224
3225        if (!mmc_card_mmc(card) && !mmc_card_sd(card))
3226                return -ENODEV;
3227
3228        ret = mmc_test_register_dbgfs_file(card);
3229        if (ret)
3230                return ret;
3231
3232        if (card->ext_csd.cmdq_en) {
3233                mmc_claim_host(card->host);
3234                ret = mmc_cmdq_disable(card);
3235                mmc_release_host(card->host);
3236                if (ret)
3237                        return ret;
3238        }
3239
3240        dev_info(&card->dev, "Card claimed for testing.\n");
3241
3242        return 0;
3243}
3244
3245static void mmc_test_remove(struct mmc_card *card)
3246{
3247        if (card->reenable_cmdq) {
3248                mmc_claim_host(card->host);
3249                mmc_cmdq_enable(card);
3250                mmc_release_host(card->host);
3251        }
3252        mmc_test_free_result(card);
3253        mmc_test_free_dbgfs_file(card);
3254}
3255
3256static struct mmc_driver mmc_driver = {
3257        .drv            = {
3258                .name   = "mmc_test",
3259        },
3260        .probe          = mmc_test_probe,
3261        .remove         = mmc_test_remove,
3262};
3263
3264static int __init mmc_test_init(void)
3265{
3266        return mmc_register_driver(&mmc_driver);
3267}
3268
3269static void __exit mmc_test_exit(void)
3270{
3271        /* Clear stale results even if a card is still plugged in */
3272        mmc_test_free_result(NULL);
3273        mmc_test_free_dbgfs_file(NULL);
3274
3275        mmc_unregister_driver(&mmc_driver);
3276}
3277
3278module_init(mmc_test_init);
3279module_exit(mmc_test_exit);
3280
3281MODULE_LICENSE("GPL");
3282MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver");
3283MODULE_AUTHOR("Pierre Ossman");
3284