linux/drivers/mtd/lpddr/lpddr_cmds.c
/*
 * LPDDR flash memory device operations. This module provides read, write,
 * erase, lock/unlock support for LPDDR flash memories
 * (C) 2008 Korolev Alexey <akorolev@infradead.org>
 * (C) 2008 Vasiliy Leonenko <vasiliy.leonenko@gmail.com>
 * Many thanks to Roman Borisov for initial enabling
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 * TODO:
 * Implement VPP management
 * Implement XIP support
 * Implement OTP support
 */
#include <linux/mtd/pfow.h>
#include <linux/mtd/qinfo.h>
#include <linux/slab.h>
#include <linux/module.h>

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
                                        size_t *retlen, u_char *buf);
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to,
                                size_t len, size_t *retlen, const u_char *buf);
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
                                unsigned long count, loff_t to, size_t *retlen);
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr);
static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len);
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
                        size_t *retlen, void **mtdbuf, resource_size_t *phys);
static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len);
static int get_chip(struct map_info *map, struct flchip *chip, int mode);
static int chip_ready(struct map_info *map, struct flchip *chip, int mode);
static void put_chip(struct map_info *map, struct flchip *chip);

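/*
 * lpddr_cmdset() - build an mtd_info for a PFOW/LPDDR flash map.
 *
 * Allocates the mtd_info, wires up the read/write/erase/lock/point
 * handlers implemented below, derives size/erasesize/writesize from the
 * probed qinfo records, and creates one flchip_shared per physical chip
 * so that hardware partitions of the same die can arbitrate write/erase
 * ownership.  Geometry probing itself (filling map->fldrv_priv with a
 * struct lpddr_private) is done by the qinfo probe code, not here.
 *
 * Illustrative use by a probe driver (a sketch, not taken from this
 * file; the registration helper may differ by kernel version):
 *
 *      mtd = lpddr_cmdset(map);
 *      if (!mtd)
 *              return -ENODEV;
 *      return mtd_device_register(mtd, NULL, 0);
 */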
struct mtd_info *lpddr_cmdset(struct map_info *map)
{
        struct lpddr_private *lpddr = map->fldrv_priv;
        struct flchip_shared *shared;
        struct flchip *chip;
        struct mtd_info *mtd;
        int numchips;
        int i, j;

        mtd = kzalloc(sizeof(*mtd), GFP_KERNEL);
        if (!mtd)
                return NULL;
        mtd->priv = map;
        mtd->type = MTD_NORFLASH;

        /* Fill in the default mtd operations */
        mtd->_read = lpddr_read;
        mtd->flags = MTD_CAP_NORFLASH;
        mtd->flags &= ~MTD_BIT_WRITEABLE;
        mtd->_erase = lpddr_erase;
        mtd->_write = lpddr_write_buffers;
        mtd->_writev = lpddr_writev;
        mtd->_lock = lpddr_lock;
        mtd->_unlock = lpddr_unlock;
        if (map_is_linear(map)) {
                mtd->_point = lpddr_point;
                mtd->_unpoint = lpddr_unpoint;
        }
        mtd->size = 1 << lpddr->qinfo->DevSizeShift;
        mtd->erasesize = 1 << lpddr->qinfo->UniformBlockSizeShift;
        mtd->writesize = 1 << lpddr->qinfo->BufSizeShift;

        shared = kmalloc(sizeof(struct flchip_shared) * lpddr->numchips,
                                                GFP_KERNEL);
        if (!shared) {
                kfree(lpddr);
                kfree(mtd);
                return NULL;
        }

        chip = &lpddr->chips[0];
        numchips = lpddr->numchips / lpddr->qinfo->HWPartsNum;
        for (i = 0; i < numchips; i++) {
                shared[i].writing = shared[i].erasing = NULL;
                mutex_init(&shared[i].lock);
                for (j = 0; j < lpddr->qinfo->HWPartsNum; j++) {
                        *chip = lpddr->chips[i];
                        chip->start += j << lpddr->chipshift;
                        chip->oldstate = chip->state = FL_READY;
                        chip->priv = &shared[i];
                        /* those should be reset too since
                           they create memory references. */
                        init_waitqueue_head(&chip->wq);
                        mutex_init(&chip->mutex);
                        chip++;
                }
        }

        return mtd;
}
EXPORT_SYMBOL(lpddr_cmdset);

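/*
 * wait_for_ready() - poll the PFOW Device Status Register until the
 * current operation completes.
 *
 * The timeout is set to 8x the expected operation time (chip_op_time,
 * in microseconds); delays long enough to matter are spent in msleep(),
 * shorter ones in a udelay(1)/cond_resched() busy loop.  The chip mutex
 * is dropped while waiting so that another partition may suspend the
 * operation; if that happens the timeout is restarted.  Returns 0 on
 * success, -ETIME on timeout, -EIO if the DSR reports an error.
 */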
static int wait_for_ready(struct map_info *map, struct flchip *chip,
                unsigned int chip_op_time)
{
        unsigned int timeo, reset_timeo, sleep_time;
        unsigned int dsr;
        flstate_t chip_state = chip->state;
        int ret = 0;

        /* set our timeout to 8 times the expected delay */
        timeo = chip_op_time * 8;
        if (!timeo)
                timeo = 500000;
        reset_timeo = timeo;
        sleep_time = chip_op_time / 2;

        for (;;) {
                dsr = CMDVAL(map_read(map, map->pfow_base + PFOW_DSR));
                if (dsr & DSR_READY_STATUS)
                        break;
                if (!timeo) {
                        printk(KERN_ERR "%s: Flash timeout error state %d\n",
                                                        map->name, chip_state);
                        ret = -ETIME;
                        break;
                }

                /* OK Still waiting. Drop the lock, wait a while and retry. */
                mutex_unlock(&chip->mutex);
                if (sleep_time >= 1000000/HZ) {
                        /*
                         * Half of the normal delay still remaining
                         * can be performed with a sleeping delay instead
                         * of busy waiting.
                         */
                        msleep(sleep_time/1000);
                        timeo -= sleep_time;
                        sleep_time = 1000000/HZ;
                } else {
                        udelay(1);
                        cond_resched();
                        timeo--;
                }
                mutex_lock(&chip->mutex);

                while (chip->state != chip_state) {
                        /* Someone's suspended the operation: sleep */
                        DECLARE_WAITQUEUE(wait, current);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                }
                if (chip->erase_suspended || chip->write_suspended) {
                        /* Suspend has occurred while sleeping: reset timeout */
                        timeo = reset_timeo;
                        chip->erase_suspended = chip->write_suspended = 0;
                }
        }
        /* check status for errors */
        if (dsr & DSR_ERR) {
                /* Clear DSR */
                map_write(map, CMD(~(DSR_ERR)), map->pfow_base + PFOW_DSR);
                printk(KERN_WARNING"%s: Bad status on wait: 0x%x\n",
                                map->name, dsr);
                print_drs_error(dsr);
                ret = -EIO;
        }
        chip->state = FL_READY;
        return ret;
}

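/*
 * get_chip() - obtain exclusive use of a chip (or hardware partition)
 * for the requested operation.
 *
 * Write and erase use an engine that is shared by all partitions of one
 * physical chip, so the shared->writing/shared->erasing owners are
 * arbitrated here: if another partition currently owns the engine we
 * either get it to suspend (handled in chip_ready() on the owner) or go
 * to sleep and retry.  Must be called with chip->mutex held; it may drop
 * and retake it while waiting.
 */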
static int get_chip(struct map_info *map, struct flchip *chip, int mode)
{
        int ret;
        DECLARE_WAITQUEUE(wait, current);

 retry:
        if (chip->priv && (mode == FL_WRITING || mode == FL_ERASING)
                && chip->state != FL_SYNCING) {
                /*
                 * OK. We have the possibility of contention on the write/erase
                 * operations which are global to the real chip and not per
                 * partition.  So let's fight it over in the partition which
                 * currently has authority on the operation.
                 *
                 * The rules are as follows:
                 *
                 * - any write operation must own shared->writing.
                 *
                 * - any erase operation must own _both_ shared->writing and
                 *   shared->erasing.
                 *
                 * - contention arbitration is handled in the owner's context.
                 *
                 * The 'shared' struct can be read and/or written only when
                 * its lock is taken.
                 */
                struct flchip_shared *shared = chip->priv;
                struct flchip *contender;
                mutex_lock(&shared->lock);
                contender = shared->writing;
                if (contender && contender != chip) {
                        /*
                         * The engine to perform the desired operation on this
                         * partition is already in use by someone else.
                         * Let's fight over it in the context of the chip
                         * currently using it.  If it is possible to suspend,
                         * that other partition will do just that, otherwise
                         * it'll happily send us to sleep.  In any case, when
                         * get_chip returns success we're clear to go ahead.
                         */
                        ret = mutex_trylock(&contender->mutex);
                        mutex_unlock(&shared->lock);
                        if (!ret)
                                goto retry;
                        mutex_unlock(&chip->mutex);
                        ret = chip_ready(map, contender, mode);
                        mutex_lock(&chip->mutex);

                        if (ret == -EAGAIN) {
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        if (ret) {
                                mutex_unlock(&contender->mutex);
                                return ret;
                        }
                        mutex_lock(&shared->lock);

                        /* We should not own chip if it is already in FL_SYNCING
                         * state. Put contender and retry. */
                        if (chip->state == FL_SYNCING) {
                                put_chip(map, contender);
                                mutex_unlock(&contender->mutex);
                                goto retry;
                        }
                        mutex_unlock(&contender->mutex);
                }

                /* Check if we have suspended erase on this chip.
                   Must sleep in such a case. */
                if (mode == FL_ERASING && shared->erasing
                    && shared->erasing->oldstate == FL_ERASING) {
                        mutex_unlock(&shared->lock);
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        add_wait_queue(&chip->wq, &wait);
                        mutex_unlock(&chip->mutex);
                        schedule();
                        remove_wait_queue(&chip->wq, &wait);
                        mutex_lock(&chip->mutex);
                        goto retry;
                }

                /* We now own it */
                shared->writing = chip;
                if (mode == FL_ERASING)
                        shared->erasing = chip;
                mutex_unlock(&shared->lock);
        }

        ret = chip_ready(map, chip, mode);
        if (ret == -EAGAIN)
                goto retry;

        return ret;
}

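/*
 * chip_ready() - decide whether the chip can service the requested
 * mode right now.
 *
 * Returns 0 if the chip is idle (or an in-progress erase could be
 * suspended for a read/point request), -EIO if the suspend command
 * failed, or -EAGAIN after sleeping on the chip's wait queue, in which
 * case the caller is expected to retry.  Called with chip->mutex held.
 */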
static int chip_ready(struct map_info *map, struct flchip *chip, int mode)
{
        struct lpddr_private *lpddr = map->fldrv_priv;
        int ret = 0;
        DECLARE_WAITQUEUE(wait, current);

        /* Prevent setting state FL_SYNCING for chip in suspended state. */
        if (FL_SYNCING == mode && FL_READY != chip->oldstate)
                goto sleep;

        switch (chip->state) {
        case FL_READY:
        case FL_JEDEC_QUERY:
                return 0;

        case FL_ERASING:
                if (!lpddr->qinfo->SuspEraseSupp ||
                        !(mode == FL_READY || mode == FL_POINT))
                        goto sleep;

                map_write(map, CMD(LPDDR_SUSPEND),
                        map->pfow_base + PFOW_PROGRAM_ERASE_SUSPEND);
                chip->oldstate = FL_ERASING;
                chip->state = FL_ERASE_SUSPENDING;
                ret = wait_for_ready(map, chip, 0);
                if (ret) {
                        /* Oops. Something went wrong. */
                        /* Resume and pretend we weren't here. */
                        put_chip(map, chip);
                        printk(KERN_ERR "%s: suspend operation failed. "
                                        "State may be wrong\n", map->name);
                        return -EIO;
                }
                chip->erase_suspended = 1;
                chip->state = FL_READY;
                return 0;
                /* Erase suspend */
        case FL_POINT:
                /* Only if there's no operation suspended... */
                if (mode == FL_READY && chip->oldstate == FL_READY)
                        return 0;
                /* fall through */
        default:
sleep:
                set_current_state(TASK_UNINTERRUPTIBLE);
                add_wait_queue(&chip->wq, &wait);
                mutex_unlock(&chip->mutex);
                schedule();
                remove_wait_queue(&chip->wq, &wait);
                mutex_lock(&chip->mutex);
                return -EAGAIN;
        }
}

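/*
 * put_chip() - release the chip after an operation.
 *
 * Hands the shared write/erase engine back to a partition with a
 * suspended erase (if any), resumes a suspended erase via the PFOW
 * resume command, and wakes up anyone sleeping on the chip's wait
 * queue.  Called with chip->mutex held.
 */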
static void put_chip(struct map_info *map, struct flchip *chip)
{
        if (chip->priv) {
                struct flchip_shared *shared = chip->priv;
                mutex_lock(&shared->lock);
                if (shared->writing == chip && chip->oldstate == FL_READY) {
                        /* We own the ability to write, but we're done */
                        shared->writing = shared->erasing;
                        if (shared->writing && shared->writing != chip) {
                                /* give back the ownership */
                                struct flchip *loaner = shared->writing;
                                mutex_lock(&loaner->mutex);
                                mutex_unlock(&shared->lock);
                                mutex_unlock(&chip->mutex);
                                put_chip(map, loaner);
                                mutex_lock(&chip->mutex);
                                mutex_unlock(&loaner->mutex);
                                wake_up(&chip->wq);
                                return;
                        }
                        shared->erasing = NULL;
                        shared->writing = NULL;
                } else if (shared->erasing == chip && shared->writing != chip) {
                        /*
                         * We own the ability to erase without the ability
                         * to write, which means the erase was suspended
                         * and some other partition is currently writing.
                         * Don't let the switch below mess things up since
                         * we don't have ownership to resume anything.
                         */
                        mutex_unlock(&shared->lock);
                        wake_up(&chip->wq);
                        return;
                }
                mutex_unlock(&shared->lock);
        }

        switch (chip->oldstate) {
        case FL_ERASING:
                map_write(map, CMD(LPDDR_RESUME),
                                map->pfow_base + PFOW_COMMAND_CODE);
                map_write(map, CMD(LPDDR_START_EXECUTION),
                                map->pfow_base + PFOW_COMMAND_EXECUTE);
                chip->oldstate = FL_READY;
                chip->state = FL_ERASING;
                break;
        case FL_READY:
                break;
        default:
                printk(KERN_ERR "%s: put_chip() called with oldstate %d!\n",
                                map->name, chip->oldstate);
        }
        wake_up(&chip->wq);
}

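/*
 * do_write_buffer() - program one buffer's worth of data at adr.
 *
 * Data is gathered from the kvec array into bank-width map_words,
 * padded with 0xFF where the write does not start or end on a word
 * boundary (programming 0xFF leaves the corresponding cells unchanged),
 * copied into the device's program buffer, and committed with
 * LPDDR_BUFF_PROGRAM.
 *
 * Worked example with illustrative numbers (not taken from this file):
 * with a 4-byte bank width and adr ending in ...0x06, word_gap is first
 * computed as 2 (distance to the next word boundary), the word count is
 * computed, then word_gap becomes 4 - 2 = 2 (the offset of adr inside
 * its word), adr is aligned down by 2, and the first map_word is
 * preloaded with 0xFF so the two leading pad bytes do not disturb
 * existing data.
 */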
static int do_write_buffer(struct map_info *map, struct flchip *chip,
                        unsigned long adr, const struct kvec **pvec,
                        unsigned long *pvec_seek, int len)
{
        struct lpddr_private *lpddr = map->fldrv_priv;
        map_word datum;
        int ret, wbufsize, word_gap, words;
        const struct kvec *vec;
        unsigned long vec_seek;
        unsigned long prog_buf_ofs;

        wbufsize = 1 << lpddr->qinfo->BufSizeShift;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, FL_WRITING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }
        /* Figure out the number of words to write */
        word_gap = (-adr & (map_bankwidth(map)-1));
        words = (len - word_gap + map_bankwidth(map) - 1) / map_bankwidth(map);
        if (!word_gap) {
                words--;
        } else {
                word_gap = map_bankwidth(map) - word_gap;
                adr -= word_gap;
                datum = map_word_ff(map);
        }
        /* Write data */
        /* Get the program buffer offset from PFOW register data first */
        prog_buf_ofs = map->pfow_base + CMDVAL(map_read(map,
                                map->pfow_base + PFOW_PROGRAM_BUFFER_OFFSET));
        vec = *pvec;
        vec_seek = *pvec_seek;
        do {
                int n = map_bankwidth(map) - word_gap;

                if (n > vec->iov_len - vec_seek)
                        n = vec->iov_len - vec_seek;
                if (n > len)
                        n = len;

                if (!word_gap && (len < map_bankwidth(map)))
                        datum = map_word_ff(map);

                datum = map_word_load_partial(map, datum,
                                vec->iov_base + vec_seek, word_gap, n);

                len -= n;
                word_gap += n;
                if (!len || word_gap == map_bankwidth(map)) {
                        map_write(map, datum, prog_buf_ofs);
                        prog_buf_ofs += map_bankwidth(map);
                        word_gap = 0;
                }

                vec_seek += n;
                if (vec_seek == vec->iov_len) {
                        vec++;
                        vec_seek = 0;
                }
        } while (len);
        *pvec = vec;
        *pvec_seek = vec_seek;

        /* GO GO GO */
        send_pfow_command(map, LPDDR_BUFF_PROGRAM, adr, wbufsize, NULL);
        chip->state = FL_WRITING;
        ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->ProgBufferTime));
        if (ret) {
                printk(KERN_WARNING"%s Buffer program error: %d at %lx\n",
                        map->name, ret, adr);
                goto out;
        }

 out:   put_chip(map, chip);
        mutex_unlock(&chip->mutex);
        return ret;
}

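/* Erase one uniform block at adr and wait for it to complete. */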
static int do_erase_oneblock(struct mtd_info *mtd, loff_t adr)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift;
        struct flchip *chip = &lpddr->chips[chipnum];
        int ret;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, FL_ERASING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }
        send_pfow_command(map, LPDDR_BLOCK_ERASE, adr, 0, NULL);
        chip->state = FL_ERASING;
        ret = wait_for_ready(map, chip, (1<<lpddr->qinfo->BlockEraseTime)*1000);
        if (ret) {
                printk(KERN_WARNING"%s Erase block error %d at %llx\n",
                        map->name, ret, adr);
                goto out;
        }
 out:   put_chip(map, chip);
        mutex_unlock(&chip->mutex);
        return ret;
}

static int lpddr_read(struct mtd_info *mtd, loff_t adr, size_t len,
                        size_t *retlen, u_char *buf)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift;
        struct flchip *chip = &lpddr->chips[chipnum];
        int ret = 0;

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, FL_READY);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        map_copy_from(map, buf, adr, len);
        *retlen = len;

        put_chip(map, chip);
        mutex_unlock(&chip->mutex);
        return ret;
}

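/*
 * lpddr_point()/lpddr_unpoint() - give callers a direct pointer into
 * the memory-mapped flash (only offered for linear maps, see
 * lpddr_cmdset()).  Each successful point bumps ref_point_counter and
 * leaves the chip in FL_POINT; unpoint drops the count and returns the
 * chip to FL_READY once the last reference is gone.
 */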
static int lpddr_point(struct mtd_info *mtd, loff_t adr, size_t len,
                        size_t *retlen, void **mtdbuf, resource_size_t *phys)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift;
        unsigned long ofs, last_end = 0;
        struct flchip *chip = &lpddr->chips[chipnum];
        int ret = 0;

        if (!map->virt)
                return -EINVAL;

        /* ofs: offset within the first chip at which the first read should start */
        ofs = adr - (chipnum << lpddr->chipshift);
        *mtdbuf = (void *)map->virt + chip->start + ofs;

        while (len) {
                unsigned long thislen;

                if (chipnum >= lpddr->numchips)
                        break;

                /* We cannot point across chips that are virtually disjoint */
                if (!last_end)
                        last_end = chip->start;
                else if (chip->start != last_end)
                        break;

                if ((len + ofs - 1) >> lpddr->chipshift)
                        thislen = (1<<lpddr->chipshift) - ofs;
                else
                        thislen = len;
                /* get the chip */
                mutex_lock(&chip->mutex);
                ret = get_chip(map, chip, FL_POINT);
                mutex_unlock(&chip->mutex);
                if (ret)
                        break;

                chip->state = FL_POINT;
                chip->ref_point_counter++;
                *retlen += thislen;
                len -= thislen;

                ofs = 0;
                last_end += 1 << lpddr->chipshift;
                chipnum++;
                chip = &lpddr->chips[chipnum];
        }
        return 0;
}

static int lpddr_unpoint(struct mtd_info *mtd, loff_t adr, size_t len)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift, err = 0;
        unsigned long ofs;

        /* ofs: offset within the first chip at which the first read should start */
        ofs = adr - (chipnum << lpddr->chipshift);

        while (len) {
                unsigned long thislen;
                struct flchip *chip;

                chip = &lpddr->chips[chipnum];
                if (chipnum >= lpddr->numchips)
                        break;

                if ((len + ofs - 1) >> lpddr->chipshift)
                        thislen = (1<<lpddr->chipshift) - ofs;
                else
                        thislen = len;

                mutex_lock(&chip->mutex);
                if (chip->state == FL_POINT) {
                        chip->ref_point_counter--;
                        if (chip->ref_point_counter == 0)
                                chip->state = FL_READY;
                } else {
                        printk(KERN_WARNING "%s: Warning: unpoint called on non-"
                                        "pointed region\n", map->name);
                        err = -EINVAL;
                }

                put_chip(map, chip);
                mutex_unlock(&chip->mutex);

                len -= thislen;
                ofs = 0;
                chipnum++;
        }

        return err;
}

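/* mtd ->_write entry point: wrap the single buffer in a kvec and reuse
 * the writev path. */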
static int lpddr_write_buffers(struct mtd_info *mtd, loff_t to, size_t len,
                                size_t *retlen, const u_char *buf)
{
        struct kvec vec;

        vec.iov_base = (void *) buf;
        vec.iov_len = len;

        return lpddr_writev(mtd, &vec, 1, to, retlen);
}

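/*
 * lpddr_writev() - mtd ->_writev entry point.  Splits the I/O into
 * chunks that never cross a program-buffer boundary (wbufsize bytes)
 * and feeds each chunk to do_write_buffer().
 */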
static int lpddr_writev(struct mtd_info *mtd, const struct kvec *vecs,
                                unsigned long count, loff_t to, size_t *retlen)
{
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int ret = 0;
        int chipnum;
        unsigned long ofs, vec_seek, i;
        int wbufsize = 1 << lpddr->qinfo->BufSizeShift;
        size_t len = 0;

        for (i = 0; i < count; i++)
                len += vecs[i].iov_len;

        if (!len)
                return 0;

        chipnum = to >> lpddr->chipshift;

        ofs = to;
        vec_seek = 0;

        do {
                /* We must not cross write block boundaries */
                int size = wbufsize - (ofs & (wbufsize-1));

                if (size > len)
                        size = len;

                ret = do_write_buffer(map, &lpddr->chips[chipnum],
                                          ofs, &vecs, &vec_seek, size);
                if (ret)
                        return ret;

                ofs += size;
                (*retlen) += size;
                len -= size;

                /* Be nice and reschedule with the chip in a usable
                 * state for other processes */
                cond_resched();

        } while (len);

        return 0;
}

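/* mtd ->_erase entry point: erase [instr->addr, instr->addr + instr->len)
 * one uniform block at a time. */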
static int lpddr_erase(struct mtd_info *mtd, struct erase_info *instr)
{
        unsigned long ofs, len;
        int ret;
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int size = 1 << lpddr->qinfo->UniformBlockSizeShift;

        ofs = instr->addr;
        len = instr->len;

        while (len > 0) {
                ret = do_erase_oneblock(mtd, ofs);
                if (ret)
                        return ret;
                ofs += size;
                len -= size;
        }

        return 0;
}

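/*
 * do_xxlock() - common implementation for lpddr_lock()/lpddr_unlock().
 * Issues the PFOW block lock or unlock command for the [adr, adr + len)
 * range and waits for the device to report ready.
 */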
#define DO_XXLOCK_LOCK          1
#define DO_XXLOCK_UNLOCK        2
static int do_xxlock(struct mtd_info *mtd, loff_t adr, uint32_t len, int thunk)
{
        int ret = 0;
        struct map_info *map = mtd->priv;
        struct lpddr_private *lpddr = map->fldrv_priv;
        int chipnum = adr >> lpddr->chipshift;
        struct flchip *chip = &lpddr->chips[chipnum];

        mutex_lock(&chip->mutex);
        ret = get_chip(map, chip, FL_LOCKING);
        if (ret) {
                mutex_unlock(&chip->mutex);
                return ret;
        }

        if (thunk == DO_XXLOCK_LOCK) {
                send_pfow_command(map, LPDDR_LOCK_BLOCK, adr, adr + len, NULL);
                chip->state = FL_LOCKING;
        } else if (thunk == DO_XXLOCK_UNLOCK) {
                send_pfow_command(map, LPDDR_UNLOCK_BLOCK, adr, adr + len, NULL);
                chip->state = FL_UNLOCKING;
        } else
                BUG();

        ret = wait_for_ready(map, chip, 1);
        if (ret) {
                printk(KERN_ERR "%s: block lock/unlock error, status %d\n",
                                map->name, ret);
                goto out;
        }
out:    put_chip(map, chip);
        mutex_unlock(&chip->mutex);
        return ret;
}

static int lpddr_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        return do_xxlock(mtd, ofs, len, DO_XXLOCK_LOCK);
}

static int lpddr_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len)
{
        return do_xxlock(mtd, ofs, len, DO_XXLOCK_UNLOCK);
}

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Alexey Korolev <akorolev@infradead.org>");
MODULE_DESCRIPTION("MTD driver for LPDDR flash chips");