   1/*
   2 * linux/net/sunrpc/xdr.c
   3 *
   4 * Generic XDR support.
   5 *
   6 * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de>
   7 */
   8
   9#include <linux/module.h>
  10#include <linux/slab.h>
  11#include <linux/types.h>
  12#include <linux/string.h>
  13#include <linux/kernel.h>
  14#include <linux/pagemap.h>
  15#include <linux/errno.h>
  16#include <linux/sunrpc/xdr.h>
  17#include <linux/sunrpc/msg_prot.h>
  18
  19/*
  20 * XDR functions for basic NFS types
  21 */
  22__be32 *
  23xdr_encode_netobj(__be32 *p, const struct xdr_netobj *obj)
  24{
  25        unsigned int    quadlen = XDR_QUADLEN(obj->len);
  26
  27        p[quadlen] = 0;         /* zero trailing bytes */
  28        *p++ = cpu_to_be32(obj->len);
  29        memcpy(p, obj->data, obj->len);
  30        return p + XDR_QUADLEN(obj->len);
  31}
  32EXPORT_SYMBOL_GPL(xdr_encode_netobj);
  33
  34__be32 *
  35xdr_decode_netobj(__be32 *p, struct xdr_netobj *obj)
  36{
  37        unsigned int    len;
  38
  39        if ((len = be32_to_cpu(*p++)) > XDR_MAX_NETOBJ)
  40                return NULL;
  41        obj->len  = len;
  42        obj->data = (u8 *) p;
  43        return p + XDR_QUADLEN(len);
  44}
  45EXPORT_SYMBOL_GPL(xdr_decode_netobj);
  46
  47/**
  48 * xdr_encode_opaque_fixed - Encode fixed length opaque data
  49 * @p: pointer to current position in XDR buffer.
  50 * @ptr: pointer to data to encode (or NULL)
  51 * @nbytes: size of data.
  52 *
  53 * Copy the array of data of length nbytes at ptr to the XDR buffer
  54 * at position p, then align to the next 32-bit boundary by padding
  55 * with zero bytes (see RFC1832).
  56 * Note: if ptr is NULL, only the padding is performed.
  57 *
  58 * Returns the updated current XDR buffer position
  59 *
  60 */
  61__be32 *xdr_encode_opaque_fixed(__be32 *p, const void *ptr, unsigned int nbytes)
  62{
  63        if (likely(nbytes != 0)) {
  64                unsigned int quadlen = XDR_QUADLEN(nbytes);
  65                unsigned int padding = (quadlen << 2) - nbytes;
  66
  67                if (ptr != NULL)
  68                        memcpy(p, ptr, nbytes);
  69                if (padding != 0)
  70                        memset((char *)p + nbytes, 0, padding);
  71                p += quadlen;
  72        }
  73        return p;
  74}
  75EXPORT_SYMBOL_GPL(xdr_encode_opaque_fixed);
  76
  77/**
  78 * xdr_encode_opaque - Encode variable length opaque data
  79 * @p: pointer to current position in XDR buffer.
  80 * @ptr: pointer to data to encode (or NULL)
  81 * @nbytes: size of data.
  82 *
  83 * Returns the updated current XDR buffer position
  84 */
  85__be32 *xdr_encode_opaque(__be32 *p, const void *ptr, unsigned int nbytes)
  86{
  87        *p++ = cpu_to_be32(nbytes);
  88        return xdr_encode_opaque_fixed(p, ptr, nbytes);
  89}
  90EXPORT_SYMBOL_GPL(xdr_encode_opaque);
  91
  92__be32 *
  93xdr_encode_string(__be32 *p, const char *string)
  94{
  95        return xdr_encode_array(p, string, strlen(string));
  96}
  97EXPORT_SYMBOL_GPL(xdr_encode_string);
  98
  99__be32 *
 100xdr_decode_string_inplace(__be32 *p, char **sp,
 101                          unsigned int *lenp, unsigned int maxlen)
 102{
 103        u32 len;
 104
 105        len = be32_to_cpu(*p++);
 106        if (len > maxlen)
 107                return NULL;
 108        *lenp = len;
 109        *sp = (char *) p;
 110        return p + XDR_QUADLEN(len);
 111}
 112EXPORT_SYMBOL_GPL(xdr_decode_string_inplace);
 113
/**
 * xdr_terminate_string - '\0'-terminate a string residing in an xdr_buf
 * @buf: XDR buffer where string resides
 * @len: length of string, in bytes
 *
 * Writes a NUL byte immediately after the string data in the page
 * portion of @buf.
 * NOTE(review): only buf->pages[0] is mapped, so this assumes
 * page_base + len does not reach past the first page — confirm with
 * callers before using on longer strings.
 */
void
xdr_terminate_string(struct xdr_buf *buf, const u32 len)
{
	char *kaddr;

	/* Map the first page and plant the terminator one byte past the
	 * string data (offset page_base + len). */
	kaddr = kmap_atomic(buf->pages[0], KM_USER0);
	kaddr[buf->page_base + len] = '\0';
	kunmap_atomic(kaddr, KM_USER0);
}
EXPORT_SYMBOL_GPL(xdr_terminate_string);
 130
/*
 * xdr_encode_pages - attach a page vector as the body of an xdr_buf
 * @xdr: target xdr_buf
 * @pages: page vector carrying the body data
 * @base: byte offset of the data within the first page
 * @len: number of body bytes in the pages
 *
 * Sets up @xdr so its body lives in @pages, and points the tail kvec
 * at the space just beyond the (quad-aligned) head.  If @len is not a
 * multiple of 4, a zeroed pad is synthesized at the start of the tail
 * so the stream stays 32-bit aligned (RFC 1832).
 */
void
xdr_encode_pages(struct xdr_buf *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct kvec *tail = xdr->tail;
	u32 *p;

	xdr->pages = pages;
	xdr->page_base = base;
	xdr->page_len = len;

	/* The tail begins at the first quad boundary after the head data. */
	p = (u32 *)xdr->head[0].iov_base + XDR_QUADLEN(xdr->head[0].iov_len);
	tail->iov_base = p;
	tail->iov_len = 0;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		/* Zero the whole quad, then let the tail start inside it so
		 * the pad bytes precede any tail data. */
		*p = 0;
		tail->iov_base = (char *)p + (len & 3);
		tail->iov_len  = pad;
		len += pad;
	}
	xdr->buflen += len;
	xdr->len += len;
}
EXPORT_SYMBOL_GPL(xdr_encode_pages);
 158
 159void
 160xdr_inline_pages(struct xdr_buf *xdr, unsigned int offset,
 161                 struct page **pages, unsigned int base, unsigned int len)
 162{
 163        struct kvec *head = xdr->head;
 164        struct kvec *tail = xdr->tail;
 165        char *buf = (char *)head->iov_base;
 166        unsigned int buflen = head->iov_len;
 167
 168        head->iov_len  = offset;
 169
 170        xdr->pages = pages;
 171        xdr->page_base = base;
 172        xdr->page_len = len;
 173
 174        tail->iov_base = buf + offset;
 175        tail->iov_len = buflen - offset;
 176
 177        xdr->buflen += len;
 178}
 179EXPORT_SYMBOL_GPL(xdr_inline_pages);
 180
/*
 * Helper routines for doing 'memmove' like operations on a struct xdr_buf
 *
 * _shift_data_right_pages
 * @pages: vector of pages containing both the source and dest memory area.
 * @pgto_base: page vector address of destination
 * @pgfrom_base: page vector address of source
 * @len: number of bytes to copy
 *
 * Note: the addresses pgto_base and pgfrom_base are both calculated in
 *       the same way:
 *            if a memory area starts at byte 'base' in page 'pages[i]',
 *            then its address is given as (i << PAGE_CACHE_SHIFT) + base
 * Also note: pgfrom_base must be < pgto_base, but the memory areas
 *      they point to may overlap.
 */
static void
_shift_data_right_pages(struct page **pages, size_t pgto_base,
		size_t pgfrom_base, size_t len)
{
	struct page **pgfrom, **pgto;
	char *vfrom, *vto;
	size_t copy;

	BUG_ON(pgto_base <= pgfrom_base);

	/* Copy back-to-front so overlapping ranges are shifted safely
	 * (same rationale as memmove with dest > src). */
	pgto_base += len;
	pgfrom_base += len;

	/* Split each end address into a page pointer + in-page offset. */
	pgto = pages + (pgto_base >> PAGE_CACHE_SHIFT);
	pgfrom = pages + (pgfrom_base >> PAGE_CACHE_SHIFT);

	pgto_base &= ~PAGE_CACHE_MASK;
	pgfrom_base &= ~PAGE_CACHE_MASK;

	do {
		/* Are any pointers crossing a page boundary? */
		if (pgto_base == 0) {
			pgto_base = PAGE_CACHE_SIZE;
			pgto--;
		}
		if (pgfrom_base == 0) {
			pgfrom_base = PAGE_CACHE_SIZE;
			pgfrom--;
		}

		/* Copy the largest chunk that stays within both the current
		 * source page and the current destination page. */
		copy = len;
		if (copy > pgto_base)
			copy = pgto_base;
		if (copy > pgfrom_base)
			copy = pgfrom_base;
		pgto_base -= copy;
		pgfrom_base -= copy;

		vto = kmap_atomic(*pgto, KM_USER0);
		vfrom = kmap_atomic(*pgfrom, KM_USER1);
		/* memmove: source and destination pages may be the same. */
		memmove(vto + pgto_base, vfrom + pgfrom_base, copy);
		flush_dcache_page(*pgto);
		kunmap_atomic(vfrom, KM_USER1);
		kunmap_atomic(vto, KM_USER0);

	} while ((len -= copy) != 0);
}
 244
/*
 * _copy_to_pages
 * @pages: array of pages
 * @pgbase: page vector address of destination
 * @p: pointer to source data
 * @len: length
 *
 * Copies data from an arbitrary memory location into an array of pages
 * The copy is assumed to be non-overlapping.
 */
static void
_copy_to_pages(struct page **pages, size_t pgbase, const char *p, size_t len)
{
	struct page **pgto;
	char *vto;
	size_t copy;

	/* Locate the starting page and the offset within it. */
	pgto = pages + (pgbase >> PAGE_CACHE_SHIFT);
	pgbase &= ~PAGE_CACHE_MASK;

	for (;;) {
		/* Copy at most up to the end of the current page. */
		copy = PAGE_CACHE_SIZE - pgbase;
		if (copy > len)
			copy = len;

		vto = kmap_atomic(*pgto, KM_USER0);
		memcpy(vto + pgbase, p, copy);
		kunmap_atomic(vto, KM_USER0);

		len -= copy;
		if (len == 0)
			break;

		pgbase += copy;
		if (pgbase == PAGE_CACHE_SIZE) {
			/* Page filled: flush it before moving on. */
			flush_dcache_page(*pgto);
			pgbase = 0;
			pgto++;
		}
		p += copy;
	}
	/* Flush the final (possibly partially written) page. */
	flush_dcache_page(*pgto);
}
 288
 289/*
 290 * _copy_from_pages
 291 * @p: pointer to destination
 292 * @pages: array of pages
 293 * @pgbase: offset of source data
 294 * @len: length
 295 *
 296 * Copies data into an arbitrary memory location from an array of pages
 297 * The copy is assumed to be non-overlapping.
 298 */
 299static void
 300_copy_from_pages(char *p, struct page **pages, size_t pgbase, size_t len)
 301{
 302        struct page **pgfrom;
 303        char *vfrom;
 304        size_t copy;
 305
 306        pgfrom = pages + (pgbase >> PAGE_CACHE_SHIFT);
 307        pgbase &= ~PAGE_CACHE_MASK;
 308
 309        do {
 310                copy = PAGE_CACHE_SIZE - pgbase;
 311                if (copy > len)
 312                        copy = len;
 313
 314                vfrom = kmap_atomic(*pgfrom, KM_USER0);
 315                memcpy(p, vfrom + pgbase, copy);
 316                kunmap_atomic(vfrom, KM_USER0);
 317
 318                pgbase += copy;
 319                if (pgbase == PAGE_CACHE_SIZE) {
 320                        pgbase = 0;
 321                        pgfrom++;
 322                }
 323                p += copy;
 324
 325        } while ((len -= copy) != 0);
 326}
 327
/*
 * xdr_shrink_bufhead
 * @buf: xdr_buf
 * @len: bytes to remove from buf->head[0]
 *
 * Shrinks XDR buffer's header kvec buf->head[0] by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the inlined pages and/or the tail.
 */
static void
xdr_shrink_bufhead(struct xdr_buf *buf, size_t len)
{
	struct kvec *head, *tail;
	size_t copy, offs;
	unsigned int pglen = buf->page_len;

	tail = buf->tail;
	head = buf->head;
	BUG_ON (len > head->iov_len);

	/* Shift the tail first */
	if (tail->iov_len != 0) {
		if (tail->iov_len > len) {
			copy = tail->iov_len - len;
			/* Slide existing tail data up by 'len' bytes to make
			 * room for data arriving from the pages/head. */
			memmove((char *)tail->iov_base + len,
					tail->iov_base, copy);
		}
		/* Copy from the inlined pages into the tail */
		copy = len;
		if (copy > pglen)
			copy = pglen;
		/* offs: where in the tail the page data lands. */
		offs = len - copy;
		if (offs >= tail->iov_len)
			copy = 0;	/* destination entirely beyond the tail */
		else if (copy > tail->iov_len - offs)
			copy = tail->iov_len - offs;	/* clip to tail capacity */
		if (copy != 0)
			_copy_from_pages((char *)tail->iov_base + offs,
					buf->pages,
					buf->page_base + pglen + offs - len,
					copy);
		/* Do we also need to copy data from the head into the tail ? */
		if (len > pglen) {
			offs = copy = len - pglen;
			if (copy > tail->iov_len)
				copy = tail->iov_len;
			memcpy(tail->iov_base,
					(char *)head->iov_base +
					head->iov_len - offs,
					copy);
		}
	}
	/* Now handle pages */
	if (pglen != 0) {
		/* Shift surviving page data right to open a gap at the
		 * start for the displaced head bytes. */
		if (pglen > len)
			_shift_data_right_pages(buf->pages,
					buf->page_base + len,
					buf->page_base,
					pglen - len);
		copy = len;
		if (len > pglen)
			copy = pglen;
		/* Move the trailing 'copy' head bytes into the page start. */
		_copy_to_pages(buf->pages, buf->page_base,
				(char *)head->iov_base + head->iov_len - len,
				copy);
	}
	head->iov_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
 400
/*
 * xdr_shrink_pagelen
 * @buf: xdr_buf
 * @len: bytes to remove from buf->pages
 *
 * Shrinks XDR buffer's page array buf->pages by
 * 'len' bytes. The extra data is not lost, but is instead
 * moved into the tail.
 */
static void
xdr_shrink_pagelen(struct xdr_buf *buf, size_t len)
{
	struct kvec *tail;
	size_t copy;
	unsigned int pglen = buf->page_len;
	unsigned int tailbuf_len;

	tail = buf->tail;
	BUG_ON (len > pglen);

	/* Total space available for the tail in the underlying buffer. */
	tailbuf_len = buf->buflen - buf->head->iov_len - buf->page_len;

	/* Shift the tail first */
	if (tailbuf_len != 0) {
		unsigned int free_space = tailbuf_len - tail->iov_len;

		/* Grow the tail by up to 'len' bytes of spare capacity. */
		if (len < free_space)
			free_space = len;
		tail->iov_len += free_space;

		copy = len;
		if (tail->iov_len > len) {
			/* Slide existing tail data up to make room at the
			 * front for the incoming page data. */
			char *p = (char *)tail->iov_base + len;
			memmove(p, tail->iov_base, tail->iov_len - len);
		} else
			copy = tail->iov_len;	/* tail too small: copy what fits */
		/* Copy from the inlined pages into the tail */
		_copy_from_pages((char *)tail->iov_base,
				buf->pages, buf->page_base + pglen - len,
				copy);
	}
	buf->page_len -= len;
	buf->buflen -= len;
	/* Have we truncated the message? */
	if (buf->len > buf->buflen)
		buf->len = buf->buflen;
}
 448
/**
 * xdr_shift_buf - shrink an xdr_buf's head kvec
 * @buf: target xdr_buf
 * @len: number of bytes to remove from the head
 *
 * Public wrapper around xdr_shrink_bufhead(): the removed head bytes
 * are shifted into the pages and/or the tail rather than discarded.
 */
void
xdr_shift_buf(struct xdr_buf *buf, size_t len)
{
	xdr_shrink_bufhead(buf, len);
}
EXPORT_SYMBOL_GPL(xdr_shift_buf);
 455
/**
 * xdr_init_encode - Initialize a struct xdr_stream for sending data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer in which to encode data
 * @p: current pointer inside XDR buffer
 *
 * Note: at the moment the RPC client only passes the length of our
 *       scratch buffer in the xdr_buf's header kvec. Previously this
 *       meant we needed to call xdr_adjust_iovec() after encoding the
 *       data. With the new scheme, the xdr_stream manages the details
 *       of the buffer length, and takes care of adjusting the kvec
 *       length for us.
 */
void xdr_init_encode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	struct kvec *iov = buf->head;
	/* Head scratch space = whatever buflen is not claimed by the
	 * pages and the tail. */
	int scratch_len = buf->buflen - buf->page_len - buf->tail[0].iov_len;

	BUG_ON(scratch_len < 0);
	xdr->buf = buf;
	xdr->iov = iov;
	/* Encoding resumes right after any data already in the head. */
	xdr->p = (__be32 *)((char *)iov->iov_base + iov->iov_len);
	xdr->end = (__be32 *)((char *)iov->iov_base + scratch_len);
	BUG_ON(iov->iov_len > scratch_len);

	if (p != xdr->p && p != NULL) {
		size_t len;

		/* Caller already advanced past xdr->p: account for those
		 * bytes in both the buffer and the head kvec. */
		BUG_ON(p < xdr->p || p > xdr->end);
		len = (char *)p - (char *)xdr->p;
		xdr->p = p;
		buf->len += len;
		iov->iov_len += len;
	}
}
EXPORT_SYMBOL_GPL(xdr_init_encode);
 492
 493/**
 494 * xdr_reserve_space - Reserve buffer space for sending
 495 * @xdr: pointer to xdr_stream
 496 * @nbytes: number of bytes to reserve
 497 *
 498 * Checks that we have enough buffer space to encode 'nbytes' more
 499 * bytes of data. If so, update the total xdr_buf length, and
 500 * adjust the length of the current kvec.
 501 */
 502__be32 * xdr_reserve_space(struct xdr_stream *xdr, size_t nbytes)
 503{
 504        __be32 *p = xdr->p;
 505        __be32 *q;
 506
 507        /* align nbytes on the next 32-bit boundary */
 508        nbytes += 3;
 509        nbytes &= ~3;
 510        q = p + (nbytes >> 2);
 511        if (unlikely(q > xdr->end || q < p))
 512                return NULL;
 513        xdr->p = q;
 514        xdr->iov->iov_len += nbytes;
 515        xdr->buf->len += nbytes;
 516        return p;
 517}
 518EXPORT_SYMBOL_GPL(xdr_reserve_space);
 519
/**
 * xdr_write_pages - Insert a list of pages into an XDR buffer for sending
 * @xdr: pointer to xdr_stream
 * @pages: list of pages
 * @base: offset of first byte
 * @len: length of data in bytes
 *
 */
void xdr_write_pages(struct xdr_stream *xdr, struct page **pages, unsigned int base,
		 unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov = buf->tail;
	buf->pages = pages;
	buf->page_base = base;
	buf->page_len = len;

	/* The tail starts at the stream's current encode position; it is
	 * empty until something is encoded after the pages. */
	iov->iov_base = (char *)xdr->p;
	iov->iov_len  = 0;
	xdr->iov = iov;

	if (len & 3) {
		unsigned int pad = 4 - (len & 3);

		/* Quad-align the page data: write one zeroed word at the
		 * current position and start the tail partway into it, so
		 * the first 'pad' tail bytes serve as the XDR padding. */
		BUG_ON(xdr->p >= xdr->end);
		iov->iov_base = (char *)xdr->p + (len & 3);
		iov->iov_len  += pad;
		len += pad;
		*xdr->p++ = 0;
	}
	buf->buflen += len;
	buf->len += len;
}
EXPORT_SYMBOL_GPL(xdr_write_pages);
 554
 555static void xdr_set_iov(struct xdr_stream *xdr, struct kvec *iov,
 556                __be32 *p, unsigned int len)
 557{
 558        if (len > iov->iov_len)
 559                len = iov->iov_len;
 560        if (p == NULL)
 561                p = (__be32*)iov->iov_base;
 562        xdr->p = p;
 563        xdr->end = (__be32*)(iov->iov_base + len);
 564        xdr->iov = iov;
 565        xdr->page_ptr = NULL;
 566}
 567
/* Point the decode stream at the page data starting @base bytes into
 * the buffer's page portion, exposing at most @len bytes (clipped to
 * the end of the containing page).  Returns -EINVAL if @base lies
 * beyond the page data. */
static int xdr_set_page_base(struct xdr_stream *xdr,
		unsigned int base, unsigned int len)
{
	unsigned int pgnr;
	unsigned int maxlen;
	unsigned int pgoff;
	unsigned int pgend;
	void *kaddr;

	maxlen = xdr->buf->page_len;
	if (base >= maxlen)
		return -EINVAL;
	maxlen -= base;
	if (len > maxlen)
		len = maxlen;

	/* Convert to an offset within the page vector itself. */
	base += xdr->buf->page_base;

	pgnr = base >> PAGE_SHIFT;
	xdr->page_ptr = &xdr->buf->pages[pgnr];
	/* NOTE(review): uses page_address(), i.e. assumes the pages are
	 * kernel-mapped (lowmem) — confirm against allocation sites. */
	kaddr = page_address(*xdr->page_ptr);

	pgoff = base & ~PAGE_MASK;
	xdr->p = (__be32*)(kaddr + pgoff);

	/* The decode window never crosses a page boundary. */
	pgend = pgoff + len;
	if (pgend > PAGE_SIZE)
		pgend = PAGE_SIZE;
	xdr->end = (__be32*)(kaddr + pgend);
	xdr->iov = NULL;
	return 0;
}
 600
 601static void xdr_set_next_page(struct xdr_stream *xdr)
 602{
 603        unsigned int newbase;
 604
 605        newbase = (1 + xdr->page_ptr - xdr->buf->pages) << PAGE_SHIFT;
 606        newbase -= xdr->buf->page_base;
 607
 608        if (xdr_set_page_base(xdr, newbase, PAGE_SIZE) < 0)
 609                xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
 610}
 611
/* Step the decode stream to the next buffer region, following the
 * head -> pages -> tail order.  Returns true if decodable bytes remain
 * at the new position. */
static bool xdr_set_next_buffer(struct xdr_stream *xdr)
{
	if (xdr->page_ptr != NULL)
		xdr_set_next_page(xdr);
	else if (xdr->iov == xdr->buf->head) {
		/* Leaving the head: try the pages, fall back to the tail
		 * when there is no page data. */
		if (xdr_set_page_base(xdr, 0, PAGE_SIZE) < 0)
			xdr_set_iov(xdr, xdr->buf->tail, NULL, xdr->buf->len);
	}
	return xdr->p != xdr->end;
}
 622
/**
 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to XDR buffer from which to decode data
 * @p: current pointer inside XDR buffer
 */
void xdr_init_decode(struct xdr_stream *xdr, struct xdr_buf *buf, __be32 *p)
{
	xdr->buf = buf;
	/* No scratch buffer until xdr_set_scratch_buffer() is called. */
	xdr->scratch.iov_base = NULL;
	xdr->scratch.iov_len = 0;
	/* Start in the head if it holds data, otherwise in the pages.
	 * NOTE(review): a buf with empty head AND empty pages leaves
	 * xdr->p/end unset here — callers appear to guarantee one is
	 * non-empty; confirm. */
	if (buf->head[0].iov_len != 0)
		xdr_set_iov(xdr, buf->head, p, buf->len);
	else if (buf->page_len != 0)
		xdr_set_page_base(xdr, 0, buf->len);
}
EXPORT_SYMBOL_GPL(xdr_init_decode);
 640
 641/**
 642 * xdr_init_decode - Initialize an xdr_stream for decoding data.
 643 * @xdr: pointer to xdr_stream struct
 644 * @buf: pointer to XDR buffer from which to decode data
 645 * @pages: list of pages to decode into
 646 * @len: length in bytes of buffer in pages
 647 */
 648void xdr_init_decode_pages(struct xdr_stream *xdr, struct xdr_buf *buf,
 649                           struct page **pages, unsigned int len)
 650{
 651        memset(buf, 0, sizeof(*buf));
 652        buf->pages =  pages;
 653        buf->page_len =  len;
 654        buf->buflen =  len;
 655        buf->len = len;
 656        xdr_init_decode(xdr, buf, NULL);
 657}
 658EXPORT_SYMBOL_GPL(xdr_init_decode_pages);
 659
 660static __be32 * __xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
 661{
 662        __be32 *p = xdr->p;
 663        __be32 *q = p + XDR_QUADLEN(nbytes);
 664
 665        if (unlikely(q > xdr->end || q < p))
 666                return NULL;
 667        xdr->p = q;
 668        return p;
 669}
 670
/**
 * xdr_set_scratch_buffer - Attach a scratch buffer for decoding data.
 * @xdr: pointer to xdr_stream struct
 * @buf: pointer to an empty buffer
 * @buflen: size of 'buf'
 *
 * The scratch buffer is used when decoding from an array of pages.
 * If an xdr_inline_decode() call spans across page boundaries, then
 * we copy the data into the scratch buffer in order to allow linear
 * access.
 */
void xdr_set_scratch_buffer(struct xdr_stream *xdr, void *buf, size_t buflen)
{
	/* Only recorded here; consumed by xdr_copy_to_scratch(). */
	xdr->scratch.iov_base = buf;
	xdr->scratch.iov_len = buflen;
}
EXPORT_SYMBOL_GPL(xdr_set_scratch_buffer);
 688
/* Decode @nbytes that straddle a buffer-region boundary by copying the
 * two fragments into the scratch buffer and returning a pointer to the
 * linearized copy.  Returns NULL if the scratch buffer is too small or
 * the remaining data cannot supply @nbytes. */
static __be32 *xdr_copy_to_scratch(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;
	void *cpdest = xdr->scratch.iov_base;
	/* Bytes left before the end of the current buffer region. */
	size_t cplen = (char *)xdr->end - (char *)xdr->p;

	if (nbytes > xdr->scratch.iov_len)
		return NULL;
	/* First fragment: everything up to the end of this region. */
	memcpy(cpdest, xdr->p, cplen);
	cpdest += cplen;
	nbytes -= cplen;
	if (!xdr_set_next_buffer(xdr))
		return NULL;
	/* Second fragment: the remainder, taken from the next region. */
	p = __xdr_inline_decode(xdr, nbytes);
	if (p == NULL)
		return NULL;
	memcpy(cpdest, p, nbytes);
	return xdr->scratch.iov_base;
}
 708
/**
 * xdr_inline_decode - Retrieve XDR data to decode
 * @xdr: pointer to xdr_stream struct
 * @nbytes: number of bytes of data to decode
 *
 * Check if the input buffer is long enough to enable us to decode
 * 'nbytes' more bytes of data starting at the current position.
 * If so return the current pointer, then update the current
 * pointer position.
 */
__be32 * xdr_inline_decode(struct xdr_stream *xdr, size_t nbytes)
{
	__be32 *p;

	if (nbytes == 0)
		return xdr->p;
	/* Current region exhausted: step to the next one. */
	if (xdr->p == xdr->end && !xdr_set_next_buffer(xdr))
		return NULL;
	p = __xdr_inline_decode(xdr, nbytes);
	if (p != NULL)
		return p;
	/* Request straddles a region boundary: linearize via scratch. */
	return xdr_copy_to_scratch(xdr, nbytes);
}
EXPORT_SYMBOL_GPL(xdr_inline_decode);
 733
/**
 * xdr_read_pages - Ensure page-based XDR data to decode is aligned at current pointer position
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[].
 */
void xdr_read_pages(struct xdr_stream *xdr, unsigned int len)
{
	struct xdr_buf *buf = xdr->buf;
	struct kvec *iov;
	ssize_t shift;
	unsigned int end;
	int padding;

	/* Realign pages to current pointer position */
	iov  = buf->head;
	/* Bytes of head data not yet consumed by the decoder. */
	shift = iov->iov_len + (char *)iov->iov_base - (char *)xdr->p;
	if (shift > 0)
		xdr_shrink_bufhead(buf, shift);

	/* Truncate page data and move it into the tail */
	if (buf->page_len > len)
		xdr_shrink_pagelen(buf, buf->page_len - len);
	/* XDR pad bytes needed to round len up to a quad boundary. */
	padding = (XDR_QUADLEN(len) << 2) - len;
	xdr->iov = iov = buf->tail;
	/* Compute remaining message length.  */
	end = iov->iov_len;
	shift = buf->buflen - buf->len;
	if (shift < end)
		end -= shift;
	else if (shift > 0)
		end = 0;
	/*
	 * Position current pointer at beginning of tail, and
	 * set remaining message length.
	 */
	xdr->p = (__be32 *)((char *)iov->iov_base + padding);
	xdr->end = (__be32 *)((char *)iov->iov_base + end);
}
EXPORT_SYMBOL_GPL(xdr_read_pages);
 777
/**
 * xdr_enter_page - decode data from the XDR page
 * @xdr: pointer to xdr_stream struct
 * @len: number of bytes of page data
 *
 * Moves data beyond the current pointer position from the XDR head[] buffer
 * into the page list. Any data that lies beyond current position + "len"
 * bytes is moved into the XDR tail[]. The current pointer is then
 * repositioned at the beginning of the first XDR page.
 */
void xdr_enter_page(struct xdr_stream *xdr, unsigned int len)
{
	xdr_read_pages(xdr, len);
	/*
	 * xdr_read_pages() left the stream at the tail; move it back to
	 * the beginning of the first page of data.
	 */
	xdr_set_page_base(xdr, 0, len);
}
EXPORT_SYMBOL_GPL(xdr_enter_page);
 798
/* A zero-length kvec used to reset unused head/tail slots. */
static struct kvec empty_iov = {.iov_base = NULL, .iov_len = 0};

/* Build an xdr_buf whose entire content is the single kvec @iov:
 * head = *iov, no pages, empty tail. */
void
xdr_buf_from_iov(struct kvec *iov, struct xdr_buf *buf)
{
	buf->head[0] = *iov;
	buf->tail[0] = empty_iov;
	buf->page_len = 0;
	buf->buflen = buf->len = iov->iov_len;
}
EXPORT_SYMBOL_GPL(xdr_buf_from_iov);
 810
/* Sets subbuf to the portion of buf of length len beginning base bytes
 * from the start of buf. Returns -1 if base or length are out of bounds. */
int
xdr_buf_subsegment(struct xdr_buf *buf, struct xdr_buf *subbuf,
			unsigned int base, unsigned int len)
{
	subbuf->buflen = subbuf->len = len;
	/* Walk head -> pages -> tail, consuming 'base' to find the start
	 * and 'len' to size each sub-segment. */
	if (base < buf->head[0].iov_len) {
		subbuf->head[0].iov_base = buf->head[0].iov_base + base;
		subbuf->head[0].iov_len = min_t(unsigned int, len,
						buf->head[0].iov_len - base);
		len -= subbuf->head[0].iov_len;
		base = 0;
	} else {
		subbuf->head[0].iov_base = NULL;
		subbuf->head[0].iov_len = 0;
		base -= buf->head[0].iov_len;
	}

	if (base < buf->page_len) {
		subbuf->page_len = min(buf->page_len - base, len);
		base += buf->page_base;
		/* Split the page offset into page index + in-page offset. */
		subbuf->page_base = base & ~PAGE_CACHE_MASK;
		subbuf->pages = &buf->pages[base >> PAGE_CACHE_SHIFT];
		len -= subbuf->page_len;
		base = 0;
	} else {
		base -= buf->page_len;
		subbuf->page_len = 0;
	}

	if (base < buf->tail[0].iov_len) {
		subbuf->tail[0].iov_base = buf->tail[0].iov_base + base;
		subbuf->tail[0].iov_len = min_t(unsigned int, len,
						buf->tail[0].iov_len - base);
		len -= subbuf->tail[0].iov_len;
		base = 0;
	} else {
		subbuf->tail[0].iov_base = NULL;
		subbuf->tail[0].iov_len = 0;
		base -= buf->tail[0].iov_len;
	}

	/* Anything left over means the requested range was out of bounds. */
	if (base || len)
		return -1;
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_subsegment);
 859
 860static void __read_bytes_from_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 861{
 862        unsigned int this_len;
 863
 864        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
 865        memcpy(obj, subbuf->head[0].iov_base, this_len);
 866        len -= this_len;
 867        obj += this_len;
 868        this_len = min_t(unsigned int, len, subbuf->page_len);
 869        if (this_len)
 870                _copy_from_pages(obj, subbuf->pages, subbuf->page_base, this_len);
 871        len -= this_len;
 872        obj += this_len;
 873        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
 874        memcpy(obj, subbuf->tail[0].iov_base, this_len);
 875}
 876
 877/* obj is assumed to point to allocated memory of size at least len: */
 878int read_bytes_from_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
 879{
 880        struct xdr_buf subbuf;
 881        int status;
 882
 883        status = xdr_buf_subsegment(buf, &subbuf, base, len);
 884        if (status != 0)
 885                return status;
 886        __read_bytes_from_xdr_buf(&subbuf, obj, len);
 887        return 0;
 888}
 889EXPORT_SYMBOL_GPL(read_bytes_from_xdr_buf);
 890
 891static void __write_bytes_to_xdr_buf(struct xdr_buf *subbuf, void *obj, unsigned int len)
 892{
 893        unsigned int this_len;
 894
 895        this_len = min_t(unsigned int, len, subbuf->head[0].iov_len);
 896        memcpy(subbuf->head[0].iov_base, obj, this_len);
 897        len -= this_len;
 898        obj += this_len;
 899        this_len = min_t(unsigned int, len, subbuf->page_len);
 900        if (this_len)
 901                _copy_to_pages(subbuf->pages, subbuf->page_base, obj, this_len);
 902        len -= this_len;
 903        obj += this_len;
 904        this_len = min_t(unsigned int, len, subbuf->tail[0].iov_len);
 905        memcpy(subbuf->tail[0].iov_base, obj, this_len);
 906}
 907
 908/* obj is assumed to point to allocated memory of size at least len: */
 909int write_bytes_to_xdr_buf(struct xdr_buf *buf, unsigned int base, void *obj, unsigned int len)
 910{
 911        struct xdr_buf subbuf;
 912        int status;
 913
 914        status = xdr_buf_subsegment(buf, &subbuf, base, len);
 915        if (status != 0)
 916                return status;
 917        __write_bytes_to_xdr_buf(&subbuf, obj, len);
 918        return 0;
 919}
 920EXPORT_SYMBOL_GPL(write_bytes_to_xdr_buf);
 921
 922int
 923xdr_decode_word(struct xdr_buf *buf, unsigned int base, u32 *obj)
 924{
 925        __be32  raw;
 926        int     status;
 927
 928        status = read_bytes_from_xdr_buf(buf, base, &raw, sizeof(*obj));
 929        if (status)
 930                return status;
 931        *obj = be32_to_cpu(raw);
 932        return 0;
 933}
 934EXPORT_SYMBOL_GPL(xdr_decode_word);
 935
 936int
 937xdr_encode_word(struct xdr_buf *buf, unsigned int base, u32 obj)
 938{
 939        __be32  raw = cpu_to_be32(obj);
 940
 941        return write_bytes_to_xdr_buf(buf, base, &raw, sizeof(obj));
 942}
 943EXPORT_SYMBOL_GPL(xdr_encode_word);
 944
/* If the netobj starting offset bytes from the start of xdr_buf is contained
 * entirely in the head or the tail, set object to point to it; otherwise
 * try to find space for it at the end of the tail, copy it there, and
 * set obj to point to it. */
int xdr_buf_read_netobj(struct xdr_buf *buf, struct xdr_netobj *obj, unsigned int offset)
{
	struct xdr_buf subbuf;

	/* Read the 32-bit length word that precedes the object data. */
	if (xdr_decode_word(buf, offset, &obj->len))
		return -EFAULT;
	/* Carve out the object payload (skipping the 4-byte length word);
	 * this also validates that obj->len bytes really are present. */
	if (xdr_buf_subsegment(buf, &subbuf, offset + 4, obj->len))
		return -EFAULT;

	/* Is the obj contained entirely in the head? */
	obj->data = subbuf.head[0].iov_base;
	if (subbuf.head[0].iov_len == obj->len)
		return 0;
	/* ..or is the obj contained entirely in the tail? */
	obj->data = subbuf.tail[0].iov_base;
	if (subbuf.tail[0].iov_len == obj->len)
		return 0;

	/* use end of tail as storage for obj:
	 * (We don't copy to the beginning because then we'd have
	 * to worry about doing a potentially overlapping copy.
	 * This assumes the object is at most half the length of the
	 * tail.) */
	/* buflen - len is the buffer's unused slack space. */
	if (obj->len > buf->buflen - buf->len)
		return -ENOMEM;
	/* NOTE(review): this assumes the slack actually sits immediately
	 * after the chosen iovec (end of tail, or end of head when the
	 * tail is empty) — confirm callers' buflen accounting guarantees
	 * that before relying on it. */
	if (buf->tail[0].iov_len != 0)
		obj->data = buf->tail[0].iov_base + buf->tail[0].iov_len;
	else
		obj->data = buf->head[0].iov_base + buf->head[0].iov_len;
	__read_bytes_from_xdr_buf(&subbuf, obj->data, obj->len);
	return 0;
}
EXPORT_SYMBOL_GPL(xdr_buf_read_netobj);
 982
/* Returns 0 on success, or else a negative error code. */
/*
 * Encode or decode an XDR array of fixed-size elements in place.
 *
 * The array starts @base bytes into @buf with a 32-bit element count,
 * followed by array_len * elem_size bytes of element data that may span
 * the head iovec, the page list and the tail iovec.  desc->xcode is
 * invoked once per element; @encode selects the direction (non-zero:
 * write elements into the buffer, zero: read them out).
 *
 * Elements that straddle a segment or page boundary are staged through
 * a kmalloc'd bounce buffer (elem); `copied` counts how many bytes of
 * the current partial element have been transferred so far.
 */
static int
xdr_xcode_array2(struct xdr_buf *buf, unsigned int base,
		 struct xdr_array2_desc *desc, int encode)
{
	char *elem = NULL, *c;
	unsigned int copied = 0, todo, avail_here;
	struct page **ppages = NULL;
	int err;

	if (encode) {
		if (xdr_encode_word(buf, base, desc->array_len) != 0)
			return -EINVAL;
	} else {
		/* NOTE(review): array_len * elem_size is multiplied in
		 * unsigned int and could wrap before the (unsigned long)
		 * widening of base takes effect — verify array_maxlen
		 * bounds make overflow impossible for every caller. */
		if (xdr_decode_word(buf, base, &desc->array_len) != 0 ||
		    desc->array_len > desc->array_maxlen ||
		    (unsigned long) base + 4 + desc->array_len *
				    desc->elem_size > buf->len)
			return -EINVAL;
	}
	base += 4;	/* step past the element-count word */

	if (!desc->xcode)
		return 0;	/* caller only wanted the length handled */

	todo = desc->array_len * desc->elem_size;	/* bytes remaining */

	/* process head */
	if (todo && base < buf->head->iov_len) {
		c = buf->head->iov_base + base;
		avail_here = min_t(unsigned int, todo,
				   buf->head->iov_len - base);
		todo -= avail_here;

		/* xcode the whole elements that fit in the head */
		while (avail_here >= desc->elem_size) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			avail_here -= desc->elem_size;
		}
		/* partial element at the end of the head: stage via elem */
		if (avail_here) {
			if (!elem) {
				elem = kmalloc(desc->elem_size, GFP_KERNEL);
				err = -ENOMEM;
				if (!elem)
					goto out;
			}
			if (encode) {
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
				memcpy(c, elem, avail_here);
			} else
				memcpy(elem, c, avail_here);
			copied = avail_here;
		}
		base = buf->head->iov_len;  /* align to start of pages */
	}

	/* process pages array */
	base -= buf->head->iov_len;
	if (todo && base < buf->page_len) {
		unsigned int avail_page;

		avail_here = min(todo, buf->page_len - base);
		todo -= avail_here;

		/* translate buffer offset into page index + in-page offset */
		base += buf->page_base;
		ppages = buf->pages + (base >> PAGE_CACHE_SHIFT);
		base &= ~PAGE_CACHE_MASK;
		avail_page = min_t(unsigned int, PAGE_CACHE_SIZE - base,
					avail_here);
		c = kmap(*ppages) + base;

		while (avail_here) {
			avail_here -= avail_page;
			/* finish an element carried over from the previous
			 * segment/page, or stage one too big for this page */
			if (copied || avail_page < desc->elem_size) {
				unsigned int l = min(avail_page,
					desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
				avail_page -= l;
				c += l;
			}
			/* xcode whole elements directly in the mapped page */
			while (avail_page >= desc->elem_size) {
				err = desc->xcode(desc, c);
				if (err)
					goto out;
				c += desc->elem_size;
				avail_page -= desc->elem_size;
			}
			/* partial element at the end of this page */
			if (avail_page) {
				unsigned int l = min(avail_page,
					    desc->elem_size - copied);
				if (!elem) {
					elem = kmalloc(desc->elem_size,
						       GFP_KERNEL);
					err = -ENOMEM;
					if (!elem)
						goto out;
				}
				if (encode) {
					if (!copied) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
					}
					memcpy(c, elem + copied, l);
					copied += l;
					if (copied == desc->elem_size)
						copied = 0;
				} else {
					memcpy(elem + copied, c, l);
					copied += l;
					if (copied == desc->elem_size) {
						err = desc->xcode(desc, elem);
						if (err)
							goto out;
						copied = 0;
					}
				}
			}
			/* map the next page while page data remains */
			if (avail_here) {
				kunmap(*ppages);
				ppages++;
				c = kmap(*ppages);
			}

			avail_page = min(avail_here,
				 (unsigned int) PAGE_CACHE_SIZE);
		}
		base = buf->page_len;  /* align to start of tail */
	}

	/* process tail */
	base -= buf->page_len;
	if (todo) {
		c = buf->tail->iov_base + base;
		/* complete any element left half-done in the pages */
		if (copied) {
			unsigned int l = desc->elem_size - copied;

			if (encode)
				memcpy(c, elem + copied, l);
			else {
				memcpy(elem + copied, c, l);
				err = desc->xcode(desc, elem);
				if (err)
					goto out;
			}
			todo -= l;
			c += l;
		}
		/* remaining elements lie contiguously in the tail */
		while (todo) {
			err = desc->xcode(desc, c);
			if (err)
				goto out;
			c += desc->elem_size;
			todo -= desc->elem_size;
		}
	}
	err = 0;

out:
	kfree(elem);
	/* unmap the page left mapped by an early exit from the page loop */
	if (ppages)
		kunmap(*ppages);
	return err;
}
1177
1178int
1179xdr_decode_array2(struct xdr_buf *buf, unsigned int base,
1180                  struct xdr_array2_desc *desc)
1181{
1182        if (base >= buf->len)
1183                return -EINVAL;
1184
1185        return xdr_xcode_array2(buf, base, desc, 0);
1186}
1187EXPORT_SYMBOL_GPL(xdr_decode_array2);
1188
1189int
1190xdr_encode_array2(struct xdr_buf *buf, unsigned int base,
1191                  struct xdr_array2_desc *desc)
1192{
1193        if ((unsigned long) base + 4 + desc->array_len * desc->elem_size >
1194            buf->head->iov_len + buf->page_len + buf->tail->iov_len)
1195                return -EINVAL;
1196
1197        return xdr_xcode_array2(buf, base, desc, 1);
1198}
1199EXPORT_SYMBOL_GPL(xdr_encode_array2);
1200
/*
 * xdr_process_buf - apply @actor to @len bytes of @buf starting at @offset
 *
 * Walks the head iovec, the page list and the tail iovec in order,
 * wrapping each contiguous run of bytes in a one-entry scatterlist and
 * handing it to @actor along with @data.  Stops early and returns
 * @actor's non-zero result on failure; returns -EINVAL if @buf holds
 * fewer than @len bytes past @offset.
 */
int
xdr_process_buf(struct xdr_buf *buf, unsigned int offset, unsigned int len,
		int (*actor)(struct scatterlist *, void *), void *data)
{
	int i, ret = 0;
	unsigned page_len, thislen, page_offset;
	struct scatterlist	sg[1];

	sg_init_table(sg, 1);

	if (offset >= buf->head[0].iov_len) {
		/* range starts beyond the head; skip it entirely */
		offset -= buf->head[0].iov_len;
	} else {
		thislen = buf->head[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->head[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		if (ret)
			goto out;
		offset = 0;
		len -= thislen;
	}
	if (len == 0)
		goto out;

	if (offset >= buf->page_len) {
		/* range starts beyond the pages; skip them entirely */
		offset -= buf->page_len;
	} else {
		page_len = buf->page_len - offset;
		if (page_len > len)
			page_len = len;
		len -= page_len;
		/* translate buffer offset into page index + in-page offset */
		page_offset = (offset + buf->page_base) & (PAGE_CACHE_SIZE - 1);
		i = (offset + buf->page_base) >> PAGE_CACHE_SHIFT;
		thislen = PAGE_CACHE_SIZE - page_offset;
		do {
			if (thislen > page_len)
				thislen = page_len;
			sg_set_page(sg, buf->pages[i], thislen, page_offset);
			ret = actor(sg, data);
			if (ret)
				goto out;
			page_len -= thislen;
			i++;
			page_offset = 0;
			thislen = PAGE_CACHE_SIZE;
		} while (page_len != 0);
		offset = 0;
	}
	if (len == 0)
		goto out;
	if (offset < buf->tail[0].iov_len) {
		thislen = buf->tail[0].iov_len - offset;
		if (thislen > len)
			thislen = len;
		sg_set_buf(sg, buf->tail[0].iov_base + offset, thislen);
		ret = actor(sg, data);
		/* NOTE(review): a non-zero actor result here can be
		 * overwritten by -EINVAL below when the tail is also
		 * short — confirm callers don't need to distinguish. */
		len -= thislen;
	}
	if (len != 0)
		ret = -EINVAL;
out:
	return ret;
}
EXPORT_SYMBOL_GPL(xdr_process_buf);
1267
1268