linux/drivers/staging/lustre/lustre/ptlrpc/sec_bulk.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/ptlrpc/sec_bulk.c
 *
 * Author: Eric Mei <ericm@clusterfs.com>
 */

#define DEBUG_SUBSYSTEM S_SEC

#include <linux/libcfs/libcfs.h>
#include <linux/crypto.h>

#include <obd.h>
#include <obd_cksum.h>
#include <obd_class.h>
#include <obd_support.h>
#include <lustre_net.h>
#include <lustre_import.h>
#include <lustre_dlm.h>
#include <lustre_sec.h>

#include "ptlrpc_internal.h"

/****************************************
 * bulk encryption page pools           *
 ****************************************/

#define PTRS_PER_PAGE   (PAGE_CACHE_SIZE / sizeof(void *))
#define PAGES_PER_POOL  (PTRS_PER_PAGE)
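
/*
 * Worked example (assuming a 4 KiB PAGE_CACHE_SIZE and 8-byte pointers;
 * both vary by platform): PTRS_PER_PAGE = 4096 / 8 = 512, so one pool
 * page indexes up to 512 data pages, i.e. 2 MiB of bulk pages per pool.
 */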

#define IDLE_IDX_MAX            (100)
#define IDLE_IDX_WEIGHT         (3)

#define CACHE_QUIESCENT_PERIOD  (20)

static struct ptlrpc_enc_page_pool {
        /*
         * constants
         */
        unsigned long    epp_max_pages;   /* max # of pages we can hold, const */
        unsigned int     epp_max_pools;   /* number of pools, const */

        /*
         * wait queue in case of not enough free pages.
         */
        wait_queue_head_t epp_waitq;      /* waiting threads */
        unsigned int     epp_waitqlen;    /* wait queue length */
        unsigned long    epp_pages_short; /* # of pages wanted by queued users */
        unsigned int     epp_growing:1;   /* during adding pages */

        /*
         * indicates how idle the pools are, from 0 to IDLE_IDX_MAX.
         * it is updated on each allocation from the pools, not on the
         * clock, so even if the system has been quiet for a while the
         * idle_idx may still read low if nothing touched the pools.
         */
        unsigned long    epp_idle_idx;

        /* last shrink time due to memory pressure */
        long             epp_last_shrink;
        long             epp_last_access;

        /*
         * in-pool pages bookkeeping
         */
        spinlock_t       epp_lock;        /* protect following fields */
        unsigned long    epp_total_pages; /* total pages in pools */
        unsigned long    epp_free_pages;  /* current pages available */

        /*
         * statistics
         */
        unsigned long    epp_st_max_pages;  /* # of pages ever reached */
        unsigned int     epp_st_grows;      /* # of grows */
        unsigned int     epp_st_grow_fails; /* # of add pages failures */
        unsigned int     epp_st_shrinks;    /* # of shrinks */
        unsigned long    epp_st_access;     /* # of accesses */
        unsigned long    epp_st_missings;   /* # of cache misses */
        unsigned long    epp_st_lowfree;    /* lowest free pages reached */
        unsigned int     epp_st_max_wqlen;  /* highest waitqueue length */
        cfs_time_t       epp_st_max_wait;   /* in jiffies */
        /*
         * pointers to pools
         */
        struct page    ***epp_pools;
} page_pools;
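
/*
 * A minimal sketch (illustrative only, hence not compiled) of how a flat
 * page index maps onto the two-level epp_pools[][] layout used throughout
 * this file; the helper name is hypothetical:
 */
#if 0
static struct page *enc_pool_page_at(unsigned long idx)
{
        int p_idx = idx / PAGES_PER_POOL;       /* which pool */
        int g_idx = idx % PAGES_PER_POOL;       /* slot within that pool */

        return page_pools.epp_pools[p_idx][g_idx];
}
#endif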

/*
 * memory shrinker
 */
const int pools_shrinker_seeks = DEFAULT_SEEKS;
static struct shrinker *pools_shrinker;

/*
 * /proc/fs/lustre/sptlrpc/encrypt_page_pools
 */
int sptlrpc_proc_enc_pool_seq_show(struct seq_file *m, void *v)
{
        int     rc;

        spin_lock(&page_pools.epp_lock);

        rc = seq_printf(m,
                        "physical pages:          %lu\n"
                        "pages per pool:          %lu\n"
                        "max pages:               %lu\n"
                        "max pools:               %u\n"
                        "total pages:             %lu\n"
                        "total free:              %lu\n"
                        "idle index:              %lu/100\n"
                        "last shrink:             %lds\n"
                        "last access:             %lds\n"
                        "max pages reached:       %lu\n"
                        "grows:                   %u\n"
                        "grows failure:           %u\n"
                        "shrinks:                 %u\n"
                        "cache access:            %lu\n"
                        "cache missing:           %lu\n"
                        "low free mark:           %lu\n"
                        "max waitqueue depth:     %u\n"
                        "max wait time:           "CFS_TIME_T"/%u\n",
                        num_physpages,
                        PAGES_PER_POOL,
                        page_pools.epp_max_pages,
                        page_pools.epp_max_pools,
                        page_pools.epp_total_pages,
                        page_pools.epp_free_pages,
                        page_pools.epp_idle_idx,
                        cfs_time_current_sec() - page_pools.epp_last_shrink,
                        cfs_time_current_sec() - page_pools.epp_last_access,
                        page_pools.epp_st_max_pages,
                        page_pools.epp_st_grows,
                        page_pools.epp_st_grow_fails,
                        page_pools.epp_st_shrinks,
                        page_pools.epp_st_access,
                        page_pools.epp_st_missings,
                        page_pools.epp_st_lowfree,
                        page_pools.epp_st_max_wqlen,
                        page_pools.epp_st_max_wait, HZ);

        spin_unlock(&page_pools.epp_lock);
        return rc;
}

static void enc_pools_release_free_pages(long npages)
{
        int     p_idx, g_idx;
        int     p_idx_max1, p_idx_max2;

        LASSERT(npages > 0);
        LASSERT(npages <= page_pools.epp_free_pages);
        LASSERT(page_pools.epp_free_pages <= page_pools.epp_total_pages);

        /* max pool index before the release */
        p_idx_max2 = (page_pools.epp_total_pages - 1) / PAGES_PER_POOL;

        page_pools.epp_free_pages -= npages;
        page_pools.epp_total_pages -= npages;

        /* max pool index after the release */
        p_idx_max1 = page_pools.epp_total_pages == 0 ? -1 :
                     ((page_pools.epp_total_pages - 1) / PAGES_PER_POOL);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        LASSERT(page_pools.epp_pools[p_idx]);

        while (npages--) {
                LASSERT(page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);

                __free_page(page_pools.epp_pools[p_idx][g_idx]);
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        /* free unused pools */
        while (p_idx_max1 < p_idx_max2) {
                LASSERT(page_pools.epp_pools[p_idx_max2]);
                OBD_FREE(page_pools.epp_pools[p_idx_max2], PAGE_CACHE_SIZE);
                page_pools.epp_pools[p_idx_max2] = NULL;
                p_idx_max2--;
        }
}

/*
 * may be called frequently for queries (@nr_to_scan == 0).
 * we try to keep at least PTLRPC_MAX_BRW_PAGES pages in the pool.
 */
static int enc_pools_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
{
        if (unlikely(shrink_param(sc, nr_to_scan) != 0)) {
                spin_lock(&page_pools.epp_lock);
                /* nr_to_scan is unsigned; avoid underflow when fewer than
                 * PTLRPC_MAX_BRW_PAGES pages are free */
                if (page_pools.epp_free_pages <= PTLRPC_MAX_BRW_PAGES)
                        shrink_param(sc, nr_to_scan) = 0;
                else
                        shrink_param(sc, nr_to_scan) =
                                min_t(unsigned long,
                                      shrink_param(sc, nr_to_scan),
                                      page_pools.epp_free_pages -
                                      PTLRPC_MAX_BRW_PAGES);
                if (shrink_param(sc, nr_to_scan) > 0) {
                        enc_pools_release_free_pages(shrink_param(sc,
                                                                  nr_to_scan));
                        CDEBUG(D_SEC, "released %ld pages, %ld left\n",
                               (long)shrink_param(sc, nr_to_scan),
                               page_pools.epp_free_pages);

                        page_pools.epp_st_shrinks++;
                        page_pools.epp_last_shrink = cfs_time_current_sec();
                }
                spin_unlock(&page_pools.epp_lock);
        }

        /*
         * if there has been no pool access for a long time, consider the
         * pools fully idle. a small race here is fine.
         */
        if (unlikely(cfs_time_current_sec() - page_pools.epp_last_access >
                     CACHE_QUIESCENT_PERIOD)) {
                spin_lock(&page_pools.epp_lock);
                page_pools.epp_idle_idx = IDLE_IDX_MAX;
                spin_unlock(&page_pools.epp_lock);
        }

        LASSERT(page_pools.epp_idle_idx <= IDLE_IDX_MAX);
        return max((int)page_pools.epp_free_pages - PTLRPC_MAX_BRW_PAGES, 0) *
                (IDLE_IDX_MAX - page_pools.epp_idle_idx) / IDLE_IDX_MAX;
}
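
/*
 * Worked example of the count returned above (numbers are hypothetical):
 * with 1024 free pages, PTLRPC_MAX_BRW_PAGES == 256 and idle index 75,
 * the shrinker is offered (1024 - 256) * (100 - 75) / 100 = 192 pages;
 * the busier the pools (lower idle index), the less we offer to reclaim.
 */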

static inline
int npages_to_npools(unsigned long npages)
{
        return (int)((npages + PAGES_PER_POOL - 1) / PAGES_PER_POOL);
}
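
/*
 * e.g. with PAGES_PER_POOL == 512 (see the worked example above):
 * npages_to_npools(1) == 1, npages_to_npools(512) == 1,
 * npages_to_npools(513) == 2.
 */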

/*
 * return how many pages were cleaned up.
 */
static unsigned long enc_pools_cleanup(struct page ***pools, int npools)
{
        unsigned long cleaned = 0;
        int        i, j;

        for (i = 0; i < npools; i++) {
                if (pools[i]) {
                        for (j = 0; j < PAGES_PER_POOL; j++) {
                                if (pools[i][j]) {
                                        __free_page(pools[i][j]);
                                        cleaned++;
                                }
                        }
                        OBD_FREE(pools[i], PAGE_CACHE_SIZE);
                        pools[i] = NULL;
                }
        }

        return cleaned;
}

/*
 * merge the @npools pools pointed to by @pools, containing @npages new
 * pages, into the current pools.
 *
 * with some trickery most of the pointer copying could be avoided, but
 * we choose the simplest way to avoid complexity: this path is rarely
 * taken.
 */
static void enc_pools_insert(struct page ***pools, int npools, int npages)
{
        int     freeslot;
        int     op_idx, np_idx, og_idx, ng_idx;
        int     cur_npools, end_npools;

        LASSERT(npages > 0);
        LASSERT(page_pools.epp_total_pages + npages <=
                page_pools.epp_max_pages);
        LASSERT(npages_to_npools(npages) == npools);
        LASSERT(page_pools.epp_growing);

        spin_lock(&page_pools.epp_lock);

        /*
         * (1) fill all the free slots of current pools.
         */
        /* free slots are those vacated by borrowed pages, plus the unused
         * slots with index >= total_pages at the tail of the last pool. */
        freeslot = page_pools.epp_total_pages % PAGES_PER_POOL;
        if (freeslot != 0)
                freeslot = PAGES_PER_POOL - freeslot;
        freeslot += page_pools.epp_total_pages - page_pools.epp_free_pages;
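        /* worked example (hypothetical numbers): with PAGES_PER_POOL == 512,
         * total_pages == 600 and free_pages == 580, the last pool has
         * 512 - (600 % 512) = 424 unused tail slots plus 600 - 580 = 20
         * slots vacated by borrowed pages, so freeslot == 444. */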

        op_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        og_idx = page_pools.epp_free_pages % PAGES_PER_POOL;
        np_idx = npools - 1;
        ng_idx = (npages - 1) % PAGES_PER_POOL;

        while (freeslot) {
                LASSERT(page_pools.epp_pools[op_idx][og_idx] == NULL);
                LASSERT(pools[np_idx][ng_idx] != NULL);

                page_pools.epp_pools[op_idx][og_idx] = pools[np_idx][ng_idx];
                pools[np_idx][ng_idx] = NULL;

                freeslot--;

                if (++og_idx == PAGES_PER_POOL) {
                        op_idx++;
                        og_idx = 0;
                }
                if (--ng_idx < 0) {
                        if (np_idx == 0)
                                break;
                        np_idx--;
                        ng_idx = PAGES_PER_POOL - 1;
                }
        }

        /*
         * (2) add pools if needed.
         */
        cur_npools = (page_pools.epp_total_pages + PAGES_PER_POOL - 1) /
                     PAGES_PER_POOL;
        end_npools = (page_pools.epp_total_pages + npages +
                      PAGES_PER_POOL - 1) / PAGES_PER_POOL;
        LASSERT(end_npools <= page_pools.epp_max_pools);

        np_idx = 0;
        while (cur_npools < end_npools) {
                LASSERT(page_pools.epp_pools[cur_npools] == NULL);
                LASSERT(np_idx < npools);
                LASSERT(pools[np_idx] != NULL);

                page_pools.epp_pools[cur_npools++] = pools[np_idx];
                pools[np_idx++] = NULL;
        }

        page_pools.epp_total_pages += npages;
        page_pools.epp_free_pages += npages;
        page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        if (page_pools.epp_total_pages > page_pools.epp_st_max_pages)
                page_pools.epp_st_max_pages = page_pools.epp_total_pages;

        CDEBUG(D_SEC, "add %d pages to total %lu\n", npages,
               page_pools.epp_total_pages);

        spin_unlock(&page_pools.epp_lock);
}

static int enc_pools_add_pages(int npages)
{
        static DEFINE_MUTEX(add_pages_mutex);
        struct page   ***pools;
        int          npools, alloced = 0;
        int          i, j, rc = -ENOMEM;

        if (npages < PTLRPC_MAX_BRW_PAGES)
                npages = PTLRPC_MAX_BRW_PAGES;

        mutex_lock(&add_pages_mutex);

        if (npages + page_pools.epp_total_pages > page_pools.epp_max_pages)
                npages = page_pools.epp_max_pages - page_pools.epp_total_pages;
        LASSERT(npages > 0);

        page_pools.epp_st_grows++;

        npools = npages_to_npools(npages);
        OBD_ALLOC(pools, npools * sizeof(*pools));
        if (pools == NULL)
                goto out;

        for (i = 0; i < npools; i++) {
                OBD_ALLOC(pools[i], PAGE_CACHE_SIZE);
                if (pools[i] == NULL)
                        goto out_pools;

                for (j = 0; j < PAGES_PER_POOL && alloced < npages; j++) {
                        pools[i][j] = alloc_page(__GFP_IO | __GFP_HIGHMEM);
                        if (pools[i][j] == NULL)
                                goto out_pools;

                        alloced++;
                }
        }
        LASSERT(alloced == npages);

        enc_pools_insert(pools, npools, npages);
        CDEBUG(D_SEC, "added %d pages into pools\n", npages);
        rc = 0;

out_pools:
        enc_pools_cleanup(pools, npools);
        OBD_FREE(pools, npools * sizeof(*pools));
out:
        if (rc) {
                page_pools.epp_st_grow_fails++;
                CERROR("Failed to allocate %d enc pages\n", npages);
        }

        mutex_unlock(&add_pages_mutex);
        return rc;
}

static inline void enc_pools_wakeup(void)
{
        LASSERT(spin_is_locked(&page_pools.epp_lock));

        if (unlikely(page_pools.epp_waitqlen)) {
                LASSERT(waitqueue_active(&page_pools.epp_waitq));
                wake_up_all(&page_pools.epp_waitq);
        }
}

static int enc_pools_should_grow(int page_needed, long now)
{
        /* don't grow if someone else is growing the pools right now,
         * or the pools have already reached full capacity
         */
        if (page_pools.epp_growing ||
            page_pools.epp_total_pages == page_pools.epp_max_pages)
                return 0;

        /* if the total number of pages is not enough, we need to grow */
        if (page_pools.epp_total_pages < page_needed)
                return 1;

        /*
         * we wanted to return 0 here if a shrink happened just a moment
         * ago, but that may deadlock if a client and an OST live on the
         * same node.
         */
#if 0
        if (now - page_pools.epp_last_shrink < 2)
                return 0;
#endif

        /*
         * we should perhaps also consider other factors here, e.g. the
         * wait queue length, the idle index, etc.
         */

        /* grow the pools in any other cases */
        return 1;
}

/*
 * we allocate the requested pages "atomically": either the whole request
 * is satisfied at once, or the caller blocks until enough pages are free.
 */
int sptlrpc_enc_pool_get_pages(struct ptlrpc_bulk_desc *desc)
{
        wait_queue_t    waitlink;
        unsigned long   this_idle = -1;
        cfs_time_t      tick = 0;
        long            now;
        int             p_idx, g_idx;
        int             i;

        LASSERT(desc->bd_iov_count > 0);
        LASSERT(desc->bd_iov_count <= page_pools.epp_max_pages);

        /* resent bulk, enc iov might have been allocated previously */
        if (desc->bd_enc_iov != NULL)
                return 0;

        OBD_ALLOC(desc->bd_enc_iov,
                  desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
        if (desc->bd_enc_iov == NULL)
                return -ENOMEM;

        spin_lock(&page_pools.epp_lock);

        page_pools.epp_st_access++;
again:
        if (unlikely(page_pools.epp_free_pages < desc->bd_iov_count)) {
                if (tick == 0)
                        tick = cfs_time_current();

                now = cfs_time_current_sec();

                page_pools.epp_st_missings++;
                page_pools.epp_pages_short += desc->bd_iov_count;

                if (enc_pools_should_grow(desc->bd_iov_count, now)) {
                        page_pools.epp_growing = 1;

                        spin_unlock(&page_pools.epp_lock);
                        enc_pools_add_pages(page_pools.epp_pages_short / 2);
                        spin_lock(&page_pools.epp_lock);

                        page_pools.epp_growing = 0;

                        enc_pools_wakeup();
                } else {
                        if (++page_pools.epp_waitqlen >
                            page_pools.epp_st_max_wqlen)
                                page_pools.epp_st_max_wqlen =
                                                page_pools.epp_waitqlen;

                        set_current_state(TASK_UNINTERRUPTIBLE);
                        init_waitqueue_entry_current(&waitlink);
                        add_wait_queue(&page_pools.epp_waitq, &waitlink);

                        spin_unlock(&page_pools.epp_lock);
                        waitq_wait(&waitlink, TASK_UNINTERRUPTIBLE);
                        remove_wait_queue(&page_pools.epp_waitq, &waitlink);
                        LASSERT(page_pools.epp_waitqlen > 0);
                        spin_lock(&page_pools.epp_lock);
                        page_pools.epp_waitqlen--;
                }

                LASSERT(page_pools.epp_pages_short >= desc->bd_iov_count);
                page_pools.epp_pages_short -= desc->bd_iov_count;

                this_idle = 0;
                goto again;
        }

        /* record max wait time */
        if (unlikely(tick != 0)) {
                tick = cfs_time_current() - tick;
                if (tick > page_pools.epp_st_max_wait)
                        page_pools.epp_st_max_wait = tick;
        }

        /* proceed with the rest of the allocation */
        page_pools.epp_free_pages -= desc->bd_iov_count;

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(page_pools.epp_pools[p_idx][g_idx] != NULL);
                desc->bd_enc_iov[i].kiov_page =
                                        page_pools.epp_pools[p_idx][g_idx];
                page_pools.epp_pools[p_idx][g_idx] = NULL;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        if (page_pools.epp_free_pages < page_pools.epp_st_lowfree)
                page_pools.epp_st_lowfree = page_pools.epp_free_pages;

        /*
         * new idle index = (old * weight + new) / (weight + 1)
         */
        if (this_idle == -1) {
                this_idle = page_pools.epp_free_pages * IDLE_IDX_MAX /
                            page_pools.epp_total_pages;
        }
        page_pools.epp_idle_idx = (page_pools.epp_idle_idx * IDLE_IDX_WEIGHT +
                                   this_idle) /
                                  (IDLE_IDX_WEIGHT + 1);
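        /* e.g. old index 40, instantaneous index 80, weight 3:
         * new index = (40 * 3 + 80) / 4 = 50 */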

        page_pools.epp_last_access = cfs_time_current_sec();

        spin_unlock(&page_pools.epp_lock);
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_get_pages);
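
/*
 * A minimal caller sketch (not compiled here; the bulk setup, encryption
 * step and error paths are elided, and "demo_wrap_bulk" is a hypothetical
 * name) showing the intended get/put pairing:
 */
#if 0
static int demo_wrap_bulk(struct ptlrpc_bulk_desc *desc)
{
        int rc;

        rc = sptlrpc_enc_pool_get_pages(desc);  /* may block for free pages */
        if (rc)
                return rc;      /* -ENOMEM: could not set up bd_enc_iov */

        /* ... encrypt bd_iov pages into the bd_enc_iov pages here ... */

        sptlrpc_enc_pool_put_pages(desc);       /* return pages, wake waiters */
        return 0;
}
#endif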

void sptlrpc_enc_pool_put_pages(struct ptlrpc_bulk_desc *desc)
{
        int     p_idx, g_idx;
        int     i;

        if (desc->bd_enc_iov == NULL)
                return;

        LASSERT(desc->bd_iov_count > 0);

        spin_lock(&page_pools.epp_lock);

        p_idx = page_pools.epp_free_pages / PAGES_PER_POOL;
        g_idx = page_pools.epp_free_pages % PAGES_PER_POOL;

        LASSERT(page_pools.epp_free_pages + desc->bd_iov_count <=
                page_pools.epp_total_pages);
        LASSERT(page_pools.epp_pools[p_idx]);

        for (i = 0; i < desc->bd_iov_count; i++) {
                LASSERT(desc->bd_enc_iov[i].kiov_page != NULL);
                LASSERT(g_idx != 0 || page_pools.epp_pools[p_idx]);
                LASSERT(page_pools.epp_pools[p_idx][g_idx] == NULL);

                page_pools.epp_pools[p_idx][g_idx] =
                                        desc->bd_enc_iov[i].kiov_page;

                if (++g_idx == PAGES_PER_POOL) {
                        p_idx++;
                        g_idx = 0;
                }
        }

        page_pools.epp_free_pages += desc->bd_iov_count;

        enc_pools_wakeup();

        spin_unlock(&page_pools.epp_lock);

        OBD_FREE(desc->bd_enc_iov,
                 desc->bd_iov_count * sizeof(*desc->bd_enc_iov));
        desc->bd_enc_iov = NULL;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_put_pages);

/*
 * add_user/del_user don't do much any more: add_user() just seeds some
 * initial pages if the pools are currently empty; everything else is
 * handled by the pools' self-adaptation.
 */
int sptlrpc_enc_pool_add_user(void)
{
        int     need_grow = 0;

        spin_lock(&page_pools.epp_lock);
        if (page_pools.epp_growing == 0 && page_pools.epp_total_pages == 0) {
                page_pools.epp_growing = 1;
                need_grow = 1;
        }
        spin_unlock(&page_pools.epp_lock);

        if (need_grow) {
                enc_pools_add_pages(2 * PTLRPC_MAX_BRW_PAGES);

                spin_lock(&page_pools.epp_lock);
                page_pools.epp_growing = 0;
                enc_pools_wakeup();
                spin_unlock(&page_pools.epp_lock);
        }
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_add_user);

int sptlrpc_enc_pool_del_user(void)
{
        return 0;
}
EXPORT_SYMBOL(sptlrpc_enc_pool_del_user);

static inline void enc_pools_alloc(void)
{
        LASSERT(page_pools.epp_max_pools);
        OBD_ALLOC_LARGE(page_pools.epp_pools,
                        page_pools.epp_max_pools *
                        sizeof(*page_pools.epp_pools));
}

static inline void enc_pools_free(void)
{
        LASSERT(page_pools.epp_max_pools);
        LASSERT(page_pools.epp_pools);

        OBD_FREE_LARGE(page_pools.epp_pools,
                       page_pools.epp_max_pools *
                       sizeof(*page_pools.epp_pools));
}

int sptlrpc_enc_pool_init(void)
{
        /*
         * maximum capacity is 1/8 of total physical memory.
         * is 1/8 a good number?
         */
        page_pools.epp_max_pages = num_physpages / 8;
        page_pools.epp_max_pools = npages_to_npools(page_pools.epp_max_pages);

        init_waitqueue_head(&page_pools.epp_waitq);
        page_pools.epp_waitqlen = 0;
        page_pools.epp_pages_short = 0;

        page_pools.epp_growing = 0;

        page_pools.epp_idle_idx = 0;
        page_pools.epp_last_shrink = cfs_time_current_sec();
        page_pools.epp_last_access = cfs_time_current_sec();

        spin_lock_init(&page_pools.epp_lock);
        page_pools.epp_total_pages = 0;
        page_pools.epp_free_pages = 0;

        page_pools.epp_st_max_pages = 0;
        page_pools.epp_st_grows = 0;
        page_pools.epp_st_grow_fails = 0;
        page_pools.epp_st_shrinks = 0;
        page_pools.epp_st_access = 0;
        page_pools.epp_st_missings = 0;
        page_pools.epp_st_lowfree = 0;
        page_pools.epp_st_max_wqlen = 0;
        page_pools.epp_st_max_wait = 0;

        enc_pools_alloc();
        if (page_pools.epp_pools == NULL)
                return -ENOMEM;

        pools_shrinker = set_shrinker(pools_shrinker_seeks, enc_pools_shrink);
        if (pools_shrinker == NULL) {
                enc_pools_free();
                return -ENOMEM;
        }

        return 0;
}
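
/*
 * Sizing example (hypothetical hardware): on a node with 4 GiB of RAM
 * and 4 KiB pages, num_physpages == 1048576, so epp_max_pages ==
 * 131072 pages (512 MiB) and, with PAGES_PER_POOL == 512,
 * epp_max_pools == 256.
 */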

void sptlrpc_enc_pool_fini(void)
{
        unsigned long cleaned, npools;

        LASSERT(pools_shrinker);
        LASSERT(page_pools.epp_pools);
        LASSERT(page_pools.epp_total_pages == page_pools.epp_free_pages);

        remove_shrinker(pools_shrinker);

        npools = npages_to_npools(page_pools.epp_total_pages);
        cleaned = enc_pools_cleanup(page_pools.epp_pools, npools);
        LASSERT(cleaned == page_pools.epp_total_pages);

        enc_pools_free();

        if (page_pools.epp_st_access > 0) {
                CDEBUG(D_SEC,
                       "max pages %lu, grows %u, grow fails %u, shrinks %u, "
                       "access %lu, missing %lu, max qlen %u, max wait "
                       CFS_TIME_T"/%d\n",
                       page_pools.epp_st_max_pages, page_pools.epp_st_grows,
                       page_pools.epp_st_grow_fails,
                       page_pools.epp_st_shrinks, page_pools.epp_st_access,
                       page_pools.epp_st_missings, page_pools.epp_st_max_wqlen,
                       page_pools.epp_st_max_wait, HZ);
        }
}

static int cfs_hash_alg_id[] = {
        [BULK_HASH_ALG_NULL]    = CFS_HASH_ALG_NULL,
        [BULK_HASH_ALG_ADLER32] = CFS_HASH_ALG_ADLER32,
        [BULK_HASH_ALG_CRC32]   = CFS_HASH_ALG_CRC32,
        [BULK_HASH_ALG_MD5]     = CFS_HASH_ALG_MD5,
        [BULK_HASH_ALG_SHA1]    = CFS_HASH_ALG_SHA1,
        [BULK_HASH_ALG_SHA256]  = CFS_HASH_ALG_SHA256,
        [BULK_HASH_ALG_SHA384]  = CFS_HASH_ALG_SHA384,
        [BULK_HASH_ALG_SHA512]  = CFS_HASH_ALG_SHA512,
};

const char *sptlrpc_get_hash_name(__u8 hash_alg)
{
        return cfs_crypto_hash_name(cfs_hash_alg_id[hash_alg]);
}
EXPORT_SYMBOL(sptlrpc_get_hash_name);

__u8 sptlrpc_get_hash_alg(const char *algname)
{
        return cfs_crypto_hash_alg(algname);
}
EXPORT_SYMBOL(sptlrpc_get_hash_alg);
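
/*
 * Illustrative round trip (not compiled); it assumes the libcfs name for
 * the adler algorithm is "adler32", matching the table above:
 */
#if 0
        __u8 alg = sptlrpc_get_hash_alg("adler32");
        const char *name = sptlrpc_get_hash_name(alg);  /* back to "adler32" */
#endif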

int bulk_sec_desc_unpack(struct lustre_msg *msg, int offset, int swabbed)
{
        struct ptlrpc_bulk_sec_desc *bsd;
        int                          size = msg->lm_buflens[offset];

        bsd = lustre_msg_buf(msg, offset, sizeof(*bsd));
        if (bsd == NULL) {
                CERROR("Invalid bulk sec desc: size %d\n", size);
                return -EINVAL;
        }

        if (swabbed)
                __swab32s(&bsd->bsd_nob);

        if (unlikely(bsd->bsd_version != 0)) {
                CERROR("Unexpected version %u\n", bsd->bsd_version);
                return -EPROTO;
        }

        if (unlikely(bsd->bsd_type >= SPTLRPC_BULK_MAX)) {
                CERROR("Invalid type %u\n", bsd->bsd_type);
                return -EPROTO;
        }

        /* FIXME more sanity checks here */

        if (unlikely(bsd->bsd_svc != SPTLRPC_BULK_SVC_NULL &&
                     bsd->bsd_svc != SPTLRPC_BULK_SVC_INTG &&
                     bsd->bsd_svc != SPTLRPC_BULK_SVC_PRIV)) {
                CERROR("Invalid svc %u\n", bsd->bsd_svc);
                return -EPROTO;
        }

        return 0;
}
EXPORT_SYMBOL(bulk_sec_desc_unpack);

int sptlrpc_get_bulk_checksum(struct ptlrpc_bulk_desc *desc, __u8 alg,
                              void *buf, int buflen)
{
        struct cfs_crypto_hash_desc     *hdesc;
        int                              hashsize;
        char                             hashbuf[64];
        unsigned int                     bufsize;
        int                              i, err;

        LASSERT(alg > BULK_HASH_ALG_NULL && alg < BULK_HASH_ALG_MAX);
        LASSERT(buflen >= 4);

        hdesc = cfs_crypto_hash_init(cfs_hash_alg_id[alg], NULL, 0);
        if (IS_ERR(hdesc)) {
                CERROR("Unable to initialize checksum hash %s\n",
                       cfs_crypto_hash_name(cfs_hash_alg_id[alg]));
                return PTR_ERR(hdesc);
        }

        hashsize = cfs_crypto_hash_digestsize(cfs_hash_alg_id[alg]);

        for (i = 0; i < desc->bd_iov_count; i++) {
                cfs_crypto_hash_update_page(hdesc, desc->bd_iov[i].kiov_page,
                                  desc->bd_iov[i].kiov_offset & ~CFS_PAGE_MASK,
                                  desc->bd_iov[i].kiov_len);
        }

        if (hashsize > buflen) {
                /* digest is larger than the caller's buffer: hash into a
                 * local buffer and copy out a truncated digest */
                bufsize = sizeof(hashbuf);
                err = cfs_crypto_hash_final(hdesc, (unsigned char *)hashbuf,
                                            &bufsize);
                memcpy(buf, hashbuf, buflen);
        } else {
                bufsize = buflen;
                err = cfs_crypto_hash_final(hdesc, (unsigned char *)buf,
                                            &bufsize);
        }

        if (err)
                cfs_crypto_hash_final(hdesc, NULL, NULL);
        return err;
}
EXPORT_SYMBOL(sptlrpc_get_bulk_checksum);
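
/*
 * A minimal usage sketch (not compiled; "desc" is an already-populated
 * bulk descriptor, and MD5's 16-byte digest size is assumed):
 */
#if 0
        __u8 cksum[16];
        int rc = sptlrpc_get_bulk_checksum(desc, BULK_HASH_ALG_MD5,
                                           cksum, sizeof(cksum));
#endif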