linux/net/sunrpc/backchannel_rqst.c
/******************************************************************************

(c) 2007 Network Appliance, Inc.  All Rights Reserved.
(c) 2009 NetApp.  All Rights Reserved.

NetApp provides this source code under the GPL v2 License.
The GPL v2 license is available at
http://opensource.org/licenses/gpl-license.php.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

******************************************************************************/

#include <linux/tcp.h>
#include <linux/slab.h>
#include <linux/sunrpc/xprt.h>
#include <linux/export.h>
#include <linux/sunrpc/bc_xprt.h>

#ifdef RPC_DEBUG
#define RPCDBG_FACILITY RPCDBG_TRANS
#endif

/*
 * Helper routines that track the number of preallocated elements
 * on the transport.
 */
static inline int xprt_need_to_requeue(struct rpc_xprt *xprt)
{
        return xprt->bc_alloc_count > 0;
}

static inline void xprt_inc_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
        xprt->bc_alloc_count += n;
}

static inline int xprt_dec_alloc_count(struct rpc_xprt *xprt, unsigned int n)
{
        return xprt->bc_alloc_count -= n;
}

/*
 * Free the preallocated rpc_rqst structure and the memory
 * buffers hanging off of it.
 */
static void xprt_free_allocation(struct rpc_rqst *req)
{
        struct xdr_buf *xbufp;

        dprintk("RPC:        free allocations for req= %p\n", req);
        WARN_ON_ONCE(test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        /*
         * Free the receive page via rq_rcv_buf rather than
         * rq_private_buf: rq_private_buf only becomes a copy of
         * rq_rcv_buf once the request has been claimed, so for a
         * request that was never used it is still zeroed and freeing
         * it would leak the receive page.  The caller is responsible
         * for unlinking the request from whichever list it is on.
         */
        xbufp = &req->rq_rcv_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        xbufp = &req->rq_snd_buf;
        free_page((unsigned long)xbufp->head[0].iov_base);
        kfree(req);
}

/*
 * Preallocate up to min_reqs structures and related buffers for use
 * by the backchannel.  This function can be called multiple times
 * when creating new sessions that use the same rpc_xprt.  The
 * preallocated buffers are added to the pool of resources used by
 * the rpc_xprt.  Any one of these resources may be used by an
 * incoming callback request.  It's up to the higher levels in the
 * stack to enforce that the maximum number of session slots is not
 * being exceeded.
 *
 * Some callback arguments can be large.  For example, a pNFS server
 * using multiple deviceids.  The list can be unbounded, but the client
 * has the ability to tell the server the maximum size of the callback
 * requests.  Each deviceID is 16 bytes, so allocating one page for the
 * arguments leaves enough room for 4096 / 16 = 256 of these deviceIDs.
 * The NFS client indicates to the pNFS server that its callback
 * requests can be up to 4096 bytes in size.
 */
int xprt_setup_backchannel(struct rpc_xprt *xprt, unsigned int min_reqs)
{
        struct page *page_rcv = NULL, *page_snd = NULL;
        struct xdr_buf *xbufp = NULL;
        struct rpc_rqst *req, *tmp;
        struct list_head tmp_list;
        int i;

        dprintk("RPC:       setup backchannel transport\n");

        /*
         * We use a temporary list to keep track of the preallocated
         * buffers.  Once we're done building the list we splice it
         * into the backchannel preallocation list off of the rpc_xprt
         * struct.  This helps minimize the amount of time the list
         * lock is held on the rpc_xprt struct.  It also makes cleanup
         * easier in case of memory allocation errors.
         */
        INIT_LIST_HEAD(&tmp_list);
        for (i = 0; i < min_reqs; i++) {
                /* Pre-allocate one backchannel rpc_rqst */
                req = kzalloc(sizeof(struct rpc_rqst), GFP_KERNEL);
                if (req == NULL) {
                        printk(KERN_ERR "Failed to create bc rpc_rqst\n");
                        goto out_free;
                }

                /* Add the allocated buffer to the tmp list */
                dprintk("RPC:       adding req= %p\n", req);
                list_add(&req->rq_bc_pa_list, &tmp_list);

                req->rq_xprt = xprt;
                INIT_LIST_HEAD(&req->rq_list);
                INIT_LIST_HEAD(&req->rq_bc_list);

                /* Preallocate one XDR receive buffer */
                page_rcv = alloc_page(GFP_KERNEL);
                if (page_rcv == NULL) {
                        printk(KERN_ERR "Failed to create bc receive xbuf\n");
                        goto out_free;
                }
                xbufp = &req->rq_rcv_buf;
                xbufp->head[0].iov_base = page_address(page_rcv);
                xbufp->head[0].iov_len = PAGE_SIZE;
                xbufp->tail[0].iov_base = NULL;
                xbufp->tail[0].iov_len = 0;
                xbufp->page_len = 0;
                xbufp->len = PAGE_SIZE;
                xbufp->buflen = PAGE_SIZE;

                /* Preallocate one XDR send buffer */
                page_snd = alloc_page(GFP_KERNEL);
                if (page_snd == NULL) {
                        printk(KERN_ERR "Failed to create bc snd xbuf\n");
                        goto out_free;
                }

                xbufp = &req->rq_snd_buf;
                xbufp->head[0].iov_base = page_address(page_snd);
                xbufp->head[0].iov_len = 0;
                xbufp->tail[0].iov_base = NULL;
                xbufp->tail[0].iov_len = 0;
                xbufp->page_len = 0;
                xbufp->len = 0;
                xbufp->buflen = PAGE_SIZE;
        }

        /*
         * Add the temporary list to the backchannel preallocation list
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        list_splice(&tmp_list, &xprt->bc_pa_list);
        xprt_inc_alloc_count(xprt, min_reqs);
        spin_unlock_bh(&xprt->bc_pa_lock);

        dprintk("RPC:       setup backchannel transport done\n");
        return 0;

out_free:
        /*
         * Memory allocation failed, free the temporary list.  Unlink
         * each entry before freeing it, since xprt_free_allocation
         * no longer removes it from the list itself.
         */
        list_for_each_entry_safe(req, tmp, &tmp_list, rq_bc_pa_list) {
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
        }

        dprintk("RPC:       setup backchannel transport failed\n");
        return -ENOMEM;
}
EXPORT_SYMBOL_GPL(xprt_setup_backchannel);
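
/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * NFSv4.1-style caller would preallocate one rpc_rqst per backchannel
 * session slot before the server is allowed to send callbacks.  The
 * function name and slot count below are assumptions for the example.
 */
static inline int example_bc_session_init(struct rpc_xprt *xprt)
{
        unsigned int session_slots = 16;        /* assumed slot table size */

        /* Returns 0 on success or -ENOMEM on allocation failure */
        return xprt_setup_backchannel(xprt, session_slots);
}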

/**
 * xprt_destroy_backchannel - Destroys the backchannel preallocated structures.
 * @xprt:	the transport holding the preallocated structures
 * @max_reqs:	the maximum number of preallocated structures to destroy
 *
 * Since these structures may have been allocated by multiple calls
 * to xprt_setup_backchannel, we only destroy up to the maximum number
 * of reqs specified by the caller.
 */
void xprt_destroy_backchannel(struct rpc_xprt *xprt, unsigned int max_reqs)
{
        struct rpc_rqst *req = NULL, *tmp = NULL;

        dprintk("RPC:        destroy backchannel transport\n");

        if (max_reqs == 0)
                goto out;

        spin_lock_bh(&xprt->bc_pa_lock);
        xprt_dec_alloc_count(xprt, max_reqs);
        list_for_each_entry_safe(req, tmp, &xprt->bc_pa_list, rq_bc_pa_list) {
                dprintk("RPC:        req=%p\n", req);
                list_del(&req->rq_bc_pa_list);
                xprt_free_allocation(req);
                if (--max_reqs == 0)
                        break;
        }
        spin_unlock_bh(&xprt->bc_pa_lock);

out:
        dprintk("RPC:        backchannel list empty= %s\n",
                list_empty(&xprt->bc_pa_list) ? "true" : "false");
}
EXPORT_SYMBOL_GPL(xprt_destroy_backchannel);
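
/*
 * Illustrative sketch, not part of the original file: the same
 * hypothetical caller balances the setup above by destroying the
 * matching number of preallocated structures when its session is
 * torn down.
 */
static inline void example_bc_session_shutdown(struct rpc_xprt *xprt)
{
        unsigned int session_slots = 16;        /* must match the setup count */

        xprt_destroy_backchannel(xprt, session_slots);
}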

/*
 * One or more rpc_rqst structures have been preallocated during the
 * backchannel setup.  Buffer space for the send and private XDR buffers
 * has been preallocated as well.  Use xprt_alloc_bc_request to claim
 * one of these preallocated requests and xprt_free_bc_request to
 * return it.
 *
 * We know that we're called in soft interrupt context, so the plain
 * spin_lock suffices; there is no need for the bottom-half disabling
 * spin_lock_bh variant here.
 *
 * Return an available rpc_rqst, or NULL if none are available.
 */
struct rpc_rqst *xprt_alloc_bc_request(struct rpc_xprt *xprt)
{
        struct rpc_rqst *req;

        dprintk("RPC:       allocate a backchannel request\n");
        spin_lock(&xprt->bc_pa_lock);
        if (!list_empty(&xprt->bc_pa_list)) {
                req = list_first_entry(&xprt->bc_pa_list, struct rpc_rqst,
                                rq_bc_pa_list);
                list_del(&req->rq_bc_pa_list);
        } else {
                req = NULL;
        }
        spin_unlock(&xprt->bc_pa_lock);

        if (req != NULL) {
                set_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
                req->rq_reply_bytes_recvd = 0;
                req->rq_bytes_sent = 0;
                memcpy(&req->rq_private_buf, &req->rq_rcv_buf,
                        sizeof(req->rq_private_buf));
        }
        dprintk("RPC:       backchannel req=%p\n", req);
        return req;
}
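
/*
 * Illustrative sketch, not part of the original file: a transport's
 * receive path would claim a preallocated request like this before
 * decoding an incoming callback into its receive buffer.  The function
 * is hypothetical.
 */
static inline struct rpc_rqst *example_bc_receive(struct rpc_xprt *xprt)
{
        struct rpc_rqst *req = xprt_alloc_bc_request(xprt);

        if (req == NULL)
                return NULL;    /* no preallocated slot; caller drops the call */
        /* ... decode the incoming callback RPC into req->rq_rcv_buf ... */
        return req;
}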

/*
 * Return the preallocated rpc_rqst structure and XDR buffers
 * associated with this callback request.
 */
void xprt_free_bc_request(struct rpc_rqst *req)
{
        struct rpc_xprt *xprt = req->rq_xprt;

        dprintk("RPC:       free backchannel req=%p\n", req);

        smp_mb__before_clear_bit();
        WARN_ON_ONCE(!test_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state));
        clear_bit(RPC_BC_PA_IN_USE, &req->rq_bc_pa_state);
        smp_mb__after_clear_bit();

        if (!xprt_need_to_requeue(xprt)) {
                /*
                 * The last remaining session was destroyed while this
                 * entry was in use.  Free the entry and don't attempt
                 * to add it back to the list because there is no need
                 * to have any more preallocated entries.
                 */
                dprintk("RPC:       Last session removed req=%p\n", req);
                xprt_free_allocation(req);
                return;
        }

        /*
         * Return it to the list of preallocations so that it
         * may be reused by a new callback request.
         */
        spin_lock_bh(&xprt->bc_pa_lock);
        list_add(&req->rq_bc_pa_list, &xprt->bc_pa_list);
        spin_unlock_bh(&xprt->bc_pa_lock);
}
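
/*
 * Illustrative sketch, not part of the original file: once the callback
 * service has decoded, processed, and replied to the request claimed
 * above, it hands the request back to be requeued or freed.
 */
static inline void example_bc_done(struct rpc_rqst *req)
{
        xprt_free_bc_request(req);      /* requeue to the pool, or free it */
}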