linux/net/sunrpc/xprtrdma/physical_ops.c
/*
 * Copyright (c) 2015 Oracle.  All rights reserved.
 * Copyright (c) 2003-2007 Network Appliance, Inc. All rights reserved.
 */

/* No-op chunk preparation. All client memory is pre-registered.
 * Sometimes referred to as ALLPHYSICAL mode.
 *
 * Physical registration is simple because all client memory is
 * pre-registered and never deregistered. This mode is good for
 * adapter bring-up, but is not considered safe: the server must
 * be trusted not to abuse its access to client memory not
 * involved in RDMA I/O.
 */
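
/* A transport chooses this strategy by pointing its ia->ri_ops at
 * the rpcrdma_physical_memreg_ops table defined at the bottom of
 * this file. A minimal sketch of that selection, assuming the
 * memory registration switch done during rpcrdma_ia_open():
 *
 *         case RPCRDMA_ALLPHYSICAL:
 *                 ia->ri_ops = &rpcrdma_physical_memreg_ops;
 *                 break;
 */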

#include "xprt_rdma.h"

#if IS_ENABLED(CONFIG_SUNRPC_DEBUG)
# define RPCDBG_FACILITY        RPCDBG_TRANS
#endif

static int
physical_op_open(struct rpcrdma_ia *ia, struct rpcrdma_ep *ep,
                 struct rpcrdma_create_data_internal *cdata)
{
        struct ib_mr *mr;

        /* Obtain an rkey to use for RPC data payloads.
         */
        mr = ib_get_dma_mr(ia->ri_pd,
                           IB_ACCESS_LOCAL_WRITE |
                           IB_ACCESS_REMOTE_WRITE |
                           IB_ACCESS_REMOTE_READ);
        if (IS_ERR(mr)) {
                pr_err("%s: ib_get_dma_mr failed with %lX\n",
                       __func__, PTR_ERR(mr));
                return -ENOMEM;
        }

        ia->ri_dma_mr = mr;
        return 0;
}

/* PHYSICAL memory registration conveys one page per chunk segment.
 */
static size_t
physical_op_maxpages(struct rpcrdma_xprt *r_xprt)
{
        return min_t(unsigned int, RPCRDMA_MAX_DATA_SEGS,
                     rpcrdma_max_segments(r_xprt));
}

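/* PHYSICAL registration keeps no per-transport MR or MW state,
 * so there is nothing to set up here.
 */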
static int
physical_op_init(struct rpcrdma_xprt *r_xprt)
{
        return 0;
}

/* The client's physical memory is already exposed for
 * remote access via RDMA READ or RDMA WRITE.
 */
static int
physical_op_map(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg,
                int nsegs, bool writing)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        rpcrdma_map_one(ia->ri_device, seg, rpcrdma_data_dir(writing));
        seg->mr_rkey = ia->ri_dma_mr->rkey;
        seg->mr_base = seg->mr_dma;
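        /* Exactly one segment is consumed per call; no registration
         * work was needed.
         */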
        return 1;
}

/* Unmap a memory region, but leave it registered.
 */
static int
physical_op_unmap(struct rpcrdma_xprt *r_xprt, struct rpcrdma_mr_seg *seg)
{
        struct rpcrdma_ia *ia = &r_xprt->rx_ia;

        rpcrdma_unmap_one(ia->ri_device, seg);
        return 1;
}

/* DMA unmap all memory regions that were mapped for "req".
 */
static void
physical_op_unmap_sync(struct rpcrdma_xprt *r_xprt, struct rpcrdma_req *req)
{
        struct ib_device *device = r_xprt->rx_ia.ri_device;
        unsigned int i;

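        /* rl_nchunks counts down to zero while "i" walks forward
         * through the segments that were mapped for this request.
         */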
        for (i = 0; req->rl_nchunks; --req->rl_nchunks)
                rpcrdma_unmap_one(device, &req->rl_segments[i++]);
}

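/* All client memory stays registered for the life of the transport,
 * so there is no MR state to tear down here.
 */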
static void
physical_op_destroy(struct rpcrdma_buffer *buf)
{
}

const struct rpcrdma_memreg_ops rpcrdma_physical_memreg_ops = {
        .ro_map                         = physical_op_map,
        .ro_unmap_sync                  = physical_op_unmap_sync,
        .ro_unmap                       = physical_op_unmap,
        .ro_open                        = physical_op_open,
        .ro_maxpages                    = physical_op_maxpages,
        .ro_init                        = physical_op_init,
        .ro_destroy                     = physical_op_destroy,
        .ro_displayname                 = "physical",
};

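/* Callers reach these methods through the transport's ri_ops pointer
 * rather than by name. A minimal sketch of mapping one chunk during
 * request marshaling, assuming the convention that ro_map returns
 * the number of segments consumed, or a value <= 0 on error:
 *
 *         n = r_xprt->rx_ia.ri_ops->ro_map(r_xprt, seg, nsegs, writing);
 *         if (n <= 0)
 *                 goto out;
 */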