linux/include/rdma/rdmavt_mr.h
#ifndef DEF_RDMAVT_INCMR_H
#define DEF_RDMAVT_INCMR_H

/*
 * Copyright(c) 2016 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * For Memory Regions. This stuff should probably be moved into rdmavt/mr.h once
 * drivers no longer need access to the MR directly.
 */
#include <linux/percpu-refcount.h>

/*
 * A segment is a linear region of low physical memory.
 * Used by the verbs layer.
 */
struct rvt_seg {
	void *vaddr;
	size_t length;
};

/* The number of rvt_segs that fit in a page. */
#define RVT_SEGSZ     (PAGE_SIZE / sizeof(struct rvt_seg))

struct rvt_segarray {
	struct rvt_seg segs[RVT_SEGSZ];
};

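/*
 * A registered memory region.  Describes how the region's memory is laid
 * out in the rvt_segarray map pages and carries the lkey state.  The
 * per-cpu refcount tracks active users of the region; "comp" is completed
 * when the refcount reaches zero so the region can be freed safely.
 */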
struct rvt_mregion {
	struct ib_pd *pd;       /* shares refcnt of ibmr.pd */
	u64 user_base;          /* User's address for this region */
	u64 iova;               /* IB start address of this region */
	size_t length;
	u32 lkey;
	u32 offset;             /* offset (bytes) to start of region */
	int access_flags;
	u32 max_segs;           /* number of rvt_segs in all the arrays */
	u32 mapsz;              /* size of the map array */
	atomic_t lkey_invalid;  /* true if current lkey is invalid */
	u8  page_shift;         /* 0 - non-uniform/non-power-of-2 sizes */
	u8  lkey_published;     /* in global table */
	struct percpu_ref refcount;
	struct completion comp; /* complete when refcount goes to zero */
	struct rvt_segarray *map[];     /* the segments */
};

#define RVT_MAX_LKEY_TABLE_BITS 23

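/*
 * The lkey table maps lkey/rkey values to rvt_mregion pointers.  The
 * table itself is RCU protected for lock-free lookup; insertions and
 * removals are serialized by the spinlock.
 */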
struct rvt_lkey_table {
	/* read mostly fields */
	u32 max;                /* size of the table */
	u32 shift;              /* lkey/rkey shift */
	struct rvt_mregion __rcu **table;
	/* writeable fields */
	/* protect changes in this struct */
	spinlock_t lock ____cacheline_aligned_in_smp;
	u32 next;               /* next unused index (speeds search) */
	u32 gen;                /* generation count */
};

/*
 * These keep track of the copy progress within a memory region.
 * Used by the verbs layer.
 */
struct rvt_sge {
	struct rvt_mregion *mr;
	void *vaddr;            /* kernel virtual address of segment */
	u32 sge_length;         /* length of the SGE */
	u32 length;             /* remaining length of the segment */
	u16 m;                  /* current index: mr->map[m] */
	u16 n;                  /* current index: mr->map[m]->segs[n] */
};

struct rvt_sge_state {
	struct rvt_sge *sg_list;        /* next SGE to be used if any */
	struct rvt_sge sge;             /* progress state for the current SGE */
	u32 total_len;
	u8 num_sge;
};

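/* Drop a reference on a memory region taken with rvt_get_mr(). */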
static inline void rvt_put_mr(struct rvt_mregion *mr)
{
	percpu_ref_put(&mr->refcount);
}

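/* Take a reference on a memory region; paired with rvt_put_mr(). */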
static inline void rvt_get_mr(struct rvt_mregion *mr)
{
	percpu_ref_get(&mr->refcount);
}

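/*
 * rvt_put_ss - drop the MR references held by an SGE state
 * Walks the remaining SGEs and releases each one's memory region.
 */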
static inline void rvt_put_ss(struct rvt_sge_state *ss)
{
	while (ss->num_sge) {
		rvt_put_mr(ss->sge.mr);
		if (--ss->num_sge)
			ss->sge = *ss->sg_list++;
	}
}

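/*
 * rvt_get_sge_length - clamp a requested length to the current SGE
 * Returns @length limited to both the remaining segment length and the
 * remaining SGE length.
 */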
static inline u32 rvt_get_sge_length(struct rvt_sge *sge, u32 length)
{
	u32 len = sge->length;

	if (len > length)
		len = length;
	if (len > sge->sge_length)
		len = sge->sge_length;

	return len;
}

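/*
 * rvt_update_sge - advance the SGE state by @length bytes
 * Moves to the next map segment when the current segment is consumed, or
 * to the next SGE in the list when the whole SGE is consumed.  If @release
 * is true, the MR reference is dropped when an SGE completes.
 */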
static inline void rvt_update_sge(struct rvt_sge_state *ss, u32 length,
				  bool release)
{
	struct rvt_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		if (release)
			rvt_put_mr(sge->mr);
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		if (++sge->n >= RVT_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

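/*
 * rvt_skip_sge - advance the SGE state by @length bytes without copying
 */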
static inline void rvt_skip_sge(struct rvt_sge_state *ss, u32 length,
				bool release)
{
	struct rvt_sge *sge = &ss->sge;

	while (length) {
		u32 len = rvt_get_sge_length(sge, length);

		WARN_ON_ONCE(len == 0);
		rvt_update_sge(ss, len, release);
		length -= len;
	}
}
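
/*
 * Typical caller pattern (illustrative sketch only; the buffer name "data"
 * and the copy direction are assumptions, not part of this header): a
 * driver consumes an SGE state by clamping each copy to the current
 * segment, copying, and then advancing the state, e.g.:
 *
 *	while (length) {
 *		u32 len = rvt_get_sge_length(&ss->sge, length);
 *
 *		memcpy(ss->sge.vaddr, data, len);
 *		rvt_update_sge(ss, len, release);
 *		data += len;
 *		length -= len;
 *	}
 */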

bool rvt_ss_has_lkey(struct rvt_sge_state *ss, u32 lkey);
bool rvt_mr_has_lkey(struct rvt_mregion *mr, u32 lkey);

#endif          /* DEF_RDMAVT_INCMR_H */