linux/drivers/staging/lustre/lnet/lnet/lib-md.c
<<
>>
Prefs
   1// SPDX-License-Identifier: GPL-2.0
   2/*
   3 * GPL HEADER START
   4 *
   5 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 only,
   9 * as published by the Free Software Foundation.
  10 *
  11 * This program is distributed in the hope that it will be useful, but
  12 * WITHOUT ANY WARRANTY; without even the implied warranty of
  13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
  14 * General Public License version 2 for more details (a copy is included
  15 * in the LICENSE file that accompanied this code).
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * version 2 along with this program; If not, see
  19 * http://www.gnu.org/licenses/gpl-2.0.html
  20 *
  21 * GPL HEADER END
  22 */
  23/*
  24 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
  25 * Use is subject to license terms.
  26 *
  27 * Copyright (c) 2012, Intel Corporation.
  28 */
  29/*
  30 * This file is part of Lustre, http://www.lustre.org/
  31 * Lustre is a trademark of Sun Microsystems, Inc.
  32 *
  33 * lnet/lnet/lib-md.c
  34 *
  35 * Memory Descriptor management routines
  36 */
  37
  38#define DEBUG_SUBSYSTEM S_LNET
  39
  40#include <linux/lnet/lib-lnet.h>
  41
/* must be called with lnet_res_lock held */
void
lnet_md_unlink(struct lnet_libmd *md)
{
	/*
	 * Tear down an MD.  The first call marks it as a zombie, detaches
	 * it from its ME and invalidates its handle; the MD is only freed
	 * once md_refcount drops to zero (later calls re-enter here from
	 * the completion path and fall through to the free).
	 */
	if (!(md->md_flags & LNET_MD_FLAG_ZOMBIE)) {
		/* first unlink attempt... */
		struct lnet_me *me = md->md_me;

		md->md_flags |= LNET_MD_FLAG_ZOMBIE;

		/*
		 * Disassociate from ME (if any),
		 * and unlink it if it was created
		 * with LNET_UNLINK
		 */
		if (me) {
			/* detach MD from portal */
			lnet_ptl_detach_md(me, md);
			if (me->me_unlink == LNET_UNLINK)
				lnet_me_unlink(me);
		}

		/* ensure all future handle lookups fail */
		lnet_res_lh_invalidate(&md->md_lh);
	}

	/* still busy: the LND holds references; actual free is deferred
	 * until the last operation on this MD completes
	 */
	if (md->md_refcount) {
		CDEBUG(D_NET, "Queueing unlink of md %p\n", md);
		return;
	}

	CDEBUG(D_NET, "Unlinking md %p\n", md);

	/* drop this MD's contribution to its EQ's per-CPT refcount,
	 * taken in lnet_md_link()
	 */
	if (md->md_eq) {
		int cpt = lnet_cpt_of_cookie(md->md_lh.lh_cookie);

		LASSERT(*md->md_eq->eq_refs[cpt] > 0);
		(*md->md_eq->eq_refs[cpt])--;
	}

	LASSERT(!list_empty(&md->md_list));
	list_del_init(&md->md_list);
	kfree(md);
}
  86
  87static int
  88lnet_md_build(struct lnet_libmd *lmd, struct lnet_md *umd, int unlink)
  89{
  90        int i;
  91        unsigned int niov;
  92        int total_length = 0;
  93
  94        lmd->md_me = NULL;
  95        lmd->md_start = umd->start;
  96        lmd->md_offset = 0;
  97        lmd->md_max_size = umd->max_size;
  98        lmd->md_options = umd->options;
  99        lmd->md_user_ptr = umd->user_ptr;
 100        lmd->md_eq = NULL;
 101        lmd->md_threshold = umd->threshold;
 102        lmd->md_refcount = 0;
 103        lmd->md_flags = (unlink == LNET_UNLINK) ? LNET_MD_FLAG_AUTO_UNLINK : 0;
 104
 105        if (umd->options & LNET_MD_IOVEC) {
 106                if (umd->options & LNET_MD_KIOV) /* Can't specify both */
 107                        return -EINVAL;
 108
 109                niov = umd->length;
 110                lmd->md_niov = umd->length;
 111                memcpy(lmd->md_iov.iov, umd->start,
 112                       niov * sizeof(lmd->md_iov.iov[0]));
 113
 114                for (i = 0; i < (int)niov; i++) {
 115                        /* We take the base address on trust */
 116                        /* invalid length */
 117                        if (lmd->md_iov.iov[i].iov_len <= 0)
 118                                return -EINVAL;
 119
 120                        total_length += lmd->md_iov.iov[i].iov_len;
 121                }
 122
 123                lmd->md_length = total_length;
 124
 125                if ((umd->options & LNET_MD_MAX_SIZE) && /* use max size */
 126                    (umd->max_size < 0 ||
 127                     umd->max_size > total_length)) /* illegal max_size */
 128                        return -EINVAL;
 129
 130        } else if (umd->options & LNET_MD_KIOV) {
 131                niov = umd->length;
 132                lmd->md_niov = umd->length;
 133                memcpy(lmd->md_iov.kiov, umd->start,
 134                       niov * sizeof(lmd->md_iov.kiov[0]));
 135
 136                for (i = 0; i < (int)niov; i++) {
 137                        /* We take the page pointer on trust */
 138                        if (lmd->md_iov.kiov[i].bv_offset +
 139                            lmd->md_iov.kiov[i].bv_len > PAGE_SIZE)
 140                                return -EINVAL; /* invalid length */
 141
 142                        total_length += lmd->md_iov.kiov[i].bv_len;
 143                }
 144
 145                lmd->md_length = total_length;
 146
 147                if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
 148                    (umd->max_size < 0 ||
 149                     umd->max_size > total_length)) /* illegal max_size */
 150                        return -EINVAL;
 151        } else {   /* contiguous */
 152                lmd->md_length = umd->length;
 153                niov = 1;
 154                lmd->md_niov = 1;
 155                lmd->md_iov.iov[0].iov_base = umd->start;
 156                lmd->md_iov.iov[0].iov_len = umd->length;
 157
 158                if ((umd->options & LNET_MD_MAX_SIZE) && /* max size used */
 159                    (umd->max_size < 0 ||
 160                     umd->max_size > (int)umd->length)) /* illegal max_size */
 161                        return -EINVAL;
 162        }
 163
 164        return 0;
 165}
 166
 167/* must be called with resource lock held */
 168static int
 169lnet_md_link(struct lnet_libmd *md, struct lnet_handle_eq eq_handle, int cpt)
 170{
 171        struct lnet_res_container *container = the_lnet.ln_md_containers[cpt];
 172
 173        /*
 174         * NB we are passed an allocated, but inactive md.
 175         * if we return success, caller may lnet_md_unlink() it.
 176         * otherwise caller may only kfree() it.
 177         */
 178        /*
 179         * This implementation doesn't know how to create START events or
 180         * disable END events.  Best to LASSERT our caller is compliant so
 181         * we find out quickly...
 182         */
 183        /*
 184         * TODO - reevaluate what should be here in light of
 185         * the removal of the start and end events
 186         * maybe there we shouldn't even allow LNET_EQ_NONE!)
 187         * LASSERT(!eq);
 188         */
 189        if (!LNetEQHandleIsInvalid(eq_handle)) {
 190                md->md_eq = lnet_handle2eq(&eq_handle);
 191
 192                if (!md->md_eq)
 193                        return -ENOENT;
 194
 195                (*md->md_eq->eq_refs[cpt])++;
 196        }
 197
 198        lnet_res_lh_initialize(container, &md->md_lh);
 199
 200        LASSERT(list_empty(&md->md_list));
 201        list_add(&md->md_list, &container->rec_active);
 202
 203        return 0;
 204}
 205
 206/* must be called with lnet_res_lock held */
 207void
 208lnet_md_deconstruct(struct lnet_libmd *lmd, struct lnet_md *umd)
 209{
 210        /* NB this doesn't copy out all the iov entries so when a
 211         * discontiguous MD is copied out, the target gets to know the
 212         * original iov pointer (in start) and the number of entries it had
 213         * and that's all.
 214         */
 215        umd->start = lmd->md_start;
 216        umd->length = !(lmd->md_options &
 217                      (LNET_MD_IOVEC | LNET_MD_KIOV)) ?
 218                      lmd->md_length : lmd->md_niov;
 219        umd->threshold = lmd->md_threshold;
 220        umd->max_size = lmd->md_max_size;
 221        umd->options = lmd->md_options;
 222        umd->user_ptr = lmd->md_user_ptr;
 223        lnet_eq2handle(&umd->eq_handle, lmd->md_eq);
 224}
 225
 226static int
 227lnet_md_validate(struct lnet_md *umd)
 228{
 229        if (!umd->start && umd->length) {
 230                CERROR("MD start pointer can not be NULL with length %u\n",
 231                       umd->length);
 232                return -EINVAL;
 233        }
 234
 235        if ((umd->options & (LNET_MD_KIOV | LNET_MD_IOVEC)) &&
 236            umd->length > LNET_MAX_IOV) {
 237                CERROR("Invalid option: too many fragments %u, %d max\n",
 238                       umd->length, LNET_MAX_IOV);
 239                return -EINVAL;
 240        }
 241
 242        return 0;
 243}
 244
 245/**
 246 * Create a memory descriptor and attach it to a ME
 247 *
 248 * \param meh A handle for a ME to associate the new MD with.
 249 * \param umd Provides initial values for the user-visible parts of a MD.
 250 * Other than its use for initialization, there is no linkage between this
 251 * structure and the MD maintained by the LNet.
 252 * \param unlink A flag to indicate whether the MD is automatically unlinked
 253 * when it becomes inactive, either because the operation threshold drops to
 254 * zero or because the available memory becomes less than \a umd.max_size.
 255 * (Note that the check for unlinking a MD only occurs after the completion
 256 * of a successful operation on the MD.) The value LNET_UNLINK enables auto
 257 * unlinking; the value LNET_RETAIN disables it.
 258 * \param handle On successful returns, a handle to the newly created MD is
 259 * saved here. This handle can be used later in LNetMDUnlink().
 260 *
 261 * \retval 0       On success.
 262 * \retval -EINVAL If \a umd is not valid.
 263 * \retval -ENOMEM If new MD cannot be allocated.
 264 * \retval -ENOENT Either \a meh or \a umd.eq_handle does not point to a
 265 * valid object. Note that it's OK to supply a NULL \a umd.eq_handle by
 266 * calling LNetInvalidateHandle() on it.
 267 * \retval -EBUSY  If the ME pointed to by \a meh is already associated with
 268 * a MD.
 269 */
int
LNetMDAttach(struct lnet_handle_me meh, struct lnet_md umd,
	     enum lnet_unlink unlink, struct lnet_handle_md *handle)
{
	LIST_HEAD(matches);
	LIST_HEAD(drops);
	struct lnet_me *me;
	struct lnet_libmd *md;
	int cpt;
	int rc;

	LASSERT(the_lnet.ln_refcount > 0);

	if (lnet_md_validate(&umd))
		return -EINVAL;

	/* an MD attached to an ME must service at least one of GET/PUT */
	if (!(umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
		CERROR("Invalid option: no MD_OP set\n");
		return -EINVAL;
	}

	md = lnet_md_alloc(&umd);
	if (!md)
		return -ENOMEM;

	rc = lnet_md_build(md, &umd, unlink);
	if (rc)
		goto out_free;

	/* serialize with the CPT partition that owns the ME's cookie */
	cpt = lnet_cpt_of_cookie(meh.cookie);

	lnet_res_lock(cpt);

	me = lnet_handle2me(&meh);
	if (!me)
		rc = -ENOENT;
	else if (me->me_md)
		rc = -EBUSY;
	else
		rc = lnet_md_link(md, umd.eq_handle, cpt);

	if (rc)
		goto out_unlock;

	/*
	 * attach this MD to portal of ME and check if it matches any
	 * blocked msgs on this portal
	 */
	lnet_ptl_attach_md(me, md, &matches, &drops);

	lnet_md2handle(handle, md);

	lnet_res_unlock(cpt);

	/* deliver/drop previously-blocked messages outside the lock */
	lnet_drop_delayed_msg_list(&drops, "Bad match");
	lnet_recv_delayed_msg_list(&matches);

	return 0;

out_unlock:
	lnet_res_unlock(cpt);
out_free:
	/* md never became visible via a handle, so a plain free is safe */
	kfree(md);
	return rc;
}
EXPORT_SYMBOL(LNetMDAttach);
 336
 337/**
 338 * Create a "free floating" memory descriptor - a MD that is not associated
 339 * with a ME. Such MDs are usually used in LNetPut() and LNetGet() operations.
 340 *
 341 * \param umd,unlink See the discussion for LNetMDAttach().
 342 * \param handle On successful returns, a handle to the newly created MD is
 343 * saved here. This handle can be used later in LNetMDUnlink(), LNetPut(),
 344 * and LNetGet() operations.
 345 *
 346 * \retval 0       On success.
 347 * \retval -EINVAL If \a umd is not valid.
 348 * \retval -ENOMEM If new MD cannot be allocated.
 349 * \retval -ENOENT \a umd.eq_handle does not point to a valid EQ. Note that
 350 * it's OK to supply a NULL \a umd.eq_handle by calling
 351 * LNetInvalidateHandle() on it.
 352 */
 353int
 354LNetMDBind(struct lnet_md umd, enum lnet_unlink unlink,
 355           struct lnet_handle_md *handle)
 356{
 357        struct lnet_libmd *md;
 358        int cpt;
 359        int rc;
 360
 361        LASSERT(the_lnet.ln_refcount > 0);
 362
 363        if (lnet_md_validate(&umd))
 364                return -EINVAL;
 365
 366        if ((umd.options & (LNET_MD_OP_GET | LNET_MD_OP_PUT))) {
 367                CERROR("Invalid option: GET|PUT illegal on active MDs\n");
 368                return -EINVAL;
 369        }
 370
 371        md = lnet_md_alloc(&umd);
 372        if (!md)
 373                return -ENOMEM;
 374
 375        rc = lnet_md_build(md, &umd, unlink);
 376        if (rc)
 377                goto out_free;
 378
 379        cpt = lnet_res_lock_current();
 380
 381        rc = lnet_md_link(md, umd.eq_handle, cpt);
 382        if (rc)
 383                goto out_unlock;
 384
 385        lnet_md2handle(handle, md);
 386
 387        lnet_res_unlock(cpt);
 388        return 0;
 389
 390out_unlock:
 391        lnet_res_unlock(cpt);
 392out_free:
 393        kfree(md);
 394
 395        return rc;
 396}
 397EXPORT_SYMBOL(LNetMDBind);
 398
 399/**
 400 * Unlink the memory descriptor from any ME it may be linked to and release
 401 * the internal resources associated with it. As a result, active messages
 402 * associated with the MD may get aborted.
 403 *
 404 * This function does not free the memory region associated with the MD;
 405 * i.e., the memory the user allocated for this MD. If the ME associated with
 406 * this MD is not NULL and was created with auto unlink enabled, the ME is
 407 * unlinked as well (see LNetMEAttach()).
 408 *
 409 * Explicitly unlinking a MD via this function call has the same behavior as
 410 * a MD that has been automatically unlinked, except that no LNET_EVENT_UNLINK
 411 * is generated in the latter case.
 412 *
 413 * An unlinked event can be reported in two ways:
 414 * - If there's no pending operations on the MD, it's unlinked immediately
 415 *   and an LNET_EVENT_UNLINK event is logged before this function returns.
 416 * - Otherwise, the MD is only marked for deletion when this function
 417 *   returns, and the unlinked event will be piggybacked on the event of
 418 *   the completion of the last operation by setting the unlinked field of
 419 *   the event. No dedicated LNET_EVENT_UNLINK event is generated.
 420 *
 421 * Note that in both cases the unlinked field of the event is always set; no
 422 * more event will happen on the MD after such an event is logged.
 423 *
 424 * \param mdh A handle for the MD to be unlinked.
 425 *
 426 * \retval 0       On success.
 427 * \retval -ENOENT If \a mdh does not point to a valid MD object.
 428 */
 429int
 430LNetMDUnlink(struct lnet_handle_md mdh)
 431{
 432        struct lnet_event ev;
 433        struct lnet_libmd *md;
 434        int cpt;
 435
 436        LASSERT(the_lnet.ln_refcount > 0);
 437
 438        cpt = lnet_cpt_of_cookie(mdh.cookie);
 439        lnet_res_lock(cpt);
 440
 441        md = lnet_handle2md(&mdh);
 442        if (!md) {
 443                lnet_res_unlock(cpt);
 444                return -ENOENT;
 445        }
 446
 447        md->md_flags |= LNET_MD_FLAG_ABORTED;
 448        /*
 449         * If the MD is busy, lnet_md_unlink just marks it for deletion, and
 450         * when the LND is done, the completion event flags that the MD was
 451         * unlinked.  Otherwise, we enqueue an event now...
 452         */
 453        if (md->md_eq && !md->md_refcount) {
 454                lnet_build_unlink_event(md, &ev);
 455                lnet_eq_enqueue_event(md->md_eq, &ev);
 456        }
 457
 458        lnet_md_unlink(md);
 459
 460        lnet_res_unlock(cpt);
 461        return 0;
 462}
 463EXPORT_SYMBOL(LNetMDUnlink);
 464