linux/drivers/staging/lustre/lnet/lnet/lib-eq.c
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lnet/lnet/lib-eq.c
 *
 * Library level Event queue management routines
 */

#define DEBUG_SUBSYSTEM S_LNET
#include "../../include/linux/lnet/lib-lnet.h"

/**
 * Create an event queue that has room for \a count number of events.
 *
 * The event queue is circular and older events will be overwritten by new
 * ones if they are not removed in time by the user using the functions
 * LNetEQGet(), LNetEQWait(), or LNetEQPoll(). It is up to the user to
 * determine the appropriate size of the event queue to prevent this loss
 * of events. Note that when an EQ handler is specified in \a callback, no
 * event loss can happen, since the handler is run for each event deposited
 * into the EQ.
 *
 * \param count The number of events to be stored in the event queue. It
 * will be rounded up to the next power of two.
 * \param callback A handler function that runs when an event is deposited
 * into the EQ. The constant value LNET_EQ_HANDLER_NONE can be used to
 * indicate that no event handler is desired.
 * \param handle On successful return, this location will hold a handle for
 * the newly created EQ.
 *
 * \retval 0       On success.
 * \retval -EINVAL If a parameter is not valid.
 * \retval -ENOMEM If memory for the EQ can't be allocated.
 *
 * \see lnet_eq_handler_t for the discussion on EQ handler semantics.
 */
int
LNetEQAlloc(unsigned int count, lnet_eq_handler_t callback,
            lnet_handle_eq_t *handle)
{
        lnet_eq_t     *eq;

        LASSERT(the_lnet.ln_init);
        LASSERT(the_lnet.ln_refcount > 0);

        /* We need count to be a power of 2 so that when eq_{enq,deq}_seq
         * overflow they don't skip entries, and the queue keeps the same
         * apparent capacity at all times */

        count = cfs_power2_roundup(count);

        if (callback != LNET_EQ_HANDLER_NONE && count != 0)
                CWARN("An EQ callback already receives every event; setting eqcount %d for polled events only adds locking overhead. Please contact the developers to confirm this is intended\n", count);

        /* count can be 0 if the caller only needs the callback; this lets
         * us skip the overhead of enqueueing events */
        if (count == 0 && callback == LNET_EQ_HANDLER_NONE)
                return -EINVAL;

        eq = lnet_eq_alloc();
        if (eq == NULL)
                return -ENOMEM;

        if (count != 0) {
                LIBCFS_ALLOC(eq->eq_events, count * sizeof(lnet_event_t));
                if (eq->eq_events == NULL)
                        goto failed;
                /* NB the allocator has set all event sequence numbers to 0,
                 * so they are all earlier than eq_deq_seq */
        }

        eq->eq_deq_seq = 1;
        eq->eq_enq_seq = 1;
        eq->eq_size = count;
        eq->eq_callback = callback;

        eq->eq_refs = cfs_percpt_alloc(lnet_cpt_table(),
                                       sizeof(*eq->eq_refs[0]));
        if (eq->eq_refs == NULL)
                goto failed;

        /* MUST hold the exclusive lnet_res_lock */
        lnet_res_lock(LNET_LOCK_EX);
        /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so that both EQ
         * lookup and event polling can be done with only lnet_eq_wait_lock */
        lnet_eq_wait_lock();

        lnet_res_lh_initialize(&the_lnet.ln_eq_container, &eq->eq_lh);
        list_add(&eq->eq_list, &the_lnet.ln_eq_container.rec_active);

        lnet_eq_wait_unlock();
        lnet_res_unlock(LNET_LOCK_EX);

        lnet_eq2handle(handle, eq);
        return 0;

failed:
        if (eq->eq_events != NULL)
                LIBCFS_FREE(eq->eq_events, count * sizeof(lnet_event_t));

        if (eq->eq_refs != NULL)
                cfs_percpt_free(eq->eq_refs);

        lnet_eq_free(eq);
        return -ENOMEM;
}
EXPORT_SYMBOL(LNetEQAlloc);
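/*
 * Illustrative usage sketch (not part of the original file): a caller might
 * create one EQ for polling (64 slots, rounded up to a power of two) and one
 * callback-only EQ (count == 0, so enqueueing is skipped entirely).  The
 * identifiers "my_eq", "my_cb_eq" and "my_handler" are hypothetical; only
 * LNetEQAlloc() and LNET_EQ_HANDLER_NONE come from this API.
 *
 *      lnet_handle_eq_t my_eq, my_cb_eq;
 *      int rc;
 *
 *      rc = LNetEQAlloc(64, LNET_EQ_HANDLER_NONE, &my_eq);
 *      if (rc != 0)
 *              return rc;
 *
 *      rc = LNetEQAlloc(0, my_handler, &my_cb_eq);
 *      if (rc != 0)
 *              return rc;
 */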

/**
 * Release the resources associated with an event queue if it's idle;
 * otherwise do nothing and it's up to the user to try again.
 *
 * \param eqh A handle for the event queue to be released.
 *
 * \retval 0 If the EQ is not in use and freed.
 * \retval -ENOENT If \a eqh does not point to a valid EQ.
 * \retval -EBUSY  If the EQ is still in use by some MDs.
 */
int
LNetEQFree(lnet_handle_eq_t eqh)
{
        struct lnet_eq  *eq;
        lnet_event_t    *events = NULL;
        int             **refs = NULL;
        int             *ref;
        int             rc = 0;
        int             size = 0;
        int             i;

        LASSERT(the_lnet.ln_init);
        LASSERT(the_lnet.ln_refcount > 0);

        lnet_res_lock(LNET_LOCK_EX);
        /* NB: hold lnet_eq_wait_lock for EQ link/unlink, so that both EQ
         * lookup and event polling can be done with only lnet_eq_wait_lock */
        lnet_eq_wait_lock();

        eq = lnet_handle2eq(&eqh);
        if (eq == NULL) {
                rc = -ENOENT;
                goto out;
        }

        cfs_percpt_for_each(ref, i, eq->eq_refs) {
                LASSERT(*ref >= 0);
                if (*ref == 0)
                        continue;

                CDEBUG(D_NET, "Event queue (%d: %d) busy on destroy.\n",
                       i, *ref);
                rc = -EBUSY;
                goto out;
        }

        /* stash for freeing after the lock is dropped */
        events  = eq->eq_events;
        size    = eq->eq_size;
        refs    = eq->eq_refs;

        lnet_res_lh_invalidate(&eq->eq_lh);
        list_del(&eq->eq_list);
        lnet_eq_free_locked(eq);
 out:
        lnet_eq_wait_unlock();
        lnet_res_unlock(LNET_LOCK_EX);

        if (events != NULL)
                LIBCFS_FREE(events, size * sizeof(lnet_event_t));
        if (refs != NULL)
                cfs_percpt_free(refs);

        return rc;
}
EXPORT_SYMBOL(LNetEQFree);
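/*
 * Illustrative teardown sketch (not part of the original file): LNetEQFree()
 * returns -EBUSY while any MD still references the EQ, so a caller would
 * unlink those MDs, drain pending events, and then retry.  "my_eq" and the
 * retry label are hypothetical.
 *
 *      rc = LNetEQFree(my_eq);
 *      if (rc == -EBUSY)
 *              goto unlink_mds_and_retry;
 */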

void
lnet_eq_enqueue_event(lnet_eq_t *eq, lnet_event_t *ev)
{
        /* MUST be called with the resource lock held, but w/o lnet_eq_wait_lock */
        int index;

        if (eq->eq_size == 0) {
                LASSERT(eq->eq_callback != LNET_EQ_HANDLER_NONE);
                eq->eq_callback(ev);
                return;
        }

        lnet_eq_wait_lock();
        ev->sequence = eq->eq_enq_seq++;

        LASSERT(eq->eq_size == LOWEST_BIT_SET(eq->eq_size));
        index = ev->sequence & (eq->eq_size - 1);

        eq->eq_events[index] = *ev;

        if (eq->eq_callback != LNET_EQ_HANDLER_NONE)
                eq->eq_callback(ev);

        /* Wake anyone waiting in LNetEQPoll() */
        if (waitqueue_active(&the_lnet.ln_eq_waitq))
                wake_up_all(&the_lnet.ln_eq_waitq);
        lnet_eq_wait_unlock();
}
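/*
 * Worked example of the slot calculation above (illustrative only):
 *
 *      index = ev->sequence & (eq->eq_size - 1);
 *      e.g. eq_size == 8:  sequence 9  -> 9 & 7  == 1
 *                          sequence 16 -> 16 & 7 == 0
 *
 * Because eq_size is a power of two, this mapping stays consistent even when
 * eq_enq_seq eventually wraps around, which is exactly why LNetEQAlloc()
 * rounds the requested count up with cfs_power2_roundup().
 */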

static int
lnet_eq_dequeue_event(lnet_eq_t *eq, lnet_event_t *ev)
{
        int             new_index = eq->eq_deq_seq & (eq->eq_size - 1);
        lnet_event_t    *new_event = &eq->eq_events[new_index];
        int             rc;

        /* must be called with lnet_eq_wait_lock held */
        if (LNET_SEQ_GT(eq->eq_deq_seq, new_event->sequence))
                return 0;

        /* We've got a new event... */
        *ev = *new_event;

        CDEBUG(D_INFO, "event: %p, sequence: %lu, eq->size: %u\n",
               new_event, eq->eq_deq_seq, eq->eq_size);

        /* ...but did it overwrite an event we've not seen yet? */
        if (eq->eq_deq_seq == new_event->sequence) {
                rc = 1;
        } else {
                /* don't complain with CERROR: some EQs are sized small
                 * anyway; if it's important, the caller should complain */
                CDEBUG(D_NET, "Event Queue Overflow: eq seq %lu ev seq %lu\n",
                       eq->eq_deq_seq, new_event->sequence);
                rc = -EOVERFLOW;
        }

        eq->eq_deq_seq = new_event->sequence + 1;
        return rc;
}
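/*
 * Worked example of the overflow check above (illustrative only): with
 * eq_size == 4 and eq_deq_seq == 2, the consumer looks at slot 2 & 3 == 2.
 * If the producer has meanwhile written events with sequences up to 6, that
 * slot now holds sequence 6; since 6 is newer than 2, the consumer returns
 * -EOVERFLOW together with event 6 and advances eq_deq_seq to 7, skipping
 * whatever was overwritten in between.
 */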

/**
 * A nonblocking function that can be used to get the next event in an EQ.
 * If an event handler is associated with the EQ, the handler will run before
 * this function returns successfully. The event is removed from the queue.
 *
 * \param eventq A handle for the event queue.
 * \param event On successful return (1 or -EOVERFLOW), this location will
 * hold the next event in the EQ.
 *
 * \retval 0      No pending event in the EQ.
 * \retval 1      Indicates success.
 * \retval -ENOENT    If \a eventq does not point to a valid EQ.
 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
 * at least one event between this event and the last event obtained from the
 * EQ has been dropped due to limited space in the EQ.
 */
int
LNetEQGet(lnet_handle_eq_t eventq, lnet_event_t *event)
{
        int which;

        return LNetEQPoll(&eventq, 1, 0,
                          event, &which);
}
EXPORT_SYMBOL(LNetEQGet);
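/*
 * Illustrative usage sketch (not part of the original file): draining an EQ
 * without blocking.  LNetEQGet() returns 0 once the queue is empty, -ENOENT
 * for an invalid handle, and both 1 and -EOVERFLOW deliver an event.
 * "my_eq" and "handle_event" are hypothetical.
 *
 *      lnet_event_t ev;
 *      int rc;
 *
 *      while ((rc = LNetEQGet(my_eq, &ev)) != 0) {
 *              if (rc == -ENOENT)
 *                      break;
 *              if (rc == -EOVERFLOW)
 *                      CWARN("some events were dropped\n");
 *              handle_event(&ev);
 *      }
 */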

/**
 * Block the calling process until there is an event in the EQ.
 * If an event handler is associated with the EQ, the handler will run before
 * this function returns successfully. This function returns the next event
 * in the EQ and removes it from the EQ.
 *
 * \param eventq A handle for the event queue.
 * \param event On successful return (1 or -EOVERFLOW), this location will
 * hold the next event in the EQ.
 *
 * \retval 1      Indicates success.
 * \retval -ENOENT    If \a eventq does not point to a valid EQ.
 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
 * at least one event between this event and the last event obtained from the
 * EQ has been dropped due to limited space in the EQ.
 */
int
LNetEQWait(lnet_handle_eq_t eventq, lnet_event_t *event)
{
        int which;

        return LNetEQPoll(&eventq, 1, LNET_TIME_FOREVER,
                          event, &which);
}
EXPORT_SYMBOL(LNetEQWait);
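/*
 * Illustrative usage sketch (not part of the original file): a blocking
 * consumer loop.  Unlike LNetEQGet(), LNetEQWait() sleeps until an event
 * arrives, so it does not return 0 for an empty queue.  "my_eq" and
 * "handle_event" are hypothetical.
 *
 *      for (;;) {
 *              rc = LNetEQWait(my_eq, &ev);
 *              if (rc == -ENOENT)
 *                      break;
 *              if (rc == -EOVERFLOW)
 *                      CWARN("some events were dropped\n");
 *              handle_event(&ev);
 *      }
 */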

static int
lnet_eq_wait_locked(int *timeout_ms)
__must_hold(&the_lnet.ln_eq_wait_lock)
{
        int             tms = *timeout_ms;
        int             wait;
        wait_queue_t    wl;
        unsigned long   now;

        if (tms == 0)
                return -1; /* caller doesn't want to wait and there is no new event */

        init_waitqueue_entry(&wl, current);
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&the_lnet.ln_eq_waitq, &wl);

        lnet_eq_wait_unlock();

        if (tms < 0) {
                schedule();
        } else {
                struct timeval tv;

                now = cfs_time_current();
                schedule_timeout(cfs_time_seconds(tms) / 1000);
                cfs_duration_usec(cfs_time_sub(cfs_time_current(), now), &tv);
                tms -= (int)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
                if (tms < 0) /* no time left to wait, but there may be a new event */
                        tms = 0;
        }

        wait = tms != 0; /* might need to call here again */
        *timeout_ms = tms;

        lnet_eq_wait_lock();
        remove_wait_queue(&the_lnet.ln_eq_waitq, &wl);

        return wait;
}

/**
 * Block the calling process until there's an event from a set of EQs or
 * timeout happens.
 *
 * If an event handler is associated with the EQ, the handler will run before
 * this function returns successfully, in which case the corresponding event
 * is consumed.
 *
 * LNetEQPoll() provides a timeout to allow applications to poll, block for a
 * fixed period, or block indefinitely.
 *
 * \param eventqs,neq An array of EQ handles, and size of the array.
 * \param timeout_ms Time in milliseconds to wait for an event to occur on
 * one of the EQs. The constant LNET_TIME_FOREVER can be used to indicate an
 * infinite timeout.
 * \param event,which On successful return (1 or -EOVERFLOW), \a event will
 * hold the next event in the EQs, and \a which will contain the index of the
 * EQ from which the event was taken.
 *
 * \retval 0      No pending event in the EQs after timeout.
 * \retval 1      Indicates success.
 * \retval -EOVERFLOW Indicates success (i.e., an event is returned) and that
 * at least one event between this event and the last event obtained from the
 * EQ indicated by \a which has been dropped due to limited space in the EQ.
 * \retval -ENOENT    If there's an invalid handle in \a eventqs.
 */
int
LNetEQPoll(lnet_handle_eq_t *eventqs, int neq, int timeout_ms,
           lnet_event_t *event, int *which)
{
        int     wait = 1;
        int     rc;
        int     i;

        LASSERT(the_lnet.ln_init);
        LASSERT(the_lnet.ln_refcount > 0);

        if (neq < 1)
                return -ENOENT;

        lnet_eq_wait_lock();

        for (;;) {
                for (i = 0; i < neq; i++) {
                        lnet_eq_t *eq = lnet_handle2eq(&eventqs[i]);

                        if (eq == NULL) {
                                lnet_eq_wait_unlock();
                                return -ENOENT;
                        }

                        rc = lnet_eq_dequeue_event(eq, event);
                        if (rc != 0) {
                                lnet_eq_wait_unlock();
                                *which = i;
                                return rc;
                        }
                }

                if (wait == 0)
                        break;

                /*
                 * Return value of lnet_eq_wait_locked():
                 * -1 : did not sleep, and there is certainly no new event
                 *  1 : slept and there may be a new event; time remains,
                 *      so we may wait again
                 *  0 : time is up; there may still be a new event, so
                 *      dequeue must be called one more time
                 */
                wait = lnet_eq_wait_locked(&timeout_ms);
                if (wait < 0) /* no new event */
                        break;
        }

        lnet_eq_wait_unlock();
        return 0;
}
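/*
 * Illustrative usage sketch (not part of the original file): polling two EQs
 * with a one-second timeout.  A return of 0 means the timeout expired with
 * no event; 1 and -EOVERFLOW both deliver an event and set "which" to the
 * index of the EQ it came from.  "eqs", "handle_event" and the EQ setup are
 * hypothetical.
 *
 *      lnet_handle_eq_t eqs[2];
 *      lnet_event_t ev;
 *      int which;
 *      int rc;
 *
 *      rc = LNetEQPoll(eqs, 2, 1000, &ev, &which);
 *      if (rc == 0)
 *              CDEBUG(D_NET, "no event within 1000ms\n");
 *      else if (rc == 1 || rc == -EOVERFLOW)
 *              handle_event(&ev);
 */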