qemu/hw/9pfs/codir.c
/*
 * 9p backend
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "fsdev/qemu-fsdev.h"
#include "qemu/thread.h"
#include "qemu/coroutine.h"
#include "qemu/main-loop.h"
#include "coth.h"

/*
 * Intended to be called from bottom-half (e.g. background I/O thread)
 * context.
 */
static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
{
    int err = 0;
    V9fsState *s = pdu->s;
    struct dirent *entry;

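    /*
     * readdir() returns NULL both at end of directory and on error;
     * clearing errno first is the only way to tell the two cases apart.
     */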
    errno = 0;
    entry = s->ops->readdir(&s->ctx, &fidp->fs);
    if (!entry && errno) {
        *dent = NULL;
        err = -errno;
    } else {
        *dent = entry;
    }
    return err;
}

/*
 * TODO: This will be removed for performance reasons.
 * Use v9fs_co_readdir_many() instead.
 */
int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                 struct dirent **dent)
{
    int err;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
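    /*
     * Dispatch the fs driver call to a worker thread and yield this
     * coroutine until the result is available.
     */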
    v9fs_co_run_in_worker({
        err = do_readdir(pdu, fidp, dent);
    });
    return err;
}

/*
 * This is solely executed on a background IO thread.
 *
 * See v9fs_co_readdir_many() (as its only user) below for details.
 */
static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                           struct V9fsDirEnt **entries, off_t offset,
                           int32_t maxsize, bool dostat)
{
    V9fsState *s = pdu->s;
    V9fsString name;
    int len, err = 0;
    int32_t size = 0;
    off_t saved_dir_pos;
    struct dirent *dent;
    struct V9fsDirEnt *e = NULL;
    V9fsPath path;
    struct stat stbuf;

    *entries = NULL;
    v9fs_path_init(&path);

    /*
     * TODO: A warn_report_once() should go here if taking the lock fails.
     *
     * With a well-behaved 9p client we should not run into concurrency
     * here, because such a client would not use the same fid for
     * concurrent requests. We take the lock here for safety reasons
     * nevertheless. A misbehaving client would then suffer performance
     * issues, so that situation is worth logging here.
     */
    v9fs_readdir_lock(&fidp->fs.dir);

    /* seek directory to requested initial position */
    if (offset == 0) {
        s->ops->rewinddir(&s->ctx, &fidp->fs);
    } else {
        s->ops->seekdir(&s->ctx, &fidp->fs, offset);
    }

    /* save the directory position */
    saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs);
    if (saved_dir_pos < 0) {
        err = saved_dir_pos;
        goto out;
    }

    while (true) {
        /* interrupt loop if request was cancelled by a Tflush request */
        if (v9fs_request_cancelled(pdu)) {
            err = -EINTR;
            break;
        }

        /* get directory entry from fs driver */
        err = do_readdir(pdu, fidp, &dent);
        if (err || !dent) {
            break;
        }

        /*
         * Stop this loop as soon as it would exceed the allowed maximum
         * response message size for the directory entries collected so far,
         * because anything beyond that size would have to be discarded by
         * the 9p controller (main thread / top half) anyway.
         */
        v9fs_string_init(&name);
        v9fs_string_sprintf(&name, "%s", dent->d_name);
        len = v9fs_readdir_response_size(&name);
        v9fs_string_free(&name);
        if (size + len > maxsize) {
            /* this is not an error case actually */
            break;
        }

        /* append next node to result chain */
        if (!e) {
            *entries = e = g_malloc0(sizeof(V9fsDirEnt));
        } else {
            e = e->next = g_malloc0(sizeof(V9fsDirEnt));
        }
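        /*
         * Copy the entry: the fs driver may reuse its dirent buffer on the
         * next readdir() call.
         */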
        e->dent = g_malloc0(sizeof(struct dirent));
        memcpy(e->dent, dent, sizeof(struct dirent));

        /* perform a full stat() for directory entry if requested by caller */
        if (dostat) {
            err = s->ops->name_to_path(
                &s->ctx, &fidp->path, dent->d_name, &path
            );
            if (err < 0) {
                err = -errno;
                break;
            }

            err = s->ops->lstat(&s->ctx, &path, &stbuf);
            if (err < 0) {
                err = -errno;
                break;
            }

            e->st = g_malloc0(sizeof(struct stat));
            memcpy(e->st, &stbuf, sizeof(struct stat));
        }

        size += len;
        saved_dir_pos = dent->d_off;
    }

    /* restore (last) saved position */
    s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos);

out:
    v9fs_readdir_unlock(&fidp->fs.dir);
    v9fs_path_free(&path);
    if (err < 0) {
        return err;
    }
    return size;
}

/**
 * @brief Reads multiple directory entries in one rush.
 *
 * Retrieves the requested (maximum amount of) directory entries from the fs
 * driver. This function must only be called by the main IO thread (top
 * half). Internally this function call is dispatched to a background IO
 * thread (bottom half) where it is eventually executed by the fs driver.
 *
 * @discussion Acquiring multiple directory entries in one rush from the fs
 * driver, instead of retrieving each directory entry individually, is very
 * beneficial from a performance point of view, because latency is added to
 * every fs driver request. In practice this could lead to overall latencies
 * of several hundred ms for reading all entries of just a single directory
 * if every directory entry was requested individually from the fs driver.
 *
 * @note You must @b ALWAYS call @c v9fs_free_dirents(entries) after calling
 * v9fs_co_readdir_many(), in both success and error cases, to avoid memory
 * leaks once @p entries are no longer needed.
 *
 * @param pdu - the causing 9p (T_readdir) client request
 * @param fidp - already opened directory that readdir shall be performed on
 * @param entries - output for directory entries (must not be NULL)
 * @param offset - initial position inside the directory the function shall
 *                 seek to before retrieving the directory entries
 * @param maxsize - maximum result message body size (in bytes)
 * @param dostat - whether a stat() should be performed and returned for
 *                 each directory entry
 * @returns resulting response message body size (in bytes) on success,
 *          negative error code otherwise
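 *
 * A minimal caller sketch (hypothetical, for illustration only; assumes a
 * coroutine context with a valid pdu/fidp pair and some maxsize):
 *
 * @code
 * struct V9fsDirEnt *entries = NULL;
 * int size = v9fs_co_readdir_many(pdu, fidp, &entries, 0, maxsize, true);
 * if (size >= 0) {
 *     for (struct V9fsDirEnt *e = entries; e; e = e->next) {
 *         // process e->dent (and e->st, since dostat was true)
 *     }
 * }
 * v9fs_free_dirents(entries); // required on success and error alike
 * @endcode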
 */
int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                                      struct V9fsDirEnt **entries,
                                      off_t offset, int32_t maxsize,
                                      bool dostat)
{
    int err = 0;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker({
        err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat);
    });
    return err;
}

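/*
 * Returns the current position of the directory stream associated with
 * @fidp, or a negative errno on failure.
 */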
off_t coroutine_fn v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    off_t err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->telldir(&s->ctx, &fidp->fs);
            if (err < 0) {
                err = -errno;
            }
        });
    return err;
}

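/*
 * Moves the position of the directory stream associated with @fidp to
 * @offset. No error is reported; the call is silently skipped if the
 * request was cancelled.
 */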
void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
                                  off_t offset)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->seekdir(&s->ctx, &fidp->fs, offset);
        });
}

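/*
 * Resets the position of the directory stream associated with @fidp to the
 * beginning of the directory.
 */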
void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    V9fsState *s = pdu->s;
    if (v9fs_request_cancelled(pdu)) {
        return;
    }
    v9fs_co_run_in_worker(
        {
            s->ops->rewinddir(&s->ctx, &fidp->fs);
        });
}

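/*
 * Creates directory @name under @fidp with the given mode and ownership,
 * then stats the freshly created directory into @stbuf. Returns 0 on
 * success, a negative errno otherwise.
 */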
int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, mode_t mode, uid_t uid,
                               gid_t gid, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode;
    cred.fc_uid = uid;
    cred.fc_gid = gid;
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->mkdir(&s->ctx, &fidp->path, name->data, &cred);
            if (err < 0) {
                err = -errno;
            } else {
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                    }
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    return err;
}

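/*
 * Opens the directory referred to by @fidp via the fs driver, storing the
 * open handle in @fidp->fs. Returns 0 on success, a negative errno
 * otherwise.
 */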
int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs);
            if (err < 0) {
                err = -errno;
            } else {
                err = 0;
            }
        });
    v9fs_path_unlock(s);
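    /* account for the new fd; reclaim fds if the high-water mark is exceeded */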
    if (!err) {
        total_open_fd++;
        if (total_open_fd > open_fd_hw) {
            v9fs_reclaim_fd(pdu);
        }
    }
    return err;
}

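/*
 * Closes a directory previously opened with v9fs_co_opendir() and updates
 * the global open fd count. Returns 0 on success, a negative errno
 * otherwise.
 */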
int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
{
    int err;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    v9fs_co_run_in_worker(
        {
            err = s->ops->closedir(&s->ctx, fs);
            if (err < 0) {
                err = -errno;
            }
        });
    if (!err) {
        total_open_fd--;
    }
    return err;
}