   1/*
   2 * 9p backend
   3 *
   4 * Copyright IBM, Corp. 2011
   5 *
   6 * Authors:
   7 *  Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
   8 *
   9 * This work is licensed under the terms of the GNU GPL, version 2.  See
  10 * the COPYING file in the top-level directory.
  11 *
  12 */
  13
  14/*
  15 * Not so fast! You might want to read the 9p developer docs first:
  16 * https://wiki.qemu.org/Documentation/9p
  17 */
  18
  19#include "qemu/osdep.h"
  20#include "fsdev/qemu-fsdev.h"
  21#include "qemu/thread.h"
  22#include "qemu/coroutine.h"
  23#include "qemu/main-loop.h"
  24#include "coth.h"
  25
  26/*
  27 * Intended to be called from bottom-half (e.g. background I/O thread)
  28 * context.
  29 */
  30static int do_readdir(V9fsPDU *pdu, V9fsFidState *fidp, struct dirent **dent)
  31{
  32    int err = 0;
  33    V9fsState *s = pdu->s;
  34    struct dirent *entry;
  35
  36    errno = 0;
  37    entry = s->ops->readdir(&s->ctx, &fidp->fs);
  38    if (!entry && errno) {
  39        *dent = NULL;
  40        err = -errno;
  41    } else {
  42        *dent = entry;
  43    }
  44    return err;
  45}
  46
  47/*
  48 * TODO: This will be removed for performance reasons.
  49 * Use v9fs_co_readdir_many() instead.
  50 */
  51int coroutine_fn v9fs_co_readdir(V9fsPDU *pdu, V9fsFidState *fidp,
  52                                 struct dirent **dent)
  53{
  54    int err;
  55
  56    if (v9fs_request_cancelled(pdu)) {
  57        return -EINTR;
  58    }
  59    v9fs_co_run_in_worker({
  60        err = do_readdir(pdu, fidp, dent);
  61    });
  62    return err;
  63}
  64
  65/*
  66 * This is solely executed on a background IO thread.
  67 *
  68 * See v9fs_co_readdir_many() (as its only user) below for details.
  69 */
  70static int do_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
  71                           struct V9fsDirEnt **entries, off_t offset,
  72                           int32_t maxsize, bool dostat)
  73{
  74    V9fsState *s = pdu->s;
  75    V9fsString name;
  76    int len, err = 0;
  77    int32_t size = 0;
  78    off_t saved_dir_pos;
  79    struct dirent *dent;
  80    struct V9fsDirEnt *e = NULL;
  81    V9fsPath path;
  82    struct stat stbuf;
  83
  84    *entries = NULL;
  85    v9fs_path_init(&path);
  86
  87    /*
  88     * TODO: Here should be a warn_report_once() if lock failed.
  89     *
  90     * With a good 9p client we should not get into concurrency here,
  91     * because a good client would not use the same fid for concurrent
  92     * requests. We do the lock here for safety reasons though. However
  93     * the client would then suffer performance issues, so better log that
  94     * issue here.
  95     */
  96    v9fs_readdir_lock(&fidp->fs.dir);
  97
  98    /* seek directory to requested initial position */
  99    if (offset == 0) {
 100        s->ops->rewinddir(&s->ctx, &fidp->fs);
 101    } else {
 102        s->ops->seekdir(&s->ctx, &fidp->fs, offset);
 103    }
 104
 105    /* save the directory position */
 106    saved_dir_pos = s->ops->telldir(&s->ctx, &fidp->fs);
 107    if (saved_dir_pos < 0) {
 108        err = saved_dir_pos;
 109        goto out;
 110    }
 111
 112    while (true) {
 113        /* interrupt loop if request was cancelled by a Tflush request */
 114        if (v9fs_request_cancelled(pdu)) {
 115            err = -EINTR;
 116            break;
 117        }
 118
 119        /* get directory entry from fs driver */
 120        err = do_readdir(pdu, fidp, &dent);
 121        if (err || !dent) {
 122            break;
 123        }
 124
 125        /*
 126         * stop this loop as soon as it would exceed the allowed maximum
 127         * response message size for the directory entries collected so far,
 128         * because anything beyond that size would need to be discarded by
 129         * 9p controller (main thread / top half) anyway
 130         */
 131        v9fs_string_init(&name);
 132        v9fs_string_sprintf(&name, "%s", dent->d_name);
 133        len = v9fs_readdir_response_size(&name);
 134        v9fs_string_free(&name);
 135        if (size + len > maxsize) {
 136            /* this is not an error case actually */
 137            break;
 138        }
 139
 140        /* append next node to result chain */
 141        if (!e) {
 142            *entries = e = g_malloc0(sizeof(V9fsDirEnt));
 143        } else {
 144            e = e->next = g_malloc0(sizeof(V9fsDirEnt));
 145        }
 146        e->dent = g_malloc0(sizeof(struct dirent));
 147        memcpy(e->dent, dent, sizeof(struct dirent));
 148
 149        /* perform a full stat() for directory entry if requested by caller */
 150        if (dostat) {
 151            err = s->ops->name_to_path(
 152                &s->ctx, &fidp->path, dent->d_name, &path
 153            );
 154            if (err < 0) {
 155                err = -errno;
 156                break;
 157            }
 158
 159            err = s->ops->lstat(&s->ctx, &path, &stbuf);
 160            if (err < 0) {
 161                err = -errno;
 162                break;
 163            }
 164
 165            e->st = g_malloc0(sizeof(struct stat));
 166            memcpy(e->st, &stbuf, sizeof(struct stat));
 167        }
 168
 169        size += len;
 170        saved_dir_pos = dent->d_off;
 171    }
 172
 173    /* restore (last) saved position */
 174    s->ops->seekdir(&s->ctx, &fidp->fs, saved_dir_pos);
 175
 176out:
 177    v9fs_readdir_unlock(&fidp->fs.dir);
 178    v9fs_path_free(&path);
 179    if (err < 0) {
 180        return err;
 181    }
 182    return size;
 183}
 184
 185/**
 186 * @brief Reads multiple directory entries in one rush.
 187 *
 188 * Retrieves the requested (max. amount of) directory entries from the fs
 189 * driver. This function must only be called by the main IO thread (top half).
 190 * Internally this function call will be dispatched to a background IO thread
 191 * (bottom half) where it is eventually executed by the fs driver.
 192 *
 193 * @discussion Acquiring multiple directory entries in one rush from the fs
 194 * driver, instead of retrieving each directory entry individually, is very
 195 * beneficial from performance point of view. Because for every fs driver
 196 * request latency is added, which in practice could lead to overall
 197 * latencies of several hundred ms for reading all entries (of just a single
 198 * directory) if every directory entry was individually requested from fs
 199 * driver.
 200 *
 201 * @note You must @b ALWAYS call @c v9fs_free_dirents(entries) after calling
 202 * v9fs_co_readdir_many(), both on success and on error cases of this
 203 * function, to avoid memory leaks once @p entries are no longer needed.
 204 *
 205 * @param pdu - the causing 9p (T_readdir) client request
 206 * @param fidp - already opened directory where readdir shall be performed on
 207 * @param entries - output for directory entries (must not be NULL)
 208 * @param offset - initial position inside the directory the function shall
 209 *                 seek to before retrieving the directory entries
 210 * @param maxsize - maximum result message body size (in bytes)
 211 * @param dostat - whether a stat() should be performed and returned for
 212 *                 each directory entry
 213 * @returns resulting response message body size (in bytes) on success,
 214 *          negative error code otherwise
 215 */
/* See the doc comment above for the full contract of this function. */
int coroutine_fn v9fs_co_readdir_many(V9fsPDU *pdu, V9fsFidState *fidp,
                                      struct V9fsDirEnt **entries,
                                      off_t offset, int32_t maxsize,
                                      bool dostat)
{
    int err = 0;

    /* bail out early if the client already flushed this request */
    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    /* dispatch the actual work to a background I/O thread */
    v9fs_co_run_in_worker({
        err = do_readdir_many(pdu, fidp, entries, offset, maxsize, dostat);
    });
    return err;
}
 231
 232off_t v9fs_co_telldir(V9fsPDU *pdu, V9fsFidState *fidp)
 233{
 234    off_t err;
 235    V9fsState *s = pdu->s;
 236
 237    if (v9fs_request_cancelled(pdu)) {
 238        return -EINTR;
 239    }
 240    v9fs_co_run_in_worker(
 241        {
 242            err = s->ops->telldir(&s->ctx, &fidp->fs);
 243            if (err < 0) {
 244                err = -errno;
 245            }
 246        });
 247    return err;
 248}
 249
 250void coroutine_fn v9fs_co_seekdir(V9fsPDU *pdu, V9fsFidState *fidp,
 251                                  off_t offset)
 252{
 253    V9fsState *s = pdu->s;
 254    if (v9fs_request_cancelled(pdu)) {
 255        return;
 256    }
 257    v9fs_co_run_in_worker(
 258        {
 259            s->ops->seekdir(&s->ctx, &fidp->fs, offset);
 260        });
 261}
 262
 263void coroutine_fn v9fs_co_rewinddir(V9fsPDU *pdu, V9fsFidState *fidp)
 264{
 265    V9fsState *s = pdu->s;
 266    if (v9fs_request_cancelled(pdu)) {
 267        return;
 268    }
 269    v9fs_co_run_in_worker(
 270        {
 271            s->ops->rewinddir(&s->ctx, &fidp->fs);
 272        });
 273}
 274
/*
 * Creates directory @name inside the directory identified by @fidp, owned
 * by @uid/@gid with permissions @mode, and returns the stat() result of
 * the freshly created directory in @stbuf.
 *
 * Returns 0 on success, -EINTR if the request was cancelled, or a negative
 * errno style error code on failure.
 */
int coroutine_fn v9fs_co_mkdir(V9fsPDU *pdu, V9fsFidState *fidp,
                               V9fsString *name, mode_t mode, uid_t uid,
                               gid_t gid, struct stat *stbuf)
{
    int err;
    FsCred cred;
    V9fsPath path;
    V9fsState *s = pdu->s;

    if (v9fs_request_cancelled(pdu)) {
        return -EINTR;
    }
    cred_init(&cred);
    cred.fc_mode = mode;
    cred.fc_uid = uid;
    cred.fc_gid = gid;
    /*
     * NOTE(review): presumably the path read lock guards fidp->path against
     * concurrent renames while we use it below — confirm against the lock's
     * definition.
     */
    v9fs_path_read_lock(s);
    v9fs_co_run_in_worker(
        {
            err = s->ops->mkdir(&s->ctx, &fidp->path, name->data,  &cred);
            if (err < 0) {
                /* errno must be read on the worker thread, right after
                 * the failing driver call */
                err = -errno;
            } else {
                /* resolve the new directory's path so we can stat it */
                v9fs_path_init(&path);
                err = v9fs_name_to_path(s, &fidp->path, name->data, &path);
                if (!err) {
                    err = s->ops->lstat(&s->ctx, &path, stbuf);
                    if (err < 0) {
                        err = -errno;
                    }
                }
                v9fs_path_free(&path);
            }
        });
    v9fs_path_unlock(s);
    return err;
}
 312
 313int coroutine_fn v9fs_co_opendir(V9fsPDU *pdu, V9fsFidState *fidp)
 314{
 315    int err;
 316    V9fsState *s = pdu->s;
 317
 318    if (v9fs_request_cancelled(pdu)) {
 319        return -EINTR;
 320    }
 321    v9fs_path_read_lock(s);
 322    v9fs_co_run_in_worker(
 323        {
 324            err = s->ops->opendir(&s->ctx, &fidp->path, &fidp->fs);
 325            if (err < 0) {
 326                err = -errno;
 327            } else {
 328                err = 0;
 329            }
 330        });
 331    v9fs_path_unlock(s);
 332    if (!err) {
 333        total_open_fd++;
 334        if (total_open_fd > open_fd_hw) {
 335            v9fs_reclaim_fd(pdu);
 336        }
 337    }
 338    return err;
 339}
 340
 341int coroutine_fn v9fs_co_closedir(V9fsPDU *pdu, V9fsFidOpenState *fs)
 342{
 343    int err;
 344    V9fsState *s = pdu->s;
 345
 346    if (v9fs_request_cancelled(pdu)) {
 347        return -EINTR;
 348    }
 349    v9fs_co_run_in_worker(
 350        {
 351            err = s->ops->closedir(&s->ctx, fs);
 352            if (err < 0) {
 353                err = -errno;
 354            }
 355        });
 356    if (!err) {
 357        total_open_fd--;
 358    }
 359    return err;
 360}
 361