qemu/tools/virtiofsd/fuse_lowlevel.c
   1/*
   2 * FUSE: Filesystem in Userspace
   3 * Copyright (C) 2001-2007  Miklos Szeredi <miklos@szeredi.hu>
   4 *
   5 * Implementation of (most of) the low-level FUSE API. The session loop
   6 * functions are implemented in separate files.
   7 *
   8 * This program can be distributed under the terms of the GNU LGPLv2.
   9 * See the file COPYING.LIB
  10 */
  11
  12#include "qemu/osdep.h"
  13#include "fuse_i.h"
  14#include "standard-headers/linux/fuse.h"
  15#include "fuse_misc.h"
  16#include "fuse_opt.h"
  17#include "fuse_virtio.h"
  18
  19#include <sys/file.h>
  20
  21#define THREAD_POOL_SIZE 0
  22
  23#define OFFSET_MAX 0x7fffffffffffffffLL
  24
  25struct fuse_pollhandle {
  26    uint64_t kh;
  27    struct fuse_session *se;
  28};
  29
  30static size_t pagesize;
  31
  32static __attribute__((constructor)) void fuse_ll_init_pagesize(void)
  33{
  34    pagesize = getpagesize();
  35}
  36
  37static void convert_stat(const struct stat *stbuf, struct fuse_attr *attr)
  38{
  39    *attr = (struct fuse_attr){
  40        .ino = stbuf->st_ino,
  41        .mode = stbuf->st_mode,
  42        .nlink = stbuf->st_nlink,
  43        .uid = stbuf->st_uid,
  44        .gid = stbuf->st_gid,
  45        .rdev = stbuf->st_rdev,
  46        .size = stbuf->st_size,
  47        .blksize = stbuf->st_blksize,
  48        .blocks = stbuf->st_blocks,
  49        .atime = stbuf->st_atime,
  50        .mtime = stbuf->st_mtime,
  51        .ctime = stbuf->st_ctime,
  52        .atimensec = ST_ATIM_NSEC(stbuf),
  53        .mtimensec = ST_MTIM_NSEC(stbuf),
  54        .ctimensec = ST_CTIM_NSEC(stbuf),
  55    };
  56}
  57
  58static void convert_attr(const struct fuse_setattr_in *attr, struct stat *stbuf)
  59{
  60    stbuf->st_mode = attr->mode;
  61    stbuf->st_uid = attr->uid;
  62    stbuf->st_gid = attr->gid;
  63    stbuf->st_size = attr->size;
  64    stbuf->st_atime = attr->atime;
  65    stbuf->st_mtime = attr->mtime;
  66    stbuf->st_ctime = attr->ctime;
  67    ST_ATIM_NSEC_SET(stbuf, attr->atimensec);
  68    ST_MTIM_NSEC_SET(stbuf, attr->mtimensec);
  69    ST_CTIM_NSEC_SET(stbuf, attr->ctimensec);
  70}
  71
  72static size_t iov_length(const struct iovec *iov, size_t count)
  73{
  74    size_t seg;
  75    size_t ret = 0;
  76
  77    for (seg = 0; seg < count; seg++) {
  78        ret += iov[seg].iov_len;
  79    }
  80    return ret;
  81}
  82
  83static void list_init_req(struct fuse_req *req)
  84{
  85    req->next = req;
  86    req->prev = req;
  87}
  88
  89static void list_del_req(struct fuse_req *req)
  90{
  91    struct fuse_req *prev = req->prev;
  92    struct fuse_req *next = req->next;
  93    prev->next = next;
  94    next->prev = prev;
  95}
  96
  97static void list_add_req(struct fuse_req *req, struct fuse_req *next)
  98{
  99    struct fuse_req *prev = next->prev;
 100    req->next = next;
 101    req->prev = prev;
 102    prev->next = req;
 103    next->prev = req;
 104}
 105
 106static void destroy_req(fuse_req_t req)
 107{
 108    pthread_mutex_destroy(&req->lock);
 109    g_free(req);
 110}
 111
 112void fuse_free_req(fuse_req_t req)
 113{
 114    int ctr;
 115    struct fuse_session *se = req->se;
 116
 117    pthread_mutex_lock(&se->lock);
 118    req->u.ni.func = NULL;
 119    req->u.ni.data = NULL;
 120    list_del_req(req);
 121    ctr = --req->ctr;
 122    req->ch = NULL;
 123    pthread_mutex_unlock(&se->lock);
 124    if (!ctr) {
 125        destroy_req(req);
 126    }
 127}
 128
 129static struct fuse_req *fuse_ll_alloc_req(struct fuse_session *se)
 130{
 131    struct fuse_req *req;
 132
 133    req = g_try_new0(struct fuse_req, 1);
 134    if (req == NULL) {
 135        fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate request\n");
 136    } else {
 137        req->se = se;
 138        req->ctr = 1;
 139        list_init_req(req);
 140        fuse_mutex_init(&req->lock);
 141    }
 142
 143    return req;
 144}
 145
 146/* Send data. If *ch* is NULL, send via session master fd */
 147static int fuse_send_msg(struct fuse_session *se, struct fuse_chan *ch,
 148                         struct iovec *iov, int count)
 149{
 150    struct fuse_out_header *out = iov[0].iov_base;
 151
 152    out->len = iov_length(iov, count);
 153    if (out->unique == 0) {
 154        fuse_log(FUSE_LOG_DEBUG, "NOTIFY: code=%d length=%u\n", out->error,
 155                 out->len);
 156    } else if (out->error) {
 157        fuse_log(FUSE_LOG_DEBUG,
 158                 "   unique: %llu, error: %i (%s), outsize: %i\n",
 159                 (unsigned long long)out->unique, out->error,
 160                 strerror(-out->error), out->len);
 161    } else {
 162        fuse_log(FUSE_LOG_DEBUG, "   unique: %llu, success, outsize: %i\n",
 163                 (unsigned long long)out->unique, out->len);
 164    }
 165
 166    if (fuse_lowlevel_is_virtio(se)) {
 167        return virtio_send_msg(se, ch, iov, count);
 168    }
 169
 170    abort(); /* virtio should have taken it before here */
 171    return 0;
 172}
 173
 174
 175int fuse_send_reply_iov_nofree(fuse_req_t req, int error, struct iovec *iov,
 176                               int count)
 177{
 178    struct fuse_out_header out = {
 179        .unique = req->unique,
 180        .error = error,
 181    };
 182
 183    if (error <= -1000 || error > 0) {
 184        fuse_log(FUSE_LOG_ERR, "fuse: bad error value: %i\n", error);
 185        out.error = -ERANGE;
 186    }
 187
 188    iov[0].iov_base = &out;
 189    iov[0].iov_len = sizeof(struct fuse_out_header);
 190
 191    return fuse_send_msg(req->se, req->ch, iov, count);
 192}
 193
 194static int send_reply_iov(fuse_req_t req, int error, struct iovec *iov,
 195                          int count)
 196{
 197    int res;
 198
 199    res = fuse_send_reply_iov_nofree(req, error, iov, count);
 200    fuse_free_req(req);
 201    return res;
 202}
 203
 204static int send_reply(fuse_req_t req, int error, const void *arg,
 205                      size_t argsize)
 206{
 207    struct iovec iov[2];
 208    int count = 1;
 209    if (argsize) {
 210        iov[1].iov_base = (void *)arg;
 211        iov[1].iov_len = argsize;
 212        count++;
 213    }
 214    return send_reply_iov(req, error, iov, count);
 215}
 216
 217int fuse_reply_iov(fuse_req_t req, const struct iovec *iov, int count)
 218{
 219    int res;
 220    g_autofree struct iovec *padded_iov = NULL;
 221
 222    padded_iov = g_try_new(struct iovec, count + 1);
 223    if (padded_iov == NULL) {
 224        return fuse_reply_err(req, ENOMEM);
 225    }
 226
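    /*
     * padded_iov[0] is left untouched here; fuse_send_reply_iov_nofree()
     * points it at the fuse_out_header before sending
     */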
 227    memcpy(padded_iov + 1, iov, count * sizeof(struct iovec));
 228    count++;
 229
 230    res = send_reply_iov(req, 0, padded_iov, count);
 231
 232    return res;
 233}
 234
 235
 236/*
 237 * `buf` is allowed to be empty so that the proper size may be
 238 * allocated by the caller
 239 */
 240size_t fuse_add_direntry(fuse_req_t req, char *buf, size_t bufsize,
 241                         const char *name, const struct stat *stbuf, off_t off)
 242{
 243    (void)req;
 244    size_t namelen;
 245    size_t entlen;
 246    size_t entlen_padded;
 247    struct fuse_dirent *dirent;
 248
 249    namelen = strlen(name);
 250    entlen = FUSE_NAME_OFFSET + namelen;
 251    entlen_padded = FUSE_DIRENT_ALIGN(entlen);
 252
 253    if ((buf == NULL) || (entlen_padded > bufsize)) {
 254        return entlen_padded;
 255    }
 256
 257    dirent = (struct fuse_dirent *)buf;
 258    dirent->ino = stbuf->st_ino;
 259    dirent->off = off;
 260    dirent->namelen = namelen;
 261    dirent->type = (stbuf->st_mode & S_IFMT) >> 12;
 262    memcpy(dirent->name, name, namelen);
 263    memset(dirent->name + namelen, 0, entlen_padded - entlen);
 264
 265    return entlen_padded;
 266}
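
/*
 * A minimal usage sketch (not part of the original file): a readdir
 * handler typically calls fuse_add_direntry() twice, first with a NULL
 * buffer to learn the padded entry size, then again to serialize the
 * entry.  The helper name example_emit_dirent() is hypothetical.
 */
static size_t example_emit_dirent(fuse_req_t req, char *buf, size_t bufsize,
                                  const char *name, const struct stat *stbuf,
                                  off_t next_off)
{
    /* With buf == NULL this only reports the space the entry would need */
    size_t entsize = fuse_add_direntry(req, NULL, 0, name, stbuf, next_off);

    if (entsize > bufsize) {
        return 0; /* no room left; the caller stops filling its buffer */
    }

    /* Second call actually writes the fuse_dirent into buf */
    return fuse_add_direntry(req, buf, bufsize, name, stbuf, next_off);
}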
 267
 268static void convert_statfs(const struct statvfs *stbuf,
 269                           struct fuse_kstatfs *kstatfs)
 270{
 271    *kstatfs = (struct fuse_kstatfs){
 272        .bsize = stbuf->f_bsize,
 273        .frsize = stbuf->f_frsize,
 274        .blocks = stbuf->f_blocks,
 275        .bfree = stbuf->f_bfree,
 276        .bavail = stbuf->f_bavail,
 277        .files = stbuf->f_files,
 278        .ffree = stbuf->f_ffree,
 279        .namelen = stbuf->f_namemax,
 280    };
 281}
 282
 283static int send_reply_ok(fuse_req_t req, const void *arg, size_t argsize)
 284{
 285    return send_reply(req, 0, arg, argsize);
 286}
 287
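/*
 * `err` is a positive errno value (e.g. ENOENT); send_reply() negates it
 * into the negative error code carried in fuse_out_header.
 */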
 288int fuse_reply_err(fuse_req_t req, int err)
 289{
 290    return send_reply(req, -err, NULL, 0);
 291}
 292
 293void fuse_reply_none(fuse_req_t req)
 294{
 295    fuse_free_req(req);
 296}
 297
 298static unsigned long calc_timeout_sec(double t)
 299{
 300    if (t > (double)ULONG_MAX) {
 301        return ULONG_MAX;
 302    } else if (t < 0.0) {
 303        return 0;
 304    } else {
 305        return (unsigned long)t;
 306    }
 307}
 308
 309static unsigned int calc_timeout_nsec(double t)
 310{
 311    double f = t - (double)calc_timeout_sec(t);
 312    if (f < 0.0) {
 313        return 0;
 314    } else if (f >= 0.999999999) {
 315        return 999999999;
 316    } else {
 317        return (unsigned int)(f * 1.0e9);
 318    }
 319}
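
/*
 * Together, calc_timeout_sec() and calc_timeout_nsec() split a double
 * timeout into the sec/nsec pair carried in the reply: 1.5 becomes
 * sec = 1, nsec = 500000000; negative values clamp to 0 and values above
 * ULONG_MAX clamp to ULONG_MAX seconds.
 */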
 320
 321static void fill_entry(struct fuse_entry_out *arg,
 322                       const struct fuse_entry_param *e)
 323{
 324    *arg = (struct fuse_entry_out){
 325        .nodeid = e->ino,
 326        .generation = e->generation,
 327        .entry_valid = calc_timeout_sec(e->entry_timeout),
 328        .entry_valid_nsec = calc_timeout_nsec(e->entry_timeout),
 329        .attr_valid = calc_timeout_sec(e->attr_timeout),
 330        .attr_valid_nsec = calc_timeout_nsec(e->attr_timeout),
 331    };
 332    convert_stat(&e->attr, &arg->attr);
 333
 334    arg->attr.flags = e->attr_flags;
 335}
 336
 337/*
 338 * `buf` is allowed to be empty so that the proper size may be
 339 * allocated by the caller
 340 */
 341size_t fuse_add_direntry_plus(fuse_req_t req, char *buf, size_t bufsize,
 342                              const char *name,
 343                              const struct fuse_entry_param *e, off_t off)
 344{
 345    (void)req;
 346    size_t namelen;
 347    size_t entlen;
 348    size_t entlen_padded;
 349
 350    namelen = strlen(name);
 351    entlen = FUSE_NAME_OFFSET_DIRENTPLUS + namelen;
 352    entlen_padded = FUSE_DIRENT_ALIGN(entlen);
 353    if ((buf == NULL) || (entlen_padded > bufsize)) {
 354        return entlen_padded;
 355    }
 356
 357    struct fuse_direntplus *dp = (struct fuse_direntplus *)buf;
 358    memset(&dp->entry_out, 0, sizeof(dp->entry_out));
 359    fill_entry(&dp->entry_out, e);
 360
 361    struct fuse_dirent *dirent = &dp->dirent;
 362    *dirent = (struct fuse_dirent){
 363        .ino = e->attr.st_ino,
 364        .off = off,
 365        .namelen = namelen,
 366        .type = (e->attr.st_mode & S_IFMT) >> 12,
 367    };
 368    memcpy(dirent->name, name, namelen);
 369    memset(dirent->name + namelen, 0, entlen_padded - entlen);
 370
 371    return entlen_padded;
 372}
 373
 374static void fill_open(struct fuse_open_out *arg, const struct fuse_file_info *f)
 375{
 376    arg->fh = f->fh;
 377    if (f->direct_io) {
 378        arg->open_flags |= FOPEN_DIRECT_IO;
 379    }
 380    if (f->keep_cache) {
 381        arg->open_flags |= FOPEN_KEEP_CACHE;
 382    }
 383    if (f->cache_readdir) {
 384        arg->open_flags |= FOPEN_CACHE_DIR;
 385    }
 386    if (f->nonseekable) {
 387        arg->open_flags |= FOPEN_NONSEEKABLE;
 388    }
 389}
 390
 391int fuse_reply_entry(fuse_req_t req, const struct fuse_entry_param *e)
 392{
 393    struct fuse_entry_out arg;
 394    size_t size = sizeof(arg);
 395
 396    memset(&arg, 0, sizeof(arg));
 397    fill_entry(&arg, e);
 398    return send_reply_ok(req, &arg, size);
 399}
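
/*
 * A minimal sketch (not part of the original file) of a .lookup handler
 * replying through fuse_reply_entry().  example_lookup() and the attribute
 * values it fills in are purely illustrative; a real filesystem would stat
 * the backing object instead.
 */
static void example_lookup(fuse_req_t req, fuse_ino_t parent, const char *name)
{
    struct fuse_entry_param e;

    (void)parent;
    (void)name;
    memset(&e, 0, sizeof(e));
    e.ino = 2;                  /* nodeid the kernel will use for this entry */
    e.generation = 0;
    e.attr.st_ino = e.ino;
    e.attr.st_mode = S_IFREG | 0644;
    e.attr.st_nlink = 1;
    e.attr_timeout = 1.0;       /* how long the kernel may cache the attrs */
    e.entry_timeout = 1.0;      /* how long it may cache the name lookup */
    fuse_reply_entry(req, &e);
}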
 400
 401int fuse_reply_create(fuse_req_t req, const struct fuse_entry_param *e,
 402                      const struct fuse_file_info *f)
 403{
 404    char buf[sizeof(struct fuse_entry_out) + sizeof(struct fuse_open_out)];
 405    size_t entrysize = sizeof(struct fuse_entry_out);
 406    struct fuse_entry_out *earg = (struct fuse_entry_out *)buf;
 407    struct fuse_open_out *oarg = (struct fuse_open_out *)(buf + entrysize);
 408
 409    memset(buf, 0, sizeof(buf));
 410    fill_entry(earg, e);
 411    fill_open(oarg, f);
 412    return send_reply_ok(req, buf, entrysize + sizeof(struct fuse_open_out));
 413}
 414
 415int fuse_reply_attr(fuse_req_t req, const struct stat *attr,
 416                    double attr_timeout)
 417{
 418    struct fuse_attr_out arg;
 419    size_t size = sizeof(arg);
 420
 421    memset(&arg, 0, sizeof(arg));
 422    arg.attr_valid = calc_timeout_sec(attr_timeout);
 423    arg.attr_valid_nsec = calc_timeout_nsec(attr_timeout);
 424    convert_stat(attr, &arg.attr);
 425
 426    return send_reply_ok(req, &arg, size);
 427}
 428
 429int fuse_reply_readlink(fuse_req_t req, const char *linkname)
 430{
 431    return send_reply_ok(req, linkname, strlen(linkname));
 432}
 433
 434int fuse_reply_open(fuse_req_t req, const struct fuse_file_info *f)
 435{
 436    struct fuse_open_out arg;
 437
 438    memset(&arg, 0, sizeof(arg));
 439    fill_open(&arg, f);
 440    return send_reply_ok(req, &arg, sizeof(arg));
 441}
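
/*
 * A minimal sketch (not part of the original file) of a .open handler: the
 * fuse_file_info flags set here are translated into FOPEN_* bits by
 * fill_open() above.  example_open() and the literal handle value are
 * hypothetical.
 */
static void example_open(fuse_req_t req, fuse_ino_t ino,
                         struct fuse_file_info *fi)
{
    (void)ino;
    fi->fh = 42;        /* whatever handle the backend wants to see again */
    fi->direct_io = 1;  /* becomes FOPEN_DIRECT_IO in fuse_open_out */
    fi->keep_cache = 0; /* FOPEN_KEEP_CACHE stays clear */
    fuse_reply_open(req, fi);
}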
 442
 443int fuse_reply_write(fuse_req_t req, size_t count)
 444{
 445    struct fuse_write_out arg;
 446
 447    memset(&arg, 0, sizeof(arg));
 448    arg.size = count;
 449
 450    return send_reply_ok(req, &arg, sizeof(arg));
 451}
 452
 453int fuse_reply_buf(fuse_req_t req, const char *buf, size_t size)
 454{
 455    return send_reply_ok(req, buf, size);
 456}
 457
 458static int fuse_send_data_iov_fallback(struct fuse_session *se,
 459                                       struct fuse_chan *ch, struct iovec *iov,
 460                                       int iov_count, struct fuse_bufvec *buf,
 461                                       size_t len)
 462{
 463    /* Optimize common case */
 464    if (buf->count == 1 && buf->idx == 0 && buf->off == 0 &&
 465        !(buf->buf[0].flags & FUSE_BUF_IS_FD)) {
 466        /*
 467         * FIXME: also avoid memory copy if there are multiple buffers
 468         * but none of them contain an fd
 469         */
 470
 471        iov[iov_count].iov_base = buf->buf[0].mem;
 472        iov[iov_count].iov_len = len;
 473        iov_count++;
 474        return fuse_send_msg(se, ch, iov, iov_count);
 475    }
 476
 477    if (fuse_lowlevel_is_virtio(se) && buf->count == 1 &&
 478        buf->buf[0].flags == (FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK)) {
 479        return virtio_send_data_iov(se, ch, iov, iov_count, buf, len);
 480    }
 481
 482    abort(); /* Will have taken vhost path */
 483    return 0;
 484}
 485
 486static int fuse_send_data_iov(struct fuse_session *se, struct fuse_chan *ch,
 487                              struct iovec *iov, int iov_count,
 488                              struct fuse_bufvec *buf)
 489{
 490    size_t len = fuse_buf_size(buf);
 491
 492    return fuse_send_data_iov_fallback(se, ch, iov, iov_count, buf, len);
 493}
 494
 495int fuse_reply_data(fuse_req_t req, struct fuse_bufvec *bufv)
 496{
 497    struct iovec iov[2];
 498    struct fuse_out_header out = {
 499        .unique = req->unique,
 500    };
 501    int res;
 502
 503    iov[0].iov_base = &out;
 504    iov[0].iov_len = sizeof(struct fuse_out_header);
 505
 506    res = fuse_send_data_iov(req->se, req->ch, iov, 1, bufv);
 507    if (res <= 0) {
 508        fuse_free_req(req);
 509        return res;
 510    } else {
 511        return fuse_reply_err(req, res);
 512    }
 513}
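
/*
 * A minimal sketch (not part of the original file) of a .read handler that
 * hands an fd-backed buffer to fuse_reply_data(), i.e. the
 * FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK case handled above.  example_read() is
 * hypothetical and assumes fi->fh holds an open file descriptor.
 */
static void example_read(fuse_req_t req, fuse_ino_t ino, size_t size,
                         off_t offset, struct fuse_file_info *fi)
{
    struct fuse_bufvec buf = FUSE_BUFVEC_INIT(size);

    (void)ino;
    buf.buf[0].flags = FUSE_BUF_IS_FD | FUSE_BUF_FD_SEEK;
    buf.buf[0].fd = fi->fh;   /* data is read from this fd ... */
    buf.buf[0].pos = offset;  /* ... at this offset, by the send path */
    fuse_reply_data(req, &buf);
}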
 514
 515int fuse_reply_statfs(fuse_req_t req, const struct statvfs *stbuf)
 516{
 517    struct fuse_statfs_out arg;
 518    size_t size = sizeof(arg);
 519
 520    memset(&arg, 0, sizeof(arg));
 521    convert_statfs(stbuf, &arg.st);
 522
 523    return send_reply_ok(req, &arg, size);
 524}
 525
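/*
 * Answers a getxattr/listxattr size probe: when the request was made with
 * size 0, the filesystem reports the space needed here instead of sending
 * the value with fuse_reply_buf().
 */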
 526int fuse_reply_xattr(fuse_req_t req, size_t count)
 527{
 528    struct fuse_getxattr_out arg;
 529
 530    memset(&arg, 0, sizeof(arg));
 531    arg.size = count;
 532
 533    return send_reply_ok(req, &arg, sizeof(arg));
 534}
 535
 536int fuse_reply_lock(fuse_req_t req, const struct flock *lock)
 537{
 538    struct fuse_lk_out arg;
 539
 540    memset(&arg, 0, sizeof(arg));
 541    arg.lk.type = lock->l_type;
 542    if (lock->l_type != F_UNLCK) {
 543        arg.lk.start = lock->l_start;
 544        if (lock->l_len == 0) {
 545            arg.lk.end = OFFSET_MAX;
 546        } else {
 547            arg.lk.end = lock->l_start + lock->l_len - 1;
 548        }
 549    }
 550    arg.lk.pid = lock->l_pid;
 551    return send_reply_ok(req, &arg, sizeof(arg));
 552}
 553
 554int fuse_reply_bmap(fuse_req_t req, uint64_t idx)
 555{
 556    struct fuse_bmap_out arg;
 557
 558    memset(&arg, 0, sizeof(arg));
 559    arg.block = idx;
 560
 561    return send_reply_ok(req, &arg, sizeof(arg));
 562}
 563
 564static struct fuse_ioctl_iovec *fuse_ioctl_iovec_copy(const struct iovec *iov,
 565                                                      size_t count)
 566{
 567    struct fuse_ioctl_iovec *fiov;
 568    size_t i;
 569
 570    fiov = g_try_new(struct fuse_ioctl_iovec, count);
 571    if (!fiov) {
 572        return NULL;
 573    }
 574
 575    for (i = 0; i < count; i++) {
 576        fiov[i].base = (uintptr_t)iov[i].iov_base;
 577        fiov[i].len = iov[i].iov_len;
 578    }
 579
 580    return fiov;
 581}
 582
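/*
 * Unrestricted ioctl retry: instead of answering with data, describe the
 * input/output memory ranges still needed; the kernel copies them and
 * reissues the ioctl (FUSE_IOCTL_RETRY).
 */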
 583int fuse_reply_ioctl_retry(fuse_req_t req, const struct iovec *in_iov,
 584                           size_t in_count, const struct iovec *out_iov,
 585                           size_t out_count)
 586{
 587    struct fuse_ioctl_out arg;
 588    g_autofree struct fuse_ioctl_iovec *in_fiov = NULL;
 589    g_autofree struct fuse_ioctl_iovec *out_fiov = NULL;
 590    struct iovec iov[4];
 591    size_t count = 1;
 592    int res;
 593
 594    memset(&arg, 0, sizeof(arg));
 595    arg.flags |= FUSE_IOCTL_RETRY;
 596    arg.in_iovs = in_count;
 597    arg.out_iovs = out_count;
 598    iov[count].iov_base = &arg;
 599    iov[count].iov_len = sizeof(arg);
 600    count++;
 601
 602    /* Can't handle non-compat 64bit ioctls on 32bit */
 603    if (sizeof(void *) == 4 && req->ioctl_64bit) {
 604        res = fuse_reply_err(req, EINVAL);
 605        return res;
 606    }
 607
 608    if (in_count) {
 609        in_fiov = fuse_ioctl_iovec_copy(in_iov, in_count);
 610        if (!in_fiov) {
 611            res = fuse_reply_err(req, ENOMEM);
 612            return res;
 613        }
 614
 615        iov[count].iov_base = (void *)in_fiov;
 616        iov[count].iov_len = sizeof(in_fiov[0]) * in_count;
 617        count++;
 618    }
 619    if (out_count) {
 620        out_fiov = fuse_ioctl_iovec_copy(out_iov, out_count);
 621        if (!out_fiov) {
 622            res = fuse_reply_err(req, ENOMEM);
 623            return res;
 624        }
 625
 626        iov[count].iov_base = (void *)out_fiov;
 627        iov[count].iov_len = sizeof(out_fiov[0]) * out_count;
 628        count++;
 629    }
 630
 631    res = send_reply_iov(req, 0, iov, count);
 632
 633    return res;
 634}
 635
 636int fuse_reply_ioctl(fuse_req_t req, int result, const void *buf, size_t size)
 637{
 638    struct fuse_ioctl_out arg;
 639    struct iovec iov[3];
 640    size_t count = 1;
 641
 642    memset(&arg, 0, sizeof(arg));
 643    arg.result = result;
 644    iov[count].iov_base = &arg;
 645    iov[count].iov_len = sizeof(arg);
 646    count++;
 647
 648    if (size) {
 649        iov[count].iov_base = (char *)buf;
 650        iov[count].iov_len = size;
 651        count++;
 652    }
 653
 654    return send_reply_iov(req, 0, iov, count);
 655}
 656
 657int fuse_reply_ioctl_iov(fuse_req_t req, int result, const struct iovec *iov,
 658                         int count)
 659{
 660    g_autofree struct iovec *padded_iov = NULL;
 661    struct fuse_ioctl_out arg;
 662    int res;
 663
 664    padded_iov = g_try_new(struct iovec, count + 2);
 665    if (padded_iov == NULL) {
 666        return fuse_reply_err(req, ENOMEM);
 667    }
 668
 669    memset(&arg, 0, sizeof(arg));
 670    arg.result = result;
 671    padded_iov[1].iov_base = &arg;
 672    padded_iov[1].iov_len = sizeof(arg);
 673
 674    memcpy(&padded_iov[2], iov, count * sizeof(struct iovec));
 675
 676    res = send_reply_iov(req, 0, padded_iov, count + 2);
 677
 678    return res;
 679}
 680
 681int fuse_reply_poll(fuse_req_t req, unsigned revents)
 682{
 683    struct fuse_poll_out arg;
 684
 685    memset(&arg, 0, sizeof(arg));
 686    arg.revents = revents;
 687
 688    return send_reply_ok(req, &arg, sizeof(arg));
 689}
 690
 691int fuse_reply_lseek(fuse_req_t req, off_t off)
 692{
 693    struct fuse_lseek_out arg;
 694
 695    memset(&arg, 0, sizeof(arg));
 696    arg.offset = off;
 697
 698    return send_reply_ok(req, &arg, sizeof(arg));
 699}
 700
 701static void do_lookup(fuse_req_t req, fuse_ino_t nodeid,
 702                      struct fuse_mbuf_iter *iter)
 703{
 704    const char *name = fuse_mbuf_iter_advance_str(iter);
 705    if (!name) {
 706        fuse_reply_err(req, EINVAL);
 707        return;
 708    }
 709
 710    if (req->se->op.lookup) {
 711        req->se->op.lookup(req, nodeid, name);
 712    } else {
 713        fuse_reply_err(req, ENOSYS);
 714    }
 715}
 716
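/* FORGET expects no reply message; handlers finish with fuse_reply_none() */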
 717static void do_forget(fuse_req_t req, fuse_ino_t nodeid,
 718                      struct fuse_mbuf_iter *iter)
 719{
 720    struct fuse_forget_in *arg;
 721
 722    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
 723    if (!arg) {
 724        fuse_reply_err(req, EINVAL);
 725        return;
 726    }
 727
 728    if (req->se->op.forget) {
 729        req->se->op.forget(req, nodeid, arg->nlookup);
 730    } else {
 731        fuse_reply_none(req);
 732    }
 733}
 734
 735static void do_batch_forget(fuse_req_t req, fuse_ino_t nodeid,
 736                            struct fuse_mbuf_iter *iter)
 737{
 738    struct fuse_batch_forget_in *arg;
 739    struct fuse_forget_data *forgets;
 740    size_t scount;
 741
 742    (void)nodeid;
 743
 744    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
 745    if (!arg) {
 746        fuse_reply_none(req);
 747        return;
 748    }
 749
 750    /*
 751     * Prevent integer overflow.  The compiler emits the following warning
 752     * unless we use the scount local variable:
 753     *
 754     * error: comparison is always false due to limited range of data type
 755     * [-Werror=type-limits]
 756     *
 757     * This may be true on 64-bit hosts but we need this check for 32-bit
 758     * hosts.
 759     */
 760    scount = arg->count;
 761    if (scount > SIZE_MAX / sizeof(forgets[0])) {
 762        fuse_reply_none(req);
 763        return;
 764    }
 765
 766    forgets = fuse_mbuf_iter_advance(iter, arg->count * sizeof(forgets[0]));
 767    if (!forgets) {
 768        fuse_reply_none(req);
 769        return;
 770    }
 771
 772    if (req->se->op.forget_multi) {
 773        req->se->op.forget_multi(req, arg->count, forgets);
 774    } else if (req->se->op.forget) {
 775        unsigned int i;
 776
 777        for (i = 0; i < arg->count; i++) {
 778            struct fuse_req *dummy_req;
 779
 780            dummy_req = fuse_ll_alloc_req(req->se);
 781            if (dummy_req == NULL) {
 782                break;
 783            }
 784
 785            dummy_req->unique = req->unique;
 786            dummy_req->ctx = req->ctx;
 787            dummy_req->ch = NULL;
 788
 789            req->se->op.forget(dummy_req, forgets[i].ino, forgets[i].nlookup);
 790        }
 791        fuse_reply_none(req);
 792    } else {
 793        fuse_reply_none(req);
 794    }
 795}
 796
 797static void do_getattr(fuse_req_t req, fuse_ino_t nodeid,
 798                       struct fuse_mbuf_iter *iter)
 799{
 800    struct fuse_file_info *fip = NULL;
 801    struct fuse_file_info fi;
 802
 803    struct fuse_getattr_in *arg;
 804
 805    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
 806    if (!arg) {
 807        fuse_reply_err(req, EINVAL);
 808        return;
 809    }
 810
 811    if (arg->getattr_flags & FUSE_GETATTR_FH) {
 812        memset(&fi, 0, sizeof(fi));
 813        fi.fh = arg->fh;
 814        fip = &fi;
 815    }
 816
 817    if (req->se->op.getattr) {
 818        req->se->op.getattr(req, nodeid, fip);
 819    } else {
 820        fuse_reply_err(req, ENOSYS);
 821    }
 822}
 823
 824static void do_setattr(fuse_req_t req, fuse_ino_t nodeid,
 825                       struct fuse_mbuf_iter *iter)
 826{
 827    if (req->se->op.setattr) {
 828        struct fuse_setattr_in *arg;
 829        struct fuse_file_info *fi = NULL;
 830        struct fuse_file_info fi_store;
 831        struct stat stbuf;
 832
 833        arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
 834        if (!arg) {
 835            fuse_reply_err(req, EINVAL);
 836            return;
 837        }
 838
 839        memset(&stbuf, 0, sizeof(stbuf));
 840        convert_attr(arg, &stbuf);
 841        if (arg->valid & FATTR_FH) {
 842            arg->valid &= ~FATTR_FH;
 843            memset(&fi_store, 0, sizeof(fi_store));
 844            fi = &fi_store;
 845            fi->fh = arg->fh;
 846        }
 847        arg->valid &= FUSE_SET_ATTR_MODE | FUSE_SET_ATTR_UID |
 848                      FUSE_SET_ATTR_GID | FUSE_SET_ATTR_SIZE |
 849                      FUSE_SET_ATTR_ATIME | FUSE_SET_ATTR_MTIME |
 850                      FUSE_SET_ATTR_ATIME_NOW | FUSE_SET_ATTR_MTIME_NOW |
 851                      FUSE_SET_ATTR_CTIME | FUSE_SET_ATTR_KILL_SUIDGID;
 852
 853        req->se->op.setattr(req, nodeid, &stbuf, arg->valid, fi);
 854    } else {
 855        fuse_reply_err(req, ENOSYS);
 856    }
 857}
 858
 859static void do_access(fuse_req_t req, fuse_ino_t nodeid,
 860                      struct fuse_mbuf_iter *iter)
 861{
 862    struct fuse_access_in *arg;
 863
 864    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
 865    if (!arg) {
 866        fuse_reply_err(req, EINVAL);
 867        return;
 868    }
 869
 870    if (req->se->op.access) {
 871        req->se->op.access(req, nodeid, arg->mask);
 872    } else {
 873        fuse_reply_err(req, ENOSYS);
 874    }
 875}
 876
 877static void do_readlink(fuse_req_t req, fuse_ino_t nodeid,
 878                        struct fuse_mbuf_iter *iter)
 879{
 880    (void)iter;
 881
 882    if (req->se->op.readlink) {
 883        req->se->op.readlink(req, nodeid);
 884    } else {
 885        fuse_reply_err(req, ENOSYS);
 886    }
 887}
 888
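/*
 * Security context payload layout, as consumed below: a fuse_secctx_header
 * giving the number of contexts, then (for the single context currently
 * accepted) a fuse_secctx with the context size, the NUL-terminated xattr
 * name, and finally that many bytes of context data.
 */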
 889static int parse_secctx_fill_req(fuse_req_t req, struct fuse_mbuf_iter *iter)
 890{
 891    struct fuse_secctx_header *fsecctx_header;
 892    struct fuse_secctx *fsecctx;
 893    const void *secctx;
 894    const char *name;
 895
 896    fsecctx_header = fuse_mbuf_iter_advance(iter, sizeof(*fsecctx_header));
 897    if (!fsecctx_header) {
 898        return -EINVAL;
 899    }
 900
 901    /*
 902     * At most one security context is currently supported. This may
 903     * change in the future.
 904     */
 905    if (fsecctx_header->nr_secctx > 1) {
 906        return -EINVAL;
 907    }
 908
 909    /* No security context sent. Maybe no LSM supports it */
 910    if (!fsecctx_header->nr_secctx) {
 911        return 0;
 912    }
 913
 914    fsecctx = fuse_mbuf_iter_advance(iter, sizeof(*fsecctx));
 915    if (!fsecctx) {
 916        return -EINVAL;
 917    }
 918
 919    /* A fuse_secctx with a zero-sized context is not expected */
 920    if (!fsecctx->size) {
 921        return -EINVAL;
 922    }
 923    name = fuse_mbuf_iter_advance_str(iter);
 924    if (!name) {
 925        return -EINVAL;
 926    }
 927
 928    secctx = fuse_mbuf_iter_advance(iter, fsecctx->size);
 929    if (!secctx) {
 930        return -EINVAL;
 931    }
 932
 933    req->secctx.name = name;
 934    req->secctx.ctx = secctx;
 935    req->secctx.ctxlen = fsecctx->size;
 936    return 0;
 937}
 938
 939static void do_mknod(fuse_req_t req, fuse_ino_t nodeid,
 940                     struct fuse_mbuf_iter *iter)
 941{
 942    struct fuse_mknod_in *arg;
 943    const char *name;
 944    bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX;
 945    int err;
 946
 947    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
 948    name = fuse_mbuf_iter_advance_str(iter);
 949    if (!arg || !name) {
 950        fuse_reply_err(req, EINVAL);
 951        return;
 952    }
 953
 954    req->ctx.umask = arg->umask;
 955
 956    if (secctx_enabled) {
 957        err = parse_secctx_fill_req(req, iter);
 958        if (err) {
 959            fuse_reply_err(req, -err);
 960            return;
 961        }
 962    }
 963
 964    if (req->se->op.mknod) {
 965        req->se->op.mknod(req, nodeid, name, arg->mode, arg->rdev);
 966    } else {
 967        fuse_reply_err(req, ENOSYS);
 968    }
 969}
 970
 971static void do_mkdir(fuse_req_t req, fuse_ino_t nodeid,
 972                     struct fuse_mbuf_iter *iter)
 973{
 974    struct fuse_mkdir_in *arg;
 975    const char *name;
 976    bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX;
 977    int err;
 978
 979    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
 980    name = fuse_mbuf_iter_advance_str(iter);
 981    if (!arg || !name) {
 982        fuse_reply_err(req, EINVAL);
 983        return;
 984    }
 985
 986    req->ctx.umask = arg->umask;
 987
 988    if (secctx_enabled) {
 989        err = parse_secctx_fill_req(req, iter);
 990        if (err) {
 991            fuse_reply_err(req, -err);
 992            return;
 993        }
 994    }
 995
 996    if (req->se->op.mkdir) {
 997        req->se->op.mkdir(req, nodeid, name, arg->mode);
 998    } else {
 999        fuse_reply_err(req, ENOSYS);
1000    }
1001}
1002
1003static void do_unlink(fuse_req_t req, fuse_ino_t nodeid,
1004                      struct fuse_mbuf_iter *iter)
1005{
1006    const char *name = fuse_mbuf_iter_advance_str(iter);
1007
1008    if (!name) {
1009        fuse_reply_err(req, EINVAL);
1010        return;
1011    }
1012
1013    if (req->se->op.unlink) {
1014        req->se->op.unlink(req, nodeid, name);
1015    } else {
1016        fuse_reply_err(req, ENOSYS);
1017    }
1018}
1019
1020static void do_rmdir(fuse_req_t req, fuse_ino_t nodeid,
1021                     struct fuse_mbuf_iter *iter)
1022{
1023    const char *name = fuse_mbuf_iter_advance_str(iter);
1024
1025    if (!name) {
1026        fuse_reply_err(req, EINVAL);
1027        return;
1028    }
1029
1030    if (req->se->op.rmdir) {
1031        req->se->op.rmdir(req, nodeid, name);
1032    } else {
1033        fuse_reply_err(req, ENOSYS);
1034    }
1035}
1036
1037static void do_symlink(fuse_req_t req, fuse_ino_t nodeid,
1038                       struct fuse_mbuf_iter *iter)
1039{
1040    const char *name = fuse_mbuf_iter_advance_str(iter);
1041    const char *linkname = fuse_mbuf_iter_advance_str(iter);
1042    bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX;
1043    int err;
1044
1045    if (!name || !linkname) {
1046        fuse_reply_err(req, EINVAL);
1047        return;
1048    }
1049
1050    if (secctx_enabled) {
1051        err = parse_secctx_fill_req(req, iter);
1052        if (err) {
1053            fuse_reply_err(req, -err);
1054            return;
1055        }
1056    }
1057
1058    if (req->se->op.symlink) {
1059        req->se->op.symlink(req, linkname, nodeid, name);
1060    } else {
1061        fuse_reply_err(req, ENOSYS);
1062    }
1063}
1064
1065static void do_rename(fuse_req_t req, fuse_ino_t nodeid,
1066                      struct fuse_mbuf_iter *iter)
1067{
1068    struct fuse_rename_in *arg;
1069    const char *oldname;
1070    const char *newname;
1071
1072    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1073    oldname = fuse_mbuf_iter_advance_str(iter);
1074    newname = fuse_mbuf_iter_advance_str(iter);
1075    if (!arg || !oldname || !newname) {
1076        fuse_reply_err(req, EINVAL);
1077        return;
1078    }
1079
1080    if (req->se->op.rename) {
1081        req->se->op.rename(req, nodeid, oldname, arg->newdir, newname, 0);
1082    } else {
1083        fuse_reply_err(req, ENOSYS);
1084    }
1085}
1086
1087static void do_rename2(fuse_req_t req, fuse_ino_t nodeid,
1088                       struct fuse_mbuf_iter *iter)
1089{
1090    struct fuse_rename2_in *arg;
1091    const char *oldname;
1092    const char *newname;
1093
1094    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1095    oldname = fuse_mbuf_iter_advance_str(iter);
1096    newname = fuse_mbuf_iter_advance_str(iter);
1097    if (!arg || !oldname || !newname) {
1098        fuse_reply_err(req, EINVAL);
1099        return;
1100    }
1101
1102    if (req->se->op.rename) {
1103        req->se->op.rename(req, nodeid, oldname, arg->newdir, newname,
1104                           arg->flags);
1105    } else {
1106        fuse_reply_err(req, ENOSYS);
1107    }
1108}
1109
1110static void do_link(fuse_req_t req, fuse_ino_t nodeid,
1111                    struct fuse_mbuf_iter *iter)
1112{
1113    struct fuse_link_in *arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1114    const char *name = fuse_mbuf_iter_advance_str(iter);
1115
1116    if (!arg || !name) {
1117        fuse_reply_err(req, EINVAL);
1118        return;
1119    }
1120
1121    if (req->se->op.link) {
1122        req->se->op.link(req, arg->oldnodeid, nodeid, name);
1123    } else {
1124        fuse_reply_err(req, ENOSYS);
1125    }
1126}
1127
1128static void do_create(fuse_req_t req, fuse_ino_t nodeid,
1129                      struct fuse_mbuf_iter *iter)
1130{
1131    bool secctx_enabled = req->se->conn.want & FUSE_CAP_SECURITY_CTX;
1132
1133    if (req->se->op.create) {
1134        struct fuse_create_in *arg;
1135        struct fuse_file_info fi;
1136        const char *name;
1137
1138        arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1139        name = fuse_mbuf_iter_advance_str(iter);
1140        if (!arg || !name) {
1141            fuse_reply_err(req, EINVAL);
1142            return;
1143        }
1144
1145        if (secctx_enabled) {
1146            int err;
1147            err = parse_secctx_fill_req(req, iter);
1148            if (err) {
1149                fuse_reply_err(req, -err);
1150                return;
1151            }
1152        }
1153
1154        memset(&fi, 0, sizeof(fi));
1155        fi.flags = arg->flags;
1156        fi.kill_priv = arg->open_flags & FUSE_OPEN_KILL_SUIDGID;
1157
1158        req->ctx.umask = arg->umask;
1159
1160        req->se->op.create(req, nodeid, name, arg->mode, &fi);
1161    } else {
1162        fuse_reply_err(req, ENOSYS);
1163    }
1164}
1165
1166static void do_open(fuse_req_t req, fuse_ino_t nodeid,
1167                    struct fuse_mbuf_iter *iter)
1168{
1169    struct fuse_open_in *arg;
1170    struct fuse_file_info fi;
1171
1172    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1173    if (!arg) {
1174        fuse_reply_err(req, EINVAL);
1175        return;
1176    }
1177
1178    /* File creation is handled by do_create() or do_mknod() */
1179    if (arg->flags & (O_CREAT | O_TMPFILE)) {
1180        fuse_reply_err(req, EINVAL);
1181        return;
1182    }
1183
1184    memset(&fi, 0, sizeof(fi));
1185    fi.flags = arg->flags;
1186    fi.kill_priv = arg->open_flags & FUSE_OPEN_KILL_SUIDGID;
1187
1188    if (req->se->op.open) {
1189        req->se->op.open(req, nodeid, &fi);
1190    } else {
1191        fuse_reply_open(req, &fi);
1192    }
1193}
1194
1195static void do_read(fuse_req_t req, fuse_ino_t nodeid,
1196                    struct fuse_mbuf_iter *iter)
1197{
1198    if (req->se->op.read) {
1199        struct fuse_read_in *arg;
1200        struct fuse_file_info fi;
1201
1202        arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1203        if (!arg) {
1204            fuse_reply_err(req, EINVAL);
1205            return;
1206        }
1207
1208        memset(&fi, 0, sizeof(fi));
1209        fi.fh = arg->fh;
1210        fi.lock_owner = arg->lock_owner;
1211        fi.flags = arg->flags;
1212        req->se->op.read(req, nodeid, arg->size, arg->offset, &fi);
1213    } else {
1214        fuse_reply_err(req, ENOSYS);
1215    }
1216}
1217
1218static void do_write(fuse_req_t req, fuse_ino_t nodeid,
1219                     struct fuse_mbuf_iter *iter)
1220{
1221    struct fuse_write_in *arg;
1222    struct fuse_file_info fi;
1223    const char *param;
1224
1225    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1226    if (!arg) {
1227        fuse_reply_err(req, EINVAL);
1228        return;
1229    }
1230
1231    param = fuse_mbuf_iter_advance(iter, arg->size);
1232    if (!param) {
1233        fuse_reply_err(req, EINVAL);
1234        return;
1235    }
1236
1237    memset(&fi, 0, sizeof(fi));
1238    fi.fh = arg->fh;
1239    fi.writepage = (arg->write_flags & FUSE_WRITE_CACHE) != 0;
1240    fi.kill_priv = !!(arg->write_flags & FUSE_WRITE_KILL_PRIV);
1241
1242    fi.lock_owner = arg->lock_owner;
1243    fi.flags = arg->flags;
1244
1245    if (req->se->op.write) {
1246        req->se->op.write(req, nodeid, param, arg->size, arg->offset, &fi);
1247    } else {
1248        fuse_reply_err(req, ENOSYS);
1249    }
1250}
1251
1252static void do_write_buf(fuse_req_t req, fuse_ino_t nodeid,
1253                         struct fuse_mbuf_iter *iter, struct fuse_bufvec *ibufv)
1254{
1255    struct fuse_session *se = req->se;
1256    struct fuse_bufvec *pbufv = ibufv;
1257    struct fuse_bufvec tmpbufv = {
1258        .buf[0] = ibufv->buf[0],
1259        .count = 1,
1260    };
1261    struct fuse_write_in *arg;
1262    size_t arg_size = sizeof(*arg);
1263    struct fuse_file_info fi;
1264
1265    memset(&fi, 0, sizeof(fi));
1266
1267    arg = fuse_mbuf_iter_advance(iter, arg_size);
1268    if (!arg) {
1269        fuse_reply_err(req, EINVAL);
1270        return;
1271    }
1272
1273    fi.lock_owner = arg->lock_owner;
1274    fi.flags = arg->flags;
1275    fi.fh = arg->fh;
1276    fi.writepage = !!(arg->write_flags & FUSE_WRITE_CACHE);
1277    fi.kill_priv = !!(arg->write_flags & FUSE_WRITE_KILL_PRIV);
1278
1279    if (ibufv->count == 1) {
1280        assert(!(tmpbufv.buf[0].flags & FUSE_BUF_IS_FD));
1281        tmpbufv.buf[0].mem = ((char *)arg) + arg_size;
1282        tmpbufv.buf[0].size -= sizeof(struct fuse_in_header) + arg_size;
1283        pbufv = &tmpbufv;
1284    } else {
1285        /*
1286         * The input bufv contains the headers in the first element
1287         * and the data in the rest; we need to skip that first element.
1288         */
1289        ibufv->buf[0].size = 0;
1290    }
1291
1292    if (fuse_buf_size(pbufv) != arg->size) {
1293        fuse_log(FUSE_LOG_ERR,
1294                 "fuse: do_write_buf: buffer size doesn't match arg->size\n");
1295        fuse_reply_err(req, EIO);
1296        return;
1297    }
1298
1299    se->op.write_buf(req, nodeid, pbufv, arg->offset, &fi);
1300}
1301
1302static void do_flush(fuse_req_t req, fuse_ino_t nodeid,
1303                     struct fuse_mbuf_iter *iter)
1304{
1305    struct fuse_flush_in *arg;
1306    struct fuse_file_info fi;
1307
1308    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1309    if (!arg) {
1310        fuse_reply_err(req, EINVAL);
1311        return;
1312    }
1313
1314    memset(&fi, 0, sizeof(fi));
1315    fi.fh = arg->fh;
1316    fi.flush = 1;
1317    fi.lock_owner = arg->lock_owner;
1318
1319    if (req->se->op.flush) {
1320        req->se->op.flush(req, nodeid, &fi);
1321    } else {
1322        fuse_reply_err(req, ENOSYS);
1323    }
1324}
1325
1326static void do_release(fuse_req_t req, fuse_ino_t nodeid,
1327                       struct fuse_mbuf_iter *iter)
1328{
1329    struct fuse_release_in *arg;
1330    struct fuse_file_info fi;
1331
1332    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1333    if (!arg) {
1334        fuse_reply_err(req, EINVAL);
1335        return;
1336    }
1337
1338    memset(&fi, 0, sizeof(fi));
1339    fi.flags = arg->flags;
1340    fi.fh = arg->fh;
1341    fi.flush = (arg->release_flags & FUSE_RELEASE_FLUSH) ? 1 : 0;
1342    fi.lock_owner = arg->lock_owner;
1343
1344    if (arg->release_flags & FUSE_RELEASE_FLOCK_UNLOCK) {
1345        fi.flock_release = 1;
1346    }
1347
1348    if (req->se->op.release) {
1349        req->se->op.release(req, nodeid, &fi);
1350    } else {
1351        fuse_reply_err(req, 0);
1352    }
1353}
1354
1355static void do_fsync(fuse_req_t req, fuse_ino_t nodeid,
1356                     struct fuse_mbuf_iter *iter)
1357{
1358    struct fuse_fsync_in *arg;
1359    struct fuse_file_info fi;
1360    int datasync;
1361
1362    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1363    if (!arg) {
1364        fuse_reply_err(req, EINVAL);
1365        return;
1366    }
1367    datasync = arg->fsync_flags & 1;
1368
1369    memset(&fi, 0, sizeof(fi));
1370    fi.fh = arg->fh;
1371
1372    if (req->se->op.fsync) {
1373        if (fi.fh == (uint64_t)-1) {
1374            req->se->op.fsync(req, nodeid, datasync, NULL);
1375        } else {
1376            req->se->op.fsync(req, nodeid, datasync, &fi);
1377        }
1378    } else {
1379        fuse_reply_err(req, ENOSYS);
1380    }
1381}
1382
1383static void do_opendir(fuse_req_t req, fuse_ino_t nodeid,
1384                       struct fuse_mbuf_iter *iter)
1385{
1386    struct fuse_open_in *arg;
1387    struct fuse_file_info fi;
1388
1389    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1390    if (!arg) {
1391        fuse_reply_err(req, EINVAL);
1392        return;
1393    }
1394
1395    memset(&fi, 0, sizeof(fi));
1396    fi.flags = arg->flags;
1397
1398    if (req->se->op.opendir) {
1399        req->se->op.opendir(req, nodeid, &fi);
1400    } else {
1401        fuse_reply_open(req, &fi);
1402    }
1403}
1404
1405static void do_readdir(fuse_req_t req, fuse_ino_t nodeid,
1406                       struct fuse_mbuf_iter *iter)
1407{
1408    struct fuse_read_in *arg;
1409    struct fuse_file_info fi;
1410
1411    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1412    if (!arg) {
1413        fuse_reply_err(req, EINVAL);
1414        return;
1415    }
1416
1417    memset(&fi, 0, sizeof(fi));
1418    fi.fh = arg->fh;
1419
1420    if (req->se->op.readdir) {
1421        req->se->op.readdir(req, nodeid, arg->size, arg->offset, &fi);
1422    } else {
1423        fuse_reply_err(req, ENOSYS);
1424    }
1425}
1426
1427static void do_readdirplus(fuse_req_t req, fuse_ino_t nodeid,
1428                           struct fuse_mbuf_iter *iter)
1429{
1430    struct fuse_read_in *arg;
1431    struct fuse_file_info fi;
1432
1433    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1434    if (!arg) {
1435        fuse_reply_err(req, EINVAL);
1436        return;
1437    }
1438
1439    memset(&fi, 0, sizeof(fi));
1440    fi.fh = arg->fh;
1441
1442    if (req->se->op.readdirplus) {
1443        req->se->op.readdirplus(req, nodeid, arg->size, arg->offset, &fi);
1444    } else {
1445        fuse_reply_err(req, ENOSYS);
1446    }
1447}
1448
1449static void do_releasedir(fuse_req_t req, fuse_ino_t nodeid,
1450                          struct fuse_mbuf_iter *iter)
1451{
1452    struct fuse_release_in *arg;
1453    struct fuse_file_info fi;
1454
1455    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1456    if (!arg) {
1457        fuse_reply_err(req, EINVAL);
1458        return;
1459    }
1460
1461    memset(&fi, 0, sizeof(fi));
1462    fi.flags = arg->flags;
1463    fi.fh = arg->fh;
1464
1465    if (req->se->op.releasedir) {
1466        req->se->op.releasedir(req, nodeid, &fi);
1467    } else {
1468        fuse_reply_err(req, 0);
1469    }
1470}
1471
1472static void do_fsyncdir(fuse_req_t req, fuse_ino_t nodeid,
1473                        struct fuse_mbuf_iter *iter)
1474{
1475    struct fuse_fsync_in *arg;
1476    struct fuse_file_info fi;
1477    int datasync;
1478
1479    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1480    if (!arg) {
1481        fuse_reply_err(req, EINVAL);
1482        return;
1483    }
1484    datasync = arg->fsync_flags & 1;
1485
1486    memset(&fi, 0, sizeof(fi));
1487    fi.fh = arg->fh;
1488
1489    if (req->se->op.fsyncdir) {
1490        req->se->op.fsyncdir(req, nodeid, datasync, &fi);
1491    } else {
1492        fuse_reply_err(req, ENOSYS);
1493    }
1494}
1495
1496static void do_statfs(fuse_req_t req, fuse_ino_t nodeid,
1497                      struct fuse_mbuf_iter *iter)
1498{
1499    (void)nodeid;
1500    (void)iter;
1501
1502    if (req->se->op.statfs) {
1503        req->se->op.statfs(req, nodeid);
1504    } else {
1505        struct statvfs buf = {
1506            .f_namemax = 255,
1507            .f_bsize = 512,
1508        };
1509        fuse_reply_statfs(req, &buf);
1510    }
1511}
1512
1513static void do_setxattr(fuse_req_t req, fuse_ino_t nodeid,
1514                        struct fuse_mbuf_iter *iter)
1515{
1516    struct fuse_setxattr_in *arg;
1517    const char *name;
1518    const char *value;
1519    bool setxattr_ext = req->se->conn.want & FUSE_CAP_SETXATTR_EXT;
1520
1521    if (setxattr_ext) {
1522        arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1523    } else {
1524        arg = fuse_mbuf_iter_advance(iter, FUSE_COMPAT_SETXATTR_IN_SIZE);
1525    }
1526    name = fuse_mbuf_iter_advance_str(iter);
1527    if (!arg || !name) {
1528        fuse_reply_err(req, EINVAL);
1529        return;
1530    }
1531
1532    value = fuse_mbuf_iter_advance(iter, arg->size);
1533    if (!value) {
1534        fuse_reply_err(req, EINVAL);
1535        return;
1536    }
1537
1538    if (req->se->op.setxattr) {
1539        uint32_t setxattr_flags = setxattr_ext ? arg->setxattr_flags : 0;
1540        req->se->op.setxattr(req, nodeid, name, value, arg->size, arg->flags,
1541                             setxattr_flags);
1542    } else {
1543        fuse_reply_err(req, ENOSYS);
1544    }
1545}
1546
1547static void do_getxattr(fuse_req_t req, fuse_ino_t nodeid,
1548                        struct fuse_mbuf_iter *iter)
1549{
1550    struct fuse_getxattr_in *arg;
1551    const char *name;
1552
1553    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1554    name = fuse_mbuf_iter_advance_str(iter);
1555    if (!arg || !name) {
1556        fuse_reply_err(req, EINVAL);
1557        return;
1558    }
1559
1560    if (req->se->op.getxattr) {
1561        req->se->op.getxattr(req, nodeid, name, arg->size);
1562    } else {
1563        fuse_reply_err(req, ENOSYS);
1564    }
1565}
1566
1567static void do_listxattr(fuse_req_t req, fuse_ino_t nodeid,
1568                         struct fuse_mbuf_iter *iter)
1569{
1570    struct fuse_getxattr_in *arg;
1571
1572    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1573    if (!arg) {
1574        fuse_reply_err(req, EINVAL);
1575        return;
1576    }
1577
1578    if (req->se->op.listxattr) {
1579        req->se->op.listxattr(req, nodeid, arg->size);
1580    } else {
1581        fuse_reply_err(req, ENOSYS);
1582    }
1583}
1584
1585static void do_removexattr(fuse_req_t req, fuse_ino_t nodeid,
1586                           struct fuse_mbuf_iter *iter)
1587{
1588    const char *name = fuse_mbuf_iter_advance_str(iter);
1589
1590    if (!name) {
1591        fuse_reply_err(req, EINVAL);
1592        return;
1593    }
1594
1595    if (req->se->op.removexattr) {
1596        req->se->op.removexattr(req, nodeid, name);
1597    } else {
1598        fuse_reply_err(req, ENOSYS);
1599    }
1600}
1601
1602static void convert_fuse_file_lock(struct fuse_file_lock *fl,
1603                                   struct flock *flock)
1604{
1605    memset(flock, 0, sizeof(struct flock));
1606    flock->l_type = fl->type;
1607    flock->l_whence = SEEK_SET;
1608    flock->l_start = fl->start;
1609    if (fl->end == OFFSET_MAX) {
1610        flock->l_len = 0;
1611    } else {
1612        flock->l_len = fl->end - fl->start + 1;
1613    }
1614    flock->l_pid = fl->pid;
1615}
1616
1617static void do_getlk(fuse_req_t req, fuse_ino_t nodeid,
1618                     struct fuse_mbuf_iter *iter)
1619{
1620    struct fuse_lk_in *arg;
1621    struct fuse_file_info fi;
1622    struct flock flock;
1623
1624    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1625    if (!arg) {
1626        fuse_reply_err(req, EINVAL);
1627        return;
1628    }
1629
1630    memset(&fi, 0, sizeof(fi));
1631    fi.fh = arg->fh;
1632    fi.lock_owner = arg->owner;
1633
1634    convert_fuse_file_lock(&arg->lk, &flock);
1635    if (req->se->op.getlk) {
1636        req->se->op.getlk(req, nodeid, &fi, &flock);
1637    } else {
1638        fuse_reply_err(req, ENOSYS);
1639    }
1640}
1641
1642static void do_setlk_common(fuse_req_t req, fuse_ino_t nodeid,
1643                            struct fuse_mbuf_iter *iter, int sleep)
1644{
1645    struct fuse_lk_in *arg;
1646    struct fuse_file_info fi;
1647    struct flock flock;
1648
1649    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1650    if (!arg) {
1651        fuse_reply_err(req, EINVAL);
1652        return;
1653    }
1654
1655    memset(&fi, 0, sizeof(fi));
1656    fi.fh = arg->fh;
1657    fi.lock_owner = arg->owner;
1658
1659    if (arg->lk_flags & FUSE_LK_FLOCK) {
1660        int op = 0;
1661
1662        switch (arg->lk.type) {
1663        case F_RDLCK:
1664            op = LOCK_SH;
1665            break;
1666        case F_WRLCK:
1667            op = LOCK_EX;
1668            break;
1669        case F_UNLCK:
1670            op = LOCK_UN;
1671            break;
1672        }
1673        if (!sleep) {
1674            op |= LOCK_NB;
1675        }
1676
1677        if (req->se->op.flock) {
1678            req->se->op.flock(req, nodeid, &fi, op);
1679        } else {
1680            fuse_reply_err(req, ENOSYS);
1681        }
1682    } else {
1683        convert_fuse_file_lock(&arg->lk, &flock);
1684        if (req->se->op.setlk) {
1685            req->se->op.setlk(req, nodeid, &fi, &flock, sleep);
1686        } else {
1687            fuse_reply_err(req, ENOSYS);
1688        }
1689    }
1690}
1691
1692static void do_setlk(fuse_req_t req, fuse_ino_t nodeid,
1693                     struct fuse_mbuf_iter *iter)
1694{
1695    do_setlk_common(req, nodeid, iter, 0);
1696}
1697
1698static void do_setlkw(fuse_req_t req, fuse_ino_t nodeid,
1699                      struct fuse_mbuf_iter *iter)
1700{
1701    do_setlk_common(req, nodeid, iter, 1);
1702}
1703
1704static int find_interrupted(struct fuse_session *se, struct fuse_req *req)
1705{
1706    struct fuse_req *curr;
1707
1708    for (curr = se->list.next; curr != &se->list; curr = curr->next) {
1709        if (curr->unique == req->u.i.unique) {
1710            fuse_interrupt_func_t func;
1711            void *data;
1712
1713            curr->ctr++;
1714            pthread_mutex_unlock(&se->lock);
1715
1716            /* Ugh, ugly locking */
1717            pthread_mutex_lock(&curr->lock);
1718            pthread_mutex_lock(&se->lock);
1719            curr->interrupted = 1;
1720            func = curr->u.ni.func;
1721            data = curr->u.ni.data;
1722            pthread_mutex_unlock(&se->lock);
1723            if (func) {
1724                func(curr, data);
1725            }
1726            pthread_mutex_unlock(&curr->lock);
1727
1728            pthread_mutex_lock(&se->lock);
1729            curr->ctr--;
1730            if (!curr->ctr) {
1731                destroy_req(curr);
1732            }
1733
1734            return 1;
1735        }
1736    }
1737    for (curr = se->interrupts.next; curr != &se->interrupts;
1738         curr = curr->next) {
1739        if (curr->u.i.unique == req->u.i.unique) {
1740            return 1;
1741        }
1742    }
1743    return 0;
1744}
1745
1746static void do_interrupt(fuse_req_t req, fuse_ino_t nodeid,
1747                         struct fuse_mbuf_iter *iter)
1748{
1749    struct fuse_interrupt_in *arg;
1750    struct fuse_session *se = req->se;
1751
1752    (void)nodeid;
1753
1754    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1755    if (!arg) {
1756        fuse_reply_err(req, EINVAL);
1757        return;
1758    }
1759
1760    fuse_log(FUSE_LOG_DEBUG, "INTERRUPT: %llu\n",
1761             (unsigned long long)arg->unique);
1762
1763    req->u.i.unique = arg->unique;
1764
1765    pthread_mutex_lock(&se->lock);
1766    if (find_interrupted(se, req)) {
1767        destroy_req(req);
1768    } else {
1769        list_add_req(req, &se->interrupts);
1770    }
1771    pthread_mutex_unlock(&se->lock);
1772}
1773
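/*
 * See whether an INTERRUPT matching this request was already queued: if so,
 * mark the request interrupted and drop the queued entry.  Otherwise detach
 * and return one pending interrupt (if any) so the caller can deal with it.
 */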
1774static struct fuse_req *check_interrupt(struct fuse_session *se,
1775                                        struct fuse_req *req)
1776{
1777    struct fuse_req *curr;
1778
1779    for (curr = se->interrupts.next; curr != &se->interrupts;
1780         curr = curr->next) {
1781        if (curr->u.i.unique == req->unique) {
1782            req->interrupted = 1;
1783            list_del_req(curr);
1784            g_free(curr);
1785            return NULL;
1786        }
1787    }
1788    curr = se->interrupts.next;
1789    if (curr != &se->interrupts) {
1790        list_del_req(curr);
1791        list_init_req(curr);
1792        return curr;
1793    } else {
1794        return NULL;
1795    }
1796}
1797
1798static void do_bmap(fuse_req_t req, fuse_ino_t nodeid,
1799                    struct fuse_mbuf_iter *iter)
1800{
1801    struct fuse_bmap_in *arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1802
1803    if (!arg) {
1804        fuse_reply_err(req, EINVAL);
1805        return;
1806    }
1807
1808    if (req->se->op.bmap) {
1809        req->se->op.bmap(req, nodeid, arg->blocksize, arg->block);
1810    } else {
1811        fuse_reply_err(req, ENOSYS);
1812    }
1813}
1814
1815static void do_ioctl(fuse_req_t req, fuse_ino_t nodeid,
1816                     struct fuse_mbuf_iter *iter)
1817{
1818    struct fuse_ioctl_in *arg;
1819    unsigned int flags;
1820    void *in_buf = NULL;
1821    struct fuse_file_info fi;
1822
1823    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1824    if (!arg) {
1825        fuse_reply_err(req, EINVAL);
1826        return;
1827    }
1828
1829    flags = arg->flags;
1830    if (flags & FUSE_IOCTL_DIR && !(req->se->conn.want & FUSE_CAP_IOCTL_DIR)) {
1831        fuse_reply_err(req, ENOTTY);
1832        return;
1833    }
1834
1835    if (arg->in_size) {
1836        in_buf = fuse_mbuf_iter_advance(iter, arg->in_size);
1837        if (!in_buf) {
1838            fuse_reply_err(req, EINVAL);
1839            return;
1840        }
1841    }
1842
1843    memset(&fi, 0, sizeof(fi));
1844    fi.fh = arg->fh;
1845
1846    if (sizeof(void *) == 4 && !(flags & FUSE_IOCTL_32BIT)) {
1847        req->ioctl_64bit = 1;
1848    }
1849
1850    if (req->se->op.ioctl) {
1851        req->se->op.ioctl(req, nodeid, arg->cmd, (void *)(uintptr_t)arg->arg,
1852                          &fi, flags, in_buf, arg->in_size, arg->out_size);
1853    } else {
1854        fuse_reply_err(req, ENOSYS);
1855    }
1856}
1857
1858void fuse_pollhandle_destroy(struct fuse_pollhandle *ph)
1859{
1860    free(ph);
1861}
1862
1863static void do_poll(fuse_req_t req, fuse_ino_t nodeid,
1864                    struct fuse_mbuf_iter *iter)
1865{
1866    struct fuse_poll_in *arg;
1867    struct fuse_file_info fi;
1868
1869    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1870    if (!arg) {
1871        fuse_reply_err(req, EINVAL);
1872        return;
1873    }
1874
1875    memset(&fi, 0, sizeof(fi));
1876    fi.fh = arg->fh;
1877    fi.poll_events = arg->events;
1878
1879    if (req->se->op.poll) {
1880        struct fuse_pollhandle *ph = NULL;
1881
1882        if (arg->flags & FUSE_POLL_SCHEDULE_NOTIFY) {
1883            ph = malloc(sizeof(struct fuse_pollhandle));
1884            if (ph == NULL) {
1885                fuse_reply_err(req, ENOMEM);
1886                return;
1887            }
1888            ph->kh = arg->kh;
1889            ph->se = req->se;
1890        }
1891
1892        req->se->op.poll(req, nodeid, &fi, ph);
1893    } else {
1894        fuse_reply_err(req, ENOSYS);
1895    }
1896}
1897
1898static void do_fallocate(fuse_req_t req, fuse_ino_t nodeid,
1899                         struct fuse_mbuf_iter *iter)
1900{
1901    struct fuse_fallocate_in *arg;
1902    struct fuse_file_info fi;
1903
1904    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1905    if (!arg) {
1906        fuse_reply_err(req, EINVAL);
1907        return;
1908    }
1909
1910    memset(&fi, 0, sizeof(fi));
1911    fi.fh = arg->fh;
1912
1913    if (req->se->op.fallocate) {
1914        req->se->op.fallocate(req, nodeid, arg->mode, arg->offset, arg->length,
1915                              &fi);
1916    } else {
1917        fuse_reply_err(req, ENOSYS);
1918    }
1919}
1920
1921static void do_copy_file_range(fuse_req_t req, fuse_ino_t nodeid_in,
1922                               struct fuse_mbuf_iter *iter)
1923{
1924    struct fuse_copy_file_range_in *arg;
1925    struct fuse_file_info fi_in, fi_out;
1926
1927    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1928    if (!arg) {
1929        fuse_reply_err(req, EINVAL);
1930        return;
1931    }
1932
1933    memset(&fi_in, 0, sizeof(fi_in));
1934    fi_in.fh = arg->fh_in;
1935
1936    memset(&fi_out, 0, sizeof(fi_out));
1937    fi_out.fh = arg->fh_out;
1938
1939
1940    if (req->se->op.copy_file_range) {
1941        req->se->op.copy_file_range(req, nodeid_in, arg->off_in, &fi_in,
1942                                    arg->nodeid_out, arg->off_out, &fi_out,
1943                                    arg->len, arg->flags);
1944    } else {
1945        fuse_reply_err(req, ENOSYS);
1946    }
1947}
1948
1949static void do_lseek(fuse_req_t req, fuse_ino_t nodeid,
1950                     struct fuse_mbuf_iter *iter)
1951{
1952    struct fuse_lseek_in *arg;
1953    struct fuse_file_info fi;
1954
1955    arg = fuse_mbuf_iter_advance(iter, sizeof(*arg));
1956    if (!arg) {
1957        fuse_reply_err(req, EINVAL);
1958        return;
1959    }
1960    memset(&fi, 0, sizeof(fi));
1961    fi.fh = arg->fh;
1962
1963    if (req->se->op.lseek) {
1964        req->se->op.lseek(req, nodeid, arg->offset, arg->whence, &fi);
1965    } else {
1966        fuse_reply_err(req, ENOSYS);
1967    }
1968}
1969
1970static void do_syncfs(fuse_req_t req, fuse_ino_t nodeid,
1971                      struct fuse_mbuf_iter *iter)
1972{
1973    if (req->se->op.syncfs) {
1974        req->se->op.syncfs(req, nodeid);
1975    } else {
1976        fuse_reply_err(req, ENOSYS);
1977    }
1978}
1979
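    /*
     * FUSE_INIT: negotiate the protocol with the client.  The request
     * is parsed in compat-sized chunks so that older (and future,
     * extended) fuse_init_in layouts are handled, the advertised FUSE_*
     * flags are translated into FUSE_CAP_* capabilities, op.init() gets
     * a chance to adjust se->conn.want, and the result is returned in a
     * fuse_init_out reply.
     */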
1980static void do_init(fuse_req_t req, fuse_ino_t nodeid,
1981                    struct fuse_mbuf_iter *iter)
1982{
1983    size_t compat_size = offsetof(struct fuse_init_in, max_readahead);
1984    size_t compat2_size = offsetof(struct fuse_init_in, flags) +
1985                              sizeof(uint32_t);
1986    /* Fuse structure extended with minor version 36 */
1987    size_t compat3_size = endof(struct fuse_init_in, unused);
1988    struct fuse_init_in *arg;
1989    struct fuse_init_out outarg;
1990    struct fuse_session *se = req->se;
1991    size_t bufsize = se->bufsize;
1992    size_t outargsize = sizeof(outarg);
1993    uint64_t flags = 0;
1994
1995    (void)nodeid;
1996
1997    /* First consume the old fields... */
1998    arg = fuse_mbuf_iter_advance(iter, compat_size);
1999    if (!arg) {
2000        fuse_reply_err(req, EINVAL);
2001        return;
2002    }
2003
2004    /* ...and now consume the new fields. */
2005    if (arg->major == 7 && arg->minor >= 6) {
2006        if (!fuse_mbuf_iter_advance(iter, compat2_size - compat_size)) {
2007            fuse_reply_err(req, EINVAL);
2008            return;
2009        }
2010        flags |= arg->flags;
2011    }
2012
2013    /*
2014     * fuse_init_in was extended again with minor version 36. Only read
2015     * up to the currently known size of fuse_init_in so that future
2016     * extensions and header rebases do not cause breakage.
2017     */
2018    if (sizeof(*arg) > compat2_size && (arg->flags & FUSE_INIT_EXT)) {
2019        if (!fuse_mbuf_iter_advance(iter, compat3_size - compat2_size)) {
2020            fuse_reply_err(req, EINVAL);
2021            return;
2022        }
2023        flags |= (uint64_t) arg->flags2 << 32;
2024    }
2025
2026    fuse_log(FUSE_LOG_DEBUG, "INIT: %u.%u\n", arg->major, arg->minor);
2027    if (arg->major == 7 && arg->minor >= 6) {
2028        fuse_log(FUSE_LOG_DEBUG, "flags=0x%016" PRIx64 "\n", flags);
2029        fuse_log(FUSE_LOG_DEBUG, "max_readahead=0x%08x\n", arg->max_readahead);
2030    }
2031    se->conn.proto_major = arg->major;
2032    se->conn.proto_minor = arg->minor;
2033    se->conn.capable = 0;
2034    se->conn.want = 0;
2035
2036    memset(&outarg, 0, sizeof(outarg));
2037    outarg.major = FUSE_KERNEL_VERSION;
2038    outarg.minor = FUSE_KERNEL_MINOR_VERSION;
2039
2040    if (arg->major < 7 || (arg->major == 7 && arg->minor < 31)) {
2041        fuse_log(FUSE_LOG_ERR, "fuse: unsupported protocol version: %u.%u\n",
2042                 arg->major, arg->minor);
2043        fuse_reply_err(req, EPROTO);
2044        return;
2045    }
2046
2047    if (arg->major > 7) {
2048        /* Wait for a second INIT request with a 7.X version */
2049        send_reply_ok(req, &outarg, sizeof(outarg));
2050        return;
2051    }
2052
2053    if (arg->max_readahead < se->conn.max_readahead) {
2054        se->conn.max_readahead = arg->max_readahead;
2055    }
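        /*
         * Translate the advertised FUSE_* init flags into the library's
         * FUSE_CAP_* capability bits in se->conn.capable.
         */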
2056    if (flags & FUSE_ASYNC_READ) {
2057        se->conn.capable |= FUSE_CAP_ASYNC_READ;
2058    }
2059    if (flags & FUSE_POSIX_LOCKS) {
2060        se->conn.capable |= FUSE_CAP_POSIX_LOCKS;
2061    }
2062    if (flags & FUSE_ATOMIC_O_TRUNC) {
2063        se->conn.capable |= FUSE_CAP_ATOMIC_O_TRUNC;
2064    }
2065    if (flags & FUSE_EXPORT_SUPPORT) {
2066        se->conn.capable |= FUSE_CAP_EXPORT_SUPPORT;
2067    }
2068    if (flags & FUSE_DONT_MASK) {
2069        se->conn.capable |= FUSE_CAP_DONT_MASK;
2070    }
2071    if (flags & FUSE_FLOCK_LOCKS) {
2072        se->conn.capable |= FUSE_CAP_FLOCK_LOCKS;
2073    }
2074    if (flags & FUSE_AUTO_INVAL_DATA) {
2075        se->conn.capable |= FUSE_CAP_AUTO_INVAL_DATA;
2076    }
2077    if (flags & FUSE_DO_READDIRPLUS) {
2078        se->conn.capable |= FUSE_CAP_READDIRPLUS;
2079    }
2080    if (flags & FUSE_READDIRPLUS_AUTO) {
2081        se->conn.capable |= FUSE_CAP_READDIRPLUS_AUTO;
2082    }
2083    if (flags & FUSE_ASYNC_DIO) {
2084        se->conn.capable |= FUSE_CAP_ASYNC_DIO;
2085    }
2086    if (flags & FUSE_WRITEBACK_CACHE) {
2087        se->conn.capable |= FUSE_CAP_WRITEBACK_CACHE;
2088    }
2089    if (flags & FUSE_NO_OPEN_SUPPORT) {
2090        se->conn.capable |= FUSE_CAP_NO_OPEN_SUPPORT;
2091    }
2092    if (flags & FUSE_PARALLEL_DIROPS) {
2093        se->conn.capable |= FUSE_CAP_PARALLEL_DIROPS;
2094    }
2095    if (flags & FUSE_POSIX_ACL) {
2096        se->conn.capable |= FUSE_CAP_POSIX_ACL;
2097    }
2098    if (flags & FUSE_HANDLE_KILLPRIV) {
2099        se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV;
2100    }
2101    if (flags & FUSE_NO_OPENDIR_SUPPORT) {
2102        se->conn.capable |= FUSE_CAP_NO_OPENDIR_SUPPORT;
2103    }
2104    if (!(flags & FUSE_MAX_PAGES)) {
2105        size_t max_bufsize = FUSE_DEFAULT_MAX_PAGES_PER_REQ * getpagesize() +
2106                             FUSE_BUFFER_HEADER_SIZE;
2107        if (bufsize > max_bufsize) {
2108            bufsize = max_bufsize;
2109        }
2110    }
2111    if (flags & FUSE_SUBMOUNTS) {
2112        se->conn.capable |= FUSE_CAP_SUBMOUNTS;
2113    }
2114    if (flags & FUSE_HANDLE_KILLPRIV_V2) {
2115        se->conn.capable |= FUSE_CAP_HANDLE_KILLPRIV_V2;
2116    }
2117    if (flags & FUSE_SETXATTR_EXT) {
2118        se->conn.capable |= FUSE_CAP_SETXATTR_EXT;
2119    }
2120    if (flags & FUSE_SECURITY_CTX) {
2121        se->conn.capable |= FUSE_CAP_SECURITY_CTX;
2122    }
2123#ifdef HAVE_SPLICE
2124#ifdef HAVE_VMSPLICE
2125    se->conn.capable |= FUSE_CAP_SPLICE_WRITE | FUSE_CAP_SPLICE_MOVE;
2126#endif
2127    se->conn.capable |= FUSE_CAP_SPLICE_READ;
2128#endif
2129    se->conn.capable |= FUSE_CAP_IOCTL_DIR;
2130
2131    /*
2132     * Default settings for modern filesystems.
2133     *
2134     * Most of these capabilities were disabled by default in
2135     * libfuse2 for backwards compatibility reasons. In libfuse3,
2136     * we can finally enable them by default (as long as they're
2137     * supported by the kernel).
2138     */
2139#define LL_SET_DEFAULT(cond, cap)             \
2140    if ((cond) && (se->conn.capable & (cap))) \
2141        se->conn.want |= (cap)
2142    LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_READ);
2143    LL_SET_DEFAULT(1, FUSE_CAP_PARALLEL_DIROPS);
2144    LL_SET_DEFAULT(1, FUSE_CAP_AUTO_INVAL_DATA);
2145    LL_SET_DEFAULT(1, FUSE_CAP_HANDLE_KILLPRIV);
2146    LL_SET_DEFAULT(1, FUSE_CAP_ASYNC_DIO);
2147    LL_SET_DEFAULT(1, FUSE_CAP_IOCTL_DIR);
2148    LL_SET_DEFAULT(1, FUSE_CAP_ATOMIC_O_TRUNC);
2149    LL_SET_DEFAULT(se->op.write_buf, FUSE_CAP_SPLICE_READ);
2150    LL_SET_DEFAULT(se->op.getlk && se->op.setlk, FUSE_CAP_POSIX_LOCKS);
2151    LL_SET_DEFAULT(se->op.flock, FUSE_CAP_FLOCK_LOCKS);
2152    LL_SET_DEFAULT(se->op.readdirplus, FUSE_CAP_READDIRPLUS);
2153    LL_SET_DEFAULT(se->op.readdirplus && se->op.readdir,
2154                   FUSE_CAP_READDIRPLUS_AUTO);
2155    se->conn.time_gran = 1;
2156
2157    if (bufsize < FUSE_MIN_READ_BUFFER) {
2158        fuse_log(FUSE_LOG_ERR, "fuse: warning: buffer size too small: %zu\n",
2159                 bufsize);
2160        bufsize = FUSE_MIN_READ_BUFFER;
2161    }
2162    se->bufsize = bufsize;
2163
2164    if (se->conn.max_write > bufsize - FUSE_BUFFER_HEADER_SIZE) {
2165        se->conn.max_write = bufsize - FUSE_BUFFER_HEADER_SIZE;
2166    }
2167
2168    se->got_init = 1;
2169    se->got_destroy = 0;
2170    if (se->op.init) {
2171        se->op.init(se->userdata, &se->conn);
2172    }
2173
2174    if (se->conn.want & (~se->conn.capable)) {
2175        fuse_log(FUSE_LOG_ERR,
2176                 "fuse: error: filesystem requested capabilities "
2177                 "0x%llx that are not supported by kernel, aborting.\n",
2178                 se->conn.want & (~se->conn.capable));
2179        fuse_reply_err(req, EPROTO);
2180        se->error = -EPROTO;
2181        fuse_session_exit(se);
2182        return;
2183    }
2184
2185    if (se->conn.max_write < bufsize - FUSE_BUFFER_HEADER_SIZE) {
2186        se->bufsize = se->conn.max_write + FUSE_BUFFER_HEADER_SIZE;
2187    }
2188    if (flags & FUSE_MAX_PAGES) {
2189        outarg.flags |= FUSE_MAX_PAGES;
2190        outarg.max_pages = (se->conn.max_write - 1) / getpagesize() + 1;
2191    }
2192
2193    /*
2194     * Always enable big writes; this is superseded
2195     * by the max_write option.
2196     */
2197    outarg.flags |= FUSE_BIG_WRITES;
2198
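        /*
         * Convert the capabilities the filesystem ended up wanting
         * (se->conn.want) back into FUSE_* flags for the reply.
         */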
2199    if (se->conn.want & FUSE_CAP_ASYNC_READ) {
2200        outarg.flags |= FUSE_ASYNC_READ;
2201    }
2202    if (se->conn.want & FUSE_CAP_PARALLEL_DIROPS) {
2203        outarg.flags |= FUSE_PARALLEL_DIROPS;
2204    }
2205    if (se->conn.want & FUSE_CAP_POSIX_LOCKS) {
2206        outarg.flags |= FUSE_POSIX_LOCKS;
2207    }
2208    if (se->conn.want & FUSE_CAP_ATOMIC_O_TRUNC) {
2209        outarg.flags |= FUSE_ATOMIC_O_TRUNC;
2210    }
2211    if (se->conn.want & FUSE_CAP_EXPORT_SUPPORT) {
2212        outarg.flags |= FUSE_EXPORT_SUPPORT;
2213    }
2214    if (se->conn.want & FUSE_CAP_DONT_MASK) {
2215        outarg.flags |= FUSE_DONT_MASK;
2216    }
2217    if (se->conn.want & FUSE_CAP_FLOCK_LOCKS) {
2218        outarg.flags |= FUSE_FLOCK_LOCKS;
2219    }
2220    if (se->conn.want & FUSE_CAP_AUTO_INVAL_DATA) {
2221        outarg.flags |= FUSE_AUTO_INVAL_DATA;
2222    }
2223    if (se->conn.want & FUSE_CAP_READDIRPLUS) {
2224        outarg.flags |= FUSE_DO_READDIRPLUS;
2225    }
2226    if (se->conn.want & FUSE_CAP_READDIRPLUS_AUTO) {
2227        outarg.flags |= FUSE_READDIRPLUS_AUTO;
2228    }
2229    if (se->conn.want & FUSE_CAP_ASYNC_DIO) {
2230        outarg.flags |= FUSE_ASYNC_DIO;
2231    }
2232    if (se->conn.want & FUSE_CAP_WRITEBACK_CACHE) {
2233        outarg.flags |= FUSE_WRITEBACK_CACHE;
2234    }
2235    if (se->conn.want & FUSE_CAP_POSIX_ACL) {
2236        outarg.flags |= FUSE_POSIX_ACL;
2237    }
2238    outarg.max_readahead = se->conn.max_readahead;
2239    outarg.max_write = se->conn.max_write;
2240    if (se->conn.max_background >= (1 << 16)) {
2241        se->conn.max_background = (1 << 16) - 1;
2242    }
2243    if (se->conn.congestion_threshold > se->conn.max_background) {
2244        se->conn.congestion_threshold = se->conn.max_background;
2245    }
2246    if (!se->conn.congestion_threshold) {
2247        se->conn.congestion_threshold = se->conn.max_background * 3 / 4;
2248    }
2249
2250    outarg.max_background = se->conn.max_background;
2251    outarg.congestion_threshold = se->conn.congestion_threshold;
2252    outarg.time_gran = se->conn.time_gran;
2253
2254    if (se->conn.want & FUSE_CAP_HANDLE_KILLPRIV_V2) {
2255        outarg.flags |= FUSE_HANDLE_KILLPRIV_V2;
2256    }
2257
2258    if (se->conn.want & FUSE_CAP_SETXATTR_EXT) {
2259        outarg.flags |= FUSE_SETXATTR_EXT;
2260    }
2261
2262    if (se->conn.want & FUSE_CAP_SECURITY_CTX) {
2263        /* bits 32..63 get shifted down 32 bits into the flags2 field */
2264        outarg.flags2 |= FUSE_SECURITY_CTX >> 32;
2265    }
2266
2267    fuse_log(FUSE_LOG_DEBUG, "   INIT: %u.%u\n", outarg.major, outarg.minor);
2268    fuse_log(FUSE_LOG_DEBUG, "   flags2=0x%08x flags=0x%08x\n", outarg.flags2,
2269             outarg.flags);
2270    fuse_log(FUSE_LOG_DEBUG, "   max_readahead=0x%08x\n", outarg.max_readahead);
2271    fuse_log(FUSE_LOG_DEBUG, "   max_write=0x%08x\n", outarg.max_write);
2272    fuse_log(FUSE_LOG_DEBUG, "   max_background=%i\n", outarg.max_background);
2273    fuse_log(FUSE_LOG_DEBUG, "   congestion_threshold=%i\n",
2274             outarg.congestion_threshold);
2275    fuse_log(FUSE_LOG_DEBUG, "   time_gran=%u\n", outarg.time_gran);
2276
2277    send_reply_ok(req, &outarg, outargsize);
2278}
2279
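    /*
     * FUSE_DESTROY: mark the session as torn down and give the
     * filesystem a chance to clean up via op.destroy before replying.
     */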
2280static void do_destroy(fuse_req_t req, fuse_ino_t nodeid,
2281                       struct fuse_mbuf_iter *iter)
2282{
2283    struct fuse_session *se = req->se;
2284
2285    (void)nodeid;
2286    (void)iter;
2287
2288    se->got_destroy = 1;
2289    se->got_init = 0;
2290    if (se->op.destroy) {
2291        se->op.destroy(se->userdata);
2292    }
2293
2294    send_reply_ok(req, NULL, 0);
2295}
2296
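    /*
     * FUSE_NOTIFY_STORE: ask the client to cache data for inode 'ino'
     * at 'offset'.  Notifications are encoded like replies, with
     * out.error carrying the notify code and out.unique left at zero.
     */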
2297int fuse_lowlevel_notify_store(struct fuse_session *se, fuse_ino_t ino,
2298                               off_t offset, struct fuse_bufvec *bufv)
2299{
2300    struct fuse_out_header out = {
2301        .error = FUSE_NOTIFY_STORE,
2302    };
2303    struct fuse_notify_store_out outarg = {
2304        .nodeid = ino,
2305        .offset = offset,
2306        .size = fuse_buf_size(bufv),
2307    };
2308    struct iovec iov[3];
2309    int res;
2310
2311    if (!se) {
2312        return -EINVAL;
2313    }
2314
2315    iov[0].iov_base = &out;
2316    iov[0].iov_len = sizeof(out);
2317    iov[1].iov_base = &outarg;
2318    iov[1].iov_len = sizeof(outarg);
2319
2320    res = fuse_send_data_iov(se, NULL, iov, 2, bufv);
2321    if (res > 0) {
2322        res = -res;
2323    }
2324
2325    return res;
2326}
2327
2328void *fuse_req_userdata(fuse_req_t req)
2329{
2330    return req->se->userdata;
2331}
2332
2333const struct fuse_ctx *fuse_req_ctx(fuse_req_t req)
2334{
2335    return &req->ctx;
2336}
2337
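    /*
     * Register an interrupt callback for a request.  Lock ordering is
     * req->lock before se->lock; if the request was already
     * interrupted, the callback runs immediately while req->lock is
     * still held.
     */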
2338void fuse_req_interrupt_func(fuse_req_t req, fuse_interrupt_func_t func,
2339                             void *data)
2340{
2341    pthread_mutex_lock(&req->lock);
2342    pthread_mutex_lock(&req->se->lock);
2343    req->u.ni.func = func;
2344    req->u.ni.data = data;
2345    pthread_mutex_unlock(&req->se->lock);
2346    if (req->interrupted && func) {
2347        func(req, data);
2348    }
2349    pthread_mutex_unlock(&req->lock);
2350}
2351
2352int fuse_req_interrupted(fuse_req_t req)
2353{
2354    int interrupted;
2355
2356    pthread_mutex_lock(&req->se->lock);
2357    interrupted = req->interrupted;
2358    pthread_mutex_unlock(&req->se->lock);
2359
2360    return interrupted;
2361}
2362
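    /*
     * Dispatch table indexed by FUSE opcode.  Opcodes beyond FUSE_MAXOP
     * or entries with a NULL handler make fuse_session_process_buf_int()
     * reply with ENOSYS.
     */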
2363static struct {
2364    void (*func)(fuse_req_t, fuse_ino_t, struct fuse_mbuf_iter *);
2365    const char *name;
2366} fuse_ll_ops[] = {
2367    [FUSE_LOOKUP] = { do_lookup, "LOOKUP" },
2368    [FUSE_FORGET] = { do_forget, "FORGET" },
2369    [FUSE_GETATTR] = { do_getattr, "GETATTR" },
2370    [FUSE_SETATTR] = { do_setattr, "SETATTR" },
2371    [FUSE_READLINK] = { do_readlink, "READLINK" },
2372    [FUSE_SYMLINK] = { do_symlink, "SYMLINK" },
2373    [FUSE_MKNOD] = { do_mknod, "MKNOD" },
2374    [FUSE_MKDIR] = { do_mkdir, "MKDIR" },
2375    [FUSE_UNLINK] = { do_unlink, "UNLINK" },
2376    [FUSE_RMDIR] = { do_rmdir, "RMDIR" },
2377    [FUSE_RENAME] = { do_rename, "RENAME" },
2378    [FUSE_LINK] = { do_link, "LINK" },
2379    [FUSE_OPEN] = { do_open, "OPEN" },
2380    [FUSE_READ] = { do_read, "READ" },
2381    [FUSE_WRITE] = { do_write, "WRITE" },
2382    [FUSE_STATFS] = { do_statfs, "STATFS" },
2383    [FUSE_RELEASE] = { do_release, "RELEASE" },
2384    [FUSE_FSYNC] = { do_fsync, "FSYNC" },
2385    [FUSE_SETXATTR] = { do_setxattr, "SETXATTR" },
2386    [FUSE_GETXATTR] = { do_getxattr, "GETXATTR" },
2387    [FUSE_LISTXATTR] = { do_listxattr, "LISTXATTR" },
2388    [FUSE_REMOVEXATTR] = { do_removexattr, "REMOVEXATTR" },
2389    [FUSE_FLUSH] = { do_flush, "FLUSH" },
2390    [FUSE_INIT] = { do_init, "INIT" },
2391    [FUSE_OPENDIR] = { do_opendir, "OPENDIR" },
2392    [FUSE_READDIR] = { do_readdir, "READDIR" },
2393    [FUSE_RELEASEDIR] = { do_releasedir, "RELEASEDIR" },
2394    [FUSE_FSYNCDIR] = { do_fsyncdir, "FSYNCDIR" },
2395    [FUSE_GETLK] = { do_getlk, "GETLK" },
2396    [FUSE_SETLK] = { do_setlk, "SETLK" },
2397    [FUSE_SETLKW] = { do_setlkw, "SETLKW" },
2398    [FUSE_ACCESS] = { do_access, "ACCESS" },
2399    [FUSE_CREATE] = { do_create, "CREATE" },
2400    [FUSE_INTERRUPT] = { do_interrupt, "INTERRUPT" },
2401    [FUSE_BMAP] = { do_bmap, "BMAP" },
2402    [FUSE_IOCTL] = { do_ioctl, "IOCTL" },
2403    [FUSE_POLL] = { do_poll, "POLL" },
2404    [FUSE_FALLOCATE] = { do_fallocate, "FALLOCATE" },
2405    [FUSE_DESTROY] = { do_destroy, "DESTROY" },
2406    [FUSE_NOTIFY_REPLY] = { NULL, "NOTIFY_REPLY" },
2407    [FUSE_BATCH_FORGET] = { do_batch_forget, "BATCH_FORGET" },
2408    [FUSE_READDIRPLUS] = { do_readdirplus, "READDIRPLUS" },
2409    [FUSE_RENAME2] = { do_rename2, "RENAME2" },
2410    [FUSE_COPY_FILE_RANGE] = { do_copy_file_range, "COPY_FILE_RANGE" },
2411    [FUSE_LSEEK] = { do_lseek, "LSEEK" },
2412    [FUSE_SYNCFS] = { do_syncfs, "SYNCFS" },
2413};
2414
2415#define FUSE_MAXOP (sizeof(fuse_ll_ops) / sizeof(fuse_ll_ops[0]))
2416
2417static const char *opname(enum fuse_opcode opcode)
2418{
2419    if (opcode >= FUSE_MAXOP || !fuse_ll_ops[opcode].name) {
2420        return "???";
2421    } else {
2422        return fuse_ll_ops[opcode].name;
2423    }
2424}
2425
2426void fuse_session_process_buf(struct fuse_session *se,
2427                              const struct fuse_buf *buf)
2428{
2429    struct fuse_bufvec bufv = { .buf[0] = *buf, .count = 1 };
2430    fuse_session_process_buf_int(se, &bufv, NULL);
2431}
2432
2433/*
2434 * Restriction:
2435 *   bufv is normally a single-entry buffer. The exception is a write
2436 *   request whose data is in memory: there bufv may hold multiple
2437 *   entries, with the first entry containing all headers and the
2438 *   subsequent entries containing the data.
2439 *   bufv shall not use offsets etc. to make the data anything other
2440 *   than contiguous, starting from 0.
2441 */
2442void fuse_session_process_buf_int(struct fuse_session *se,
2443                                  struct fuse_bufvec *bufv,
2444                                  struct fuse_chan *ch)
2445{
2446    const struct fuse_buf *buf = bufv->buf;
2447    struct fuse_mbuf_iter iter = FUSE_MBUF_ITER_INIT(buf);
2448    struct fuse_in_header *in;
2449    struct fuse_req *req;
2450    int err;
2451
2452    /* The first buffer must be a memory buffer */
2453    assert(!(buf->flags & FUSE_BUF_IS_FD));
2454
2455    in = fuse_mbuf_iter_advance(&iter, sizeof(*in));
2456    assert(in); /* caller guarantees the input buffer is large enough */
2457
2458    fuse_log(
2459        FUSE_LOG_DEBUG,
2460        "unique: %llu, opcode: %s (%i), nodeid: %llu, insize: %zu, pid: %u\n",
2461        (unsigned long long)in->unique, opname((enum fuse_opcode)in->opcode),
2462        in->opcode, (unsigned long long)in->nodeid, buf->size, in->pid);
2463
2464    req = fuse_ll_alloc_req(se);
2465    if (req == NULL) {
2466        struct fuse_out_header out = {
2467            .unique = in->unique,
2468            .error = -ENOMEM,
2469        };
2470        struct iovec iov = {
2471            .iov_base = &out,
2472            .iov_len = sizeof(struct fuse_out_header),
2473        };
2474
2475        fuse_send_msg(se, ch, &iov, 1);
2476        return;
2477    }
2478
2479    req->unique = in->unique;
2480    req->ctx.uid = in->uid;
2481    req->ctx.gid = in->gid;
2482    req->ctx.pid = in->pid;
2483    req->ch = ch;
2484
2485    /*
2486     * INIT and DESTROY requests are serialized, all other request types
2487     * run in parallel.  This prevents races between FUSE_INIT and ordinary
2488     * requests, FUSE_INIT and FUSE_INIT, FUSE_INIT and FUSE_DESTROY, and
2489     * FUSE_DESTROY and FUSE_DESTROY.
2490     */
2491    if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT ||
2492        in->opcode == FUSE_DESTROY) {
2493        pthread_rwlock_wrlock(&se->init_rwlock);
2494    } else {
2495        pthread_rwlock_rdlock(&se->init_rwlock);
2496    }
2497
2498    err = EIO;
2499    if (!se->got_init) {
2500        enum fuse_opcode expected;
2501
2502        expected = se->cuse_data ? CUSE_INIT : FUSE_INIT;
2503        if (in->opcode != expected) {
2504            goto reply_err;
2505        }
2506    } else if (in->opcode == FUSE_INIT || in->opcode == CUSE_INIT) {
2507        if (fuse_lowlevel_is_virtio(se)) {
2508            /*
2509             * TODO: Typically this happens after a guest hard reboot.
2510             * We need to do a destroy, but we can't reply to this
2511             * request yet, so we can't use do_destroy().
2512             */
2513            fuse_log(FUSE_LOG_DEBUG, "%s: reinit\n", __func__);
2514            se->got_destroy = 1;
2515            se->got_init = 0;
2516            if (se->op.destroy) {
2517                se->op.destroy(se->userdata);
2518            }
2519        } else {
2520            goto reply_err;
2521        }
2522    }
2523
2524    err = EACCES;
2525    /* Implement -o allow_root */
2526    if (se->deny_others && in->uid != se->owner && in->uid != 0 &&
2527        in->opcode != FUSE_INIT && in->opcode != FUSE_READ &&
2528        in->opcode != FUSE_WRITE && in->opcode != FUSE_FSYNC &&
2529        in->opcode != FUSE_RELEASE && in->opcode != FUSE_READDIR &&
2530        in->opcode != FUSE_FSYNCDIR && in->opcode != FUSE_RELEASEDIR &&
2531        in->opcode != FUSE_NOTIFY_REPLY && in->opcode != FUSE_READDIRPLUS) {
2532        goto reply_err;
2533    }
2534
2535    err = ENOSYS;
2536    if (in->opcode >= FUSE_MAXOP || !fuse_ll_ops[in->opcode].func) {
2537        goto reply_err;
2538    }
2539    if (in->opcode != FUSE_INTERRUPT) {
2540        struct fuse_req *intr;
2541        pthread_mutex_lock(&se->lock);
2542        intr = check_interrupt(se, req);
2543        list_add_req(req, &se->list);
2544        pthread_mutex_unlock(&se->lock);
2545        if (intr) {
2546            fuse_reply_err(intr, EAGAIN);
2547        }
2548    }
2549
2550    if (in->opcode == FUSE_WRITE && se->op.write_buf) {
2551        do_write_buf(req, in->nodeid, &iter, bufv);
2552    } else {
2553        fuse_ll_ops[in->opcode].func(req, in->nodeid, &iter);
2554    }
2555
2556    pthread_rwlock_unlock(&se->init_rwlock);
2557    return;
2558
2559reply_err:
2560    fuse_reply_err(req, err);
2561    pthread_rwlock_unlock(&se->init_rwlock);
2562}
2563
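    /*
     * Map a command-line option onto a struct fuse_session field (by
     * offset) together with the value used by fuse_opt_parse().
     */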
2564#define LL_OPTION(n, o, v)                     \
2565    {                                          \
2566        n, offsetof(struct fuse_session, o), v \
2567    }
2568
2569static const struct fuse_opt fuse_ll_opts[] = {
2570    LL_OPTION("debug", debug, 1),
2571    LL_OPTION("-d", debug, 1),
2572    LL_OPTION("--debug", debug, 1),
2573    LL_OPTION("allow_root", deny_others, 1),
2574    LL_OPTION("--socket-path=%s", vu_socket_path, 0),
2575    LL_OPTION("--socket-group=%s", vu_socket_group, 0),
2576    LL_OPTION("--fd=%d", vu_listen_fd, 0),
2577    LL_OPTION("--thread-pool-size=%d", thread_pool_size, 0),
2578    FUSE_OPT_END
2579};
2580
2581void fuse_lowlevel_version(void)
2582{
2583    printf("using FUSE kernel interface version %i.%i\n", FUSE_KERNEL_VERSION,
2584           FUSE_KERNEL_MINOR_VERSION);
2585}
2586
2587void fuse_lowlevel_help(void)
2588{
2589    /*
2590     * These are not all options, but the ones that are
2591     * potentially of interest to an end-user
2592     */
2593    printf(
2594        "    -o allow_root              allow access by root\n"
2595        "    --socket-path=PATH         path for the vhost-user socket\n"
2596        "    --socket-group=GRNAME      name of group for the vhost-user socket\n"
2597        "    --fd=FDNUM                 fd number of vhost-user socket\n"
2598        "    --thread-pool-size=NUM     thread pool size limit (default %d)\n",
2599        THREAD_POOL_SIZE);
2600}
2601
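    /*
     * Tear down a session: run op.destroy if the filesystem was
     * initialized but never saw FUSE_DESTROY, then release the locks,
     * the vhost-user socket state and the session itself.
     */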
2602void fuse_session_destroy(struct fuse_session *se)
2603{
2604    if (se->got_init && !se->got_destroy) {
2605        if (se->op.destroy) {
2606            se->op.destroy(se->userdata);
2607        }
2608    }
2609    pthread_rwlock_destroy(&se->init_rwlock);
2610    pthread_mutex_destroy(&se->lock);
2611    free(se->cuse_data);
2612    if (se->fd != -1) {
2613        close(se->fd);
2614    }
2615
2616    if (fuse_lowlevel_is_virtio(se)) {
2617        virtio_session_close(se);
2618    }
2619
2620    free(se->vu_socket_path);
2621    se->vu_socket_path = NULL;
2622
2623    g_free(se);
2624}
2625
2626
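    /*
     * Allocate and configure a new session: copy the caller's
     * fuse_lowlevel_ops, parse the low-level options, and require
     * exactly one of --socket-path or --fd for the vhost-user
     * transport.
     */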
2627struct fuse_session *fuse_session_new(struct fuse_args *args,
2628                                      const struct fuse_lowlevel_ops *op,
2629                                      size_t op_size, void *userdata)
2630{
2631    struct fuse_session *se;
2632
2633    if (sizeof(struct fuse_lowlevel_ops) < op_size) {
2634        fuse_log(
2635            FUSE_LOG_ERR,
2636            "fuse: warning: library too old, some operations may not work\n");
2637        op_size = sizeof(struct fuse_lowlevel_ops);
2638    }
2639
2640    if (args->argc == 0) {
2641        fuse_log(FUSE_LOG_ERR,
2642                 "fuse: empty argv passed to fuse_session_new().\n");
2643        return NULL;
2644    }
2645
2646    se = g_try_new0(struct fuse_session, 1);
2647    if (se == NULL) {
2648        fuse_log(FUSE_LOG_ERR, "fuse: failed to allocate fuse object\n");
2649        goto out1;
2650    }
2651    se->fd = -1;
2652    se->vu_listen_fd = -1;
2653    se->thread_pool_size = THREAD_POOL_SIZE;
2654    se->conn.max_write = UINT_MAX;
2655    se->conn.max_readahead = UINT_MAX;
2656
2657    /* Parse options */
2658    if (fuse_opt_parse(args, se, fuse_ll_opts, NULL) == -1) {
2659        goto out2;
2660    }
2661    if (args->argc == 1 && args->argv[0][0] == '-') {
2662        fuse_log(FUSE_LOG_ERR,
2663                 "fuse: warning: argv[0] looks like an option, but "
2664                 "will be ignored\n");
2665    } else if (args->argc != 1) {
2666        int i;
2667        fuse_log(FUSE_LOG_ERR, "fuse: unknown option(s): `");
2668        for (i = 1; i < args->argc - 1; i++) {
2669            fuse_log(FUSE_LOG_ERR, "%s ", args->argv[i]);
2670        }
2671        fuse_log(FUSE_LOG_ERR, "%s'\n", args->argv[i]);
2672        goto out4;
2673    }
2674
2675    if (!se->vu_socket_path && se->vu_listen_fd < 0) {
2676        fuse_log(FUSE_LOG_ERR, "fuse: missing --socket-path or --fd option\n");
2677        goto out4;
2678    }
2679    if (se->vu_socket_path && se->vu_listen_fd >= 0) {
2680        fuse_log(FUSE_LOG_ERR,
2681                 "fuse: --socket-path and --fd cannot be given together\n");
2682        goto out4;
2683    }
2684    if (se->vu_socket_group && !se->vu_socket_path) {
2685        fuse_log(FUSE_LOG_ERR,
2686                 "fuse: --socket-group can only be used with --socket-path\n");
2687        goto out4;
2688    }
2689
2690    se->bufsize = FUSE_MAX_MAX_PAGES * getpagesize() + FUSE_BUFFER_HEADER_SIZE;
2691
2692    list_init_req(&se->list);
2693    list_init_req(&se->interrupts);
2694    fuse_mutex_init(&se->lock);
2695    pthread_rwlock_init(&se->init_rwlock, NULL);
2696
2697    memcpy(&se->op, op, op_size);
2698    se->owner = getuid();
2699    se->userdata = userdata;
2700
2701    return se;
2702
2703out4:
2704    fuse_opt_free_args(args);
2705out2:
2706    g_free(se);
2707out1:
2708    return NULL;
2709}
2710
2711int fuse_session_mount(struct fuse_session *se)
2712{
2713    return virtio_session_mount(se);
2714}
2715
2716int fuse_session_fd(struct fuse_session *se)
2717{
2718    return se->fd;
2719}
2720
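    /*
     * Nothing to unmount for the vhost-user transport; resources are
     * released in fuse_session_destroy().
     */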
2721void fuse_session_unmount(struct fuse_session *se)
2722{
2723}
2724
2725int fuse_lowlevel_is_virtio(struct fuse_session *se)
2726{
2727    return !!se->virtio_dev;
2728}
2729
2730void fuse_session_exit(struct fuse_session *se)
2731{
2732    se->exited = 1;
2733}
2734
2735void fuse_session_reset(struct fuse_session *se)
2736{
2737    se->exited = 0;
2738    se->error = 0;
2739}
2740
2741int fuse_session_exited(struct fuse_session *se)
2742{
2743    return se->exited;
2744}
2745