/*
 * POSIX message queues filesystem for Linux.
 *
 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
 *                          Michal Wronski          (michal.wronski@gmail.com)
 *
 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
 * Lockless receive & send, fd based notify:
 *                          Manfred Spraul          (manfred@colorfullife.com)
 *
 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
 *
 * This file is released under the GPL.
 */

#include <linux/capability.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/file.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/sysctl.h>
#include <linux/poll.h>
#include <linux/mqueue.h>
#include <linux/msg.h>
#include <linux/skbuff.h>
#include <linux/vmalloc.h>
#include <linux/netlink.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/signal.h>
#include <linux/mutex.h>
#include <linux/nsproxy.h>
#include <linux/pid.h>
#include <linux/ipc_namespace.h>
#include <linux/user_namespace.h>
#include <linux/slab.h>

#include <net/sock.h>
#include "util.h"

#define MQUEUE_MAGIC    0x19800202
#define DIRENT_SIZE     20
#define FILENT_SIZE     80

#define SEND            0
#define RECV            1

#define STATE_NONE      0
#define STATE_READY     1

struct posix_msg_tree_node {
        struct rb_node          rb_node;
        struct list_head        msg_list;
        int                     priority;
};

struct ext_wait_queue {         /* queue of sleeping tasks */
        struct task_struct *task;
        struct list_head list;
        struct msg_msg *msg;    /* ptr of loaded message */
        int state;              /* one of STATE_* values */
};

struct mqueue_inode_info {
        spinlock_t lock;
        struct inode vfs_inode;
        wait_queue_head_t wait_q;

        struct rb_root msg_tree;
        struct posix_msg_tree_node *node_cache;
        struct mq_attr attr;

        struct sigevent notify;
        struct pid *notify_owner;
        struct user_namespace *notify_user_ns;
        struct user_struct *user;       /* user who created, for accounting */
        struct sock *notify_sock;
        struct sk_buff *notify_cookie;

        /* for tasks waiting for free space and messages, respectively */
        struct ext_wait_queue e_wait_q[2];

        unsigned long qsize; /* size of queue in memory (sum of all msgs) */
};

static const struct inode_operations mqueue_dir_inode_operations;
static const struct file_operations mqueue_file_operations;
static const struct super_operations mqueue_super_ops;
static void remove_notification(struct mqueue_inode_info *info);

static struct kmem_cache *mqueue_inode_cachep;

static struct ctl_table_header *mq_sysctl_table;

static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
{
        return container_of(inode, struct mqueue_inode_info, vfs_inode);
}

/*
 * This routine should be called with the mq_lock held.
 */
static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
{
        return get_ipc_ns(inode->i_sb->s_fs_info);
}

static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
{
        struct ipc_namespace *ns;

        spin_lock(&mq_lock);
        ns = __get_ns_from_inode(inode);
        spin_unlock(&mq_lock);
        return ns;
}

/* Auxiliary functions to manipulate the per-priority message lists */
static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;

        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);

                if (likely(leaf->priority == msg->m_type))
                        goto insert_msg;
                else if (msg->m_type < leaf->priority)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        if (info->node_cache) {
                leaf = info->node_cache;
                info->node_cache = NULL;
        } else {
                leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
                if (!leaf)
                        return -ENOMEM;
                INIT_LIST_HEAD(&leaf->msg_list);
        }
        leaf->priority = msg->m_type;
        rb_link_node(&leaf->rb_node, parent, p);
        rb_insert_color(&leaf->rb_node, &info->msg_tree);
insert_msg:
        info->attr.mq_curmsgs++;
        info->qsize += msg->m_ts;
        list_add_tail(&msg->m_list, &leaf->msg_list);
        return 0;
}

static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
{
        struct rb_node **p, *parent = NULL;
        struct posix_msg_tree_node *leaf;
        struct msg_msg *msg;

try_again:
        p = &info->msg_tree.rb_node;
        while (*p) {
                parent = *p;
                /*
                 * During insert, low priorities go to the left and high to the
                 * right.  On receive, we want the highest priorities first, so
                 * walk all the way to the right.
                 */
                p = &(*p)->rb_right;
        }
        if (!parent) {
                if (info->attr.mq_curmsgs) {
                        pr_warn_once("Inconsistency in POSIX message queue, "
                                     "no tree element, but supposedly messages "
                                     "should exist!\n");
                        info->attr.mq_curmsgs = 0;
                }
                return NULL;
        }
        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
        if (unlikely(list_empty(&leaf->msg_list))) {
                pr_warn_once("Inconsistency in POSIX message queue, "
                             "empty leaf node but we haven't implemented "
                             "lazy leaf delete!\n");
                rb_erase(&leaf->rb_node, &info->msg_tree);
                if (info->node_cache) {
                        kfree(leaf);
                } else {
                        info->node_cache = leaf;
                }
                goto try_again;
        } else {
                msg = list_first_entry(&leaf->msg_list,
                                       struct msg_msg, m_list);
                list_del(&msg->m_list);
                if (list_empty(&leaf->msg_list)) {
                        rb_erase(&leaf->rb_node, &info->msg_tree);
                        if (info->node_cache) {
                                kfree(leaf);
                        } else {
                                info->node_cache = leaf;
                        }
                }
        }
        info->attr.mq_curmsgs--;
        info->qsize -= msg->m_ts;
        return msg;
}
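
/*
 * Worked example (illustrative only, not kernel code): with messages sent
 * at priorities 5, 1, 5 and 9, msg_insert() builds three tree nodes keyed
 * 1, 5 and 9, and the two priority-5 messages queue FIFO on the same
 * node's msg_list.  Successive msg_get() calls then return the messages
 * at priorities 9, 5, 5, 1 -- highest priority first, FIFO within equal
 * priorities.
 */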

static struct inode *mqueue_get_inode(struct super_block *sb,
                struct ipc_namespace *ipc_ns, umode_t mode,
                struct mq_attr *attr)
{
        struct user_struct *u = current_user();
        struct inode *inode;
        int ret = -ENOMEM;

        inode = new_inode(sb);
        if (!inode)
                goto err;

        inode->i_ino = get_next_ino();
        inode->i_mode = mode;
        inode->i_uid = current_fsuid();
        inode->i_gid = current_fsgid();
        inode->i_mtime = inode->i_ctime = inode->i_atime = CURRENT_TIME;

        if (S_ISREG(mode)) {
                struct mqueue_inode_info *info;
                unsigned long mq_bytes, mq_treesize;

                inode->i_fop = &mqueue_file_operations;
                inode->i_size = FILENT_SIZE;
                /* mqueue specific info */
                info = MQUEUE_I(inode);
                spin_lock_init(&info->lock);
                init_waitqueue_head(&info->wait_q);
                INIT_LIST_HEAD(&info->e_wait_q[0].list);
                INIT_LIST_HEAD(&info->e_wait_q[1].list);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
                info->qsize = 0;
                info->user = NULL;      /* set when all is ok */
                info->msg_tree = RB_ROOT;
                info->node_cache = NULL;
                memset(&info->attr, 0, sizeof(info->attr));
                info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                           ipc_ns->mq_msg_default);
                info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                            ipc_ns->mq_msgsize_default);
                if (attr) {
                        info->attr.mq_maxmsg = attr->mq_maxmsg;
                        info->attr.mq_msgsize = attr->mq_msgsize;
                }
                /*
                 * We used to allocate a static array of pointers and account
                 * the size of that array as well as one msg_msg struct per
                 * possible message into the queue size. That's no longer
                 * accurate as the queue is now an rbtree and will grow and
                 * shrink depending on usage patterns.  We can, however, still
                 * account one msg_msg struct per message, but the nodes are
                 * allocated depending on priority usage, and most programs
                 * only use one, or a handful, of priorities.  However, since
                 * this is pinned memory, we need to assume worst case, so
                 * that means the min(mq_maxmsg, max_priorities) * struct
                 * posix_msg_tree_node.
                 */
                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                        sizeof(struct posix_msg_tree_node);

                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                          info->attr.mq_msgsize);

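                /*
                 * Worked example (illustrative arithmetic only; the struct
                 * sizes below are assumptions for a 64-bit build): with the
                 * common defaults mq_maxmsg = 10 and mq_msgsize = 8192, and
                 * taking both struct msg_msg and struct posix_msg_tree_node
                 * to be 48 bytes, the tree overhead is
                 * 10 * 48 + min(10, MQ_PRIO_MAX) * 48 = 960 bytes and the
                 * message space is 10 * 8192 = 81920 bytes, so
                 * mq_bytes = 82880 is charged against RLIMIT_MSGQUEUE below.
                 */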
                spin_lock(&mq_lock);
                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
                    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
                        spin_unlock(&mq_lock);
                        /* mqueue_evict_inode() releases the queued messages */
                        ret = -EMFILE;
                        goto out_inode;
                }
                u->mq_bytes += mq_bytes;
                spin_unlock(&mq_lock);

                /* all is ok */
                info->user = get_uid(u);
        } else if (S_ISDIR(mode)) {
                inc_nlink(inode);
                /* Some things misbehave if size == 0 on a directory */
                inode->i_size = 2 * DIRENT_SIZE;
                inode->i_op = &mqueue_dir_inode_operations;
                inode->i_fop = &simple_dir_operations;
        }

        return inode;
out_inode:
        iput(inode);
err:
        return ERR_PTR(ret);
}

static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
{
        struct inode *inode;
        struct ipc_namespace *ns = data;

        sb->s_blocksize = PAGE_SIZE;
        sb->s_blocksize_bits = PAGE_SHIFT;
        sb->s_magic = MQUEUE_MAGIC;
        sb->s_op = &mqueue_super_ops;

        inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
        if (IS_ERR(inode))
                return PTR_ERR(inode);

        sb->s_root = d_make_root(inode);
        if (!sb->s_root)
                return -ENOMEM;
        return 0;
}

static struct dentry *mqueue_mount(struct file_system_type *fs_type,
                         int flags, const char *dev_name,
                         void *data)
{
        if (!(flags & MS_KERNMOUNT)) {
                struct ipc_namespace *ns = current->nsproxy->ipc_ns;
                /* Don't allow mounting unless the caller has CAP_SYS_ADMIN
                 * over the ipc namespace.
                 */
                if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
                        return ERR_PTR(-EPERM);

                data = ns;
        }
        return mount_ns(fs_type, flags, data, mqueue_fill_super);
}

static void init_once(void *foo)
{
        struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;

        inode_init_once(&p->vfs_inode);
}

static struct inode *mqueue_alloc_inode(struct super_block *sb)
{
        struct mqueue_inode_info *ei;

        ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
        if (!ei)
                return NULL;
        return &ei->vfs_inode;
}

static void mqueue_i_callback(struct rcu_head *head)
{
        struct inode *inode = container_of(head, struct inode, i_rcu);
        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
}

static void mqueue_destroy_inode(struct inode *inode)
{
        call_rcu(&inode->i_rcu, mqueue_i_callback);
}

static void mqueue_evict_inode(struct inode *inode)
{
        struct mqueue_inode_info *info;
        struct user_struct *user;
        unsigned long mq_bytes, mq_treesize;
        struct ipc_namespace *ipc_ns;
        struct msg_msg *msg;

        clear_inode(inode);

        if (S_ISDIR(inode->i_mode))
                return;

        ipc_ns = get_ns_from_inode(inode);
        info = MQUEUE_I(inode);
        spin_lock(&info->lock);
        while ((msg = msg_get(info)) != NULL)
                free_msg(msg);
        kfree(info->node_cache);
        spin_unlock(&info->lock);

        /* Total amount of bytes accounted for the mqueue */
        mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
                min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
                sizeof(struct posix_msg_tree_node);

        mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
                                  info->attr.mq_msgsize);

        user = info->user;
        if (user) {
                spin_lock(&mq_lock);
                user->mq_bytes -= mq_bytes;
                /*
                 * get_ns_from_inode() ensures that the
                 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
                 * to which we now hold a reference, or it is NULL.
                 * We can't put it here under mq_lock, though.
                 */
                if (ipc_ns)
                        ipc_ns->mq_queues_count--;
                spin_unlock(&mq_lock);
                free_uid(user);
        }
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
}

static int mqueue_create(struct inode *dir, struct dentry *dentry,
                                umode_t mode, bool excl)
{
        struct inode *inode;
        struct mq_attr *attr = dentry->d_fsdata;
        int error;
        struct ipc_namespace *ipc_ns;

        spin_lock(&mq_lock);
        ipc_ns = __get_ns_from_inode(dir);
        if (!ipc_ns) {
                error = -EACCES;
                goto out_unlock;
        }

        if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
            !capable(CAP_SYS_RESOURCE)) {
                error = -ENOSPC;
                goto out_unlock;
        }
        ipc_ns->mq_queues_count++;
        spin_unlock(&mq_lock);

        inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
        if (IS_ERR(inode)) {
                error = PTR_ERR(inode);
                spin_lock(&mq_lock);
                ipc_ns->mq_queues_count--;
                goto out_unlock;
        }

        put_ipc_ns(ipc_ns);
        dir->i_size += DIRENT_SIZE;
        dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;

        d_instantiate(dentry, inode);
        dget(dentry);
        return 0;
out_unlock:
        spin_unlock(&mq_lock);
        if (ipc_ns)
                put_ipc_ns(ipc_ns);
        return error;
}

static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
{
        struct inode *inode = d_inode(dentry);

        dir->i_ctime = dir->i_mtime = dir->i_atime = CURRENT_TIME;
        dir->i_size -= DIRENT_SIZE;
        drop_nlink(inode);
        dput(dentry);
        return 0;
}

/*
 * This is the routine for reads from a queue file.
 * To avoid implementing some form of mq_receive here, we allow reading
 * only the queue size and notification info (the only values that are
 * interesting from the user's point of view and that aren't accessible
 * through the standard routines).
 */
static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
                                size_t count, loff_t *off)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        char buffer[FILENT_SIZE];
        ssize_t ret;

        spin_lock(&info->lock);
        snprintf(buffer, sizeof(buffer),
                        "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
                        info->qsize,
                        info->notify_owner ? info->notify.sigev_notify : 0,
                        (info->notify_owner &&
                         info->notify.sigev_notify == SIGEV_SIGNAL) ?
                                info->notify.sigev_signo : 0,
                        pid_vnr(info->notify_owner));
        spin_unlock(&info->lock);
        buffer[sizeof(buffer)-1] = '\0';

        ret = simple_read_from_buffer(u_data, count, off, buffer,
                                strlen(buffer));
        if (ret <= 0)
                return ret;

        file_inode(filp)->i_atime = file_inode(filp)->i_ctime = CURRENT_TIME;
        return ret;
}
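
/*
 * Example (userspace view, not kernel code): with the mqueue filesystem
 * mounted at /dev/mqueue, reading a queue file returns the line built
 * above, e.g.
 *
 *      $ cat /dev/mqueue/myqueue
 *      QSIZE:129 NOTIFY:2 SIGNO:0 NOTIFY_PID:8260
 *
 * (column padding trimmed here; the values are made up for illustration).
 */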

static int mqueue_flush_file(struct file *filp, fl_owner_t id)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));

        spin_lock(&info->lock);
        if (task_tgid(current) == info->notify_owner)
                remove_notification(info);

        spin_unlock(&info->lock);
        return 0;
}

static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
{
        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
        int retval = 0;

        poll_wait(filp, &info->wait_q, poll_tab);

        spin_lock(&info->lock);
        if (info->attr.mq_curmsgs)
                retval = POLLIN | POLLRDNORM;

        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
                retval |= POLLOUT | POLLWRNORM;
        spin_unlock(&info->lock);

        return retval;
}
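
/*
 * Example (userspace sketch of Linux-specific behavior; not part of the
 * kernel source): because mqueue descriptors are ordinary file
 * descriptors here, a program can wait for a message with poll(2):
 *
 *      struct pollfd pfd = { .fd = (int)mqdes, .events = POLLIN };
 *      if (poll(&pfd, 1, -1) == 1 && (pfd.revents & POLLIN))
 *              mq_receive(mqdes, buf, buflen, NULL);
 *
 * POSIX does not guarantee that an mqd_t is a file descriptor; treating
 * it as one is a Linux extension.
 */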

/* Adds current to info->e_wait_q[sr] before element with smaller prio */
static void wq_add(struct mqueue_inode_info *info, int sr,
                        struct ext_wait_queue *ewp)
{
        struct ext_wait_queue *walk;

        ewp->task = current;

        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
                if (walk->task->static_prio <= current->static_prio) {
                        list_add_tail(&ewp->list, &walk->list);
                        return;
                }
        }
        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
}

/*
 * Puts the current task to sleep. The caller must hold the queue lock;
 * after return the lock is no longer held.
 * sr: SEND or RECV
 */
static int wq_sleep(struct mqueue_inode_info *info, int sr,
                    ktime_t *timeout, struct ext_wait_queue *ewp)
{
        int retval;
        signed long time;

        wq_add(info, sr, ewp);

        for (;;) {
                __set_current_state(TASK_INTERRUPTIBLE);

                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);

                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out;
                }
                spin_lock(&info->lock);
                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out_unlock;
                }
                if (signal_pending(current)) {
                        retval = -ERESTARTSYS;
                        break;
                }
                if (time == 0) {
                        retval = -ETIMEDOUT;
                        break;
                }
        }
        list_del(&ewp->list);
out_unlock:
        spin_unlock(&info->lock);
out:
        return retval;
}

/*
 * Returns waiting task that should be serviced first or NULL if none exists
 */
static struct ext_wait_queue *wq_get_first_waiter(
                struct mqueue_inode_info *info, int sr)
{
        struct list_head *ptr;

        ptr = info->e_wait_q[sr].list.prev;
        if (ptr == &info->e_wait_q[sr].list)
                return NULL;
        return list_entry(ptr, struct ext_wait_queue, list);
}


static inline void set_cookie(struct sk_buff *skb, char code)
{
        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
}

/*
 * This function exists only to keep sys_mq_timedsend() from growing
 * too long.
 */
static void __do_notify(struct mqueue_inode_info *info)
{
        /* Notification is invoked when a process has registered for it,
         * no process is waiting synchronously for a message, AND the
         * state of the queue changed from empty to non-empty. At this
         * point we can be sure that no one is waiting synchronously. */
        if (info->notify_owner &&
            info->attr.mq_curmsgs == 1) {
                struct siginfo sig_i;
                switch (info->notify.sigev_notify) {
                case SIGEV_NONE:
                        break;
                case SIGEV_SIGNAL:
                        /* sends signal */

                        sig_i.si_signo = info->notify.sigev_signo;
                        sig_i.si_errno = 0;
                        sig_i.si_code = SI_MESGQ;
                        sig_i.si_value = info->notify.sigev_value;
                        /* map current pid/uid into info->owner's namespaces */
                        rcu_read_lock();
                        sig_i.si_pid = task_tgid_nr_ns(current,
                                                ns_of_pid(info->notify_owner));
                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
                        rcu_read_unlock();

                        kill_pid_info(info->notify.sigev_signo,
                                      &sig_i, info->notify_owner);
                        break;
                case SIGEV_THREAD:
                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
                        netlink_sendskb(info->notify_sock, info->notify_cookie);
                        break;
                }
                /* after delivering a notification, unregister the process */
                put_pid(info->notify_owner);
                put_user_ns(info->notify_user_ns);
                info->notify_owner = NULL;
                info->notify_user_ns = NULL;
        }
        wake_up(&info->wait_q);
}
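
/*
 * Example (userspace sketch, not kernel code): registering for the
 * SIGEV_SIGNAL case handled above. The queue descriptor and signal
 * choice are arbitrary:
 *
 *      struct sigevent sev = {
 *              .sigev_notify = SIGEV_SIGNAL,
 *              .sigev_signo  = SIGUSR1,
 *      };
 *      mq_notify(mqdes, &sev);
 *
 * SIGUSR1 is then delivered with si_code == SI_MESGQ when the queue
 * goes from empty to non-empty and nobody is blocked in mq_receive().
 */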

static int prepare_timeout(const struct timespec __user *u_abs_timeout,
                           ktime_t *expires, struct timespec *ts)
{
        if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
                return -EFAULT;
        if (!timespec_valid(ts))
                return -EINVAL;

        *expires = timespec_to_ktime(*ts);
        return 0;
}

static void remove_notification(struct mqueue_inode_info *info)
{
        if (info->notify_owner != NULL &&
            info->notify.sigev_notify == SIGEV_THREAD) {
                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
                netlink_sendskb(info->notify_sock, info->notify_cookie);
        }
        put_pid(info->notify_owner);
        put_user_ns(info->notify_user_ns);
        info->notify_owner = NULL;
        info->notify_user_ns = NULL;
}

static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
{
        int mq_treesize;
        unsigned long total_size;

        if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
                return -EINVAL;
        if (capable(CAP_SYS_RESOURCE)) {
                if (attr->mq_maxmsg > HARD_MSGMAX ||
                    attr->mq_msgsize > HARD_MSGSIZEMAX)
                        return -EINVAL;
        } else {
                if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
                                attr->mq_msgsize > ipc_ns->mq_msgsize_max)
                        return -EINVAL;
        }
        /* check for overflow */
        if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
                return -EOVERFLOW;
        mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
                min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
                sizeof(struct posix_msg_tree_node);
        total_size = attr->mq_maxmsg * attr->mq_msgsize;
        if (total_size + mq_treesize < total_size)
                return -EOVERFLOW;
        return 0;
}
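
/*
 * Worked example (illustrative only): the ULONG_MAX/attr->mq_maxmsg test
 * above rejects attribute pairs whose product would wrap. On a 32-bit
 * kernel (ULONG_MAX == 2^32 - 1), mq_maxmsg = 65536 and
 * mq_msgsize = 65537 give 65537 > ULONG_MAX/65536 == 65535, so we
 * return -EOVERFLOW instead of letting 65536 * 65537 wrap around.
 */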

/*
 * Invoked when creating a new queue via sys_mq_open
 */
static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
                        struct path *path, int oflag, umode_t mode,
                        struct mq_attr *attr)
{
        const struct cred *cred = current_cred();
        int ret;

        if (attr) {
                ret = mq_attr_ok(ipc_ns, attr);
                if (ret)
                        return ERR_PTR(ret);
                /* store for use during create */
                path->dentry->d_fsdata = attr;
        } else {
                struct mq_attr def_attr;

                def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
                                         ipc_ns->mq_msg_default);
                def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
                                          ipc_ns->mq_msgsize_default);
                ret = mq_attr_ok(ipc_ns, &def_attr);
                if (ret)
                        return ERR_PTR(ret);
        }

        mode &= ~current_umask();
        ret = vfs_create(dir, path->dentry, mode, true);
        path->dentry->d_fsdata = NULL;
        if (ret)
                return ERR_PTR(ret);
        return dentry_open(path, oflag, cred);
}

/* Opens existing queue */
static struct file *do_open(struct path *path, int oflag)
{
        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
                                                  MAY_READ | MAY_WRITE };
        int acc;
        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
                return ERR_PTR(-EINVAL);
        acc = oflag2acc[oflag & O_ACCMODE];
        if (inode_permission(d_inode(path->dentry), acc))
                return ERR_PTR(-EACCES);
        return dentry_open(path, oflag, current_cred());
}
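
/*
 * Worked example (illustrative only): O_ACCMODE masks the two low access
 * bits, so oflag2acc maps O_RDONLY (0) -> MAY_READ, O_WRONLY (1) ->
 * MAY_WRITE and O_RDWR (2) -> MAY_READ | MAY_WRITE, while the invalid
 * combination (O_RDWR | O_WRONLY) == 3 is caught explicitly above.
 */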

SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
                struct mq_attr __user *, u_attr)
{
        struct path path;
        struct file *filp;
        struct filename *name;
        struct mq_attr attr;
        int fd, error;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
        struct vfsmount *mnt = ipc_ns->mq_mnt;
        struct dentry *root = mnt->mnt_root;
        int ro;

        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
                return -EFAULT;

        audit_mq_open(oflag, mode, u_attr ? &attr : NULL);

        if (IS_ERR(name = getname(u_name)))
                return PTR_ERR(name);

        fd = get_unused_fd_flags(O_CLOEXEC);
        if (fd < 0)
                goto out_putname;

        ro = mnt_want_write(mnt);       /* we'll drop it in any case */
        error = 0;
        inode_lock(d_inode(root));
        path.dentry = lookup_one_len(name->name, root, strlen(name->name));
        if (IS_ERR(path.dentry)) {
                error = PTR_ERR(path.dentry);
                goto out_putfd;
        }
        path.mnt = mntget(mnt);

        if (oflag & O_CREAT) {
                if (d_really_is_positive(path.dentry)) {        /* entry already exists */
                        audit_inode(name, path.dentry, 0);
                        if (oflag & O_EXCL) {
                                error = -EEXIST;
                                goto out;
                        }
                        filp = do_open(&path, oflag);
                } else {
                        if (ro) {
                                error = ro;
                                goto out;
                        }
                        audit_inode_parent_hidden(name, root);
                        filp = do_create(ipc_ns, d_inode(root),
                                                &path, oflag, mode,
                                                u_attr ? &attr : NULL);
                }
        } else {
                if (d_really_is_negative(path.dentry)) {
                        error = -ENOENT;
                        goto out;
                }
                audit_inode(name, path.dentry, 0);
                filp = do_open(&path, oflag);
        }

        if (!IS_ERR(filp))
                fd_install(fd, filp);
        else
                error = PTR_ERR(filp);
out:
        path_put(&path);
out_putfd:
        if (error) {
                put_unused_fd(fd);
                fd = error;
        }
        inode_unlock(d_inode(root));
        if (!ro)
                mnt_drop_write(mnt);
out_putname:
        putname(name);
        return fd;
}
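
/*
 * Example (userspace sketch, not kernel code; link with -lrt on glibc).
 * The queue name and attributes are arbitrary:
 *
 *      struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *      mqd_t q = mq_open("/myqueue", O_CREAT | O_EXCL | O_RDWR,
 *                        0600, &attr);
 *      if (q == (mqd_t)-1)
 *              perror("mq_open");
 *
 * Note the portable name must start with '/'; the C library is expected
 * to validate that and pass the remainder down, since the lookup above
 * is relative to the mqueue mount root and rejects embedded slashes.
 */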

SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
{
        int err;
        struct filename *name;
        struct dentry *dentry;
        struct inode *inode = NULL;
        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
        struct vfsmount *mnt = ipc_ns->mq_mnt;

        name = getname(u_name);
        if (IS_ERR(name))
                return PTR_ERR(name);

        audit_inode_parent_hidden(name, mnt->mnt_root);
        err = mnt_want_write(mnt);
        if (err)
                goto out_name;
        inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
        dentry = lookup_one_len(name->name, mnt->mnt_root,
                                strlen(name->name));
        if (IS_ERR(dentry)) {
                err = PTR_ERR(dentry);
                goto out_unlock;
        }

        inode = d_inode(dentry);
        if (!inode) {
                err = -ENOENT;
        } else {
                ihold(inode);
                err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
        }
        dput(dentry);

out_unlock:
        inode_unlock(d_inode(mnt->mnt_root));
        if (inode)
                iput(inode);
        mnt_drop_write(mnt);
out_name:
        putname(name);

        return err;
}

/* Pipelined send and receive functions.
 *
 * If a receiver finds no waiting message, then it registers itself in the
 * list of waiting receivers. A sender checks that list before adding the new
 * message into the message tree. If there is a waiting receiver, then it
 * bypasses the message tree and directly hands the message over to the
 * receiver. The receiver accepts the message and returns without grabbing the
 * queue spinlock:
 *
 * - Set pointer to message.
 * - Queue the receiver task for later wakeup (without the info->lock).
 * - Update its state to STATE_READY. Now the receiver can continue.
 * - Wake up the process after the lock is dropped. Should the process wake up
 *   before this wakeup (due to a timeout or a signal) it will either see
 *   STATE_READY and continue or acquire the lock to check the state again.
 *
 * The same algorithm is used for senders.
 */

/* pipelined_send() - send a message directly to the task waiting in
 * sys_mq_timedreceive() (without inserting message into a queue).
 */
static inline void pipelined_send(struct wake_q_head *wake_q,
                                  struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
{
        receiver->msg = message;
        list_del(&receiver->list);
        wake_q_add(wake_q, receiver->task);
        /*
         * Rely on the implicit cmpxchg barrier from wake_q_add such
         * that we can ensure that updating receiver->state is the last
         * write operation: As once set, the receiver can continue,
         * and if we don't have the reference count from the wake_q,
         * yet, at that point we can later have a use-after-free
         * condition and bogus wakeup.
         */
        receiver->state = STATE_READY;
}

/* pipelined_receive() - if there is a task waiting in sys_mq_timedsend(),
 * take its message and put it into the queue (there is guaranteed to be
 * one free slot). */
static inline void pipelined_receive(struct wake_q_head *wake_q,
                                     struct mqueue_inode_info *info)
{
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);

        if (!sender) {
                /* for poll */
                wake_up_interruptible(&info->wait_q);
                return;
        }
        if (msg_insert(sender->msg, info))
                return;

        list_del(&sender->list);
        wake_q_add(wake_q, sender->task);
        sender->state = STATE_READY;
}

SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int, msg_prio,
                const struct timespec __user *, u_abs_timeout)
{
        struct fd f;
        struct inode *inode;
        struct ext_wait_queue wait;
        struct ext_wait_queue *receiver;
        struct msg_msg *msg_ptr;
        struct mqueue_inode_info *info;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;
        WAKE_Q(wake_q);

        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
                if (res)
                        return res;
                timeout = &expires;
        }

        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
                return -EINVAL;

        audit_mq_sendrecv(mqdes, msg_len, msg_prio, timeout ? &ts : NULL);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
                ret = -EBADF;
                goto out_fput;
        }

        if (unlikely(msg_len > info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /* First try to allocate memory, before doing anything with
         * existing queues. */
        msg_ptr = load_msg(u_msg_ptr, msg_len);
        if (IS_ERR(msg_ptr)) {
                ret = PTR_ERR(msg_ptr);
                goto out_fput;
        }
        msg_ptr->m_ts = msg_len;
        msg_ptr->m_type = msg_prio;

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
                new_leaf = NULL;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
                if (f.file->f_flags & O_NONBLOCK) {
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.msg = (void *) msg_ptr;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, SEND, timeout, &wait);
                        /*
                         * wq_sleep must be called with info->lock held, and
                         * returns with the lock released
                         */
                        goto out_free;
                }
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
                        pipelined_send(&wake_q, info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
                        if (ret)
                                goto out_unlock;
                        __do_notify(info);
                }
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;
        }
out_unlock:
        spin_unlock(&info->lock);
        wake_up_q(&wake_q);
out_free:
        if (ret)
                free_msg(msg_ptr);
out_fput:
        fdput(f);
out:
        return ret;
}
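
/*
 * Example (userspace sketch, not kernel code): sending with a 5 second
 * absolute timeout against CLOCK_REALTIME, matching the
 * prepare_timeout()/wq_sleep() handling above:
 *
 *      struct timespec ts;
 *      clock_gettime(CLOCK_REALTIME, &ts);
 *      ts.tv_sec += 5;
 *      if (mq_timedsend(q, buf, len, 0, &ts) == -1 && errno == ETIMEDOUT)
 *              ;       // queue stayed full for the whole 5 seconds
 */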

SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                size_t, msg_len, unsigned int __user *, u_msg_prio,
                const struct timespec __user *, u_abs_timeout)
{
        ssize_t ret;
        struct msg_msg *msg_ptr;
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;
        struct ext_wait_queue wait;
        ktime_t expires, *timeout = NULL;
        struct timespec ts;
        struct posix_msg_tree_node *new_leaf = NULL;

        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
                if (res)
                        return res;
                timeout = &expires;
        }

        audit_mq_sendrecv(mqdes, msg_len, 0, timeout ? &ts : NULL);

        f = fdget(mqdes);
        if (unlikely(!f.file)) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);
        audit_file(f.file);

        if (unlikely(!(f.file->f_mode & FMODE_READ))) {
                ret = -EBADF;
                goto out_fput;
        }

        /* checks if buffer is big enough */
        if (unlikely(msg_len < info->attr.mq_msgsize)) {
                ret = -EMSGSIZE;
                goto out_fput;
        }

        /*
         * msg_insert really wants us to have a valid, spare node struct so
         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
         * fall back to that if necessary.
         */
        if (!info->node_cache)
                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);

        spin_lock(&info->lock);

        if (!info->node_cache && new_leaf) {
                /* Save our speculative allocation into the cache */
                INIT_LIST_HEAD(&new_leaf->msg_list);
                info->node_cache = new_leaf;
        } else {
                kfree(new_leaf);
        }

        if (info->attr.mq_curmsgs == 0) {
                if (f.file->f_flags & O_NONBLOCK) {
                        spin_unlock(&info->lock);
                        ret = -EAGAIN;
                } else {
                        wait.task = current;
                        wait.state = STATE_NONE;
                        ret = wq_sleep(info, RECV, timeout, &wait);
                        msg_ptr = wait.msg;
                }
        } else {
                WAKE_Q(wake_q);

                msg_ptr = msg_get(info);

                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;

                /* There is now free space in queue. */
                pipelined_receive(&wake_q, info);
                spin_unlock(&info->lock);
                wake_up_q(&wake_q);
                ret = 0;
        }
        if (ret == 0) {
                ret = msg_ptr->m_ts;

                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
                        store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
                        ret = -EFAULT;
                }
                free_msg(msg_ptr);
        }
out_fput:
        fdput(f);
out:
        return ret;
}
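
/*
 * Example (userspace sketch, not kernel code): the receive buffer must
 * be at least mq_msgsize bytes or the call fails with EMSGSIZE, as
 * checked above:
 *
 *      struct mq_attr attr;
 *      unsigned int prio;
 *      mq_getattr(q, &attr);
 *      char *buf = malloc(attr.mq_msgsize);
 *      ssize_t n = mq_receive(q, buf, attr.mq_msgsize, &prio);
 *
 * n is the length of the received message and prio its priority.
 */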

/*
 * Note: the case where a user asks us to deregister (with a NULL
 * pointer) while not currently being the owner of the notification is
 * silently ignored. POSIX does not explicitly define this behavior.
 */
SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
                const struct sigevent __user *, u_notification)
{
        int ret;
        struct fd f;
        struct sock *sock;
        struct inode *inode;
        struct sigevent notification;
        struct mqueue_inode_info *info;
        struct sk_buff *nc;

        if (u_notification) {
                if (copy_from_user(&notification, u_notification,
                                        sizeof(struct sigevent)))
                        return -EFAULT;
        }

        audit_mq_notify(mqdes, u_notification ? &notification : NULL);

        nc = NULL;
        sock = NULL;
        if (u_notification != NULL) {
                if (unlikely(notification.sigev_notify != SIGEV_NONE &&
                             notification.sigev_notify != SIGEV_SIGNAL &&
                             notification.sigev_notify != SIGEV_THREAD))
                        return -EINVAL;
                if (notification.sigev_notify == SIGEV_SIGNAL &&
                        !valid_signal(notification.sigev_signo)) {
                        return -EINVAL;
                }
                if (notification.sigev_notify == SIGEV_THREAD) {
                        long timeo;

                        /* create the notify skb */
                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
                        if (!nc) {
                                ret = -ENOMEM;
                                goto out;
                        }
                        if (copy_from_user(nc->data,
                                        notification.sigev_value.sival_ptr,
                                        NOTIFY_COOKIE_LEN)) {
                                ret = -EFAULT;
                                goto out;
                        }

                        /* TODO: add a header? */
                        skb_put(nc, NOTIFY_COOKIE_LEN);
                        /* and attach it to the socket */
retry:
                        f = fdget(notification.sigev_signo);
                        if (!f.file) {
                                ret = -EBADF;
                                goto out;
                        }
                        sock = netlink_getsockbyfilp(f.file);
                        fdput(f);
                        if (IS_ERR(sock)) {
                                ret = PTR_ERR(sock);
                                sock = NULL;
                                goto out;
                        }

                        timeo = MAX_SCHEDULE_TIMEOUT;
                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
                        if (ret == 1)
                                goto retry;
                        if (ret) {
                                sock = NULL;
                                nc = NULL;
                                goto out;
                        }
                }
        }

        f = fdget(mqdes);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        ret = 0;
        spin_lock(&info->lock);
        if (u_notification == NULL) {
                if (info->notify_owner == task_tgid(current)) {
                        remove_notification(info);
                        inode->i_atime = inode->i_ctime = CURRENT_TIME;
                }
        } else if (info->notify_owner != NULL) {
                ret = -EBUSY;
        } else {
                switch (notification.sigev_notify) {
                case SIGEV_NONE:
                        info->notify.sigev_notify = SIGEV_NONE;
                        break;
                case SIGEV_THREAD:
                        info->notify_sock = sock;
                        info->notify_cookie = nc;
                        sock = NULL;
                        nc = NULL;
                        info->notify.sigev_notify = SIGEV_THREAD;
                        break;
                case SIGEV_SIGNAL:
                        info->notify.sigev_signo = notification.sigev_signo;
                        info->notify.sigev_value = notification.sigev_value;
                        info->notify.sigev_notify = SIGEV_SIGNAL;
                        break;
                }

                info->notify_owner = get_pid(task_tgid(current));
                info->notify_user_ns = get_user_ns(current_user_ns());
                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }
        spin_unlock(&info->lock);
out_fput:
        fdput(f);
out:
        if (sock)
                netlink_detachskb(sock, nc);
        else if (nc)
                dev_kfree_skb(nc);

        return ret;
}

SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
                const struct mq_attr __user *, u_mqstat,
                struct mq_attr __user *, u_omqstat)
{
        int ret;
        struct mq_attr mqstat, omqstat;
        struct fd f;
        struct inode *inode;
        struct mqueue_inode_info *info;

        if (u_mqstat != NULL) {
                if (copy_from_user(&mqstat, u_mqstat, sizeof(struct mq_attr)))
                        return -EFAULT;
                if (mqstat.mq_flags & (~O_NONBLOCK))
                        return -EINVAL;
        }

        f = fdget(mqdes);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        inode = file_inode(f.file);
        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
                ret = -EBADF;
                goto out_fput;
        }
        info = MQUEUE_I(inode);

        spin_lock(&info->lock);

        omqstat = info->attr;
        omqstat.mq_flags = f.file->f_flags & O_NONBLOCK;
        if (u_mqstat) {
                audit_mq_getsetattr(mqdes, &mqstat);
                spin_lock(&f.file->f_lock);
                if (mqstat.mq_flags & O_NONBLOCK)
                        f.file->f_flags |= O_NONBLOCK;
                else
                        f.file->f_flags &= ~O_NONBLOCK;
                spin_unlock(&f.file->f_lock);

                inode->i_atime = inode->i_ctime = CURRENT_TIME;
        }

        spin_unlock(&info->lock);

        ret = 0;
        if (u_omqstat != NULL && copy_to_user(u_omqstat, &omqstat,
                                                sizeof(struct mq_attr)))
                ret = -EFAULT;

out_fput:
        fdput(f);
out:
        return ret;
}
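
/*
 * Example (userspace sketch, not kernel code): the only mutable flag is
 * O_NONBLOCK; the other fields of the passed struct mq_attr are ignored
 * by this syscall:
 *
 *      struct mq_attr newattr = { .mq_flags = O_NONBLOCK }, old;
 *      mq_setattr(q, &newattr, &old);  // enable non-blocking I/O
 *      // old now holds the previous attributes, as mq_getattr() would
 */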

static const struct inode_operations mqueue_dir_inode_operations = {
        .lookup = simple_lookup,
        .create = mqueue_create,
        .unlink = mqueue_unlink,
};

static const struct file_operations mqueue_file_operations = {
        .flush = mqueue_flush_file,
        .poll = mqueue_poll_file,
        .read = mqueue_read_file,
        .llseek = default_llseek,
};

static const struct super_operations mqueue_super_ops = {
        .alloc_inode = mqueue_alloc_inode,
        .destroy_inode = mqueue_destroy_inode,
        .evict_inode = mqueue_evict_inode,
        .statfs = simple_statfs,
};

static struct file_system_type mqueue_fs_type = {
        .name = "mqueue",
        .mount = mqueue_mount,
        .kill_sb = kill_litter_super,
        .fs_flags = FS_USERNS_MOUNT,
};

int mq_init_ns(struct ipc_namespace *ns)
{
        ns->mq_queues_count  = 0;
        ns->mq_queues_max    = DFLT_QUEUESMAX;
        ns->mq_msg_max       = DFLT_MSGMAX;
        ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
        ns->mq_msg_default   = DFLT_MSG;
        ns->mq_msgsize_default  = DFLT_MSGSIZE;

        ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
        if (IS_ERR(ns->mq_mnt)) {
                int err = PTR_ERR(ns->mq_mnt);
                ns->mq_mnt = NULL;
                return err;
        }
        return 0;
}

void mq_clear_sbinfo(struct ipc_namespace *ns)
{
        ns->mq_mnt->mnt_sb->s_fs_info = NULL;
}

void mq_put_mnt(struct ipc_namespace *ns)
{
        kern_unmount(ns->mq_mnt);
}

static int __init init_mqueue_fs(void)
{
        int error;

        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
                                sizeof(struct mqueue_inode_info), 0,
                                SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
        if (mqueue_inode_cachep == NULL)
                return -ENOMEM;

        /* ignore failures - they are not fatal */
        mq_sysctl_table = mq_register_sysctl_table();

        error = register_filesystem(&mqueue_fs_type);
        if (error)
                goto out_sysctl;

        spin_lock_init(&mq_lock);

        error = mq_init_ns(&init_ipc_ns);
        if (error)
                goto out_filesystem;

        return 0;

out_filesystem:
        unregister_filesystem(&mqueue_fs_type);
out_sysctl:
        if (mq_sysctl_table)
                unregister_sysctl_table(mq_sysctl_table);
        kmem_cache_destroy(mqueue_inode_cachep);
        return error;
}

device_initcall(init_mqueue_fs);