linux/ipc/mqueue.c
   1/*
   2 * POSIX message queues filesystem for Linux.
   3 *
   4 * Copyright (C) 2003,2004  Krzysztof Benedyczak    (golbi@mat.uni.torun.pl)
   5 *                          Michal Wronski          (michal.wronski@gmail.com)
   6 *
   7 * Spinlocks:               Mohamed Abbas           (abbas.mohamed@intel.com)
   8 * Lockless receive & send, fd based notify:
   9 *                          Manfred Spraul          (manfred@colorfullife.com)
  10 *
  11 * Audit:                   George Wilson           (ltcgcw@us.ibm.com)
  12 *
  13 * This file is released under the GPL.
  14 */
  15
  16#include <linux/capability.h>
  17#include <linux/init.h>
  18#include <linux/pagemap.h>
  19#include <linux/file.h>
  20#include <linux/mount.h>
  21#include <linux/namei.h>
  22#include <linux/sysctl.h>
  23#include <linux/poll.h>
  24#include <linux/mqueue.h>
  25#include <linux/msg.h>
  26#include <linux/skbuff.h>
  27#include <linux/vmalloc.h>
  28#include <linux/netlink.h>
  29#include <linux/syscalls.h>
  30#include <linux/audit.h>
  31#include <linux/signal.h>
  32#include <linux/mutex.h>
  33#include <linux/nsproxy.h>
  34#include <linux/pid.h>
  35#include <linux/ipc_namespace.h>
  36#include <linux/user_namespace.h>
  37#include <linux/slab.h>
  38#include <linux/sched/wake_q.h>
  39#include <linux/sched/signal.h>
  40#include <linux/sched/user.h>
  41
  42#include <net/sock.h>
  43#include "util.h"
  44
  45#define MQUEUE_MAGIC    0x19800202
  46#define DIRENT_SIZE     20
  47#define FILENT_SIZE     80
  48
  49#define SEND            0
  50#define RECV            1
  51
  52#define STATE_NONE      0
  53#define STATE_READY     1
  54
  55struct posix_msg_tree_node {
  56        struct rb_node          rb_node;
  57        struct list_head        msg_list;
  58        int                     priority;
  59};
  60
  61struct ext_wait_queue {         /* queue of sleeping tasks */
  62        struct task_struct *task;
  63        struct list_head list;
  64        struct msg_msg *msg;    /* ptr of loaded message */
  65        int state;              /* one of STATE_* values */
  66};
  67
  68struct mqueue_inode_info {
  69        spinlock_t lock;
  70        struct inode vfs_inode;
  71        wait_queue_head_t wait_q;
  72
  73        struct rb_root msg_tree;
  74        struct posix_msg_tree_node *node_cache;
  75        struct mq_attr attr;
  76
  77        struct sigevent notify;
  78        struct pid *notify_owner;
  79        struct user_namespace *notify_user_ns;
  80        struct user_struct *user;       /* user who created, for accounting */
  81        struct sock *notify_sock;
  82        struct sk_buff *notify_cookie;
  83
  84        /* for tasks waiting for free space and messages, respectively */
  85        struct ext_wait_queue e_wait_q[2];
  86
  87        unsigned long qsize; /* size of queue in memory (sum of all msgs) */
  88};
  89
  90static const struct inode_operations mqueue_dir_inode_operations;
  91static const struct file_operations mqueue_file_operations;
  92static const struct super_operations mqueue_super_ops;
  93static void remove_notification(struct mqueue_inode_info *info);
  94
  95static struct kmem_cache *mqueue_inode_cachep;
  96
  97static struct ctl_table_header *mq_sysctl_table;
  98
  99static inline struct mqueue_inode_info *MQUEUE_I(struct inode *inode)
 100{
 101        return container_of(inode, struct mqueue_inode_info, vfs_inode);
 102}
 103
 104/*
 105 * This routine should be called with the mq_lock held.
 106 */
 107static inline struct ipc_namespace *__get_ns_from_inode(struct inode *inode)
 108{
 109        return get_ipc_ns(inode->i_sb->s_fs_info);
 110}
 111
 112static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
 113{
 114        struct ipc_namespace *ns;
 115
 116        spin_lock(&mq_lock);
 117        ns = __get_ns_from_inode(inode);
 118        spin_unlock(&mq_lock);
 119        return ns;
 120}
 121
  122/* Auxiliary functions to manipulate the message list */
 123static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
 124{
 125        struct rb_node **p, *parent = NULL;
 126        struct posix_msg_tree_node *leaf;
 127
 128        p = &info->msg_tree.rb_node;
 129        while (*p) {
 130                parent = *p;
 131                leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 132
 133                if (likely(leaf->priority == msg->m_type))
 134                        goto insert_msg;
 135                else if (msg->m_type < leaf->priority)
 136                        p = &(*p)->rb_left;
 137                else
 138                        p = &(*p)->rb_right;
 139        }
 140        if (info->node_cache) {
 141                leaf = info->node_cache;
 142                info->node_cache = NULL;
 143        } else {
 144                leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
 145                if (!leaf)
 146                        return -ENOMEM;
 147                INIT_LIST_HEAD(&leaf->msg_list);
 148        }
 149        leaf->priority = msg->m_type;
 150        rb_link_node(&leaf->rb_node, parent, p);
 151        rb_insert_color(&leaf->rb_node, &info->msg_tree);
 152insert_msg:
 153        info->attr.mq_curmsgs++;
 154        info->qsize += msg->m_ts;
 155        list_add_tail(&msg->m_list, &leaf->msg_list);
 156        return 0;
 157}
 158
 159static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
 160{
 161        struct rb_node **p, *parent = NULL;
 162        struct posix_msg_tree_node *leaf;
 163        struct msg_msg *msg;
 164
 165try_again:
 166        p = &info->msg_tree.rb_node;
 167        while (*p) {
 168                parent = *p;
 169                /*
 170                 * During insert, low priorities go to the left and high to the
 171                 * right.  On receive, we want the highest priorities first, so
 172                 * walk all the way to the right.
 173                 */
 174                p = &(*p)->rb_right;
 175        }
 176        if (!parent) {
 177                if (info->attr.mq_curmsgs) {
 178                        pr_warn_once("Inconsistency in POSIX message queue, "
 179                                     "no tree element, but supposedly messages "
 180                                     "should exist!\n");
 181                        info->attr.mq_curmsgs = 0;
 182                }
 183                return NULL;
 184        }
 185        leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
 186        if (unlikely(list_empty(&leaf->msg_list))) {
 187                pr_warn_once("Inconsistency in POSIX message queue, "
 188                             "empty leaf node but we haven't implemented "
 189                             "lazy leaf delete!\n");
 190                rb_erase(&leaf->rb_node, &info->msg_tree);
 191                if (info->node_cache) {
 192                        kfree(leaf);
 193                } else {
 194                        info->node_cache = leaf;
 195                }
 196                goto try_again;
 197        } else {
 198                msg = list_first_entry(&leaf->msg_list,
 199                                       struct msg_msg, m_list);
 200                list_del(&msg->m_list);
 201                if (list_empty(&leaf->msg_list)) {
 202                        rb_erase(&leaf->rb_node, &info->msg_tree);
 203                        if (info->node_cache) {
 204                                kfree(leaf);
 205                        } else {
 206                                info->node_cache = leaf;
 207                        }
 208                }
 209        }
 210        info->attr.mq_curmsgs--;
 211        info->qsize -= msg->m_ts;
 212        return msg;
 213}
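
/*
 * Illustrative userspace sketch (not part of the kernel source): the rbtree
 * ordering implemented by msg_insert()/msg_get() above is what makes
 * mq_receive() return the highest-priority message first, FIFO within a
 * priority.  The queue name and sizes below are arbitrary assumptions for
 * the example; build it separately with "cc demo.c -lrt".
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 64 };
	char buf[64];
	unsigned int prio;
	mqd_t q = mq_open("/prio-demo", O_CREAT | O_RDWR, 0600, &attr);

	mq_send(q, "low", 4, 1);	/* priority 1 */
	mq_send(q, "high", 5, 5);	/* priority 5 */
	mq_receive(q, buf, sizeof(buf), &prio);
	printf("got \"%s\" at priority %u\n", buf, prio);	/* "high", 5 */
	mq_close(q);
	mq_unlink("/prio-demo");
	return 0;
}
#endif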
 214
 215static struct inode *mqueue_get_inode(struct super_block *sb,
 216                struct ipc_namespace *ipc_ns, umode_t mode,
 217                struct mq_attr *attr)
 218{
 219        struct user_struct *u = current_user();
 220        struct inode *inode;
 221        int ret = -ENOMEM;
 222
 223        inode = new_inode(sb);
 224        if (!inode)
 225                goto err;
 226
 227        inode->i_ino = get_next_ino();
 228        inode->i_mode = mode;
 229        inode->i_uid = current_fsuid();
 230        inode->i_gid = current_fsgid();
 231        inode->i_mtime = inode->i_ctime = inode->i_atime = current_time(inode);
 232
 233        if (S_ISREG(mode)) {
 234                struct mqueue_inode_info *info;
 235                unsigned long mq_bytes, mq_treesize;
 236
 237                inode->i_fop = &mqueue_file_operations;
 238                inode->i_size = FILENT_SIZE;
 239                /* mqueue specific info */
 240                info = MQUEUE_I(inode);
 241                spin_lock_init(&info->lock);
 242                init_waitqueue_head(&info->wait_q);
 243                INIT_LIST_HEAD(&info->e_wait_q[0].list);
 244                INIT_LIST_HEAD(&info->e_wait_q[1].list);
 245                info->notify_owner = NULL;
 246                info->notify_user_ns = NULL;
 247                info->qsize = 0;
 248                info->user = NULL;      /* set when all is ok */
 249                info->msg_tree = RB_ROOT;
 250                info->node_cache = NULL;
 251                memset(&info->attr, 0, sizeof(info->attr));
 252                info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
 253                                           ipc_ns->mq_msg_default);
 254                info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
 255                                            ipc_ns->mq_msgsize_default);
 256                if (attr) {
 257                        info->attr.mq_maxmsg = attr->mq_maxmsg;
 258                        info->attr.mq_msgsize = attr->mq_msgsize;
 259                }
 260                /*
 261                 * We used to allocate a static array of pointers and account
 262                 * the size of that array as well as one msg_msg struct per
 263                 * possible message into the queue size. That's no longer
 264                 * accurate as the queue is now an rbtree and will grow and
 265                 * shrink depending on usage patterns.  We can, however, still
 266                 * account one msg_msg struct per message, but the nodes are
 267                 * allocated depending on priority usage, and most programs
 268                 * only use one, or a handful, of priorities.  However, since
 269                 * this is pinned memory, we need to assume worst case, so
 270                 * that means the min(mq_maxmsg, max_priorities) * struct
 271                 * posix_msg_tree_node.
 272                 */
 273                mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 274                        min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 275                        sizeof(struct posix_msg_tree_node);
 276
 277                mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
 278                                          info->attr.mq_msgsize);
 279
 280                spin_lock(&mq_lock);
 281                if (u->mq_bytes + mq_bytes < u->mq_bytes ||
 282                    u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
 283                        spin_unlock(&mq_lock);
  284                        /* mqueue_evict_inode() releases any queued messages */
 285                        ret = -EMFILE;
 286                        goto out_inode;
 287                }
 288                u->mq_bytes += mq_bytes;
 289                spin_unlock(&mq_lock);
 290
 291                /* all is ok */
 292                info->user = get_uid(u);
 293        } else if (S_ISDIR(mode)) {
 294                inc_nlink(inode);
 295                /* Some things misbehave if size == 0 on a directory */
 296                inode->i_size = 2 * DIRENT_SIZE;
 297                inode->i_op = &mqueue_dir_inode_operations;
 298                inode->i_fop = &simple_dir_operations;
 299        }
 300
 301        return inode;
 302out_inode:
 303        iput(inode);
 304err:
 305        return ERR_PTR(ret);
 306}
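
/*
 * Illustrative userspace sketch (not kernel code): the check above charges
 * the worst-case footprint of the queue against the creator's
 * RLIMIT_MSGQUEUE and fails with EMFILE once that limit would be exceeded.
 * A process can inspect the limit with getrlimit(); nothing below is
 * specific to this kernel version.
 */
#if 0
#include <stdio.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	if (getrlimit(RLIMIT_MSGQUEUE, &rl) == 0)
		printf("RLIMIT_MSGQUEUE: soft=%llu hard=%llu bytes\n",
		       (unsigned long long)rl.rlim_cur,
		       (unsigned long long)rl.rlim_max);
	return 0;
}
#endif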
 307
 308static int mqueue_fill_super(struct super_block *sb, void *data, int silent)
 309{
 310        struct inode *inode;
 311        struct ipc_namespace *ns = sb->s_fs_info;
 312
 313        sb->s_iflags |= SB_I_NOEXEC | SB_I_NODEV;
 314        sb->s_blocksize = PAGE_SIZE;
 315        sb->s_blocksize_bits = PAGE_SHIFT;
 316        sb->s_magic = MQUEUE_MAGIC;
 317        sb->s_op = &mqueue_super_ops;
 318
 319        inode = mqueue_get_inode(sb, ns, S_IFDIR | S_ISVTX | S_IRWXUGO, NULL);
 320        if (IS_ERR(inode))
 321                return PTR_ERR(inode);
 322
 323        sb->s_root = d_make_root(inode);
 324        if (!sb->s_root)
 325                return -ENOMEM;
 326        return 0;
 327}
 328
 329static struct dentry *mqueue_mount(struct file_system_type *fs_type,
 330                         int flags, const char *dev_name,
 331                         void *data)
 332{
 333        struct ipc_namespace *ns;
 334        if (flags & MS_KERNMOUNT) {
 335                ns = data;
 336                data = NULL;
 337        } else {
 338                ns = current->nsproxy->ipc_ns;
 339        }
 340        return mount_ns(fs_type, flags, data, ns, ns->user_ns, mqueue_fill_super);
 341}
 342
 343static void init_once(void *foo)
 344{
 345        struct mqueue_inode_info *p = (struct mqueue_inode_info *) foo;
 346
 347        inode_init_once(&p->vfs_inode);
 348}
 349
 350static struct inode *mqueue_alloc_inode(struct super_block *sb)
 351{
 352        struct mqueue_inode_info *ei;
 353
 354        ei = kmem_cache_alloc(mqueue_inode_cachep, GFP_KERNEL);
 355        if (!ei)
 356                return NULL;
 357        return &ei->vfs_inode;
 358}
 359
 360static void mqueue_i_callback(struct rcu_head *head)
 361{
 362        struct inode *inode = container_of(head, struct inode, i_rcu);
 363        kmem_cache_free(mqueue_inode_cachep, MQUEUE_I(inode));
 364}
 365
 366static void mqueue_destroy_inode(struct inode *inode)
 367{
 368        call_rcu(&inode->i_rcu, mqueue_i_callback);
 369}
 370
 371static void mqueue_evict_inode(struct inode *inode)
 372{
 373        struct mqueue_inode_info *info;
 374        struct user_struct *user;
 375        unsigned long mq_bytes, mq_treesize;
 376        struct ipc_namespace *ipc_ns;
 377        struct msg_msg *msg;
 378
 379        clear_inode(inode);
 380
 381        if (S_ISDIR(inode->i_mode))
 382                return;
 383
 384        ipc_ns = get_ns_from_inode(inode);
 385        info = MQUEUE_I(inode);
 386        spin_lock(&info->lock);
 387        while ((msg = msg_get(info)) != NULL)
 388                free_msg(msg);
 389        kfree(info->node_cache);
 390        spin_unlock(&info->lock);
 391
 392        /* Total amount of bytes accounted for the mqueue */
 393        mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
 394                min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
 395                sizeof(struct posix_msg_tree_node);
 396
 397        mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
 398                                  info->attr.mq_msgsize);
 399
 400        user = info->user;
 401        if (user) {
 402                spin_lock(&mq_lock);
 403                user->mq_bytes -= mq_bytes;
 404                /*
 405                 * get_ns_from_inode() ensures that the
 406                 * (ipc_ns = sb->s_fs_info) is either a valid ipc_ns
 407                 * to which we now hold a reference, or it is NULL.
 408                 * We can't put it here under mq_lock, though.
 409                 */
 410                if (ipc_ns)
 411                        ipc_ns->mq_queues_count--;
 412                spin_unlock(&mq_lock);
 413                free_uid(user);
 414        }
 415        if (ipc_ns)
 416                put_ipc_ns(ipc_ns);
 417}
 418
 419static int mqueue_create(struct inode *dir, struct dentry *dentry,
 420                                umode_t mode, bool excl)
 421{
 422        struct inode *inode;
 423        struct mq_attr *attr = dentry->d_fsdata;
 424        int error;
 425        struct ipc_namespace *ipc_ns;
 426
 427        spin_lock(&mq_lock);
 428        ipc_ns = __get_ns_from_inode(dir);
 429        if (!ipc_ns) {
 430                error = -EACCES;
 431                goto out_unlock;
 432        }
 433
 434        if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
 435            !capable(CAP_SYS_RESOURCE)) {
 436                error = -ENOSPC;
 437                goto out_unlock;
 438        }
 439        ipc_ns->mq_queues_count++;
 440        spin_unlock(&mq_lock);
 441
 442        inode = mqueue_get_inode(dir->i_sb, ipc_ns, mode, attr);
 443        if (IS_ERR(inode)) {
 444                error = PTR_ERR(inode);
 445                spin_lock(&mq_lock);
 446                ipc_ns->mq_queues_count--;
 447                goto out_unlock;
 448        }
 449
 450        put_ipc_ns(ipc_ns);
 451        dir->i_size += DIRENT_SIZE;
 452        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 453
 454        d_instantiate(dentry, inode);
 455        dget(dentry);
 456        return 0;
 457out_unlock:
 458        spin_unlock(&mq_lock);
 459        if (ipc_ns)
 460                put_ipc_ns(ipc_ns);
 461        return error;
 462}
 463
 464static int mqueue_unlink(struct inode *dir, struct dentry *dentry)
 465{
 466        struct inode *inode = d_inode(dentry);
 467
 468        dir->i_ctime = dir->i_mtime = dir->i_atime = current_time(dir);
 469        dir->i_size -= DIRENT_SIZE;
 470        drop_nlink(inode);
 471        dput(dentry);
 472        return 0;
 473}
 474
  475/*
  476 * This routine handles reads from a queue file.  To avoid implementing
  477 * some form of mq_receive() here, we only expose the queue size and the
  478 * notification info -- the only values that are interesting from a
  479 * user's point of view and that aren't accessible through the standard
  480 * routines.
  481 */
 482static ssize_t mqueue_read_file(struct file *filp, char __user *u_data,
 483                                size_t count, loff_t *off)
 484{
 485        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 486        char buffer[FILENT_SIZE];
 487        ssize_t ret;
 488
 489        spin_lock(&info->lock);
 490        snprintf(buffer, sizeof(buffer),
 491                        "QSIZE:%-10lu NOTIFY:%-5d SIGNO:%-5d NOTIFY_PID:%-6d\n",
 492                        info->qsize,
 493                        info->notify_owner ? info->notify.sigev_notify : 0,
 494                        (info->notify_owner &&
 495                         info->notify.sigev_notify == SIGEV_SIGNAL) ?
 496                                info->notify.sigev_signo : 0,
 497                        pid_vnr(info->notify_owner));
 498        spin_unlock(&info->lock);
 499        buffer[sizeof(buffer)-1] = '\0';
 500
 501        ret = simple_read_from_buffer(u_data, count, off, buffer,
 502                                strlen(buffer));
 503        if (ret <= 0)
 504                return ret;
 505
 506        file_inode(filp)->i_atime = file_inode(filp)->i_ctime = current_time(file_inode(filp));
 507        return ret;
 508}
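
/*
 * Illustrative userspace sketch (not kernel code): mqueue_read_file() above
 * is what services a read() on a queue's entry in a mounted mqueue
 * filesystem.  The mount point (/dev/mqueue is the conventional location)
 * and the queue name are assumptions for the example.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char line[128];
	ssize_t n;
	int fd = open("/dev/mqueue/prio-demo", O_RDONLY);

	if (fd < 0)
		return 1;
	n = read(fd, line, sizeof(line) - 1);
	if (n > 0) {
		line[n] = '\0';
		/* e.g. "QSIZE:5     NOTIFY:0     SIGNO:0     NOTIFY_PID:0" */
		fputs(line, stdout);
	}
	close(fd);
	return 0;
}
#endif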
 509
 510static int mqueue_flush_file(struct file *filp, fl_owner_t id)
 511{
 512        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 513
 514        spin_lock(&info->lock);
 515        if (task_tgid(current) == info->notify_owner)
 516                remove_notification(info);
 517
 518        spin_unlock(&info->lock);
 519        return 0;
 520}
 521
 522static unsigned int mqueue_poll_file(struct file *filp, struct poll_table_struct *poll_tab)
 523{
 524        struct mqueue_inode_info *info = MQUEUE_I(file_inode(filp));
 525        int retval = 0;
 526
 527        poll_wait(filp, &info->wait_q, poll_tab);
 528
 529        spin_lock(&info->lock);
 530        if (info->attr.mq_curmsgs)
 531                retval = POLLIN | POLLRDNORM;
 532
 533        if (info->attr.mq_curmsgs < info->attr.mq_maxmsg)
 534                retval |= POLLOUT | POLLWRNORM;
 535        spin_unlock(&info->lock);
 536
 537        return retval;
 538}
 539
 540/* Adds current to info->e_wait_q[sr] before element with smaller prio */
 541static void wq_add(struct mqueue_inode_info *info, int sr,
 542                        struct ext_wait_queue *ewp)
 543{
 544        struct ext_wait_queue *walk;
 545
 546        ewp->task = current;
 547
 548        list_for_each_entry(walk, &info->e_wait_q[sr].list, list) {
 549                if (walk->task->static_prio <= current->static_prio) {
 550                        list_add_tail(&ewp->list, &walk->list);
 551                        return;
 552                }
 553        }
 554        list_add_tail(&ewp->list, &info->e_wait_q[sr].list);
 555}
 556
 557/*
 558 * Puts current task to sleep. Caller must hold queue lock. After return
 559 * lock isn't held.
 560 * sr: SEND or RECV
 561 */
 562static int wq_sleep(struct mqueue_inode_info *info, int sr,
 563                    ktime_t *timeout, struct ext_wait_queue *ewp)
 564        __releases(&info->lock)
 565{
 566        int retval;
 567        signed long time;
 568
 569        wq_add(info, sr, ewp);
 570
 571        for (;;) {
 572                __set_current_state(TASK_INTERRUPTIBLE);
 573
 574                spin_unlock(&info->lock);
 575                time = schedule_hrtimeout_range_clock(timeout, 0,
 576                        HRTIMER_MODE_ABS, CLOCK_REALTIME);
 577
 578                if (ewp->state == STATE_READY) {
 579                        retval = 0;
 580                        goto out;
 581                }
 582                spin_lock(&info->lock);
 583                if (ewp->state == STATE_READY) {
 584                        retval = 0;
 585                        goto out_unlock;
 586                }
 587                if (signal_pending(current)) {
 588                        retval = -ERESTARTSYS;
 589                        break;
 590                }
 591                if (time == 0) {
 592                        retval = -ETIMEDOUT;
 593                        break;
 594                }
 595        }
 596        list_del(&ewp->list);
 597out_unlock:
 598        spin_unlock(&info->lock);
 599out:
 600        return retval;
 601}
 602
 603/*
 604 * Returns waiting task that should be serviced first or NULL if none exists
 605 */
 606static struct ext_wait_queue *wq_get_first_waiter(
 607                struct mqueue_inode_info *info, int sr)
 608{
 609        struct list_head *ptr;
 610
 611        ptr = info->e_wait_q[sr].list.prev;
 612        if (ptr == &info->e_wait_q[sr].list)
 613                return NULL;
 614        return list_entry(ptr, struct ext_wait_queue, list);
 615}
 616
 617
 618static inline void set_cookie(struct sk_buff *skb, char code)
 619{
 620        ((char *)skb->data)[NOTIFY_COOKIE_LEN-1] = code;
 621}
 622
  623/*
  624 * This function exists only to split up an overly long sys_mq_timedsend().
  625 */
 626static void __do_notify(struct mqueue_inode_info *info)
 627{
  628        /* Notification is sent when a process has registered for it,
  629         * no process is waiting synchronously for a message, AND the
  630         * state of the queue has changed from empty to not empty.  At
  631         * this point we are sure that nobody is waiting
  632         * synchronously. */
 633        if (info->notify_owner &&
 634            info->attr.mq_curmsgs == 1) {
 635                struct siginfo sig_i;
 636                switch (info->notify.sigev_notify) {
 637                case SIGEV_NONE:
 638                        break;
 639                case SIGEV_SIGNAL:
 640                        /* sends signal */
 641
 642                        sig_i.si_signo = info->notify.sigev_signo;
 643                        sig_i.si_errno = 0;
 644                        sig_i.si_code = SI_MESGQ;
 645                        sig_i.si_value = info->notify.sigev_value;
 646                        /* map current pid/uid into info->owner's namespaces */
 647                        rcu_read_lock();
 648                        sig_i.si_pid = task_tgid_nr_ns(current,
 649                                                ns_of_pid(info->notify_owner));
 650                        sig_i.si_uid = from_kuid_munged(info->notify_user_ns, current_uid());
 651                        rcu_read_unlock();
 652
 653                        kill_pid_info(info->notify.sigev_signo,
 654                                      &sig_i, info->notify_owner);
 655                        break;
 656                case SIGEV_THREAD:
 657                        set_cookie(info->notify_cookie, NOTIFY_WOKENUP);
 658                        netlink_sendskb(info->notify_sock, info->notify_cookie);
 659                        break;
 660                }
 661                /* after notification unregisters process */
 662                put_pid(info->notify_owner);
 663                put_user_ns(info->notify_user_ns);
 664                info->notify_owner = NULL;
 665                info->notify_user_ns = NULL;
 666        }
 667        wake_up(&info->wait_q);
 668}
 669
 670static int prepare_timeout(const struct timespec __user *u_abs_timeout,
 671                           struct timespec *ts)
 672{
 673        if (copy_from_user(ts, u_abs_timeout, sizeof(struct timespec)))
 674                return -EFAULT;
 675        if (!timespec_valid(ts))
 676                return -EINVAL;
 677        return 0;
 678}
 679
 680static void remove_notification(struct mqueue_inode_info *info)
 681{
 682        if (info->notify_owner != NULL &&
 683            info->notify.sigev_notify == SIGEV_THREAD) {
 684                set_cookie(info->notify_cookie, NOTIFY_REMOVED);
 685                netlink_sendskb(info->notify_sock, info->notify_cookie);
 686        }
 687        put_pid(info->notify_owner);
 688        put_user_ns(info->notify_user_ns);
 689        info->notify_owner = NULL;
 690        info->notify_user_ns = NULL;
 691}
 692
 693static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
 694{
 695        int mq_treesize;
 696        unsigned long total_size;
 697
 698        if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
 699                return -EINVAL;
 700        if (capable(CAP_SYS_RESOURCE)) {
 701                if (attr->mq_maxmsg > HARD_MSGMAX ||
 702                    attr->mq_msgsize > HARD_MSGSIZEMAX)
 703                        return -EINVAL;
 704        } else {
 705                if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
 706                                attr->mq_msgsize > ipc_ns->mq_msgsize_max)
 707                        return -EINVAL;
 708        }
 709        /* check for overflow */
 710        if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
 711                return -EOVERFLOW;
 712        mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
 713                min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
 714                sizeof(struct posix_msg_tree_node);
 715        total_size = attr->mq_maxmsg * attr->mq_msgsize;
 716        if (total_size + mq_treesize < total_size)
 717                return -EOVERFLOW;
 718        return 0;
 719}
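
/*
 * Illustrative userspace sketch (not kernel code): for callers without
 * CAP_SYS_RESOURCE, mq_attr_ok() bounds mq_maxmsg and mq_msgsize by the
 * per-namespace limits, which are exposed through procfs.  The paths below
 * are the standard sysctl files; parsing is kept deliberately simple.
 */
#if 0
#include <stdio.h>

static long read_limit(const char *path)
{
	long val = -1;
	FILE *f = fopen(path, "r");

	if (f) {
		if (fscanf(f, "%ld", &val) != 1)
			val = -1;
		fclose(f);
	}
	return val;
}

int main(void)
{
	printf("fs.mqueue.msg_max     = %ld\n",
	       read_limit("/proc/sys/fs/mqueue/msg_max"));
	printf("fs.mqueue.msgsize_max = %ld\n",
	       read_limit("/proc/sys/fs/mqueue/msgsize_max"));
	return 0;
}
#endif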
 720
 721/*
 722 * Invoked when creating a new queue via sys_mq_open
 723 */
 724static struct file *do_create(struct ipc_namespace *ipc_ns, struct inode *dir,
 725                        struct path *path, int oflag, umode_t mode,
 726                        struct mq_attr *attr)
 727{
 728        const struct cred *cred = current_cred();
 729        int ret;
 730
 731        if (attr) {
 732                ret = mq_attr_ok(ipc_ns, attr);
 733                if (ret)
 734                        return ERR_PTR(ret);
 735                /* store for use during create */
 736                path->dentry->d_fsdata = attr;
 737        } else {
 738                struct mq_attr def_attr;
 739
 740                def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
 741                                         ipc_ns->mq_msg_default);
 742                def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
 743                                          ipc_ns->mq_msgsize_default);
 744                ret = mq_attr_ok(ipc_ns, &def_attr);
 745                if (ret)
 746                        return ERR_PTR(ret);
 747        }
 748
 749        mode &= ~current_umask();
 750        ret = vfs_create(dir, path->dentry, mode, true);
 751        path->dentry->d_fsdata = NULL;
 752        if (ret)
 753                return ERR_PTR(ret);
 754        return dentry_open(path, oflag, cred);
 755}
 756
 757/* Opens existing queue */
 758static struct file *do_open(struct path *path, int oflag)
 759{
 760        static const int oflag2acc[O_ACCMODE] = { MAY_READ, MAY_WRITE,
 761                                                  MAY_READ | MAY_WRITE };
 762        int acc;
 763        if ((oflag & O_ACCMODE) == (O_RDWR | O_WRONLY))
 764                return ERR_PTR(-EINVAL);
 765        acc = oflag2acc[oflag & O_ACCMODE];
 766        if (inode_permission(d_inode(path->dentry), acc))
 767                return ERR_PTR(-EACCES);
 768        return dentry_open(path, oflag, current_cred());
 769}
 770
 771static int do_mq_open(const char __user *u_name, int oflag, umode_t mode,
 772                      struct mq_attr *attr)
 773{
 774        struct path path;
 775        struct file *filp;
 776        struct filename *name;
 777        int fd, error;
 778        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
 779        struct vfsmount *mnt = ipc_ns->mq_mnt;
 780        struct dentry *root = mnt->mnt_root;
 781        int ro;
 782
 783        audit_mq_open(oflag, mode, attr);
 784
 785        if (IS_ERR(name = getname(u_name)))
 786                return PTR_ERR(name);
 787
 788        fd = get_unused_fd_flags(O_CLOEXEC);
 789        if (fd < 0)
 790                goto out_putname;
 791
 792        ro = mnt_want_write(mnt);       /* we'll drop it in any case */
 793        error = 0;
 794        inode_lock(d_inode(root));
 795        path.dentry = lookup_one_len(name->name, root, strlen(name->name));
 796        if (IS_ERR(path.dentry)) {
 797                error = PTR_ERR(path.dentry);
 798                goto out_putfd;
 799        }
 800        path.mnt = mntget(mnt);
 801
 802        if (oflag & O_CREAT) {
 803                if (d_really_is_positive(path.dentry)) {        /* entry already exists */
 804                        audit_inode(name, path.dentry, 0);
 805                        if (oflag & O_EXCL) {
 806                                error = -EEXIST;
 807                                goto out;
 808                        }
 809                        filp = do_open(&path, oflag);
 810                } else {
 811                        if (ro) {
 812                                error = ro;
 813                                goto out;
 814                        }
 815                        audit_inode_parent_hidden(name, root);
 816                        filp = do_create(ipc_ns, d_inode(root), &path,
 817                                         oflag, mode, attr);
 818                }
 819        } else {
 820                if (d_really_is_negative(path.dentry)) {
 821                        error = -ENOENT;
 822                        goto out;
 823                }
 824                audit_inode(name, path.dentry, 0);
 825                filp = do_open(&path, oflag);
 826        }
 827
 828        if (!IS_ERR(filp))
 829                fd_install(fd, filp);
 830        else
 831                error = PTR_ERR(filp);
 832out:
 833        path_put(&path);
 834out_putfd:
 835        if (error) {
 836                put_unused_fd(fd);
 837                fd = error;
 838        }
 839        inode_unlock(d_inode(root));
 840        if (!ro)
 841                mnt_drop_write(mnt);
 842out_putname:
 843        putname(name);
 844        return fd;
 845}
 846
 847SYSCALL_DEFINE4(mq_open, const char __user *, u_name, int, oflag, umode_t, mode,
 848                struct mq_attr __user *, u_attr)
 849{
 850        struct mq_attr attr;
 851        if (u_attr && copy_from_user(&attr, u_attr, sizeof(struct mq_attr)))
 852                return -EFAULT;
 853
 854        return do_mq_open(u_name, oflag, mode, u_attr ? &attr : NULL);
 855}
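
/*
 * Illustrative userspace sketch (not kernel code): this syscall is normally
 * reached through the mq_open(3) wrapper.  The snippet below exercises the
 * O_CREAT | O_EXCL path handled in do_mq_open(); the name and attributes
 * are arbitrary assumptions, and a second run fails with EEXIST.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 128 };
	mqd_t q = mq_open("/excl-demo", O_CREAT | O_EXCL | O_RDWR, 0600, &attr);

	if (q == (mqd_t)-1) {
		if (errno == EEXIST)
			fprintf(stderr, "queue already exists\n");
		return 1;
	}
	mq_close(q);
	return 0;
}
#endif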
 856
 857SYSCALL_DEFINE1(mq_unlink, const char __user *, u_name)
 858{
 859        int err;
 860        struct filename *name;
 861        struct dentry *dentry;
 862        struct inode *inode = NULL;
 863        struct ipc_namespace *ipc_ns = current->nsproxy->ipc_ns;
 864        struct vfsmount *mnt = ipc_ns->mq_mnt;
 865
 866        name = getname(u_name);
 867        if (IS_ERR(name))
 868                return PTR_ERR(name);
 869
 870        audit_inode_parent_hidden(name, mnt->mnt_root);
 871        err = mnt_want_write(mnt);
 872        if (err)
 873                goto out_name;
 874        inode_lock_nested(d_inode(mnt->mnt_root), I_MUTEX_PARENT);
 875        dentry = lookup_one_len(name->name, mnt->mnt_root,
 876                                strlen(name->name));
 877        if (IS_ERR(dentry)) {
 878                err = PTR_ERR(dentry);
 879                goto out_unlock;
 880        }
 881
 882        inode = d_inode(dentry);
 883        if (!inode) {
 884                err = -ENOENT;
 885        } else {
 886                ihold(inode);
 887                err = vfs_unlink(d_inode(dentry->d_parent), dentry, NULL);
 888        }
 889        dput(dentry);
 890
 891out_unlock:
 892        inode_unlock(d_inode(mnt->mnt_root));
 893        if (inode)
 894                iput(inode);
 895        mnt_drop_write(mnt);
 896out_name:
 897        putname(name);
 898
 899        return err;
 900}
 901
 902/* Pipelined send and receive functions.
 903 *
 904 * If a receiver finds no waiting message, then it registers itself in the
 905 * list of waiting receivers. A sender checks that list before adding the new
 906 * message into the message array. If there is a waiting receiver, then it
 907 * bypasses the message array and directly hands the message over to the
 908 * receiver. The receiver accepts the message and returns without grabbing the
 909 * queue spinlock:
 910 *
 911 * - Set pointer to message.
 912 * - Queue the receiver task for later wakeup (without the info->lock).
 913 * - Update its state to STATE_READY. Now the receiver can continue.
 914 * - Wake up the process after the lock is dropped. Should the process wake up
 915 *   before this wakeup (due to a timeout or a signal) it will either see
 916 *   STATE_READY and continue or acquire the lock to check the state again.
 917 *
 918 * The same algorithm is used for senders.
 919 */
 920
 921/* pipelined_send() - send a message directly to the task waiting in
 922 * sys_mq_timedreceive() (without inserting message into a queue).
 923 */
 924static inline void pipelined_send(struct wake_q_head *wake_q,
 925                                  struct mqueue_inode_info *info,
 926                                  struct msg_msg *message,
 927                                  struct ext_wait_queue *receiver)
 928{
 929        receiver->msg = message;
 930        list_del(&receiver->list);
 931        wake_q_add(wake_q, receiver->task);
 932        /*
 933         * Rely on the implicit cmpxchg barrier from wake_q_add such
 934         * that we can ensure that updating receiver->state is the last
 935         * write operation: As once set, the receiver can continue,
 936         * and if we don't have the reference count from the wake_q,
 937         * yet, at that point we can later have a use-after-free
 938         * condition and bogus wakeup.
 939         */
 940        receiver->state = STATE_READY;
 941}
 942
  943/* pipelined_receive() - if a task is waiting in sys_mq_timedsend(), take
  944 * its message and insert it into the queue (we know there is room for it). */
 945static inline void pipelined_receive(struct wake_q_head *wake_q,
 946                                     struct mqueue_inode_info *info)
 947{
 948        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
 949
 950        if (!sender) {
 951                /* for poll */
 952                wake_up_interruptible(&info->wait_q);
 953                return;
 954        }
 955        if (msg_insert(sender->msg, info))
 956                return;
 957
 958        list_del(&sender->list);
 959        wake_q_add(wake_q, sender->task);
 960        sender->state = STATE_READY;
 961}
 962
 963static int do_mq_timedsend(mqd_t mqdes, const char __user *u_msg_ptr,
 964                size_t msg_len, unsigned int msg_prio,
 965                struct timespec *ts)
 966{
 967        struct fd f;
 968        struct inode *inode;
 969        struct ext_wait_queue wait;
 970        struct ext_wait_queue *receiver;
 971        struct msg_msg *msg_ptr;
 972        struct mqueue_inode_info *info;
 973        ktime_t expires, *timeout = NULL;
 974        struct posix_msg_tree_node *new_leaf = NULL;
 975        int ret = 0;
 976        DEFINE_WAKE_Q(wake_q);
 977
 978        if (unlikely(msg_prio >= (unsigned long) MQ_PRIO_MAX))
 979                return -EINVAL;
 980
 981        if (ts) {
 982                expires = timespec_to_ktime(*ts);
 983                timeout = &expires;
 984        }
 985
 986        audit_mq_sendrecv(mqdes, msg_len, msg_prio, ts);
 987
 988        f = fdget(mqdes);
 989        if (unlikely(!f.file)) {
 990                ret = -EBADF;
 991                goto out;
 992        }
 993
 994        inode = file_inode(f.file);
 995        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
 996                ret = -EBADF;
 997                goto out_fput;
 998        }
 999        info = MQUEUE_I(inode);
1000        audit_file(f.file);
1001
1002        if (unlikely(!(f.file->f_mode & FMODE_WRITE))) {
1003                ret = -EBADF;
1004                goto out_fput;
1005        }
1006
1007        if (unlikely(msg_len > info->attr.mq_msgsize)) {
1008                ret = -EMSGSIZE;
1009                goto out_fput;
1010        }
1011
1012        /* First try to allocate memory, before doing anything with
1013         * existing queues. */
1014        msg_ptr = load_msg(u_msg_ptr, msg_len);
1015        if (IS_ERR(msg_ptr)) {
1016                ret = PTR_ERR(msg_ptr);
1017                goto out_fput;
1018        }
1019        msg_ptr->m_ts = msg_len;
1020        msg_ptr->m_type = msg_prio;
1021
1022        /*
1023         * msg_insert really wants us to have a valid, spare node struct so
1024         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1025         * fall back to that if necessary.
1026         */
1027        if (!info->node_cache)
1028                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1029
1030        spin_lock(&info->lock);
1031
1032        if (!info->node_cache && new_leaf) {
1033                /* Save our speculative allocation into the cache */
1034                INIT_LIST_HEAD(&new_leaf->msg_list);
1035                info->node_cache = new_leaf;
1036                new_leaf = NULL;
1037        } else {
1038                kfree(new_leaf);
1039        }
1040
1041        if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
1042                if (f.file->f_flags & O_NONBLOCK) {
1043                        ret = -EAGAIN;
1044                } else {
1045                        wait.task = current;
1046                        wait.msg = (void *) msg_ptr;
1047                        wait.state = STATE_NONE;
1048                        ret = wq_sleep(info, SEND, timeout, &wait);
1049                        /*
1050                         * wq_sleep must be called with info->lock held, and
1051                         * returns with the lock released
1052                         */
1053                        goto out_free;
1054                }
1055        } else {
1056                receiver = wq_get_first_waiter(info, RECV);
1057                if (receiver) {
1058                        pipelined_send(&wake_q, info, msg_ptr, receiver);
1059                } else {
1060                        /* adds message to the queue */
1061                        ret = msg_insert(msg_ptr, info);
1062                        if (ret)
1063                                goto out_unlock;
1064                        __do_notify(info);
1065                }
1066                inode->i_atime = inode->i_mtime = inode->i_ctime =
1067                                current_time(inode);
1068        }
1069out_unlock:
1070        spin_unlock(&info->lock);
1071        wake_up_q(&wake_q);
1072out_free:
1073        if (ret)
1074                free_msg(msg_ptr);
1075out_fput:
1076        fdput(f);
1077out:
1078        return ret;
1079}
1080
1081static int do_mq_timedreceive(mqd_t mqdes, char __user *u_msg_ptr,
1082                size_t msg_len, unsigned int __user *u_msg_prio,
1083                struct timespec *ts)
1084{
1085        ssize_t ret;
1086        struct msg_msg *msg_ptr;
1087        struct fd f;
1088        struct inode *inode;
1089        struct mqueue_inode_info *info;
1090        struct ext_wait_queue wait;
1091        ktime_t expires, *timeout = NULL;
1092        struct posix_msg_tree_node *new_leaf = NULL;
1093
1094        if (ts) {
1095                expires = timespec_to_ktime(*ts);
1096                timeout = &expires;
1097        }
1098
1099        audit_mq_sendrecv(mqdes, msg_len, 0, ts);
1100
1101        f = fdget(mqdes);
1102        if (unlikely(!f.file)) {
1103                ret = -EBADF;
1104                goto out;
1105        }
1106
1107        inode = file_inode(f.file);
1108        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1109                ret = -EBADF;
1110                goto out_fput;
1111        }
1112        info = MQUEUE_I(inode);
1113        audit_file(f.file);
1114
1115        if (unlikely(!(f.file->f_mode & FMODE_READ))) {
1116                ret = -EBADF;
1117                goto out_fput;
1118        }
1119
1120        /* checks if buffer is big enough */
1121        if (unlikely(msg_len < info->attr.mq_msgsize)) {
1122                ret = -EMSGSIZE;
1123                goto out_fput;
1124        }
1125
1126        /*
1127         * msg_insert really wants us to have a valid, spare node struct so
1128         * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1129         * fall back to that if necessary.
1130         */
1131        if (!info->node_cache)
1132                new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1133
1134        spin_lock(&info->lock);
1135
1136        if (!info->node_cache && new_leaf) {
1137                /* Save our speculative allocation into the cache */
1138                INIT_LIST_HEAD(&new_leaf->msg_list);
1139                info->node_cache = new_leaf;
1140        } else {
1141                kfree(new_leaf);
1142        }
1143
1144        if (info->attr.mq_curmsgs == 0) {
1145                if (f.file->f_flags & O_NONBLOCK) {
1146                        spin_unlock(&info->lock);
1147                        ret = -EAGAIN;
1148                } else {
1149                        wait.task = current;
1150                        wait.state = STATE_NONE;
1151                        ret = wq_sleep(info, RECV, timeout, &wait);
1152                        msg_ptr = wait.msg;
1153                }
1154        } else {
1155                DEFINE_WAKE_Q(wake_q);
1156
1157                msg_ptr = msg_get(info);
1158
1159                inode->i_atime = inode->i_mtime = inode->i_ctime =
1160                                current_time(inode);
1161
1162                /* There is now free space in queue. */
1163                pipelined_receive(&wake_q, info);
1164                spin_unlock(&info->lock);
1165                wake_up_q(&wake_q);
1166                ret = 0;
1167        }
1168        if (ret == 0) {
1169                ret = msg_ptr->m_ts;
1170
1171                if ((u_msg_prio && put_user(msg_ptr->m_type, u_msg_prio)) ||
1172                        store_msg(u_msg_ptr, msg_ptr, msg_ptr->m_ts)) {
1173                        ret = -EFAULT;
1174                }
1175                free_msg(msg_ptr);
1176        }
1177out_fput:
1178        fdput(f);
1179out:
1180        return ret;
1181}
1182
1183SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
1184                size_t, msg_len, unsigned int, msg_prio,
1185                const struct timespec __user *, u_abs_timeout)
1186{
1187        struct timespec ts, *p = NULL;
1188        if (u_abs_timeout) {
1189                int res = prepare_timeout(u_abs_timeout, &ts);
1190                if (res)
1191                        return res;
1192                p = &ts;
1193        }
1194        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1195}
1196
1197SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
1198                size_t, msg_len, unsigned int __user *, u_msg_prio,
1199                const struct timespec __user *, u_abs_timeout)
1200{
1201        struct timespec ts, *p = NULL;
1202        if (u_abs_timeout) {
1203                int res = prepare_timeout(u_abs_timeout, &ts);
1204                if (res)
1205                        return res;
1206                p = &ts;
1207        }
1208        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1209}
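
/*
 * Illustrative userspace sketch (not kernel code): the timeout passed to
 * mq_timedsend()/mq_timedreceive() is an *absolute* CLOCK_REALTIME time, as
 * prepare_timeout() and wq_sleep() above assume.  The queue name and sizes
 * are assumptions; the call below waits at most about two seconds.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <time.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 4, .mq_msgsize = 64 };
	mqd_t q = mq_open("/timeo-demo", O_CREAT | O_RDONLY, 0600, &attr);
	char buf[64];
	struct timespec abs_timeout;

	clock_gettime(CLOCK_REALTIME, &abs_timeout);
	abs_timeout.tv_sec += 2;	/* give up two seconds from now */

	if (mq_timedreceive(q, buf, sizeof(buf), NULL, &abs_timeout) < 0 &&
	    errno == ETIMEDOUT)
		fprintf(stderr, "no message within the timeout\n");
	mq_close(q);
	mq_unlink("/timeo-demo");
	return 0;
}
#endif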
1210
 1211/*
 1212 * Note: if the caller asks us to deregister (by passing a NULL pointer)
 1213 * but is not currently the owner of the notification, the request is
 1214 * silently discarded.  POSIX does not explicitly define this case.
 1215 */
1216static int do_mq_notify(mqd_t mqdes, const struct sigevent *notification)
1217{
1218        int ret;
1219        struct fd f;
1220        struct sock *sock;
1221        struct inode *inode;
1222        struct mqueue_inode_info *info;
1223        struct sk_buff *nc;
1224
1225        audit_mq_notify(mqdes, notification);
1226
1227        nc = NULL;
1228        sock = NULL;
1229        if (notification != NULL) {
1230                if (unlikely(notification->sigev_notify != SIGEV_NONE &&
1231                             notification->sigev_notify != SIGEV_SIGNAL &&
1232                             notification->sigev_notify != SIGEV_THREAD))
1233                        return -EINVAL;
1234                if (notification->sigev_notify == SIGEV_SIGNAL &&
1235                        !valid_signal(notification->sigev_signo)) {
1236                        return -EINVAL;
1237                }
1238                if (notification->sigev_notify == SIGEV_THREAD) {
1239                        long timeo;
1240
1241                        /* create the notify skb */
1242                        nc = alloc_skb(NOTIFY_COOKIE_LEN, GFP_KERNEL);
1243                        if (!nc) {
1244                                ret = -ENOMEM;
1245                                goto out;
1246                        }
1247                        if (copy_from_user(nc->data,
1248                                        notification->sigev_value.sival_ptr,
1249                                        NOTIFY_COOKIE_LEN)) {
1250                                ret = -EFAULT;
1251                                goto out;
1252                        }
1253
1254                        /* TODO: add a header? */
1255                        skb_put(nc, NOTIFY_COOKIE_LEN);
1256                        /* and attach it to the socket */
1257retry:
1258                        f = fdget(notification->sigev_signo);
1259                        if (!f.file) {
1260                                ret = -EBADF;
1261                                goto out;
1262                        }
1263                        sock = netlink_getsockbyfilp(f.file);
1264                        fdput(f);
1265                        if (IS_ERR(sock)) {
1266                                ret = PTR_ERR(sock);
1267                                sock = NULL;
1268                                goto out;
1269                        }
1270
1271                        timeo = MAX_SCHEDULE_TIMEOUT;
1272                        ret = netlink_attachskb(sock, nc, &timeo, NULL);
1273                        if (ret == 1) {
1274                                sock = NULL;
1275                                goto retry;
1276                        }
1277                        if (ret) {
1278                                sock = NULL;
1279                                nc = NULL;
1280                                goto out;
1281                        }
1282                }
1283        }
1284
1285        f = fdget(mqdes);
1286        if (!f.file) {
1287                ret = -EBADF;
1288                goto out;
1289        }
1290
1291        inode = file_inode(f.file);
1292        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1293                ret = -EBADF;
1294                goto out_fput;
1295        }
1296        info = MQUEUE_I(inode);
1297
1298        ret = 0;
1299        spin_lock(&info->lock);
1300        if (notification == NULL) {
1301                if (info->notify_owner == task_tgid(current)) {
1302                        remove_notification(info);
1303                        inode->i_atime = inode->i_ctime = current_time(inode);
1304                }
1305        } else if (info->notify_owner != NULL) {
1306                ret = -EBUSY;
1307        } else {
1308                switch (notification->sigev_notify) {
1309                case SIGEV_NONE:
1310                        info->notify.sigev_notify = SIGEV_NONE;
1311                        break;
1312                case SIGEV_THREAD:
1313                        info->notify_sock = sock;
1314                        info->notify_cookie = nc;
1315                        sock = NULL;
1316                        nc = NULL;
1317                        info->notify.sigev_notify = SIGEV_THREAD;
1318                        break;
1319                case SIGEV_SIGNAL:
1320                        info->notify.sigev_signo = notification->sigev_signo;
1321                        info->notify.sigev_value = notification->sigev_value;
1322                        info->notify.sigev_notify = SIGEV_SIGNAL;
1323                        break;
1324                }
1325
1326                info->notify_owner = get_pid(task_tgid(current));
1327                info->notify_user_ns = get_user_ns(current_user_ns());
1328                inode->i_atime = inode->i_ctime = current_time(inode);
1329        }
1330        spin_unlock(&info->lock);
1331out_fput:
1332        fdput(f);
1333out:
1334        if (sock)
1335                netlink_detachskb(sock, nc);
1336        else if (nc)
1337                dev_kfree_skb(nc);
1338
1339        return ret;
1340}
1341
1342SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1343                const struct sigevent __user *, u_notification)
1344{
1345        struct sigevent n, *p = NULL;
1346        if (u_notification) {
1347                if (copy_from_user(&n, u_notification, sizeof(struct sigevent)))
1348                        return -EFAULT;
1349                p = &n;
1350        }
1351        return do_mq_notify(mqdes, p);
1352}
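
/*
 * Illustrative userspace sketch (not kernel code): do_mq_notify() allows
 * only one registration per queue (otherwise EBUSY), and __do_notify()
 * drops the registration after it fires, so it must be re-armed after each
 * notification.  SIGEV_SIGNAL is shown; the signal choice is an assumption.
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static void on_sigusr1(int sig) { (void)sig; }

int main(void)
{
	mqd_t q = mq_open("/notify-demo", O_CREAT | O_RDONLY, 0600, NULL);
	struct sigevent sev = {
		.sigev_notify = SIGEV_SIGNAL,
		.sigev_signo  = SIGUSR1,
	};

	signal(SIGUSR1, on_sigusr1);
	if (mq_notify(q, &sev) < 0)
		perror("mq_notify");
	/* SIGUSR1 is delivered when the queue goes from empty to non-empty;
	 * the registration is one-shot, so re-arm it after each delivery. */
	pause();
	mq_close(q);
	mq_unlink("/notify-demo");
	return 0;
}
#endif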
1353
1354static int do_mq_getsetattr(int mqdes, struct mq_attr *new, struct mq_attr *old)
1355{
1356        struct fd f;
1357        struct inode *inode;
1358        struct mqueue_inode_info *info;
1359
1360        if (new && (new->mq_flags & (~O_NONBLOCK)))
1361                return -EINVAL;
1362
1363        f = fdget(mqdes);
1364        if (!f.file)
1365                return -EBADF;
1366
1367        if (unlikely(f.file->f_op != &mqueue_file_operations)) {
1368                fdput(f);
1369                return -EBADF;
1370        }
1371
1372        inode = file_inode(f.file);
1373        info = MQUEUE_I(inode);
1374
1375        spin_lock(&info->lock);
1376
1377        if (old) {
1378                *old = info->attr;
1379                old->mq_flags = f.file->f_flags & O_NONBLOCK;
1380        }
1381        if (new) {
1382                audit_mq_getsetattr(mqdes, new);
1383                spin_lock(&f.file->f_lock);
1384                if (new->mq_flags & O_NONBLOCK)
1385                        f.file->f_flags |= O_NONBLOCK;
1386                else
1387                        f.file->f_flags &= ~O_NONBLOCK;
1388                spin_unlock(&f.file->f_lock);
1389
1390                inode->i_atime = inode->i_ctime = current_time(inode);
1391        }
1392
1393        spin_unlock(&info->lock);
1394        fdput(f);
1395        return 0;
1396}
1397
1398SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1399                const struct mq_attr __user *, u_mqstat,
1400                struct mq_attr __user *, u_omqstat)
1401{
1402        int ret;
1403        struct mq_attr mqstat, omqstat;
1404        struct mq_attr *new = NULL, *old = NULL;
1405
1406        if (u_mqstat) {
1407                new = &mqstat;
1408                if (copy_from_user(new, u_mqstat, sizeof(struct mq_attr)))
1409                        return -EFAULT;
1410        }
1411        if (u_omqstat)
1412                old = &omqstat;
1413
1414        ret = do_mq_getsetattr(mqdes, new, old);
1415        if (ret || !old)
1416                return ret;
1417
1418        if (copy_to_user(u_omqstat, old, sizeof(struct mq_attr)))
1419                return -EFAULT;
1420        return 0;
1421}
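
/*
 * Illustrative userspace sketch (not kernel code): do_mq_getsetattr() only
 * lets mq_flags be changed, and only the O_NONBLOCK bit at that -- anything
 * else fails with EINVAL.  mq_getattr()/mq_setattr() are the usual wrappers;
 * the queue name is an assumption.
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int main(void)
{
	mqd_t q = mq_open("/attr-demo", O_CREAT | O_RDWR, 0600, NULL);
	struct mq_attr cur, want = { .mq_flags = O_NONBLOCK };

	mq_setattr(q, &want, NULL);		/* switch to non-blocking */
	mq_getattr(q, &cur);
	printf("flags=%ld maxmsg=%ld msgsize=%ld curmsgs=%ld\n",
	       cur.mq_flags, cur.mq_maxmsg, cur.mq_msgsize, cur.mq_curmsgs);
	mq_close(q);
	mq_unlink("/attr-demo");
	return 0;
}
#endif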
1422
1423#ifdef CONFIG_COMPAT
1424
1425struct compat_mq_attr {
1426        compat_long_t mq_flags;      /* message queue flags                  */
1427        compat_long_t mq_maxmsg;     /* maximum number of messages           */
1428        compat_long_t mq_msgsize;    /* maximum message size                 */
1429        compat_long_t mq_curmsgs;    /* number of messages currently queued  */
1430        compat_long_t __reserved[4]; /* ignored for input, zeroed for output */
1431};
1432
1433static inline int get_compat_mq_attr(struct mq_attr *attr,
1434                        const struct compat_mq_attr __user *uattr)
1435{
1436        struct compat_mq_attr v;
1437
1438        if (copy_from_user(&v, uattr, sizeof(*uattr)))
1439                return -EFAULT;
1440
1441        memset(attr, 0, sizeof(*attr));
1442        attr->mq_flags = v.mq_flags;
1443        attr->mq_maxmsg = v.mq_maxmsg;
1444        attr->mq_msgsize = v.mq_msgsize;
1445        attr->mq_curmsgs = v.mq_curmsgs;
1446        return 0;
1447}
1448
1449static inline int put_compat_mq_attr(const struct mq_attr *attr,
1450                        struct compat_mq_attr __user *uattr)
1451{
1452        struct compat_mq_attr v;
1453
1454        memset(&v, 0, sizeof(v));
1455        v.mq_flags = attr->mq_flags;
1456        v.mq_maxmsg = attr->mq_maxmsg;
1457        v.mq_msgsize = attr->mq_msgsize;
1458        v.mq_curmsgs = attr->mq_curmsgs;
1459        if (copy_to_user(uattr, &v, sizeof(*uattr)))
1460                return -EFAULT;
1461        return 0;
1462}
1463
1464COMPAT_SYSCALL_DEFINE4(mq_open, const char __user *, u_name,
1465                       int, oflag, compat_mode_t, mode,
1466                       struct compat_mq_attr __user *, u_attr)
1467{
1468        struct mq_attr attr, *p = NULL;
1469        if (u_attr && oflag & O_CREAT) {
1470                p = &attr;
1471                if (get_compat_mq_attr(&attr, u_attr))
1472                        return -EFAULT;
1473        }
1474        return do_mq_open(u_name, oflag, mode, p);
1475}
1476
1477static int compat_prepare_timeout(const struct compat_timespec __user *p,
1478                                   struct timespec *ts)
1479{
1480        if (compat_get_timespec(ts, p))
1481                return -EFAULT;
1482        if (!timespec_valid(ts))
1483                return -EINVAL;
1484        return 0;
1485}
1486
1487COMPAT_SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes,
1488                       const char __user *, u_msg_ptr,
1489                       compat_size_t, msg_len, unsigned int, msg_prio,
1490                       const struct compat_timespec __user *, u_abs_timeout)
1491{
1492        struct timespec ts, *p = NULL;
1493        if (u_abs_timeout) {
1494                int res = compat_prepare_timeout(u_abs_timeout, &ts);
1495                if (res)
1496                        return res;
1497                p = &ts;
1498        }
1499        return do_mq_timedsend(mqdes, u_msg_ptr, msg_len, msg_prio, p);
1500}
1501
1502COMPAT_SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes,
1503                       char __user *, u_msg_ptr,
1504                       compat_size_t, msg_len, unsigned int __user *, u_msg_prio,
1505                       const struct compat_timespec __user *, u_abs_timeout)
1506{
1507        struct timespec ts, *p = NULL;
1508        if (u_abs_timeout) {
1509                int res = compat_prepare_timeout(u_abs_timeout, &ts);
1510                if (res)
1511                        return res;
1512                p = &ts;
1513        }
1514        return do_mq_timedreceive(mqdes, u_msg_ptr, msg_len, u_msg_prio, p);
1515}
1516
1517COMPAT_SYSCALL_DEFINE2(mq_notify, mqd_t, mqdes,
1518                       const struct compat_sigevent __user *, u_notification)
1519{
1520        struct sigevent n, *p = NULL;
1521        if (u_notification) {
1522                if (get_compat_sigevent(&n, u_notification))
1523                        return -EFAULT;
1524                if (n.sigev_notify == SIGEV_THREAD)
1525                        n.sigev_value.sival_ptr = compat_ptr(n.sigev_value.sival_int);
1526                p = &n;
1527        }
1528        return do_mq_notify(mqdes, p);
1529}
1530
1531COMPAT_SYSCALL_DEFINE3(mq_getsetattr, mqd_t, mqdes,
1532                       const struct compat_mq_attr __user *, u_mqstat,
1533                       struct compat_mq_attr __user *, u_omqstat)
1534{
1535        int ret;
1536        struct mq_attr mqstat, omqstat;
1537        struct mq_attr *new = NULL, *old = NULL;
1538
1539        if (u_mqstat) {
1540                new = &mqstat;
1541                if (get_compat_mq_attr(new, u_mqstat))
1542                        return -EFAULT;
1543        }
1544        if (u_omqstat)
1545                old = &omqstat;
1546
1547        ret = do_mq_getsetattr(mqdes, new, old);
1548        if (ret || !old)
1549                return ret;
1550
1551        if (put_compat_mq_attr(old, u_omqstat))
1552                return -EFAULT;
1553        return 0;
1554}
1555#endif
1556
1557static const struct inode_operations mqueue_dir_inode_operations = {
1558        .lookup = simple_lookup,
1559        .create = mqueue_create,
1560        .unlink = mqueue_unlink,
1561};
1562
1563static const struct file_operations mqueue_file_operations = {
1564        .flush = mqueue_flush_file,
1565        .poll = mqueue_poll_file,
1566        .read = mqueue_read_file,
1567        .llseek = default_llseek,
1568};
1569
1570static const struct super_operations mqueue_super_ops = {
1571        .alloc_inode = mqueue_alloc_inode,
1572        .destroy_inode = mqueue_destroy_inode,
1573        .evict_inode = mqueue_evict_inode,
1574        .statfs = simple_statfs,
1575};
1576
1577static struct file_system_type mqueue_fs_type = {
1578        .name = "mqueue",
1579        .mount = mqueue_mount,
1580        .kill_sb = kill_litter_super,
1581        .fs_flags = FS_USERNS_MOUNT,
1582};
1583
1584int mq_init_ns(struct ipc_namespace *ns)
1585{
1586        ns->mq_queues_count  = 0;
1587        ns->mq_queues_max    = DFLT_QUEUESMAX;
1588        ns->mq_msg_max       = DFLT_MSGMAX;
1589        ns->mq_msgsize_max   = DFLT_MSGSIZEMAX;
1590        ns->mq_msg_default   = DFLT_MSG;
1591        ns->mq_msgsize_default  = DFLT_MSGSIZE;
1592
1593        ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
1594        if (IS_ERR(ns->mq_mnt)) {
1595                int err = PTR_ERR(ns->mq_mnt);
1596                ns->mq_mnt = NULL;
1597                return err;
1598        }
1599        return 0;
1600}
1601
1602void mq_clear_sbinfo(struct ipc_namespace *ns)
1603{
1604        ns->mq_mnt->mnt_sb->s_fs_info = NULL;
1605}
1606
1607void mq_put_mnt(struct ipc_namespace *ns)
1608{
1609        kern_unmount(ns->mq_mnt);
1610}
1611
1612static int __init init_mqueue_fs(void)
1613{
1614        int error;
1615
1616        mqueue_inode_cachep = kmem_cache_create("mqueue_inode_cache",
1617                                sizeof(struct mqueue_inode_info), 0,
1618                                SLAB_HWCACHE_ALIGN|SLAB_ACCOUNT, init_once);
1619        if (mqueue_inode_cachep == NULL)
1620                return -ENOMEM;
1621
1622        /* ignore failures - they are not fatal */
1623        mq_sysctl_table = mq_register_sysctl_table();
1624
1625        error = register_filesystem(&mqueue_fs_type);
1626        if (error)
1627                goto out_sysctl;
1628
1629        spin_lock_init(&mq_lock);
1630
1631        error = mq_init_ns(&init_ipc_ns);
1632        if (error)
1633                goto out_filesystem;
1634
1635        return 0;
1636
1637out_filesystem:
1638        unregister_filesystem(&mqueue_fs_type);
1639out_sysctl:
1640        if (mq_sysctl_table)
1641                unregister_sysctl_table(mq_sysctl_table);
1642        kmem_cache_destroy(mqueue_inode_cachep);
1643        return error;
1644}
1645
1646device_initcall(init_mqueue_fs);
1647