linux/net/unix/garbage.c
/*
 * NET3:        Garbage Collector For AF_UNIX sockets
 *
 * Garbage Collector:
 *      Copyright (C) Barak A. Pearlmutter.
 *      Released under the GPL version 2 or later.
 *
 * Chopped about by Alan Cox 22/3/96 to make it fit the AF_UNIX socket problem.
 * If it doesn't work blame me, it worked when Barak sent it.
 *
 * Assumptions:
 *
 *  - object w/ a bit
 *  - free list
 *
 * Current optimizations:
 *
 *  - explicit stack instead of recursion
 *  - tail recurse on first born instead of immediate push/pop
 *  - we gather the stuff that should not be killed into a tree,
 *    and the stack is just a path from the root to the current pointer.
 *
 * Future optimizations:
 *
 *  - don't just push the entire root set; process in place
 *
 *      This program is free software; you can redistribute it and/or
 *      modify it under the terms of the GNU General Public License
 *      as published by the Free Software Foundation; either version
 *      2 of the License, or (at your option) any later version.
 *
 * Fixes:
 *      Alan Cox        07 Sept 1997    Vmalloc internal stack as needed.
 *                                      Cope with changing max_files.
 *      Al Viro         11 Oct 1998
 *              The graph may have cycles. That is, we can send the
 *              descriptor of foo to bar and vice versa. The current code
 *              chokes on that. Fix: move the SCM_RIGHTS skbs onto a
 *              separate list and then kfree_skb() them all instead of
 *              doing explicit fput's. Another problem: since fput() may
 *              block, somebody may create a new unix_socket while we are
 *              in the middle of the sweep phase. Fix: revert the logic
 *              wrt MARKED. Mark everything at the beginning and unmark
 *              the non-junk ones.
 *
 *              [12 Oct 1998] AAARGH! The new code purges all SCM_RIGHTS
 *              sent to connect()'ed but still not accept()'ed sockets.
 *              Fixed. The old code had a slightly different problem here:
 *              an extra fput() when we passed a descriptor via such a
 *              socket and then closed it (the descriptor). That would
 *              happen on each unix_gc() until the accept(). Since the
 *              struct file in question would go to the free list and
 *              might be reused... that might be the reason for the
 *              random oopses on filp_close() in unrelated processes.
 *
 *      AV              28 Feb 1999
 *              Kill the explicit allocation of the stack. Now we keep the
 *              tree with its root in a dummy node plus a pointer
 *              (gc_current) to one of the nodes. The stack is represented
 *              as the path from gc_current to the dummy. Unmark now means
 *              "add to tree". Push == "make it a son of gc_current".
 *              Pop == "move gc_current to its parent". We keep only
 *              pointers to parents (->gc_tree).
 *      AV              1 Mar 1999
 *              Damn. Added the missing check for ->dead when scanning
 *              listen queues.
 *
 *      Miklos Szeredi  25 Jun 2007
 *              Reimplemented with a cycle-collecting algorithm. This
 *              should solve several problems with the previous code, such
 *              as being racy wrt receive and holding up unrelated socket
 *              operations.
 */
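
/*
 * Illustrative userspace sketch (not part of this file) of how such a
 * cycle can arise: a socket's own descriptor is sent over a socketpair
 * via SCM_RIGHTS and both ends are then closed.
 *
 *      int fds[2];
 *      char c = 0;
 *      struct iovec iov = { .iov_base = &c, .iov_len = 1 };
 *      union { struct cmsghdr h; char buf[CMSG_SPACE(sizeof(int))]; } u = {};
 *      struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1,
 *                            .msg_control = u.buf,
 *                            .msg_controllen = sizeof(u.buf) };
 *
 *      socketpair(AF_UNIX, SOCK_STREAM, 0, fds);
 *      u.h.cmsg_level = SOL_SOCKET;
 *      u.h.cmsg_type = SCM_RIGHTS;
 *      u.h.cmsg_len = CMSG_LEN(sizeof(int));
 *      memcpy(CMSG_DATA(&u.h), &fds[0], sizeof(int));
 *      sendmsg(fds[1], &msg, 0);       /* fds[0] is now "in flight" */
 *      close(fds[0]);
 *      close(fds[1]);
 *
 * After both close() calls no descriptor refers to either socket, yet
 * the skb queued on fds[0]'s receive queue still holds a struct file
 * reference to fds[0]'s own socket, so its refcount can never reach
 * zero on its own; only this garbage collector can reclaim it.
 */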

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/un.h>
#include <linux/net.h>
#include <linux/fs.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/file.h>
#include <linux/proc_fs.h>
#include <linux/mutex.h>
#include <linux/wait.h>

#include <net/sock.h>
#include <net/af_unix.h>
#include <net/scm.h>
#include <net/tcp_states.h>

/* Internal data structures and random procedures: */

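/*
 * gc_inflight_list holds every AF_UNIX socket whose file is currently
 * referenced by one or more in-flight SCM_RIGHTS messages. During a
 * collection, candidates are temporarily moved onto gc_candidates.
 * unix_gc_lock serializes updates to these lists and to the inflight
 * counters; unix_gc_wait lets senders sleep until a running collection
 * completes. unix_tot_inflight below is the system-wide count of
 * in-flight AF_UNIX files.
 */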
static LIST_HEAD(gc_inflight_list);
static LIST_HEAD(gc_candidates);
static DEFINE_SPINLOCK(unix_gc_lock);
static DECLARE_WAIT_QUEUE_HEAD(unix_gc_wait);

unsigned int unix_tot_inflight;

struct sock *unix_get_socket(struct file *filp)
{
        struct sock *u_sock = NULL;
        struct inode *inode = file_inode(filp);

        /* Is it a socket?  Note that O_PATH descriptors (FMODE_PATH)
         * cannot be used for socket operations, so they are not
         * treated as sockets here.
         */
        if (S_ISSOCK(inode->i_mode) && !(filp->f_mode & FMODE_PATH)) {
                struct socket *sock = SOCKET_I(inode);
                struct sock *s = sock->sk;

                /* Is it an AF_UNIX socket? */
                if (s && sock->ops && sock->ops->family == PF_UNIX)
                        u_sock = s;
        }
        return u_sock;
}

/* Keep track of how many times the file is in flight, if it is the
 * file of an AF_UNIX socket.
 */
void unix_inflight(struct user_struct *user, struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        spin_lock(&unix_gc_lock);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                if (atomic_long_inc_return(&u->inflight) == 1) {
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &gc_inflight_list);
                } else {
                        BUG_ON(list_empty(&u->link));
                }
                unix_tot_inflight++;
        }
        user->unix_inflight++;
        spin_unlock(&unix_gc_lock);
}

void unix_notinflight(struct user_struct *user, struct file *fp)
{
        struct sock *s = unix_get_socket(fp);

        spin_lock(&unix_gc_lock);

        if (s) {
                struct unix_sock *u = unix_sk(s);

                BUG_ON(list_empty(&u->link));

                if (atomic_long_dec_and_test(&u->inflight))
                        list_del_init(&u->link);
                unix_tot_inflight--;
        }
        user->unix_inflight--;
        spin_unlock(&unix_gc_lock);
}
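
/*
 * Both helpers above run under unix_gc_lock, so a collection never
 * observes a half-updated inflight count. u->inflight counts the
 * in-flight SCM_RIGHTS references to this socket's file, while
 * user->unix_inflight feeds the per-user flood limit (see
 * too_many_unix_fds() in af_unix.c).
 */

/*
 * Walk every SCM_RIGHTS skb queued on x's receive queue and invoke
 * func on each referenced fd that belongs to a GC-candidate AF_UNIX
 * socket. If hitlist is non-NULL, any skb referencing at least one
 * candidate is unlinked from the queue and moved onto hitlist, so the
 * caller can free those skbs later.
 */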
static void scan_inflight(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        struct sk_buff *skb;
        struct sk_buff *next;

        spin_lock(&x->sk_receive_queue.lock);
        skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                /* Do we have file descriptors ? */
                if (UNIXCB(skb).fp) {
                        bool hit = false;
                        /* Process the descriptors of this socket */
                        int nfd = UNIXCB(skb).fp->count;
                        struct file **fp = UNIXCB(skb).fp->fp;

                        while (nfd--) {
                                /* Get the socket this fd refers to, if it
                                 * is indeed an AF_UNIX socket.
                                 */
                                struct sock *sk = unix_get_socket(*fp++);

                                if (sk) {
                                        struct unix_sock *u = unix_sk(sk);

                                        /* Ignore non-candidates; they could
                                         * have been added to the queues after
                                         * the garbage collection started.
                                         */
                                        if (test_bit(UNIX_GC_CANDIDATE, &u->gc_flags)) {
                                                hit = true;

                                                func(u);
                                        }
                                }
                        }
                        if (hit && hitlist != NULL) {
                                __skb_unlink(skb, &x->sk_receive_queue);
                                __skb_queue_tail(hitlist, skb);
                        }
                }
        }
        spin_unlock(&x->sk_receive_queue.lock);
}

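/*
 * Apply scan_inflight() to x, or, if x is a listening socket, to each
 * queued embryo instead: a listener's receive queue holds connections
 * that have not been accept()'ed yet rather than data skbs, and those
 * embryos can already carry in-flight fds of their own.
 */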
static void scan_children(struct sock *x, void (*func)(struct unix_sock *),
                          struct sk_buff_head *hitlist)
{
        if (x->sk_state != TCP_LISTEN) {
                scan_inflight(x, func, hitlist);
        } else {
                struct sk_buff *skb;
                struct sk_buff *next;
                struct unix_sock *u;
                LIST_HEAD(embryos);

                /* For a listening socket collect the queued embryos
                 * and perform a scan on them as well.
                 */
                spin_lock(&x->sk_receive_queue.lock);
                skb_queue_walk_safe(&x->sk_receive_queue, skb, next) {
                        u = unix_sk(skb->sk);

                        /* An embryo cannot be in-flight, so it's safe
                         * to use the list link.
                         */
                        BUG_ON(!list_empty(&u->link));
                        list_add_tail(&u->link, &embryos);
                }
                spin_unlock(&x->sk_receive_queue.lock);

                while (!list_empty(&embryos)) {
                        u = list_entry(embryos.next, struct unix_sock, link);
                        scan_inflight(&u->sk, func, hitlist);
                        list_del_init(&u->link);
                }
        }
}

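/*
 * The three helpers below are the func callbacks that unix_gc() feeds
 * to scan_children(): dec_inflight() removes the internal references
 * while candidates are being marked, and inc_inflight() /
 * inc_inflight_move_tail() restore them during the scan-back and
 * purge phases.
 */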
static void dec_inflight(struct unix_sock *usk)
{
        atomic_long_dec(&usk->inflight);
}

static void inc_inflight(struct unix_sock *usk)
{
        atomic_long_inc(&usk->inflight);
}

static void inc_inflight_move_tail(struct unix_sock *u)
{
        atomic_long_inc(&u->inflight);
        /* If this still might be part of a cycle, move it to the end
         * of the list, so that it's checked even if it was already
         * passed over.
         */
        if (test_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags))
                list_move_tail(&u->link, &gc_candidates);
}

static bool gc_in_progress;
#define UNIX_INFLIGHT_TRIGGER_GC 16000

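/*
 * Called from the AF_UNIX sendmsg paths before new fds are queued, so
 * a task flooding the system with in-flight descriptors throttles
 * itself: once the threshold above is crossed it runs the collection,
 * and everyone waits for a collection in progress to finish.
 */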
void wait_for_unix_gc(void)
{
        /* If the number of in-flight sockets is insane,
         * force a garbage collect right now.
         */
        if (unix_tot_inflight > UNIX_INFLIGHT_TRIGGER_GC && !gc_in_progress)
                unix_gc();
        wait_event(unix_gc_wait, !gc_in_progress);
}

/* The external entry point: unix_gc() */
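/*
 * Shape of the collection, matching the phases below: (1) candidates
 * are the in-flight sockets whose every file reference comes from an
 * SCM_RIGHTS skb (total_refs == inflight_refs); (2) each reference a
 * candidate holds to another candidate is subtracted; (3) a candidate
 * left with inflight > 0 is still reachable from outside, so its
 * outgoing references are restored transitively and it leaves the
 * candidate set; (4) whatever remains is unreachable cyclic garbage,
 * and the skbs that form the cycles are purged.
 */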
void unix_gc(void)
{
        struct unix_sock *u;
        struct unix_sock *next;
        struct sk_buff_head hitlist;
        struct list_head cursor;
        LIST_HEAD(not_cycle_list);

        spin_lock(&unix_gc_lock);

        /* Avoid a recursive GC. */
        if (gc_in_progress)
                goto out;

        gc_in_progress = true;
        /* First, select candidates for garbage collection.  Only
         * in-flight sockets are considered, and from those only the
         * ones which don't have any external reference.
         *
         * Holding unix_gc_lock will protect these candidates from
         * being detached, and hence from gaining an external
         * reference.  Since there are no possible receivers, all
         * buffers currently on the candidates' queues stay there
         * during the garbage collection.
         *
         * We also know that no new candidate can be added onto the
         * receive queues.  Other, non-candidate sockets _can_ be
         * added to the queues, so we must make sure we only touch
         * candidates.
         */
        list_for_each_entry_safe(u, next, &gc_inflight_list, link) {
                long total_refs;
                long inflight_refs;

                total_refs = file_count(u->sk.sk_socket->file);
                inflight_refs = atomic_long_read(&u->inflight);

                BUG_ON(inflight_refs < 1);
                BUG_ON(total_refs < inflight_refs);
                if (total_refs == inflight_refs) {
                        list_move_tail(&u->link, &gc_candidates);
                        __set_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                        __set_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                }
        }
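
        /* At this point gc_candidates holds every socket whose file
         * is referenced only from in-flight skbs, with CANDIDATE and
         * MAYBE_CYCLE set on each of them.
         */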

        /* Now remove all internal in-flight references to children of
         * the candidates.
         */
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, dec_inflight, NULL);

        /* Restore the references for children of all candidates,
         * which have remaining references.  Do this recursively, so
         * only those remain which form cyclic references.
         *
         * Use a "cursor" link, to make the list traversal safe, even
         * though elements might be moved about.
         */
        list_add(&cursor, &gc_candidates);
        while (cursor.next != &gc_candidates) {
                u = list_entry(cursor.next, struct unix_sock, link);

                /* Move cursor to after the current position. */
                list_move(&cursor, &u->link);

                if (atomic_long_read(&u->inflight) > 0) {
                        list_move_tail(&u->link, &not_cycle_list);
                        __clear_bit(UNIX_GC_MAYBE_CYCLE, &u->gc_flags);
                        scan_children(&u->sk, inc_inflight_move_tail, NULL);
                }
        }
        list_del(&cursor);
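
        /* Note: the walk above terminates.  A socket leaves the
         * candidate list at most once, and references are only
         * restored for children of sockets that leave it, so the
         * number of list_move_tail() re-queues is bounded by the
         * total number of in-flight references.
         */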

        /* not_cycle_list contains those sockets which do not make up
         * a cycle.  Restore these to the inflight list.
         */
        while (!list_empty(&not_cycle_list)) {
                u = list_entry(not_cycle_list.next, struct unix_sock, link);
                __clear_bit(UNIX_GC_CANDIDATE, &u->gc_flags);
                list_move_tail(&u->link, &gc_inflight_list);
        }

        /* Now gc_candidates contains only garbage.  Restore original
         * inflight counters for these as well, and remove the skbuffs
         * which are creating the cycle(s).
         */
        skb_queue_head_init(&hitlist);
        list_for_each_entry(u, &gc_candidates, link)
                scan_children(&u->sk, inc_inflight, &hitlist);

        spin_unlock(&unix_gc_lock);

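        /* The purge must happen without unix_gc_lock held: freeing
         * each SCM_RIGHTS skb drops its fd references through
         * unix_notinflight(), which takes the lock itself.
         */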
 363        /* Here we are. Hitlist is filled. Die. */
 364        __skb_queue_purge(&hitlist);
 365
 366        spin_lock(&unix_gc_lock);
 367
 368        /* All candidates should have been detached by now. */
 369        BUG_ON(!list_empty(&gc_candidates));
 370        gc_in_progress = false;
 371        wake_up(&unix_gc_wait);
 372
 373 out:
 374        spin_unlock(&unix_gc_lock);
 375}
 376