linux/fs/kernfs/file.c
/*
 * fs/kernfs/file.c - kernfs file implementation
 *
 * Copyright (c) 2001-3 Patrick Mochel
 * Copyright (c) 2007 SUSE Linux Products GmbH
 * Copyright (c) 2007, 2013 Tejun Heo <tj@kernel.org>
 *
 * This file is released under the GPLv2.
 */

#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/fsnotify.h>

#include "kernfs-internal.h"

/*
 * There's one kernfs_open_file for each open file and one kernfs_open_node
 * for each kernfs_node with one or more open files.
 *
 * kernfs_node->attr.open points to kernfs_open_node.  attr.open is
 * protected by kernfs_open_node_lock.
 *
 * filp->private_data points to seq_file whose ->private points to
 * kernfs_open_file.  kernfs_open_files are chained at
 * kernfs_open_node->files, which is protected by kernfs_open_file_mutex.
 */
static DEFINE_SPINLOCK(kernfs_open_node_lock);
static DEFINE_MUTEX(kernfs_open_file_mutex);

struct kernfs_open_node {
        atomic_t                refcnt;
        atomic_t                event;
        wait_queue_head_t       poll;
        struct list_head        files; /* goes through kernfs_open_file.list */
};
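
/*
 * The chain above, spelled out (illustrative only; kernfs_of() below
 * performs the first two hops):
 *
 *      struct seq_file *sf = file->private_data;
 *      struct kernfs_open_file *of = sf->private;
 *      struct kernfs_open_node *on = of->kn->attr.open;
 *
 * on->files then links every kernfs_open_file of that node.
 */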

/*
 * kernfs_notify() may be called from any context and bounces notifications
 * through a work item.  To minimize space overhead in kernfs_node, the
 * pending queue is implemented as a singly linked list of kernfs_nodes.
 * The list is terminated with the self pointer so that whether a
 * kernfs_node is on the list or not can be determined by testing the next
 * pointer for NULL.
 */
#define KERNFS_NOTIFY_EOL                       ((void *)&kernfs_notify_list)

static DEFINE_SPINLOCK(kernfs_notify_lock);
static struct kernfs_node *kernfs_notify_list = KERNFS_NOTIFY_EOL;
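
/*
 * Illustrative sketch of the queueing discipline (the real code is in
 * kernfs_notify() and kernfs_notify_workfn() below): a node with a NULL
 * ->attr.notify_next is not queued, and the tail of the queue carries
 * KERNFS_NOTIFY_EOL instead of NULL:
 *
 *      if (!kn->attr.notify_next) {            // not on the list yet
 *              kn->attr.notify_next = kernfs_notify_list;
 *              kernfs_notify_list = kn;        // push at the head
 *      }
 */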

static struct kernfs_open_file *kernfs_of(struct file *file)
{
        return ((struct seq_file *)file->private_data)->private;
}

/*
 * Determine the kernfs_ops for the given kernfs_node.  This function must
 * be called while holding an active reference.
 */
static const struct kernfs_ops *kernfs_ops(struct kernfs_node *kn)
{
        if (kn->flags & KERNFS_LOCKDEP)
                lockdep_assert_held(kn);
        return kn->attr.ops;
}

/*
 * As kernfs_seq_stop() is also called after kernfs_seq_start() or
 * kernfs_seq_next() failure, it needs to distinguish whether it's stopping
 * a seq_file iteration which is fully initialized with an active reference
 * or an aborted kernfs_seq_start() due to get_active failure.  The
 * position pointer is the only context for each seq_file iteration and
 * thus the stop condition should be encoded in it.  As the return value is
 * directly visible to userland, ERR_PTR(-ENODEV) is the only acceptable
 * choice to indicate get_active failure.
 *
 * Unfortunately, this is complicated due to the optional custom seq_file
 * operations which may return ERR_PTR(-ENODEV) too.  kernfs_seq_stop()
 * can't distinguish whether ERR_PTR(-ENODEV) is from get_active failure or
 * custom seq_file operations and thus can't decide whether put_active
 * should be performed or not only on ERR_PTR(-ENODEV).
 *
 * This is worked around by factoring out the custom seq_stop() and
 * put_active part into kernfs_seq_stop_active(), skipping it from
 * kernfs_seq_stop() if ERR_PTR(-ENODEV) while invoking it directly after
 * custom seq_file operations fail with ERR_PTR(-ENODEV) - this ensures
 * that kernfs_seq_stop_active() is skipped only after get_active failure.
 */
static void kernfs_seq_stop_active(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops = kernfs_ops(of->kn);

        if (ops->seq_stop)
                ops->seq_stop(sf, v);
        kernfs_put_active(of->kn);
}

static void *kernfs_seq_start(struct seq_file *sf, loff_t *ppos)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops;

        /*
         * @of->mutex nests outside active ref and is primarily to ensure that
         * the ops aren't called concurrently for the same open file.
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn))
                return ERR_PTR(-ENODEV);

        ops = kernfs_ops(of->kn);
        if (ops->seq_start) {
                void *next = ops->seq_start(sf, ppos);
                /* see the comment above kernfs_seq_stop_active() */
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
        } else {
                /*
                 * The same behavior and code as single_open().  Returns
                 * !NULL if pos is at the beginning; otherwise, NULL.
                 */
                return NULL + !*ppos;
        }
}

static void *kernfs_seq_next(struct seq_file *sf, void *v, loff_t *ppos)
{
        struct kernfs_open_file *of = sf->private;
        const struct kernfs_ops *ops = kernfs_ops(of->kn);

        if (ops->seq_next) {
                void *next = ops->seq_next(sf, v, ppos);
                /* see the comment above kernfs_seq_stop_active() */
                if (next == ERR_PTR(-ENODEV))
                        kernfs_seq_stop_active(sf, next);
                return next;
        } else {
                /*
                 * The same behavior and code as single_open(), always
                 * terminate after the initial read.
                 */
                ++*ppos;
                return NULL;
        }
}

static void kernfs_seq_stop(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        if (v != ERR_PTR(-ENODEV))
                kernfs_seq_stop_active(sf, v);
        mutex_unlock(&of->mutex);
}

static int kernfs_seq_show(struct seq_file *sf, void *v)
{
        struct kernfs_open_file *of = sf->private;

        of->event = atomic_read(&of->kn->attr.open->event);

        return of->kn->attr.ops->seq_show(sf, v);
}

static const struct seq_operations kernfs_seq_ops = {
        .start = kernfs_seq_start,
        .next = kernfs_seq_next,
        .stop = kernfs_seq_stop,
        .show = kernfs_seq_show,
};
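
/*
 * Minimal usage sketch (hypothetical names): a kernfs user that only
 * implements ->seq_show gets the single_open()-like iteration above for
 * free:
 *
 *      static int my_seq_show(struct seq_file *sf, void *v)
 *      {
 *              struct kernfs_open_file *of = sf->private;
 *
 *              seq_printf(sf, "%d\n", *(int *)of->kn->priv);
 *              return 0;
 *      }
 *
 *      static const struct kernfs_ops my_ops = {
 *              .seq_show       = my_seq_show,
 *      };
 */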

/*
 * As reading a bin file can have side-effects, the exact offset and bytes
 * specified in read(2) call should be passed to the read callback making
 * it difficult to use seq_file.  Implement simplistic custom buffering for
 * bin files.
 */
static ssize_t kernfs_file_direct_read(struct kernfs_open_file *of,
                                       char __user *user_buf, size_t count,
                                       loff_t *ppos)
{
        ssize_t len = min_t(size_t, count, PAGE_SIZE);
        const struct kernfs_ops *ops;
        char *buf;

        buf = of->prealloc_buf;
        if (!buf)
                buf = kmalloc(len, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /*
         * @of->mutex nests outside active ref and is used both to ensure that
         * the ops aren't called concurrently for the same open file, and
         * to provide exclusive access to ->prealloc_buf (when that exists).
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn)) {
                len = -ENODEV;
                mutex_unlock(&of->mutex);
                goto out_free;
        }

        of->event = atomic_read(&of->kn->attr.open->event);
        ops = kernfs_ops(of->kn);
        if (ops->read)
                len = ops->read(of, buf, len, *ppos);
        else
                len = -EINVAL;

        if (len < 0)
                goto out_unlock;

        if (copy_to_user(user_buf, buf, len)) {
                len = -EFAULT;
                goto out_unlock;
        }

        *ppos += len;

 out_unlock:
        kernfs_put_active(of->kn);
        mutex_unlock(&of->mutex);
 out_free:
        if (buf != of->prealloc_buf)
                kfree(buf);
        return len;
}
/**
 * kernfs_fop_read - kernfs vfs read callback
 * @file: file pointer
 * @user_buf: userland buffer to read the data into
 * @count: number of bytes
 * @ppos: starting offset
 */
static ssize_t kernfs_fop_read(struct file *file, char __user *user_buf,
                               size_t count, loff_t *ppos)
{
        struct kernfs_open_file *of = kernfs_of(file);

        if (of->kn->flags & KERNFS_HAS_SEQ_SHOW)
                return seq_read(file, user_buf, count, ppos);
        else
                return kernfs_file_direct_read(of, user_buf, count, ppos);
}
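
/*
 * Usage sketch for the non-seq_file path (hypothetical names): a ->read
 * callback gets a kernel buffer plus the exact offset and size from the
 * read(2) call and returns the number of bytes filled in:
 *
 *      static ssize_t my_read(struct kernfs_open_file *of, char *buf,
 *                             size_t bytes, loff_t off)
 *      {
 *              if (off >= MY_BLOB_SIZE)
 *                      return 0;
 *              bytes = min_t(size_t, bytes, MY_BLOB_SIZE - off);
 *              memcpy(buf, my_blob + off, bytes);
 *              return bytes;
 *      }
 */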

/**
 * kernfs_fop_write - kernfs vfs write callback
 * @file: file pointer
 * @user_buf: data to write
 * @count: number of bytes
 * @ppos: starting offset
 *
 * Copy data in from userland and pass it to the matching kernfs write
 * operation.
 *
 * There is no easy way for us to know if userspace is only doing a partial
 * write, so we don't support them.  We expect the entire buffer to come on
 * the first write.  Hint: if you're writing a value, first read the file,
 * modify only the value you're changing, then write the entire buffer
 * back (see the userland sketch after this function).
 */
static ssize_t kernfs_fop_write(struct file *file, const char __user *user_buf,
                                size_t count, loff_t *ppos)
{
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
        ssize_t len;
        char *buf;

        if (of->atomic_write_len) {
                len = count;
                if (len > of->atomic_write_len)
                        return -E2BIG;
        } else {
                len = min_t(size_t, count, PAGE_SIZE);
        }

        buf = of->prealloc_buf;
        if (!buf)
                buf = kmalloc(len + 1, GFP_KERNEL);
        if (!buf)
                return -ENOMEM;

        /*
         * @of->mutex nests outside active ref and is used both to ensure that
         * the ops aren't called concurrently for the same open file, and
         * to provide exclusive access to ->prealloc_buf (when that exists).
         */
        mutex_lock(&of->mutex);
        if (!kernfs_get_active(of->kn)) {
                mutex_unlock(&of->mutex);
                len = -ENODEV;
                goto out_free;
        }

        if (copy_from_user(buf, user_buf, len)) {
                len = -EFAULT;
                goto out_unlock;
        }
        buf[len] = '\0';        /* guarantee string termination */

        ops = kernfs_ops(of->kn);
        if (ops->write)
                len = ops->write(of, buf, len, *ppos);
        else
                len = -EINVAL;

        if (len > 0)
                *ppos += len;

out_unlock:
        kernfs_put_active(of->kn);
        mutex_unlock(&of->mutex);
out_free:
        if (buf != of->prealloc_buf)
                kfree(buf);
        return len;
}
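
/*
 * Userland read-modify-write sketch for the "no partial writes" rule
 * above (illustrative only; error handling elided and the path is
 * hypothetical):
 *
 *      char buf[64];
 *      int fd = open("/sys/.../my_attr", O_RDWR);
 *      ssize_t n = read(fd, buf, sizeof(buf) - 1);
 *
 *      buf[n] = '\0';
 *      // ... modify only the field being changed in buf ...
 *      lseek(fd, 0, SEEK_SET);
 *      write(fd, buf, strlen(buf));       // hand back the whole buffer
 *      close(fd);
 */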

static void kernfs_vma_open(struct vm_area_struct *vma)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);

        if (!of->vm_ops)
                return;

        if (!kernfs_get_active(of->kn))
                return;

        if (of->vm_ops->open)
                of->vm_ops->open(vma);

        kernfs_put_active(of->kn);
}

static int kernfs_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return VM_FAULT_SIGBUS;

        if (!kernfs_get_active(of->kn))
                return VM_FAULT_SIGBUS;

        ret = VM_FAULT_SIGBUS;
        if (of->vm_ops->fault)
                ret = of->vm_ops->fault(vma, vmf);

        kernfs_put_active(of->kn);
        return ret;
}

static int kernfs_vma_page_mkwrite(struct vm_area_struct *vma,
                                   struct vm_fault *vmf)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return VM_FAULT_SIGBUS;

        if (!kernfs_get_active(of->kn))
                return VM_FAULT_SIGBUS;

        ret = 0;
        if (of->vm_ops->page_mkwrite)
                ret = of->vm_ops->page_mkwrite(vma, vmf);
        else
                file_update_time(file);

        kernfs_put_active(of->kn);
        return ret;
}

static int kernfs_vma_access(struct vm_area_struct *vma, unsigned long addr,
                             void *buf, int len, int write)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return -EINVAL;

        if (!kernfs_get_active(of->kn))
                return -EINVAL;

        ret = -EINVAL;
        if (of->vm_ops->access)
                ret = of->vm_ops->access(vma, addr, buf, len, write);

        kernfs_put_active(of->kn);
        return ret;
}

#ifdef CONFIG_NUMA
static int kernfs_vma_set_policy(struct vm_area_struct *vma,
                                 struct mempolicy *new)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        int ret;

        if (!of->vm_ops)
                return 0;

        if (!kernfs_get_active(of->kn))
                return -EINVAL;

        ret = 0;
        if (of->vm_ops->set_policy)
                ret = of->vm_ops->set_policy(vma, new);

        kernfs_put_active(of->kn);
        return ret;
}

static struct mempolicy *kernfs_vma_get_policy(struct vm_area_struct *vma,
                                               unsigned long addr)
{
        struct file *file = vma->vm_file;
        struct kernfs_open_file *of = kernfs_of(file);
        struct mempolicy *pol;

        if (!of->vm_ops)
                return vma->vm_policy;

        if (!kernfs_get_active(of->kn))
                return vma->vm_policy;

        pol = vma->vm_policy;
        if (of->vm_ops->get_policy)
                pol = of->vm_ops->get_policy(vma, addr);

        kernfs_put_active(of->kn);
        return pol;
}

#endif

static const struct vm_operations_struct kernfs_vm_ops = {
        .open           = kernfs_vma_open,
        .fault          = kernfs_vma_fault,
        .page_mkwrite   = kernfs_vma_page_mkwrite,
        .access         = kernfs_vma_access,
#ifdef CONFIG_NUMA
        .set_policy     = kernfs_vma_set_policy,
        .get_policy     = kernfs_vma_get_policy,
#endif
};

static int kernfs_fop_mmap(struct file *file, struct vm_area_struct *vma)
{
        struct kernfs_open_file *of = kernfs_of(file);
        const struct kernfs_ops *ops;
        int rc;

        /*
         * mmap path and of->mutex are prone to triggering spurious lockdep
         * warnings and we don't want to add a spurious locking dependency
         * between the two.  Check whether mmap is actually implemented
         * without grabbing @of->mutex by testing the HAS_MMAP flag.  See
         * the comment in kernfs_fop_open() for more details.
         */
        if (!(of->kn->flags & KERNFS_HAS_MMAP))
                return -ENODEV;

        mutex_lock(&of->mutex);

        rc = -ENODEV;
        if (!kernfs_get_active(of->kn))
                goto out_unlock;

        ops = kernfs_ops(of->kn);
        rc = ops->mmap(of, vma);
        if (rc)
                goto out_put;

        /*
         * PowerPC's pci_mmap of legacy_mem uses shmem_zero_setup()
         * to satisfy versions of X which crash if the mmap fails: that
         * substitutes a new vm_file, and we don't then want bin_vm_ops.
         */
        if (vma->vm_file != file)
                goto out_put;

        rc = -EINVAL;
        if (of->mmapped && of->vm_ops != vma->vm_ops)
                goto out_put;

        /*
         * It is not possible to successfully wrap close.
         * So error if someone is trying to use close.
         */
        rc = -EINVAL;
        if (vma->vm_ops && vma->vm_ops->close)
                goto out_put;

        rc = 0;
        of->mmapped = 1;
        of->vm_ops = vma->vm_ops;
        vma->vm_ops = &kernfs_vm_ops;
out_put:
        kernfs_put_active(of->kn);
out_unlock:
        mutex_unlock(&of->mutex);

        return rc;
}
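
/*
 * Usage sketch (hypothetical names; whether remap_pfn_range() is the
 * right mapping primitive depends on the backing memory): a ->mmap
 * callback sets up the mapping and may install its own vm_ops, which
 * are then wrapped by kernfs_vm_ops above:
 *
 *      static int my_mmap(struct kernfs_open_file *of,
 *                         struct vm_area_struct *vma)
 *      {
 *              return remap_pfn_range(vma, vma->vm_start, my_pfn,
 *                                     vma->vm_end - vma->vm_start,
 *                                     vma->vm_page_prot);
 *      }
 *
 *      static const struct kernfs_ops my_mmap_ops = {
 *              .mmap   = my_mmap,
 *      };
 */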

/**
 *      kernfs_get_open_node - get or create kernfs_open_node
 *      @kn: target kernfs_node
 *      @of: kernfs_open_file for this instance of open
 *
 *      If @kn->attr.open exists, increment its reference count; otherwise,
 *      create one.  @of is chained to the files list.
 *
 *      LOCKING:
 *      Kernel thread context (may sleep).
 *
 *      RETURNS:
 *      0 on success, -errno on failure.
 */
static int kernfs_get_open_node(struct kernfs_node *kn,
                                struct kernfs_open_file *of)
{
        struct kernfs_open_node *on, *new_on = NULL;

 retry:
        mutex_lock(&kernfs_open_file_mutex);
        spin_lock_irq(&kernfs_open_node_lock);

        if (!kn->attr.open && new_on) {
                kn->attr.open = new_on;
                new_on = NULL;
        }

        on = kn->attr.open;
        if (on) {
                atomic_inc(&on->refcnt);
                list_add_tail(&of->list, &on->files);
        }

        spin_unlock_irq(&kernfs_open_node_lock);
        mutex_unlock(&kernfs_open_file_mutex);

        if (on) {
                kfree(new_on);
                return 0;
        }

        /* not there, initialize a new one and retry */
        new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
        if (!new_on)
                return -ENOMEM;

        atomic_set(&new_on->refcnt, 0);
        atomic_set(&new_on->event, 1);
        init_waitqueue_head(&new_on->poll);
        INIT_LIST_HEAD(&new_on->files);
        goto retry;
}

/**
 *      kernfs_put_open_node - put kernfs_open_node
 *      @kn: target kernfs_node
 *      @of: associated kernfs_open_file
 *
 *      Put @kn->attr.open and unlink @of from the files list.  If
 *      reference count reaches zero, disassociate and free it.
 *
 *      LOCKING:
 *      None.
 */
static void kernfs_put_open_node(struct kernfs_node *kn,
                                 struct kernfs_open_file *of)
{
        struct kernfs_open_node *on = kn->attr.open;
        unsigned long flags;

        mutex_lock(&kernfs_open_file_mutex);
        spin_lock_irqsave(&kernfs_open_node_lock, flags);

        if (of)
                list_del(&of->list);

        if (atomic_dec_and_test(&on->refcnt))
                kn->attr.open = NULL;
        else
                on = NULL;

        spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
        mutex_unlock(&kernfs_open_file_mutex);

        kfree(on);
}

static int kernfs_fop_open(struct inode *inode, struct file *file)
{
        struct kernfs_node *kn = file->f_path.dentry->d_fsdata;
        struct kernfs_root *root = kernfs_root(kn);
        const struct kernfs_ops *ops;
        struct kernfs_open_file *of;
        bool has_read, has_write, has_mmap;
        int error = -EACCES;

        if (!kernfs_get_active(kn))
                return -ENODEV;

        ops = kernfs_ops(kn);

        has_read = ops->seq_show || ops->read || ops->mmap;
        has_write = ops->write || ops->mmap;
        has_mmap = ops->mmap;

        /* see the flag definition for details */
        if (root->flags & KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK) {
                if ((file->f_mode & FMODE_WRITE) &&
                    (!(inode->i_mode & S_IWUGO) || !has_write))
                        goto err_out;

                if ((file->f_mode & FMODE_READ) &&
                    (!(inode->i_mode & S_IRUGO) || !has_read))
                        goto err_out;
        }

        /* allocate a kernfs_open_file for the file */
        error = -ENOMEM;
        of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
        if (!of)
                goto err_out;

        /*
         * The following is done to give a different lockdep key to
         * @of->mutex for files which implement mmap.  This is a rather
         * crude way to avoid false positive lockdep warning around
         * mm->mmap_sem - mmap nests @of->mutex under mm->mmap_sem and
         * reading /sys/block/sda/trace/act_mask grabs sr_mutex, under
         * which mm->mmap_sem nests, while holding @of->mutex.  As each
         * open file has a separate mutex, it's okay as long as those don't
         * happen on the same file.  At this point, we can't easily give
         * each file a separate locking class.  Let's differentiate on
         * whether the file has mmap or not for now.
         *
         * Both paths of the branch look the same.  They're supposed to
         * look that way and give @of->mutex different static lockdep keys.
         */
        if (has_mmap)
                mutex_init(&of->mutex);
        else
                mutex_init(&of->mutex);

        of->kn = kn;
        of->file = file;

        /*
         * The write path needs to access atomic_write_len outside the
         * active reference.  Cache it in open_file.  See kernfs_fop_write()
         * for details.
         */
        of->atomic_write_len = ops->atomic_write_len;

        error = -EINVAL;
        /*
         * ->seq_show is incompatible with ->prealloc,
         * as seq_read does its own allocation.
         * ->read must be used instead.
         */
        if (ops->prealloc && ops->seq_show)
                goto err_free;
        if (ops->prealloc) {
                int len = of->atomic_write_len ?: PAGE_SIZE;
                of->prealloc_buf = kmalloc(len + 1, GFP_KERNEL);
                error = -ENOMEM;
                if (!of->prealloc_buf)
                        goto err_free;
        }

        /*
         * Always instantiate seq_file even if read access doesn't use
         * seq_file or is not requested.  This unifies private data access
         * and readable regular files are the vast majority anyway.
         */
        if (ops->seq_show)
                error = seq_open(file, &kernfs_seq_ops);
        else
                error = seq_open(file, NULL);
        if (error)
                goto err_free;

        ((struct seq_file *)file->private_data)->private = of;

        /* seq_file clears PWRITE unconditionally, restore it if WRITE */
        if (file->f_mode & FMODE_WRITE)
                file->f_mode |= FMODE_PWRITE;

        /* make sure we have open node struct */
        error = kernfs_get_open_node(kn, of);
        if (error)
                goto err_close;

        /* open succeeded, put active references */
        kernfs_put_active(kn);
        return 0;

err_close:
        seq_release(inode, file);
err_free:
        kfree(of->prealloc_buf);
        kfree(of);
err_out:
        kernfs_put_active(kn);
        return error;
}

static int kernfs_fop_release(struct inode *inode, struct file *filp)
{
        struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
        struct kernfs_open_file *of = kernfs_of(filp);

        kernfs_put_open_node(kn, of);
        seq_release(inode, filp);
        kfree(of->prealloc_buf);
        kfree(of);

        return 0;
}

void kernfs_unmap_bin_file(struct kernfs_node *kn)
{
        struct kernfs_open_node *on;
        struct kernfs_open_file *of;

        if (!(kn->flags & KERNFS_HAS_MMAP))
                return;

        spin_lock_irq(&kernfs_open_node_lock);
        on = kn->attr.open;
        if (on)
                atomic_inc(&on->refcnt);
        spin_unlock_irq(&kernfs_open_node_lock);
        if (!on)
                return;

        mutex_lock(&kernfs_open_file_mutex);
        list_for_each_entry(of, &on->files, list) {
                struct inode *inode = file_inode(of->file);
                unmap_mapping_range(inode->i_mapping, 0, 0, 1);
        }
        mutex_unlock(&kernfs_open_file_mutex);

        kernfs_put_open_node(kn, NULL);
}

/*
 * Kernfs attribute files are pollable.  The idea is that you read
 * the content and then you use 'poll' or 'select' to wait for
 * the content to change.  When the content changes (assuming the
 * manager for the kobject supports notification), poll will
 * return POLLERR|POLLPRI, and select will return the fd whether
 * it is waiting for read, write, or exceptions.
 * Once poll/select indicates that the value has changed, you
 * need to close and re-open the file, or seek to 0 and read again.
 * Reminder: this only works for attributes which actively support
 * it, and it is not possible to test an attribute from userspace
 * to see if it supports poll (Neither 'poll' nor 'select' return
 * an appropriate error code).  When in doubt, set a suitable timeout value.
 */
static unsigned int kernfs_fop_poll(struct file *filp, poll_table *wait)
{
        struct kernfs_open_file *of = kernfs_of(filp);
        struct kernfs_node *kn = filp->f_path.dentry->d_fsdata;
        struct kernfs_open_node *on = kn->attr.open;

        if (!kernfs_get_active(kn))
                goto trigger;

        poll_wait(filp, &on->poll, wait);

        kernfs_put_active(kn);

        if (of->event != atomic_read(&on->event))
                goto trigger;

        return DEFAULT_POLLMASK;

 trigger:
        return DEFAULT_POLLMASK|POLLERR|POLLPRI;
}
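
/*
 * Userland sketch of the protocol above (illustrative only; error
 * handling elided and the path is hypothetical):
 *
 *      char buf[128];
 *      struct pollfd pfd;
 *      int fd = open("/sys/.../my_attr", O_RDONLY);
 *
 *      read(fd, buf, sizeof(buf));        // prime the event counter
 *      pfd.fd = fd;
 *      pfd.events = POLLPRI;              // POLLERR is reported regardless
 *      poll(&pfd, 1, -1);                 // wakes on kernfs_notify()
 *      lseek(fd, 0, SEEK_SET);            // rewind and read the new value
 *      read(fd, buf, sizeof(buf));
 */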

static void kernfs_notify_workfn(struct work_struct *work)
{
        struct kernfs_node *kn;
        struct kernfs_open_node *on;
        struct kernfs_super_info *info;
repeat:
        /* pop one off the notify_list */
        spin_lock_irq(&kernfs_notify_lock);
        kn = kernfs_notify_list;
        if (kn == KERNFS_NOTIFY_EOL) {
                spin_unlock_irq(&kernfs_notify_lock);
                return;
        }
        kernfs_notify_list = kn->attr.notify_next;
        kn->attr.notify_next = NULL;
        spin_unlock_irq(&kernfs_notify_lock);

        /* kick poll */
        spin_lock_irq(&kernfs_open_node_lock);

        on = kn->attr.open;
        if (on) {
                atomic_inc(&on->event);
                wake_up_interruptible(&on->poll);
        }

        spin_unlock_irq(&kernfs_open_node_lock);

        /* kick fsnotify */
        mutex_lock(&kernfs_mutex);

        list_for_each_entry(info, &kernfs_root(kn)->supers, node) {
                struct inode *inode;
                struct dentry *dentry;

                inode = ilookup(info->sb, kn->ino);
                if (!inode)
                        continue;

                dentry = d_find_any_alias(inode);
                if (dentry) {
                        fsnotify_parent(NULL, dentry, FS_MODIFY);
                        fsnotify(inode, FS_MODIFY, inode, FSNOTIFY_EVENT_INODE,
                                 NULL, 0);
                        dput(dentry);
                }

                iput(inode);
        }

        mutex_unlock(&kernfs_mutex);
        kernfs_put(kn);
        goto repeat;
}
/**
 * kernfs_notify - notify a kernfs file
 * @kn: file to notify
 *
 * Notify @kn such that poll(2) on @kn wakes up.  May be called from any
 * context.
 */
void kernfs_notify(struct kernfs_node *kn)
{
        static DECLARE_WORK(kernfs_notify_work, kernfs_notify_workfn);
        unsigned long flags;

        if (WARN_ON(kernfs_type(kn) != KERNFS_FILE))
                return;

        spin_lock_irqsave(&kernfs_notify_lock, flags);
        if (!kn->attr.notify_next) {
                kernfs_get(kn);
                kn->attr.notify_next = kernfs_notify_list;
                kernfs_notify_list = kn;
                schedule_work(&kernfs_notify_work);
        }
        spin_unlock_irqrestore(&kernfs_notify_lock, flags);
}
EXPORT_SYMBOL_GPL(kernfs_notify);
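
/*
 * Usage sketch (hypothetical names): a driver that changes the state
 * backing a kernfs file calls kernfs_notify() so that poll(2) waiters
 * and fsnotify watchers see the change:
 *
 *      my_data.value = new_value;         // update what ->seq_show reports
 *      kernfs_notify(my_kn);              // wake pollers of that file
 */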

const struct file_operations kernfs_file_fops = {
        .read           = kernfs_fop_read,
        .write          = kernfs_fop_write,
        .llseek         = generic_file_llseek,
        .mmap           = kernfs_fop_mmap,
        .open           = kernfs_fop_open,
        .release        = kernfs_fop_release,
        .poll           = kernfs_fop_poll,
};

/**
 * __kernfs_create_file - kernfs internal function to create a file
 * @parent: directory to create the file in
 * @name: name of the file
 * @mode: mode of the file
 * @size: size of the file
 * @ops: kernfs operations for the file
 * @priv: private data for the file
 * @ns: optional namespace tag of the file
 * @key: lockdep key for the file's active_ref, %NULL to disable lockdep
 *
 * Returns the created node on success, ERR_PTR() value on error.
 */
struct kernfs_node *__kernfs_create_file(struct kernfs_node *parent,
                                         const char *name,
                                         umode_t mode, loff_t size,
                                         const struct kernfs_ops *ops,
                                         void *priv, const void *ns,
                                         struct lock_class_key *key)
{
        struct kernfs_node *kn;
        unsigned flags;
        int rc;

        flags = KERNFS_FILE;

        kn = kernfs_new_node(parent, name, (mode & S_IALLUGO) | S_IFREG, flags);
        if (!kn)
                return ERR_PTR(-ENOMEM);

        kn->attr.ops = ops;
        kn->attr.size = size;
        kn->ns = ns;
        kn->priv = priv;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (key) {
                lockdep_init_map(&kn->dep_map, "s_active", key, 0);
                kn->flags |= KERNFS_LOCKDEP;
        }
#endif

        /*
         * kn->attr.ops is accessible only while holding active ref.  We
         * need to know whether some ops are implemented outside active
         * ref.  Cache their existence in flags.
         */
        if (ops->seq_show)
                kn->flags |= KERNFS_HAS_SEQ_SHOW;
        if (ops->mmap)
                kn->flags |= KERNFS_HAS_MMAP;

        rc = kernfs_add_one(kn);
        if (rc) {
                kernfs_put(kn);
                return ERR_PTR(rc);
        }
        return kn;
}
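
/*
 * Usage sketch (hypothetical names; callers normally go through wrappers
 * such as sysfs or cgroup rather than calling this directly):
 *
 *      static struct lock_class_key my_key;
 *      struct kernfs_node *kn;
 *
 *      kn = __kernfs_create_file(parent, "my_attr", 0644, PAGE_SIZE,
 *                                &my_ops, &my_data, NULL, &my_key);
 *      if (IS_ERR(kn))
 *              return PTR_ERR(kn);
 */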