        The text below describes the locking rules for VFS-related methods.
It is (believed to be) up-to-date. *Please*, if you change anything in
prototypes or locking protocols - update this file. And update the relevant
instances in the tree, don't leave that to maintainers of filesystems/devices/
etc. At the very least, put the list of dubious cases at the end of this file.
Don't turn it into a log - maintainers of out-of-the-tree code are supposed to
be able to use diff(1).
        Thing currently missing here: socket operations. Alexey?

--------------------------- dentry_operations --------------------------
prototypes:
        int (*d_revalidate)(struct dentry *, unsigned int);
        int (*d_weak_revalidate)(struct dentry *, unsigned int);
        int (*d_hash)(const struct dentry *, struct qstr *);
        int (*d_compare)(const struct dentry *, const struct dentry *,
                        unsigned int, const char *, const struct qstr *);
        int (*d_delete)(struct dentry *);
        void (*d_release)(struct dentry *);
        void (*d_prune)(struct dentry *);
        void (*d_iput)(struct dentry *, struct inode *);
        char *(*d_dname)(struct dentry *dentry, char *buffer, int buflen);
        struct vfsmount *(*d_automount)(struct path *path);
        int (*d_manage)(struct dentry *, bool);

locking rules:
                        rename_lock     ->d_lock        may block       rcu-walk
d_revalidate:           no              no              yes (ref-walk)  maybe
d_weak_revalidate:      no              no              yes             no
d_hash:                 no              no              no              maybe
d_compare:              yes             no              no              maybe
d_delete:               no              yes             no              no
d_release:              no              no              yes             no
d_prune:                no              yes             no              no
d_iput:                 no              no              yes             no
d_dname:                no              no              no              no
d_automount:            no              no              yes             no
d_manage:               no              no              yes (ref-walk)  maybe

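        As an illustration of the rcu-walk column (a hedged sketch, not taken
from any particular filesystem; myfs_dentry_is_stale() is hypothetical): under
rcu-walk the method may not block, so a ->d_revalidate() instance typically
bails out with -ECHILD and lets the VFS retry in ref-walk mode, where blocking
is allowed:

        static int myfs_d_revalidate(struct dentry *dentry, unsigned int flags)
        {
                if (flags & LOOKUP_RCU)
                        return -ECHILD; /* may not block; ask for ref-walk retry */

                /* ref-walk: blocking (I/O, taking locks, ...) is fine here */
                if (myfs_dentry_is_stale(dentry))
                        return 0;       /* invalid */
                return 1;               /* still valid */
        }
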
--------------------------- inode_operations ---------------------------
prototypes:
        int (*create) (struct inode *,struct dentry *,umode_t, bool);
        struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
        int (*link) (struct dentry *,struct inode *,struct dentry *);
        int (*unlink) (struct inode *,struct dentry *);
        int (*symlink) (struct inode *,struct dentry *,const char *);
        int (*mkdir) (struct inode *,struct dentry *,umode_t);
        int (*rmdir) (struct inode *,struct dentry *);
        int (*mknod) (struct inode *,struct dentry *,umode_t,dev_t);
        int (*rename) (struct inode *, struct dentry *,
                        struct inode *, struct dentry *);
        int (*rename2) (struct inode *, struct dentry *,
                        struct inode *, struct dentry *, unsigned int);
        int (*readlink) (struct dentry *, char __user *,int);
        const char *(*follow_link) (struct dentry *, void **);
        void (*put_link) (struct inode *, void *);
        void (*truncate) (struct inode *);
        int (*permission) (struct inode *, int, unsigned int);
        int (*get_acl)(struct inode *, int);
        int (*setattr) (struct dentry *, struct iattr *);
        int (*getattr) (struct vfsmount *, struct dentry *, struct kstat *);
        int (*setxattr) (struct dentry *, const char *,const void *,size_t,int);
        ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
        ssize_t (*listxattr) (struct dentry *, char *, size_t);
        int (*removexattr) (struct dentry *, const char *);
        int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
        void (*update_time)(struct inode *, struct timespec *, int);
        int (*atomic_open)(struct inode *, struct dentry *,
                                struct file *, unsigned open_flag,
                                umode_t create_mode, int *opened);
        int (*tmpfile) (struct inode *, struct dentry *, umode_t);
        int (*dentry_open)(struct dentry *, struct file *, const struct cred *);

locking rules:
        all may block
                i_mutex(inode)
lookup:         yes
create:         yes
link:           yes (both)
mknod:          yes
symlink:        yes
mkdir:          yes
unlink:         yes (both)
rmdir:          yes (both)      (see below)
rename:         yes (all)       (see below)
rename2:        yes (all)       (see below)
readlink:       no
follow_link:    no
put_link:       no
setattr:        yes
permission:     no (may not block if called in rcu-walk mode)
get_acl:        no
getattr:        no
setxattr:       yes
getxattr:       no
listxattr:      no
removexattr:    yes
fiemap:         no
update_time:    no
atomic_open:    yes
tmpfile:        no
dentry_open:    no

        Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
the victim.  Cross-directory ->rename() and ->rename2() take the
(per-superblock) ->s_vfs_rename_sem.

See Documentation/filesystems/directory-locking for more detailed discussion
of the locking scheme for directory operations.

--------------------------- super_operations ---------------------------
prototypes:
        struct inode *(*alloc_inode)(struct super_block *sb);
        void (*destroy_inode)(struct inode *);
        void (*dirty_inode) (struct inode *, int flags);
        int (*write_inode) (struct inode *, struct writeback_control *wbc);
        int (*drop_inode) (struct inode *);
        void (*evict_inode) (struct inode *);
        void (*put_super) (struct super_block *);
        int (*sync_fs)(struct super_block *sb, int wait);
        int (*freeze_fs) (struct super_block *);
        int (*unfreeze_fs) (struct super_block *);
        int (*statfs) (struct dentry *, struct kstatfs *);
        int (*remount_fs) (struct super_block *, int *, char *);
        void (*umount_begin) (struct super_block *);
        int (*show_options)(struct seq_file *, struct dentry *);
        ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t);
        ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t);
        int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t);

locking rules:
        All may block [not true, see below]
                        s_umount
alloc_inode:
destroy_inode:
dirty_inode:
write_inode:
drop_inode:                             !!!inode->i_lock!!!
evict_inode:
put_super:              write
sync_fs:                read
freeze_fs:              write
unfreeze_fs:            write
statfs:                 maybe(read)     (see below)
remount_fs:             write
umount_begin:           no
show_options:           no              (namespace_sem)
quota_read:             no              (see below)
quota_write:            no              (see below)
bdev_try_to_free_page:  no              (see below)

->statfs() has s_umount (shared) when called by ustat(2) (native or
compat), but that's an accident of bad API; s_umount is used to pin
the superblock down when all we have to identify it is a dev_t given
to us by userland.  Everything else (statfs(), fstatfs(), etc.) does
not hold it when calling ->statfs() - the superblock is pinned down by
resolving the pathname passed to the syscall.
->quota_read() and ->quota_write() functions are both guaranteed to
be the only ones operating on the quota file by the quota code (via
dqio_sem) (unless an admin really wants to screw up something and
writes to quota files with quotas on). For other details about locking
see also the dquot_operations section.
->bdev_try_to_free_page is called from the ->releasepage handler of
the block device inode.  See there for more details.

--------------------------- file_system_type ---------------------------
prototypes:
        struct dentry *(*mount) (struct file_system_type *, int,
                       const char *, void *);
        void (*kill_sb) (struct super_block *);
locking rules:
                may block
mount           yes
kill_sb         yes

->mount() returns ERR_PTR or the root dentry; its superblock should be locked
on return.
->kill_sb() takes a write-locked superblock, does all shutdown work on it,
unlocks and drops the reference.

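        For a block-device based filesystem, the usual way to satisfy both
rules is to let the generic helpers do the s_umount handling; a minimal
sketch (myfs_fill_super() is a hypothetical fill_super callback):

        static struct dentry *myfs_mount(struct file_system_type *fs_type,
                        int flags, const char *dev_name, void *data)
        {
                /* mount_bdev() hands back the root dentry with the new
                 * superblock still locked, as required above */
                return mount_bdev(fs_type, flags, dev_name, data,
                                  myfs_fill_super);
        }

        static void myfs_kill_sb(struct super_block *sb)
        {
                /* does the shutdown work and unlocks the superblock;
                 * see kill_block_super() for the details */
                kill_block_super(sb);
        }
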
--------------------------- address_space_operations --------------------------
prototypes:
        int (*writepage)(struct page *page, struct writeback_control *wbc);
        int (*readpage)(struct file *, struct page *);
        int (*sync_page)(struct page *);
        int (*writepages)(struct address_space *, struct writeback_control *);
        int (*set_page_dirty)(struct page *page);
        int (*readpages)(struct file *filp, struct address_space *mapping,
                        struct list_head *pages, unsigned nr_pages);
        int (*write_begin)(struct file *, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata);
        int (*write_end)(struct file *, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned copied,
                                struct page *page, void *fsdata);
        sector_t (*bmap)(struct address_space *, sector_t);
        void (*invalidatepage) (struct page *, unsigned int, unsigned int);
        int (*releasepage) (struct page *, int);
        void (*freepage)(struct page *);
        int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
        int (*migratepage)(struct address_space *, struct page *, struct page *);
        int (*launder_page)(struct page *);
        int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
        int (*error_remove_page)(struct address_space *, struct page *);
        int (*swap_activate)(struct file *);
        int (*swap_deactivate)(struct file *);

locking rules:
        All except set_page_dirty and freepage may block

                        PageLocked(page)        i_mutex
writepage:              yes, unlocks (see below)
readpage:               yes, unlocks
sync_page:              maybe
writepages:
set_page_dirty          no
readpages:
write_begin:            locks the page          yes
write_end:              yes, unlocks            yes
bmap:
invalidatepage:         yes
releasepage:            yes
freepage:               yes
direct_IO:
migratepage:            yes (both)
launder_page:           yes
is_partially_uptodate:  yes
error_remove_page:      yes
swap_activate:          no
swap_deactivate:        no

        ->write_begin(), ->write_end(), ->sync_page() and ->readpage()
may be called from the request handler (/dev/loop).

        ->readpage() unlocks the page, either synchronously or via I/O
completion.

        ->readpages() populates the pagecache with the passed pages and starts
I/O against them.  They come unlocked upon I/O completion.

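        For a block-based filesystem the unlock rule is usually satisfied by
delegating to a generic helper, which submits the read and unlocks the page
from the I/O completion path; a sketch, with myfs_get_block() standing in for
a hypothetical get_block_t instance:

        static int myfs_readpage(struct file *file, struct page *page)
        {
                /* mpage_readpage() (or block_read_full_page()) unlocks the
                 * page once the read has completed */
                return mpage_readpage(page, myfs_get_block);
        }
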
        ->writepage() is used for two purposes: for "memory cleansing" and for
"sync".  These are quite different operations and the behaviour may differ
depending upon the mode.

If writepage is called for sync (wbc->sync_mode != WB_SYNC_NONE) then
it *must* start I/O against the page, even if that would involve
blocking on in-progress I/O.

If writepage is called for memory cleansing (sync_mode ==
WB_SYNC_NONE) then its role is to get as much writeout underway as
possible.  So writepage should try to avoid blocking against
currently-in-progress I/O.

If the filesystem is not called for "sync" and it determines that it
would need to block against in-progress I/O to be able to start new I/O
against the page the filesystem should redirty the page with
redirty_page_for_writepage(), then unlock the page and return zero.
This may also be done to avoid internal deadlocks, but rarely.

If the filesystem is called for sync then it must wait on any
in-progress I/O and then start new I/O.

The filesystem should unlock the page synchronously, before returning to the
caller, unless ->writepage() returns the special WRITEPAGE_ACTIVATE
value. WRITEPAGE_ACTIVATE means that the page cannot really be written out
currently, and the VM should stop calling ->writepage() on this page for some
time. The VM does this by moving the page to the head of the active list,
hence the name.

Unless the filesystem is going to redirty_page_for_writepage(), unlock the page
and return zero, writepage *must* run set_page_writeback() against the page,
followed by unlocking it.  Once set_page_writeback() has been run against the
page, write I/O can be submitted and the write I/O completion handler must run
end_page_writeback() once the I/O is complete.  If no I/O is submitted, the
filesystem must run end_page_writeback() against the page before returning from
writepage.

That is: after 2.5.12, pages which are under writeout are *not* locked.  Note,
if the filesystem needs the page to be locked during writeout, that is ok, too,
the page is allowed to be unlocked at any point in time between the calls to
set_page_writeback() and end_page_writeback().

Note, failure to run either redirty_page_for_writepage() or the combination of
set_page_writeback()/end_page_writeback() on a page submitted to writepage
will leave the page itself marked clean but it will be tagged as dirty in the
radix tree.  This incoherency can lead to all sorts of hard-to-debug problems
in the filesystem like having dirty inodes at umount and losing written data.

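        Putting those rules together, a ->writepage() skeleton might look like
the sketch below.  It is only an illustration: myfs_can_write_now() (the
"would we have to block?" test) and myfs_submit_write() (whatever actually
submits the I/O and arranges for end_page_writeback() on completion) are
hypothetical helpers.

        static int myfs_writepage(struct page *page, struct writeback_control *wbc)
        {
                if (wbc->sync_mode == WB_SYNC_NONE &&
                    !myfs_can_write_now(page)) {
                        /* memory cleansing and we would block: punt */
                        redirty_page_for_writepage(wbc, page);
                        unlock_page(page);
                        return 0;
                }

                set_page_writeback(page);
                unlock_page(page);      /* pages under writeout are not locked */

                if (myfs_submit_write(page) < 0) {
                        /* no I/O was submitted, so end writeback here;
                         * otherwise the completion handler does it */
                        end_page_writeback(page);
                        return -EIO;
                }
                return 0;
        }
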
        ->sync_page() locking rules are not well-defined - usually it is called
with lock on page, but that is not guaranteed. Considering the currently
existing instances of this method ->sync_page() itself doesn't look
well-defined...

        ->writepages() is used for periodic writeback and for syscall-initiated
sync operations.  The address_space should start I/O against at least
*nr_to_write pages.  *nr_to_write must be decremented for each page which is
written.  The address_space implementation may write more (or less) pages
than *nr_to_write asks for, but it should try to be reasonably close.  If
nr_to_write is NULL, all dirty pages must be written.

writepages should _only_ write pages which are present on
mapping->io_pages.

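        A filesystem without special requirements can let a generic helper do
the walking and the nr_to_write accounting; a sketch (myfs_get_block() is
hypothetical):

        static int myfs_writepages(struct address_space *mapping,
                                   struct writeback_control *wbc)
        {
                /* mpage_writepages() walks the dirty pages and decrements
                 * wbc->nr_to_write for each page it writes */
                return mpage_writepages(mapping, wbc, myfs_get_block);
        }
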
        ->set_page_dirty() is called from various places in the kernel
when the target page is marked as needing writeback.  It may be called
under spinlock (it cannot block) and is sometimes called with the page
not locked.

        ->bmap() is currently used by legacy ioctl() (FIBMAP) provided by some
filesystems and by the swapper. The latter will eventually go away.  Please,
keep it that way and don't breed new callers.

        ->invalidatepage() is called when the filesystem must attempt to drop
some or all of the buffers from the page when it is being truncated. It
returns zero on success. If ->invalidatepage is zero, the kernel uses
block_invalidatepage() instead.

        ->releasepage() is called when the kernel is about to try to drop the
buffers from the page in preparation for freeing it.  It returns zero to
indicate that the buffers are (or may be) freeable.  If ->releasepage is zero,
the kernel assumes that the fs has no private interest in the buffers.

        ->freepage() is called when the kernel is done dropping the page
from the page cache.

        ->launder_page() may be called prior to releasing a page if
it is still found to be dirty. It returns zero if the page was successfully
cleaned, or an error value if not. Note that in order to prevent the page
getting mapped back in and redirtied, it needs to be kept locked
across the entire operation.

        ->swap_activate will be called with a non-zero argument on
files backing (non block device backed) swapfiles. A return value
of zero indicates success, in which case this file can be used for
backing swapspace. The swapspace operations will be proxied to the
address space operations.

        ->swap_deactivate() will be called in the sys_swapoff()
path after ->swap_activate() returned success.

----------------------- file_lock_operations ------------------------------
prototypes:
        void (*fl_copy_lock)(struct file_lock *, struct file_lock *);
        void (*fl_release_private)(struct file_lock *);


locking rules:
                        inode->i_lock   may block
fl_copy_lock:           yes             no
fl_release_private:     maybe           maybe[1]

[1]:    ->fl_release_private for flock or POSIX locks is currently allowed
to block. Leases however can still be freed while the i_lock is held and
so fl_release_private called on a lease should not block.

----------------------- lock_manager_operations ---------------------------
prototypes:
        int (*lm_compare_owner)(struct file_lock *, struct file_lock *);
        unsigned long (*lm_owner_key)(struct file_lock *);
        void (*lm_notify)(struct file_lock *);  /* unblock callback */
        int (*lm_grant)(struct file_lock *, struct file_lock *, int);
        void (*lm_break)(struct file_lock *); /* break_lease callback */
        int (*lm_change)(struct file_lock **, int);

locking rules:

                        inode->i_lock   blocked_lock_lock       may block
lm_compare_owner:       yes[1]          maybe                   no
lm_owner_key:           yes[1]          yes                     no
lm_notify:              yes             yes                     no
lm_grant:               no              no                      no
lm_break:               yes             no                      no
lm_change:              yes             no                      no

[1]:    ->lm_compare_owner and ->lm_owner_key are generally called with
*an* inode->i_lock held. It may not be the i_lock of the inode
associated with either file_lock argument! This is the case with deadlock
detection, since the code has to chase down the owners of locks that may
be entirely unrelated to the one on which the lock is being acquired.
For deadlock detection however, the blocked_lock_lock is also held. The
fact that these locks are held ensures that the file_locks do not
disappear out from under you while doing the comparison or generating an
owner key.

--------------------------- buffer_head -----------------------------------
prototypes:
        void (*b_end_io)(struct buffer_head *bh, int uptodate);

locking rules:
        called from interrupts. In other words, extreme care is needed here.
bh is locked, but that is the only guarantee we have here. Currently only
RAID1, highmem, fs/buffer.c, and fs/ntfs/aops.c provide these. Block devices
call this method upon I/O completion.

--------------------------- block_device_operations -----------------------
prototypes:
        int (*open) (struct block_device *, fmode_t);
        int (*release) (struct gendisk *, fmode_t);
        int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
        int (*direct_access) (struct block_device *, sector_t, void __pmem **,
                                unsigned long *);
        int (*media_changed) (struct gendisk *);
        void (*unlock_native_capacity) (struct gendisk *);
        int (*revalidate_disk) (struct gendisk *);
        int (*getgeo)(struct block_device *, struct hd_geometry *);
        void (*swap_slot_free_notify) (struct block_device *, unsigned long);

locking rules:
                        bd_mutex
open:                   yes
release:                yes
ioctl:                  no
compat_ioctl:           no
direct_access:          no
media_changed:          no
unlock_native_capacity: no
revalidate_disk:        no
getgeo:                 no
swap_slot_free_notify:  no      (see below)

media_changed, unlock_native_capacity and revalidate_disk are called only from
check_disk_change().

swap_slot_free_notify is called with swap_lock and sometimes the page lock
held.


--------------------------- file_operations -------------------------------
prototypes:
        loff_t (*llseek) (struct file *, loff_t, int);
        ssize_t (*read) (struct file *, char __user *, size_t, loff_t *);
        ssize_t (*write) (struct file *, const char __user *, size_t, loff_t *);
        ssize_t (*read_iter) (struct kiocb *, struct iov_iter *);
        ssize_t (*write_iter) (struct kiocb *, struct iov_iter *);
        int (*iterate) (struct file *, struct dir_context *);
        unsigned int (*poll) (struct file *, struct poll_table_struct *);
        long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
        long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
        int (*mmap) (struct file *, struct vm_area_struct *);
        int (*open) (struct inode *, struct file *);
        int (*flush) (struct file *);
        int (*release) (struct inode *, struct file *);
        int (*fsync) (struct file *, loff_t start, loff_t end, int datasync);
        int (*aio_fsync) (struct kiocb *, int datasync);
        int (*fasync) (int, struct file *, int);
        int (*lock) (struct file *, int, struct file_lock *);
        ssize_t (*readv) (struct file *, const struct iovec *, unsigned long,
                        loff_t *);
        ssize_t (*writev) (struct file *, const struct iovec *, unsigned long,
                        loff_t *);
        ssize_t (*sendfile) (struct file *, loff_t *, size_t, read_actor_t,
                        void __user *);
        ssize_t (*sendpage) (struct file *, struct page *, int, size_t,
                        loff_t *, int);
        unsigned long (*get_unmapped_area)(struct file *, unsigned long,
                        unsigned long, unsigned long, unsigned long);
        int (*check_flags)(int);
        int (*flock) (struct file *, int, struct file_lock *);
        ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *,
                        size_t, unsigned int);
        ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *,
                        size_t, unsigned int);
        int (*setlease)(struct file *, long, struct file_lock **, void **);
        long (*fallocate)(struct file *, int, loff_t, loff_t);

locking rules:
        All may block.

->llseek() locking has moved from llseek to the individual llseek
implementations.  If your fs is not using generic_file_llseek, you
need to acquire and release the appropriate locks in your ->llseek().
For many filesystems, it is probably safe to acquire the inode
mutex or just to use i_size_read() instead.
Note: this does not protect the file->f_pos against concurrent modifications
since this is something the userspace has to take care of.

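        For most disk-based filesystems that boils down to something like the
sketch below (no extra locking needed, since generic_file_llseek() uses
i_size_read() internally):

        static loff_t myfs_llseek(struct file *file, loff_t offset, int whence)
        {
                /* generic_file_llseek() reads i_size via i_size_read(),
                 * so no inode lock is taken here */
                return generic_file_llseek(file, offset, whence);
        }
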
->fasync() is responsible for maintaining the FASYNC bit in filp->f_flags.
Most instances call fasync_helper(), which does that maintenance, so it's
not normally something one needs to worry about.  Return values > 0 will be
mapped to zero in the VFS layer.

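        A typical instance just points fasync_helper() at a per-object queue;
in the sketch below, struct myfs_file_info and its fasync_list member are
hypothetical:

        static int myfs_fasync(int fd, struct file *filp, int on)
        {
                struct myfs_file_info *fi = filp->private_data;

                /* fasync_helper() adds/removes the entry and maintains
                 * FASYNC in filp->f_flags */
                return fasync_helper(fd, filp, on, &fi->fasync_list);
        }
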
->readdir() and ->ioctl() on directories must be changed. Ideally we would
move ->readdir() to inode_operations and use a separate method for directory
->ioctl() or kill the latter completely. One of the problems is that for
anything that resembles union-mount we won't have a struct file for all
components. And there are other reasons why the current interface is a mess...

->read on directories probably must go away - we should just enforce -EISDIR
in sys_read() and friends.

->setlease operations should call generic_setlease() before or after setting
the lease within the individual filesystem to record the result of the
operation.

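        For example (a sketch only; myfs_prepare_lease() is a hypothetical
filesystem-side check done before the generic code records the lease):

        static int myfs_setlease(struct file *filp, long arg,
                                 struct file_lock **flp, void **priv)
        {
                int err;

                err = myfs_prepare_lease(filp, arg);
                if (err)
                        return err;

                /* record the lease in the generic lease machinery */
                return generic_setlease(filp, arg, flp, priv);
        }
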
--------------------------- dquot_operations -------------------------------
prototypes:
        int (*write_dquot) (struct dquot *);
        int (*acquire_dquot) (struct dquot *);
        int (*release_dquot) (struct dquot *);
        int (*mark_dirty) (struct dquot *);
        int (*write_info) (struct super_block *, int);

These operations are intended to be more or less wrapping functions that ensure
proper locking with respect to the filesystem and call the generic quota
operations.

What filesystem should expect from the generic quota functions:

                FS recursion    Held locks when called
write_dquot:    yes             dqonoff_sem or dqptr_sem
acquire_dquot:  yes             dqonoff_sem or dqptr_sem
release_dquot:  yes             dqonoff_sem or dqptr_sem
mark_dirty:     no              -
write_info:     yes             dqonoff_sem

FS recursion means calling ->quota_read() and ->quota_write() from superblock
operations.

More details about quota locking can be found in fs/dquot.c.

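        As an illustration of the "wrapping" idea, a filesystem with no extra
journalling requirements of its own might simply have (a sketch only):

        static int myfs_write_dquot(struct dquot *dquot)
        {
                /* the generic helper ends up writing the quota file through
                 * our ->quota_write(), i.e. the FS recursion noted above */
                return dquot_commit(dquot);
        }
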
--------------------------- vm_operations_struct -----------------------------
prototypes:
        void (*open)(struct vm_area_struct*);
        void (*close)(struct vm_area_struct*);
        int (*fault)(struct vm_area_struct*, struct vm_fault *);
        void (*map_pages)(struct vm_area_struct *, struct vm_fault *);
        int (*page_mkwrite)(struct vm_area_struct *, struct vm_fault *);
        int (*pfn_mkwrite)(struct vm_area_struct *, struct vm_fault *);
        int (*access)(struct vm_area_struct *, unsigned long, void*, int, int);

locking rules:
                mmap_sem        PageLocked(page)
open:           yes
close:          yes
fault:          yes             can return with page locked
map_pages:      yes
page_mkwrite:   yes             can return with page locked
pfn_mkwrite:    yes
access:         yes

        ->fault() is called when a previously not present pte is about
to be faulted in. The filesystem must find and return the page associated
with the passed in "pgoff" in the vm_fault structure. If it is possible that
the page may be truncated and/or invalidated, then the filesystem must lock
the page, then ensure it is not already truncated (the page lock will block
subsequent truncate), and then return with VM_FAULT_LOCKED, and the page
locked. The VM will unlock the page.

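        A sketch of that sequence (illustrative only, error handling kept to a
minimum; "myfs" is hypothetical):

        static int myfs_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
        {
                struct address_space *mapping = vma->vm_file->f_mapping;
                struct page *page;

                page = read_mapping_page(mapping, vmf->pgoff, NULL);
                if (IS_ERR(page))
                        return VM_FAULT_SIGBUS;

                lock_page(page);
                if (page->mapping != mapping) {
                        /* lost a race with truncate; drop the page and let
                         * the access re-fault */
                        unlock_page(page);
                        put_page(page);
                        return VM_FAULT_NOPAGE;
                }

                vmf->page = page;       /* reference is handed to the caller */
                return VM_FAULT_LOCKED; /* the VM will unlock the page */
        }
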
        ->map_pages() is called when the VM asks to map easily accessible
pages.  The filesystem should find and map pages associated with offsets from
"pgoff" till "max_pgoff". ->map_pages() is called with the page table locked
and must not block.  If it's not possible to reach a page without blocking,
the filesystem should skip it. The filesystem should use do_set_pte() to set
up the page table entry. A pointer to the entry associated with offset "pgoff"
is passed in the "pte" field in the vm_fault structure. Pointers to entries
for other offsets should be calculated relative to "pte".

        ->page_mkwrite() is called when a previously read-only pte is
about to become writeable. The filesystem again must ensure that there are
no truncate/invalidate races, and then return with the page locked. If
the page has been truncated, the filesystem should not look up a new page
like the ->fault() handler, but simply return with VM_FAULT_NOPAGE, which
will cause the VM to retry the fault.

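        Again as a hedged sketch (filesystem-specific work such as block
allocation elided; "myfs" is hypothetical):

        static int myfs_page_mkwrite(struct vm_area_struct *vma,
                                     struct vm_fault *vmf)
        {
                struct page *page = vmf->page;
                struct inode *inode = file_inode(vma->vm_file);

                lock_page(page);
                if (page->mapping != inode->i_mapping) {
                        /* truncated under us - have the VM retry the fault */
                        unlock_page(page);
                        return VM_FAULT_NOPAGE;
                }

                /* ... filesystem-specific work (e.g. block allocation) ... */

                /* keep the page locked to hold off truncate/invalidate
                 * until the VM is done with it */
                return VM_FAULT_LOCKED;
        }
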
        ->pfn_mkwrite() is the same as page_mkwrite but when the pte is
VM_PFNMAP or VM_MIXEDMAP with a page-less entry. The expected return value is
VM_FAULT_NOPAGE or one of the VM_FAULT_ERROR types. The default behavior
after this call is to make the pte read-write, unless pfn_mkwrite returns
an error.

        ->access() is called when get_user_pages() fails in
access_process_vm(), typically used to debug a process through
/proc/pid/mem or ptrace.  This function is needed only for
VM_IO | VM_PFNMAP VMAs.

================================================================================
                        Dubious stuff

(if you break something or notice that it is broken and do not fix it yourself
- at least put it here)
