/* linux/include/linux/mman.h */
   1#ifndef _LINUX_MMAN_H
   2#define _LINUX_MMAN_H
   3
   4#include <linux/mm.h>
   5#include <linux/percpu_counter.h>
   6
   7#include <linux/atomic.h>
   8#include <uapi/linux/mman.h>
   9
  10/*
  11 * Arrange for legacy / undefined architecture specific flags to be
  12 * ignored by mmap handling code.
  13 */
  14#ifndef MAP_32BIT
  15#define MAP_32BIT 0
  16#endif
  17#ifndef MAP_HUGE_2MB
  18#define MAP_HUGE_2MB 0
  19#endif
  20#ifndef MAP_HUGE_1GB
  21#define MAP_HUGE_1GB 0
  22#endif
  23#ifndef MAP_UNINITIALIZED
  24#define MAP_UNINITIALIZED 0
  25#endif
  26#ifndef MAP_SYNC
  27#define MAP_SYNC 0
  28#endif
  29
  30/*
  31 * The historical set of flags that all mmap implementations implicitly
  32 * support when a ->mmap_validate() op is not provided in file_operations.
  33 */
  34#define LEGACY_MAP_MASK (MAP_SHARED \
  35                | MAP_PRIVATE \
  36                | MAP_FIXED \
  37                | MAP_ANONYMOUS \
  38                | MAP_DENYWRITE \
  39                | MAP_EXECUTABLE \
  40                | MAP_UNINITIALIZED \
  41                | MAP_GROWSDOWN \
  42                | MAP_LOCKED \
  43                | MAP_NORESERVE \
  44                | MAP_POPULATE \
  45                | MAP_NONBLOCK \
  46                | MAP_STACK \
  47                | MAP_HUGETLB \
  48                | MAP_32BIT \
  49                | MAP_HUGE_2MB \
  50                | MAP_HUGE_1GB)
  51
  52extern int sysctl_overcommit_memory;
  53extern int sysctl_overcommit_ratio;
  54extern unsigned long sysctl_overcommit_kbytes;
  55extern struct percpu_counter vm_committed_as;
  56
  57unsigned long vm_memory_committed(void);
  58
  59static inline void vm_acct_memory(long pages)
  60{
  61        percpu_counter_add(&vm_committed_as, pages);
  62}
  63
  64static inline void vm_unacct_memory(long pages)
  65{
  66        vm_acct_memory(-pages);
  67}
  68
  69/*
  70 * Allow architectures to handle additional protection bits
  71 */
  72
  73#ifndef arch_calc_vm_prot_bits
  74#define arch_calc_vm_prot_bits(prot, pkey) 0
  75#endif
  76
  77#ifndef arch_vm_get_page_prot
  78#define arch_vm_get_page_prot(vm_flags) __pgprot(0)
  79#endif
  80
  81#ifndef arch_validate_prot
  82/*
  83 * This is called from mprotect().  PROT_GROWSDOWN and PROT_GROWSUP have
  84 * already been masked out.
  85 *
  86 * Returns true if the prot flags are valid
  87 */
  88static inline int arch_validate_prot(unsigned long prot)
  89{
  90        return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
  91}
  92#define arch_validate_prot arch_validate_prot
  93#endif
  94
  95/*
  96 * Optimisation macro.  It is equivalent to:
  97 *      (x & bit1) ? bit2 : 0
  98 * but this version is faster.
  99 * ("bit1" and "bit2" must be single bits)
 100 */
 101#define _calc_vm_trans(x, bit1, bit2) \
 102  ((!(bit1) || !(bit2)) ? 0 : \
 103  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
 104   : ((x) & (bit1)) / ((bit1) / (bit2))))
 105
 106/*
 107 * Combine the mmap "prot" argument into "vm_flags" used internally.
 108 */
 109static inline unsigned long
 110calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
 111{
 112        return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
 113               _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
 114               _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
 115               arch_calc_vm_prot_bits(prot, pkey);
 116}
 117
 118/*
 119 * Combine the mmap "flags" argument into "vm_flags" used internally.
 120 */
 121static inline unsigned long
 122calc_vm_flag_bits(unsigned long flags)
 123{
 124        return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
 125               _calc_vm_trans(flags, MAP_DENYWRITE,  VM_DENYWRITE ) |
 126               _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
 127               _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      );
 128}
 129
 130unsigned long vm_commit_limit(void);
 131#endif /* _LINUX_MMAN_H */