LCOV - code coverage report
Current view: top level - include/linux - mm.h (source / functions)
Test: coverage.info            Date: 2014-02-18

                    Hit      Total   Coverage
Lines:               59         76     77.6 %
Functions:            0          0        -
Branches:            76        276     27.5 %

           Branch data     Line data    Source code
       1                 :            : #ifndef _LINUX_MM_H
       2                 :            : #define _LINUX_MM_H
       3                 :            : 
       4                 :            : #include <linux/errno.h>
       5                 :            : 
       6                 :            : #ifdef __KERNEL__
       7                 :            : 
       8                 :            : #include <linux/gfp.h>
       9                 :            : #include <linux/bug.h>
      10                 :            : #include <linux/list.h>
      11                 :            : #include <linux/mmzone.h>
      12                 :            : #include <linux/rbtree.h>
      13                 :            : #include <linux/atomic.h>
      14                 :            : #include <linux/debug_locks.h>
      15                 :            : #include <linux/mm_types.h>
      16                 :            : #include <linux/range.h>
      17                 :            : #include <linux/pfn.h>
      18                 :            : #include <linux/bit_spinlock.h>
      19                 :            : #include <linux/shrinker.h>
      20                 :            : 
      21                 :            : struct mempolicy;
      22                 :            : struct anon_vma;
      23                 :            : struct anon_vma_chain;
      24                 :            : struct file_ra_state;
      25                 :            : struct user_struct;
      26                 :            : struct writeback_control;
      27                 :            : 
      28                 :            : #ifndef CONFIG_NEED_MULTIPLE_NODES      /* Don't use mapnrs, do it properly */
      29                 :            : extern unsigned long max_mapnr;
      30                 :            : 
      31                 :            : static inline void set_max_mapnr(unsigned long limit)
      32                 :            : {
      33                 :            :         max_mapnr = limit;
      34                 :            : }
      35                 :            : #else
      36                 :            : static inline void set_max_mapnr(unsigned long limit) { }
      37                 :            : #endif
      38                 :            : 
      39                 :            : extern unsigned long totalram_pages;
      40                 :            : extern void * high_memory;
      41                 :            : extern int page_cluster;
      42                 :            : 
      43                 :            : #ifdef CONFIG_SYSCTL
      44                 :            : extern int sysctl_legacy_va_layout;
      45                 :            : #else
      46                 :            : #define sysctl_legacy_va_layout 0
      47                 :            : #endif
      48                 :            : 
      49                 :            : #include <asm/page.h>
      50                 :            : #include <asm/pgtable.h>
      51                 :            : #include <asm/processor.h>
      52                 :            : 
      53                 :            : #ifndef __pa_symbol
      54                 :            : #define __pa_symbol(x)  __pa(RELOC_HIDE((unsigned long)(x), 0))
      55                 :            : #endif
      56                 :            : 
      57                 :            : extern unsigned long sysctl_user_reserve_kbytes;
      58                 :            : extern unsigned long sysctl_admin_reserve_kbytes;
      59                 :            : 
      60                 :            : #define nth_page(page,n) pfn_to_page(page_to_pfn((page)) + (n))
      61                 :            : 
      62                 :            : /* to align the pointer to the (next) page boundary */
      63                 :            : #define PAGE_ALIGN(addr) ALIGN(addr, PAGE_SIZE)
      64                 :            : 
      65                 :            : /* test whether an address (unsigned long or pointer) is aligned to PAGE_SIZE */
      66                 :            : #define PAGE_ALIGNED(addr)      IS_ALIGNED((unsigned long)addr, PAGE_SIZE)
      67                 :            : 
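As a quick illustration of the two alignment helpers above, here is a minimal userspace sketch. The ALIGN/IS_ALIGNED definitions are assumed (they live in other kernel headers, mirrored here with their usual power-of-two forms):

#include <stdio.h>

/* Assumed definitions, mirroring the kernel's usual ALIGN/IS_ALIGNED */
#define PAGE_SIZE 4096UL
#define ALIGN(x, a)      (((x) + ((a) - 1)) & ~((a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

#define PAGE_ALIGN(addr)   ALIGN(addr, PAGE_SIZE)
#define PAGE_ALIGNED(addr) IS_ALIGNED((unsigned long)(addr), PAGE_SIZE)

int main(void)
{
        unsigned long addr = 0x1234;

        /* 0x1234 rounds up to the next page boundary, 0x2000 */
        printf("PAGE_ALIGN(%#lx)  = %#lx\n", addr, PAGE_ALIGN(addr));
        printf("PAGE_ALIGNED(%#lx) = %d\n", addr, (int)PAGE_ALIGNED(addr));
        return 0;
}
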
      68                 :            : /*
      69                 :            :  * Linux kernel virtual memory manager primitives.
      70                 :            :  * The idea being to have a "virtual" mm in the same way
      71                 :            :  * we have a virtual fs - giving a cleaner interface to the
      72                 :            :  * mm details, and allowing different kinds of memory mappings
      73                 :            :  * (from shared memory to executable loading to arbitrary
      74                 :            :  * mmap() functions).
      75                 :            :  */
      76                 :            : 
      77                 :            : extern struct kmem_cache *vm_area_cachep;
      78                 :            : 
      79                 :            : #ifndef CONFIG_MMU
      80                 :            : extern struct rb_root nommu_region_tree;
      81                 :            : extern struct rw_semaphore nommu_region_sem;
      82                 :            : 
      83                 :            : extern unsigned int kobjsize(const void *objp);
      84                 :            : #endif
      85                 :            : 
      86                 :            : /*
      87                 :            :  * vm_flags in vm_area_struct, see mm_types.h.
      88                 :            :  */
      89                 :            : #define VM_NONE         0x00000000
      90                 :            : 
      91                 :            : #define VM_READ         0x00000001      /* currently active flags */
      92                 :            : #define VM_WRITE        0x00000002
      93                 :            : #define VM_EXEC         0x00000004
      94                 :            : #define VM_SHARED       0x00000008
      95                 :            : 
      96                 :            : /* mprotect() hardcodes VM_MAYREAD >> 4 == VM_READ, and so for r/w/x bits. */
      97                 :            : #define VM_MAYREAD      0x00000010      /* limits for mprotect() etc */
      98                 :            : #define VM_MAYWRITE     0x00000020
      99                 :            : #define VM_MAYEXEC      0x00000040
     100                 :            : #define VM_MAYSHARE     0x00000080
     101                 :            : 
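The mprotect() invariant noted in the comment above (each VM_MAY* bit sits exactly four bits above its VM_* counterpart) can be checked mechanically with the flag values as defined here:

#include <assert.h>

#define VM_READ     0x00000001
#define VM_WRITE    0x00000002
#define VM_EXEC     0x00000004
#define VM_SHARED   0x00000008
#define VM_MAYREAD  0x00000010
#define VM_MAYWRITE 0x00000020
#define VM_MAYEXEC  0x00000040
#define VM_MAYSHARE 0x00000080

int main(void)
{
        /* mprotect() relies on VM_MAY* >> 4 == VM_* for r/w/x/shared */
        assert(VM_MAYREAD  >> 4 == VM_READ);
        assert(VM_MAYWRITE >> 4 == VM_WRITE);
        assert(VM_MAYEXEC  >> 4 == VM_EXEC);
        assert(VM_MAYSHARE >> 4 == VM_SHARED);
        return 0;
}
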
     102                 :            : #define VM_GROWSDOWN    0x00000100      /* general info on the segment */
     103                 :            : #define VM_PFNMAP       0x00000400      /* Page-ranges managed without "struct page", just pure PFN */
      104                 :            : #define VM_DENYWRITE    0x00000800      /* ETXTBSY on write attempts. */
     105                 :            : 
     106                 :            : #define VM_LOCKED       0x00002000
     107                 :            : #define VM_IO           0x00004000      /* Memory mapped I/O or similar */
     108                 :            : 
     109                 :            :                                         /* Used by sys_madvise() */
     110                 :            : #define VM_SEQ_READ     0x00008000      /* App will access data sequentially */
     111                 :            : #define VM_RAND_READ    0x00010000      /* App will not benefit from clustered reads */
     112                 :            : 
     113                 :            : #define VM_DONTCOPY     0x00020000      /* Do not copy this vma on fork */
     114                 :            : #define VM_DONTEXPAND   0x00040000      /* Cannot expand with mremap() */
     115                 :            : #define VM_ACCOUNT      0x00100000      /* Is a VM accounted object */
     116                 :            : #define VM_NORESERVE    0x00200000      /* should the VM suppress accounting */
     117                 :            : #define VM_HUGETLB      0x00400000      /* Huge TLB Page VM */
     118                 :            : #define VM_NONLINEAR    0x00800000      /* Is non-linear (remap_file_pages) */
     119                 :            : #define VM_ARCH_1       0x01000000      /* Architecture-specific flag */
     120                 :            : #define VM_DONTDUMP     0x04000000      /* Do not include in the core dump */
     121                 :            : 
     122                 :            : #ifdef CONFIG_MEM_SOFT_DIRTY
     123                 :            : # define VM_SOFTDIRTY   0x08000000      /* Not soft dirty clean area */
     124                 :            : #else
     125                 :            : # define VM_SOFTDIRTY   0
     126                 :            : #endif
     127                 :            : 
     128                 :            : #define VM_MIXEDMAP     0x10000000      /* Can contain "struct page" and pure PFN pages */
     129                 :            : #define VM_HUGEPAGE     0x20000000      /* MADV_HUGEPAGE marked this vma */
     130                 :            : #define VM_NOHUGEPAGE   0x40000000      /* MADV_NOHUGEPAGE marked this vma */
     131                 :            : #define VM_MERGEABLE    0x80000000      /* KSM may merge identical pages */
     132                 :            : 
     133                 :            : #if defined(CONFIG_X86)
     134                 :            : # define VM_PAT         VM_ARCH_1       /* PAT reserves whole VMA at once (x86) */
     135                 :            : #elif defined(CONFIG_PPC)
     136                 :            : # define VM_SAO         VM_ARCH_1       /* Strong Access Ordering (powerpc) */
     137                 :            : #elif defined(CONFIG_PARISC)
     138                 :            : # define VM_GROWSUP     VM_ARCH_1
     139                 :            : #elif defined(CONFIG_METAG)
     140                 :            : # define VM_GROWSUP     VM_ARCH_1
     141                 :            : #elif defined(CONFIG_IA64)
     142                 :            : # define VM_GROWSUP     VM_ARCH_1
     143                 :            : #elif !defined(CONFIG_MMU)
     144                 :            : # define VM_MAPPED_COPY VM_ARCH_1       /* T if mapped copy of data (nommu mmap) */
     145                 :            : #endif
     146                 :            : 
     147                 :            : #ifndef VM_GROWSUP
     148                 :            : # define VM_GROWSUP     VM_NONE
     149                 :            : #endif
     150                 :            : 
     151                 :            : /* Bits set in the VMA until the stack is in its final location */
     152                 :            : #define VM_STACK_INCOMPLETE_SETUP       (VM_RAND_READ | VM_SEQ_READ)
     153                 :            : 
     154                 :            : #ifndef VM_STACK_DEFAULT_FLAGS          /* arch can override this */
     155                 :            : #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
     156                 :            : #endif
     157                 :            : 
     158                 :            : #ifdef CONFIG_STACK_GROWSUP
     159                 :            : #define VM_STACK_FLAGS  (VM_GROWSUP | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
     160                 :            : #else
     161                 :            : #define VM_STACK_FLAGS  (VM_GROWSDOWN | VM_STACK_DEFAULT_FLAGS | VM_ACCOUNT)
     162                 :            : #endif
     163                 :            : 
     164                 :            : /*
      165                 :            :  * Special vmas that are non-mergeable, non-mlock()able.
     166                 :            :  * Note: mm/huge_memory.c VM_NO_THP depends on this definition.
     167                 :            :  */
     168                 :            : #define VM_SPECIAL (VM_IO | VM_DONTEXPAND | VM_PFNMAP)
     169                 :            : 
     170                 :            : /*
     171                 :            :  * mapping from the currently active vm_flags protection bits (the
      172                 :            :  * low four bits) to a page protection mask.
     173                 :            :  */
     174                 :            : extern pgprot_t protection_map[16];
     175                 :            : 
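protection_map is indexed by those low four vm_flags bits. A hedged sketch of how a fault path derives a pgprot_t from them (mm/mmap.c does essentially this; the helper name below is illustrative, not a declaration from this header):

/* Sketch: index protection_map with the low four vm_flags bits. */
static inline pgprot_t prot_from_vm_flags(unsigned long vm_flags)
{
        return protection_map[vm_flags &
                        (VM_READ | VM_WRITE | VM_EXEC | VM_SHARED)];
}
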
     176                 :            : #define FAULT_FLAG_WRITE        0x01    /* Fault was a write access */
     177                 :            : #define FAULT_FLAG_NONLINEAR    0x02    /* Fault was via a nonlinear mapping */
     178                 :            : #define FAULT_FLAG_MKWRITE      0x04    /* Fault was mkwrite of existing pte */
     179                 :            : #define FAULT_FLAG_ALLOW_RETRY  0x08    /* Retry fault if blocking */
     180                 :            : #define FAULT_FLAG_RETRY_NOWAIT 0x10    /* Don't drop mmap_sem and wait when retrying */
     181                 :            : #define FAULT_FLAG_KILLABLE     0x20    /* The fault task is in SIGKILL killable region */
     182                 :            : #define FAULT_FLAG_TRIED        0x40    /* second try */
     183                 :            : #define FAULT_FLAG_USER         0x80    /* The fault originated in userspace */
     184                 :            : 
     185                 :            : /*
      186                 :            :  * vm_fault is filled by the pagefault handler and passed to the vma's
     187                 :            :  * ->fault function. The vma's ->fault is responsible for returning a bitmask
     188                 :            :  * of VM_FAULT_xxx flags that give details about how the fault was handled.
     189                 :            :  *
     190                 :            :  * pgoff should be used in favour of virtual_address, if possible. If pgoff
     191                 :            :  * is used, one may implement ->remap_pages to get nonlinear mapping support.
     192                 :            :  */
     193                 :            : struct vm_fault {
     194                 :            :         unsigned int flags;             /* FAULT_FLAG_xxx flags */
     195                 :            :         pgoff_t pgoff;                  /* Logical page offset based on vma */
     196                 :            :         void __user *virtual_address;   /* Faulting virtual address */
     197                 :            : 
     198                 :            :         struct page *page;              /* ->fault handlers should return a
     199                 :            :                                          * page here, unless VM_FAULT_NOPAGE
     200                 :            :                                          * is set (which is also implied by
     201                 :            :                                          * VM_FAULT_ERROR).
     202                 :            :                                          */
     203                 :            : };
     204                 :            : 
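To show how the struct is consumed, here is a hedged sketch of a minimal ->fault handler: it reads vmf->pgoff, supplies a refcounted page through vmf->page, and returns a VM_FAULT_* code. my_lookup_page() is a hypothetical driver helper:

/* Hedged sketch of a ->fault implementation.  my_lookup_page() is a
 * hypothetical helper returning a page with an elevated refcount, or NULL. */
static int my_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
        struct page *page = my_lookup_page(vma->vm_private_data, vmf->pgoff);

        if (!page)
                return VM_FAULT_SIGBUS;

        vmf->page = page;       /* the caller inserts it into the page tables */
        return 0;
}
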
     205                 :            : /*
     206                 :            :  * These are the virtual MM functions - opening of an area, closing and
     207                 :            :  * unmapping it (needed to keep files on disk up-to-date etc), pointer
     208                 :            :  * to the functions called when a no-page or a wp-page exception occurs. 
     209                 :            :  */
     210                 :            : struct vm_operations_struct {
     211                 :            :         void (*open)(struct vm_area_struct * area);
     212                 :            :         void (*close)(struct vm_area_struct * area);
     213                 :            :         int (*fault)(struct vm_area_struct *vma, struct vm_fault *vmf);
     214                 :            : 
     215                 :            :         /* notification that a previously read-only page is about to become
     216                 :            :          * writable, if an error is returned it will cause a SIGBUS */
     217                 :            :         int (*page_mkwrite)(struct vm_area_struct *vma, struct vm_fault *vmf);
     218                 :            : 
     219                 :            :         /* called by access_process_vm when get_user_pages() fails, typically
     220                 :            :          * for use by special VMAs that can switch between memory and hardware
     221                 :            :          */
     222                 :            :         int (*access)(struct vm_area_struct *vma, unsigned long addr,
     223                 :            :                       void *buf, int len, int write);
     224                 :            : #ifdef CONFIG_NUMA
     225                 :            :         /*
     226                 :            :          * set_policy() op must add a reference to any non-NULL @new mempolicy
     227                 :            :          * to hold the policy upon return.  Caller should pass NULL @new to
     228                 :            :          * remove a policy and fall back to surrounding context--i.e. do not
     229                 :            :          * install a MPOL_DEFAULT policy, nor the task or system default
     230                 :            :          * mempolicy.
     231                 :            :          */
     232                 :            :         int (*set_policy)(struct vm_area_struct *vma, struct mempolicy *new);
     233                 :            : 
     234                 :            :         /*
     235                 :            :          * get_policy() op must add reference [mpol_get()] to any policy at
     236                 :            :          * (vma,addr) marked as MPOL_SHARED.  The shared policy infrastructure
     237                 :            :          * in mm/mempolicy.c will do this automatically.
     238                 :            :          * get_policy() must NOT add a ref if the policy at (vma,addr) is not
     239                 :            :          * marked as MPOL_SHARED. vma policies are protected by the mmap_sem.
     240                 :            :          * If no [shared/vma] mempolicy exists at the addr, get_policy() op
     241                 :            :          * must return NULL--i.e., do not "fallback" to task or system default
     242                 :            :          * policy.
     243                 :            :          */
     244                 :            :         struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
     245                 :            :                                         unsigned long addr);
     246                 :            :         int (*migrate)(struct vm_area_struct *vma, const nodemask_t *from,
     247                 :            :                 const nodemask_t *to, unsigned long flags);
     248                 :            : #endif
     249                 :            :         /* called by sys_remap_file_pages() to populate non-linear mapping */
     250                 :            :         int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
     251                 :            :                            unsigned long size, pgoff_t pgoff);
     252                 :            : };
     253                 :            : 
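Drivers typically populate only the hooks they need. A hedged sketch of a minimal vm_ops table wired up from an mmap handler (names other than the struct fields are illustrative):

/* Hedged sketch: a driver's vm_ops table using the hooks above. */
static const struct vm_operations_struct my_vm_ops = {
        .open  = my_open,       /* e.g. take a reference on driver state */
        .close = my_close,      /* drop it when the VMA goes away */
        .fault = my_fault,      /* supply pages on demand, as sketched above */
};

static int my_mmap(struct file *file, struct vm_area_struct *vma)
{
        vma->vm_ops = &my_vm_ops;
        vma->vm_private_data = file->private_data;
        return 0;
}
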
     254                 :            : struct mmu_gather;
     255                 :            : struct inode;
     256                 :            : 
     257                 :            : #define page_private(page)              ((page)->private)
     258                 :            : #define set_page_private(page, v)       ((page)->private = (v))
     259                 :            : 
      260                 :            : /* Valid only while the page is on the free path or in a free_list */
     261                 :            : static inline void set_freepage_migratetype(struct page *page, int migratetype)
     262                 :            : {
     263                 :   63385171 :         page->index = migratetype;
     264                 :            : }
     265                 :            : 
      266                 :            : /* Valid only while the page is on the free path or in a free_list */
     267                 :            : static inline int get_freepage_migratetype(struct page *page)
     268                 :            : {
     269                 :   21769960 :         return page->index;
     270                 :            : }
     271                 :            : 
     272                 :            : /*
     273                 :            :  * FIXME: take this include out, include page-flags.h in
     274                 :            :  * files which need it (119 of them)
     275                 :            :  */
     276                 :            : #include <linux/page-flags.h>
     277                 :            : #include <linux/huge_mm.h>
     278                 :            : 
     279                 :            : /*
     280                 :            :  * Methods to modify the page usage count.
     281                 :            :  *
     282                 :            :  * What counts for a page usage:
     283                 :            :  * - cache mapping   (page->mapping)
     284                 :            :  * - private data    (page->private)
     285                 :            :  * - page mapped in a task's page tables, each mapping
     286                 :            :  *   is counted separately
     287                 :            :  *
     288                 :            :  * Also, many kernel routines increase the page count before a critical
     289                 :            :  * routine so they can be sure the page doesn't go away from under them.
     290                 :            :  */
     291                 :            : 
     292                 :            : /*
     293                 :            :  * Drop a ref, return true if the refcount fell to zero (the page has no users)
     294                 :            :  */
     295                 :            : static inline int put_page_testzero(struct page *page)
     296                 :            : {
     297                 :            :         VM_BUG_ON(atomic_read(&page->_count) == 0);
     298                 :  188765293 :         return atomic_dec_and_test(&page->_count);
     299                 :            : }
     300                 :            : 
     301                 :            : /*
     302                 :            :  * Try to grab a ref unless the page has a refcount of zero, return false if
     303                 :            :  * that is the case.
     304                 :            :  * This can be called when MMU is off so it must not access
     305                 :            :  * any of the virtual mappings.
     306                 :            :  */
     307                 :            : static inline int get_page_unless_zero(struct page *page)
     308                 :            : {
     309                 :   73067460 :         return atomic_inc_not_zero(&page->_count);
     310                 :            : }
     311                 :            : 
     312                 :            : /*
     313                 :            :  * Try to drop a ref unless the page has a refcount of one, return false if
     314                 :            :  * that is the case.
     315                 :            :  * This is to make sure that the refcount won't become zero after this drop.
     316                 :            :  * This can be called when MMU is off so it must not access
     317                 :            :  * any of the virtual mappings.
     318                 :            :  */
     319                 :            : static inline int put_page_unless_one(struct page *page)
     320                 :            : {
     321                 :            :         return atomic_add_unless(&page->_count, -1, 1);
     322                 :            : }
     323                 :            : 
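get_page_unless_zero() is the building block of speculative page references: a lockless lookup can race with the page being freed, so the reference is taken only if the count is still non-zero, and the caller then revalidates. A hedged sketch of that pattern, where revalidate() stands in for whatever check proves the page is still the one that was looked up (e.g. a radix-tree slot recheck in the page cache):

/* Hedged sketch of the speculative-reference pattern. */
struct page *speculative_get(struct page *page)
{
        if (!get_page_unless_zero(page))
                return NULL;            /* lost the race: page was freed */

        if (!revalidate(page)) {        /* page was reused for something else */
                put_page(page);
                return NULL;
        }
        return page;                    /* caller now owns one reference */
}
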
     324                 :            : extern int page_is_ram(unsigned long pfn);
     325                 :            : 
     326                 :            : /* Support for virtually mapped pages */
     327                 :            : struct page *vmalloc_to_page(const void *addr);
     328                 :            : unsigned long vmalloc_to_pfn(const void *addr);
     329                 :            : 
     330                 :            : /*
     331                 :            :  * Determine if an address is within the vmalloc range
     332                 :            :  *
     333                 :            :  * On nommu, vmalloc/vfree wrap through kmalloc/kfree directly, so there
     334                 :            :  * is no special casing required.
     335                 :            :  */
     336                 :            : static inline int is_vmalloc_addr(const void *x)
     337                 :            : {
     338                 :            : #ifdef CONFIG_MMU
     339                 :      58597 :         unsigned long addr = (unsigned long)x;
     340                 :            : 
     341 [ +  + ][ +  # ]:      58597 :         return addr >= VMALLOC_START && addr < VMALLOC_END;
     342                 :            : #else
     343                 :            :         return 0;
     344                 :            : #endif
     345                 :            : }
     346                 :            : #ifdef CONFIG_MMU
     347                 :            : extern int is_vmalloc_or_module_addr(const void *x);
     348                 :            : #else
     349                 :            : static inline int is_vmalloc_or_module_addr(const void *x)
     350                 :            : {
     351                 :            :         return 0;
     352                 :            : }
     353                 :            : #endif
     354                 :            : 
     355                 :            : static inline void compound_lock(struct page *page)
     356                 :            : {
     357                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     358                 :            :         VM_BUG_ON(PageSlab(page));
     359                 :            :         bit_spin_lock(PG_compound_lock, &page->flags);
     360                 :            : #endif
     361                 :            : }
     362                 :            : 
     363                 :            : static inline void compound_unlock(struct page *page)
     364                 :            : {
     365                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     366                 :            :         VM_BUG_ON(PageSlab(page));
     367                 :            :         bit_spin_unlock(PG_compound_lock, &page->flags);
     368                 :            : #endif
     369                 :            : }
     370                 :            : 
     371                 :            : static inline unsigned long compound_lock_irqsave(struct page *page)
     372                 :            : {
     373                 :            :         unsigned long uninitialized_var(flags);
     374                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     375                 :            :         local_irq_save(flags);
     376                 :            :         compound_lock(page);
     377                 :            : #endif
     378                 :            :         return flags;
     379                 :            : }
     380                 :            : 
     381                 :            : static inline void compound_unlock_irqrestore(struct page *page,
     382                 :            :                                               unsigned long flags)
     383                 :            : {
     384                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
     385                 :            :         compound_unlock(page);
     386                 :            :         local_irq_restore(flags);
     387                 :            : #endif
     388                 :            : }
     389                 :            : 
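The irqsave variants pair like any other lock/unlock couple. A hedged sketch of the pattern used when tail-page refcounting must be serialized against THP splitting (compare __get_page_tail() in mm/swap.c):

/* Hedged sketch: hold the compound lock while touching tail refcounts. */
static void touch_compound(struct page *head)
{
        unsigned long flags;

        flags = compound_lock_irqsave(head);
        /* ... tail-page refcount manipulation goes here ... */
        compound_unlock_irqrestore(head, flags);
}
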
     390                 :            : static inline struct page *compound_head(struct page *page)
     391                 :            : {
     392         [ -  + ]:   76746123 :         if (unlikely(PageTail(page)))
     394                 :            :         return page;
     395                 :            : }
     396                 :            : 
     397                 :            : /*
     398                 :            :  * The atomic page->_mapcount, starts from -1: so that transitions
     399                 :            :  * both from it and to it can be tracked, using atomic_inc_and_test
     400                 :            :  * and atomic_add_negative(-1).
     401                 :            :  */
     402                 :            : static inline void page_mapcount_reset(struct page *page)
     403                 :            : {
     404                 :     358157 :         atomic_set(&(page)->_mapcount, -1);
     405                 :            : }
     406                 :            : 
     407                 :            : static inline int page_mapcount(struct page *page)
     408                 :            : {
     409                 :  193268573 :         return atomic_read(&(page)->_mapcount) + 1;
     410                 :            : }
     411                 :            : 
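Because _mapcount starts at -1, the first mapping takes it to 0 (so atomic_inc_and_test() fires exactly then) and removing the last mapping makes atomic_add_negative(-1, ...) true; page_mapcount() adds 1 back so callers see the intuitive count. A runnable userspace illustration of the arithmetic, with a plain int standing in for the atomic:

#include <stdio.h>

int main(void)
{
        int mapcount = -1;              /* page_mapcount_reset() */

        /* first mapping: -1 -> 0, atomic_inc_and_test() returns true */
        printf("first map fires inc_and_test: %d\n", ++mapcount == 0);

        /* page_mapcount() reports raw value + 1 */
        printf("page_mapcount = %d\n", mapcount + 1);   /* -> 1 */

        /* last unmap: 0 -> -1, atomic_add_negative(-1) returns true */
        printf("last unmap fires add_negative: %d\n", --mapcount < 0);
        return 0;
}
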
     412                 :            : static inline int page_count(struct page *page)
     413                 :            : {
     414         [ #  # ]:     541297 :         return atomic_read(&compound_head(page)->_count);
     415                 :            : }
     416                 :            : 
     417                 :            : static inline void get_huge_page_tail(struct page *page)
     418                 :            : {
     419                 :            :         /*
     420                 :            :          * __split_huge_page_refcount() cannot run
     421                 :            :          * from under us.
     422                 :            :          */
     423                 :            :         VM_BUG_ON(page_mapcount(page) < 0);
     424                 :            :         VM_BUG_ON(atomic_read(&page->_count) != 0);
     425                 :            :         atomic_inc(&page->_mapcount);
     426                 :            : }
     427                 :            : 
     428                 :            : extern bool __get_page_tail(struct page *page);
     429                 :            : 
     430                 :            : static inline void get_page(struct page *page)
     431                 :            : {
     432         [ -  + ]:   72579129 :         if (unlikely(PageTail(page)))
     433 [ #  # ][ #  # ]:          0 :                 if (likely(__get_page_tail(page)))
     435                 :            :         /*
     436                 :            :          * Getting a normal page or the head of a compound page
     437                 :            :          * requires to already have an elevated page->_count.
     438                 :            :          */
     439                 :            :         VM_BUG_ON(atomic_read(&page->_count) <= 0);
     440                 :   43579115 :         atomic_inc(&page->_count);
     441                 :            : }
     442                 :            : 
     443                 :            : static inline struct page *virt_to_head_page(const void *x)
     444                 :            : {
     445                 :  130627058 :         struct page *page = virt_to_page(x);
     446                 :            :         return compound_head(page);
     447                 :            : }
     448                 :            : 
     449                 :            : /*
     450                 :            :  * Setup the page count before being freed into the page allocator for
     451                 :            :  * the first time (boot or memory hotplug)
     452                 :            :  */
     453                 :            : static inline void init_page_count(struct page *page)
     454                 :            : {
     455                 :          0 :         atomic_set(&page->_count, 1);
     456                 :            : }
     457                 :            : 
     458                 :            : /*
      459                 :            :  * PageBuddy() indicates that the page is free and in the buddy system
     460                 :            :  * (see mm/page_alloc.c).
     461                 :            :  *
     462                 :            :  * PAGE_BUDDY_MAPCOUNT_VALUE must be <= -2 but better not too close to
     463                 :            :  * -2 so that an underflow of the page_mapcount() won't be mistaken
     464                 :            :  * for a genuine PAGE_BUDDY_MAPCOUNT_VALUE. -128 can be created very
     465                 :            :  * efficiently by most CPU architectures.
     466                 :            :  */
     467                 :            : #define PAGE_BUDDY_MAPCOUNT_VALUE (-128)
     468                 :            : 
     469                 :            : static inline int PageBuddy(struct page *page)
     470                 :            : {
     471                 :   61955865 :         return atomic_read(&page->_mapcount) == PAGE_BUDDY_MAPCOUNT_VALUE;
     472                 :            : }
     473                 :            : 
     474                 :            : static inline void __SetPageBuddy(struct page *page)
     475                 :            : {
     476                 :            :         VM_BUG_ON(atomic_read(&page->_mapcount) != -1);
     477                 :   11758392 :         atomic_set(&page->_mapcount, PAGE_BUDDY_MAPCOUNT_VALUE);
     478                 :            : }
     479                 :            : 
     480                 :            : static inline void __ClearPageBuddy(struct page *page)
     481                 :            : {
     482                 :            :         VM_BUG_ON(!PageBuddy(page));
     483                 :   36602013 :         atomic_set(&page->_mapcount, -1);
     484                 :            : }
     485                 :            : 
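PAGE_BUDDY_MAPCOUNT_VALUE works as a sentinel: because -128 sits far from -1, a small accidental underflow of _mapcount cannot be mistaken for a buddy page, and -128 fits a signed 8-bit immediate on most ISAs. A userspace sketch of the state transitions the three helpers above implement:

#include <assert.h>

#define PAGE_BUDDY_MAPCOUNT_VALUE (-128)

int main(void)
{
        int mapcount = -1;                      /* normal free state */

        mapcount = PAGE_BUDDY_MAPCOUNT_VALUE;   /* __SetPageBuddy() */
        assert(mapcount == PAGE_BUDDY_MAPCOUNT_VALUE);  /* PageBuddy() true */

        mapcount = -1;                          /* __ClearPageBuddy() */
        assert(mapcount != PAGE_BUDDY_MAPCOUNT_VALUE);

        /* a small underflow from -1 stays well clear of the sentinel */
        assert(-2 != PAGE_BUDDY_MAPCOUNT_VALUE);
        return 0;
}
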
     486                 :            : void put_page(struct page *page);
     487                 :            : void put_pages_list(struct list_head *pages);
     488                 :            : 
     489                 :            : void split_page(struct page *page, unsigned int order);
     490                 :            : int split_free_page(struct page *page);
     491                 :            : 
     492                 :            : /*
     493                 :            :  * Compound pages have a destructor function.  Provide a
     494                 :            :  * prototype for that function and accessor functions.
     495                 :            :  * These are _only_ valid on the head of a PG_compound page.
     496                 :            :  */
     497                 :            : typedef void compound_page_dtor(struct page *);
     498                 :            : 
     499                 :            : static inline void set_compound_page_dtor(struct page *page,
     500                 :            :                                                 compound_page_dtor *dtor)
     501                 :            : {
     502                 :        279 :         page[1].lru.next = (void *)dtor;
     503                 :            : }
     504                 :            : 
     505                 :            : static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
     506                 :            : {
     507                 :        189 :         return (compound_page_dtor *)page[1].lru.next;
     508                 :            : }
     509                 :            : 
     510                 :            : static inline int compound_order(struct page *page)
     511                 :            : {
     512 [ #  # ][ +  - ]:        189 :         if (!PageHead(page))
     513                 :            :                 return 0;
     514                 :        189 :         return (unsigned long)page[1].lru.prev;
     515                 :            : }
     516                 :            : 
     517                 :            : static inline void set_compound_order(struct page *page, unsigned long order)
     518                 :            : {
     519                 :        279 :         page[1].lru.prev = (void *)order;
     520                 :            : }
     521                 :            : 
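The accessors above stash the destructor and order in the otherwise-unused lru fields of the first tail page. A hedged sketch of how a free path consumes that metadata (compare the compound-page handling in mm/page_alloc.c; the function below is illustrative):

/* Hedged sketch: consume the metadata stored in page[1].lru. */
static void destroy_compound(struct page *head)
{
        compound_page_dtor *dtor = get_compound_page_dtor(head);
        int order = compound_order(head);       /* 0 for non-head pages */

        (void)order;    /* e.g. used to walk all 1 << order constituent pages */
        (*dtor)(head);  /* typically free_compound_page() */
}
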
     522                 :            : #ifdef CONFIG_MMU
     523                 :            : /*
     524                 :            :  * Do pte_mkwrite, but only if the vma says VM_WRITE.  We do this when
      525                 :            :  * servicing faults for write access.  In the normal case, we always want
     526                 :            :  * pte_mkwrite.  But get_user_pages can cause write faults for mappings
     527                 :            :  * that do not have writing enabled, when used by access_process_vm.
     528                 :            :  */
     529                 :            : static inline pte_t maybe_mkwrite(pte_t pte, struct vm_area_struct *vma)
     530                 :            : {
     531         [ +  + ]:   18959242 :         if (likely(vma->vm_flags & VM_WRITE))
     532                 :            :                 pte = pte_mkwrite(pte);
     533                 :            :         return pte;
     534                 :            : }
     535                 :            : #endif
     536                 :            : 
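maybe_mkwrite() shows up at the end of fault handling, after the PTE has been assembled from the page and the VMA's protection bits. A hedged sketch of that sequence (mk_pte() and pte_mkdirty() are real helpers from asm/pgtable.h; the surrounding function is illustrative, compare do_anonymous_page() in mm/memory.c):

/* Hedged sketch: typical maybe_mkwrite() use when installing a PTE. */
static pte_t make_writable_pte(struct page *page, struct vm_area_struct *vma)
{
        pte_t entry = mk_pte(page, vma->vm_page_prot);

        /* mark writable only if the VMA actually permits writes */
        entry = maybe_mkwrite(pte_mkdirty(entry), vma);
        return entry;
}
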
     537                 :            : /*
     538                 :            :  * Multiple processes may "see" the same page. E.g. for untouched
     539                 :            :  * mappings of /dev/null, all processes see the same page full of
     540                 :            :  * zeroes, and text pages of executables and shared libraries have
     541                 :            :  * only one copy in memory, at most, normally.
     542                 :            :  *
     543                 :            :  * For the non-reserved pages, page_count(page) denotes a reference count.
     544                 :            :  *   page_count() == 0 means the page is free. page->lru is then used for
     545                 :            :  *   freelist management in the buddy allocator.
     546                 :            :  *   page_count() > 0  means the page has been allocated.
     547                 :            :  *
     548                 :            :  * Pages are allocated by the slab allocator in order to provide memory
     549                 :            :  * to kmalloc and kmem_cache_alloc. In this case, the management of the
     550                 :            :  * page, and the fields in 'struct page' are the responsibility of mm/slab.c
     551                 :            :  * unless a particular usage is carefully commented. (the responsibility of
     552                 :            :  * freeing the kmalloc memory is the caller's, of course).
     553                 :            :  *
     554                 :            :  * A page may be used by anyone else who does a __get_free_page().
     555                 :            :  * In this case, page_count still tracks the references, and should only
     556                 :            :  * be used through the normal accessor functions. The top bits of page->flags
     557                 :            :  * and page->virtual store page management information, but all other fields
     558                 :            :  * are unused and could be used privately, carefully. The management of this
     559                 :            :  * page is the responsibility of the one who allocated it, and those who have
     560                 :            :  * subsequently been given references to it.
     561                 :            :  *
     562                 :            :  * The other pages (we may call them "pagecache pages") are completely
     563                 :            :  * managed by the Linux memory manager: I/O, buffers, swapping etc.
     564                 :            :  * The following discussion applies only to them.
     565                 :            :  *
     566                 :            :  * A pagecache page contains an opaque `private' member, which belongs to the
     567                 :            :  * page's address_space. Usually, this is the address of a circular list of
     568                 :            :  * the page's disk buffers. PG_private must be set to tell the VM to call
     569                 :            :  * into the filesystem to release these pages.
     570                 :            :  *
     571                 :            :  * A page may belong to an inode's memory mapping. In this case, page->mapping
     572                 :            :  * is the pointer to the inode, and page->index is the file offset of the page,
     573                 :            :  * in units of PAGE_CACHE_SIZE.
     574                 :            :  *
     575                 :            :  * If pagecache pages are not associated with an inode, they are said to be
     576                 :            :  * anonymous pages. These may become associated with the swapcache, and in that
     577                 :            :  * case PG_swapcache is set, and page->private is an offset into the swapcache.
     578                 :            :  *
     579                 :            :  * In either case (swapcache or inode backed), the pagecache itself holds one
     580                 :            :  * reference to the page. Setting PG_private should also increment the
      581                 :            :  * refcount. Each user mapping also has a reference to the page.
     582                 :            :  *
     583                 :            :  * The pagecache pages are stored in a per-mapping radix tree, which is
     584                 :            :  * rooted at mapping->page_tree, and indexed by offset.
     585                 :            :  * Where 2.4 and early 2.6 kernels kept dirty/clean pages in per-address_space
     586                 :            :  * lists, we instead now tag pages as dirty/writeback in the radix tree.
     587                 :            :  *
     588                 :            :  * All pagecache pages may be subject to I/O:
     589                 :            :  * - inode pages may need to be read from disk,
     590                 :            :  * - inode pages which have been modified and are MAP_SHARED may need
     591                 :            :  *   to be written back to the inode on disk,
     592                 :            :  * - anonymous pages (including MAP_PRIVATE file mappings) which have been
     593                 :            :  *   modified may need to be swapped out to swap space and (later) to be read
     594                 :            :  *   back into memory.
     595                 :            :  */
     596                 :            : 
     597                 :            : /*
     598                 :            :  * The zone field is never updated after free_area_init_core()
     599                 :            :  * sets it, so none of the operations on it need to be atomic.
     600                 :            :  */
     601                 :            : 
     602                 :            : /* Page flags: | [SECTION] | [NODE] | ZONE | [LAST_CPUPID] | ... | FLAGS | */
     603                 :            : #define SECTIONS_PGOFF          ((sizeof(unsigned long)*8) - SECTIONS_WIDTH)
     604                 :            : #define NODES_PGOFF             (SECTIONS_PGOFF - NODES_WIDTH)
     605                 :            : #define ZONES_PGOFF             (NODES_PGOFF - ZONES_WIDTH)
     606                 :            : #define LAST_CPUPID_PGOFF       (ZONES_PGOFF - LAST_CPUPID_WIDTH)
     607                 :            : 
     608                 :            : /*
     609                 :            :  * Define the bit shifts to access each section.  For non-existent
     610                 :            :  * sections we define the shift as 0; that plus a 0 mask ensures
     611                 :            :  * the compiler will optimise away reference to them.
     612                 :            :  */
     613                 :            : #define SECTIONS_PGSHIFT        (SECTIONS_PGOFF * (SECTIONS_WIDTH != 0))
     614                 :            : #define NODES_PGSHIFT           (NODES_PGOFF * (NODES_WIDTH != 0))
     615                 :            : #define ZONES_PGSHIFT           (ZONES_PGOFF * (ZONES_WIDTH != 0))
     616                 :            : #define LAST_CPUPID_PGSHIFT     (LAST_CPUPID_PGOFF * (LAST_CPUPID_WIDTH != 0))
     617                 :            : 
     618                 :            : /* NODE:ZONE or SECTION:ZONE is used to ID a zone for the buddy allocator */
     619                 :            : #ifdef NODE_NOT_IN_PAGE_FLAGS
     620                 :            : #define ZONEID_SHIFT            (SECTIONS_SHIFT + ZONES_SHIFT)
     621                 :            : #define ZONEID_PGOFF            ((SECTIONS_PGOFF < ZONES_PGOFF)? \
     622                 :            :                                                 SECTIONS_PGOFF : ZONES_PGOFF)
     623                 :            : #else
     624                 :            : #define ZONEID_SHIFT            (NODES_SHIFT + ZONES_SHIFT)
     625                 :            : #define ZONEID_PGOFF            ((NODES_PGOFF < ZONES_PGOFF)? \
     626                 :            :                                                 NODES_PGOFF : ZONES_PGOFF)
     627                 :            : #endif
     628                 :            : 
     629                 :            : #define ZONEID_PGSHIFT          (ZONEID_PGOFF * (ZONEID_SHIFT != 0))
     630                 :            : 
     631                 :            : #if SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
     632                 :            : #error SECTIONS_WIDTH+NODES_WIDTH+ZONES_WIDTH > BITS_PER_LONG - NR_PAGEFLAGS
     633                 :            : #endif
     634                 :            : 
     635                 :            : #define ZONES_MASK              ((1UL << ZONES_WIDTH) - 1)
     636                 :            : #define NODES_MASK              ((1UL << NODES_WIDTH) - 1)
     637                 :            : #define SECTIONS_MASK           ((1UL << SECTIONS_WIDTH) - 1)
     638                 :            : #define LAST_CPUPID_MASK        ((1UL << LAST_CPUPID_WIDTH) - 1)
     639                 :            : #define ZONEID_MASK             ((1UL << ZONEID_SHIFT) - 1)
     640                 :            : 
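The *_PGOFF/*_PGSHIFT arithmetic packs the fields downward from the top bit of page->flags. A runnable sketch for an assumed 64-bit configuration with SECTIONS_WIDTH=0, NODES_WIDTH=6, ZONES_WIDTH=2 (the real widths come from linux/mmzone.h and the kernel .config):

#include <stdio.h>

/* Assumed example widths; the real values are config-dependent. */
#define BITS              64
#define SECTIONS_WIDTH    0
#define NODES_WIDTH       6
#define ZONES_WIDTH       2

#define SECTIONS_PGOFF    (BITS - SECTIONS_WIDTH)
#define NODES_PGOFF       (SECTIONS_PGOFF - NODES_WIDTH)
#define ZONES_PGOFF       (NODES_PGOFF - ZONES_WIDTH)
#define ZONES_PGSHIFT     (ZONES_PGOFF * (ZONES_WIDTH != 0))
#define ZONES_MASK        ((1UL << ZONES_WIDTH) - 1)
#define NODES_MASK        ((1UL << NODES_WIDTH) - 1)

int main(void)
{
        /* encode node 3, zone 2 the way set_page_zone()/set_page_node() do */
        unsigned long flags = (3UL << NODES_PGOFF) | (2UL << ZONES_PGSHIFT);

        /* page_zonenum()/page_to_nid() equivalents: shift down, then mask */
        printf("zone = %lu\n", (flags >> ZONES_PGSHIFT) & ZONES_MASK); /* 2 */
        printf("node = %lu\n", (flags >> NODES_PGOFF) & NODES_MASK);   /* 3 */
        return 0;
}
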
     641                 :            : static inline enum zone_type page_zonenum(const struct page *page)
     642                 :            : {
     643                 :  701955649 :         return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
     644                 :            : }
     645                 :            : 
     646                 :            : #if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
     647                 :            : #define SECTION_IN_PAGE_FLAGS
     648                 :            : #endif
     649                 :            : 
     650                 :            : /*
     651                 :            :  * The identification function is mainly used by the buddy allocator for
     652                 :            :  * determining if two pages could be buddies. We are not really identifying
     653                 :            :  * the zone since we could be using the section number id if we do not have
     654                 :            :  * node id available in page flags.
     655                 :            :  * We only guarantee that it will return the same value for two combinable
     656                 :            :  * pages in a zone.
     657                 :            :  */
     658                 :            : static inline int page_zone_id(struct page *page)
     659                 :            : {
     660                 :  122850505 :         return (page->flags >> ZONEID_PGSHIFT) & ZONEID_MASK;
     661                 :            : }
     662                 :            : 
     663                 :            : static inline int zone_to_nid(struct zone *zone)
     664                 :            : {
     665                 :            : #ifdef CONFIG_NUMA
     666                 :            :         return zone->node;
     667                 :            : #else
     668                 :            :         return 0;
     669                 :            : #endif
     670                 :            : }
     671                 :            : 
     672                 :            : #ifdef NODE_NOT_IN_PAGE_FLAGS
     673                 :            : extern int page_to_nid(const struct page *page);
     674                 :            : #else
     675                 :            : static inline int page_to_nid(const struct page *page)
     676                 :            : {
     677                 :            :         return (page->flags >> NODES_PGSHIFT) & NODES_MASK;
     678                 :            : }
     679                 :            : #endif
     680                 :            : 
     681                 :            : #ifdef CONFIG_NUMA_BALANCING
     682                 :            : static inline int cpu_pid_to_cpupid(int cpu, int pid)
     683                 :            : {
     684                 :            :         return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
     685                 :            : }
     686                 :            : 
     687                 :            : static inline int cpupid_to_pid(int cpupid)
     688                 :            : {
     689                 :            :         return cpupid & LAST__PID_MASK;
     690                 :            : }
     691                 :            : 
     692                 :            : static inline int cpupid_to_cpu(int cpupid)
     693                 :            : {
     694                 :            :         return (cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK;
     695                 :            : }
     696                 :            : 
     697                 :            : static inline int cpupid_to_nid(int cpupid)
     698                 :            : {
     699                 :            :         return cpu_to_node(cpupid_to_cpu(cpupid));
     700                 :            : }
     701                 :            : 
     702                 :            : static inline bool cpupid_pid_unset(int cpupid)
     703                 :            : {
     704                 :            :         return cpupid_to_pid(cpupid) == (-1 & LAST__PID_MASK);
     705                 :            : }
     706                 :            : 
     707                 :            : static inline bool cpupid_cpu_unset(int cpupid)
     708                 :            : {
     709                 :            :         return cpupid_to_cpu(cpupid) == (-1 & LAST__CPU_MASK);
     710                 :            : }
     711                 :            : 
     712                 :            : static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
     713                 :            : {
     714                 :            :         return (task_pid & LAST__PID_MASK) == cpupid_to_pid(cpupid);
     715                 :            : }
     716                 :            : 
     717                 :            : #define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
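The cpupid helpers pack a CPU number and the low PID bits into one int. A runnable round-trip using assumed 8-bit fields (the real LAST__PID_SHIFT and LAST__*_MASK values are derived from the page-flags layout and are config-dependent):

#include <assert.h>

/* Assumed example field sizes; the real ones are config-dependent. */
#define LAST__PID_SHIFT 8
#define LAST__PID_MASK  ((1 << LAST__PID_SHIFT) - 1)
#define LAST__CPU_SHIFT 8
#define LAST__CPU_MASK  ((1 << LAST__CPU_SHIFT) - 1)

static int cpu_pid_to_cpupid(int cpu, int pid)
{
        return ((cpu & LAST__CPU_MASK) << LAST__PID_SHIFT) | (pid & LAST__PID_MASK);
}

int main(void)
{
        int cpupid = cpu_pid_to_cpupid(5, 1234);

        /* only the low 8 PID bits survive: 1234 & 0xff == 210 */
        assert((cpupid & LAST__PID_MASK) == (1234 & LAST__PID_MASK));
        assert(((cpupid >> LAST__PID_SHIFT) & LAST__CPU_MASK) == 5);
        return 0;
}
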
     718                 :            : #ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
     719                 :            : static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
     720                 :            : {
     721                 :            :         return xchg(&page->_last_cpupid, cpupid);
     722                 :            : }
     723                 :            : 
     724                 :            : static inline int page_cpupid_last(struct page *page)
     725                 :            : {
     726                 :            :         return page->_last_cpupid;
     727                 :            : }
     728                 :            : static inline void page_cpupid_reset_last(struct page *page)
     729                 :            : {
     730                 :            :         page->_last_cpupid = -1;
     731                 :            : }
     732                 :            : #else
     733                 :            : static inline int page_cpupid_last(struct page *page)
     734                 :            : {
     735                 :            :         return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
     736                 :            : }
     737                 :            : 
     738                 :            : extern int page_cpupid_xchg_last(struct page *page, int cpupid);
     739                 :            : 
     740                 :            : static inline void page_cpupid_reset_last(struct page *page)
     741                 :            : {
     742                 :            :         int cpupid = (1 << LAST_CPUPID_SHIFT) - 1;
     743                 :            : 
     744                 :            :         page->flags &= ~(LAST_CPUPID_MASK << LAST_CPUPID_PGSHIFT);
     745                 :            :         page->flags |= (cpupid & LAST_CPUPID_MASK) << LAST_CPUPID_PGSHIFT;
     746                 :            : }
     747                 :            : #endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
     748                 :            : #else /* !CONFIG_NUMA_BALANCING */
     749                 :            : static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
     750                 :            : {
     751                 :            :         return page_to_nid(page); /* XXX */
     752                 :            : }
     753                 :            : 
     754                 :            : static inline int page_cpupid_last(struct page *page)
     755                 :            : {
     756                 :            :         return page_to_nid(page); /* XXX */
     757                 :            : }
     758                 :            : 
     759                 :            : static inline int cpupid_to_nid(int cpupid)
     760                 :            : {
     761                 :            :         return -1;
     762                 :            : }
     763                 :            : 
     764                 :            : static inline int cpupid_to_pid(int cpupid)
     765                 :            : {
     766                 :            :         return -1;
     767                 :            : }
     768                 :            : 
     769                 :            : static inline int cpupid_to_cpu(int cpupid)
     770                 :            : {
     771                 :            :         return -1;
     772                 :            : }
     773                 :            : 
     774                 :            : static inline int cpu_pid_to_cpupid(int nid, int pid)
     775                 :            : {
     776                 :            :         return -1;
     777                 :            : }
     778                 :            : 
     779                 :            : static inline bool cpupid_pid_unset(int cpupid)
     780                 :            : {
     781                 :            :         return 1;
     782                 :            : }
     783                 :            : 
     784                 :            : static inline void page_cpupid_reset_last(struct page *page)
     785                 :            : {
     786                 :            : }
     787                 :            : 
     788                 :            : static inline bool cpupid_match_pid(struct task_struct *task, int cpupid)
     789                 :            : {
     790                 :            :         return false;
     791                 :            : }
     792                 :            : #endif /* CONFIG_NUMA_BALANCING */
     793                 :            : 
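Editor's note: a minimal sketch of how a NUMA hinting fault might consult the
cpupid helpers above. The function name is hypothetical; the real policy lives
in mm/memory.c and kernel/sched/fair.c.

/*
 * Sketch only: returns the node the page was last faulted from, or -1 if
 * a different task (or nobody) touched it last.
 */
static int last_fault_nid_example(struct task_struct *task, struct page *page)
{
        int last_cpupid = page_cpupid_last(page);

        if (cpupid_pid_unset(last_cpupid))
                return -1;              /* never faulted before */
        if (!cpupid_match_pid(task, last_cpupid))
                return -1;              /* last toucher was someone else */
        return cpupid_to_nid(last_cpupid);
}
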
     794                 :  701955649 : static inline struct zone *page_zone(const struct page *page)
     795                 :            : {
     796                 :            :         return &NODE_DATA(page_to_nid(page))->node_zones[page_zonenum(page)];
     797                 :            : }
     798                 :            : 
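Editor's note: a hedged usage sketch of page_zone(), the standard way to get
from a page back to its zone. pr_info() comes from linux/printk.h; "page" is
assumed to be a valid struct page.

/* Sketch: zone->name is the human-readable zone label ("Normal", ...). */
static void print_page_zone_example(struct page *page)
{
        struct zone *zone = page_zone(page);

        pr_info("pfn %lu: node %d, zone %s\n",
                page_to_pfn(page), page_to_nid(page), zone->name);
}
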
     799                 :            : #ifdef SECTION_IN_PAGE_FLAGS
     800                 :            : static inline void set_page_section(struct page *page, unsigned long section)
     801                 :            : {
     802                 :            :         page->flags &= ~(SECTIONS_MASK << SECTIONS_PGSHIFT);
     803                 :            :         page->flags |= (section & SECTIONS_MASK) << SECTIONS_PGSHIFT;
     804                 :            : }
     805                 :            : 
     806                 :            : static inline unsigned long page_to_section(const struct page *page)
     807                 :            : {
     808                 :            :         return (page->flags >> SECTIONS_PGSHIFT) & SECTIONS_MASK;
     809                 :            : }
     810                 :            : #endif
     811                 :            : 
     812                 :            : static inline void set_page_zone(struct page *page, enum zone_type zone)
     813                 :            : {
     814                 :          0 :         page->flags &= ~(ZONES_MASK << ZONES_PGSHIFT);
     815                 :          0 :         page->flags |= (zone & ZONES_MASK) << ZONES_PGSHIFT;
     816                 :            : }
     817                 :            : 
     818                 :            : static inline void set_page_node(struct page *page, unsigned long node)
     819                 :            : {
     820                 :            :         page->flags &= ~(NODES_MASK << NODES_PGSHIFT);
     821                 :            :         page->flags |= (node & NODES_MASK) << NODES_PGSHIFT;
     822                 :            : }
     823                 :            : 
     824                 :            : static inline void set_page_links(struct page *page, enum zone_type zone,
     825                 :            :         unsigned long node, unsigned long pfn)
     826                 :            : {
     827                 :            :         set_page_zone(page, zone);
     828                 :            :         set_page_node(page, node);
     829                 :            : #ifdef SECTION_IN_PAGE_FLAGS
     830                 :            :         set_page_section(page, pfn_to_section_nr(pfn));
     831                 :            : #endif
     832                 :            : }
     833                 :            : 
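Editor's note: for orientation, a sketch of the kind of initialization loop
that calls set_page_links(); it is loosely modeled on memmap_init_zone() and
the function name is illustrative.

/* Sketch: stamp zone, node and (when SECTION_IN_PAGE_FLAGS) section into
 * page->flags for each page in a pfn range. */
static void memmap_init_range_example(unsigned long start_pfn,
                                      unsigned long end_pfn,
                                      enum zone_type zone, int nid)
{
        unsigned long pfn;

        for (pfn = start_pfn; pfn < end_pfn; pfn++)
                set_page_links(pfn_to_page(pfn), zone, nid, pfn);
}
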
     834                 :            : /*
     835                 :            :  * Some inline functions in vmstat.h depend on page_zone()
     836                 :            :  */
     837                 :            : #include <linux/vmstat.h>
     838                 :            : 
     839                 :            : static __always_inline void *lowmem_page_address(const struct page *page)
     840                 :            : {
     841                 :  262143750 :         return __va(PFN_PHYS(page_to_pfn(page)));
     842                 :            : }
     843                 :            : 
     844                 :            : #if defined(CONFIG_HIGHMEM) && !defined(WANT_PAGE_VIRTUAL)
     845                 :            : #define HASHED_PAGE_VIRTUAL
     846                 :            : #endif
     847                 :            : 
     848                 :            : #if defined(WANT_PAGE_VIRTUAL)
     849                 :            : #define page_address(page) ((page)->virtual)
     850                 :            : #define set_page_address(page, address)                 \
     851                 :            :         do {                                            \
     852                 :            :                 (page)->virtual = (address);         \
     853                 :            :         } while(0)
     854                 :            : #define page_address_init()  do { } while(0)
     855                 :            : #endif
     856                 :            : 
     857                 :            : #if defined(HASHED_PAGE_VIRTUAL)
     858                 :            : void *page_address(const struct page *page);
     859                 :            : void set_page_address(struct page *page, void *virtual);
     860                 :            : void page_address_init(void);
     861                 :            : #endif
     862                 :            : 
     863                 :            : #if !defined(HASHED_PAGE_VIRTUAL) && !defined(WANT_PAGE_VIRTUAL)
     864                 :            : #define page_address(page) lowmem_page_address(page)
     865                 :            : #define set_page_address(page, address)  do { } while(0)
     866                 :            : #define page_address_init()  do { } while(0)
     867                 :            : #endif
     868                 :            : 
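Editor's note: a brief sketch of what all three page_address() configurations
have in common from a caller's point of view; memset() is assumed from
linux/string.h and the helper name is invented.

/* Sketch: zero a page through its kernel mapping. With HASHED_PAGE_VIRTUAL
 * an unmapped highmem page yields NULL, so callers must check. */
static void zero_mapped_page_example(struct page *page)
{
        void *vaddr = page_address(page);

        if (vaddr)
                memset(vaddr, 0, PAGE_SIZE);
}
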
     869                 :            : /*
     870                 :            :  * On an anonymous page mapped into a user virtual memory area,
     871                 :            :  * page->mapping points to its anon_vma, not to a struct address_space;
     872                 :            :  * with the PAGE_MAPPING_ANON bit set to distinguish it.  See rmap.h.
     873                 :            :  *
     874                 :            :  * On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
     875                 :            :  * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
     876                 :            :  * and then page->mapping points, not to an anon_vma, but to a private
     877                 :            :  * structure which KSM associates with that merged page.  See ksm.h.
     878                 :            :  *
     879                 :            :  * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
     880                 :            :  *
     881                 :            :  * Please note that, confusingly, "page_mapping" refers to the inode
     882                 :            :  * address_space which maps the page from disk; whereas "page_mapped"
      883                 :            :  * refers to the user virtual address space into which the page is mapped.
     884                 :            :  */
     885                 :            : #define PAGE_MAPPING_ANON       1
     886                 :            : #define PAGE_MAPPING_KSM        2
     887                 :            : #define PAGE_MAPPING_FLAGS      (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
     888                 :            : 
     889                 :            : extern struct address_space *page_mapping(struct page *page);
     890                 :            : 
     891                 :            : /* Neutral page->mapping pointer to address_space or anon_vma or other */
     892                 :            : static inline void *page_rmapping(struct page *page)
     893                 :            : {
     894                 :          0 :         return (void *)((unsigned long)page->mapping & ~PAGE_MAPPING_FLAGS);
     895                 :            : }
     896                 :            : 
     897                 :            : extern struct address_space *__page_file_mapping(struct page *);
     898                 :            : 
     899                 :            : static inline
     900                 :            : struct address_space *page_file_mapping(struct page *page)
     901                 :            : {
      902 [ #  # ][ #  # ]:          0 :         if (unlikely(PageSwapCache(page)))
     903                 :          0 :                 return __page_file_mapping(page);
     904                 :            : 
     905                 :          0 :         return page->mapping;
     906                 :            : }
     907                 :            : 
     908                 :            : static inline int PageAnon(struct page *page)
     909                 :            : {
     910                 :  334489494 :         return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
     911                 :            : }
     912                 :            : 
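Editor's note: a hedged example of decoding page->mapping as described in the
comment above; the helper name is invented.

/* Sketch: a file-backed page keeps a plain address_space pointer in
 * page->mapping; anon (and KSM) pages have PAGE_MAPPING_ANON set. */
static bool page_is_file_backed_example(struct page *page)
{
        return page->mapping && !PageAnon(page);
}
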
     913                 :            : /*
     914                 :            :  * Return the pagecache index of the passed page.  Regular pagecache pages
      915                 :            :  * use ->index whereas swapcache pages use ->private.
     916                 :            :  */
     917                 :            : static inline pgoff_t page_index(struct page *page)
     918                 :            : {
      919 [ -  + ][ -  + ]:    8233126 :         if (unlikely(PageSwapCache(page)))
     920                 :          0 :                 return page_private(page);
     921                 :    6093867 :         return page->index;
     922                 :            : }
     923                 :            : 
     924                 :            : extern pgoff_t __page_file_index(struct page *page);
     925                 :            : 
     926                 :            : /*
     927                 :            :  * Return the file index of the page. Regular pagecache pages use ->index
     928                 :            :  * whereas swapcache pages use swp_offset(->private)
     929                 :            :  */
     930                 :            : static inline pgoff_t page_file_index(struct page *page)
     931                 :            : {
      932 [ #  # ][ #  # ]:          0 :         if (unlikely(PageSwapCache(page)))
     933                 :          0 :                 return __page_file_index(page);
     934                 :            : 
     935                 :          0 :         return page->index;
     936                 :            : }
     937                 :            : 
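Editor's note: a usage sketch converting the file index into a byte offset,
which is how pagecache code typically consumes it. The helper name is
hypothetical (the real kernel keeps a similar helper in linux/pagemap.h).

/* Sketch: byte offset of the page within its backing file; works for
 * swapcache pages too because page_file_index() handles them. */
static loff_t page_file_offset_example(struct page *page)
{
        return (loff_t)page_file_index(page) << PAGE_SHIFT;
}
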
     938                 :            : /*
     939                 :            :  * Return true if this page is mapped into pagetables.
     940                 :            :  */
     941                 :            : static inline int page_mapped(struct page *page)
     942                 :            : {
     943                 :   35272375 :         return atomic_read(&(page)->_mapcount) >= 0;
     944                 :            : }
     945                 :            : 
     946                 :            : /*
     947                 :            :  * Different kinds of faults, as returned by handle_mm_fault().
     948                 :            :  * Used to decide whether a process gets delivered SIGBUS or
     949                 :            :  * just gets major/minor fault counters bumped up.
     950                 :            :  */
     951                 :            : 
     952                 :            : #define VM_FAULT_MINOR  0 /* For backwards compat. Remove me quickly. */
     953                 :            : 
     954                 :            : #define VM_FAULT_OOM    0x0001
     955                 :            : #define VM_FAULT_SIGBUS 0x0002
     956                 :            : #define VM_FAULT_MAJOR  0x0004
     957                 :            : #define VM_FAULT_WRITE  0x0008  /* Special case for get_user_pages */
     958                 :            : #define VM_FAULT_HWPOISON 0x0010        /* Hit poisoned small page */
     959                 :            : #define VM_FAULT_HWPOISON_LARGE 0x0020  /* Hit poisoned large page. Index encoded in upper bits */
     960                 :            : 
      961                 :            : #define VM_FAULT_NOPAGE 0x0100  /* ->fault installed the pte, did not return a page */
     962                 :            : #define VM_FAULT_LOCKED 0x0200  /* ->fault locked the returned page */
     963                 :            : #define VM_FAULT_RETRY  0x0400  /* ->fault blocked, must retry */
     964                 :            : #define VM_FAULT_FALLBACK 0x0800        /* huge page fault failed, fall back to small */
     965                 :            : 
     966                 :            : #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
     967                 :            : 
     968                 :            : #define VM_FAULT_ERROR  (VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
     969                 :            :                          VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
     970                 :            : 
     971                 :            : /* Encode hstate index for a hwpoisoned large page */
     972                 :            : #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
     973                 :            : #define VM_FAULT_GET_HINDEX(x) (((x) >> 12) & 0xf)
     974                 :            : 
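Editor's note: a small hedged demonstration of the hstate-index encoding; the
function name and values are arbitrary.

/* Sketch: a hwpoison handler for a large page encodes the hstate index
 * into the fault code; the arch fault handler decodes it later. */
static int hwpoison_hindex_roundtrip_example(unsigned int hindex)
{
        int ret = VM_FAULT_HWPOISON_LARGE | VM_FAULT_SET_HINDEX(hindex);

        return VM_FAULT_GET_HINDEX(ret);        /* == hindex for hindex < 16 */
}
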
     975                 :            : /*
     976                 :            :  * Can be called by the pagefault handler when it gets a VM_FAULT_OOM.
     977                 :            :  */
     978                 :            : extern void pagefault_out_of_memory(void);
     979                 :            : 
     980                 :            : #define offset_in_page(p)       ((unsigned long)(p) & ~PAGE_MASK)
     981                 :            : 
     982                 :            : /*
     983                 :            :  * Flags passed to show_mem() and show_free_areas() to suppress output in
     984                 :            :  * various contexts.
     985                 :            :  */
     986                 :            : #define SHOW_MEM_FILTER_NODES           (0x0001u)       /* disallowed nodes */
     987                 :            : #define SHOW_MEM_FILTER_PAGE_COUNT      (0x0002u)       /* page type count */
     988                 :            : 
     989                 :            : extern void show_free_areas(unsigned int flags);
     990                 :            : extern bool skip_free_areas_node(unsigned int flags, int nid);
     991                 :            : 
     992                 :            : void shmem_set_file(struct vm_area_struct *vma, struct file *file);
     993                 :            : int shmem_zero_setup(struct vm_area_struct *);
     994                 :            : 
     995                 :            : extern int can_do_mlock(void);
     996                 :            : extern int user_shm_lock(size_t, struct user_struct *);
     997                 :            : extern void user_shm_unlock(size_t, struct user_struct *);
     998                 :            : 
     999                 :            : /*
    1000                 :            :  * Parameter block passed down to zap_pte_range in exceptional cases.
    1001                 :            :  */
    1002                 :            : struct zap_details {
    1003                 :            :         struct vm_area_struct *nonlinear_vma;   /* Check page->index if set */
    1004                 :            :         struct address_space *check_mapping;    /* Check page->mapping if set */
    1005                 :            :         pgoff_t first_index;                    /* Lowest page->index to unmap */
    1006                 :            :         pgoff_t last_index;                     /* Highest page->index to unmap */
    1007                 :            : };
    1008                 :            : 
    1009                 :            : struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
    1010                 :            :                 pte_t pte);
    1011                 :            : 
    1012                 :            : int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
    1013                 :            :                 unsigned long size);
    1014                 :            : void zap_page_range(struct vm_area_struct *vma, unsigned long address,
    1015                 :            :                 unsigned long size, struct zap_details *);
    1016                 :            : void unmap_vmas(struct mmu_gather *tlb, struct vm_area_struct *start_vma,
    1017                 :            :                 unsigned long start, unsigned long end);
    1018                 :            : 
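Editor's note: a sketch of how truncation-style callers fill in struct
zap_details; the function name is invented.

/* Sketch: unmap only the pages belonging to one mapping inside a VMA,
 * leaving anonymous COW pages alone, as truncation does. */
static void zap_one_mapping_example(struct vm_area_struct *vma,
                                    struct address_space *mapping)
{
        struct zap_details details = {
                .check_mapping = mapping,
                .first_index = 0,
                .last_index = (pgoff_t)-1,
        };

        zap_page_range(vma, vma->vm_start,
                       vma->vm_end - vma->vm_start, &details);
}
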
    1019                 :            : /**
    1020                 :            :  * mm_walk - callbacks for walk_page_range
    1021                 :            :  * @pgd_entry: if set, called for each non-empty PGD (top-level) entry
    1022                 :            :  * @pud_entry: if set, called for each non-empty PUD (2nd-level) entry
    1023                 :            :  * @pmd_entry: if set, called for each non-empty PMD (3rd-level) entry
    1024                 :            :  *             this handler is required to be able to handle
    1025                 :            :  *             pmd_trans_huge() pmds.  They may simply choose to
    1026                 :            :  *             split_huge_page() instead of handling it explicitly.
    1027                 :            :  * @pte_entry: if set, called for each non-empty PTE (4th-level) entry
    1028                 :            :  * @pte_hole: if set, called for each hole at all levels
    1029                 :            :  * @hugetlb_entry: if set, called for each hugetlb entry
     1030                 :            :  *                 *Caution*: The caller must hold mmap_sem if @hugetlb_entry
    1031                 :            :  *                            is used.
    1032                 :            :  *
    1033                 :            :  * (see walk_page_range for more details)
    1034                 :            :  */
    1035                 :            : struct mm_walk {
    1036                 :            :         int (*pgd_entry)(pgd_t *pgd, unsigned long addr,
    1037                 :            :                          unsigned long next, struct mm_walk *walk);
    1038                 :            :         int (*pud_entry)(pud_t *pud, unsigned long addr,
    1039                 :            :                          unsigned long next, struct mm_walk *walk);
    1040                 :            :         int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
    1041                 :            :                          unsigned long next, struct mm_walk *walk);
    1042                 :            :         int (*pte_entry)(pte_t *pte, unsigned long addr,
    1043                 :            :                          unsigned long next, struct mm_walk *walk);
    1044                 :            :         int (*pte_hole)(unsigned long addr, unsigned long next,
    1045                 :            :                         struct mm_walk *walk);
    1046                 :            :         int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
    1047                 :            :                              unsigned long addr, unsigned long next,
    1048                 :            :                              struct mm_walk *walk);
    1049                 :            :         struct mm_struct *mm;
    1050                 :            :         void *private;
    1051                 :            : };
    1052                 :            : 
    1053                 :            : int walk_page_range(unsigned long addr, unsigned long end,
    1054                 :            :                 struct mm_walk *walk);
    1055                 :            : void free_pgd_range(struct mmu_gather *tlb, unsigned long addr,
    1056                 :            :                 unsigned long end, unsigned long floor, unsigned long ceiling);
    1057                 :            : int copy_page_range(struct mm_struct *dst, struct mm_struct *src,
    1058                 :            :                         struct vm_area_struct *vma);
    1059                 :            : void unmap_mapping_range(struct address_space *mapping,
    1060                 :            :                 loff_t const holebegin, loff_t const holelen, int even_cows);
    1061                 :            : int follow_pfn(struct vm_area_struct *vma, unsigned long address,
    1062                 :            :         unsigned long *pfn);
    1063                 :            : int follow_phys(struct vm_area_struct *vma, unsigned long address,
    1064                 :            :                 unsigned int flags, unsigned long *prot, resource_size_t *phys);
    1065                 :            : int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
    1066                 :            :                         void *buf, int len, int write);
    1067                 :            : 
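Editor's note: a self-contained example of the walk_page_range() callback
pattern documented above; the caller must hold mmap_sem, and all names are
illustrative.

/* Sketch: count present PTEs in [start, end) with a pte_entry callback. */
static int count_pte_example(pte_t *pte, unsigned long addr,
                             unsigned long next, struct mm_walk *walk)
{
        unsigned long *count = walk->private;

        if (pte_present(*pte))
                (*count)++;
        return 0;
}

static unsigned long count_present_ptes_example(struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long end)
{
        unsigned long count = 0;
        struct mm_walk walk = {
                .pte_entry      = count_pte_example,
                .mm             = mm,
                .private        = &count,
        };

        walk_page_range(start, end, &walk);
        return count;
}
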
    1068                 :            : static inline void unmap_shared_mapping_range(struct address_space *mapping,
    1069                 :            :                 loff_t const holebegin, loff_t const holelen)
    1070                 :            : {
    1071                 :            :         unmap_mapping_range(mapping, holebegin, holelen, 0);
    1072                 :            : }
    1073                 :            : 
    1074                 :            : extern void truncate_pagecache(struct inode *inode, loff_t new);
    1075                 :            : extern void truncate_setsize(struct inode *inode, loff_t newsize);
    1076                 :            : void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
    1077                 :            : int truncate_inode_page(struct address_space *mapping, struct page *page);
    1078                 :            : int generic_error_remove_page(struct address_space *mapping, struct page *page);
    1079                 :            : int invalidate_inode_page(struct page *page);
    1080                 :            : 
    1081                 :            : #ifdef CONFIG_MMU
    1082                 :            : extern int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
    1083                 :            :                         unsigned long address, unsigned int flags);
    1084                 :            : extern int fixup_user_fault(struct task_struct *tsk, struct mm_struct *mm,
    1085                 :            :                             unsigned long address, unsigned int fault_flags);
    1086                 :            : #else
    1087                 :            : static inline int handle_mm_fault(struct mm_struct *mm,
    1088                 :            :                         struct vm_area_struct *vma, unsigned long address,
    1089                 :            :                         unsigned int flags)
    1090                 :            : {
    1091                 :            :         /* should never happen if there's no MMU */
    1092                 :            :         BUG();
    1093                 :            :         return VM_FAULT_SIGBUS;
    1094                 :            : }
    1095                 :            : static inline int fixup_user_fault(struct task_struct *tsk,
    1096                 :            :                 struct mm_struct *mm, unsigned long address,
    1097                 :            :                 unsigned int fault_flags)
    1098                 :            : {
    1099                 :            :         /* should never happen if there's no MMU */
    1100                 :            :         BUG();
    1101                 :            :         return -EFAULT;
    1102                 :            : }
    1103                 :            : #endif
    1104                 :            : 
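Editor's note: a condensed sketch of the dispatch an arch fault handler
performs on the returned VM_FAULT_* bits, simplified from the common pattern
in arch/*/mm/fault.c; the function name is invented.

/* Sketch: translate handle_mm_fault() results into an errno-style code;
 * real handlers additionally deal with VM_FAULT_RETRY and signals. */
static int fault_dispatch_example(struct mm_struct *mm,
                                  struct vm_area_struct *vma,
                                  unsigned long address, unsigned int flags)
{
        int fault = handle_mm_fault(mm, vma, address, flags);

        if (fault & VM_FAULT_OOM) {
                pagefault_out_of_memory();
                return -ENOMEM;
        }
        if (fault & VM_FAULT_ERROR)
                return -EFAULT;         /* caller delivers SIGBUS */
        return 0;
}
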
    1105                 :            : extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
    1106                 :            : extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
    1107                 :            :                 void *buf, int len, int write);
    1108                 :            : 
    1109                 :            : long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
    1110                 :            :                       unsigned long start, unsigned long nr_pages,
    1111                 :            :                       unsigned int foll_flags, struct page **pages,
    1112                 :            :                       struct vm_area_struct **vmas, int *nonblocking);
    1113                 :            : long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
    1114                 :            :                     unsigned long start, unsigned long nr_pages,
    1115                 :            :                     int write, int force, struct page **pages,
    1116                 :            :                     struct vm_area_struct **vmas);
    1117                 :            : int get_user_pages_fast(unsigned long start, int nr_pages, int write,
    1118                 :            :                         struct page **pages);
    1119                 :            : struct kvec;
    1120                 :            : int get_kernel_pages(const struct kvec *iov, int nr_pages, int write,
    1121                 :            :                         struct page **pages);
    1122                 :            : int get_kernel_page(unsigned long start, int write, struct page **pages);
    1123                 :            : struct page *get_dump_page(unsigned long addr);
    1124                 :            : 
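Editor's note: a hedged sketch of the era-typical get_user_pages() pattern.
down_read()/up_read() come from linux/rwsem.h, put_page() is declared earlier
in this header, and the function name is invented.

/* Sketch: pin one user page for reading, then drop the reference. In this
 * era callers took mmap_sem around get_user_pages() themselves. */
static int pin_one_page_example(unsigned long uaddr)
{
        struct page *page;
        long ret;

        down_read(&current->mm->mmap_sem);
        ret = get_user_pages(current, current->mm, uaddr & PAGE_MASK,
                             1, 0 /* write */, 0 /* force */, &page, NULL);
        up_read(&current->mm->mmap_sem);

        if (ret != 1)
                return -EFAULT;
        /* ... read from the pinned page ... */
        put_page(page);
        return 0;
}
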
    1125                 :            : extern int try_to_release_page(struct page * page, gfp_t gfp_mask);
    1126                 :            : extern void do_invalidatepage(struct page *page, unsigned int offset,
    1127                 :            :                               unsigned int length);
    1128                 :            : 
    1129                 :            : int __set_page_dirty_nobuffers(struct page *page);
    1130                 :            : int __set_page_dirty_no_writeback(struct page *page);
    1131                 :            : int redirty_page_for_writepage(struct writeback_control *wbc,
    1132                 :            :                                 struct page *page);
    1133                 :            : void account_page_dirtied(struct page *page, struct address_space *mapping);
    1134                 :            : void account_page_writeback(struct page *page);
    1135                 :            : int set_page_dirty(struct page *page);
    1136                 :            : int set_page_dirty_lock(struct page *page);
    1137                 :            : int clear_page_dirty_for_io(struct page *page);
    1138                 :            : 
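Editor's note: a sketch of the redirty pattern a filesystem's ->writepage()
uses when it must back off; unlock_page() comes from linux/pagemap.h and the
function name is invented.

/* Sketch: cannot write the page out right now, so re-dirty it and let
 * writeback try again later. */
static int writepage_backoff_example(struct page *page,
                                     struct writeback_control *wbc)
{
        redirty_page_for_writepage(wbc, page);
        unlock_page(page);
        return 0;
}
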
    1139                 :            : /* Is the vma a continuation of the stack vma above it? */
    1140                 :            : static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
    1141                 :            : {
     1142 [ +  - ][ +  + ]:        156 :         return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
    1143                 :            : }
    1144                 :            : 
    1145                 :            : static inline int stack_guard_page_start(struct vm_area_struct *vma,
    1146                 :            :                                              unsigned long addr)
    1147                 :            : {
    1148         [ +  + ]:     875491 :         return (vma->vm_flags & VM_GROWSDOWN) &&
    1149 [ +  + ][ +  + ]:     933332 :                 (vma->vm_start == addr) &&
    1150                 :        156 :                 !vma_growsdown(vma->vm_prev, addr);
    1151                 :            : }
    1152                 :            : 
    1153                 :            : /* Is the vma a continuation of the stack vma below it? */
    1154                 :            : static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
    1155                 :            : {
    1156                 :            :         return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
    1157                 :            : }
    1158                 :            : 
    1159                 :            : static inline int stack_guard_page_end(struct vm_area_struct *vma,
    1160                 :            :                                            unsigned long addr)
    1161                 :            : {
    1162                 :            :         return (vma->vm_flags & VM_GROWSUP) &&
    1163                 :            :                 (vma->vm_end == addr) &&
    1164                 :            :                 !vma_growsup(vma->vm_next, addr);
    1165                 :            : }
    1166                 :            : 
    1167                 :            : extern pid_t
    1168                 :            : vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
    1169                 :            : 
    1170                 :            : extern unsigned long move_page_tables(struct vm_area_struct *vma,
    1171                 :            :                 unsigned long old_addr, struct vm_area_struct *new_vma,
    1172                 :            :                 unsigned long new_addr, unsigned long len,
    1173                 :            :                 bool need_rmap_locks);
    1174                 :            : extern unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
    1175                 :            :                               unsigned long end, pgprot_t newprot,
    1176                 :            :                               int dirty_accountable, int prot_numa);
    1177                 :            : extern int mprotect_fixup(struct vm_area_struct *vma,
    1178                 :            :                           struct vm_area_struct **pprev, unsigned long start,
    1179                 :            :                           unsigned long end, unsigned long newflags);
    1180                 :            : 
    1181                 :            : /*
     1182                 :            :  * Does not attempt to fault, so it may return fewer pages than requested.
    1183                 :            :  */
    1184                 :            : int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
    1185                 :            :                           struct page **pages);
    1186                 :            : /*
     1187                 :            :  * per-process (per-mm_struct) statistics.
    1188                 :            :  */
    1189                 :            : static inline unsigned long get_mm_counter(struct mm_struct *mm, int member)
    1190                 :            : {
    1191                 :            :         long val = atomic_long_read(&mm->rss_stat.count[member]);
    1192                 :            : 
    1193                 :            : #ifdef SPLIT_RSS_COUNTING
    1194                 :            :         /*
     1195                 :            :          * The counter is updated asynchronously and may transiently go
     1196                 :            :          * negative, which is never the value callers expect.
    1197                 :            :          */
     1198 [ +  + ][ -  + ]:    3658900 :         if (val < 0)
    1199                 :            :                 val = 0;
    1200                 :            : #endif
    1201                 :    7313498 :         return (unsigned long)val;
    1202                 :            : }
    1203                 :            : 
    1204                 :            : static inline void add_mm_counter(struct mm_struct *mm, int member, long value)
    1205                 :            : {
    1206                 :   35535290 :         atomic_long_add(value, &mm->rss_stat.count[member]);
    1207                 :            : }
    1208                 :            : 
    1209                 :            : static inline void inc_mm_counter(struct mm_struct *mm, int member)
    1210                 :            : {
    1211                 :          0 :         atomic_long_inc(&mm->rss_stat.count[member]);
    1212                 :            : }
    1213                 :            : 
    1214                 :            : static inline void dec_mm_counter(struct mm_struct *mm, int member)
    1215                 :            : {
    1216                 :       5136 :         atomic_long_dec(&mm->rss_stat.count[member]);
    1217                 :            : }
    1218                 :            : 
    1219                 :            : static inline unsigned long get_mm_rss(struct mm_struct *mm)
    1220                 :            : {
    1221                 :    3654532 :         return get_mm_counter(mm, MM_FILEPAGES) +
    1222                 :            :                 get_mm_counter(mm, MM_ANONPAGES);
    1223                 :            : }
    1224                 :            : 
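Editor's note: a worked example of the counter arithmetic (name invented).

/* Sketch: RSS in kilobytes, the same shift that /proc-style reporting
 * applies on top of the page counts summed by get_mm_rss(). */
static unsigned long mm_rss_kb_example(struct mm_struct *mm)
{
        return get_mm_rss(mm) << (PAGE_SHIFT - 10);
}
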
    1225                 :            : static inline unsigned long get_mm_hiwater_rss(struct mm_struct *mm)
    1226                 :            : {
    1227                 :    2526448 :         return max(mm->hiwater_rss, get_mm_rss(mm));
    1228                 :            : }
    1229                 :            : 
    1230                 :            : static inline unsigned long get_mm_hiwater_vm(struct mm_struct *mm)
    1231                 :            : {
    1232                 :            :         return max(mm->hiwater_vm, mm->total_vm);
    1233                 :            : }
    1234                 :            : 
    1235                 :            : static inline void update_hiwater_rss(struct mm_struct *mm)
    1236                 :            : {
    1237                 :            :         unsigned long _rss = get_mm_rss(mm);
    1238                 :            : 
    1239 [ +  + ][ +  + ]:    1248585 :         if ((mm)->hiwater_rss < _rss)
    1240                 :     529213 :                 (mm)->hiwater_rss = _rss;
    1241                 :            : }
    1242                 :            : 
    1243                 :            : static inline void update_hiwater_vm(struct mm_struct *mm)
    1244                 :            : {
    1245         [ +  + ]:    1179116 :         if (mm->hiwater_vm < mm->total_vm)
    1246                 :    1179116 :                 mm->hiwater_vm = mm->total_vm;
    1247                 :            : }
    1248                 :            : 
    1249                 :            : static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
    1250                 :            :                                          struct mm_struct *mm)
    1251                 :            : {
    1252                 :            :         unsigned long hiwater_rss = get_mm_hiwater_rss(mm);
    1253                 :            : 
    1254         [ +  + ]:    1263224 :         if (*maxrss < hiwater_rss)
    1255                 :    1192708 :                 *maxrss = hiwater_rss;
    1256                 :            : }
    1257                 :            : 
    1258                 :            : #if defined(SPLIT_RSS_COUNTING)
    1259                 :            : void sync_mm_rss(struct mm_struct *mm);
    1260                 :            : #else
    1261                 :            : static inline void sync_mm_rss(struct mm_struct *mm)
    1262                 :            : {
    1263                 :            : }
    1264                 :            : #endif
    1265                 :            : 
    1266                 :            : int vma_wants_writenotify(struct vm_area_struct *vma);
    1267                 :            : 
    1268                 :            : extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
    1269                 :            :                                spinlock_t **ptl);
    1270                 :            : static inline pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr,
    1271                 :            :                                     spinlock_t **ptl)
    1272                 :            : {
    1273                 :            :         pte_t *ptep;
    1274                 :       1119 :         __cond_lock(*ptl, ptep = __get_locked_pte(mm, addr, ptl));
    1275                 :            :         return ptep;
    1276                 :            : }
    1277                 :            : 
    1278                 :            : #ifdef __PAGETABLE_PUD_FOLDED
    1279                 :            : static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
    1280                 :            :                                                 unsigned long address)
    1281                 :            : {
    1282                 :            :         return 0;
    1283                 :            : }
    1284                 :            : #else
    1285                 :            : int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
    1286                 :            : #endif
    1287                 :            : 
    1288                 :            : #ifdef __PAGETABLE_PMD_FOLDED
    1289                 :            : static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
    1290                 :            :                                                 unsigned long address)
    1291                 :            : {
    1292                 :            :         return 0;
    1293                 :            : }
    1294                 :            : #else
    1295                 :            : int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
    1296                 :            : #endif
    1297                 :            : 
    1298                 :            : int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
    1299                 :            :                 pmd_t *pmd, unsigned long address);
    1300                 :            : int __pte_alloc_kernel(pmd_t *pmd, unsigned long address);
    1301                 :            : 
    1302                 :            : /*
     1303                 :            :  * The following ifdef is needed to get the 4level-fixup.h header to work.
    1304                 :            :  * Remove it when 4level-fixup.h has been removed.
    1305                 :            :  */
    1306                 :            : #if defined(CONFIG_MMU) && !defined(__ARCH_HAS_4LEVEL_HACK)
    1307                 :            : static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
    1308                 :            : {
    1309                 :            :         return (unlikely(pgd_none(*pgd)) && __pud_alloc(mm, pgd, address))?
    1310                 :            :                 NULL: pud_offset(pgd, address);
    1311                 :            : }
    1312                 :            : 
    1313                 :            : static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
    1314                 :            : {
    1315                 :            :         return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
    1316                 :            :                 NULL: pmd_offset(pud, address);
    1317                 :            : }
    1318                 :            : #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
    1319                 :            : 
    1320                 :            : #if USE_SPLIT_PTE_PTLOCKS
    1321                 :            : #if ALLOC_SPLIT_PTLOCKS
    1322                 :            : extern bool ptlock_alloc(struct page *page);
    1323                 :            : extern void ptlock_free(struct page *page);
    1324                 :            : 
    1325                 :            : static inline spinlock_t *ptlock_ptr(struct page *page)
    1326                 :            : {
    1327                 :            :         return page->ptl;
    1328                 :            : }
    1329                 :            : #else /* ALLOC_SPLIT_PTLOCKS */
    1330                 :            : static inline bool ptlock_alloc(struct page *page)
    1331                 :            : {
    1332                 :            :         return true;
    1333                 :            : }
    1334                 :            : 
    1335                 :            : static inline void ptlock_free(struct page *page)
    1336                 :            : {
    1337                 :            : }
    1338                 :            : 
    1339                 :            : static inline spinlock_t *ptlock_ptr(struct page *page)
    1340                 :            : {
    1341                 :            :         return &page->ptl;
    1342                 :            : }
    1343                 :            : #endif /* ALLOC_SPLIT_PTLOCKS */
    1344                 :            : 
    1345                 :            : static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
    1346                 :            : {
    1347                 :  152317751 :         return ptlock_ptr(pmd_page(*pmd));
    1348                 :            : }
    1349                 :            : 
    1350                 :            : static inline bool ptlock_init(struct page *page)
    1351                 :            : {
    1352                 :            :         /*
     1353                 :            :          * prep_new_page() initializes page->private (and therefore page->ptl)
     1354                 :            :          * to 0. Make sure nobody took it into use in the meantime.
     1355                 :            :          *
     1356                 :            :          * That can happen if an arch tries to use slab for page table allocation:
    1357                 :            :          * slab code uses page->slab_cache and page->first_page (for tail
    1358                 :            :          * pages), which share storage with page->ptl.
    1359                 :            :          */
    1360                 :            :         VM_BUG_ON(*(unsigned long *)&page->ptl);
    1361                 :            :         if (!ptlock_alloc(page))
    1362                 :            :                 return false;
    1363                 :    4992034 :         spin_lock_init(ptlock_ptr(page));
    1364                 :            :         return true;
    1365                 :            : }
    1366                 :            : 
    1367                 :            : /* Reset page->mapping so free_pages_check won't complain. */
    1368                 :            : static inline void pte_lock_deinit(struct page *page)
    1369                 :            : {
    1370                 :    3796982 :         page->mapping = NULL;
    1371                 :            :         ptlock_free(page);
    1372                 :            : }
    1373                 :            : 
    1374                 :            : #else   /* !USE_SPLIT_PTE_PTLOCKS */
    1375                 :            : /*
    1376                 :            :  * We use mm->page_table_lock to guard all pagetable pages of the mm.
    1377                 :            :  */
    1378                 :            : static inline spinlock_t *pte_lockptr(struct mm_struct *mm, pmd_t *pmd)
    1379                 :            : {
    1380                 :            :         return &mm->page_table_lock;
    1381                 :            : }
    1382                 :            : static inline bool ptlock_init(struct page *page) { return true; }
    1383                 :            : static inline void pte_lock_deinit(struct page *page) {}
    1384                 :            : #endif /* USE_SPLIT_PTE_PTLOCKS */
    1385                 :            : 
    1386                 :            : static inline bool pgtable_page_ctor(struct page *page)
    1387                 :            : {
    1388                 :    4992136 :         inc_zone_page_state(page, NR_PAGETABLE);
    1389                 :            :         return ptlock_init(page);
    1390                 :            : }
    1391                 :            : 
    1392                 :            : static inline void pgtable_page_dtor(struct page *page)
    1393                 :            : {
    1394                 :            :         pte_lock_deinit(page);
    1395                 :    3796982 :         dec_zone_page_state(page, NR_PAGETABLE);
    1396                 :            : }
    1397                 :            : 
    1398                 :            : #define pte_offset_map_lock(mm, pmd, address, ptlp)     \
    1399                 :            : ({                                                      \
    1400                 :            :         spinlock_t *__ptl = pte_lockptr(mm, pmd);       \
    1401                 :            :         pte_t *__pte = pte_offset_map(pmd, address);    \
    1402                 :            :         *(ptlp) = __ptl;                                \
    1403                 :            :         spin_lock(__ptl);                               \
    1404                 :            :         __pte;                                          \
    1405                 :            : })
    1406                 :            : 
    1407                 :            : #define pte_unmap_unlock(pte, ptl)      do {            \
    1408                 :            :         spin_unlock(ptl);                               \
    1409                 :            :         pte_unmap(pte);                                 \
    1410                 :            : } while (0)
    1411                 :            : 
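Editor's note: a hedged illustration of the pairing these two macros enforce;
the function name is invented.

/* Sketch: read one PTE under its (possibly split) page-table lock. */
static pte_t read_pte_example(struct mm_struct *mm, pmd_t *pmd,
                              unsigned long addr)
{
        spinlock_t *ptl;
        pte_t *ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
        pte_t pte = *ptep;

        pte_unmap_unlock(ptep, ptl);
        return pte;
}
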
    1412                 :            : #define pte_alloc_map(mm, vma, pmd, address)                            \
    1413                 :            :         ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, vma,    \
    1414                 :            :                                                         pmd, address))? \
    1415                 :            :          NULL: pte_offset_map(pmd, address))
    1416                 :            : 
    1417                 :            : #define pte_alloc_map_lock(mm, pmd, address, ptlp)      \
    1418                 :            :         ((unlikely(pmd_none(*(pmd))) && __pte_alloc(mm, NULL,   \
    1419                 :            :                                                         pmd, address))? \
    1420                 :            :                 NULL: pte_offset_map_lock(mm, pmd, address, ptlp))
    1421                 :            : 
    1422                 :            : #define pte_alloc_kernel(pmd, address)                  \
    1423                 :            :         ((unlikely(pmd_none(*(pmd))) && __pte_alloc_kernel(pmd, address))? \
    1424                 :            :                 NULL: pte_offset_kernel(pmd, address))
    1425                 :            : 
    1426                 :            : #if USE_SPLIT_PMD_PTLOCKS
    1427                 :            : 
    1428                 :            : static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
    1429                 :            : {
    1430                 :            :         return ptlock_ptr(virt_to_page(pmd));
    1431                 :            : }
    1432                 :            : 
    1433                 :            : static inline bool pgtable_pmd_page_ctor(struct page *page)
    1434                 :            : {
    1435                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    1436                 :            :         page->pmd_huge_pte = NULL;
    1437                 :            : #endif
    1438                 :            :         return ptlock_init(page);
    1439                 :            : }
    1440                 :            : 
    1441                 :            : static inline void pgtable_pmd_page_dtor(struct page *page)
    1442                 :            : {
    1443                 :            : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
    1444                 :            :         VM_BUG_ON(page->pmd_huge_pte);
    1445                 :            : #endif
    1446                 :            :         ptlock_free(page);
    1447                 :            : }
    1448                 :            : 
    1449                 :            : #define pmd_huge_pte(mm, pmd) (virt_to_page(pmd)->pmd_huge_pte)
    1450                 :            : 
    1451                 :            : #else
    1452                 :            : 
    1453                 :            : static inline spinlock_t *pmd_lockptr(struct mm_struct *mm, pmd_t *pmd)
    1454                 :            : {
    1455                 :            :         return &mm->page_table_lock;
    1456                 :            : }
    1457                 :            : 
    1458                 :            : static inline bool pgtable_pmd_page_ctor(struct page *page) { return true; }
    1459                 :            : static inline void pgtable_pmd_page_dtor(struct page *page) {}
    1460                 :            : 
    1461                 :            : #define pmd_huge_pte(mm, pmd) ((mm)->pmd_huge_pte)
    1462                 :            : 
    1463                 :            : #endif
    1464                 :            : 
    1465                 :            : static inline spinlock_t *pmd_lock(struct mm_struct *mm, pmd_t *pmd)
    1466                 :            : {
    1467                 :    4991998 :         spinlock_t *ptl = pmd_lockptr(mm, pmd);
    1468                 :            :         spin_lock(ptl);
    1469                 :            :         return ptl;
    1470                 :            : }
    1471                 :            : 
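Editor's note: a short sketch of the pmd_lock() pattern; pmd_trans_huge() is
assumed from the pgtable headers (it evaluates to 0 without THP), and the
function name is invented.

/* Sketch: examine a pmd under its lock, THP-style. */
static int pmd_is_huge_example(struct mm_struct *mm, pmd_t *pmd)
{
        spinlock_t *ptl = pmd_lock(mm, pmd);
        int huge = pmd_trans_huge(*pmd);

        spin_unlock(ptl);
        return huge;
}
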
    1472                 :            : extern void free_area_init(unsigned long * zones_size);
    1473                 :            : extern void free_area_init_node(int nid, unsigned long * zones_size,
    1474                 :            :                 unsigned long zone_start_pfn, unsigned long *zholes_size);
    1475                 :            : extern void free_initmem(void);
    1476                 :            : 
    1477                 :            : /*
    1478                 :            :  * Free reserved pages within range [PAGE_ALIGN(start), end & PAGE_MASK)
     1479                 :            :  * into the buddy system. The freed pages are poisoned with the pattern
     1480                 :            :  * "poison" if that value lies within [0, UCHAR_MAX].
     1481                 :            :  * Returns the number of pages freed into the buddy system.
    1482                 :            :  */
    1483                 :            : extern unsigned long free_reserved_area(void *start, void *end,
    1484                 :            :                                         int poison, char *s);
    1485                 :            : 
    1486                 :            : #ifdef  CONFIG_HIGHMEM
    1487                 :            : /*
    1488                 :            :  * Free a highmem page into the buddy system, adjusting totalhigh_pages
    1489                 :            :  * and totalram_pages.
    1490                 :            :  */
    1491                 :            : extern void free_highmem_page(struct page *page);
    1492                 :            : #endif
    1493                 :            : 
    1494                 :            : extern void adjust_managed_page_count(struct page *page, long count);
    1495                 :            : extern void mem_init_print_info(const char *str);
    1496                 :            : 
    1497                 :            : /* Free the reserved page into the buddy system, so it gets managed. */
    1498                 :            : static inline void __free_reserved_page(struct page *page)
    1499                 :            : {
    1500                 :            :         ClearPageReserved(page);
    1501                 :            :         init_page_count(page);
    1502                 :          0 :         __free_page(page);
    1503                 :            : }
    1504                 :            : 
    1505                 :            : static inline void free_reserved_page(struct page *page)
    1506                 :            : {
    1507                 :            :         __free_reserved_page(page);
    1508                 :          0 :         adjust_managed_page_count(page, 1);
    1509                 :            : }
    1510                 :            : 
    1511                 :            : static inline void mark_page_reserved(struct page *page)
    1512                 :            : {
    1513                 :            :         SetPageReserved(page);
    1514                 :            :         adjust_managed_page_count(page, -1);
    1515                 :            : }
    1516                 :            : 
    1517                 :            : /*
    1518                 :            :  * Default method to free all the __init memory into the buddy system.
    1519                 :            :  * The freed pages will be poisoned with the pattern "poison" if that
    1520                 :            :  * value lies within the range [0, UCHAR_MAX].
    1521                 :            :  * Returns the number of pages freed into the buddy system.
    1522                 :            :  */
    1523                 :            : static inline unsigned long free_initmem_default(int poison)
    1524                 :            : {
    1525                 :            :         extern char __init_begin[], __init_end[];
    1526                 :            : 
    1527                 :            :         return free_reserved_area(&__init_begin, &__init_end,
    1528                 :            :                                   poison, "unused kernel");
    1529                 :            : }
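
Under this default, an architecture's free_initmem() can reduce to a
one-liner; a sketch using the generic POISON_FREE_INITMEM pattern from
<linux/poison.h>:

        void free_initmem(void)
        {
                free_initmem_default(POISON_FREE_INITMEM);
        }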
    1530                 :            : 
    1531                 :            : static inline unsigned long get_num_physpages(void)
    1532                 :            : {
    1533                 :            :         int nid;
    1534                 :            :         unsigned long phys_pages = 0;
    1535                 :            : 
    1536         [ #  # ]:          0 :         for_each_online_node(nid)
    1537                 :          0 :                 phys_pages += node_present_pages(nid);
    1538                 :            : 
    1539                 :            :         return phys_pages;
    1540                 :            : }
    1541                 :            : 
    1542                 :            : #ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
    1543                 :            : /*
    1544                 :            :  * With CONFIG_HAVE_MEMBLOCK_NODE_MAP set, an architecture may initialise its
    1545                 :            :  * zones, allocate the backing mem_map and account for memory holes in a
    1546                 :            :  * more architecture-independent manner. This is a substitute for creating
    1547                 :            :  * the zone_sizes[] and zholes_size[] arrays and passing them to
    1548                 :            :  * free_area_init_node().
    1549                 :            :  *
    1550                 :            :  * An architecture is expected to register the ranges of page frames backed
    1551                 :            :  * by physical memory with memblock_add[_node]() before calling
    1552                 :            :  * free_area_init_nodes(), passing in the PFN at which each zone ends. In
    1553                 :            :  * basic usage, an architecture is expected to do something like:
    1554                 :            :  *
    1555                 :            :  * unsigned long max_zone_pfns[MAX_NR_ZONES] = {max_dma, max_normal_pfn,
    1556                 :            :  *                                                       max_highmem_pfn};
    1557                 :            :  * for_each_valid_physical_page_range()
    1558                 :            :  *      memblock_add_node(base, size, nid)
    1559                 :            :  * free_area_init_nodes(max_zone_pfns);
    1560                 :            :  *
    1561                 :            :  * free_bootmem_with_active_regions() calls free_bootmem_node() for each
    1562                 :            :  * registered physical page range.  Similarly
    1563                 :            :  * sparse_memory_present_with_active_regions() calls memory_present() for
    1564                 :            :  * each range when SPARSEMEM is enabled.
    1565                 :            :  *
    1566                 :            :  * See mm/page_alloc.c for more information on each function exposed by
    1567                 :            :  * CONFIG_HAVE_MEMBLOCK_NODE_MAP.
    1568                 :            :  */
    1569                 :            : extern void free_area_init_nodes(unsigned long *max_zone_pfn);
    1570                 :            : unsigned long node_map_pfn_alignment(void);
    1571                 :            : unsigned long __absent_pages_in_range(int nid, unsigned long start_pfn,
    1572                 :            :                                                 unsigned long end_pfn);
    1573                 :            : extern unsigned long absent_pages_in_range(unsigned long start_pfn,
    1574                 :            :                                                 unsigned long end_pfn);
    1575                 :            : extern void get_pfn_range_for_nid(unsigned int nid,
    1576                 :            :                         unsigned long *start_pfn, unsigned long *end_pfn);
    1577                 :            : extern unsigned long find_min_pfn_with_active_regions(void);
    1578                 :            : extern void free_bootmem_with_active_regions(int nid,
    1579                 :            :                                                 unsigned long max_low_pfn);
    1580                 :            : extern void sparse_memory_present_with_active_regions(int nid);
    1581                 :            : 
    1582                 :            : #endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
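
A sketch of the protocol described in the comment above, as arch setup code
might implement it (the function name is hypothetical, and MAX_DMA_PFN /
max_low_pfn stand in for arch-specific zone limits):

        static void __init example_arch_paging_init(void)
        {
                unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0 };

                /* RAM ranges were registered earlier via memblock_add_node() */
                max_zone_pfns[ZONE_DMA]    = MAX_DMA_PFN;
                max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

                free_area_init_nodes(max_zone_pfns); /* init zones on all nodes */
        }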
    1583                 :            : 
    1584                 :            : #if !defined(CONFIG_HAVE_MEMBLOCK_NODE_MAP) && \
    1585                 :            :     !defined(CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID)
    1586                 :            : static inline int __early_pfn_to_nid(unsigned long pfn)
    1587                 :            : {
    1588                 :            :         return 0;
    1589                 :            : }
    1590                 :            : #else
    1591                 :            : /* please see mm/page_alloc.c */
    1592                 :            : extern int __meminit early_pfn_to_nid(unsigned long pfn);
    1593                 :            : #ifdef CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID
    1594                 :            : /* there is a per-arch backend function. */
    1595                 :            : extern int __meminit __early_pfn_to_nid(unsigned long pfn);
    1596                 :            : #endif /* CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID */
    1597                 :            : #endif
    1598                 :            : 
    1599                 :            : extern void set_dma_reserve(unsigned long new_dma_reserve);
    1600                 :            : extern void memmap_init_zone(unsigned long, int, unsigned long,
    1601                 :            :                                 unsigned long, enum memmap_context);
    1602                 :            : extern void setup_per_zone_wmarks(void);
    1603                 :            : extern int __meminit init_per_zone_wmark_min(void);
    1604                 :            : extern void mem_init(void);
    1605                 :            : extern void __init mmap_init(void);
    1606                 :            : extern void show_mem(unsigned int flags);
    1607                 :            : extern void si_meminfo(struct sysinfo * val);
    1608                 :            : extern void si_meminfo_node(struct sysinfo *val, int nid);
    1609                 :            : 
    1610                 :            : extern __printf(3, 4)
    1611                 :            : void warn_alloc_failed(gfp_t gfp_mask, int order, const char *fmt, ...);
    1612                 :            : 
    1613                 :            : extern void setup_per_cpu_pageset(void);
    1614                 :            : 
    1615                 :            : extern void zone_pcp_update(struct zone *zone);
    1616                 :            : extern void zone_pcp_reset(struct zone *zone);
    1617                 :            : 
    1618                 :            : /* page_alloc.c */
    1619                 :            : extern int min_free_kbytes;
    1620                 :            : 
    1621                 :            : /* nommu.c */
    1622                 :            : extern atomic_long_t mmap_pages_allocated;
    1623                 :            : extern int nommu_shrink_inode_mappings(struct inode *, size_t, size_t);
    1624                 :            : 
    1625                 :            : /* interval_tree.c */
    1626                 :            : void vma_interval_tree_insert(struct vm_area_struct *node,
    1627                 :            :                               struct rb_root *root);
    1628                 :            : void vma_interval_tree_insert_after(struct vm_area_struct *node,
    1629                 :            :                                     struct vm_area_struct *prev,
    1630                 :            :                                     struct rb_root *root);
    1631                 :            : void vma_interval_tree_remove(struct vm_area_struct *node,
    1632                 :            :                               struct rb_root *root);
    1633                 :            : struct vm_area_struct *vma_interval_tree_iter_first(struct rb_root *root,
    1634                 :            :                                 unsigned long start, unsigned long last);
    1635                 :            : struct vm_area_struct *vma_interval_tree_iter_next(struct vm_area_struct *node,
    1636                 :            :                                 unsigned long start, unsigned long last);
    1637                 :            : 
    1638                 :            : #define vma_interval_tree_foreach(vma, root, start, last)               \
    1639                 :            :         for (vma = vma_interval_tree_iter_first(root, start, last);     \
    1640                 :            :              vma; vma = vma_interval_tree_iter_next(vma, start, last))
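
A minimal iteration sketch, assuming the caller already holds the lock that
protects the tree (i_mmap_mutex for a file's i_mmap tree) and that 'mapping',
'first' and 'last' (file page offsets) are provided by the caller:

        struct vm_area_struct *vma;

        vma_interval_tree_foreach(vma, &mapping->i_mmap, first, last)
                pr_info("vma %#lx-%#lx maps pages %lu..%lu\n",
                        vma->vm_start, vma->vm_end, first, last);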
    1641                 :            : 
    1642                 :            : static inline void vma_nonlinear_insert(struct vm_area_struct *vma,
    1643                 :            :                                         struct list_head *list)
    1644                 :            : {
    1645                 :          1 :         list_add_tail(&vma->shared.nonlinear, list);
    1646                 :            : }
    1647                 :            : 
    1648                 :            : void anon_vma_interval_tree_insert(struct anon_vma_chain *node,
    1649                 :            :                                    struct rb_root *root);
    1650                 :            : void anon_vma_interval_tree_remove(struct anon_vma_chain *node,
    1651                 :            :                                    struct rb_root *root);
    1652                 :            : struct anon_vma_chain *anon_vma_interval_tree_iter_first(
    1653                 :            :         struct rb_root *root, unsigned long start, unsigned long last);
    1654                 :            : struct anon_vma_chain *anon_vma_interval_tree_iter_next(
    1655                 :            :         struct anon_vma_chain *node, unsigned long start, unsigned long last);
    1656                 :            : #ifdef CONFIG_DEBUG_VM_RB
    1657                 :            : void anon_vma_interval_tree_verify(struct anon_vma_chain *node);
    1658                 :            : #endif
    1659                 :            : 
    1660                 :            : #define anon_vma_interval_tree_foreach(avc, root, start, last)           \
    1661                 :            :         for (avc = anon_vma_interval_tree_iter_first(root, start, last); \
    1662                 :            :              avc; avc = anon_vma_interval_tree_iter_next(avc, start, last))
    1663                 :            : 
    1664                 :            : /* mmap.c */
    1665                 :            : extern int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin);
    1666                 :            : extern int vma_adjust(struct vm_area_struct *vma, unsigned long start,
    1667                 :            :         unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert);
    1668                 :            : extern struct vm_area_struct *vma_merge(struct mm_struct *,
    1669                 :            :         struct vm_area_struct *prev, unsigned long addr, unsigned long end,
    1670                 :            :         unsigned long vm_flags, struct anon_vma *, struct file *, pgoff_t,
    1671                 :            :         struct mempolicy *, const char __user *);
    1672                 :            : extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
    1673                 :            : extern int split_vma(struct mm_struct *,
    1674                 :            :         struct vm_area_struct *, unsigned long addr, int new_below);
    1675                 :            : extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
    1676                 :            : extern void __vma_link_rb(struct mm_struct *, struct vm_area_struct *,
    1677                 :            :         struct rb_node **, struct rb_node *);
    1678                 :            : extern void unlink_file_vma(struct vm_area_struct *);
    1679                 :            : extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
    1680                 :            :         unsigned long addr, unsigned long len, pgoff_t pgoff,
    1681                 :            :         bool *need_rmap_locks);
    1682                 :            : extern void exit_mmap(struct mm_struct *);
    1683                 :            : 
    1684                 :            : extern int mm_take_all_locks(struct mm_struct *mm);
    1685                 :            : extern void mm_drop_all_locks(struct mm_struct *mm);
    1686                 :            : 
    1687                 :            : extern void set_mm_exe_file(struct mm_struct *mm, struct file *new_exe_file);
    1688                 :            : extern struct file *get_mm_exe_file(struct mm_struct *mm);
    1689                 :            : 
    1690                 :            : extern int may_expand_vm(struct mm_struct *mm, unsigned long npages);
    1691                 :            : extern int install_special_mapping(struct mm_struct *mm,
    1692                 :            :                                    unsigned long addr, unsigned long len,
    1693                 :            :                                    unsigned long flags, struct page **pages);
    1694                 :            : 
    1695                 :            : extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned long, unsigned long, unsigned long);
    1696                 :            : 
    1697                 :            : extern unsigned long mmap_region(struct file *file, unsigned long addr,
    1698                 :            :         unsigned long len, vm_flags_t vm_flags, unsigned long pgoff);
    1699                 :            : extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
    1700                 :            :         unsigned long len, unsigned long prot, unsigned long flags,
    1701                 :            :         unsigned long pgoff, unsigned long *populate);
    1702                 :            : extern int do_munmap(struct mm_struct *, unsigned long, size_t);
    1703                 :            : 
    1704                 :            : #ifdef CONFIG_MMU
    1705                 :            : extern int __mm_populate(unsigned long addr, unsigned long len,
    1706                 :            :                          int ignore_errors);
    1707                 :            : static inline void mm_populate(unsigned long addr, unsigned long len)
    1708                 :            : {
    1709                 :            :         /* Ignore errors */
    1710                 :     469834 :         (void) __mm_populate(addr, len, 1);
    1711                 :            : }
    1712                 :            : #else
    1713                 :            : static inline void mm_populate(unsigned long addr, unsigned long len) {}
    1714                 :            : #endif
    1715                 :            : 
    1716                 :            : /* These take the mm semaphore themselves */
    1717                 :            : extern unsigned long vm_brk(unsigned long, unsigned long);
    1718                 :            : extern int vm_munmap(unsigned long, size_t);
    1719                 :            : extern unsigned long vm_mmap(struct file *, unsigned long,
    1720                 :            :         unsigned long, unsigned long,
    1721                 :            :         unsigned long, unsigned long);
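
A hedged caller sketch: because vm_mmap() takes mmap_sem itself, kernel code
can create a mapping without any locking of its own (the function name is
hypothetical; 'file' and 'size' are assumed):

        static int example_map_file(struct file *file, unsigned long size)
        {
                unsigned long addr;

                addr = vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
                if (IS_ERR_VALUE(addr))
                        return (int)addr;       /* addr holds -errno on failure */
                return 0;
        }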
    1722                 :            : 
    1723                 :            : struct vm_unmapped_area_info {
    1724                 :            : #define VM_UNMAPPED_AREA_TOPDOWN 1
    1725                 :            :         unsigned long flags;
    1726                 :            :         unsigned long length;
    1727                 :            :         unsigned long low_limit;
    1728                 :            :         unsigned long high_limit;
    1729                 :            :         unsigned long align_mask;
    1730                 :            :         unsigned long align_offset;
    1731                 :            : };
    1732                 :            : 
    1733                 :            : extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
    1734                 :            : extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
    1735                 :            : 
    1736                 :            : /*
    1737                 :            :  * Search for an unmapped address range.
    1738                 :            :  *
    1739                 :            :  * We are looking for a range that:
    1740                 :            :  * - does not intersect with any VMA;
    1741                 :            :  * - is contained within the [low_limit, high_limit) interval;
    1742                 :            :  * - is at least the desired size;
    1743                 :            :  * - satisfies (begin_addr & align_mask) == (align_offset & align_mask).
    1744                 :            :  */
    1745                 :            : static inline unsigned long
    1746                 :            : vm_unmapped_area(struct vm_unmapped_area_info *info)
    1747                 :            : {
    1748                 :            :         if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
    1749                 :            :                 return unmapped_area(info);
    1750                 :            :         else
    1751                 :            :                 return unmapped_area_topdown(info);
    1752                 :            : }
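
A sketch of an arch_get_unmapped_area()-style caller filling in the struct
above; the PAGE_SIZE/TASK_SIZE limits shown are typical choices, not
something this API mandates:

        static unsigned long example_get_area(unsigned long len)
        {
                struct vm_unmapped_area_info info;

                info.flags = VM_UNMAPPED_AREA_TOPDOWN;  /* search high to low */
                info.length = len;
                info.low_limit = PAGE_SIZE;     /* never hand out page zero */
                info.high_limit = TASK_SIZE;
                info.align_mask = 0;            /* no alignment constraint */
                info.align_offset = 0;

                return vm_unmapped_area(&info); /* address, or -ENOMEM */
        }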
    1753                 :            : 
    1754                 :            : /* truncate.c */
    1755                 :            : extern void truncate_inode_pages(struct address_space *, loff_t);
    1756                 :            : extern void truncate_inode_pages_range(struct address_space *,
    1757                 :            :                                        loff_t lstart, loff_t lend);
    1758                 :            : 
    1759                 :            : /* generic vm_area_ops exported for stackable file systems */
    1760                 :            : extern int filemap_fault(struct vm_area_struct *, struct vm_fault *);
    1761                 :            : extern int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf);
    1762                 :            : 
    1763                 :            : /* mm/page-writeback.c */
    1764                 :            : int write_one_page(struct page *page, int wait);
    1765                 :            : void task_dirty_inc(struct task_struct *tsk);
    1766                 :            : 
    1767                 :            : /* readahead.c */
    1768                 :            : #define VM_MAX_READAHEAD        128     /* kbytes */
    1769                 :            : #define VM_MIN_READAHEAD        16      /* kbytes (includes current page) */
    1770                 :            : 
    1771                 :            : int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
    1772                 :            :                         pgoff_t offset, unsigned long nr_to_read);
    1773                 :            : 
    1774                 :            : void page_cache_sync_readahead(struct address_space *mapping,
    1775                 :            :                                struct file_ra_state *ra,
    1776                 :            :                                struct file *filp,
    1777                 :            :                                pgoff_t offset,
    1778                 :            :                                unsigned long size);
    1779                 :            : 
    1780                 :            : void page_cache_async_readahead(struct address_space *mapping,
    1781                 :            :                                 struct file_ra_state *ra,
    1782                 :            :                                 struct file *filp,
    1783                 :            :                                 struct page *pg,
    1784                 :            :                                 pgoff_t offset,
    1785                 :            :                                 unsigned long size);
    1786                 :            : 
    1787                 :            : unsigned long max_sane_readahead(unsigned long nr);
    1788                 :            : unsigned long ra_submit(struct file_ra_state *ra,
    1789                 :            :                         struct address_space *mapping,
    1790                 :            :                         struct file *filp);
    1791                 :            : 
    1792                 :            : /* Generic expand stack which grows the stack according to GROWS{UP,DOWN} */
    1793                 :            : extern int expand_stack(struct vm_area_struct *vma, unsigned long address);
    1794                 :            : 
    1795                 :            : /* CONFIG_STACK_GROWSUP still needs to grow downwards in some places */
    1796                 :            : extern int expand_downwards(struct vm_area_struct *vma,
    1797                 :            :                 unsigned long address);
    1798                 :            : #if VM_GROWSUP
    1799                 :            : extern int expand_upwards(struct vm_area_struct *vma, unsigned long address);
    1800                 :            : #else
    1801                 :            :   #define expand_upwards(vma, address) do { } while (0)
    1802                 :            : #endif
    1803                 :            : 
    1804                 :            : /* Look up the first VMA which satisfies addr < vm_end; NULL if none. */
    1805                 :            : extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
    1806                 :            : extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
    1807                 :            :                                              struct vm_area_struct **pprev);
    1808                 :            : 
    1809                 :            : /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
    1810                 :            :    NULL if none.  Assume start_addr < end_addr. */
    1811                 :            : static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
    1812                 :            : {
    1813                 :     149017 :         struct vm_area_struct * vma = find_vma(mm,start_addr);
    1814                 :            : 
    1815 [ +  - ][ +  - ][ +  - ][ +  + ] :     149017 :         if (vma && end_addr <= vma->vm_start)
    1816                 :            :                 vma = NULL;
    1817                 :            :         return vma;
    1818                 :            : }
    1819                 :            : 
    1820                 :            : static inline unsigned long vma_pages(struct vm_area_struct *vma)
    1821                 :            : {
    1822                 :   28032987 :         return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
    1823                 :            : }
    1824                 :            : 
    1825                 :            : /* Look up the first VMA which exactly matches the interval vm_start ... vm_end */
    1826                 :            : static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
    1827                 :            :                                 unsigned long vm_start, unsigned long vm_end)
    1828                 :            : {
    1829                 :            :         struct vm_area_struct *vma = find_vma(mm, vm_start);
    1830                 :            : 
    1831                 :            :         if (vma && (vma->vm_start != vm_start || vma->vm_end != vm_end))
    1832                 :            :                 vma = NULL;
    1833                 :            : 
    1834                 :            :         return vma;
    1835                 :            : }
    1836                 :            : 
    1837                 :            : #ifdef CONFIG_MMU
    1838                 :            : pgprot_t vm_get_page_prot(unsigned long vm_flags);
    1839                 :            : #else
    1840                 :            : static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
    1841                 :            : {
    1842                 :            :         return __pgprot(0);
    1843                 :            : }
    1844                 :            : #endif
    1845                 :            : 
    1846                 :            : #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
    1847                 :            : unsigned long change_prot_numa(struct vm_area_struct *vma,
    1848                 :            :                         unsigned long start, unsigned long end);
    1849                 :            : #endif
    1850                 :            : 
    1851                 :            : struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr);
    1852                 :            : int remap_pfn_range(struct vm_area_struct *, unsigned long addr,
    1853                 :            :                         unsigned long pfn, unsigned long size, pgprot_t);
    1854                 :            : int vm_insert_page(struct vm_area_struct *, unsigned long addr, struct page *);
    1855                 :            : int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
    1856                 :            :                         unsigned long pfn);
    1857                 :            : int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
    1858                 :            :                         unsigned long pfn);
    1859                 :            : int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len);
    1860                 :            : 
    1861                 :            : 
    1862                 :            : struct page *follow_page_mask(struct vm_area_struct *vma,
    1863                 :            :                               unsigned long address, unsigned int foll_flags,
    1864                 :            :                               unsigned int *page_mask);
    1865                 :            : 
    1866                 :            : static inline struct page *follow_page(struct vm_area_struct *vma,
    1867                 :            :                 unsigned long address, unsigned int foll_flags)
    1868                 :            : {
    1869                 :            :         unsigned int unused_page_mask;
    1870                 :            :         return follow_page_mask(vma, address, foll_flags, &unused_page_mask);
    1871                 :            : }
    1872                 :            : 
    1873                 :            : #define FOLL_WRITE      0x01    /* check pte is writable */
    1874                 :            : #define FOLL_TOUCH      0x02    /* mark page accessed */
    1875                 :            : #define FOLL_GET        0x04    /* do get_page on page */
    1876                 :            : #define FOLL_DUMP       0x08    /* give error on hole if it would be zero */
    1877                 :            : #define FOLL_FORCE      0x10    /* get_user_pages read/write w/o permission */
    1878                 :            : #define FOLL_NOWAIT     0x20    /* if a disk transfer is needed, start the IO
    1879                 :            :                                  * and return without waiting upon it */
    1880                 :            : #define FOLL_MLOCK      0x40    /* mark page as mlocked */
    1881                 :            : #define FOLL_SPLIT      0x80    /* don't return transhuge pages, split them */
    1882                 :            : #define FOLL_HWPOISON   0x100   /* check page is hwpoisoned */
    1883                 :            : #define FOLL_NUMA       0x200   /* force NUMA hinting page fault */
    1884                 :            : #define FOLL_MIGRATION  0x400   /* wait for page to replace migration entry */
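
A minimal sketch combining these flags with follow_page(); it assumes
mmap_sem is held for read and that 'vma' and 'addr' are valid:

        struct page *page = follow_page(vma, addr, FOLL_GET | FOLL_TOUCH);

        if (page && !IS_ERR(page)) {
                /* ... examine the page ... */
                put_page(page);         /* drop the reference FOLL_GET took */
        }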
    1885                 :            : 
    1886                 :            : typedef int (*pte_fn_t)(pte_t *pte, pgtable_t token, unsigned long addr,
    1887                 :            :                         void *data);
    1888                 :            : extern int apply_to_page_range(struct mm_struct *mm, unsigned long address,
    1889                 :            :                                unsigned long size, pte_fn_t fn, void *data);
    1890                 :            : 
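
A hypothetical pte_fn_t callback, to illustrate the contract: it is invoked
once per PTE in the range, and a nonzero return value aborts the walk.

        static int example_count_present(pte_t *pte, pgtable_t token,
                                         unsigned long addr, void *data)
        {
                unsigned long *count = data;

                if (pte_present(*pte))
                        (*count)++;
                return 0;               /* keep walking */
        }

        /* usage: apply_to_page_range(mm, start, size,
                                      example_count_present, &count); */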
    1891                 :            : #ifdef CONFIG_PROC_FS
    1892                 :            : void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
    1893                 :            : #else
    1894                 :            : static inline void vm_stat_account(struct mm_struct *mm,
    1895                 :            :                         unsigned long flags, struct file *file, long pages)
    1896                 :            : {
    1897                 :            :         mm->total_vm += pages;
    1898                 :            : }
    1899                 :            : #endif /* CONFIG_PROC_FS */
    1900                 :            : 
    1901                 :            : #ifdef CONFIG_DEBUG_PAGEALLOC
    1902                 :            : extern void kernel_map_pages(struct page *page, int numpages, int enable);
    1903                 :            : #ifdef CONFIG_HIBERNATION
    1904                 :            : extern bool kernel_page_present(struct page *page);
    1905                 :            : #endif /* CONFIG_HIBERNATION */
    1906                 :            : #else
    1907                 :            : static inline void
    1908                 :            : kernel_map_pages(struct page *page, int numpages, int enable) {}
    1909                 :            : #ifdef CONFIG_HIBERNATION
    1910                 :            : static inline bool kernel_page_present(struct page *page) { return true; }
    1911                 :            : #endif /* CONFIG_HIBERNATION */
    1912                 :            : #endif
    1913                 :            : 
    1914                 :            : extern struct vm_area_struct *get_gate_vma(struct mm_struct *mm);
    1915                 :            : #ifdef  __HAVE_ARCH_GATE_AREA
    1916                 :            : int in_gate_area_no_mm(unsigned long addr);
    1917                 :            : int in_gate_area(struct mm_struct *mm, unsigned long addr);
    1918                 :            : #else
    1919                 :            : int in_gate_area_no_mm(unsigned long addr);
    1920                 :            : #define in_gate_area(mm, addr) ({(void)mm; in_gate_area_no_mm(addr);})
    1921                 :            : #endif  /* __HAVE_ARCH_GATE_AREA */
    1922                 :            : 
    1923                 :            : #ifdef CONFIG_SYSCTL
    1924                 :            : extern int sysctl_drop_caches;
    1925                 :            : int drop_caches_sysctl_handler(struct ctl_table *, int,
    1926                 :            :                                         void __user *, size_t *, loff_t *);
    1927                 :            : #endif
    1928                 :            : 
    1929                 :            : unsigned long shrink_slab(struct shrink_control *shrink,
    1930                 :            :                           unsigned long nr_pages_scanned,
    1931                 :            :                           unsigned long lru_pages);
    1932                 :            : 
    1933                 :            : #ifndef CONFIG_MMU
    1934                 :            : #define randomize_va_space 0
    1935                 :            : #else
    1936                 :            : extern int randomize_va_space;
    1937                 :            : #endif
    1938                 :            : 
    1939                 :            : const char * arch_vma_name(struct vm_area_struct *vma);
    1940                 :            : void print_vma_addr(char *prefix, unsigned long rip);
    1941                 :            : 
    1942                 :            : void sparse_mem_maps_populate_node(struct page **map_map,
    1943                 :            :                                    unsigned long pnum_begin,
    1944                 :            :                                    unsigned long pnum_end,
    1945                 :            :                                    unsigned long map_count,
    1946                 :            :                                    int nodeid);
    1947                 :            : 
    1948                 :            : struct page *sparse_mem_map_populate(unsigned long pnum, int nid);
    1949                 :            : pgd_t *vmemmap_pgd_populate(unsigned long addr, int node);
    1950                 :            : pud_t *vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node);
    1951                 :            : pmd_t *vmemmap_pmd_populate(pud_t *pud, unsigned long addr, int node);
    1952                 :            : pte_t *vmemmap_pte_populate(pmd_t *pmd, unsigned long addr, int node);
    1953                 :            : void *vmemmap_alloc_block(unsigned long size, int node);
    1954                 :            : void *vmemmap_alloc_block_buf(unsigned long size, int node);
    1955                 :            : void vmemmap_verify(pte_t *, int, unsigned long, unsigned long);
    1956                 :            : int vmemmap_populate_basepages(unsigned long start, unsigned long end,
    1957                 :            :                                int node);
    1958                 :            : int vmemmap_populate(unsigned long start, unsigned long end, int node);
    1959                 :            : void vmemmap_populate_print_last(void);
    1960                 :            : #ifdef CONFIG_MEMORY_HOTPLUG
    1961                 :            : void vmemmap_free(unsigned long start, unsigned long end);
    1962                 :            : #endif
    1963                 :            : void register_page_bootmem_memmap(unsigned long section_nr, struct page *map,
    1964                 :            :                                   unsigned long size);
    1965                 :            : 
    1966                 :            : enum mf_flags {
    1967                 :            :         MF_COUNT_INCREASED = 1 << 0,
    1968                 :            :         MF_ACTION_REQUIRED = 1 << 1,
    1969                 :            :         MF_MUST_KILL = 1 << 2,
    1970                 :            :         MF_SOFT_OFFLINE = 1 << 3,
    1971                 :            : };
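
A hedged sketch of how a machine-check handler might combine these flags when
reporting a corrupted page ('pfn' and 'trapno' are assumed to come from the
MCE context):

        /* the mapping task must be killed; recovery action is required now */
        memory_failure(pfn, trapno, MF_ACTION_REQUIRED | MF_MUST_KILL);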
    1972                 :            : extern int memory_failure(unsigned long pfn, int trapno, int flags);
    1973                 :            : extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
    1974                 :            : extern int unpoison_memory(unsigned long pfn);
    1975                 :            : extern int sysctl_memory_failure_early_kill;
    1976                 :            : extern int sysctl_memory_failure_recovery;
    1977                 :            : extern void shake_page(struct page *p, int access);
    1978                 :            : extern atomic_long_t num_poisoned_pages;
    1979                 :            : extern int soft_offline_page(struct page *page, int flags);
    1980                 :            : 
    1981                 :            : extern void dump_page(struct page *page);
    1982                 :            : 
    1983                 :            : #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
    1984                 :            : extern void clear_huge_page(struct page *page,
    1985                 :            :                             unsigned long addr,
    1986                 :            :                             unsigned int pages_per_huge_page);
    1987                 :            : extern void copy_user_huge_page(struct page *dst, struct page *src,
    1988                 :            :                                 unsigned long addr, struct vm_area_struct *vma,
    1989                 :            :                                 unsigned int pages_per_huge_page);
    1990                 :            : #endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
    1991                 :            : 
    1992                 :            : #ifdef CONFIG_DEBUG_PAGEALLOC
    1993                 :            : extern unsigned int _debug_guardpage_minorder;
    1994                 :            : 
    1995                 :            : static inline unsigned int debug_guardpage_minorder(void)
    1996                 :            : {
    1997                 :            :         return _debug_guardpage_minorder;
    1998                 :            : }
    1999                 :            : 
    2000                 :            : static inline bool page_is_guard(struct page *page)
    2001                 :            : {
    2002                 :            :         return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
    2003                 :            : }
    2004                 :            : #else
    2005                 :            : static inline unsigned int debug_guardpage_minorder(void) { return 0; }
    2006                 :            : static inline bool page_is_guard(struct page *page) { return false; }
    2007                 :            : #endif /* CONFIG_DEBUG_PAGEALLOC */
    2008                 :            : 
    2009                 :            : #if MAX_NUMNODES > 1
    2010                 :            : void __init setup_nr_node_ids(void);
    2011                 :            : #else
    2012                 :            : static inline void setup_nr_node_ids(void) {}
    2013                 :            : #endif
    2014                 :            : 
    2015                 :            : #endif /* __KERNEL__ */
    2016                 :            : #endif /* _LINUX_MM_H */

Generated by: LCOV version 1.9