LCOV - code coverage report
Current view: top level - mm - mlock.c (source / functions)
Test: coverage.info
Date: 2014-04-16
             Hit    Total   Coverage
Lines:       204     241     84.6 %
Functions:    22      23     95.7 %
Branches:    143     183     78.1 %

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  *      linux/mm/mlock.c
       3                 :            :  *
       4                 :            :  *  (C) Copyright 1995 Linus Torvalds
       5                 :            :  *  (C) Copyright 2002 Christoph Hellwig
       6                 :            :  */
       7                 :            : 
       8                 :            : #include <linux/capability.h>
       9                 :            : #include <linux/mman.h>
      10                 :            : #include <linux/mm.h>
      11                 :            : #include <linux/swap.h>
      12                 :            : #include <linux/swapops.h>
      13                 :            : #include <linux/pagemap.h>
      14                 :            : #include <linux/pagevec.h>
      15                 :            : #include <linux/mempolicy.h>
      16                 :            : #include <linux/syscalls.h>
      17                 :            : #include <linux/sched.h>
      18                 :            : #include <linux/export.h>
      19                 :            : #include <linux/rmap.h>
      20                 :            : #include <linux/mmzone.h>
      21                 :            : #include <linux/hugetlb.h>
      22                 :            : #include <linux/memcontrol.h>
      23                 :            : #include <linux/mm_inline.h>
      24                 :            : 
      25                 :            : #include "internal.h"
      26                 :            : 
      27                 :          0 : int can_do_mlock(void)
      28                 :            : {
      29         [ +  + ]:        203 :         if (capable(CAP_IPC_LOCK))
      30                 :            :                 return 1;
      31         [ +  + ]:          4 :         if (rlimit(RLIMIT_MEMLOCK) != 0)
      32                 :            :                 return 1;
      33                 :          2 :         return 0;
      34                 :            : }
      35                 :            : EXPORT_SYMBOL(can_do_mlock);
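
An aside for context, not part of mlock.c: the same policy is visible from
userspace, where mlock() is permitted if the caller holds CAP_IPC_LOCK or has
a nonzero RLIMIT_MEMLOCK. A minimal sketch that inspects the limit up front
(the capability test is omitted; checking it would need libcap):

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl;

            if (getrlimit(RLIMIT_MEMLOCK, &rl) != 0) {
                    perror("getrlimit");
                    return 1;
            }
            /* can_do_mlock() returns 1 when this limit is nonzero */
            printf("RLIMIT_MEMLOCK: soft=%llu hard=%llu\n",
                   (unsigned long long)rl.rlim_cur,
                   (unsigned long long)rl.rlim_max);
            return 0;
    }
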
      36                 :            : 
      37                 :            : /*
      38                 :            :  * Mlocked pages are marked with PageMlocked() flag for efficient testing
      39                 :            :  * in vmscan and, possibly, the fault path; and to support semi-accurate
      40                 :            :  * statistics.
      41                 :            :  *
      42                 :            :  * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
      43                 :            :  * be placed on the LRU "unevictable" list, rather than the [in]active lists.
      44                 :            :  * The unevictable list is an LRU sibling list to the [in]active lists.
      45                 :            :  * PageUnevictable is set to indicate the unevictable state.
      46                 :            :  *
      47                 :            :  * When lazy mlocking via vmscan, it is important to ensure that the
      48                 :            :  * vma's VM_LOCKED status is not concurrently being modified, otherwise we
      49                 :            :  * may have mlocked a page that is being munlocked. So lazy mlock must take
      50                 :            :  * the mmap_sem for read, and verify that the vma really is locked
      51                 :            :  * (see mm/rmap.c).
      52                 :            :  */
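
The NR_MLOCK and unevictable-LRU accounting described above can be observed
from userspace via the Mlocked: and Unevictable: fields of /proc/meminfo. An
illustrative sketch (standalone program, not part of mlock.c):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static void dump_meminfo(const char *tag)
    {
            char line[128];
            FILE *f = fopen("/proc/meminfo", "r");

            if (!f)
                    return;
            while (fgets(line, sizeof(line), f))
                    if (!strncmp(line, "Unevictable:", 12) ||
                        !strncmp(line, "Mlocked:", 8))
                            printf("%s %s", tag, line);
            fclose(f);
    }

    int main(void)
    {
            static char buf[1 << 20];  /* 1 MiB; mlock() rounds to pages */

            dump_meminfo("before:");
            if (mlock(buf, sizeof(buf)) == 0) {
                    dump_meminfo("after: ");
                    munlock(buf, sizeof(buf));
            }
            return 0;
    }
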
      53                 :            : 
      54                 :            : /*
      55                 :            :  *  LRU accounting for clear_page_mlock()
      56                 :            :  */
      57                 :          0 : void clear_page_mlock(struct page *page)
      58                 :            : {
      59         [ +  - ]:          1 :         if (!TestClearPageMlocked(page))
      60                 :          0 :                 return;
      61                 :            : 
      62                 :          1 :         mod_zone_page_state(page_zone(page), NR_MLOCK,
      63                 :            :                             -hpage_nr_pages(page));
      64                 :            :         count_vm_event(UNEVICTABLE_PGCLEARED);
      65         [ +  - ]:          1 :         if (!isolate_lru_page(page)) {
      66                 :          1 :                 putback_lru_page(page);
      67                 :            :         } else {
      68                 :            :                 /*
       69                 :            :                  * We lost the race. The page has already moved to the evictable list.
      70                 :            :                  */
      71         [ #  # ]:          0 :                 if (PageUnevictable(page))
      72                 :            :                         count_vm_event(UNEVICTABLE_PGSTRANDED);
      73                 :            :         }
      74                 :            : }
      75                 :            : 
      76                 :            : /*
      77                 :            :  * Mark page as mlocked if not already.
      78                 :            :  * If page on LRU, isolate and putback to move to unevictable list.
      79                 :            :  */
      80                 :          0 : void mlock_vma_page(struct page *page)
      81                 :            : {
      82         [ -  + ]:      10404 :         BUG_ON(!PageLocked(page));
      83                 :            : 
      84         [ +  + ]:      10404 :         if (!TestSetPageMlocked(page)) {
      85                 :       3782 :                 mod_zone_page_state(page_zone(page), NR_MLOCK,
      86                 :            :                                     hpage_nr_pages(page));
      87                 :            :                 count_vm_event(UNEVICTABLE_PGMLOCKED);
      88         [ +  - ]:       3782 :                 if (!isolate_lru_page(page))
      89                 :       3782 :                         putback_lru_page(page);
      90                 :            :         }
      91                 :          0 : }
      92                 :            : 
      93                 :            : /*
      94                 :            :  * Isolate a page from LRU with optional get_page() pin.
      95                 :            :  * Assumes lru_lock already held and page already pinned.
      96                 :            :  */
      97                 :          0 : static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
      98                 :            : {
      99         [ +  - ]:      10058 :         if (PageLRU(page)) {
     100                 :            :                 struct lruvec *lruvec;
     101                 :            : 
     102                 :      10058 :                 lruvec = mem_cgroup_page_lruvec(page, page_zone(page));
     103         [ +  + ]:      10058 :                 if (getpage)
     104                 :            :                         get_page(page);
     105                 :            :                 ClearPageLRU(page);
     106                 :            :                 del_page_from_lru_list(page, lruvec, page_lru(page));
     107                 :      10058 :                 return true;
     108                 :            :         }
     109                 :            : 
     110                 :            :         return false;
     111                 :            : }
     112                 :            : 
     113                 :            : /*
     114                 :            :  * Finish munlock after successful page isolation
     115                 :            :  *
     116                 :            :  * Page must be locked. This is a wrapper for try_to_munlock()
     117                 :            :  * and putback_lru_page() with munlock accounting.
     118                 :            :  */
     119                 :          0 : static void __munlock_isolated_page(struct page *page)
     120                 :            : {
     121                 :            :         int ret = SWAP_AGAIN;
     122                 :            : 
     123                 :            :         /*
     124                 :            :          * Optimization: if the page was mapped just once, that's our mapping
     125                 :            :          * and we don't need to check all the other vmas.
     126                 :            :          */
     127         [ +  + ]:       1883 :         if (page_mapcount(page) > 1)
     128                 :       1870 :                 ret = try_to_munlock(page);
     129                 :            : 
      130                 :            :         /* Did try_to_munlock() succeed or punt? */
     131         [ +  - ]:       1883 :         if (ret != SWAP_MLOCK)
     132                 :            :                 count_vm_event(UNEVICTABLE_PGMUNLOCKED);
     133                 :            : 
     134                 :       1883 :         putback_lru_page(page);
     135                 :       1883 : }
     136                 :            : 
     137                 :            : /*
      138                 :            :  * Accounting for failed page isolation during munlock
     139                 :            :  *
     140                 :            :  * Performs accounting when page isolation fails in munlock. There is nothing
     141                 :            :  * else to do because it means some other task has already removed the page
     142                 :            :  * from the LRU. putback_lru_page() will take care of removing the page from
     143                 :            :  * the unevictable list, if necessary. vmscan [page_referenced()] will move
     144                 :            :  * the page back to the unevictable list if some other vma has it mlocked.
     145                 :            :  */
     146                 :          0 : static void __munlock_isolation_failed(struct page *page)
     147                 :            : {
     148         [ #  # ]:          0 :         if (PageUnevictable(page))
     149                 :            :                 __count_vm_event(UNEVICTABLE_PGSTRANDED);
     150                 :            :         else
     151                 :            :                 __count_vm_event(UNEVICTABLE_PGMUNLOCKED);
     152                 :          0 : }
     153                 :            : 
     154                 :            : /**
     155                 :            :  * munlock_vma_page - munlock a vma page
     156                 :            :  * @page - page to be unlocked, either a normal page or THP page head
     157                 :            :  *
     158                 :            :  * returns the size of the page as a page mask (0 for normal page,
     159                 :            :  *         HPAGE_PMD_NR - 1 for THP head page)
     160                 :            :  *
     161                 :            :  * called from munlock()/munmap() path with page supposedly on the LRU.
     162                 :            :  * When we munlock a page, because the vma where we found the page is being
     163                 :            :  * munlock()ed or munmap()ed, we want to check whether other vmas hold the
     164                 :            :  * page locked so that we can leave it on the unevictable lru list and not
     165                 :            :  * bother vmscan with it.  However, to walk the page's rmap list in
     166                 :            :  * try_to_munlock() we must isolate the page from the LRU.  If some other
     167                 :            :  * task has removed the page from the LRU, we won't be able to do that.
     168                 :            :  * So we clear the PageMlocked as we might not get another chance.  If we
     169                 :            :  * can't isolate the page, we leave it for putback_lru_page() and vmscan
     170                 :            :  * [page_referenced()/try_to_unmap()] to deal with.
     171                 :            :  */
     172                 :          0 : unsigned int munlock_vma_page(struct page *page)
     173                 :            : {
     174                 :            :         unsigned int nr_pages;
     175                 :         14 :         struct zone *zone = page_zone(page);
     176                 :            : 
     177         [ -  + ]:         14 :         BUG_ON(!PageLocked(page));
     178                 :            : 
     179                 :            :         /*
     180                 :            :          * Serialize with any parallel __split_huge_page_refcount() which
     181                 :            :          * might otherwise copy PageMlocked to part of the tail pages before
     182                 :            :          * we clear it in the head page. It also stabilizes hpage_nr_pages().
     183                 :            :          */
     184                 :            :         spin_lock_irq(&zone->lru_lock);
     185                 :            : 
     186                 :            :         nr_pages = hpage_nr_pages(page);
     187         [ +  + ]:         14 :         if (!TestClearPageMlocked(page))
     188                 :            :                 goto unlock_out;
     189                 :            : 
     190                 :         13 :         __mod_zone_page_state(zone, NR_MLOCK, -nr_pages);
     191                 :            : 
     192         [ +  - ]:         13 :         if (__munlock_isolate_lru_page(page, true)) {
     193                 :            :                 spin_unlock_irq(&zone->lru_lock);
     194                 :         13 :                 __munlock_isolated_page(page);
     195                 :         13 :                 goto out;
     196                 :            :         }
     197                 :          0 :         __munlock_isolation_failed(page);
     198                 :            : 
     199                 :            : unlock_out:
     200                 :            :         spin_unlock_irq(&zone->lru_lock);
     201                 :            : 
     202                 :            : out:
     203                 :         14 :         return nr_pages - 1;
     204                 :            : }
     205                 :            : 
     206                 :            : /**
     207                 :            :  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
     208                 :            :  * @vma:   target vma
     209                 :            :  * @start: start address
     210                 :            :  * @end:   end address
     211                 :            :  *
     212                 :            :  * This takes care of making the pages present too.
     213                 :            :  *
     214                 :            :  * return 0 on success, negative error code on error.
     215                 :            :  *
     216                 :            :  * vma->vm_mm->mmap_sem must be held for at least read.
     217                 :            :  */
     218                 :          0 : long __mlock_vma_pages_range(struct vm_area_struct *vma,
     219                 :            :                 unsigned long start, unsigned long end, int *nonblocking)
     220                 :            : {
     221                 :       2498 :         struct mm_struct *mm = vma->vm_mm;
     222                 :       2498 :         unsigned long nr_pages = (end - start) / PAGE_SIZE;
     223                 :            :         int gup_flags;
     224                 :            : 
     225                 :            :         VM_BUG_ON(start & ~PAGE_MASK);
     226                 :            :         VM_BUG_ON(end   & ~PAGE_MASK);
     227                 :            :         VM_BUG_ON(start < vma->vm_start);
     228                 :            :         VM_BUG_ON(end   > vma->vm_end);
     229                 :            :         VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
     230                 :            : 
     231                 :            :         gup_flags = FOLL_TOUCH | FOLL_MLOCK;
     232                 :            :         /*
     233                 :            :          * We want to touch writable mappings with a write fault in order
     234                 :            :          * to break COW, except for shared mappings because these don't COW
     235                 :            :          * and we would not want to dirty them for nothing.
     236                 :            :          */
     237         [ +  + ]:       2498 :         if ((vma->vm_flags & (VM_WRITE | VM_SHARED)) == VM_WRITE)
     238                 :            :                 gup_flags |= FOLL_WRITE;
     239                 :            : 
     240                 :            :         /*
     241                 :            :          * We want mlock to succeed for regions that have any permissions
     242                 :            :          * other than PROT_NONE.
     243                 :            :          */
     244         [ +  + ]:       2498 :         if (vma->vm_flags & (VM_READ | VM_WRITE | VM_EXEC))
     245                 :       2484 :                 gup_flags |= FOLL_FORCE;
     246                 :            : 
     247                 :            :         /*
     248                 :            :          * We made sure addr is within a VMA, so the following will
     249                 :            :          * not result in a stack expansion that recurses back here.
     250                 :            :          */
     251                 :       2498 :         return __get_user_pages(current, mm, start, nr_pages, gup_flags,
     252                 :            :                                 NULL, NULL, nonblocking);
     253                 :            : }
     254                 :            : 
     255                 :            : /*
      256                 :            :  * Convert the get_user_pages() return value to a POSIX mlock() error code
     257                 :            :  */
     258                 :            : static int __mlock_posix_error_return(long retval)
     259                 :            : {
     260         [ -  + ]:         10 :         if (retval == -EFAULT)
     261                 :            :                 retval = -ENOMEM;
     262         [ #  # ]:          0 :         else if (retval == -ENOMEM)
     263                 :            :                 retval = -EAGAIN;
     264                 :            :         return retval;
     265                 :            : }
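
The conversion matters because POSIX specifies ENOMEM (not EFAULT) when
mlock() is applied to an unmapped address range. A quick standalone
illustration, assuming the low page is unmapped as it normally is on Linux
(mmap_min_addr):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* An address in the (normally) unmapped low range */
            if (mlock((void *)0x1000, 4096) != 0 && errno == ENOMEM)
                    puts("mlock on unmapped range -> ENOMEM, as POSIX requires");
            return 0;
    }
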
     266                 :            : 
     267                 :            : /*
     268                 :            :  * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
     269                 :            :  *
      270                 :            :  * The fast path is available only for evictable pages with a single mapping.
      271                 :            :  * Then we can bypass the per-cpu pvec and get better performance.
      272                 :            :  * When mapcount > 1 we need try_to_munlock(), which can fail.
      273                 :            :  * When !page_evictable(), we need the full redo logic of putback_lru_page to
      274                 :            :  * avoid leaving an evictable page on the unevictable list.
     275                 :            :  *
      276                 :            :  * On success, @page is added to @pvec and @pgrescued is incremented
      277                 :            :  * if the page was previously unevictable. @page is also unlocked.
     278                 :            :  */
     279                 :          0 : static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
     280                 :            :                 int *pgrescued)
     281                 :            : {
     282                 :            :         VM_BUG_ON_PAGE(PageLRU(page), page);
     283                 :            :         VM_BUG_ON_PAGE(!PageLocked(page), page);
     284                 :            : 
     285 [ +  + ][ +  - ]:      10045 :         if (page_mapcount(page) <= 1 && page_evictable(page)) {
     286                 :            :                 pagevec_add(pvec, page);
     287         [ +  - ]:       8175 :                 if (TestClearPageUnevictable(page))
     288                 :       8175 :                         (*pgrescued)++;
     289                 :       8175 :                 unlock_page(page);
     290                 :       8175 :                 return true;
     291                 :            :         }
     292                 :            : 
     293                 :            :         return false;
     294                 :            : }
     295                 :            : 
     296                 :            : /*
     297                 :            :  * Putback multiple evictable pages to the LRU
     298                 :            :  *
     299                 :            :  * Batched putback of evictable pages that bypasses the per-cpu pvec. Some of
     300                 :            :  * the pages might have meanwhile become unevictable but that is OK.
     301                 :            :  */
     302                 :          0 : static void __putback_lru_fast(struct pagevec *pvec, int pgrescued)
     303                 :            : {
     304                 :            :         count_vm_events(UNEVICTABLE_PGMUNLOCKED, pagevec_count(pvec));
     305                 :            :         /*
      306                 :            :          * __pagevec_lru_add() calls release_pages() so we don't call
     307                 :            :          * put_page() explicitly
     308                 :            :          */
     309                 :       1031 :         __pagevec_lru_add(pvec);
     310                 :            :         count_vm_events(UNEVICTABLE_PGRESCUED, pgrescued);
     311                 :       1031 : }
     312                 :            : 
     313                 :            : /*
     314                 :            :  * Munlock a batch of pages from the same zone
     315                 :            :  *
      316                 :            :  * The work is split into two main phases. The first phase clears the Mlocked flag
     317                 :            :  * and attempts to isolate the pages, all under a single zone lru lock.
     318                 :            :  * The second phase finishes the munlock only for pages where isolation
     319                 :            :  * succeeded.
     320                 :            :  *
     321                 :            :  * Note that the pagevec may be modified during the process.
     322                 :            :  */
     323                 :          0 : static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
     324                 :            : {
     325                 :            :         int i;
     326                 :       1079 :         int nr = pagevec_count(pvec);
     327                 :            :         int delta_munlocked;
     328                 :            :         struct pagevec pvec_putback;
     329                 :       1079 :         int pgrescued = 0;
     330                 :            : 
     331                 :            :         pagevec_init(&pvec_putback, 0);
     332                 :            : 
     333                 :            :         /* Phase 1: page isolation */
     334                 :            :         spin_lock_irq(&zone->lru_lock);
     335         [ +  + ]:      11124 :         for (i = 0; i < nr; i++) {
     336                 :      10045 :                 struct page *page = pvec->pages[i];
     337                 :            : 
     338         [ +  - ]:      10045 :                 if (TestClearPageMlocked(page)) {
     339                 :            :                         /*
     340                 :            :                          * We already have pin from follow_page_mask()
     341                 :            :                          * so we can spare the get_page() here.
     342                 :            :                          */
     343         [ +  - ]:      10045 :                         if (__munlock_isolate_lru_page(page, false))
     344                 :      10045 :                                 continue;
     345                 :            :                         else
     346                 :          0 :                                 __munlock_isolation_failed(page);
     347                 :            :                 }
     348                 :            : 
     349                 :            :                 /*
     350                 :            :                  * We won't be munlocking this page in the next phase
     351                 :            :                  * but we still need to release the follow_page_mask()
     352                 :            :                  * pin. We cannot do it under lru_lock however. If it's
     353                 :            :                  * the last pin, __page_cache_release() would deadlock.
     354                 :            :                  */
     355                 :          0 :                 pagevec_add(&pvec_putback, pvec->pages[i]);
     356                 :          0 :                 pvec->pages[i] = NULL;
     357                 :            :         }
     358                 :       1079 :         delta_munlocked = -nr + pagevec_count(&pvec_putback);
     359                 :       1079 :         __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
     360                 :            :         spin_unlock_irq(&zone->lru_lock);
     361                 :            : 
     362                 :            :         /* Now we can release pins of pages that we are not munlocking */
     363                 :            :         pagevec_release(&pvec_putback);
     364                 :            : 
     365                 :            :         /* Phase 2: page munlock */
     366         [ +  + ]:      11124 :         for (i = 0; i < nr; i++) {
     367                 :      10045 :                 struct page *page = pvec->pages[i];
     368                 :            : 
     369         [ +  - ]:      10045 :                 if (page) {
     370                 :            :                         lock_page(page);
     371         [ +  + ]:      10045 :                         if (!__putback_lru_fast_prepare(page, &pvec_putback,
     372                 :            :                                         &pgrescued)) {
     373                 :            :                                 /*
     374                 :            :                                  * Slow path. We don't want to lose the last
     375                 :            :                                  * pin before unlock_page()
     376                 :            :                                  */
     377                 :            :                                 get_page(page); /* for putback_lru_page() */
     378                 :       1870 :                                 __munlock_isolated_page(page);
     379                 :       1870 :                                 unlock_page(page);
     380                 :       1870 :                                 put_page(page); /* from follow_page_mask() */
     381                 :            :                         }
     382                 :            :                 }
     383                 :            :         }
     384                 :            : 
     385                 :            :         /*
     386                 :            :          * Phase 3: page putback for pages that qualified for the fast path
     387                 :            :          * This will also call put_page() to return pin from follow_page_mask()
     388                 :            :          */
     389         [ +  + ]:       1079 :         if (pagevec_count(&pvec_putback))
     390                 :       1031 :                 __putback_lru_fast(&pvec_putback, pgrescued);
     391                 :       1079 : }
     392                 :            : 
     393                 :            : /*
     394                 :            :  * Fill up pagevec for __munlock_pagevec using pte walk
     395                 :            :  *
     396                 :            :  * The function expects that the struct page corresponding to @start address is
      397                 :            :  * a non-THP page already pinned and in the @pvec, and that it belongs to @zone.
     398                 :            :  *
     399                 :            :  * The rest of @pvec is filled by subsequent pages within the same pmd and same
     400                 :            :  * zone, as long as the pte's are present and vm_normal_page() succeeds. These
     401                 :            :  * pages also get pinned.
     402                 :            :  *
     403                 :            :  * Returns the address of the next page that should be scanned. This equals
     404                 :            :  * @start + PAGE_SIZE when no page could be added by the pte walk.
     405                 :            :  */
     406                 :          0 : static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
     407                 :            :                 struct vm_area_struct *vma, int zoneid, unsigned long start,
     408                 :            :                 unsigned long end)
     409                 :            : {
     410                 :            :         pte_t *pte;
     411                 :            :         spinlock_t *ptl;
     412                 :            : 
     413                 :            :         /*
     414                 :            :          * Initialize pte walk starting at the already pinned page where we
     415                 :            :          * are sure that there is a pte, as it was pinned under the same
     416                 :            :          * mmap_sem write op.
     417                 :            :          */
      418                 :       1079 :         pte = get_locked_pte(vma->vm_mm, start, &ptl);
     419                 :            :         /* Make sure we do not cross the page table boundary */
     420         [ +  + ]:       1079 :         end = pgd_addr_end(start, end);
     421                 :            :         end = pud_addr_end(start, end);
     422                 :            :         end = pmd_addr_end(start, end);
     423                 :            : 
     424                 :            :         /* The page next to the pinned page is the first we will try to get */
     425                 :       1079 :         start += PAGE_SIZE;
     426         [ +  + ]:       9417 :         while (start < end) {
     427                 :       9018 :                 struct page *page = NULL;
     428                 :       9018 :                 pte++;
     429         [ +  - ]:       9018 :                 if (pte_present(*pte))
     430                 :       9018 :                         page = vm_normal_page(vma, start, *pte);
     431                 :            :                 /*
      432                 :            :                  * Break if the page could not be obtained or the page's
      433                 :            :                  * node+zone does not match
     434                 :            :                  */
     435 [ +  - ][ +  + ]:       9018 :                 if (!page || page_zone_id(page) != zoneid)
     436                 :            :                         break;
     437                 :            : 
     438                 :            :                 get_page(page);
     439                 :            :                 /*
     440                 :            :                  * Increase the address that will be returned *before* the
     441                 :            :                  * eventual break due to pvec becoming full by adding the page
     442                 :            :                  */
     443                 :       8966 :                 start += PAGE_SIZE;
     444         [ +  + ]:       8966 :                 if (pagevec_add(pvec, page) == 0)
     445                 :            :                         break;
     446                 :            :         }
     447                 :       1079 :         pte_unmap_unlock(pte, ptl);
     448                 :       1079 :         return start;
     449                 :            : }
     450                 :            : 
     451                 :            : /*
      452                 :            :  * munlock_vma_pages_range() - munlock all pages in the vma range.
     453                 :            :  * @vma - vma containing range to be munlock()ed.
     454                 :            :  * @start - start address in @vma of the range
     455                 :            :  * @end - end of range in @vma.
     456                 :            :  *
     457                 :            :  *  For mremap(), munmap() and exit().
     458                 :            :  *
     459                 :            :  * Called with @vma VM_LOCKED.
     460                 :            :  *
     461                 :            :  * Returns with VM_LOCKED cleared.  Callers must be prepared to
     462                 :            :  * deal with this.
     463                 :            :  *
     464                 :            :  * We don't save and restore VM_LOCKED here because pages are
     465                 :            :  * still on lru.  In unmap path, pages might be scanned by reclaim
     466                 :            :  * and re-mlocked by try_to_{munlock|unmap} before we unmap and
     467                 :            :  * free them.  This will result in freeing mlocked pages.
     468                 :            :  */
     469                 :          0 : void munlock_vma_pages_range(struct vm_area_struct *vma,
     470                 :            :                              unsigned long start, unsigned long end)
     471                 :            : {
     472                 :        396 :         vma->vm_flags &= ~VM_LOCKED;
     473                 :            : 
     474         [ +  + ]:       1568 :         while (start < end) {
     475                 :            :                 struct page *page = NULL;
     476                 :            :                 unsigned int page_mask;
     477                 :            :                 unsigned long page_increm;
     478                 :            :                 struct pagevec pvec;
     479                 :            :                 struct zone *zone;
     480                 :            :                 int zoneid;
     481                 :            : 
     482                 :            :                 pagevec_init(&pvec, 0);
     483                 :            :                 /*
     484                 :            :                  * Although FOLL_DUMP is intended for get_dump_page(),
     485                 :            :                  * it just so happens that its special treatment of the
     486                 :            :                  * ZERO_PAGE (returning an error instead of doing get_page)
     487                 :            :                  * suits munlock very well (and if somehow an abnormal page
     488                 :            :                  * has sneaked into the range, we won't oops here: great).
     489                 :            :                  */
     490                 :       1172 :                 page = follow_page_mask(vma, start, FOLL_GET | FOLL_DUMP,
     491                 :            :                                 &page_mask);
     492                 :            : 
     493 [ +  + ][ +  + ]:       1568 :                 if (page && !IS_ERR(page)) {
     494                 :            :                         if (PageTransHuge(page)) {
     495                 :            :                                 lock_page(page);
     496                 :            :                                 /*
     497                 :            :                                  * Any THP page found by follow_page_mask() may
     498                 :            :                                  * have gotten split before reaching
     499                 :            :                                  * munlock_vma_page(), so we need to recompute
     500                 :            :                                  * the page_mask here.
     501                 :            :                                  */
     502                 :            :                                 page_mask = munlock_vma_page(page);
     503                 :            :                                 unlock_page(page);
     504                 :            :                                 put_page(page); /* follow_page_mask() */
     505                 :            :                         } else {
     506                 :            :                                 /*
     507                 :            :                                  * Non-huge pages are handled in batches via
     508                 :            :                                  * pagevec. The pin from follow_page_mask()
      509                 :            :                                  * prevents them from being collapsed into a THP.
     510                 :            :                                  */
     511                 :            :                                 pagevec_add(&pvec, page);
     512                 :       1079 :                                 zone = page_zone(page);
     513                 :            :                                 zoneid = page_zone_id(page);
     514                 :            : 
     515                 :            :                                 /*
     516                 :            :                                  * Try to fill the rest of pagevec using fast
     517                 :            :                                  * pte walk. This will also update start to
     518                 :            :                                  * the next page to process. Then munlock the
     519                 :            :                                  * pagevec.
     520                 :            :                                  */
     521                 :       1079 :                                 start = __munlock_pagevec_fill(&pvec, vma,
     522                 :            :                                                 zoneid, start, end);
     523                 :       1079 :                                 __munlock_pagevec(&pvec, zone);
     524                 :       1079 :                                 goto next;
     525                 :            :                         }
     526                 :            :                 }
     527                 :            :                 /* It's a bug to munlock in the middle of a THP page */
     528                 :            :                 VM_BUG_ON((start >> PAGE_SHIFT) & page_mask);
     529                 :         93 :                 page_increm = 1 + page_mask;
     530                 :         93 :                 start += page_increm * PAGE_SIZE;
     531                 :            : next:
     532                 :       1172 :                 cond_resched();
     533                 :            :         }
     534                 :        396 : }
     535                 :            : 
     536                 :            : /*
     537                 :            :  * mlock_fixup  - handle mlock[all]/munlock[all] requests.
     538                 :            :  *
     539                 :            :  * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
     540                 :            :  * munlock is a no-op.  However, for some special vmas, we go ahead and
     541                 :            :  * populate the ptes.
     542                 :            :  *
     543                 :            :  * For vmas that pass the filters, merge/split as appropriate.
     544                 :            :  */
     545                 :          0 : static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
     546                 :            :         unsigned long start, unsigned long end, vm_flags_t newflags)
     547                 :            : {
     548                 :        460 :         struct mm_struct *mm = vma->vm_mm;
     549                 :            :         pgoff_t pgoff;
     550                 :            :         int nr_pages;
     551                 :            :         int ret = 0;
     552                 :        460 :         int lock = !!(newflags & VM_LOCKED);
     553                 :            : 
     554 [ +  + ][ +  + ]:        460 :         if (newflags == vma->vm_flags || (vma->vm_flags & VM_SPECIAL) ||
     555         [ +  - ]:        361 :             is_vm_hugetlb_page(vma) || vma == get_gate_vma(current->mm))
     556                 :            :                 goto out;       /* don't set VM_LOCKED,  don't count */
     557                 :            : 
     558                 :        361 :         pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
     559                 :        361 :         *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
     560                 :            :                           vma->vm_file, pgoff, vma_policy(vma),
     561                 :            :                           vma_get_anon_name(vma));
     562         [ +  + ]:        361 :         if (*prev) {
     563                 :            :                 vma = *prev;
     564                 :            :                 goto success;
     565                 :            :         }
     566                 :            : 
     567         [ +  + ]:        347 :         if (start != vma->vm_start) {
     568                 :         11 :                 ret = split_vma(mm, vma, start, 1);
     569            [ + ]:         11 :                 if (ret)
     570                 :            :                         goto out;
     571                 :            :         }
     572                 :            : 
     573         [ +  + ]:        807 :         if (end != vma->vm_end) {
     574                 :          4 :                 ret = split_vma(mm, vma, end, 0);
     575         [ +  - ]:          4 :                 if (ret)
     576                 :            :                         goto out;
     577                 :            :         }
     578                 :            : 
     579                 :            : success:
     580                 :            :         /*
     581                 :            :          * Keep track of amount of locked VM.
     582                 :            :          */
     583                 :        361 :         nr_pages = (end - start) >> PAGE_SHIFT;
     584         [ +  + ]:        361 :         if (!lock)
     585                 :        166 :                 nr_pages = -nr_pages;
     586                 :        361 :         mm->locked_vm += nr_pages;
     587                 :            : 
     588                 :            :         /*
     589                 :            :          * vm_flags is protected by the mmap_sem held in write mode.
     590                 :            :          * It's okay if try_to_unmap_one unmaps a page just after we
     591                 :            :          * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
     592                 :            :          */
     593                 :            : 
     594         [ +  + ]:        361 :         if (lock)
     595                 :        195 :                 vma->vm_flags = newflags;
     596                 :            :         else
     597                 :        166 :                 munlock_vma_pages_range(vma, start, end);
     598                 :            : 
     599                 :            : out:
     600                 :          0 :         *prev = vma;
     601                 :          0 :         return ret;
     602                 :            : }
     603                 :            : 
     604                 :          0 : static int do_mlock(unsigned long start, size_t len, int on)
     605                 :            : {
     606                 :            :         unsigned long nstart, end, tmp;
     607                 :            :         struct vm_area_struct * vma, * prev;
     608                 :            :         int error;
     609                 :            : 
     610                 :            :         VM_BUG_ON(start & ~PAGE_MASK);
     611                 :            :         VM_BUG_ON(len != PAGE_ALIGN(len));
     612                 :        400 :         end = start + len;
     613         [ +  - ]:        400 :         if (end < start)
     614                 :            :                 return -EINVAL;
     615         [ +  + ]:        400 :         if (end == start)
     616                 :            :                 return 0;
     617                 :        390 :         vma = find_vma(current->mm, start);
     618 [ +  + ][ +  + ]:        390 :         if (!vma || vma->vm_start > start)
     619                 :            :                 return -ENOMEM;
     620                 :            : 
     621                 :        359 :         prev = vma->vm_prev;
     622         [ +  + ]:        359 :         if (start > vma->vm_start)
     623                 :        359 :                 prev = vma;
     624                 :            : 
     625                 :            :         for (nstart = start ; ; ) {
     626                 :            :                 vm_flags_t newflags;
     627                 :            : 
     628                 :            :                 /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
     629                 :            : 
     630                 :        359 :                 newflags = vma->vm_flags & ~VM_LOCKED;
     631         [ +  + ]:        359 :                 if (on)
     632                 :        173 :                         newflags |= VM_LOCKED;
     633                 :            : 
     634                 :        359 :                 tmp = vma->vm_end;
     635         [ +  + ]:        359 :                 if (tmp > end)
     636                 :            :                         tmp = end;
     637                 :        359 :                 error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
     638         [ +  - ]:        759 :                 if (error)
     639                 :            :                         break;
     640                 :            :                 nstart = tmp;
     641         [ +  + ]:        359 :                 if (nstart < prev->vm_end)
     642                 :            :                         nstart = prev->vm_end;
     643         [ +  + ]:        359 :                 if (nstart >= end)
     644                 :            :                         break;
     645                 :            : 
     646                 :          1 :                 vma = prev->vm_next;
     647 [ +  - ][ -  + ]:          1 :                 if (!vma || vma->vm_start != nstart) {
     648                 :            :                         error = -ENOMEM;
     649                 :            :                         break;
     650                 :            :                 }
     651                 :            :         }
     652                 :        359 :         return error;
     653                 :            : }
     654                 :            : 
     655                 :            : /*
     656                 :            :  * __mm_populate - populate and/or mlock pages within a range of address space.
     657                 :            :  *
     658                 :            :  * This is used to implement mlock() and the MAP_POPULATE / MAP_LOCKED mmap
     659                 :            :  * flags. VMAs must be already marked with the desired vm_flags, and
     660                 :            :  * mmap_sem must not be held.
     661                 :            :  */
     662                 :          0 : int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
     663                 :            : {
     664                 :        397 :         struct mm_struct *mm = current->mm;
     665                 :            :         unsigned long end, nstart, nend;
     666                 :            :         struct vm_area_struct *vma = NULL;
     667                 :        397 :         int locked = 0;
     668                 :            :         long ret = 0;
     669                 :            : 
     670                 :            :         VM_BUG_ON(start & ~PAGE_MASK);
     671                 :            :         VM_BUG_ON(len != PAGE_ALIGN(len));
     672                 :        397 :         end = start + len;
     673                 :            : 
     674         [ +  + ]:       2885 :         for (nstart = start; nstart < end; nstart = nend) {
     675                 :            :                 /*
     676                 :            :                  * We want to fault in pages for [nstart; end) address range.
     677                 :            :                  * Find first corresponding VMA.
     678                 :            :                  */
     679         [ +  + ]:       2501 :                 if (!locked) {
     680                 :       2449 :                         locked = 1;
     681                 :       2449 :                         down_read(&mm->mmap_sem);
     682                 :       2449 :                         vma = find_vma(mm, nstart);
     683         [ +  - ]:         52 :                 } else if (nstart >= vma->vm_end)
     684                 :         52 :                         vma = vma->vm_next;
     685 [ +  + ][ +  - ]:       2898 :                 if (!vma || vma->vm_start >= end)
     686                 :            :                         break;
     687                 :            :                 /*
     688                 :            :                  * Set [nstart; nend) to intersection of desired address
     689                 :            :                  * range with the first VMA. Also, skip undesirable VMA types.
     690                 :            :                  */
     691                 :       2498 :                 nend = min(end, vma->vm_end);
     692         [ -  + ]:       2498 :                 if (vma->vm_flags & (VM_IO | VM_PFNMAP))
     693                 :          0 :                         continue;
     694         [ +  + ]:       2498 :                 if (nstart < vma->vm_start)
     695                 :            :                         nstart = vma->vm_start;
     696                 :            :                 /*
     697                 :            :                  * Now fault in a range of pages. __mlock_vma_pages_range()
     698                 :            :                  * double checks the vma flags, so that it won't mlock pages
     699                 :            :                  * if the vma was already munlocked.
     700                 :            :                  */
     701                 :       2498 :                 ret = __mlock_vma_pages_range(vma, nstart, nend, &locked);
     702         [ +  + ]:       2498 :                 if (ret < 0) {
     703         [ +  + ]:         14 :                         if (ignore_errors) {
     704                 :            :                                 ret = 0;
     705                 :          4 :                                 continue;       /* continue at next VMA */
     706                 :            :                         }
     707                 :            :                         ret = __mlock_posix_error_return(ret);
     708                 :         10 :                         break;
     709                 :            :                 }
     710                 :       2488 :                 nend = nstart + ret * PAGE_SIZE;
     711                 :            :                 ret = 0;
     712                 :            :         }
     713         [ +  - ]:        397 :         if (locked)
     714                 :        397 :                 up_read(&mm->mmap_sem);
     715                 :        397 :         return ret;     /* 0 or negative error code */
     716                 :            : }
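
For reference, the mmap flags this path serves can be exercised from userspace
as below; an illustrative sketch, not part of mlock.c. MAP_LOCKED asks the
kernel to populate and mlock the new mapping, which ends up in __mm_populate()
with ignore_errors set:

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
            size_t len = 16 * 4096;
            void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS | MAP_LOCKED, -1, 0);

            if (p == MAP_FAILED) {
                    perror("mmap");         /* e.g. over RLIMIT_MEMLOCK */
                    return 1;
            }
            memset(p, 0, len);              /* pages were already faulted in */
            munmap(p, len);
            return 0;
    }
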
     717                 :            : 
     718                 :          0 : SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
     719                 :            : {
     720                 :            :         unsigned long locked;
     721                 :            :         unsigned long lock_limit;
     722                 :            :         int error = -ENOMEM;
     723                 :            : 
     724         [ +  - ]:        194 :         if (!can_do_mlock())
     725                 :            :                 return -EPERM;
     726                 :            : 
     727                 :        194 :         lru_add_drain_all();    /* flush pagevec */
     728                 :            : 
     729                 :        194 :         len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
     730                 :        194 :         start &= PAGE_MASK;
     731                 :            : 
     732                 :            :         lock_limit = rlimit(RLIMIT_MEMLOCK);
     733                 :        194 :         lock_limit >>= PAGE_SHIFT;
     734                 :        194 :         locked = len >> PAGE_SHIFT;
     735                 :            : 
     736                 :        194 :         down_write(&current->mm->mmap_sem);
     737                 :            : 
     738                 :        194 :         locked += current->mm->locked_vm;
     739                 :            : 
     740                 :            :         /* check against resource limits */
     741 [ +  + ][ +  - ]:        194 :         if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
     742                 :        194 :                 error = do_mlock(start, len, 1);
     743                 :            : 
     744                 :        194 :         up_write(&current->mm->mmap_sem);
     745         [ +  + ]:        194 :         if (!error)
     746                 :        173 :                 error = __mm_populate(start, len, 0);
     747                 :            :         return error;
     748                 :            : }
     749                 :            : 
     750                 :          0 : SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
     751                 :            : {
     752                 :            :         int ret;
     753                 :            : 
     754                 :        206 :         len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
     755                 :        206 :         start &= PAGE_MASK;
     756                 :            : 
     757                 :        206 :         down_write(&current->mm->mmap_sem);
     758                 :        206 :         ret = do_mlock(start, len, 0);
     759                 :        206 :         up_write(&current->mm->mmap_sem);
     760                 :            : 
     761                 :            :         return ret;
     762                 :            : }
     763                 :            : 
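From user space, the pair of syscalls above is reached through plain POSIX mlock(2)/munlock(2), and RLIMIT_MEMLOCK is the limit behind the pages-versus-lock_limit check in sys_mlock(). A minimal caller (the buffer size and fill value are arbitrary):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <sys/resource.h>

    int main(void)
    {
            struct rlimit rl;
            size_t len = 4096;
            void *buf;

            /* RLIMIT_MEMLOCK is the same limit sys_mlock() checks, in pages. */
            if (getrlimit(RLIMIT_MEMLOCK, &rl) == 0)
                    printf("RLIMIT_MEMLOCK: %llu bytes\n",
                           (unsigned long long)rl.rlim_cur);

            if (posix_memalign(&buf, 4096, len) != 0)
                    return 1;

            if (mlock(buf, len) != 0) {       /* kernel's -EPERM/-ENOMEM surface as -1/errno */
                    perror("mlock");
                    return 1;
            }
            memset(buf, 42, len);             /* pages stay resident while locked */
            munlock(buf, len);
            free(buf);
            return 0;
    }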
     764                 :          0 : static int do_mlockall(int flags)
     765                 :            : {
      766                 :          6 :         struct vm_area_struct *vma, *prev = NULL;
     767                 :            : 
     768         [ +  + ]:          6 :         if (flags & MCL_FUTURE)
     769                 :          3 :                 current->mm->def_flags |= VM_LOCKED;
     770                 :            :         else
     771                 :          3 :                 current->mm->def_flags &= ~VM_LOCKED;
     772         [ +  + ]:          6 :         if (flags == MCL_FUTURE)
     773                 :            :                 goto out;
     774                 :            : 
     775         [ +  + ]:        106 :         for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
     776                 :            :                 vm_flags_t newflags;
     777                 :            : 
     778                 :        101 :                 newflags = vma->vm_flags & ~VM_LOCKED;
     779         [ +  + ]:        101 :                 if (flags & MCL_CURRENT)
     780                 :         52 :                         newflags |= VM_LOCKED;
     781                 :            : 
     782                 :            :                 /* Ignore errors */
     783                 :        101 :                 mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
     784                 :        101 :                 cond_resched();
     785                 :            :         }
     786                 :            : out:
     787                 :          6 :         return 0;
     788                 :            : }
     789                 :            : 
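do_mlockall() clears VM_LOCKED from each VMA's flags first and sets it again only when MCL_CURRENT is present, so the same loop serves both mlockall(MCL_CURRENT) and, via do_mlockall(0), munlockall(). A sketch of that clear-then-set pattern (the VM_LOCKED value mirrors the kernel constant but is hard-coded here purely for illustration, and fixup_flags() is a hypothetical helper):

    #include <stdio.h>
    #include <sys/mman.h>                     /* MCL_CURRENT */

    #define VM_LOCKED 0x2000UL                /* mirrors the kernel constant */

    static unsigned long fixup_flags(unsigned long vm_flags, int mcl_flags)
    {
            unsigned long newflags = vm_flags & ~VM_LOCKED;   /* unlock by default */

            if (mcl_flags & MCL_CURRENT)
                    newflags |= VM_LOCKED;                    /* relock on request */
            return newflags;
    }

    int main(void)
    {
            unsigned long flags = 0x875UL | VM_LOCKED;        /* hypothetical VMA flags */

            printf("munlockall path: %#lx\n", fixup_flags(flags, 0));
            printf("mlockall path:   %#lx\n", fixup_flags(flags, MCL_CURRENT));
            return 0;
    }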
     790                 :          0 : SYSCALL_DEFINE1(mlockall, int, flags)
     791                 :            : {
     792                 :            :         unsigned long lock_limit;
     793                 :            :         int ret = -EINVAL;
     794                 :            : 
     795 [ +  + ][ +  + ]:         10 :         if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
     796                 :            :                 goto out;
     797                 :            : 
     798                 :            :         ret = -EPERM;
     799         [ +  + ]:          8 :         if (!can_do_mlock())
     800                 :            :                 goto out;
     801                 :            : 
     802         [ +  + ]:          6 :         if (flags & MCL_CURRENT)
     803                 :          5 :                 lru_add_drain_all();    /* flush pagevec */
     804                 :            : 
     805                 :            :         lock_limit = rlimit(RLIMIT_MEMLOCK);
     806                 :          6 :         lock_limit >>= PAGE_SHIFT;
     807                 :            : 
     808                 :            :         ret = -ENOMEM;
     809                 :          6 :         down_write(&current->mm->mmap_sem);
     810                 :            : 
     811         [ +  + ]:         11 :         if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
           [ +  -  +  + ]
     812                 :          5 :             capable(CAP_IPC_LOCK))
     813                 :          4 :                 ret = do_mlockall(flags);
     814                 :          6 :         up_write(&current->mm->mmap_sem);
     815 [ +  + ][ +  + ]:          6 :         if (!ret && (flags & MCL_CURRENT))
     816                 :            :                 mm_populate(0, TASK_SIZE);
     817                 :            : out:
     818                 :            :         return ret;
     819                 :            : }
     820                 :            : 
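The flag validation above mirrors the user-space contract: mlockall(2) rejects empty or unknown flag sets, MCL_CURRENT populates and pins what is already mapped (hence the mm_populate(0, TASK_SIZE) call), and MCL_FUTURE only marks the mm so later mappings are created locked. The classic caller is a latency-sensitive program:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
            /* Needs CAP_IPC_LOCK or an RLIMIT_MEMLOCK large enough to cover
             * every current mapping, per the checks in sys_mlockall(). */
            if (mlockall(MCL_CURRENT | MCL_FUTURE) != 0) {
                    perror("mlockall");
                    return 1;
            }

            /* ... latency-critical work runs without major page faults ... */

            munlockall();                     /* drops VM_LOCKED everywhere */
            return 0;
    }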
     821                 :          0 : SYSCALL_DEFINE0(munlockall)
     822                 :            : {
     823                 :            :         int ret;
     824                 :            : 
     825                 :          2 :         down_write(&current->mm->mmap_sem);
     826                 :          2 :         ret = do_mlockall(0);
     827                 :          2 :         up_write(&current->mm->mmap_sem);
     828                 :          2 :         return ret;
     829                 :            : }
     830                 :            : 
     831                 :            : /*
      832                 :            :  * Objects whose lifetime differs from that of a process (SHM_LOCK and
      833                 :            :  * SHM_HUGETLB shm segments) are accounted against the user_struct instead.
     834                 :            :  */
     835                 :            : static DEFINE_SPINLOCK(shmlock_user_lock);
     836                 :            : 
     837                 :          0 : int user_shm_lock(size_t size, struct user_struct *user)
     838                 :            : {
     839                 :            :         unsigned long lock_limit, locked;
     840                 :            :         int allowed = 0;
     841                 :            : 
     842                 :          2 :         locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
     843                 :            :         lock_limit = rlimit(RLIMIT_MEMLOCK);
     844         [ -  + ]:          2 :         if (lock_limit == RLIM_INFINITY)
     845                 :            :                 allowed = 1;
     846                 :          2 :         lock_limit >>= PAGE_SHIFT;
     847                 :            :         spin_lock(&shmlock_user_lock);
     848 [ +  - ][ -  + ]:          2 :         if (!allowed &&
     849         [ #  # ]:          0 :             locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
     850                 :            :                 goto out;
     851                 :            :         get_uid(user);
     852                 :          2 :         user->locked_shm += locked;
     853                 :            :         allowed = 1;
     854                 :            : out:
     855                 :            :         spin_unlock(&shmlock_user_lock);
     856                 :          2 :         return allowed;
     857                 :            : }
     858                 :            : 
     859                 :          0 : void user_shm_unlock(size_t size, struct user_struct *user)
     860                 :            : {
     861                 :            :         spin_lock(&shmlock_user_lock);
     862                 :          2 :         user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
     863                 :            :         spin_unlock(&shmlock_user_lock);
     864                 :          2 :         free_uid(user);
     865                 :          2 : }
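Both user_shm_lock() and user_shm_unlock() convert the segment size to whole pages with the same round-up expression before adjusting the per-user counter, so a partial page is always billed as a full one. A worked instance, assuming 4 KiB pages:

    #include <stdio.h>

    #define PAGE_SHIFT 12                     /* assumed page size for the example */
    #define PAGE_SIZE  (1UL << PAGE_SHIFT)

    int main(void)
    {
            size_t size = 5000;               /* hypothetical SHM segment, in bytes */
            unsigned long pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;

            /* (5000 + 4095) >> 12 = 2: the trailing 904 bytes cost a whole page */
            printf("%zu bytes are accounted as %lu pages\n", size, pages);
            return 0;
    }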

Generated by: LCOV version 1.9