Branch data Line data Source code
1 : : /*
2 : : * mm/mmap.c
3 : : *
4 : : * Written by obz.
5 : : *
6 : : * Address space accounting code <alan@lxorguk.ukuu.org.uk>
7 : : */
8 : :
9 : : #include <linux/kernel.h>
10 : : #include <linux/slab.h>
11 : : #include <linux/backing-dev.h>
12 : : #include <linux/mm.h>
13 : : #include <linux/shm.h>
14 : : #include <linux/mman.h>
15 : : #include <linux/pagemap.h>
16 : : #include <linux/swap.h>
17 : : #include <linux/syscalls.h>
18 : : #include <linux/capability.h>
19 : : #include <linux/init.h>
20 : : #include <linux/file.h>
21 : : #include <linux/fs.h>
22 : : #include <linux/personality.h>
23 : : #include <linux/security.h>
24 : : #include <linux/hugetlb.h>
25 : : #include <linux/profile.h>
26 : : #include <linux/export.h>
27 : : #include <linux/mount.h>
28 : : #include <linux/mempolicy.h>
29 : : #include <linux/rmap.h>
30 : : #include <linux/mmu_notifier.h>
31 : : #include <linux/perf_event.h>
32 : : #include <linux/audit.h>
33 : : #include <linux/khugepaged.h>
34 : : #include <linux/uprobes.h>
35 : : #include <linux/rbtree_augmented.h>
36 : : #include <linux/sched/sysctl.h>
37 : : #include <linux/notifier.h>
38 : : #include <linux/memory.h>
39 : :
40 : : #include <asm/uaccess.h>
41 : : #include <asm/cacheflush.h>
42 : : #include <asm/tlb.h>
43 : : #include <asm/mmu_context.h>
44 : :
45 : : #include "internal.h"
46 : :
47 : : #ifndef arch_mmap_check
48 : : #define arch_mmap_check(addr, len, flags) (0)
49 : : #endif
50 : :
51 : : #ifndef arch_rebalance_pgtables
52 : : #define arch_rebalance_pgtables(addr, len) (addr)
53 : : #endif
54 : :
55 : : static void unmap_region(struct mm_struct *mm,
56 : : struct vm_area_struct *vma, struct vm_area_struct *prev,
57 : : unsigned long start, unsigned long end);
58 : :
59 : : /* description of effects of mapping type and prot in current implementation.
60 : : * this is due to the limited x86 page protection hardware. The expected
61 : : * behavior is in parens:
62 : : *
63 : : * map_type prot
64 : : * PROT_NONE PROT_READ PROT_WRITE PROT_EXEC
65 : : * MAP_SHARED r: (no) no r: (yes) yes r: (no) yes r: (no) yes
66 : : * w: (no) no w: (no) no w: (yes) yes w: (no) no
67 : : * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
68 : : *
69 : : * MAP_PRIVATE r: (no) no r: (yes) yes r: (no) yes r: (no) yes
70 : : * w: (no) no w: (no) no w: (copy) copy w: (no) no
71 : : * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
72 : : *
73 : : */
74 : : pgprot_t protection_map[16] = {
75 : : __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
76 : : __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
77 : : };
78 : :
79 : 0 : pgprot_t vm_get_page_prot(unsigned long vm_flags)
80 : : {
81 : 3383505 : return __pgprot(pgprot_val(protection_map[vm_flags &
82 : : (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
83 : : pgprot_val(arch_vm_get_page_prot(vm_flags)));
84 : : }
85 : : EXPORT_SYMBOL(vm_get_page_prot);
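/*
 * Hedged usage sketch, not part of this listing: how a freshly created vma's
 * vm_page_prot is typically derived from its vm_flags, mirroring what
 * mmap_region() does further down.  The low four flag bits
 * (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED) index the sixteen protection_map[]
 * entries, so a private read/write mapping ends up with the copy-on-write
 * __P011 protection rather than a hardware-writable one.  The helper name is
 * illustrative only.
 */
static void example_set_new_vma_prot(struct vm_area_struct *vma,
				     vm_flags_t vm_flags)
{
	vma->vm_flags = vm_flags;
	/* e.g. VM_READ|VM_WRITE without VM_SHARED selects __P011 (COW on write) */
	vma->vm_page_prot = vm_get_page_prot(vm_flags);
}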
86 : :
87 : : int sysctl_overcommit_memory __read_mostly = OVERCOMMIT_GUESS; /* heuristic overcommit */
88 : : int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
89 : : int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
90 : : unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
91 : : unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
92 : : /*
 93                 :            :  * Make sure vm_committed_as is in its own cacheline and not shared with
 94                 :            :  * other variables, as it can be updated frequently by several CPUs.
95 : : */
96 : : struct percpu_counter vm_committed_as ____cacheline_aligned_in_smp;
97 : :
98 : : /*
 99                 :            :  * The global memory commitment made in the system is a metric
100 : : * that can be used to drive ballooning decisions when Linux is hosted
101 : : * as a guest. On Hyper-V, the host implements a policy engine for dynamically
102 : : * balancing memory across competing virtual machines that are hosted.
103 : : * Several metrics drive this policy engine including the guest reported
104 : : * memory commitment.
105 : : */
106 : 0 : unsigned long vm_memory_committed(void)
107 : : {
108 : 0 : return percpu_counter_read_positive(&vm_committed_as);
109 : : }
110 : : EXPORT_SYMBOL_GPL(vm_memory_committed);
111 : :
112 : : /*
113 : : * Check that a process has enough memory to allocate a new virtual
114 : : * mapping. 0 means there is enough memory for the allocation to
115 : : * succeed and -ENOMEM implies there is not.
116 : : *
117 : : * We currently support three overcommit policies, which are set via the
118 : : * vm.overcommit_memory sysctl. See Documentation/vm/overcommit-accounting
119 : : *
120 : : * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
121 : : * Additional code 2002 Jul 20 by Robert Love.
122 : : *
123 : : * cap_sys_admin is 1 if the process has admin privileges, 0 otherwise.
124 : : *
125 : : * Note this is a helper function intended to be used by LSMs which
126 : : * wish to use this logic.
127 : : */
128 : 0 : int __vm_enough_memory(struct mm_struct *mm, long pages, int cap_sys_admin)
129 : : {
130 : : unsigned long free, allowed, reserve;
131 : :
132 : : vm_acct_memory(pages);
133 : :
134 : : /*
135 : : * Sometimes we want to use more memory than we have
136 : : */
137 [ + + ]: 16881049 : if (sysctl_overcommit_memory == OVERCOMMIT_ALWAYS)
138 : : return 0;
139 : :
140 [ + + ]: 16875249 : if (sysctl_overcommit_memory == OVERCOMMIT_GUESS) {
141 : : free = global_page_state(NR_FREE_PAGES);
142 : 15998847 : free += global_page_state(NR_FILE_PAGES);
143 : :
144 : : /*
145 : : * shmem pages shouldn't be counted as free in this
146 : : * case, they can't be purged, only swapped out, and
147 : : * that won't affect the overall amount of available
148 : : * memory in the system.
149 : : */
150 : 15998847 : free -= global_page_state(NR_SHMEM);
151 : :
152 : 15998847 : free += get_nr_swap_pages();
153 : :
154 : : /*
155 : : * Any slabs which are created with the
156 : : * SLAB_RECLAIM_ACCOUNT flag claim to have contents
157 : : * which are reclaimable, under pressure. The dentry
 158                 :            :  * cache and most inode caches should fall into this category.
159 : : */
160 : 0 : free += global_page_state(NR_SLAB_RECLAIMABLE);
161 : :
162 : : /*
 163                 :            :  * Leave out the reserved pages; they are not available for anonymous mappings.
164 : : */
165 [ + ]: 15998847 : if (free <= totalreserve_pages)
166 : : goto error;
167 : : else
168 : 15998903 : free -= totalreserve_pages;
169 : :
170 : : /*
171 : : * Reserve some for root
172 : : */
173 [ + + ]: 15998903 : if (!cap_sys_admin)
174 : 5742 : free -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
175 : :
176 [ + + ]: 15998903 : if (free > pages)
177 : : return 0;
178 : :
179 : : goto error;
180 : : }
181 : :
182 : 876402 : allowed = vm_commit_limit();
183 : : /*
184 : : * Reserve some for root
185 : : */
186 [ - + ]: 876402 : if (!cap_sys_admin)
187 : 0 : allowed -= sysctl_admin_reserve_kbytes >> (PAGE_SHIFT - 10);
188 : :
189 : : /*
190 : : * Don't let a single process grow so big a user can't recover
191 : : */
192 [ + - ]: 876402 : if (mm) {
193 : 876402 : reserve = sysctl_user_reserve_kbytes >> (PAGE_SHIFT - 10);
194 : 876402 : allowed -= min(mm->total_vm / 32, reserve);
195 : : }
196 : :
197 [ + + ]: 876402 : if (percpu_counter_read_positive(&vm_committed_as) < allowed)
198 : : return 0;
199 : : error:
200 : : vm_unacct_memory(pages);
201 : :
202 : 9 : return -ENOMEM;
203 : : }
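/*
 * Hedged arithmetic sketch of the reserve handling above: the
 * sysctl_admin_reserve_kbytes and sysctl_user_reserve_kbytes values are kept
 * in kilobytes, so both branches convert them to pages with
 * ">> (PAGE_SHIFT - 10)" before subtracting.  With 4K pages that is a shift
 * by 2, so the default 8MB admin reserve becomes 2048 pages and the default
 * 128MB user reserve becomes 32768 pages.  The helper is illustrative only.
 */
static inline unsigned long example_reserve_pages(unsigned long reserve_kbytes)
{
	/* kilobytes -> pages, e.g. 8192 kB >> 2 == 2048 pages when PAGE_SHIFT == 12 */
	return reserve_kbytes >> (PAGE_SHIFT - 10);
}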
204 : :
205 : : /*
206 : : * Requires inode->i_mapping->i_mmap_mutex
207 : : */
208 : 0 : static void __remove_shared_vm_struct(struct vm_area_struct *vma,
209 : 7237655 : struct file *file, struct address_space *mapping)
210 : : {
211 [ + + ]: 15516437 : if (vma->vm_flags & VM_DENYWRITE)
212 : 7237655 : atomic_inc(&file_inode(file)->i_writecount);
213 [ + + ]: 15516674 : if (vma->vm_flags & VM_SHARED)
214 : 1003593 : mapping->i_mmap_writable--;
215 : :
216 : : flush_dcache_mmap_lock(mapping);
217 [ + + ]: 31033434 : if (unlikely(vma->vm_flags & VM_NONLINEAR))
218 : 1 : list_del_init(&vma->shared.nonlinear);
219 : : else
220 : 15516996 : vma_interval_tree_remove(vma, &mapping->i_mmap);
221 : : flush_dcache_mmap_unlock(mapping);
222 : 15517207 : }
223 : :
224 : : /*
225 : : * Unlink a file-based vm structure from its interval tree, to hide
226 : : * vma from rmap and vmtruncate before freeing its page tables.
227 : : */
228 : 0 : void unlink_file_vma(struct vm_area_struct *vma)
229 : : {
230 : 23295890 : struct file *file = vma->vm_file;
231 : :
232 [ + + ]: 23295890 : if (file) {
233 : 15515433 : struct address_space *mapping = file->f_mapping;
234 : 15515433 : mutex_lock(&mapping->i_mmap_mutex);
235 : 15516426 : __remove_shared_vm_struct(vma, file, mapping);
236 : 15516394 : mutex_unlock(&mapping->i_mmap_mutex);
237 : : }
238 : 1674 : }
239 : :
240 : : /*
241 : : * Close a vm structure and free it, returning the next.
242 : : */
243 : 0 : static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
244 : : {
245 : 23296400 : struct vm_area_struct *next = vma->vm_next;
246 : :
247 : : might_sleep();
248 [ + + ][ + + ]: 23296400 : if (vma->vm_ops && vma->vm_ops->close)
249 : 1197543 : vma->vm_ops->close(vma);
250 [ + + ]: 23296711 : if (vma->vm_file)
251 : 15516407 : fput(vma->vm_file);
252 : : mpol_put(vma_policy(vma));
253 : 23296616 : kmem_cache_free(vm_area_cachep, vma);
254 : 23296669 : return next;
255 : : }
256 : :
257 : : static unsigned long do_brk(unsigned long addr, unsigned long len);
258 : :
259 : 0 : SYSCALL_DEFINE1(brk, unsigned long, brk)
260 : : {
261 : : unsigned long rlim, retval;
262 : : unsigned long newbrk, oldbrk;
263 : 591452 : struct mm_struct *mm = current->mm;
264 : : unsigned long min_brk;
265 : : bool populate;
266 : :
267 : 591452 : down_write(&mm->mmap_sem);
268 : :
269 : : #ifdef CONFIG_COMPAT_BRK
270 : : /*
271 : : * CONFIG_COMPAT_BRK can still be overridden by setting
272 : : * randomize_va_space to 2, which will still cause mm->start_brk
273 : : * to be arbitrarily shifted
274 : : */
275 : : if (current->brk_randomized)
276 : : min_brk = mm->start_brk;
277 : : else
278 : : min_brk = mm->end_data;
279 : : #else
280 : 591452 : min_brk = mm->start_brk;
281 : : #endif
282 [ + + ]: 591452 : if (brk < min_brk)
283 : : goto out;
284 : :
285 : : /*
286 : : * Check against rlimit here. If this check is done later after the test
287 : : * of oldbrk with newbrk then it can escape the test and let the data
 288                 :            :  * segment grow beyond its set limit in the case where the limit is
289 : : * not page aligned -Ram Gupta
290 : : */
291 : : rlim = rlimit(RLIMIT_DATA);
292 [ - + ][ # # ]: 475646 : if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
293 : 0 : (mm->end_data - mm->start_data) > rlim)
294 : : goto out;
295 : :
296 : 475647 : newbrk = PAGE_ALIGN(brk);
297 : 475647 : oldbrk = PAGE_ALIGN(mm->brk);
298 [ + + ]: 1067099 : if (oldbrk == newbrk)
299 : : goto set_brk;
300 : :
301 : : /* Always allow shrinking brk. */
302 [ + + ]: 475646 : if (brk <= mm->brk) {
303 [ + - ]: 326644 : if (!do_munmap(mm, newbrk, oldbrk-newbrk))
304 : : goto set_brk;
305 : : goto out;
306 : : }
307 : :
308 : : /* Check against existing mmap mappings. */
309 [ + + ]: 149002 : if (find_vma_intersection(mm, oldbrk, newbrk+PAGE_SIZE))
310 : : goto out;
311 : :
312 : : /* Ok, looks good - let it rip. */
313 [ + + ]: 142960 : if (do_brk(oldbrk, newbrk-oldbrk) != oldbrk)
314 : : goto out;
315 : :
316 : : set_brk:
317 : 469602 : mm->brk = brk;
318 [ + + ][ + - ]: 469602 : populate = newbrk > oldbrk && (mm->def_flags & VM_LOCKED) != 0;
319 : 469602 : up_write(&mm->mmap_sem);
320 [ + + ]: 469604 : if (populate)
321 : 1 : mm_populate(oldbrk, newbrk - oldbrk);
322 : : return brk;
323 : :
324 : : out:
325 : 0 : retval = mm->brk;
326 : 0 : up_write(&mm->mmap_sem);
327 : 121848 : return retval;
328 : : }
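/*
 * Hedged userspace-side sketch of the brk path above (ordinary userspace C,
 * not kernel code): the raw syscall returns the resulting break, unchanged
 * when the request is rejected, and libc helpers such as sbrk() are built on
 * top of it.  This only illustrates the externally visible behaviour.
 */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *old = sbrk(0);			/* current program break */

	if (sbrk(4096) == (void *) -1)		/* ask the kernel to move the break up */
		perror("sbrk");
	printf("break moved from %p to %p\n", old, sbrk(0));
	return 0;
}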
329 : :
330 : 0 : static long vma_compute_subtree_gap(struct vm_area_struct *vma)
331 : : {
332 : : unsigned long max, subtree_gap;
333 : 123354825 : max = vma->vm_start;
334 [ + + ]: 123354825 : if (vma->vm_prev)
335 : 117577148 : max -= vma->vm_prev->vm_end;
336 [ + + ]: 123354825 : if (vma->vm_rb.rb_left) {
337 : 71865354 : subtree_gap = rb_entry(vma->vm_rb.rb_left,
338 : : struct vm_area_struct, vm_rb)->rb_subtree_gap;
339 [ + + ]: 71865354 : if (subtree_gap > max)
340 : : max = subtree_gap;
341 : : }
342 [ # # ]: 123354825 : if (vma->vm_rb.rb_right) {
343 : 80963526 : subtree_gap = rb_entry(vma->vm_rb.rb_right,
344 : : struct vm_area_struct, vm_rb)->rb_subtree_gap;
345 [ + + ]: 80963526 : if (subtree_gap > max)
346 : : max = subtree_gap;
347 : : }
348 : 0 : return max;
349 : : }
350 : :
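/*
 * Hedged illustration of the augmented value computed above: each rbtree
 * node caches the largest free gap anywhere in its subtree, where a vma's
 * own gap is the hole between its predecessor's vm_end and its own vm_start.
 * For mappings at [0x1000,0x2000) and [0x5000,0x6000) the second vma's own
 * gap is 0x5000 - 0x2000 = 0x3000, and that value propagates upward through
 * rb_subtree_gap so unmapped-area searches can skip whole subtrees.  The
 * helper below merely restates the "own gap" term and is not in this file.
 */
static unsigned long example_own_gap(struct vm_area_struct *vma)
{
	/* gap between this vma and the previous one in address order */
	return vma->vm_prev ? vma->vm_start - vma->vm_prev->vm_end
			    : vma->vm_start;
}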
351 : : #ifdef CONFIG_DEBUG_VM_RB
352 : : static int browse_rb(struct rb_root *root)
353 : : {
354 : : int i = 0, j, bug = 0;
355 : : struct rb_node *nd, *pn = NULL;
356 : : unsigned long prev = 0, pend = 0;
357 : :
358 : : for (nd = rb_first(root); nd; nd = rb_next(nd)) {
359 : : struct vm_area_struct *vma;
360 : : vma = rb_entry(nd, struct vm_area_struct, vm_rb);
361 : : if (vma->vm_start < prev) {
362 : : printk("vm_start %lx prev %lx\n", vma->vm_start, prev);
363 : : bug = 1;
364 : : }
365 : : if (vma->vm_start < pend) {
366 : : printk("vm_start %lx pend %lx\n", vma->vm_start, pend);
367 : : bug = 1;
368 : : }
369 : : if (vma->vm_start > vma->vm_end) {
370 : : printk("vm_end %lx < vm_start %lx\n",
371 : : vma->vm_end, vma->vm_start);
372 : : bug = 1;
373 : : }
374 : : if (vma->rb_subtree_gap != vma_compute_subtree_gap(vma)) {
375 : : printk("free gap %lx, correct %lx\n",
376 : : vma->rb_subtree_gap,
377 : : vma_compute_subtree_gap(vma));
378 : : bug = 1;
379 : : }
380 : : i++;
381 : : pn = nd;
382 : : prev = vma->vm_start;
383 : : pend = vma->vm_end;
384 : : }
385 : : j = 0;
386 : : for (nd = pn; nd; nd = rb_prev(nd))
387 : : j++;
388 : : if (i != j) {
389 : : printk("backwards %d, forwards %d\n", j, i);
390 : : bug = 1;
391 : : }
392 : : return bug ? -1 : i;
393 : : }
394 : :
395 : : static void validate_mm_rb(struct rb_root *root, struct vm_area_struct *ignore)
396 : : {
397 : : struct rb_node *nd;
398 : :
399 : : for (nd = rb_first(root); nd; nd = rb_next(nd)) {
400 : : struct vm_area_struct *vma;
401 : : vma = rb_entry(nd, struct vm_area_struct, vm_rb);
402 : : BUG_ON(vma != ignore &&
403 : : vma->rb_subtree_gap != vma_compute_subtree_gap(vma));
404 : : }
405 : : }
406 : :
407 : : void validate_mm(struct mm_struct *mm)
408 : : {
409 : : int bug = 0;
410 : : int i = 0;
411 : : unsigned long highest_address = 0;
412 : : struct vm_area_struct *vma = mm->mmap;
413 : : while (vma) {
414 : : struct anon_vma_chain *avc;
415 : : vma_lock_anon_vma(vma);
416 : : list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
417 : : anon_vma_interval_tree_verify(avc);
418 : : vma_unlock_anon_vma(vma);
419 : : highest_address = vma->vm_end;
420 : : vma = vma->vm_next;
421 : : i++;
422 : : }
423 : : if (i != mm->map_count) {
424 : : printk("map_count %d vm_next %d\n", mm->map_count, i);
425 : : bug = 1;
426 : : }
427 : : if (highest_address != mm->highest_vm_end) {
428 : : printk("mm->highest_vm_end %lx, found %lx\n",
429 : : mm->highest_vm_end, highest_address);
430 : : bug = 1;
431 : : }
432 : : i = browse_rb(&mm->mm_rb);
433 : : if (i != mm->map_count) {
434 : : printk("map_count %d rb %d\n", mm->map_count, i);
435 : : bug = 1;
436 : : }
437 : : BUG_ON(bug);
438 : : }
439 : : #else
440 : : #define validate_mm_rb(root, ignore) do { } while (0)
441 : : #define validate_mm(mm) do { } while (0)
442 : : #endif
443 : :
444 [ + + ][ + + ]: 132744766 : RB_DECLARE_CALLBACKS(static, vma_gap_callbacks, struct vm_area_struct, vm_rb,
[ + + ][ + + ]
445 : : unsigned long, rb_subtree_gap, vma_compute_subtree_gap)
446 : :
447 : : /*
448 : : * Update augmented rbtree rb_subtree_gap values after vma->vm_start or
449 : : * vma->vm_prev->vm_end values changed, without modifying the vma's position
450 : : * in the rbtree.
451 : : */
452 : 0 : static void vma_gap_update(struct vm_area_struct *vma)
453 : : {
454 : : /*
455 : : * As it turns out, RB_DECLARE_CALLBACKS() already created a callback
 456                 :            :  * function that does exactly what we want.
457 : : */
458 : 31229061 : vma_gap_callbacks_propagate(&vma->vm_rb, NULL);
459 : 766 : }
460 : :
461 : : static inline void vma_rb_insert(struct vm_area_struct *vma,
462 : : struct rb_root *root)
463 : : {
464 : : /* All rb_subtree_gap values must be consistent prior to insertion */
465 : : validate_mm_rb(root, NULL);
466 : :
467 : : rb_insert_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
468 : : }
469 : :
470 : 0 : static void vma_rb_erase(struct vm_area_struct *vma, struct rb_root *root)
471 : : {
472 : : /*
473 : : * All rb_subtree_gap values must be consistent prior to erase,
474 : : * with the possible exception of the vma being erased.
475 : : */
476 : : validate_mm_rb(root, vma);
477 : :
478 : : /*
479 : : * Note rb_erase_augmented is a fairly large inline function,
480 : : * so make sure we instantiate it only once with our desired
481 : : * augmented rbtree callbacks.
482 : : */
483 : 1211535 : rb_erase_augmented(&vma->vm_rb, root, &vma_gap_callbacks);
484 : 1211534 : }
485 : :
486 : : /*
487 : : * vma has some anon_vma assigned, and is already inserted on that
488 : : * anon_vma's interval trees.
489 : : *
490 : : * Before updating the vma's vm_start / vm_end / vm_pgoff fields, the
491 : : * vma must be removed from the anon_vma's interval trees using
492 : : * anon_vma_interval_tree_pre_update_vma().
493 : : *
494 : : * After the update, the vma will be reinserted using
495 : : * anon_vma_interval_tree_post_update_vma().
496 : : *
497 : : * The entire update must be protected by exclusive mmap_sem and by
498 : : * the root anon_vma's mutex.
499 : : */
500 : : static inline void
501 : : anon_vma_interval_tree_pre_update_vma(struct vm_area_struct *vma)
502 : : {
503 : : struct anon_vma_chain *avc;
504 : :
505 [ + + ][ + + ]: 9173639 : list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
[ - + ]
506 : 3569561 : anon_vma_interval_tree_remove(avc, &avc->anon_vma->rb_root);
507 : : }
508 : :
509 : : static inline void
510 : : anon_vma_interval_tree_post_update_vma(struct vm_area_struct *vma)
511 : : {
512 : : struct anon_vma_chain *avc;
513 : :
514 [ + + ][ + + ]: 7136740 : list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
[ - + ]
515 : 3569543 : anon_vma_interval_tree_insert(avc, &avc->anon_vma->rb_root);
516 : : }
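/*
 * Hedged sketch of how the two helpers above are meant to be used
 * (vma_adjust() below follows this pattern): take the anon_vma write lock,
 * pull the vma out of the anon_vma interval trees, change
 * vm_start/vm_end/vm_pgoff, then reinsert and unlock, all under an exclusive
 * mmap_sem.  The function name is illustrative only.
 */
static void example_resize_vma(struct vm_area_struct *vma,
			       unsigned long start, unsigned long end,
			       pgoff_t pgoff)
{
	struct anon_vma *anon_vma = vma->anon_vma;

	if (anon_vma) {
		anon_vma_lock_write(anon_vma);
		anon_vma_interval_tree_pre_update_vma(vma);
	}
	vma->vm_start = start;
	vma->vm_end = end;
	vma->vm_pgoff = pgoff;
	if (anon_vma) {
		anon_vma_interval_tree_post_update_vma(vma);
		anon_vma_unlock_write(anon_vma);
	}
}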
517 : :
518 : : static int find_vma_links(struct mm_struct *mm, unsigned long addr,
519 : : unsigned long end, struct vm_area_struct **pprev,
520 : : struct rb_node ***rb_link, struct rb_node **rb_parent)
521 : : {
522 : : struct rb_node **__rb_link, *__rb_parent, *rb_prev;
523 : :
524 : 5253572 : __rb_link = &mm->mm_rb.rb_node;
525 : : rb_prev = __rb_parent = NULL;
526 : :
527 [ + + ][ + + ]: 47485852 : while (*__rb_link) {
[ + + ][ + + ]
[ + + ]
528 : : struct vm_area_struct *vma_tmp;
529 : :
530 : : __rb_parent = *__rb_link;
531 : : vma_tmp = rb_entry(__rb_parent, struct vm_area_struct, vm_rb);
532 : :
533 [ + + ][ + + ]: 42786488 : if (vma_tmp->vm_end > addr) {
[ + + ][ + + ]
[ + + ]
534 : : /* Fail if an existing vma overlaps the area */
535 [ + - ][ + - ]: 30578416 : if (vma_tmp->vm_start < end)
[ + - ][ + + ]
[ + ]
536 : : return -ENOMEM;
537 : 30259407 : __rb_link = &__rb_parent->rb_left;
538 : : } else {
539 : : rb_prev = __rb_parent;
540 : 42467479 : __rb_link = &__rb_parent->rb_right;
541 : : }
542 : : }
543 : :
544 : : *pprev = NULL;
545 [ + + ][ + + ]: 4699364 : if (rb_prev)
[ + + ][ + + ]
[ + + ]
546 : 4699364 : *pprev = rb_entry(rb_prev, struct vm_area_struct, vm_rb);
547 : : *rb_link = __rb_link;
548 : : *rb_parent = __rb_parent;
549 : : return 0;
550 : : }
551 : :
552 : 0 : static unsigned long count_vma_pages_range(struct mm_struct *mm,
553 : : unsigned long addr, unsigned long end)
554 : : {
555 : : unsigned long nr_pages = 0;
556 : : struct vm_area_struct *vma;
557 : :
 558                 :            :         /* Find first overlapping mapping */
559 : : vma = find_vma_intersection(mm, addr, end);
560 [ # # ]: 0 : if (!vma)
561 : : return 0;
562 : :
563 : 0 : nr_pages = (min(end, vma->vm_end) -
564 : 0 : max(addr, vma->vm_start)) >> PAGE_SHIFT;
565 : :
566 : : /* Iterate over the rest of the overlaps */
567 [ # # ]: 0 : for (vma = vma->vm_next; vma; vma = vma->vm_next) {
568 : : unsigned long overlap_len;
569 : :
570 [ # # ]: 0 : if (vma->vm_start > end)
571 : : break;
572 : :
573 : 0 : overlap_len = min(end, vma->vm_end) - vma->vm_start;
574 : 0 : nr_pages += overlap_len >> PAGE_SHIFT;
575 : : }
576 : :
577 : : return nr_pages;
578 : : }
579 : :
580 : 0 : void __vma_link_rb(struct mm_struct *mm, struct vm_area_struct *vma,
581 : : struct rb_node **rb_link, struct rb_node *rb_parent)
582 : : {
583 : : /* Update tracking information for the gap following the new vma. */
584 [ + + ]: 23315500 : if (vma->vm_next)
585 : 4153624 : vma_gap_update(vma->vm_next);
586 : : else
587 : 19161876 : mm->highest_vm_end = vma->vm_end;
588 : :
589 : : /*
590 : : * vma->vm_prev wasn't known when we followed the rbtree to find the
591 : : * correct insertion point for that vma. As a result, we could not
592 : : * update the vma vm_rb parents rb_subtree_gap values on the way down.
593 : : * So, we first insert the vma with a zero rb_subtree_gap value
594 : : * (to be consistent with what we did on the way down), and then
595 : : * immediately update the gap to the correct value. Finally we
596 : : * rebalance the rbtree after all augmented values have been set.
597 : : */
598 : 23315402 : rb_link_node(&vma->vm_rb, rb_parent, rb_link);
599 : 23315402 : vma->rb_subtree_gap = 0;
600 : 23315402 : vma_gap_update(vma);
601 : 23314941 : vma_rb_insert(vma, &mm->mm_rb);
602 : 23314937 : }
603 : :
604 : 0 : static void __vma_link_file(struct vm_area_struct *vma)
605 : : {
606 : 412820 : struct file *file;
607 : :
608 : 3643359 : file = vma->vm_file;
609 [ + + ]: 3643359 : if (file) {
610 : 3228367 : struct address_space *mapping = file->f_mapping;
611 : :
612 [ + + ]: 3228367 : if (vma->vm_flags & VM_DENYWRITE)
613 : 412820 : atomic_dec(&file_inode(file)->i_writecount);
614 [ + + ]: 3228385 : if (vma->vm_flags & VM_SHARED)
615 : 989292 : mapping->i_mmap_writable++;
616 : :
617 : : flush_dcache_mmap_lock(mapping);
618 [ - + ]: 6871746 : if (unlikely(vma->vm_flags & VM_NONLINEAR))
619 : 0 : vma_nonlinear_insert(vma, &mapping->i_mmap_nonlinear);
620 : : else
621 : 3228387 : vma_interval_tree_insert(vma, &mapping->i_mmap);
622 : : flush_dcache_mmap_unlock(mapping);
623 : : }
624 : 3643206 : }
625 : :
626 : : static void
627 : 0 : __vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
628 : : struct vm_area_struct *prev, struct rb_node **rb_link,
629 : : struct rb_node *rb_parent)
630 : : {
631 : 4212434 : __vma_link_list(mm, vma, prev, rb_parent);
632 : 4212459 : __vma_link_rb(mm, vma, rb_link, rb_parent);
633 : 4212437 : }
634 : :
635 : 0 : static void vma_link(struct mm_struct *mm, struct vm_area_struct *vma,
636 : : struct vm_area_struct *prev, struct rb_node **rb_link,
637 : : struct rb_node *rb_parent)
638 : : {
639 : : struct address_space *mapping = NULL;
640 : :
641 [ + + ]: 2667786 : if (vma->vm_file)
642 : 2252776 : mapping = vma->vm_file->f_mapping;
643 : :
644 [ + + ]: 2667786 : if (mapping)
645 : 2252697 : mutex_lock(&mapping->i_mmap_mutex);
646 : :
647 : 2667895 : __vma_link(mm, vma, prev, rb_link, rb_parent);
648 : 2667782 : __vma_link_file(vma);
649 : :
650 [ + + ]: 2667681 : if (mapping)
651 : 2252770 : mutex_unlock(&mapping->i_mmap_mutex);
652 : :
653 : 2667740 : mm->map_count++;
654 : : validate_mm(mm);
655 : 2667740 : }
656 : :
657 : : /*
658 : : * Helper for vma_adjust() in the split_vma insert case: insert a vma into the
659 : : * mm's list and rbtree. It has already been inserted into the interval tree.
660 : : */
661 : 0 : static void __insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
662 : : {
663 : : struct vm_area_struct *prev;
664 : : struct rb_node **rb_link, *rb_parent;
665 : :
666 [ - + ]: 1544887 : if (find_vma_links(mm, vma->vm_start, vma->vm_end,
667 : : &prev, &rb_link, &rb_parent))
668 : 0 : BUG();
669 : 1544887 : __vma_link(mm, vma, prev, rb_link, rb_parent);
670 : 1544874 : mm->map_count++;
671 : 1544874 : }
672 : :
673 : : static inline void
674 : : __vma_unlink(struct mm_struct *mm, struct vm_area_struct *vma,
675 : : struct vm_area_struct *prev)
676 : : {
677 : : struct vm_area_struct *next;
678 : :
679 : 18236 : vma_rb_erase(vma, &mm->mm_rb);
680 : 18236 : prev->vm_next = next = vma->vm_next;
681 [ + + ]: 18236 : if (next)
682 : 18226 : next->vm_prev = prev;
683 [ + + ]: 18236 : if (mm->mmap_cache == vma)
684 : 20 : mm->mmap_cache = prev;
685 : : }
686 : :
687 : : /*
688 : : * We cannot adjust vm_start, vm_end, vm_pgoff fields of a vma that
689 : : * is already present in an i_mmap tree without adjusting the tree.
690 : : * The following helper function should be used when such adjustments
691 : : * are necessary. The "insert" vma (if any) is to be inserted
692 : : * before we drop the necessary locks.
693 : : */
694 : 0 : int vma_adjust(struct vm_area_struct *vma, unsigned long start,
695 : : unsigned long end, pgoff_t pgoff, struct vm_area_struct *insert)
696 : : {
697 : 2150397 : struct mm_struct *mm = vma->vm_mm;
698 : 2150397 : struct vm_area_struct *next = vma->vm_next;
699 : : struct vm_area_struct *importer = NULL;
700 : : struct address_space *mapping = NULL;
701 : : struct rb_root *root = NULL;
702 : 3059643 : struct anon_vma *anon_vma = NULL;
703 : 2150397 : struct file *file = vma->vm_file;
704 : : bool start_changed = false, end_changed = false;
705 : : long adjust_next = 0;
706 : : int remove_next = 0;
707 : :
708 [ + + ]: 2150397 : if (next && !insert) {
709 : : struct vm_area_struct *exporter = NULL;
710 : :
711 [ + + ]: 487666 : if (end >= next->vm_end) {
712 : : /*
713 : : * vma expands, overlapping all the next, and
714 : : * perhaps the one after too (mprotect case 6).
715 : : */
716 [ + - ]: 18236 : again: remove_next = 1 + (end > next->vm_end);
717 : : end = next->vm_end;
718 : : exporter = next;
719 : : importer = vma;
720 [ + + ]: 469430 : } else if (end > next->vm_start) {
721 : : /*
722 : : * vma expands, overlapping part of the next:
723 : : * mprotect case 5 shifting the boundary up.
724 : : */
725 : 582 : adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
726 : : exporter = next;
727 : : importer = vma;
728 [ - + ]: 468848 : } else if (end < vma->vm_end) {
729 : : /*
730 : : * vma shrinks, and !insert tells it's not
731 : : * split_vma inserting another: so it must be
732 : : * mprotect case 4 shifting the boundary down.
733 : : */
734 : 0 : adjust_next = - ((vma->vm_end - end) >> PAGE_SHIFT);
735 : : exporter = vma;
736 : : importer = next;
737 : : }
738 : :
739 : : /*
740 : : * Easily overlooked: when mprotect shifts the boundary,
741 : : * make sure the expanding vma has anon_vma set if the
742 : : * shrinking vma had, to cover any anon pages imported.
743 : : */
744 [ + + ][ + + ]: 487666 : if (exporter && exporter->anon_vma && !importer->anon_vma) {
[ - + ]
745 [ # # ]: 0 : if (anon_vma_clone(importer, exporter))
746 : : return -ENOMEM;
747 : 0 : importer->anon_vma = exporter->anon_vma;
748 : : }
749 : : }
750 : :
751 [ + + ]: 2150397 : if (file) {
752 : 975691 : mapping = file->f_mapping;
753 [ + + ]: 975691 : if (!(vma->vm_flags & VM_NONLINEAR)) {
754 : 975647 : root = &mapping->i_mmap;
755 : : uprobe_munmap(vma, vma->vm_start, vma->vm_end);
756 : :
757 : : if (adjust_next)
758 : : uprobe_munmap(next, next->vm_start,
759 : : next->vm_end);
760 : : }
761 : :
762 : 975691 : mutex_lock(&mapping->i_mmap_mutex);
763 [ + + ]: 975688 : if (insert) {
764 : : /*
765 : : * Put into interval tree now, so instantiated pages
766 : : * are visible to arm/parisc __flush_dcache_page
767 : : * throughout; but we cannot insert into address
768 : : * space until vma start or end is updated.
769 : : */
770 : 975607 : __vma_link_file(insert);
771 : : }
772 : : }
773 : :
774 : : vma_adjust_trans_huge(vma, start, end, adjust_next);
775 : :
776 : 4300813 : anon_vma = vma->anon_vma;
777 [ - + ]: 4300813 : if (!anon_vma && adjust_next)
778 : 0 : anon_vma = next->anon_vma;
779 [ + + ]: 4300813 : if (anon_vma) {
780 : : VM_BUG_ON(adjust_next && next->anon_vma &&
781 : : anon_vma != next->anon_vma);
782 : : anon_vma_lock_write(anon_vma);
783 : : anon_vma_interval_tree_pre_update_vma(vma);
784 [ + + ]: 1529832 : if (adjust_next)
785 : : anon_vma_interval_tree_pre_update_vma(next);
786 : : }
787 : :
788 [ + + ]: 4300818 : if (root) {
789 : : flush_dcache_mmap_lock(mapping);
790 : 975708 : vma_interval_tree_remove(vma, root);
791 [ - + ]: 975687 : if (adjust_next)
792 : 0 : vma_interval_tree_remove(next, root);
793 : : }
794 : :
795 [ + + ]: 2150412 : if (start != vma->vm_start) {
796 : 764079 : vma->vm_start = start;
797 : : start_changed = true;
798 : : }
799 [ + + ]: 2150412 : if (end != vma->vm_end) {
800 : 1386368 : vma->vm_end = end;
801 : : end_changed = true;
802 : : }
803 : 2150412 : vma->vm_pgoff = pgoff;
804 [ + + ]: 2150412 : if (adjust_next) {
805 : 582 : next->vm_start += adjust_next << PAGE_SHIFT;
806 : 582 : next->vm_pgoff += adjust_next;
807 : : }
808 : :
809 [ + + ]: 2150412 : if (root) {
810 [ - + ]: 975688 : if (adjust_next)
811 : 0 : vma_interval_tree_insert(next, root);
812 : 975688 : vma_interval_tree_insert(vma, root);
813 : : flush_dcache_mmap_unlock(mapping);
814 : : }
815 : :
816 [ + + ]: 2150446 : if (remove_next) {
817 : : /*
818 : : * vma_merge has merged next into vma, and needs
819 : : * us to remove next before dropping the locks.
820 : : */
821 : : __vma_unlink(mm, next, vma);
822 [ + + ]: 18236 : if (file)
823 : 1 : __remove_shared_vm_struct(next, file, mapping);
824 [ + + ]: 2132210 : } else if (insert) {
825 : : /*
826 : : * split_vma has split insert from vma, and needs
827 : : * us to insert it before dropping the locks
828 : : * (it may either follow vma or precede it).
829 : : */
830 : 1544887 : __insert_vm_struct(mm, insert);
831 : : } else {
832 [ + + ]: 587323 : if (start_changed)
833 : 301874 : vma_gap_update(vma);
834 [ + + ]: 587294 : if (end_changed) {
835 [ + + ]: 285442 : if (!next)
836 : 58951 : mm->highest_vm_end = end;
837 [ + + ]: 226491 : else if (!adjust_next)
838 : 225909 : vma_gap_update(next);
839 : : }
840 : : }
841 : :
842 [ + + ]: 2150447 : if (anon_vma) {
843 : : anon_vma_interval_tree_post_update_vma(vma);
844 [ + + ]: 1529816 : if (adjust_next)
845 : : anon_vma_interval_tree_post_update_vma(next);
846 : : anon_vma_unlock_write(anon_vma);
847 : : }
848 [ + + ]: 2150429 : if (mapping)
849 : 975702 : mutex_unlock(&mapping->i_mmap_mutex);
850 : :
851 : : if (root) {
852 : : uprobe_mmap(vma);
853 : :
854 : : if (adjust_next)
855 : : uprobe_mmap(next);
856 : : }
857 : :
858 [ + + ]: 2150442 : if (remove_next) {
859 [ + + ]: 18236 : if (file) {
860 : : uprobe_munmap(next, next->vm_start, next->vm_end);
861 : 1 : fput(file);
862 : : }
863 [ + + ]: 18236 : if (next->anon_vma)
864 : : anon_vma_merge(vma, next);
865 : 18236 : mm->map_count--;
866 : : mpol_put(vma_policy(next));
867 : 18236 : kmem_cache_free(vm_area_cachep, next);
868 : : /*
869 : : * In mprotect's case 6 (see comments on vma_merge),
870 : : * we must remove another next too. It would clutter
871 : : * up the code too much to do both in one go.
872 : : */
873 : 18236 : next = vma->vm_next;
874 [ - + ]: 18236 : if (remove_next == 2)
875 : : goto again;
876 [ + + ]: 18236 : else if (next)
877 : 18226 : vma_gap_update(next);
878 : : else
879 : 10 : mm->highest_vm_end = end;
880 : : }
881 : : if (insert && file)
882 : : uprobe_mmap(insert);
883 : :
884 : : validate_mm(mm);
885 : :
886 : : return 0;
887 : : }
888 : :
889 : : /*
890 : : * If the vma has a ->close operation then the driver probably needs to release
891 : : * per-vma resources, so we don't attempt to merge those.
892 : : */
893 : 507819 : static inline int is_mergeable_vma(struct vm_area_struct *vma,
894 : : struct file *file, unsigned long vm_flags,
895 : : const char __user *anon_name)
896 : : {
897 [ + + ][ + + ]: 3520301 : if (vma->vm_flags ^ vm_flags)
898 : : return 0;
899 [ + + ][ + + ]: 1894647 : if (vma->vm_file != file)
900 : : return 0;
901 [ + + ][ + - ]: 507813 : if (vma->vm_ops && vma->vm_ops->close)
[ + + ][ + ]
902 : : return 0;
903 [ + - ][ + - ]: 507819 : if (vma_get_anon_name(vma) != anon_name)
904 : : return 0;
905 : : return 1;
906 : : }
907 : :
908 : : static inline int is_mergeable_anon_vma(struct anon_vma *anon_vma1,
909 : : struct anon_vma *anon_vma2,
910 : : struct vm_area_struct *vma)
911 : : {
912 : : /*
 913                 :            :          * The list_is_singular() test is to avoid merging VMAs cloned from
 914                 :            :          * parents; this improves scalability by reducing anon_vma lock contention.
915 : : */
916 [ + + ][ + + ]: 1034255 : if ((!anon_vma1 || !anon_vma2) && (!vma ||
[ + - ][ + + ]
[ + + ][ + + ]
[ + + ]
917 : 507775 : list_is_singular(&vma->anon_vma_chain)))
918 : : return 1;
919 : 21511 : return anon_vma1 == anon_vma2;
920 : : }
921 : :
922 : : /*
923 : : * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
924 : : * in front of (at a lower virtual address and file offset than) the vma.
925 : : *
926 : : * We cannot merge two vmas if they have differently assigned (non-NULL)
927 : : * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
928 : : *
929 : : * We don't check here for the merged mmap wrapping around the end of pagecache
 930                 :            :  * indices (16TB on ia32) because do_mmap_pgoff() does not permit mmaps which
931 : : * wrap, nor mmaps which cover the final page at index -1UL.
932 : : */
933 : : static int
934 : 0 : can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
935 : : struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
936 : : const char __user *anon_name)
937 : : {
938 [ # # ][ + + ]: 2490300 : if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
939 : 262478 : is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
940 [ + ]: 261606 : if (vma->vm_pgoff == vm_pgoff)
941 : : return 1;
942 : : }
943 : 1966274 : return 0;
944 : : }
945 : :
946 : : /*
947 : : * Return true if we can merge this (vm_flags,anon_vma,file,vm_pgoff)
948 : : * beyond (at a higher virtual address and file offset than) the vma.
949 : : *
950 : : * We cannot merge two vmas if they have differently assigned (non-NULL)
951 : : * anon_vmas, nor if same anon_vma is assigned but offsets incompatible.
952 : : */
953 : : static int
954 : 0 : can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
955 : : struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff,
956 : : const char __user *anon_name)
957 : : {
958 [ # # ][ + + ]: 1537820 : if (is_mergeable_vma(vma, file, vm_flags, anon_name) &&
959 : 245341 : is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
960 : : pgoff_t vm_pglen;
961 : : vm_pglen = vma_pages(vma);
962 [ + ]: 244744 : if (vma->vm_pgoff + vm_pglen == vm_pgoff)
963 : : return 1;
964 : : }
965 : 1047746 : return 0;
966 : : }
967 : :
968 : : /*
969 : : * Given a mapping request (addr,end,vm_flags,file,pgoff,anon_name),
970 : : * figure out whether that can be merged with its predecessor or its
971 : : * successor. Or both (it neatly fills a hole).
972 : : *
973 : : * In most cases - when called for mmap, brk or mremap - [addr,end) is
974 : : * certain not to be mapped by the time vma_merge is called; but when
975 : : * called for mprotect, it is certain to be already mapped (either at
976 : : * an offset within prev, or at the start of next), and the flags of
977 : : * this area are about to be changed to vm_flags - and the no-change
978 : : * case has already been eliminated.
979 : : *
980 : : * The following mprotect cases have to be considered, where AAAA is
981 : : * the area passed down from mprotect_fixup, never extending beyond one
982 : : * vma, PPPPPP is the prev vma specified, and NNNNNN the next vma after:
983 : : *
984 : : * AAAA AAAA AAAA AAAA
985 : : * PPPPPPNNNNNN PPPPPPNNNNNN PPPPPPNNNNNN PPPPNNNNXXXX
986 : : * cannot merge might become might become might become
987 : : * PPNNNNNNNNNN PPPPPPPPPPNN PPPPPPPPPPPP 6 or
988 : : * mmap, brk or case 4 below case 5 below PPPPPPPPXXXX 7 or
989 : : * mremap move: PPPPNNNNNNNN 8
990 : : * AAAA
991 : : * PPPP NNNN PPPPPPPPPPPP PPPPPPPPNNNN PPPPNNNNNNNN
992 : : * might become case 1 below case 2 below case 3 below
993 : : *
994 : : * Odd one out? Case 8, because it extends NNNN but needs flags of XXXX:
995 : : * mprotect_fixup updates vm_flags & vm_page_prot on successful return.
996 : : */
997 : 0 : struct vm_area_struct *vma_merge(struct mm_struct *mm,
998 : : struct vm_area_struct *prev, unsigned long addr,
999 : : unsigned long end, unsigned long vm_flags,
1000 : : struct anon_vma *anon_vma, struct file *file,
1001 : : pgoff_t pgoff, struct mempolicy *policy,
1002 : : const char __user *anon_name)
1003 : : {
1004 : 3638332 : pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
1005 : : struct vm_area_struct *area, *next;
1006 : : int err;
1007 : :
1008 : : /*
1009 : : * We later require that vma->vm_flags == vm_flags,
1010 : : * so this tests vma->vm_flags & VM_SPECIAL, too.
1011 : : */
1012 [ + + ]: 3638332 : if (vm_flags & VM_SPECIAL)
1013 : : return NULL;
1014 : :
1015 [ + + ]: 3638266 : if (prev)
1016 : 3561928 : next = prev->vm_next;
1017 : : else
1018 : 76338 : next = mm->mmap;
1019 : : area = next;
1020 [ + + ][ + + ]: 3638266 : if (next && next->vm_end == end) /* cases 6, 7, 8 */
1021 : 355 : next = next->vm_next;
1022 : :
1023 : : /*
1024 : : * Can it merge with the predecessor?
1025 : : */
1026 [ + + ][ + + ]: 3638266 : if (prev && prev->vm_end == addr &&
1027 [ + + ]: 1292390 : mpol_equal(vma_policy(prev), policy) &&
1028 : 1292391 : can_vma_merge_after(prev, vm_flags, anon_vma,
1029 : : file, pgoff, anon_name)) {
1030 : : /*
1031 : : * OK, it can. Can we now merge in the successor as well?
1032 : : */
1033 [ + + ][ + + ]: 244716 : if (next && end == next->vm_start &&
1034 [ + + ]: 39898 : mpol_equal(policy, vma_policy(next)) &&
1035 : 39898 : can_vma_merge_before(next, vm_flags, anon_vma,
1036 [ + + ]: 18661 : file, pgoff+pglen, anon_name) &&
1037 : 18661 : is_mergeable_anon_vma(prev->anon_vma,
1038 : : next->anon_vma, NULL)) {
1039 : : /* cases 1, 6 */
1040 : 18219 : err = vma_adjust(prev, prev->vm_start,
1041 : : next->vm_end, prev->vm_pgoff, NULL);
1042 : : } else /* cases 2, 5, 7 */
1043 : 226497 : err = vma_adjust(prev, prev->vm_start,
1044 : : end, prev->vm_pgoff, NULL);
1045 [ + - ]: 244717 : if (err)
1046 : : return NULL;
1047 : : khugepaged_enter_vma_merge(prev);
1048 : 244717 : return prev;
1049 : : }
1050 : :
1051 : : /*
1052 : : * Can this new request be merged in front of next?
1053 : : */
1054 [ + + ][ + + ]: 3393549 : if (next && end == next->vm_start &&
1055 [ + + ]: 2187794 : mpol_equal(policy, vma_policy(next)) &&
1056 : 2187734 : can_vma_merge_before(next, vm_flags, anon_vma,
1057 : : file, pgoff+pglen, anon_name)) {
1058 [ + + ][ - + ]: 242929 : if (prev && addr < prev->vm_end) /* case 4 */
1059 : 0 : err = vma_adjust(prev, prev->vm_start,
1060 : : addr, prev->vm_pgoff, NULL);
1061 : : else /* cases 3, 8 */
1062 : 242929 : err = vma_adjust(area, addr, next->vm_end,
1063 : 242929 : next->vm_pgoff - pglen, NULL);
1064 [ + - ]: 242939 : if (err)
1065 : : return NULL;
1066 : : khugepaged_enter_vma_merge(area);
1067 : 242939 : return area;
1068 : : }
1069 : :
1070 : : return NULL;
1071 : : }
1072 : :
1073 : : /*
 1074                 :            :  * Rough compatibility check to quickly see if it's even worth looking
1075 : : * at sharing an anon_vma.
1076 : : *
1077 : : * They need to have the same vm_file, and the flags can only differ
1078 : : * in things that mprotect may change.
1079 : : *
1080 : : * NOTE! The fact that we share an anon_vma doesn't _have_ to mean that
1081 : : * we can merge the two vma's. For example, we refuse to merge a vma if
1082 : : * there is a vm_ops->close() function, because that indicates that the
1083 : : * driver is doing some kind of reference counting. But that doesn't
1084 : : * really matter for the anon_vma sharing case.
1085 : : */
1086 : 0 : static int anon_vma_compatible(struct vm_area_struct *a, struct vm_area_struct *b)
1087 : : {
1088 : 1289955 : return a->vm_end == b->vm_start &&
1089 [ + + ]: 756035 : mpol_equal(vma_policy(a), vma_policy(b)) &&
1090 [ + + ]: 385061 : a->vm_file == b->vm_file &&
1091 [ + + ][ + ]: 2045990 : !((a->vm_flags ^ b->vm_flags) & ~(VM_READ|VM_WRITE|VM_EXEC)) &&
1092 : 3837 : b->vm_pgoff == a->vm_pgoff + ((b->vm_start - a->vm_start) >> PAGE_SHIFT);
1093 : : }
1094 : :
1095 : : /*
1096 : : * Do some basic sanity checking to see if we can re-use the anon_vma
1097 : : * from 'old'. The 'a'/'b' vma's are in VM order - one of them will be
1098 : : * the same as 'old', the other will be the new one that is trying
1099 : : * to share the anon_vma.
1100 : : *
 1101                 :            :  * NOTE! This runs with mmap_sem held for reading, so it is possible that
1102 : : * the anon_vma of 'old' is concurrently in the process of being set up
1103 : : * by another page fault trying to merge _that_. But that's ok: if it
1104 : : * is being set up, that automatically means that it will be a singleton
1105 : : * acceptable for merging, so we can do all of this optimistically. But
1106 : : * we do that ACCESS_ONCE() to make sure that we never re-load the pointer.
1107 : : *
1108 : : * IOW: that the "list_is_singular()" test on the anon_vma_chain only
1109 : : * matters for the 'stable anon_vma' case (ie the thing we want to avoid
1110 : : * is to return an anon_vma that is "complex" due to having gone through
1111 : : * a fork).
1112 : : *
1113 : : * We also make sure that the two vma's are compatible (adjacent,
1114 : : * and with the same memory policies). That's all stable, even with just
 1115                 :            :  * a read lock on the mmap_sem.
1116 : : */
1117 : 0 : static struct anon_vma *reusable_anon_vma(struct vm_area_struct *old, struct vm_area_struct *a, struct vm_area_struct *b)
1118 : : {
1119 [ + + ]: 1289946 : if (anon_vma_compatible(a, b)) {
1120 : 3837 : struct anon_vma *anon_vma = ACCESS_ONCE(old->anon_vma);
1121 : :
1122 [ + + ][ + ]: 7318 : if (anon_vma && list_is_singular(&old->anon_vma_chain))
1123 : 2074 : return anon_vma;
1124 : : }
1125 : : return NULL;
1126 : : }
1127 : :
1128 : : /*
1129 : : * find_mergeable_anon_vma is used by anon_vma_prepare, to check
1130 : : * neighbouring vmas for a suitable anon_vma, before it goes off
1131 : : * to allocate a new anon_vma. It checks because a repetitive
1132 : : * sequence of mprotects and faults may otherwise lead to distinct
1133 : : * anon_vmas being allocated, preventing vma merge in subsequent
1134 : : * mprotect.
1135 : : */
1136 : 0 : struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *vma)
1137 : : {
1138 : : struct anon_vma *anon_vma;
1139 : : struct vm_area_struct *near;
1140 : :
1141 : 705400 : near = vma->vm_next;
1142 [ + + ]: 705400 : if (!near)
1143 : : goto try_prev;
1144 : :
1145 : 646412 : anon_vma = reusable_anon_vma(near, vma, near);
1146 [ + + ]: 646409 : if (anon_vma)
1147 : : return anon_vma;
1148 : : try_prev:
1149 : 703320 : near = vma->vm_prev;
1150 [ + + ]: 703320 : if (!near)
1151 : : goto none;
1152 : :
1153 : 643555 : anon_vma = reusable_anon_vma(near, near, vma);
1154 [ - + ]: 643557 : if (anon_vma)
1155 : 0 : return anon_vma;
1156 : : none:
1157 : : /*
1158 : : * There's no absolute need to look only at touching neighbours:
1159 : : * we could search further afield for "compatible" anon_vmas.
1160 : : * But it would probably just be a waste of time searching,
1161 : : * or lead to too many vmas hanging off the same anon_vma.
1162 : : * We're trying to allow mprotect remerging later on,
1163 : : * not trying to minimize memory used for anon_vmas.
1164 : : */
1165 : : return NULL;
1166 : : }
1167 : :
1168 : : #ifdef CONFIG_PROC_FS
1169 : 0 : void vm_stat_account(struct mm_struct *mm, unsigned long flags,
1170 : : struct file *file, long pages)
1171 : : {
1172 : : const unsigned long stack_flags
1173 : : = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
1174 : :
1175 : 7292855 : mm->total_vm += pages;
1176 : :
1177 [ + + ]: 7292855 : if (file) {
[ - + + + ]
[ + + ]
1178 : 4031196 : mm->shared_vm += pages;
1179 [ + + ][ # # ]: 4031196 : if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
[ + + ][ + + ]
1180 : 979334 : mm->exec_vm += pages;
1181 [ - + ][ + + ]: 3261659 : } else if (flags & stack_flags)
[ - + ][ - + ]
1182 : 2036823 : mm->stack_vm += pages;
1183 : 0 : }
1184 : : #endif /* CONFIG_PROC_FS */
1185 : :
1186 : : /*
1187 : : * If a hint addr is less than mmap_min_addr change hint to be as
1188 : : * low as possible but still greater than mmap_min_addr
1189 : : */
1190 : : static inline unsigned long round_hint_to_min(unsigned long hint)
1191 : : {
1192 : 2388071 : hint &= PAGE_MASK;
1193 [ + + ][ - + ]: 2388071 : if (((void *)hint != NULL) &&
1194 : 48 : (hint < mmap_min_addr))
1195 : 2388071 : return PAGE_ALIGN(mmap_min_addr);
1196 : : return hint;
1197 : : }
1198 : :
1199 : : /*
 1200                 :            :  * The caller must hold down_write(&current->mm->mmap_sem).
1201 : : */
1202 : :
1203 : 0 : unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1204 : : unsigned long len, unsigned long prot,
1205 : : unsigned long flags, unsigned long pgoff,
1206 : : unsigned long *populate)
1207 : : {
1208 : 2883282 : struct mm_struct * mm = current->mm;
1209 : : vm_flags_t vm_flags;
1210 : :
1211 : 2883282 : *populate = 0;
1212 : :
1213 : : /*
1214 : : * Does the application expect PROT_READ to imply PROT_EXEC?
1215 : : *
1216 : : * (the exception is when the underlying filesystem is noexec
 1217                 :            :          * mounted, in which case we don't add PROT_EXEC.)
1218 : : */
1219 [ + + ][ - + ]: 2883282 : if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
1220 [ # # ][ # # ]: 0 : if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
1221 : 0 : prot |= PROT_EXEC;
1222 : :
1223 [ + + ]: 2883282 : if (!len)
1224 : : return -EINVAL;
1225 : :
1226 [ + + ]: 2883271 : if (!(flags & MAP_FIXED))
1227 : : addr = round_hint_to_min(addr);
1228 : :
1229 : : /* Careful about overflows.. */
1230 : 2883271 : len = PAGE_ALIGN(len);
1231 [ + + ]: 2883271 : if (!len)
1232 : : return -ENOMEM;
1233 : :
1234 : : /* offset overflow? */
1235 [ + + ]: 2883236 : if ((pgoff + (len >> PAGE_SHIFT)) < pgoff)
1236 : : return -EOVERFLOW;
1237 : :
1238 : : /* Too many mappings? */
1239 [ + ]: 2883098 : if (mm->map_count > sysctl_max_map_count)
1240 : : return -ENOMEM;
1241 : :
1242 : : /* Obtain the address to map to. we verify (or select) it and ensure
1243 : : * that it represents a valid section of the address space.
1244 : : */
1245 : 2883194 : addr = get_unmapped_area(file, addr, len, pgoff, flags);
1246 [ + + ]: 2883044 : if (addr & ~PAGE_MASK)
1247 : : return addr;
1248 : :
1249 : : /* Do simple checking here so the lower-level routines won't have
1250 : : * to. we assume access permissions have been handled by the open
1251 : : * of the memory object, so we don't do any here.
1252 : : */
1253 : 5721102 : vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
1254 : 2860551 : mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
1255 : :
1256 [ + + ]: 2860551 : if (flags & MAP_LOCKED)
1257 [ + ]: 1 : if (!can_do_mlock())
1258 : : return -EPERM;
1259 : :
1260 : : /* mlock MCL_FUTURE? */
1261 [ + + ]: 2860697 : if (vm_flags & VM_LOCKED) {
1262 : : unsigned long locked, lock_limit;
1263 : : locked = len >> PAGE_SHIFT;
1264 : 181 : locked += mm->locked_vm;
1265 : : lock_limit = rlimit(RLIMIT_MEMLOCK);
1266 : 181 : lock_limit >>= PAGE_SHIFT;
1267 [ + - ][ + - ]: 181 : if (locked > lock_limit && !capable(CAP_IPC_LOCK))
1268 : : return -EAGAIN;
1269 : : }
1270 : :
1271 [ + + ]: 2860697 : if (file) {
1272 : : struct inode *inode = file_inode(file);
1273 : :
1274 [ + + + ]: 1380262 : switch (flags & MAP_TYPE) {
1275 : : case MAP_SHARED:
1276 [ + + ][ + ]: 171235 : if ((prot&PROT_WRITE) && !(file->f_mode&FMODE_WRITE))
1277 : : return -EACCES;
1278 : :
1279 : : /*
1280 : : * Make sure we don't allow writing to an append-only
1281 : : * file..
1282 : : */
1283 [ - + ][ # # ]: 3054464 : if (IS_APPEND(inode) && (file->f_mode & FMODE_WRITE))
1284 : : return -EACCES;
1285 : :
1286 : : /*
1287 : : * Make sure there are no mandatory locks on the file.
1288 : : */
1289 [ + - ]: 171182 : if (locks_verify_locked(inode))
1290 : : return -EAGAIN;
1291 : :
1292 : 171182 : vm_flags |= VM_SHARED | VM_MAYSHARE;
1293 [ + + ]: 171182 : if (!(file->f_mode & FMODE_WRITE))
1294 : 54501 : vm_flags &= ~(VM_MAYWRITE | VM_SHARED);
1295 : :
1296 : : /* fall through */
1297 : : case MAP_PRIVATE:
1298 [ + + ]: 1380206 : if (!(file->f_mode & FMODE_READ))
1299 : : return -EACCES;
1300 [ - + ]: 1380204 : if (file->f_path.mnt->mnt_flags & MNT_NOEXEC) {
1301 [ # # ]: 0 : if (vm_flags & VM_EXEC)
1302 : : return -EPERM;
1303 : 0 : vm_flags &= ~VM_MAYEXEC;
1304 : : }
1305 : :
1306 [ + + ]: 1380204 : if (!file->f_op->mmap)
1307 : : return -ENODEV;
1308 [ + ]: 1380113 : if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1309 : : return -EINVAL;
1310 : : break;
1311 : :
1312 : : default:
1313 : : return -EINVAL;
1314 : : }
1315 : : } else {
1316 [ + + + ]: 1480435 : switch (flags & MAP_TYPE) {
1317 : : case MAP_SHARED:
1318 [ + - ]: 872602 : if (vm_flags & (VM_GROWSDOWN|VM_GROWSUP))
1319 : : return -EINVAL;
1320 : : /*
1321 : : * Ignore pgoff.
1322 : : */
1323 : : pgoff = 0;
1324 : 872602 : vm_flags |= VM_SHARED | VM_MAYSHARE;
1325 : 872602 : break;
1326 : : case MAP_PRIVATE:
1327 : : /*
1328 : : * Set pgoff according to addr for anon_vma.
1329 : : */
1330 : 607832 : pgoff = addr >> PAGE_SHIFT;
1331 : 607832 : break;
1332 : : default:
1333 : : return -EINVAL;
1334 : : }
1335 : : }
1336 : :
1337 : : /*
1338 : : * Set 'VM_NORESERVE' if we should not account for the
1339 : : * memory use of this mapping.
1340 : : */
1341 [ + + ]: 2860638 : if (flags & MAP_NORESERVE) {
1342 : : /* We honor MAP_NORESERVE if allowed to overcommit */
1343 [ + - ]: 323 : if (sysctl_overcommit_memory != OVERCOMMIT_NEVER)
1344 : 323 : vm_flags |= VM_NORESERVE;
1345 : :
1346 : : /* hugetlb applies strict overcommit unless MAP_NORESERVE */
1347 : : if (file && is_file_hugepages(file))
1348 : : vm_flags |= VM_NORESERVE;
1349 : : }
1350 : :
1351 : 2860638 : addr = mmap_region(file, addr, len, vm_flags, pgoff);
1352 [ + + ][ + + ]: 2860641 : if (!IS_ERR_VALUE(addr) &&
1353 [ + ]: 2860409 : ((vm_flags & VM_LOCKED) ||
1354 : 2860409 : (flags & (MAP_POPULATE | MAP_NONBLOCK)) == MAP_POPULATE))
1355 : 195 : *populate = len;
1356 : 2860641 : return addr;
1357 : : }
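/*
 * Hedged sketch of the locking contract stated above do_mmap_pgoff():
 * in-tree callers normally go through vm_mmap_pgoff() in mm/util.c, which
 * takes mmap_sem for writing around the call and performs any requested
 * population only after the lock has been dropped.  This is an approximation
 * (the security_mmap_file() check is omitted), not a copy of that function.
 */
static unsigned long example_vm_mmap_pgoff(struct file *file, unsigned long addr,
			unsigned long len, unsigned long prot,
			unsigned long flag, unsigned long pgoff)
{
	unsigned long populate;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);
	addr = do_mmap_pgoff(file, addr, len, prot, flag, pgoff, &populate);
	up_write(&mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);
	return addr;
}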
1358 : :
1359 : 0 : SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1360 : : unsigned long, prot, unsigned long, flags,
1361 : : unsigned long, fd, unsigned long, pgoff)
1362 : : {
1363 : : struct file *file = NULL;
1364 : : unsigned long retval = -EBADF;
1365 : :
1366 [ + + ]: 2646172 : if (!(flags & MAP_ANONYMOUS)) {
1367 : : audit_mmap_fd(fd, flags);
1368 : 1143180 : file = fget(fd);
1369 [ + + ]: 1143272 : if (!file)
1370 : : goto out;
1371 : : if (is_file_hugepages(file))
1372 : : len = ALIGN(len, huge_page_size(hstate_file(file)));
1373 : : retval = -EINVAL;
1374 [ + ]: 1143217 : if (unlikely(flags & MAP_HUGETLB && !is_file_hugepages(file)))
1375 : : goto out_fput;
1376 [ + ]: 1502992 : } else if (flags & MAP_HUGETLB) {
1377 : : struct user_struct *user = NULL;
1378 : : struct hstate *hs;
1379 : :
1380 : : hs = hstate_sizelog((flags >> MAP_HUGE_SHIFT) & SHM_HUGE_MASK);
1381 : : if (!hs)
1382 : : return -EINVAL;
1383 : :
1384 : : len = ALIGN(len, huge_page_size(hs));
1385 : : /*
1386 : : * VM_NORESERVE is used because the reservations will be
1387 : : * taken when vm_ops->mmap() is called
1388 : : * A dummy user value is used because we are not locking
1389 : : * memory so no accounting is necessary
1390 : : */
1391 : : file = hugetlb_file_setup(HUGETLB_ANON_FILE, len,
1392 : : VM_NORESERVE,
1393 : : &user, HUGETLB_ANONHUGE_INODE,
1394 : : (flags >> MAP_HUGE_SHIFT) & MAP_HUGE_MASK);
1395 : : if (IS_ERR(file))
1396 : : return PTR_ERR(file);
1397 : : }
1398 : :
1399 : 2646289 : flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1400 : :
1401 : 2646289 : retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1402 : : out_fput:
1403 [ + + ]: 2646219 : if (file)
1404 : 1143314 : fput(file);
1405 : : out:
1406 : 2646280 : return retval;
1407 : : }
1408 : :
1409 : : #ifdef __ARCH_WANT_SYS_OLD_MMAP
1410 : : struct mmap_arg_struct {
1411 : : unsigned long addr;
1412 : : unsigned long len;
1413 : : unsigned long prot;
1414 : : unsigned long flags;
1415 : : unsigned long fd;
1416 : : unsigned long offset;
1417 : : };
1418 : :
1419 : 0 : SYSCALL_DEFINE1(old_mmap, struct mmap_arg_struct __user *, arg)
1420 : : {
1421 : : struct mmap_arg_struct a;
1422 : :
1423 [ # # ]: 0 : if (copy_from_user(&a, arg, sizeof(a)))
1424 : : return -EFAULT;
1425 [ # # ]: 0 : if (a.offset & ~PAGE_MASK)
1426 : : return -EINVAL;
1427 : :
1428 : 0 : return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
1429 : : a.offset >> PAGE_SHIFT);
1430 : : }
1431 : : #endif /* __ARCH_WANT_SYS_OLD_MMAP */
1432 : :
1433 : : /*
 1434                 :            :  * Some shared mappings will want the pages marked read-only
1435 : : * to track write events. If so, we'll downgrade vm_page_prot
1436 : : * to the private version (using protection_map[] without the
1437 : : * VM_SHARED bit).
1438 : : */
1439 : 0 : int vma_wants_writenotify(struct vm_area_struct *vma)
1440 : : {
1441 : 3060480 : vm_flags_t vm_flags = vma->vm_flags;
1442 : :
1443 : : /* If it was private or non-writable, the write bit is already clear */
1444 [ + + ]: 3060480 : if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
1445 : : return 0;
1446 : :
1447 : : /* The backer wishes to know when pages are first written to? */
1448 [ + + ][ + + ]: 114683 : if (vma->vm_ops && vma->vm_ops->page_mkwrite)
1449 : : return 1;
1450 : :
1451 : : /* The open routine did something to the protections already? */
1452 [ + - ]: 1097 : if (pgprot_val(vma->vm_page_prot) !=
1453 : : pgprot_val(vm_get_page_prot(vm_flags)))
1454 : : return 0;
1455 : :
1456 : : /* Specialty mapping? */
1457 [ + ]: 1097 : if (vm_flags & VM_PFNMAP)
1458 : : return 0;
1459 : :
1460 : : /* Can the mapping track the dirty pages? */
1461 [ + - ][ + - ]: 3061577 : return vma->vm_file && vma->vm_file->f_mapping &&
[ + - ]
1462 : : mapping_cap_account_dirty(vma->vm_file->f_mapping);
1463 : : }
1464 : :
1465 : : /*
1466 : : * We account for memory if it's a private writeable mapping,
1467 : : * not hugepages and VM_NORESERVE wasn't set.
1468 : : */
1469 : : static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
1470 : : {
1471 : : /*
1472 : : * hugetlb has its own accounting separate from the core VM
1473 : : * VM_HUGETLB may not be set yet so we cannot check for that flag.
1474 : : */
1475 : : if (file && is_file_hugepages(file))
1476 : : return 0;
1477 : :
1478 : 2860558 : return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
1479 : : }
1480 : :
1481 : 0 : unsigned long mmap_region(struct file *file, unsigned long addr,
1482 : : unsigned long len, vm_flags_t vm_flags, unsigned long pgoff)
1483 : : {
1484 : 2860511 : struct mm_struct *mm = current->mm;
1485 : : struct vm_area_struct *vma, *prev;
1486 : : int error;
1487 : : struct rb_node **rb_link, *rb_parent;
1488 : : unsigned long charged = 0;
1489 : :
1490 : : /* Check against address space limit. */
1491 [ - + ]: 2860511 : if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
1492 : : unsigned long nr_pages;
1493 : :
1494 : : /*
1495 : : * MAP_FIXED may remove pages of mappings that intersects with
1496 : : * requested mapping. Account for the pages it would unmap.
1497 : : */
1498 [ # # ]: 0 : if (!(vm_flags & MAP_FIXED))
1499 : : return -ENOMEM;
1500 : :
1501 : 0 : nr_pages = count_vma_pages_range(mm, addr, addr + len);
1502 : :
1503 [ # # ]: 2860511 : if (!may_expand_vm(mm, (len >> PAGE_SHIFT) - nr_pages))
1504 : : return -ENOMEM;
1505 : : }
1506 : :
1507 : : /* Clear old maps */
1508 : : error = -ENOMEM;
1509 : : munmap_back:
1510 [ + + ]: 3179294 : if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
1511 [ + + ]: 318736 : if (do_munmap(mm, addr, len))
1512 : : return -ENOMEM;
1513 : : goto munmap_back;
1514 : : }
1515 : :
1516 : : /*
1517 : : * Private writable mapping: check memory availability
1518 : : */
1519 [ + + ]: 2860558 : if (accountable_mapping(file, vm_flags)) {
1520 : : charged = len >> PAGE_SHIFT;
1521 [ + + ]: 965708 : if (security_vm_enough_memory_mm(mm, charged))
1522 : : return -ENOMEM;
1523 : 965672 : vm_flags |= VM_ACCOUNT;
1524 : : }
1525 : :
1526 : : /*
1527 : : * Can we just expand an old mapping?
1528 : : */
1529 : 2860522 : vma = vma_merge(mm, prev, addr, addr + len, vm_flags, NULL, file, pgoff,
1530 : : NULL, NULL);
1531 [ + + ]: 2860382 : if (vma)
1532 : : goto out;
1533 : :
1534 : : /*
1535 : : * Determine the object being mapped and call the appropriate
1536 : : * specific mapper. The address has already been validated but
1537 : : * not unmapped; the old maps have been removed from the list.
1538 : : */
1539 : 2458916 : vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
1540 [ + ]: 2459230 : if (!vma) {
1541 : : error = -ENOMEM;
1542 : : goto unacct_error;
1543 : : }
1544 : :
1545 : 2459370 : vma->vm_mm = mm;
1546 : 2459370 : vma->vm_start = addr;
1547 : 2459370 : vma->vm_end = addr + len;
1548 : 2459370 : vma->vm_flags = vm_flags;
1549 : 2459370 : vma->vm_page_prot = vm_get_page_prot(vm_flags);
1550 : 2459370 : vma->vm_pgoff = pgoff;
1551 : 2459370 : INIT_LIST_HEAD(&vma->anon_vma_chain);
1552 : :
1553 [ + + ]: 2459370 : if (file) {
1554 [ + + ]: 1380151 : if (vm_flags & VM_DENYWRITE) {
1555 : : error = deny_write_access(file);
1556 [ + ]: 235836 : if (error)
1557 : : goto free_vma;
1558 : : }
1559 : 1380113 : vma->vm_file = get_file(file);
1560 : 1380113 : error = file->f_op->mmap(file, vma);
1561 [ + - ]: 1380125 : if (error)
1562 : : goto unmap_and_free_vma;
1563 : :
1564 : : /* Can addr have changed??
1565 : : *
1566 : : * Answer: Yes, several device drivers can do it in their
1567 : : * f_op->mmap method. -DaveM
1568 : : * Bug: If addr is changed, prev, rb_link, rb_parent should
1569 : : * be updated for vma_link()
1570 : : */
1571 [ - + ][ # # ]: 1380125 : WARN_ON_ONCE(addr != vma->vm_start);
[ - + ]
1572 : :
1573 : 1380145 : addr = vma->vm_start;
1574 : 1380145 : vm_flags = vma->vm_flags;
1575 [ + + ]: 1079219 : } else if (vm_flags & VM_SHARED) {
1576 : 872602 : error = shmem_zero_setup(vma);
1577 [ + - ]: 872602 : if (error)
1578 : : goto free_vma;
1579 : : }
1580 : :
1581 [ + + ]: 2459364 : if (vma_wants_writenotify(vma)) {
1582 : 113550 : pgprot_t pprot = vma->vm_page_prot;
1583 : :
1584 : : /* Can vma->vm_page_prot have changed??
1585 : : *
1586 : : * Answer: Yes, drivers may have changed it in their
1587 : : * f_op->mmap method.
1588 : : *
1589 : : * Ensures that vmas marked as uncached stay that way.
1590 : : */
1591 : 113550 : vma->vm_page_prot = vm_get_page_prot(vm_flags & ~VM_SHARED);
1592 [ - + ]: 113550 : if (pgprot_val(pprot) == pgprot_val(pgprot_noncached(pprot)))
1593 : 0 : vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
1594 : : }
1595 : :
1596 : 2459309 : vma_link(mm, vma, prev, rb_link, rb_parent);
1597 : : /* Once vma denies write, undo our temporary denial count */
1598 [ + + ]: 2459438 : if (vm_flags & VM_DENYWRITE)
1599 : : allow_write_access(file);
1600 : 2459403 : file = vma->vm_file;
1601 : : out:
1602 : 2860869 : perf_event_mmap(vma);
1603 : :
1604 : : vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
1605 [ + + ]: 2860666 : if (vm_flags & VM_LOCKED) {
1606 [ + - + - ]: 362 : if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
1607 : 181 : vma == get_gate_vma(current->mm)))
1608 : 181 : mm->locked_vm += (len >> PAGE_SHIFT);
1609 : : else
1610 : 0 : vma->vm_flags &= ~VM_LOCKED;
1611 : : }
1612 : :
1613 : : if (file)
1614 : : uprobe_mmap(vma);
1615 : :
1616 : : /*
1617 : : * A new (or expanded) vma always gets soft-dirty status.
1618 : : * Otherwise the user-space soft-dirty page tracker would not
1619 : : * be able to tell that a vma area was unmapped and a new one
1620 : : * then mapped in its place (which must be treated as
1621 : : * a completely new data area).
1622 : : */
1623 : : vma->vm_flags |= VM_SOFTDIRTY;
1624 : :
1625 : 2860666 : return addr;
1626 : :
1627 : : unmap_and_free_vma:
1628 [ # # ]: 0 : if (vm_flags & VM_DENYWRITE)
1629 : : allow_write_access(file);
1630 : 0 : vma->vm_file = NULL;
1631 : 0 : fput(file);
1632 : :
1633 : : /* Undo any partial mapping done by a device driver. */
1634 : 0 : unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
1635 : : charged = 0;
1636 : : free_vma:
1637 : 0 : kmem_cache_free(vm_area_cachep, vma);
1638 : : unacct_error:
1639 [ # # ]: 0 : if (charged)
1640 : 0 : vm_unacct_memory(charged);
1641 : 0 : return error;
1642 : : }
1643 : :
1644 : 0 : unsigned long unmapped_area(struct vm_unmapped_area_info *info)
1645 : : {
1646 : : /*
1647 : : * We implement the search by looking for an rbtree node that
1648 : : * immediately follows a suitable gap. That is,
1649 : : * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
1650 : : * - gap_end = vma->vm_start >= info->low_limit + length;
1651 : : * - gap_end - gap_start >= length
1652 : : */
1653 : :
1654 : 435721 : struct mm_struct *mm = current->mm;
1655 : : struct vm_area_struct *vma;
1656 : : unsigned long length, low_limit, high_limit, gap_start, gap_end;
1657 : :
1658 : : /* Adjust search length to account for worst case alignment overhead */
1659 : 435721 : length = info->length + info->align_mask;
1660 [ + ]: 435721 : if (length < info->length)
1661 : : return -ENOMEM;
1662 : :
1663 : : /* Adjust search limits by the desired length */
1664 [ + ]: 435722 : if (info->high_limit < length)
1665 : : return -ENOMEM;
1666 : 435723 : high_limit = info->high_limit - length;
1667 : :
1668 [ + + ]: 435723 : if (info->low_limit > high_limit)
1669 : : return -ENOMEM;
1670 : 432145 : low_limit = info->low_limit + length;
1671 : :
1672 : : /* Check if rbtree root looks promising */
1673 [ + + ]: 432145 : if (RB_EMPTY_ROOT(&mm->mm_rb))
1674 : : goto check_highest;
1675 : 432139 : vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1676 [ + ]: 2240035 : if (vma->rb_subtree_gap < length)
1677 : : goto check_highest;
1678 : :
1679 : : while (true) {
1680 : : /* Visit left subtree if it looks promising */
1681 : 2221037 : gap_end = vma->vm_start;
1682 [ + + ][ + + ]: 2221037 : if (gap_end >= low_limit && vma->vm_rb.rb_left) {
1683 : 1488645 : struct vm_area_struct *left =
1684 : : rb_entry(vma->vm_rb.rb_left,
1685 : : struct vm_area_struct, vm_rb);
1686 [ + + ]: 1488645 : if (left->rb_subtree_gap >= length) {
1687 : : vma = left;
1688 : 391827 : continue;
1689 : : }
1690 : : }
1691 : :
1692 [ + + ]: 1829210 : gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1693 : : check_current:
1694 : : /* Check if current node has a suitable gap */
1695 [ + + ]: 2078392 : if (gap_start > high_limit)
1696 : : return -ENOMEM;
1697 [ + + ][ + + ]: 2078381 : if (gap_end >= low_limit && gap_end - gap_start >= length)
1698 : : goto found;
1699 : :
1700 : : /* Visit right subtree if it looks promising */
1701 [ + + ]: 1665253 : if (vma->vm_rb.rb_right) {
1702 : 1605655 : struct vm_area_struct *right =
1703 : : rb_entry(vma->vm_rb.rb_right,
1704 : : struct vm_area_struct, vm_rb);
1705 [ + + ]: 1605655 : if (right->rb_subtree_gap >= length) {
1706 : : vma = right;
1707 : 1665253 : continue;
1708 : : }
1709 : : }
1710 : :
1711 : : /* Go back up the rbtree to find next candidate node */
1712 : : while (true) {
1713 : 341933 : struct rb_node *prev = &vma->vm_rb;
1714 [ + + ]: 341933 : if (!rb_parent(prev))
1715 : : goto check_highest;
1716 : 341931 : vma = rb_entry(rb_parent(prev),
1717 : : struct vm_area_struct, vm_rb);
1718 [ + + ]: 341931 : if (prev == vma->vm_rb.rb_left) {
1719 : 249182 : gap_start = vma->vm_prev->vm_end;
1720 : 249182 : gap_end = vma->vm_start;
1721 : 249182 : goto check_current;
1722 : : }
1723 : : }
1724 : : }
1725 : :
1726 : : check_highest:
1727 : : /* Check highest gap, which does not precede any rbtree node */
1728 : 0 : gap_start = mm->highest_vm_end;
1729 : : gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
1730 [ + ]: 0 : if (gap_start > high_limit)
1731 : : return -ENOMEM;
1732 : :
1733 : : found:
1734 : : /* We found a suitable gap. Clip it with the original low_limit. */
1735 [ + + ]: 413229 : if (gap_start < info->low_limit)
1736 : : gap_start = info->low_limit;
1737 : :
1738 : : /* Adjust gap address to the desired alignment */
1739 : 413229 : gap_start += (info->align_offset - gap_start) & info->align_mask;
1740 : :
1741 : : VM_BUG_ON(gap_start + info->length > info->high_limit);
1742 : : VM_BUG_ON(gap_start + info->length > gap_end);
1743 : 413229 : return gap_start;
1744 : : }
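
The final alignment fix-up above is easy to misread; a small stand-alone sketch of the same expression, with invented numbers, shows what it does:

#include <stdio.h>

int main(void)
{
	/* Same expression as in unmapped_area(): advance gap_start to the
	 * first address congruent to align_offset modulo (align_mask + 1).
	 * All values below are made up for the illustration. */
	unsigned long gap_start    = 0x7f1234567000UL;
	unsigned long align_mask   = 0x1fffffUL;	/* 2 MiB alignment - 1 */
	unsigned long align_offset = 0;

	gap_start += (align_offset - gap_start) & align_mask;
	printf("aligned gap_start = %#lx\n", gap_start);	/* 0x7f1234600000 */
	return 0;
}
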
1745 : :
1746 : 0 : unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
1747 : : {
1748 : 2034012 : struct mm_struct *mm = current->mm;
1749 : : struct vm_area_struct *vma;
1750 : : unsigned long length, low_limit, high_limit, gap_start, gap_end;
1751 : :
1752 : : /* Adjust search length to account for worst case alignment overhead */
1753 : 2034012 : length = info->length + info->align_mask;
1754 [ + ]: 2034012 : if (length < info->length)
1755 : : return -ENOMEM;
1756 : :
1757 : : /*
1758 : : * Adjust search limits by the desired length.
1759 : : * See implementation comment at top of unmapped_area().
1760 : : */
1761 : 2034050 : gap_end = info->high_limit;
1762 [ + + ]: 2034050 : if (gap_end < length)
1763 : : return -ENOMEM;
1764 : 2033897 : high_limit = gap_end - length;
1765 : :
1766 [ + ]: 2033897 : if (info->low_limit > high_limit)
1767 : : return -ENOMEM;
1768 : 2034016 : low_limit = info->low_limit + length;
1769 : :
1770 : : /* Check highest gap, which does not precede any rbtree node */
1771 : 2034016 : gap_start = mm->highest_vm_end;
1772 [ + + ]: 2034016 : if (gap_start <= high_limit)
1773 : : goto found_highest;
1774 : :
1775 : : /* Check if rbtree root looks promising */
1776 [ + ]: 2033965 : if (RB_EMPTY_ROOT(&mm->mm_rb))
1777 : : return -ENOMEM;
1778 : 2033992 : vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
1779 [ + ]: 40673137 : if (vma->rb_subtree_gap < length)
1780 : : return -ENOMEM;
1781 : :
1782 : : while (true) {
1783 : : /* Visit right subtree if it looks promising */
1784 [ + + ]: 40650480 : gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
1785 [ + + ][ + + ]: 40650480 : if (gap_start <= high_limit && vma->vm_rb.rb_right) {
1786 : 37257008 : struct vm_area_struct *right =
1787 : : rb_entry(vma->vm_rb.rb_right,
1788 : : struct vm_area_struct, vm_rb);
1789 [ + + ]: 40650480 : if (right->rb_subtree_gap >= length) {
1790 : : vma = right;
1791 : 30909887 : continue;
1792 : : }
1793 : : }
1794 : :
1795 : : check_current:
1796 : : /* Check if current node has a suitable gap */
1797 : 37568093 : gap_end = vma->vm_start;
1798 [ + ]: 37568093 : if (gap_end < low_limit)
1799 : : return -ENOMEM;
1800 [ + + ][ + + ]: 37568230 : if (gap_start <= high_limit && gap_end - gap_start >= length)
1801 : : goto found;
1802 : :
1803 : : /* Visit left subtree if it looks promising */
1804 [ + + ]: 35557061 : if (vma->vm_rb.rb_left) {
1805 : 33618107 : struct vm_area_struct *left =
1806 : : rb_entry(vma->vm_rb.rb_left,
1807 : : struct vm_area_struct, vm_rb);
1808 [ + + ]: 33618107 : if (left->rb_subtree_gap >= length) {
1809 : : vma = left;
1810 : 35557061 : continue;
1811 : : }
1812 : : }
1813 : :
1814 : : /* Go back up the rbtree to find next candidate node */
1815 : : while (true) {
1816 : 13914053 : struct rb_node *prev = &vma->vm_rb;
1817 [ + + ]: 13914053 : if (!rb_parent(prev))
1818 : : return -ENOMEM;
1819 : 13913750 : vma = rb_entry(rb_parent(prev),
1820 : : struct vm_area_struct, vm_rb);
1821 [ - + ]: 13913750 : if (prev == vma->vm_rb.rb_right) {
1822 : 13913750 : gap_start = vma->vm_prev ?
1823 [ + + ]: 13913750 : vma->vm_prev->vm_end : 0;
1824 : : goto check_current;
1825 : : }
1826 : : }
1827 : : }
1828 : :
1829 : : found:
1830 : : /* We found a suitable gap. Clip it with the original high_limit. */
1831 [ + + ]: 2011169 : if (gap_end > info->high_limit)
1832 : : gap_end = info->high_limit;
1833 : :
1834 : : found_highest:
1835 : : /* Compute highest gap address at the desired alignment */
1836 : 2011220 : gap_end -= info->length;
1837 : 2011220 : gap_end -= (gap_end - info->align_offset) & info->align_mask;
1838 : :
1839 : : VM_BUG_ON(gap_end < info->low_limit);
1840 : : VM_BUG_ON(gap_end < gap_start);
1841 : 2011220 : return gap_end;
1842 : : }
1843 : :
1844 : : /* Get an address range which is currently unmapped.
1845 : : * For shmat() with addr=0.
1846 : : *
1847 : : * Ugly calling convention alert:
1848 : : * Return value with the low bits set means error value,
1849 : : * ie
1850 : : * if (ret & ~PAGE_MASK)
1851 : : * error = ret;
1852 : : *
1853 : : * This function "knows" that -ENOMEM has the bits set.
1854 : : */
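
A tiny stand-alone sketch of why that convention works: page-aligned addresses have the low bits clear, while small negative errnos do not, so the same unsigned long can carry either (4 KiB pages assumed here):

#include <errno.h>
#include <stdio.h>

int main(void)
{
	unsigned long page_mask = ~0xfffUL;		/* 4 KiB pages assumed */
	unsigned long ok  = 0x7f0000400000UL;		/* page-aligned address */
	unsigned long bad = (unsigned long)-ENOMEM;	/* -12 -> 0xff...fff4 */

	printf("ok  & ~PAGE_MASK = %#lx -> %s\n", ok & ~page_mask,
	       (ok & ~page_mask) ? "error" : "address");
	printf("bad & ~PAGE_MASK = %#lx -> %s\n", bad & ~page_mask,
	       (bad & ~page_mask) ? "error" : "address");
	return 0;
}
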
1855 : : #ifndef HAVE_ARCH_UNMAPPED_AREA
1856 : : unsigned long
1857 : : arch_get_unmapped_area(struct file *filp, unsigned long addr,
1858 : : unsigned long len, unsigned long pgoff, unsigned long flags)
1859 : : {
1860 : : struct mm_struct *mm = current->mm;
1861 : : struct vm_area_struct *vma;
1862 : : struct vm_unmapped_area_info info;
1863 : :
1864 : : if (len > TASK_SIZE - mmap_min_addr)
1865 : : return -ENOMEM;
1866 : :
1867 : : if (flags & MAP_FIXED)
1868 : : return addr;
1869 : :
1870 : : if (addr) {
1871 : : addr = PAGE_ALIGN(addr);
1872 : : vma = find_vma(mm, addr);
1873 : : if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
1874 : : (!vma || addr + len <= vma->vm_start))
1875 : : return addr;
1876 : : }
1877 : :
1878 : : info.flags = 0;
1879 : : info.length = len;
1880 : : info.low_limit = mm->mmap_base;
1881 : : info.high_limit = TASK_SIZE;
1882 : : info.align_mask = 0;
1883 : : return vm_unmapped_area(&info);
1884 : : }
1885 : : #endif
1886 : :
1887 : : /*
1888 : : * This mmap-allocator allocates new areas top-down from below the
1889 : : * stack's low limit (the base):
1890 : : */
1891 : : #ifndef HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
1892 : : unsigned long
1893 : : arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
1894 : : const unsigned long len, const unsigned long pgoff,
1895 : : const unsigned long flags)
1896 : : {
1897 : : struct vm_area_struct *vma;
1898 : : struct mm_struct *mm = current->mm;
1899 : : unsigned long addr = addr0;
1900 : : struct vm_unmapped_area_info info;
1901 : :
1902 : : /* requested length too big for entire address space */
1903 : : if (len > TASK_SIZE - mmap_min_addr)
1904 : : return -ENOMEM;
1905 : :
1906 : : if (flags & MAP_FIXED)
1907 : : return addr;
1908 : :
1909 : : /* requesting a specific address */
1910 : : if (addr) {
1911 : : addr = PAGE_ALIGN(addr);
1912 : : vma = find_vma(mm, addr);
1913 : : if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
1914 : : (!vma || addr + len <= vma->vm_start))
1915 : : return addr;
1916 : : }
1917 : :
1918 : : info.flags = VM_UNMAPPED_AREA_TOPDOWN;
1919 : : info.length = len;
1920 : : info.low_limit = max(PAGE_SIZE, mmap_min_addr);
1921 : : info.high_limit = mm->mmap_base;
1922 : : info.align_mask = 0;
1923 : : addr = vm_unmapped_area(&info);
1924 : :
1925 : : /*
1926 : : * A failed mmap() very likely causes application failure,
1927 : : * so fall back to the bottom-up function here. This scenario
1928 : : * can happen with large stack limits and large mmap()
1929 : : * allocations.
1930 : : */
1931 : : if (addr & ~PAGE_MASK) {
1932 : : VM_BUG_ON(addr != -ENOMEM);
1933 : : info.flags = 0;
1934 : : info.low_limit = TASK_UNMAPPED_BASE;
1935 : : info.high_limit = TASK_SIZE;
1936 : : addr = vm_unmapped_area(&info);
1937 : : }
1938 : :
1939 : : return addr;
1940 : : }
1941 : : #endif
1942 : :
1943 : : unsigned long
1944 : 0 : get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1945 : : unsigned long pgoff, unsigned long flags)
1946 : : {
1947 : : unsigned long (*get_area)(struct file *, unsigned long,
1948 : : unsigned long, unsigned long, unsigned long);
1949 : :
1950 [ + + ][ + + ]: 3118403 : unsigned long error = arch_mmap_check(addr, len, flags);
1951 [ + + ]: 3118403 : if (error)
1952 : : return error;
1953 : :
1954 : : /* Careful about overflows.. */
1955 [ + + ]: 3118376 : if (len > TASK_SIZE)
1956 : : return -ENOMEM;
1957 : :
1958 : 3118252 : get_area = current->mm->get_unmapped_area;
1959 [ + + ][ - + ]: 3118252 : if (file && file->f_op->get_unmapped_area)
1960 : : get_area = file->f_op->get_unmapped_area;
1961 : 3118252 : addr = get_area(file, addr, len, pgoff, flags);
1962 [ + + ]: 3118248 : if (IS_ERR_VALUE(addr))
1963 : : return addr;
1964 : :
1965 [ + + ]: 3095803 : if (addr > TASK_SIZE - len)
1966 : : return -ENOMEM;
1967 [ + + ]: 3095783 : if (addr & ~PAGE_MASK)
1968 : : return -EINVAL;
1969 : :
1970 : : addr = arch_rebalance_pgtables(addr, len);
1971 : 3095767 : error = security_mmap_addr(addr);
1972 [ + + ]: 3095731 : return error ? error : addr;
1973 : : }
1974 : :
1975 : : EXPORT_SYMBOL(get_unmapped_area);
1976 : :
1977 : : /* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
1978 : 0 : struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1979 : : {
1980 : : struct vm_area_struct *vma = NULL;
1981 : :
1982 : : /* Check the cache first. */
1983 : : /* (Cache hit rate is typically around 35%.) */
1984 : 91849709 : vma = ACCESS_ONCE(mm->mmap_cache);
1985 [ + + ][ + + ]: 91849709 : if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
[ + + ]
1986 : : struct rb_node *rb_node;
1987 : :
1988 : 52253619 : rb_node = mm->mm_rb.rb_node;
1989 : : vma = NULL;
1990 : :
1991 [ + + ]: 210669697 : while (rb_node) {
1992 : : struct vm_area_struct *vma_tmp;
1993 : :
1994 : 209836089 : vma_tmp = rb_entry(rb_node,
1995 : : struct vm_area_struct, vm_rb);
1996 : :
1997 [ + + ]: 209836089 : if (vma_tmp->vm_end > addr) {
1998 : : vma = vma_tmp;
1999 [ + + ]: 114571633 : if (vma_tmp->vm_start <= addr)
2000 : : break;
2001 : 63151622 : rb_node = rb_node->rb_left;
2002 : : } else
2003 : 158416078 : rb_node = rb_node->rb_right;
2004 : : }
2005 [ + ]: 52253619 : if (vma)
2006 : 52249843 : mm->mmap_cache = vma;
2007 : : }
2008 : 0 : return vma;
2009 : : }
2010 : :
2011 : : EXPORT_SYMBOL(find_vma);
2012 : :
2013 : : /*
2014 : : * Same as find_vma, but also return a pointer to the previous VMA in *pprev.
2015 : : */
2016 : : struct vm_area_struct *
2017 : 0 : find_vma_prev(struct mm_struct *mm, unsigned long addr,
2018 : : struct vm_area_struct **pprev)
2019 : : {
2020 : : struct vm_area_struct *vma;
2021 : :
2022 : 2643 : vma = find_vma(mm, addr);
2023 [ + - ]: 2643 : if (vma) {
2024 : 2643 : *pprev = vma->vm_prev;
2025 : : } else {
2026 : 0 : struct rb_node *rb_node = mm->mm_rb.rb_node;
2027 : 0 : *pprev = NULL;
2028 [ # # ]: 0 : while (rb_node) {
2029 : 0 : *pprev = rb_entry(rb_node, struct vm_area_struct, vm_rb);
2030 : 0 : rb_node = rb_node->rb_right;
2031 : : }
2032 : : }
2033 : 0 : return vma;
2034 : : }
2035 : :
2036 : : /*
2037 : : * Verify that the stack growth is acceptable and
2038 : : * update accounting. This is shared with both the
2039 : : * grow-up and grow-down cases.
2040 : : */
2041 : 0 : static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, unsigned long grow)
2042 : : {
2043 : 2036834 : struct mm_struct *mm = vma->vm_mm;
2044 : 2036834 : struct rlimit *rlim = current->signal->rlim;
2045 : : unsigned long new_start;
2046 : :
2047 : : /* address space limit tests */
2048 [ + + ]: 2036834 : if (!may_expand_vm(mm, grow))
2049 : : return -ENOMEM;
2050 : :
2051 : : /* Stack limit test */
2052 [ + + ]: 2036831 : if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
2053 : : return -ENOMEM;
2054 : :
2055 : : /* mlock limit tests */
2056 [ - + ]: 2036828 : if (vma->vm_flags & VM_LOCKED) {
2057 : : unsigned long locked;
2058 : : unsigned long limit;
2059 : 0 : locked = mm->locked_vm + grow;
2060 : 0 : limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
2061 : 0 : limit >>= PAGE_SHIFT;
2062 [ # # ][ # # ]: 0 : if (locked > limit && !capable(CAP_IPC_LOCK))
2063 : : return -ENOMEM;
2064 : : }
2065 : :
2066 : : /* Check to ensure the stack will not grow into a hugetlb-only region */
2067 : : new_start = (vma->vm_flags & VM_GROWSUP) ? vma->vm_start :
2068 : : vma->vm_end - size;
2069 : : if (is_hugepage_only_range(vma->vm_mm, new_start, size))
2070 : : return -EFAULT;
2071 : :
2072 : : /*
2073 : : * Overcommit.. This must be the final test, as it will
2074 : : * update security statistics.
2075 : : */
2076 [ + + ]: 2036828 : if (security_vm_enough_memory_mm(mm, grow))
2077 : : return -ENOMEM;
2078 : :
2079 : : /* Ok, everything looks good - let it rip */
2080 [ - + ]: 2036827 : if (vma->vm_flags & VM_LOCKED)
2081 : 0 : mm->locked_vm += grow;
2082 : 2036827 : vm_stat_account(mm, vma->vm_flags, vma->vm_file, grow);
2083 : : return 0;
2084 : : }
2085 : :
2086 : : #if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
2087 : : /*
2088 : : * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
2089 : : * vma is the last one with address > vma->vm_end. Have to extend vma.
2090 : : */
2091 : : int expand_upwards(struct vm_area_struct *vma, unsigned long address)
2092 : : {
2093 : : int error;
2094 : :
2095 : : if (!(vma->vm_flags & VM_GROWSUP))
2096 : : return -EFAULT;
2097 : :
2098 : : /*
2099 : : * We must make sure the anon_vma is allocated
2100 : : * so that the anon_vma locking is not a noop.
2101 : : */
2102 : : if (unlikely(anon_vma_prepare(vma)))
2103 : : return -ENOMEM;
2104 : : vma_lock_anon_vma(vma);
2105 : :
2106 : : /*
2107 : : * vma->vm_start/vm_end cannot change under us because the caller
2108 : : * is required to hold the mmap_sem in read mode. We need the
2109 : : * anon_vma lock to serialize against concurrent expand_stacks.
2110 : : * Also guard against wrapping around to address 0.
2111 : : */
2112 : : if (address < PAGE_ALIGN(address+4))
2113 : : address = PAGE_ALIGN(address+4);
2114 : : else {
2115 : : vma_unlock_anon_vma(vma);
2116 : : return -ENOMEM;
2117 : : }
2118 : : error = 0;
2119 : :
2120 : : /* Somebody else might have raced and expanded it already */
2121 : : if (address > vma->vm_end) {
2122 : : unsigned long size, grow;
2123 : :
2124 : : size = address - vma->vm_start;
2125 : : grow = (address - vma->vm_end) >> PAGE_SHIFT;
2126 : :
2127 : : error = -ENOMEM;
2128 : : if (vma->vm_pgoff + (size >> PAGE_SHIFT) >= vma->vm_pgoff) {
2129 : : error = acct_stack_growth(vma, size, grow);
2130 : : if (!error) {
2131 : : /*
2132 : : * vma_gap_update() doesn't support concurrent
2133 : : * updates, but we only hold a shared mmap_sem
2134 : : * lock here, so we need to protect against
2135 : : * concurrent vma expansions.
2136 : : * vma_lock_anon_vma() doesn't help here, as
2137 : : * we don't guarantee that all growable vmas
2138 : : * in a mm share the same root anon vma.
2139 : : * So, we reuse mm->page_table_lock to guard
2140 : : * against concurrent vma expansions.
2141 : : */
2142 : : spin_lock(&vma->vm_mm->page_table_lock);
2143 : : anon_vma_interval_tree_pre_update_vma(vma);
2144 : : vma->vm_end = address;
2145 : : anon_vma_interval_tree_post_update_vma(vma);
2146 : : if (vma->vm_next)
2147 : : vma_gap_update(vma->vm_next);
2148 : : else
2149 : : vma->vm_mm->highest_vm_end = address;
2150 : : spin_unlock(&vma->vm_mm->page_table_lock);
2151 : :
2152 : : perf_event_mmap(vma);
2153 : : }
2154 : : }
2155 : : }
2156 : : vma_unlock_anon_vma(vma);
2157 : : khugepaged_enter_vma_merge(vma);
2158 : : validate_mm(vma->vm_mm);
2159 : : return error;
2160 : : }
2161 : : #endif /* CONFIG_STACK_GROWSUP || CONFIG_IA64 */
2162 : :
2163 : : /*
2164 : : * vma is the first one with address < vma->vm_start. Have to extend vma.
2165 : : */
2166 : 0 : int expand_downwards(struct vm_area_struct *vma,
2167 : : unsigned long address)
2168 : : {
2169 : : int error;
2170 : :
2171 : : /*
2172 : : * We must make sure the anon_vma is allocated
2173 : : * so that the anon_vma locking is not a noop.
2174 : : */
2175 [ + - ]: 2036832 : if (unlikely(anon_vma_prepare(vma)))
2176 : : return -ENOMEM;
2177 : :
2178 : 2036833 : address &= PAGE_MASK;
2179 : 2036833 : error = security_mmap_addr(address);
2180 [ + ]: 2036823 : if (error)
2181 : : return error;
2182 : :
2183 : : vma_lock_anon_vma(vma);
2184 : :
2185 : : /*
2186 : : * vma->vm_start/vm_end cannot change under us because the caller
2187 : : * is required to hold the mmap_sem in read mode. We need the
2188 : : * anon_vma lock to serialize against concurrent expand_stacks.
2189 : : */
2190 : :
2191 : : /* Somebody else might have raced and expanded it already */
2192 [ + + ]: 2036862 : if (address < vma->vm_start) {
2193 : : unsigned long size, grow;
2194 : :
2195 : 2036830 : size = vma->vm_end - address;
2196 : 2036830 : grow = (vma->vm_start - address) >> PAGE_SHIFT;
2197 : :
2198 : : error = -ENOMEM;
2199 [ + + ]: 2036830 : if (grow <= vma->vm_pgoff) {
2200 : 2036821 : error = acct_stack_growth(vma, size, grow);
2201 [ + - ]: 2036823 : if (!error) {
2202 : : /*
2203 : : * vma_gap_update() doesn't support concurrent
2204 : : * updates, but we only hold a shared mmap_sem
2205 : : * lock here, so we need to protect against
2206 : : * concurrent vma expansions.
2207 : : * vma_lock_anon_vma() doesn't help here, as
2208 : : * we don't guarantee that all growable vmas
2209 : : * in a mm share the same root anon vma.
2210 : : * So, we reuse mm->page_table_lock to guard
2211 : : * against concurrent vma expansions.
2212 : : */
2213 : 2036823 : spin_lock(&vma->vm_mm->page_table_lock);
2214 : : anon_vma_interval_tree_pre_update_vma(vma);
2215 : 2036832 : vma->vm_start = address;
2216 : 2036832 : vma->vm_pgoff -= grow;
2217 : : anon_vma_interval_tree_post_update_vma(vma);
2218 : 2036799 : vma_gap_update(vma);
2219 : 2036813 : spin_unlock(&vma->vm_mm->page_table_lock);
2220 : :
2221 : 2036783 : perf_event_mmap(vma);
2222 : : }
2223 : : }
2224 : : }
2225 : : vma_unlock_anon_vma(vma);
2226 : : khugepaged_enter_vma_merge(vma);
2227 : : validate_mm(vma->vm_mm);
2228 : 2036820 : return error;
2229 : : }
2230 : :
2231 : : /*
2232 : : * Note how expand_stack() refuses to expand the stack all the way to
2233 : : * abut the next virtual mapping, *unless* that mapping itself is also
2234 : : * a stack mapping. We want to leave room for a guard page, after all
2235 : : * (the guard page itself is not added here, that is done by the
2236 : : * actual page faulting logic)
2237 : : *
2238 : : * This matches the behavior of the guard page logic (see mm/memory.c:
2239 : : * check_stack_guard_page()), which only allows the guard page to be
2240 : : * removed under these circumstances.
2241 : : */
2242 : : #ifdef CONFIG_STACK_GROWSUP
2243 : : int expand_stack(struct vm_area_struct *vma, unsigned long address)
2244 : : {
2245 : : struct vm_area_struct *next;
2246 : :
2247 : : address &= PAGE_MASK;
2248 : : next = vma->vm_next;
2249 : : if (next && next->vm_start == address + PAGE_SIZE) {
2250 : : if (!(next->vm_flags & VM_GROWSUP))
2251 : : return -ENOMEM;
2252 : : }
2253 : : return expand_upwards(vma, address);
2254 : : }
2255 : :
2256 : : struct vm_area_struct *
2257 : : find_extend_vma(struct mm_struct *mm, unsigned long addr)
2258 : : {
2259 : : struct vm_area_struct *vma, *prev;
2260 : :
2261 : : addr &= PAGE_MASK;
2262 : : vma = find_vma_prev(mm, addr, &prev);
2263 : : if (vma && (vma->vm_start <= addr))
2264 : : return vma;
2265 : : if (!prev || expand_stack(prev, addr))
2266 : : return NULL;
2267 : : if (prev->vm_flags & VM_LOCKED)
2268 : : __mlock_vma_pages_range(prev, addr, prev->vm_end, NULL);
2269 : : return prev;
2270 : : }
2271 : : #else
2272 : 0 : int expand_stack(struct vm_area_struct *vma, unsigned long address)
2273 : : {
2274 : : struct vm_area_struct *prev;
2275 : :
2276 : 59034 : address &= PAGE_MASK;
2277 : 59034 : prev = vma->vm_prev;
2278 [ + + ][ - + ]: 59034 : if (prev && prev->vm_end == address) {
2279 [ # # ]: 0 : if (!(prev->vm_flags & VM_GROWSDOWN))
2280 : : return -ENOMEM;
2281 : : }
2282 : 59034 : return expand_downwards(vma, address);
2283 : : }
2284 : :
2285 : : struct vm_area_struct *
2286 : 0 : find_extend_vma(struct mm_struct * mm, unsigned long addr)
2287 : : {
2288 : : struct vm_area_struct * vma;
2289 : : unsigned long start;
2290 : :
2291 : 2526743 : addr &= PAGE_MASK;
2292 : 2526743 : vma = find_vma(mm,addr);
2293 [ + + ]: 2526722 : if (!vma)
2294 : : return NULL;
2295 [ + + ]: 2526707 : if (vma->vm_start <= addr)
2296 : : return vma;
2297 [ - + ]: 6 : if (!(vma->vm_flags & VM_GROWSDOWN))
2298 : : return NULL;
2299 : : start = vma->vm_start;
2300 [ # # ]: 0 : if (expand_stack(vma, addr))
2301 : : return NULL;
2302 [ # # ]: 0 : if (vma->vm_flags & VM_LOCKED)
2303 : 0 : __mlock_vma_pages_range(vma, addr, start, NULL);
2304 : 0 : return vma;
2305 : : }
2306 : : #endif
2307 : :
2308 : : /*
2309 : : * Ok - we have the memory areas we should free on the vma list,
2310 : : * so release them, and do the vma updates.
2311 : : *
2312 : : * Called with the mm semaphore held.
2313 : : */
2314 : 0 : static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
2315 : : {
2316 : : unsigned long nr_accounted = 0;
2317 : :
2318 : : /* Update high watermark before we lower total_vm */
2319 : : update_hiwater_vm(mm);
2320 : : do {
2321 : 1193126 : long nrpages = vma_pages(vma);
2322 : :
2323 [ + + ]: 1193126 : if (vma->vm_flags & VM_ACCOUNT)
2324 : 617285 : nr_accounted += nrpages;
2325 : 1193126 : vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
2326 : 1193126 : vma = remove_vma(vma);
2327 [ + + ]: 2371895 : } while (vma);
2328 : 1178769 : vm_unacct_memory(nr_accounted);
2329 : : validate_mm(mm);
2330 : 1178854 : }
2331 : :
2332 : : /*
2333 : : * Get rid of page table information in the indicated region.
2334 : : *
2335 : : * Called with the mm semaphore held.
2336 : : */
2337 : 0 : static void unmap_region(struct mm_struct *mm,
2338 : : struct vm_area_struct *vma, struct vm_area_struct *prev,
2339 : : unsigned long start, unsigned long end)
2340 : : {
2341 [ + + ]: 1179282 : struct vm_area_struct *next = prev? prev->vm_next: mm->mmap;
2342 : : struct mmu_gather tlb;
2343 : :
2344 : 1179282 : lru_add_drain();
2345 : : tlb_gather_mmu(&tlb, mm, start, end);
2346 : : update_hiwater_rss(mm);
2347 : 1179264 : unmap_vmas(&tlb, vma, start, end);
2348 [ + + ][ + + ]: 1179303 : free_pgtables(&tlb, vma, prev ? prev->vm_end : FIRST_USER_ADDRESS,
2349 : : next ? next->vm_start : USER_PGTABLES_CEILING);
2350 : : tlb_finish_mmu(&tlb, start, end);
2351 : 1179299 : }
2352 : :
2353 : : /*
2354 : : * Create a list of vmas touched by the unmap, removing them from the mm's
2355 : : * vma list as we go.
2356 : : */
2357 : : static void
2358 : 0 : detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
2359 : : struct vm_area_struct *prev, unsigned long end)
2360 : : {
2361 : : struct vm_area_struct **insertion_point;
2362 : : struct vm_area_struct *tail_vma = NULL;
2363 : :
2364 [ + + ]: 1179304 : insertion_point = (prev ? &prev->vm_next : &mm->mmap);
2365 : 1179304 : vma->vm_prev = NULL;
2366 : : do {
2367 : 1193314 : vma_rb_erase(vma, &mm->mm_rb);
2368 : 2372620 : mm->map_count--;
2369 : : tail_vma = vma;
2370 : 2372620 : vma = vma->vm_next;
2371 [ + + ][ + + ]: 2372620 : } while (vma && vma->vm_start < end);
2372 : 1179306 : *insertion_point = vma;
2373 [ + + ]: 2358610 : if (vma) {
2374 : 1179305 : vma->vm_prev = prev;
2375 : 1179305 : vma_gap_update(vma);
2376 : : } else
2377 [ + - ]: 1 : mm->highest_vm_end = prev ? prev->vm_end : 0;
2378 : 1179300 : tail_vma->vm_next = NULL;
2379 : 1179300 : mm->mmap_cache = NULL; /* Kill the cache. */
2380 : 1179300 : }
2381 : :
2382 : : /*
2383 : : * __split_vma() bypasses sysctl_max_map_count checking. We use this on the
2384 : : * munmap path where it doesn't make sense to fail.
2385 : : */
2386 : 1544877 : static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
2387 : : unsigned long addr, int new_below)
2388 : : {
2389 : : struct vm_area_struct *new;
2390 : : int err = -ENOMEM;
2391 : :
2392 : : if (is_vm_hugetlb_page(vma) && (addr &
2393 : : ~(huge_page_mask(hstate_vma(vma)))))
2394 : : return -EINVAL;
2395 : :
2396 : 1544877 : new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2397 [ + + ]: 1544869 : if (!new)
2398 : : goto out_err;
2399 : :
2400 : : /* most fields are the same, copy all, and then fixup */
2401 : 1544848 : *new = *vma;
2402 : :
2403 : 1544848 : INIT_LIST_HEAD(&new->anon_vma_chain);
2404 : :
2405 [ + + ]: 1544848 : if (new_below)
2406 : 462181 : new->vm_end = addr;
2407 : : else {
2408 : 1082667 : new->vm_start = addr;
2409 : 1082667 : new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
2410 : : }
2411 : :
2412 : : err = vma_dup_policy(vma, new);
2413 : : if (err)
2414 : : goto out_free_vma;
2415 : :
2416 [ + ]: 1544848 : if (anon_vma_clone(new, vma))
2417 : : goto out_free_mpol;
2418 : :
2419 [ + + ]: 1544823 : if (new->vm_file)
2420 : : get_file(new->vm_file);
2421 : :
2422 [ + + ][ - + ]: 3089753 : if (new->vm_ops && new->vm_ops->open)
2423 : 0 : new->vm_ops->open(new);
2424 : :
2425 [ + + ]: 1544876 : if (new_below)
2426 : 462195 : err = vma_adjust(vma, addr, vma->vm_end, vma->vm_pgoff +
2427 : 462195 : ((addr - new->vm_start) >> PAGE_SHIFT), new);
2428 : : else
2429 : 1082681 : err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
2430 : :
2431 : : /* Success. */
2432 [ - + ]: 1544897 : if (!err)
2433 : : return 0;
2434 : :
2435 : : /* Clean everything up if vma_adjust failed. */
2436 [ # # ][ # # ]: 0 : if (new->vm_ops && new->vm_ops->close)
2437 : 0 : new->vm_ops->close(new);
2438 [ # # ]: 0 : if (new->vm_file)
2439 : 0 : fput(new->vm_file);
2440 : 0 : unlink_anon_vmas(new);
2441 : : out_free_mpol:
2442 : : mpol_put(vma_policy(new));
2443 : : out_free_vma:
2444 : 11 : kmem_cache_free(vm_area_cachep, new);
2445 : : out_err:
2446 : : return err;
2447 : : }
2448 : :
2449 : : /*
2450 : : * Split a vma into two pieces at address 'addr', a new vma is allocated
2451 : : * either for the first part or the tail.
2452 : : */
2453 : 0 : int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
2454 : : unsigned long addr, int new_below)
2455 : : {
2456 [ + ]: 840432 : if (mm->map_count >= sysctl_max_map_count)
2457 : : return -ENOMEM;
2458 : :
2459 : 840436 : return __split_vma(mm, vma, addr, new_below);
2460 : : }
2461 : :
2462 : : /* Munmap is split into 2 main parts -- this part which finds
2463 : : * what needs doing, and the areas themselves, which do the
2464 : : * work. This now handles partial unmappings.
2465 : : * Jeremy Fitzhardinge <jeremy@goop.org>
2466 : : */
2467 : 0 : int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2468 : : {
2469 : : unsigned long end;
2470 : : struct vm_area_struct *vma, *prev, *last;
2471 : :
2472 [ + + ][ + + ]: 1179313 : if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
[ + ]
2473 : : return -EINVAL;
2474 : :
2475 [ + + ]: 1179272 : if ((len = PAGE_ALIGN(len)) == 0)
2476 : : return -EINVAL;
2477 : :
2478 : : /* Find the first overlapping VMA */
2479 : 1179163 : vma = find_vma(mm, start);
2480 [ + ]: 1179246 : if (!vma)
2481 : : return 0;
2482 : 1179255 : prev = vma->vm_prev;
2483 : : /* we have start < vma->vm_end */
2484 : :
2485 : : /* if it doesn't overlap, we have nothing.. */
2486 : 1179255 : end = start + len;
2487 [ + ]: 1179255 : if (vma->vm_start >= end)
2488 : : return 0;
2489 : :
2490 : : /*
2491 : : * If we need to split any vma, do it now to save pain later.
2492 : : *
2493 : : * Note: mremap's move_vma VM_ACCOUNT handling assumes a partially
2494 : : * unmapped vm_area_struct will remain in use: so lower split_vma
2495 : : * places tmp vma above, and higher split_vma places tmp vma below.
2496 : : */
2497 [ + + ]: 1179311 : if (start > vma->vm_start) {
2498 : : int error;
2499 : :
2500 : : /*
2501 : : * Make sure that map_count on return from munmap() will
2502 : : * not exceed its limit; but let map_count go just above
2503 : : * its limit temporarily, to help free resources as expected.
2504 : : */
2505 [ + + ][ + - ]: 482311 : if (end < vma->vm_end && mm->map_count >= sysctl_max_map_count)
2506 : : return -ENOMEM;
2507 : :
2508 : 482311 : error = __split_vma(mm, vma, start, 0);
2509 [ + - ]: 482311 : if (error)
2510 : : return error;
2511 : : prev = vma;
2512 : : }
2513 : :
2514 : : /* Does it split the last one? */
2515 : 1179311 : last = find_vma(mm, end);
2516 [ + ][ + + ]: 1179281 : if (last && end > last->vm_start) {
2517 : 222145 : int error = __split_vma(mm, last, end, 1);
2518 [ + + ]: 222144 : if (error)
2519 : : return error;
2520 : : }
2521 [ + + ]: 1179277 : vma = prev? prev->vm_next: mm->mmap;
2522 : :
2523 : : /*
2524 : : * unlock any mlock()ed ranges before detaching vmas
2525 : : */
2526 [ + + ]: 1179277 : if (mm->locked_vm) {
2527 : 193 : struct vm_area_struct *tmp = vma;
2528 [ + - ][ + + ]: 388 : while (tmp && tmp->vm_start < end) {
2529 [ + + ]: 194 : if (tmp->vm_flags & VM_LOCKED) {
2530 : 386 : mm->locked_vm -= vma_pages(tmp);
2531 : : munlock_vma_pages_all(tmp);
2532 : : }
2533 : 194 : tmp = tmp->vm_next;
2534 : : }
2535 : : }
2536 : :
2537 : : /*
2538 : : * Remove the vma's, and unmap the actual pages
2539 : : */
2540 : 1179277 : detach_vmas_to_be_unmapped(mm, vma, prev, end);
2541 : 1179283 : unmap_region(mm, vma, prev, start, end);
2542 : :
2543 : : /* Fix up all other VM information */
2544 : 1179295 : remove_vma_list(mm, vma);
2545 : :
2546 : 1178950 : return 0;
2547 : : }
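
The "partial unmappings" case above is visible from user space: punching a one-page hole in the middle of a mapping leaves two separate vmas behind. A sketch (inspect /proc/self/maps afterwards to see the split):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, 3 * page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	/* Punch a one-page hole in the middle: the original vma is split
	 * and the middle piece is unmapped. */
	if (munmap(p + page, page))
		return 1;

	printf("kept [%p-%p) and [%p-%p)\n",
	       (void *)p, (void *)(p + page),
	       (void *)(p + 2 * page), (void *)(p + 3 * page));
	return 0;
}
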
2548 : :
2549 : 0 : int vm_munmap(unsigned long start, size_t len)
2550 : : {
2551 : : int ret;
2552 : 532826 : struct mm_struct *mm = current->mm;
2553 : :
2554 : 532826 : down_write(&mm->mmap_sem);
2555 : 532608 : ret = do_munmap(mm, start, len);
2556 : 532726 : up_write(&mm->mmap_sem);
2557 : 532593 : return ret;
2558 : : }
2559 : : EXPORT_SYMBOL(vm_munmap);
2560 : :
2561 : 0 : SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
2562 : : {
2563 : 473884 : profile_munmap(addr);
2564 : 473844 : return vm_munmap(addr, len);
2565 : : }
2566 : :
2567 : : static inline void verify_mm_writelocked(struct mm_struct *mm)
2568 : : {
2569 : : #ifdef CONFIG_DEBUG_VM
2570 : : if (unlikely(down_read_trylock(&mm->mmap_sem))) {
2571 : : WARN_ON(1);
2572 : : up_read(&mm->mmap_sem);
2573 : : }
2574 : : #endif
2575 : : }
2576 : :
2577 : : /*
2578 : : * This is really a simplified "do_mmap". It only handles
2579 : : * anonymous maps. Eventually we may be able to do some
2580 : : * brk-specific accounting here.
2581 : : */
2582 : 0 : static unsigned long do_brk(unsigned long addr, unsigned long len)
2583 : : {
2584 : 235199 : struct mm_struct * mm = current->mm;
2585 : : struct vm_area_struct * vma, * prev;
2586 : : unsigned long flags;
2587 : : struct rb_node ** rb_link, * rb_parent;
2588 : 235199 : pgoff_t pgoff = addr >> PAGE_SHIFT;
2589 : : int error;
2590 : :
2591 : 235199 : len = PAGE_ALIGN(len);
2592 [ + + ]: 235199 : if (!len)
2593 : : return addr;
2594 : :
2595 [ + - ]: 176225 : flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2596 : :
2597 : 176225 : error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
2598 [ + - ]: 176225 : if (error & ~PAGE_MASK)
2599 : : return error;
2600 : :
2601 : : /*
2602 : : * mlock MCL_FUTURE?
2603 : : */
2604 [ + + ]: 176225 : if (mm->def_flags & VM_LOCKED) {
2605 : : unsigned long locked, lock_limit;
2606 : 1 : locked = len >> PAGE_SHIFT;
2607 : 1 : locked += mm->locked_vm;
2608 : : lock_limit = rlimit(RLIMIT_MEMLOCK);
2609 : 1 : lock_limit >>= PAGE_SHIFT;
2610 [ + - ][ + ]: 176225 : if (locked > lock_limit && !capable(CAP_IPC_LOCK))
2611 : : return -EAGAIN;
2612 : : }
2613 : :
2614 : : /*
2615 : : * mm->mmap_sem is required to protect against another thread
2616 : : * changing the mappings in case we sleep.
2617 : : */
2618 : : verify_mm_writelocked(mm);
2619 : :
2620 : : /*
2621 : : * Clear old maps. This also does some error checking for us.
2622 : : */
2623 : : munmap_back:
2624 [ - + ]: 176222 : if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
2625 [ # # ]: 0 : if (do_munmap(mm, addr, len))
2626 : : return -ENOMEM;
2627 : : goto munmap_back;
2628 : : }
2629 : :
2630 : : /* Check against address space limits *after* clearing old maps... */
2631 [ + + ]: 176222 : if (!may_expand_vm(mm, len >> PAGE_SHIFT))
2632 : : return -ENOMEM;
2633 : :
2634 [ + + ]: 176221 : if (mm->map_count > sysctl_max_map_count)
2635 : : return -ENOMEM;
2636 : :
2637 [ + + ]: 176213 : if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
2638 : : return -ENOMEM;
2639 : :
2640 : : /* Can we just expand an old private anonymous mapping? */
2641 : 176218 : vma = vma_merge(mm, prev, addr, addr + len, flags,
2642 : : NULL, NULL, pgoff, NULL, NULL);
2643 [ + + ]: 176175 : if (vma)
2644 : : goto out;
2645 : :
2646 : : /*
2647 : : * create a vma struct for an anonymous mapping
2648 : : */
2649 : 90377 : vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2650 [ - + ]: 90424 : if (!vma) {
2651 : : vm_unacct_memory(len >> PAGE_SHIFT);
2652 : 0 : return -ENOMEM;
2653 : : }
2654 : :
2655 : 90424 : INIT_LIST_HEAD(&vma->anon_vma_chain);
2656 : 90424 : vma->vm_mm = mm;
2657 : 90424 : vma->vm_start = addr;
2658 : 90424 : vma->vm_end = addr + len;
2659 : 90424 : vma->vm_pgoff = pgoff;
2660 : 90424 : vma->vm_flags = flags;
2661 : 90424 : vma->vm_page_prot = vm_get_page_prot(flags);
2662 : 90424 : vma_link(mm, vma, prev, rb_link, rb_parent);
2663 : : out:
2664 : 176221 : perf_event_mmap(vma);
2665 : 176223 : mm->total_vm += len >> PAGE_SHIFT;
2666 [ + + ]: 176223 : if (flags & VM_LOCKED)
2667 : 1 : mm->locked_vm += (len >> PAGE_SHIFT);
2668 : : vma->vm_flags |= VM_SOFTDIRTY;
2669 : 176223 : return addr;
2670 : : }
2671 : :
2672 : 0 : unsigned long vm_brk(unsigned long addr, unsigned long len)
2673 : : {
2674 : 92239 : struct mm_struct *mm = current->mm;
2675 : : unsigned long ret;
2676 : : bool populate;
2677 : :
2678 : 92239 : down_write(&mm->mmap_sem);
2679 : 92236 : ret = do_brk(addr, len);
2680 : 92237 : populate = ((mm->def_flags & VM_LOCKED) != 0);
2681 : 92237 : up_write(&mm->mmap_sem);
2682 [ - + ]: 92237 : if (populate)
2683 : : mm_populate(addr, len);
2684 : 0 : return ret;
2685 : : }
2686 : : EXPORT_SYMBOL(vm_brk);
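
For context, do_brk() is the anonymous-mapping helper behind the brk() system call (and behind vm_brk() for in-kernel callers such as the binary loaders). From user space the same path can be driven with brk()/sbrk(); a minimal sketch:

#include <stdio.h>
#include <unistd.h>

int main(void)
{
	void *old_brk = sbrk(0);		/* current program break */

	if (sbrk(1 << 20) == (void *)-1)	/* grow the heap by 1 MiB */
		return 1;

	void *new_brk = sbrk(0);
	printf("break moved from %p to %p\n", old_brk, new_brk);

	if (brk(old_brk))			/* shrink it back */
		return 1;
	return 0;
}
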
2687 : :
2688 : : /* Release all mmaps. */
2689 : 0 : void exit_mmap(struct mm_struct *mm)
2690 : : {
2691 : : struct mmu_gather tlb;
2692 : 13770439 : struct vm_area_struct *vma;
2693 : : unsigned long nr_accounted = 0;
2694 : :
2695 : : /* mm's last user has gone, and it's about to be pulled down */
2696 : : mmu_notifier_release(mm);
2697 : :
2698 [ + + ]: 1196458 : if (mm->locked_vm) {
2699 : 4 : vma = mm->mmap;
2700 [ + + ]: 78 : while (vma) {
2701 [ + + ]: 74 : if (vma->vm_flags & VM_LOCKED)
2702 : : munlock_vma_pages_all(vma);
2703 : 74 : vma = vma->vm_next;
2704 : : }
2705 : : }
2706 : :
2707 : : arch_exit_mmap(mm);
2708 : :
2709 : 1196458 : vma = mm->mmap;
2710 [ + - ]: 1196458 : if (!vma) /* Can happen if dup_mmap() received an OOM */
2711 : 0 : return;
2712 : :
2713 : 1196458 : lru_add_drain();
2714 : 1196459 : flush_cache_mm(mm);
2715 : : tlb_gather_mmu(&tlb, mm, 0, -1);
2716 : : /* update_hiwater_rss(mm) here? but nobody should be looking */
2717 : : /* Use -1 here to ensure all VMAs in the mm are unmapped */
2718 : 1196458 : unmap_vmas(&tlb, vma, 0, -1);
2719 : :
2720 : 1196459 : free_pgtables(&tlb, vma, FIRST_USER_ADDRESS, USER_PGTABLES_CEILING);
2721 : : tlb_finish_mmu(&tlb, 0, -1);
2722 : :
2723 : : /*
2724 : : * Walk the list again, actually closing and freeing it,
2725 : : * with preemption enabled, without holding any MM locks.
2726 : : */
2727 [ + + ]: 23299897 : while (vma) {
2728 [ + + ]: 22103438 : if (vma->vm_flags & VM_ACCOUNT)
2729 : 13770439 : nr_accounted += vma_pages(vma);
2730 : 22103438 : vma = remove_vma(vma);
2731 : : }
2732 : 1196459 : vm_unacct_memory(nr_accounted);
2733 : :
2734 [ - + ]: 1196459 : WARN_ON(atomic_long_read(&mm->nr_ptes) >
2735 : : (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
2736 : : }
2737 : :
2738 : : /* Insert vm structure into process list sorted by address
2739 : : * and into the inode's i_mmap tree. If vm_file is non-NULL
2740 : : * then i_mmap_mutex is taken here.
2741 : : */
2742 : 0 : int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
2743 : : {
2744 : : struct vm_area_struct *prev;
2745 : : struct rb_node **rb_link, *rb_parent;
2746 : :
2747 : : /*
2748 : : * The vm_pgoff of a purely anonymous vma should be irrelevant
2749 : : * until its first write fault, when page's anon_vma and index
2750 : : * are set. But now set the vm_pgoff it will almost certainly
2751 : : * end up with (unless mremap moves it elsewhere before that
2752 : : * first write fault), so /proc/pid/maps tells a consistent story.
2753 : : *
2754 : : * By setting it to reflect the virtual start address of the
2755 : : * vma, merges and splits can happen in a seamless way, just
2756 : : * using the existing file pgoff checks and manipulations.
2757 : : * Similarly in do_mmap_pgoff and in do_brk.
2758 : : */
2759 [ + - ]: 117960 : if (!vma->vm_file) {
2760 [ - + ]: 117960 : BUG_ON(vma->anon_vma);
2761 : 117960 : vma->vm_pgoff = vma->vm_start >> PAGE_SHIFT;
2762 : : }
2763 [ + - ]: 117960 : if (find_vma_links(mm, vma->vm_start, vma->vm_end,
2764 : : &prev, &rb_link, &rb_parent))
2765 : : return -ENOMEM;
2766 [ + + + - ]: 176916 : if ((vma->vm_flags & VM_ACCOUNT) &&
2767 : 58982 : security_vm_enough_memory_mm(mm, vma_pages(vma)))
2768 : : return -ENOMEM;
2769 : :
2770 : 117934 : vma_link(mm, vma, prev, rb_link, rb_parent);
2771 : 117904 : return 0;
2772 : : }
2773 : :
2774 : : /*
2775 : : * Copy the vma structure to a new location in the same mm,
2776 : : * prior to moving page table entries, to effect an mremap move.
2777 : : */
2778 : 0 : struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
2779 : : unsigned long addr, unsigned long len, pgoff_t pgoff,
2780 : : bool *need_rmap_locks)
2781 : : {
2782 : 20 : struct vm_area_struct *vma = *vmap;
2783 : 10 : unsigned long vma_start = vma->vm_start;
2784 : 10 : struct mm_struct *mm = vma->vm_mm;
2785 : : struct vm_area_struct *new_vma, *prev;
2786 : : struct rb_node **rb_link, *rb_parent;
2787 : : bool faulted_in_anon_vma = true;
2788 : :
2789 : : /*
2790 : : * If anonymous vma has not yet been faulted, update new pgoff
2791 : : * to match new location, to increase its chance of merging.
2792 : : */
2793 [ + + ][ - + ]: 10 : if (unlikely(!vma->vm_file && !vma->anon_vma)) {
2794 : 0 : pgoff = addr >> PAGE_SHIFT;
2795 : : faulted_in_anon_vma = false;
2796 : : }
2797 : :
2798 [ + - ]: 10 : if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent))
2799 : : return NULL; /* should never get here */
2800 : 10 : new_vma = vma_merge(mm, prev, addr, addr + len, vma->vm_flags,
2801 : : vma->anon_vma, vma->vm_file, pgoff, vma_policy(vma),
2802 : : vma_get_anon_name(vma));
2803 [ - + ]: 20 : if (new_vma) {
2804 : : /*
2805 : : * Source vma may have been merged into new_vma
2806 : : */
2807 [ # # ][ # # ]: 0 : if (unlikely(vma_start >= new_vma->vm_start &&
2808 : : vma_start < new_vma->vm_end)) {
2809 : : /*
2810 : : * The only way we can get a vma_merge with
2811 : : * self during an mremap is if the vma hasn't
2812 : : * been faulted in yet and we were allowed to
2813 : : * reset the dst vma->vm_pgoff to the
2814 : : * destination address of the mremap to allow
2815 : : * the merge to happen. mremap must change the
2816 : : * vm_pgoff linearity between src and dst vmas
2817 : : * (in turn preventing a vma_merge) to be
2818 : : * safe. It is only safe to keep the vm_pgoff
2819 : : * linear if there are no pages mapped yet.
2820 : : */
2821 : : VM_BUG_ON(faulted_in_anon_vma);
2822 : 0 : *vmap = vma = new_vma;
2823 : : }
2824 : 0 : *need_rmap_locks = (new_vma->vm_pgoff <= vma->vm_pgoff);
2825 : : } else {
2826 : 10 : new_vma = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
2827 [ + - ]: 10 : if (new_vma) {
2828 : 10 : *new_vma = *vma;
2829 : 10 : new_vma->vm_start = addr;
2830 : 10 : new_vma->vm_end = addr + len;
2831 : 10 : new_vma->vm_pgoff = pgoff;
2832 : : if (vma_dup_policy(vma, new_vma))
2833 : : goto out_free_vma;
2834 : 10 : INIT_LIST_HEAD(&new_vma->anon_vma_chain);
2835 [ + - ]: 10 : if (anon_vma_clone(new_vma, vma))
2836 : : goto out_free_mempol;
2837 [ + + ]: 10 : if (new_vma->vm_file)
2838 : : get_file(new_vma->vm_file);
2839 [ + + ][ - + ]: 10 : if (new_vma->vm_ops && new_vma->vm_ops->open)
2840 : 0 : new_vma->vm_ops->open(new_vma);
2841 : 10 : vma_link(mm, new_vma, prev, rb_link, rb_parent);
2842 : 10 : *need_rmap_locks = false;
2843 : : }
2844 : : }
2845 : 10 : return new_vma;
2846 : :
2847 : : out_free_mempol:
2848 : : mpol_put(vma_policy(new_vma));
2849 : : out_free_vma:
2850 : 0 : kmem_cache_free(vm_area_cachep, new_vma);
2851 : 0 : return NULL;
2852 : : }
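
copy_vma() is what an mremap() that actually has to move the region ends up using; a small user-space sketch (the kernel may or may not relocate the mapping, hence MREMAP_MAYMOVE):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;
	strcpy(p, "still here after the move");

	/* Ask for a larger area and allow the kernel to relocate it. */
	char *q = mremap(p, page, 4 * page, MREMAP_MAYMOVE);
	if (q == MAP_FAILED)
		return 1;

	printf("moved from %p to %p: \"%s\"\n", (void *)p, (void *)q, q);
	munmap(q, 4 * page);
	return 0;
}
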
2853 : :
2854 : : /*
2855 : : * Return true if the calling process may expand its vm space by the passed
2856 : : * number of pages
2857 : : */
2858 : 0 : int may_expand_vm(struct mm_struct *mm, unsigned long npages)
2859 : : {
2860 : 5073587 : unsigned long cur = mm->total_vm; /* pages */
2861 : : unsigned long lim;
2862 : :
2863 : 5073587 : lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
2864 : :
2865 [ + - ][ + + ]: 5073587 : if (cur + npages > lim)
[ + + ][ + ]
[ # # ]
2866 : : return 0;
2867 : 20 : return 1;
2868 : : }
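
may_expand_vm() is where RLIMIT_AS takes effect; a quick user-space sketch (the exact headroom depends on what the process already has mapped):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/resource.h>

int main(void)
{
	struct rlimit rl;

	getrlimit(RLIMIT_AS, &rl);
	rl.rlim_cur = 64UL << 20;		/* cap total VM at 64 MiB */
	setrlimit(RLIMIT_AS, &rl);

	void *p = mmap(NULL, 256UL << 20, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED)
		printf("mmap of 256 MiB failed: %s\n", strerror(errno));
	else
		munmap(p, 256UL << 20);
	return 0;
}
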
2869 : :
2870 : :
2871 : 0 : static int special_mapping_fault(struct vm_area_struct *vma,
2872 : : struct vm_fault *vmf)
2873 : : {
2874 : : pgoff_t pgoff;
2875 : : struct page **pages;
2876 : :
2877 : : /*
2878 : : * special mappings have no vm_file, and in that case, the mm
2879 : : * uses vm_pgoff internally. So we have to subtract it from here.
2880 : : * We are allowed to do this because we are the mm; do not copy
2881 : : * this code into drivers!
2882 : : */
2883 : 24 : pgoff = vmf->pgoff - vma->vm_pgoff;
2884 : :
2885 [ - + ][ # # ]: 24 : for (pages = vma->vm_private_data; pgoff && *pages; ++pages)
2886 : 0 : pgoff--;
2887 : :
2888 [ + - ]: 24 : if (*pages) {
2889 : : struct page *page = *pages;
2890 : : get_page(page);
2891 : 24 : vmf->page = page;
2892 : 24 : return 0;
2893 : : }
2894 : :
2895 : : return VM_FAULT_SIGBUS;
2896 : : }
2897 : :
2898 : : /*
2899 : : * Having a close hook prevents vma merging regardless of flags.
2900 : : */
2901 : 0 : static void special_mapping_close(struct vm_area_struct *vma)
2902 : : {
2903 : 1196451 : }
2904 : :
2905 : : static const struct vm_operations_struct special_mapping_vmops = {
2906 : : .close = special_mapping_close,
2907 : : .fault = special_mapping_fault,
2908 : : };
2909 : :
2910 : : /*
2911 : : * Called with mm->mmap_sem held for writing.
2912 : : * Insert a new vma covering the given region, with the given flags.
2913 : : * Its pages are supplied by the given array of struct page *.
2914 : : * The array can be shorter than len >> PAGE_SHIFT if it's null-terminated.
2915 : : * The region past the last page supplied will always produce SIGBUS.
2916 : : * The array pointer and the pages it points to are assumed to stay alive
2917 : : * for as long as this mapping might exist.
2918 : : */
2919 : 0 : int install_special_mapping(struct mm_struct *mm,
2920 : : unsigned long addr, unsigned long len,
2921 : : unsigned long vm_flags, struct page **pages)
2922 : : {
2923 : : int ret;
2924 : : struct vm_area_struct *vma;
2925 : :
2926 : 58976 : vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
2927 [ + - ]: 58976 : if (unlikely(vma == NULL))
2928 : : return -ENOMEM;
2929 : :
2930 : 58976 : INIT_LIST_HEAD(&vma->anon_vma_chain);
2931 : 58976 : vma->vm_mm = mm;
2932 : 58976 : vma->vm_start = addr;
2933 : 58976 : vma->vm_end = addr + len;
2934 : :
2935 : 58976 : vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND | VM_SOFTDIRTY;
2936 : 58976 : vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
2937 : :
2938 : 58976 : vma->vm_ops = &special_mapping_vmops;
2939 : 58976 : vma->vm_private_data = pages;
2940 : :
2941 : 58976 : ret = insert_vm_struct(mm, vma);
2942 [ + - ]: 58976 : if (ret)
2943 : : goto out;
2944 : :
2945 : 58976 : mm->total_vm += len >> PAGE_SHIFT;
2946 : :
2947 : 58976 : perf_event_mmap(vma);
2948 : :
2949 : 58976 : return 0;
2950 : :
2951 : : out:
2952 : 0 : kmem_cache_free(vm_area_cachep, vma);
2953 : 0 : return ret;
2954 : : }
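
One well-known consumer of install_special_mapping() on x86 kernels of this vintage is the vDSO (an assumption worth checking for other architectures); the vma it installs shows up as "[vdso]" in /proc/self/maps. A throwaway sketch:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char line[512];
	FILE *f = fopen("/proc/self/maps", "r");

	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		if (strstr(line, "[vdso]"))
			fputs(line, stdout);
	fclose(f);
	return 0;
}
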
2955 : :
2956 : : static DEFINE_MUTEX(mm_all_locks_mutex);
2957 : :
2958 : 0 : static void vm_lock_anon_vma(struct mm_struct *mm, struct anon_vma *anon_vma)
2959 : : {
2960 [ # # ]: 0 : if (!test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
2961 : : /*
2962 : : * The LSB of head.next can't change from under us
2963 : : * because we hold the mm_all_locks_mutex.
2964 : : */
2965 : 0 : down_write_nest_lock(&anon_vma->root->rwsem, &mm->mmap_sem);
2966 : : /*
2967 : : * We can safely modify head.next after taking the
2968 : : * anon_vma->root->rwsem. If some other vma in this mm shares
2969 : : * the same anon_vma we won't take it again.
2970 : : *
2971 : : * No need of atomic instructions here, head.next
2972 : : * can't change from under us thanks to the
2973 : : * anon_vma->root->rwsem.
2974 : : */
2975 [ # # ]: 0 : if (__test_and_set_bit(0, (unsigned long *)
2976 : 0 : &anon_vma->root->rb_root.rb_node))
2977 : 0 : BUG();
2978 : : }
2979 : 0 : }
2980 : :
2981 : 0 : static void vm_lock_mapping(struct mm_struct *mm, struct address_space *mapping)
2982 : : {
2983 [ # # ]: 0 : if (!test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
2984 : : /*
2985 : : * AS_MM_ALL_LOCKS can't change from under us because
2986 : : * we hold the mm_all_locks_mutex.
2987 : : *
2988 : : * Operations on ->flags have to be atomic because
2989 : : * even if AS_MM_ALL_LOCKS is stable thanks to the
2990 : : * mm_all_locks_mutex, there may be other cpus
2991 : : * changing other bitflags in parallel to us.
2992 : : */
2993 [ # # ]: 0 : if (test_and_set_bit(AS_MM_ALL_LOCKS, &mapping->flags))
2994 : 0 : BUG();
2995 : 0 : mutex_lock_nest_lock(&mapping->i_mmap_mutex, &mm->mmap_sem);
2996 : : }
2997 : 0 : }
2998 : :
2999 : : /*
3000 : : * This operation locks against the VM for all pte/vma/mm related
3001 : : * operations that could ever happen on a certain mm. This includes
3002 : : * vmtruncate, try_to_unmap, and all page faults.
3003 : : *
3004 : : * The caller must take the mmap_sem in write mode before calling
3005 : : * mm_take_all_locks(). The caller isn't allowed to release the
3006 : : * mmap_sem until mm_drop_all_locks() returns.
3007 : : *
3008 : : * mmap_sem in write mode is required in order to block all operations
3009 : : * that could modify pagetables and free pages without need of
3010 : : * altering the vma layout (for example populate_range() with
3011 : : * nonlinear vmas). It's also needed in write mode to prevent new
3012 : : * anon_vmas from being associated with existing vmas.
3013 : : *
3014 : : * A single task can't take more than one mm_take_all_locks() in a row
3015 : : * or it would deadlock.
3016 : : *
3017 : : * The LSB in anon_vma->rb_root.rb_node and the AS_MM_ALL_LOCKS bitflag in
3018 : : * mapping->flags avoid to take the same lock twice, if more than one
3019 : : * vma in this mm is backed by the same anon_vma or address_space.
3020 : : *
3021 : : * We can take all the locks in random order because the VM code
3022 : : * taking i_mmap_mutex or anon_vma->rwsem outside the mmap_sem never
3023 : : * takes more than one of them in a row. Secondly we're protected
3024 : : * against a concurrent mm_take_all_locks() by the mm_all_locks_mutex.
3025 : : *
3026 : : * mm_take_all_locks() and mm_drop_all_locks() are expensive operations
3027 : : * that may have to take thousands of locks.
3028 : : *
3029 : : * mm_take_all_locks() can fail if it's interrupted by signals.
3030 : : */
3031 : 0 : int mm_take_all_locks(struct mm_struct *mm)
3032 : : {
3033 : : struct vm_area_struct *vma;
3034 : : struct anon_vma_chain *avc;
3035 : :
3036 [ # # ]: 0 : BUG_ON(down_read_trylock(&mm->mmap_sem));
3037 : :
3038 : 0 : mutex_lock(&mm_all_locks_mutex);
3039 : :
3040 [ # # ]: 0 : for (vma = mm->mmap; vma; vma = vma->vm_next) {
3041 [ # # ]: 0 : if (signal_pending(current))
3042 : : goto out_unlock;
3043 [ # # ][ # # ]: 0 : if (vma->vm_file && vma->vm_file->f_mapping)
3044 : 0 : vm_lock_mapping(mm, vma->vm_file->f_mapping);
3045 : : }
3046 : :
3047 [ # # ]: 0 : for (vma = mm->mmap; vma; vma = vma->vm_next) {
3048 [ # # ]: 0 : if (signal_pending(current))
3049 : : goto out_unlock;
3050 [ # # ]: 0 : if (vma->anon_vma)
3051 [ # # ]: 0 : list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3052 : 0 : vm_lock_anon_vma(mm, avc->anon_vma);
3053 : : }
3054 : :
3055 : : return 0;
3056 : :
3057 : : out_unlock:
3058 : 0 : mm_drop_all_locks(mm);
3059 : 0 : return -EINTR;
3060 : : }
3061 : :
3062 : 0 : static void vm_unlock_anon_vma(struct anon_vma *anon_vma)
3063 : : {
3064 [ # # ]: 0 : if (test_bit(0, (unsigned long *) &anon_vma->root->rb_root.rb_node)) {
3065 : : /*
3066 : : * The LSB of head.next can't change to 0 from under
3067 : : * us because we hold the mm_all_locks_mutex.
3068 : : *
3069 : : * We must however clear the bitflag before unlocking
3070 : : * the vma so the users using the anon_vma->rb_root will
3071 : : * never see our bitflag.
3072 : : *
3073 : : * No need of atomic instructions here, head.next
3074 : : * can't change from under us until we release the
3075 : : * anon_vma->root->rwsem.
3076 : : */
3077 [ # # ]: 0 : if (!__test_and_clear_bit(0, (unsigned long *)
3078 : : &anon_vma->root->rb_root.rb_node))
3079 : 0 : BUG();
3080 : : anon_vma_unlock_write(anon_vma);
3081 : : }
3082 : 0 : }
3083 : :
3084 : 0 : static void vm_unlock_mapping(struct address_space *mapping)
3085 : : {
3086 [ # # ]: 0 : if (test_bit(AS_MM_ALL_LOCKS, &mapping->flags)) {
3087 : : /*
3088 : : * AS_MM_ALL_LOCKS can't change to 0 from under us
3089 : : * because we hold the mm_all_locks_mutex.
3090 : : */
3091 : 0 : mutex_unlock(&mapping->i_mmap_mutex);
3092 [ # # ]: 0 : if (!test_and_clear_bit(AS_MM_ALL_LOCKS,
3093 : : &mapping->flags))
3094 : 0 : BUG();
3095 : : }
3096 : 0 : }
3097 : :
3098 : : /*
3099 : : * The mmap_sem cannot be released by the caller until
3100 : : * mm_drop_all_locks() returns.
3101 : : */
3102 : 0 : void mm_drop_all_locks(struct mm_struct *mm)
3103 : : {
3104 : : struct vm_area_struct *vma;
3105 : : struct anon_vma_chain *avc;
3106 : :
3107 [ # # ]: 0 : BUG_ON(down_read_trylock(&mm->mmap_sem));
3108 [ # # ]: 0 : BUG_ON(!mutex_is_locked(&mm_all_locks_mutex));
3109 : :
3110 [ # # ]: 0 : for (vma = mm->mmap; vma; vma = vma->vm_next) {
3111 [ # # ]: 0 : if (vma->anon_vma)
3112 [ # # ]: 0 : list_for_each_entry(avc, &vma->anon_vma_chain, same_vma)
3113 : 0 : vm_unlock_anon_vma(avc->anon_vma);
3114 [ # # ][ # # ]: 0 : if (vma->vm_file && vma->vm_file->f_mapping)
3115 : 0 : vm_unlock_mapping(vma->vm_file->f_mapping);
3116 : : }
3117 : :
3118 : 0 : mutex_unlock(&mm_all_locks_mutex);
3119 : 0 : }
3120 : :
3121 : : /*
3122 : : * initialise the vm_committed_as percpu counter
3123 : : */
3124 : 0 : void __init mmap_init(void)
3125 : : {
3126 : : int ret;
3127 : :
3128 : 0 : ret = percpu_counter_init(&vm_committed_as, 0);
3129 : : VM_BUG_ON(ret);
3130 : 0 : }
3131 : :
3132 : : /*
3133 : : * Initialise sysctl_user_reserve_kbytes.
3134 : : *
3135 : : * This is intended to keep enough memory in reserve that a user can
3136 : : * recover from (i.e. kill) a single memory-hogging process in
3137 : : * OVERCOMMIT_NEVER mode.
3138 : : *
3139 : : * The default value is min(3% of free memory, 128MB)
3140 : : * 128MB is enough to recover with sshd/login, bash, and top/kill.
3141 : : */
3142 : 0 : static int init_user_reserve(void)
3143 : : {
3144 : : unsigned long free_kbytes;
3145 : :
3146 : 0 : free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3147 : :
3148 : 0 : sysctl_user_reserve_kbytes = min(free_kbytes / 32, 1UL << 17);
3149 : 0 : return 0;
3150 : : }
3151 : : module_init(init_user_reserve)
3152 : :
3153 : : /*
3154 : : * Initialise sysctl_admin_reserve_kbytes.
3155 : : *
3156 : : * The purpose of sysctl_admin_reserve_kbytes is to allow the sys admin
3157 : : * to log in and kill a memory hogging process.
3158 : : *
3159 : : * Systems with more than 256MB of free memory will reserve 8MB, enough
3160 : : * to recover with sshd, bash, and top in OVERCOMMIT_GUESS. Smaller
3161 : : * systems will only reserve about 3% of free pages by default.
3162 : : */
3163 : 0 : static int init_admin_reserve(void)
3164 : : {
3165 : : unsigned long free_kbytes;
3166 : :
3167 : 0 : free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3168 : :
3169 : 0 : sysctl_admin_reserve_kbytes = min(free_kbytes / 32, 1UL << 13);
3170 : 0 : return 0;
3171 : : }
3172 : : module_init(init_admin_reserve)
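
/*
 * Editor's note: a worked example (a standalone userspace sketch, not
 * part of the kernel source) of the clamping rule shared by
 * init_user_reserve() and init_admin_reserve() above. free_kbytes / 32
 * is roughly 3% of free memory; the caps are 1UL << 17 kB (128MB) for
 * the user reserve and 1UL << 13 kB (8MB) for the admin reserve.
 */
#include <stdio.h>

static unsigned long clamp_reserve(unsigned long free_kbytes,
				   unsigned long cap_kbytes)
{
	unsigned long r = free_kbytes / 32;	/* ~3% of free memory */

	return r < cap_kbytes ? r : cap_kbytes;
}

int main(void)
{
	/* 2GB free: user reserve 65536 kB (64MB), admin clamped to 8192 kB */
	printf("%lu %lu\n", clamp_reserve(2097152UL, 1UL << 17),
			    clamp_reserve(2097152UL, 1UL << 13));
	/* 8GB free: user clamped to 131072 kB (128MB), admin to 8192 kB */
	printf("%lu %lu\n", clamp_reserve(8388608UL, 1UL << 17),
			    clamp_reserve(8388608UL, 1UL << 13));
	return 0;
}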
3173 : :
3174 : : /*
3175 : : * Reinitialise user and admin reserves if memory is added or removed.
3176 : : *
3177 : : * The default user reserve max is 128MB, and the default max for the
3178 : : * admin reserve is 8MB. These are usually, but not always, enough to
3179 : : * enable recovery from a memory hogging process using login/sshd, a shell,
3180 : : * and tools like top. It may make sense to increase or even disable the
3181 : : * reserve depending on the existence of swap or variations in the recovery
3182 : : * tools. So, the admin may have changed them.
3183 : : *
3184 : : * If memory is added and the reserves have been eliminated or increased above
3185 : : * the default max, then we'll trust the admin.
3186 : : *
3187 : : * If memory is removed and there isn't enough free memory, then we
3188 : : * need to reset the reserves.
3189 : : *
3190 : : * Otherwise keep the reserve set by the admin.
3191 : : */
3192 : : static int reserve_mem_notifier(struct notifier_block *nb,
3193 : : unsigned long action, void *data)
3194 : : {
3195 : : unsigned long tmp, free_kbytes;
3196 : :
3197 : : switch (action) {
3198 : : case MEM_ONLINE:
3199 : : /* Default max is 128MB. Leave alone if modified by operator. */
3200 : : tmp = sysctl_user_reserve_kbytes;
3201 : : if (0 < tmp && tmp < (1UL << 17))
3202 : : init_user_reserve();
3203 : :
3204 : : /* Default max is 8MB. Leave alone if modified by operator. */
3205 : : tmp = sysctl_admin_reserve_kbytes;
3206 : : if (0 < tmp && tmp < (1UL << 13))
3207 : : init_admin_reserve();
3208 : :
3209 : : break;
3210 : : case MEM_OFFLINE:
3211 : : free_kbytes = global_page_state(NR_FREE_PAGES) << (PAGE_SHIFT - 10);
3212 : :
3213 : : if (sysctl_user_reserve_kbytes > free_kbytes) {
3214 : : init_user_reserve();
3215 : : pr_info("vm.user_reserve_kbytes reset to %lu\n",
3216 : : sysctl_user_reserve_kbytes);
3217 : : }
3218 : :
3219 : : if (sysctl_admin_reserve_kbytes > free_kbytes) {
3220 : : init_admin_reserve();
3221 : : pr_info("vm.admin_reserve_kbytes reset to %lu\n",
3222 : : sysctl_admin_reserve_kbytes);
3223 : : }
3224 : : break;
3225 : : default:
3226 : : break;
3227 : : }
3228 : : return NOTIFY_OK;
3229 : : }
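
/*
 * Editor's note: a minimal userspace sketch (not part of this file) of
 * how an operator might override the reserves that the notifier above
 * otherwise manages. The sysctl paths are real; the chosen values are
 * arbitrary examples.
 */
#include <stdio.h>

static int write_sysctl(const char *path, unsigned long kbytes)
{
	FILE *f = fopen(path, "w");

	if (!f)
		return -1;
	fprintf(f, "%lu\n", kbytes);
	return fclose(f);
}

int main(void)
{
	/* e.g. shrink the user reserve to 64MB and disable the admin one */
	write_sysctl("/proc/sys/vm/user_reserve_kbytes", 65536UL);
	write_sysctl("/proc/sys/vm/admin_reserve_kbytes", 0UL);
	return 0;
}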
3230 : :
3231 : : static struct notifier_block reserve_mem_nb = {
3232 : : .notifier_call = reserve_mem_notifier,
3233 : : };
3234 : :
3235 : 0 : static int __meminit init_reserve_notifier(void)
3236 : : {
3237 : : if (register_hotmemory_notifier(&reserve_mem_nb))
3238 : : pr_err("Failed registering memory add/remove notifier for admin reserve\n");
3239 : :
3240 : 0 : return 0;
3241 : : }
3242 : : module_init(init_reserve_notifier)