Branch data Line data Source code
1 : : /*
2 : : * Macros for manipulating and testing page->flags
3 : : */
4 : :
5 : : #ifndef PAGE_FLAGS_H
6 : : #define PAGE_FLAGS_H
7 : :
8 : : #include <linux/types.h>
9 : : #include <linux/bug.h>
10 : : #include <linux/mmdebug.h>
11 : : #ifndef __GENERATING_BOUNDS_H
12 : : #include <linux/mm_types.h>
13 : : #include <generated/bounds.h>
14 : : #endif /* !__GENERATING_BOUNDS_H */
15 : :
16 : : /*
17 : : * Various page->flags bits:
18 : : *
19 : : * PG_reserved is set for special pages, which can never be swapped out. Some
20 : : * of them might not even exist (e.g. empty_bad_page)...
21 : : *
22 : : * The PG_private bitflag is set on pagecache pages if they contain filesystem
23 : : * specific data (which is normally at page->private). Private allocations
24 : : * may also use it for their own purposes.
25 : : *
26 : : * During initiation of disk I/O, PG_locked is set. This bit is set before I/O
27 : : * and cleared when writeback _starts_ or when read _completes_. PG_writeback
28 : : * is set before writeback starts and cleared when it finishes.
29 : : *
30 : : * PG_locked also pins a page in pagecache, and blocks truncation of the file
31 : : * while it is held.
32 : : *
33 : : * page_waitqueue(page) is a wait queue of all tasks waiting for the page
34 : : * to become unlocked.
35 : : *
36 : : * PG_uptodate tells whether the page's contents are valid. When a read
37 : : * completes, the page becomes uptodate, unless a disk I/O error happened.
38 : : *
39 : : * PG_referenced, PG_reclaim are used for page reclaim for anonymous and
40 : : * file-backed pagecache (see mm/vmscan.c).
41 : : *
42 : : * PG_error is set to indicate that an I/O error occurred on this page.
43 : : *
44 : : * PG_arch_1 is an architecture specific page state bit. The generic code
45 : : * guarantees that this bit is cleared for a page when it is first entered into
46 : : * the page cache.
47 : : *
48 : : * PG_highmem pages are not permanently mapped into the kernel virtual address
49 : : * space; they need to be kmapped separately for doing I/O on them. The
50 : : * struct page (which holds these flag bits) is always mapped into the kernel
51 : : * address space, however.
52 : : *
53 : : * PG_hwpoison indicates that a page got corrupted in hardware and contains
54 : : * data with incorrect ECC bits that triggered a machine check. Accessing is
55 : : * not safe since it may cause another machine check. Don't touch!
56 : : */
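
/*
 * A minimal sketch of the usual PG_locked protocol, assuming lock_page(),
 * trylock_page() and unlock_page() as declared in <linux/pagemap.h>:
 *
 *	lock_page(page);	sleeps until PG_locked is clear, then sets it
 *	... read the page contents or start I/O on it ...
 *	unlock_page(page);	clears PG_locked and wakes page_waitqueue(page)
 *
 * Code that cannot sleep uses trylock_page() and backs off on failure.
 */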
57 : :
58 : : /*
59 : : * Don't use the *_dontuse flags. Use the macros. Otherwise you'll break
60 : : * locked- and dirty-page accounting.
61 : : *
62 : : * The page flags field is split into two parts, the main flags area
63 : : * which extends from the low bits upwards, and the fields area which
64 : : * extends from the high bits downwards.
65 : : *
66 : : * | FIELD | ... | FLAGS |
67 : : *  N-1           ^       0
68 : : *               (NR_PAGEFLAGS)
69 : : *
70 : : * The fields area is reserved for fields mapping zone, node (for NUMA) and
71 : : * SPARSEMEM section (for variants of SPARSEMEM that require section ids like
72 : : * SPARSEMEM_EXTREME with !SPARSEMEM_VMEMMAP).
73 : : */
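
/*
 * A sketch of how the fields area is consumed, following the helpers in
 * <linux/mm.h> (ZONES_PGSHIFT and ZONES_MASK are derived there from the
 * generated bounds included above, not defined in this file):
 *
 *	static inline enum zone_type page_zonenum(const struct page *page)
 *	{
 *		return (page->flags >> ZONES_PGSHIFT) & ZONES_MASK;
 *	}
 *
 * Node and SPARSEMEM section numbers are packed and extracted the same way.
 */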
74 : : enum pageflags {
75 : : PG_locked, /* Page is locked. Don't touch. */
76 : : PG_error,
77 : : PG_referenced,
78 : : PG_uptodate,
79 : : PG_dirty,
80 : : PG_lru,
81 : : PG_active,
82 : : PG_slab,
83 : : PG_owner_priv_1, /* Owner use. If pagecache, fs may use */
84 : : PG_arch_1,
85 : : PG_reserved,
86 : : PG_private, /* If pagecache, has fs-private data */
87 : : PG_private_2, /* If pagecache, has fs aux data */
88 : : PG_writeback, /* Page is under writeback */
89 : : #ifdef CONFIG_PAGEFLAGS_EXTENDED
90 : : PG_head, /* A head page */
91 : : PG_tail, /* A tail page */
92 : : #else
93 : : PG_compound, /* A compound page */
94 : : #endif
95 : : PG_swapcache, /* Swap page: swp_entry_t in private */
96 : : PG_mappedtodisk, /* Has blocks allocated on-disk */
97 : : PG_reclaim, /* To be reclaimed asap */
98 : : PG_swapbacked, /* Page is backed by RAM/swap */
99 : : PG_unevictable, /* Page is "unevictable" */
100 : : #ifdef CONFIG_MMU
101 : : PG_mlocked, /* Page is vma mlocked */
102 : : #endif
103 : : #ifdef CONFIG_ARCH_USES_PG_UNCACHED
104 : : PG_uncached, /* Page has been mapped as uncached */
105 : : #endif
106 : : #ifdef CONFIG_MEMORY_FAILURE
107 : : PG_hwpoison, /* hardware poisoned page. Don't touch */
108 : : #endif
109 : : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
110 : : PG_compound_lock,
111 : : #endif
112 : : __NR_PAGEFLAGS,
113 : :
114 : : /* Filesystems */
115 : : PG_checked = PG_owner_priv_1,
116 : :
117 : : /* Two page bits are conscripted by FS-Cache to maintain local caching
118 : : * state. These bits are set on pages belonging to the netfs's inodes
119 : : * when those inodes are being locally cached.
120 : : */
121 : : PG_fscache = PG_private_2, /* page backed by cache */
122 : :
123 : : /* XEN */
124 : : PG_pinned = PG_owner_priv_1,
125 : : PG_savepinned = PG_dirty,
126 : :
127 : : /* SLOB */
128 : : PG_slob_free = PG_private,
129 : : };
130 : :
131 : : #ifndef __GENERATING_BOUNDS_H
132 : :
133 : : /*
134 : : * Macros to create function definitions for page flags
135 : : */
136 : : #define TESTPAGEFLAG(uname, lname) \
137 : : static inline int Page##uname(const struct page *page) \
138 : : { return test_bit(PG_##lname, &page->flags); }
139 : :
140 : : #define SETPAGEFLAG(uname, lname) \
141 : : static inline void SetPage##uname(struct page *page) \
142 : : { set_bit(PG_##lname, &page->flags); }
143 : :
144 : : #define CLEARPAGEFLAG(uname, lname) \
145 : : static inline void ClearPage##uname(struct page *page) \
146 : : { clear_bit(PG_##lname, &page->flags); }
147 : :
148 : : #define __SETPAGEFLAG(uname, lname) \
149 : : static inline void __SetPage##uname(struct page *page) \
150 : : { __set_bit(PG_##lname, &page->flags); }
151 : :
152 : : #define __CLEARPAGEFLAG(uname, lname) \
153 : : static inline void __ClearPage##uname(struct page *page) \
154 : : { __clear_bit(PG_##lname, &page->flags); }
155 : :
156 : : #define TESTSETFLAG(uname, lname) \
157 : : static inline int TestSetPage##uname(struct page *page) \
158 : : { return test_and_set_bit(PG_##lname, &page->flags); }
159 : :
160 : : #define TESTCLEARFLAG(uname, lname) \
161 : : static inline int TestClearPage##uname(struct page *page) \
162 : : { return test_and_clear_bit(PG_##lname, &page->flags); }
163 : :
164 : : #define __TESTCLEARFLAG(uname, lname) \
165 : : static inline int __TestClearPage##uname(struct page *page) \
166 : : { return __test_and_clear_bit(PG_##lname, &page->flags); }
167 : :
168 : : #define PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
169 : : SETPAGEFLAG(uname, lname) CLEARPAGEFLAG(uname, lname)
170 : :
171 : : #define __PAGEFLAG(uname, lname) TESTPAGEFLAG(uname, lname) \
172 : : __SETPAGEFLAG(uname, lname) __CLEARPAGEFLAG(uname, lname)
173 : :
174 : : #define PAGEFLAG_FALSE(uname) \
175 : : static inline int Page##uname(const struct page *page) \
176 : : { return 0; }
177 : :
178 : : #define TESTSCFLAG(uname, lname) \
179 : : TESTSETFLAG(uname, lname) TESTCLEARFLAG(uname, lname)
180 : :
181 : : #define SETPAGEFLAG_NOOP(uname) \
182 : : static inline void SetPage##uname(struct page *page) { }
183 : :
184 : : #define CLEARPAGEFLAG_NOOP(uname) \
185 : : static inline void ClearPage##uname(struct page *page) { }
186 : :
187 : : #define __CLEARPAGEFLAG_NOOP(uname) \
188 : : static inline void __ClearPage##uname(struct page *page) { }
189 : :
190 : : #define TESTCLEARFLAG_FALSE(uname) \
191 : : static inline int TestClearPage##uname(struct page *page) { return 0; }
192 : :
193 : : #define __TESTCLEARFLAG_FALSE(uname) \
194 : : static inline int __TestClearPage##uname(struct page *page) { return 0; }
195 : :
196 : : struct page; /* forward declaration */
197 : :
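/*
 * For illustration, PAGEFLAG(Dirty, dirty) below expands to three helpers,
 * roughly:
 *
 *	static inline int PageDirty(const struct page *page)
 *	{ return test_bit(PG_dirty, &page->flags); }
 *	static inline void SetPageDirty(struct page *page)
 *	{ set_bit(PG_dirty, &page->flags); }
 *	static inline void ClearPageDirty(struct page *page)
 *	{ clear_bit(PG_dirty, &page->flags); }
 *
 * The TestSet/TestClear and double-underscore variants wrap the atomic
 * test_and_set_bit()/test_and_clear_bit() and the non-atomic __set_bit()
 * family in the same way.
 */
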
198 : : TESTPAGEFLAG(Locked, locked)
199 : 934639 : PAGEFLAG(Error, error) TESTCLEARFLAG(Error, error)
200 : 6501580 : PAGEFLAG(Referenced, referenced) TESTCLEARFLAG(Referenced, referenced)
201 : 7421041 : PAGEFLAG(Dirty, dirty) TESTSCFLAG(Dirty, dirty) __CLEARPAGEFLAG(Dirty, dirty)
202 : 20414362 : PAGEFLAG(LRU, lru) __CLEARPAGEFLAG(LRU, lru)
203 : 44427415 : PAGEFLAG(Active, active) __CLEARPAGEFLAG(Active, active)
204 : 0 : TESTCLEARFLAG(Active, active)
205 : : __PAGEFLAG(Slab, slab)
206 : 2634 : PAGEFLAG(Checked, checked) /* Used by some filesystems */
207 : : PAGEFLAG(Pinned, pinned) TESTSCFLAG(Pinned, pinned) /* Xen */
208 : : PAGEFLAG(SavePinned, savepinned); /* Xen */
209 : 0 : PAGEFLAG(Reserved, reserved) __CLEARPAGEFLAG(Reserved, reserved)
210 : 18044394 : PAGEFLAG(SwapBacked, swapbacked) __CLEARPAGEFLAG(SwapBacked, swapbacked)
211 : :
212 : : __PAGEFLAG(SlobFree, slob_free)
213 : :
214 : : /*
215 : : * Private page markings that may be used by the filesystem that owns the page
216 : : * for its own purposes.
217 : : * - PG_private and PG_private_2 cause releasepage() and co to be invoked
218 : : */
219 : 3650966 : PAGEFLAG(Private, private) __SETPAGEFLAG(Private, private)
220 : : __CLEARPAGEFLAG(Private, private)
221 : 0 : PAGEFLAG(Private2, private_2) TESTSCFLAG(Private2, private_2)
222 : : PAGEFLAG(OwnerPriv1, owner_priv_1) TESTCLEARFLAG(OwnerPriv1, owner_priv_1)
223 : :
224 : : /*
225 : : * Only the test-and-set variants exist for PG_writeback. The unconditional
226 : : * operators are risky: they bypass page accounting.
227 : : */
228 : 1917268 : TESTPAGEFLAG(Writeback, writeback) TESTSCFLAG(Writeback, writeback)
229 : 2099918 : PAGEFLAG(MappedToDisk, mappedtodisk)
230 : :
231 : : /* PG_readahead is only used for reads; PG_reclaim is only for writes */
232 : 1753358 : PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
233 : 43611 : PAGEFLAG(Readahead, reclaim) TESTCLEARFLAG(Readahead, reclaim)
234 : :
235 : : #ifdef CONFIG_HIGHMEM
236 : : /*
237 : : * Must use a macro here due to header dependency issues. page_zone() is not
238 : : * available at this point.
239 : : */
240 : : #define PageHighMem(__p) is_highmem(page_zone(__p))
241 : : #else
242 : : PAGEFLAG_FALSE(HighMem)
243 : : #endif
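
/*
 * A usage sketch for highmem pages, assuming kmap_atomic()/kunmap_atomic()
 * from <linux/highmem.h>: the page contents have no permanent kernel
 * mapping, so they must be mapped around each access:
 *
 *	void *addr = kmap_atomic(page);
 *	memset(addr, 0, PAGE_SIZE);
 *	kunmap_atomic(addr);
 *
 * kmap_atomic() falls back to page_address() for lowmem pages, so the same
 * sequence is safe whether or not PageHighMem(page) is true.
 */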
244 : :
245 : : #ifdef CONFIG_SWAP
246 : 0 : PAGEFLAG(SwapCache, swapcache)
247 : : #else
248 : : PAGEFLAG_FALSE(SwapCache)
249 : : SETPAGEFLAG_NOOP(SwapCache) CLEARPAGEFLAG_NOOP(SwapCache)
250 : : #endif
251 : :
252 : 15725 : PAGEFLAG(Unevictable, unevictable) __CLEARPAGEFLAG(Unevictable, unevictable)
253 : 8175 : TESTCLEARFLAG(Unevictable, unevictable)
254 : :
255 : : #ifdef CONFIG_MMU
256 : 0 : PAGEFLAG(Mlocked, mlocked) __CLEARPAGEFLAG(Mlocked, mlocked)
257 : 26741 : TESTSCFLAG(Mlocked, mlocked) __TESTCLEARFLAG(Mlocked, mlocked)
258 : : #else
259 : : PAGEFLAG_FALSE(Mlocked) SETPAGEFLAG_NOOP(Mlocked)
260 : : TESTCLEARFLAG_FALSE(Mlocked) __TESTCLEARFLAG_FALSE(Mlocked)
261 : : #endif
262 : :
263 : : #ifdef CONFIG_ARCH_USES_PG_UNCACHED
264 : : PAGEFLAG(Uncached, uncached)
265 : : #else
266 : : PAGEFLAG_FALSE(Uncached)
267 : : #endif
268 : :
269 : : #ifdef CONFIG_MEMORY_FAILURE
270 : : PAGEFLAG(HWPoison, hwpoison)
271 : : TESTSCFLAG(HWPoison, hwpoison)
272 : : #define __PG_HWPOISON (1UL << PG_hwpoison)
273 : : #else
274 : : PAGEFLAG_FALSE(HWPoison)
275 : : #define __PG_HWPOISON 0
276 : : #endif
277 : :
278 : : u64 stable_page_flags(struct page *page);
279 : :
280 : : static inline int PageUptodate(struct page *page)
281 : : {
282 : : int ret = test_bit(PG_uptodate, &(page)->flags);
283 : :
284 : : /*
285 : : * Must ensure that the data we read out of the page is loaded
286 : : * _after_ we've loaded page->flags to check for PageUptodate.
287 : : * We can skip the barrier if the page is not uptodate, because
288 : : * we wouldn't be reading anything from it.
289 : : *
290 : : * See SetPageUptodate() for the other side of the story.
291 : : */
292 [ + - ][ + + ]: 89811008 : if (ret)
293 : 47854235 : smp_rmb();
294 : :
295 : : return ret;
296 : : }
297 : :
298 : : static inline void __SetPageUptodate(struct page *page)
299 : : {
300 : 18032868 : smp_wmb();
301 : : __set_bit(PG_uptodate, &(page)->flags);
302 : : }
303 : :
304 : : static inline void SetPageUptodate(struct page *page)
305 : : {
306 : : /*
307 : : * Memory barrier must be issued before setting the PG_uptodate bit,
308 : : * so that all previous stores issued in order to bring the page
309 : : * uptodate are actually visible before PageUptodate becomes true.
310 : : */
311 : 6915748 : smp_wmb();
312 : 6910992 : set_bit(PG_uptodate, &(page)->flags);
313 : : }
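
/*
 * A sketch of the barrier pairing (the writer is e.g. a read-I/O completion
 * handler; the reader is a hypothetical consumer of the page contents):
 *
 *	writer				reader
 *	------				------
 *	...store page contents...	if (PageUptodate(page))   implies smp_rmb()
 *	SetPageUptodate(page);		        ...load page contents...
 *	  implies smp_wmb()
 *
 * The smp_wmb() makes the content stores visible before PG_uptodate is set;
 * the smp_rmb() keeps the reader from loading the contents before it has
 * observed the bit.
 */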
314 : :
315 : 0 : CLEARPAGEFLAG(Uptodate, uptodate)
316 : :
317 : : extern void cancel_dirty_page(struct page *page, unsigned int account_size);
318 : :
319 : : int test_clear_page_writeback(struct page *page);
320 : : int test_set_page_writeback(struct page *page);
321 : :
322 : : static inline void set_page_writeback(struct page *page)
323 : : {
324 : 958610 : test_set_page_writeback(page);
325 : : }
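
/*
 * A simplified sketch of the writeback life cycle, assuming unlock_page()
 * and end_page_writeback() as declared in <linux/pagemap.h>:
 *
 *	set_page_writeback(page);	accounts for and sets PG_writeback
 *	unlock_page(page);
 *	...submit the write I/O...
 *	(on I/O completion:)
 *	end_page_writeback(page);	clears PG_writeback and wakes waiters
 */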
326 : :
327 : : #ifdef CONFIG_PAGEFLAGS_EXTENDED
328 : : /*
329 : : * System with lots of page flags available. This allows separate
330 : : * flags for PageHead() and PageTail() checks of compound pages so that bit
331 : : * tests can be used in performance sensitive paths. PageCompound is
332 : : * generally not used in hot code paths except arch/powerpc/mm/init_64.c
333 : : * and arch/powerpc/kvm/book3s_64_vio_hv.c which use it to detect huge pages
334 : : * and avoid handling those in real mode.
335 : : */
336 : : __PAGEFLAG(Head, head) CLEARPAGEFLAG(Head, head)
337 : : __PAGEFLAG(Tail, tail)
338 : :
339 : : static inline int PageCompound(struct page *page)
340 : : {
341 : 153289147 : return page->flags & ((1L << PG_head) | (1L << PG_tail));
342 : :
343 : : }
344 : : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
345 : : static inline void ClearPageCompound(struct page *page)
346 : : {
347 : : BUG_ON(!PageHead(page));
348 : : ClearPageHead(page);
349 : : }
350 : : #endif
351 : : #else
352 : : /*
353 : : * Reduce page flag use as much as possible by overlapping
354 : : * compound page flags with the flags used for page cache pages. Possible
355 : : * because PageCompound is always set for compound pages and not for
356 : : * pages on the LRU and/or pagecache.
357 : : */
358 : : TESTPAGEFLAG(Compound, compound)
359 : : __SETPAGEFLAG(Head, compound) __CLEARPAGEFLAG(Head, compound)
360 : :
361 : : /*
362 : : * PG_reclaim is used in combination with PG_compound to mark the
363 : : * head and tail of a compound page. This saves one page flag
364 : : * but makes it impossible to use compound pages for the page cache.
365 : : * The PG_reclaim bit would have to be used for reclaim or readahead
366 : : * if compound pages enter the page cache.
367 : : *
368 : : * PG_compound & PG_reclaim => Tail page
369 : : * PG_compound & ~PG_reclaim => Head page
370 : : */
371 : : #define PG_head_mask ((1L << PG_compound))
372 : : #define PG_head_tail_mask ((1L << PG_compound) | (1L << PG_reclaim))
373 : :
374 : : static inline int PageHead(struct page *page)
375 : : {
376 : : return ((page->flags & PG_head_tail_mask) == PG_head_mask);
377 : : }
378 : :
379 : : static inline int PageTail(struct page *page)
380 : : {
381 : : return ((page->flags & PG_head_tail_mask) == PG_head_tail_mask);
382 : : }
383 : :
384 : : static inline void __SetPageTail(struct page *page)
385 : : {
386 : : page->flags |= PG_head_tail_mask;
387 : : }
388 : :
389 : : static inline void __ClearPageTail(struct page *page)
390 : : {
391 : : page->flags &= ~PG_head_tail_mask;
392 : : }
393 : :
394 : : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
395 : : static inline void ClearPageCompound(struct page *page)
396 : : {
397 : : BUG_ON((page->flags & PG_head_tail_mask) != (1 << PG_compound));
398 : : clear_bit(PG_compound, &page->flags);
399 : : }
400 : : #endif
401 : :
402 : : #endif /* !PAGEFLAGS_EXTENDED */
403 : :
404 : : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
405 : : /*
406 : : * PageHuge() only returns true for hugetlbfs pages, but not for
407 : : * normal or transparent huge pages.
408 : : *
409 : : * PageTransHuge() returns true for both transparent huge and
410 : : * hugetlbfs pages, but not normal pages. PageTransHuge() should only be
411 : : * called in core VM paths where hugetlbfs pages can't exist.
412 : : */
413 : : static inline int PageTransHuge(struct page *page)
414 : : {
415 : : VM_BUG_ON_PAGE(PageTail(page), page);
416 : : return PageHead(page);
417 : : }
418 : :
419 : : /*
420 : : * PageTransCompound returns true for both transparent huge pages
421 : : * and hugetlbfs pages, so it should only be called when it's known
422 : : * that hugetlbfs pages aren't involved.
423 : : */
424 : : static inline int PageTransCompound(struct page *page)
425 : : {
426 : : return PageCompound(page);
427 : : }
428 : :
429 : : /*
430 : : * PageTransTail returns true for both transparent huge pages
431 : : * and hugetlbfs pages, so it should only be called when it's known
432 : : * that hugetlbfs pages aren't involved.
433 : : */
434 : : static inline int PageTransTail(struct page *page)
435 : : {
436 : : return PageTail(page);
437 : : }
438 : :
439 : : #else
440 : :
441 : : static inline int PageTransHuge(struct page *page)
442 : : {
443 : : return 0;
444 : : }
445 : :
446 : : static inline int PageTransCompound(struct page *page)
447 : : {
448 : : return 0;
449 : : }
450 : :
451 : : static inline int PageTransTail(struct page *page)
452 : : {
453 : : return 0;
454 : : }
455 : : #endif
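
/*
 * A usage sketch, assuming HPAGE_PMD_NR from <linux/huge_mm.h>: callers in
 * core VM paths size their work by whether the page is a THP head:
 *
 *	int nr_pages = PageTransHuge(page) ? HPAGE_PMD_NR : 1;
 */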
456 : :
457 : : /*
458 : : * If network-based swap is enabled, sl*b must keep track of whether pages
459 : : * were allocated from pfmemalloc reserves.
460 : : */
461 : : static inline int PageSlabPfmemalloc(struct page *page)
462 : : {
463 : : VM_BUG_ON_PAGE(!PageSlab(page), page);
464 : : return PageActive(page);
465 : : }
466 : :
467 : : static inline void SetPageSlabPfmemalloc(struct page *page)
468 : : {
469 : : VM_BUG_ON_PAGE(!PageSlab(page), page);
470 : : SetPageActive(page);
471 : : }
472 : :
473 : : static inline void __ClearPageSlabPfmemalloc(struct page *page)
474 : : {
475 : : VM_BUG_ON_PAGE(!PageSlab(page), page);
476 : : __ClearPageActive(page);
477 : : }
478 : :
479 : : static inline void ClearPageSlabPfmemalloc(struct page *page)
480 : : {
481 : : VM_BUG_ON_PAGE(!PageSlab(page), page);
482 : : ClearPageActive(page);
483 : : }
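
/*
 * A sketch of how sl*b uses these helpers, assuming the allocator has
 * flagged the freshly allocated page as coming from pfmemalloc reserves
 * (the exact check is hypothetical here; it differs between slab
 * implementations and kernel versions):
 *
 *	page = alloc_pages_node(node, flags, order);
 *	if (page_was_allocated_from_pfmemalloc_reserves)
 *		SetPageSlabPfmemalloc(page);
 */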
484 : :
485 : : #ifdef CONFIG_MMU
486 : : #define __PG_MLOCKED (1 << PG_mlocked)
487 : : #else
488 : : #define __PG_MLOCKED 0
489 : : #endif
490 : :
491 : : #ifdef CONFIG_TRANSPARENT_HUGEPAGE
492 : : #define __PG_COMPOUND_LOCK (1 << PG_compound_lock)
493 : : #else
494 : : #define __PG_COMPOUND_LOCK 0
495 : : #endif
496 : :
497 : : /*
498 : : * Flags checked when a page is freed. Pages being freed should not have
499 : : * these flags set. If they are, there is a problem.
500 : : */
501 : : #define PAGE_FLAGS_CHECK_AT_FREE \
502 : : (1 << PG_lru | 1 << PG_locked | \
503 : : 1 << PG_private | 1 << PG_private_2 | \
504 : : 1 << PG_writeback | 1 << PG_reserved | \
505 : : 1 << PG_slab | 1 << PG_swapcache | 1 << PG_active | \
506 : : 1 << PG_unevictable | __PG_MLOCKED | __PG_HWPOISON | \
507 : : __PG_COMPOUND_LOCK)
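
/*
 * A sketch of how the free path applies this mask; mm/page_alloc.c does
 * something along these lines when checking a page being freed:
 *
 *	if (unlikely(page->flags & PAGE_FLAGS_CHECK_AT_FREE))
 *		bad_page(page);
 */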
508 : :
509 : : /*
510 : : * Flags checked when a page is prepped for return by the page allocator.
511 : : * Pages being prepped should not have any flags set. If they are set,
512 : : * there has been a kernel bug or struct page corruption.
513 : : */
514 : : #define PAGE_FLAGS_CHECK_AT_PREP ((1 << NR_PAGEFLAGS) - 1)
515 : :
516 : : #define PAGE_FLAGS_PRIVATE \
517 : : (1 << PG_private | 1 << PG_private_2)
518 : : /**
519 : : * page_has_private - Determine if page has private stuff
520 : : * @page: The page to be checked
521 : : *
522 : : * Determine if a page has private stuff, indicating that release routines
523 : : * should be invoked upon it.
524 : : */
525 : : static inline int page_has_private(struct page *page)
526 : : {
527 : 2303705 : return !!(page->flags & PAGE_FLAGS_PRIVATE);
528 : : }
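
/*
 * A usage sketch, assuming try_to_release_page() from <linux/mm.h>: reclaim
 * and truncation must get the owning filesystem to drop its private data
 * before such a page can be freed:
 *
 *	if (page_has_private(page) &&
 *	    !try_to_release_page(page, GFP_KERNEL))
 *		return 0;	the fs refused, keep the page for now
 */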
529 : :
530 : : #endif /* !__GENERATING_BOUNDS_H */
531 : :
532 : : #endif /* PAGE_FLAGS_H */