Branch data Line data Source code
1 : : /*
2 : : * fs/mpage.c
3 : : *
4 : : * Copyright (C) 2002, Linus Torvalds.
5 : : *
6 : : * Contains functions related to preparing and submitting BIOs which contain
7 : : * multiple pagecache pages.
8 : : *
9 : : * 15May2002 Andrew Morton
10 : : * Initial version
11 : : * 27Jun2002 axboe@suse.de
12 : : * use bio_add_page() to build bio's just the right size
13 : : */
14 : :
15 : : #include <linux/kernel.h>
16 : : #include <linux/export.h>
17 : : #include <linux/mm.h>
18 : : #include <linux/kdev_t.h>
19 : : #include <linux/gfp.h>
20 : : #include <linux/bio.h>
21 : : #include <linux/fs.h>
22 : : #include <linux/buffer_head.h>
23 : : #include <linux/blkdev.h>
24 : : #include <linux/highmem.h>
25 : : #include <linux/prefetch.h>
26 : : #include <linux/mpage.h>
27 : : #include <linux/writeback.h>
28 : : #include <linux/backing-dev.h>
29 : : #include <linux/pagevec.h>
30 : : #include <linux/cleancache.h>
31 : :
32 : : /*
33 : : * I/O completion handler for multipage BIOs.
34 : : *
35 : : * The mpage code never puts partial pages into a BIO (except for end-of-file).
36 : : * If a page does not map to a contiguous run of blocks then it simply falls
37 : : * back to block_read_full_page().
38 : : *
39 : : * Why is this? If a page's completion depends on a number of different BIOs
40 : : * which can complete in any order (or at the same time) then determining the
41 : : * status of that page is hard. See end_buffer_async_read() for the details.
42 : : * There is no point in duplicating all that complexity.
43 : : */
44 : 0 : static void mpage_end_io(struct bio *bio, int err)
45 : : {
46 : : const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
47 : 43549 : struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
48 : :
49 : : do {
50 : 271920 : struct page *page = bvec->bv_page;
51 : :
52 [ + + ]: 271920 : if (--bvec >= bio->bi_io_vec)
53 : 228371 : prefetchw(&bvec->bv_page->flags);
54 [ + - ]: 271920 : if (bio_data_dir(bio) == READ) {
55 [ + - ]: 271920 : if (uptodate) {
56 : : SetPageUptodate(page);
57 : : } else {
58 : : ClearPageUptodate(page);
59 : : SetPageError(page);
60 : : }
61 : 271920 : unlock_page(page);
62 : : } else { /* bio_data_dir(bio) == WRITE */
63 [ # # ]: 0 : if (!uptodate) {
64 : : SetPageError(page);
65 [ # # ]: 0 : if (page->mapping)
66 : 0 : set_bit(AS_EIO, &page->mapping->flags);
67 : : }
68 : 0 : end_page_writeback(page);
69 : : }
70 [ + + ]: 315469 : } while (bvec >= bio->bi_io_vec);
71 : 43549 : bio_put(bio);
72 : 43549 : }
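/*
 * Editorial sketch, not part of this file: the open-coded reverse bvec walk
 * above predates wide use of the bio_for_each_segment_all() helper.  Assuming
 * the prefetchw() optimisation is dropped, the read-side half of the
 * completion could be written more plainly as below (the _sketch name is an
 * assumption, not a kernel symbol):
 */
static void mpage_end_read_sketch(struct bio *bio, int err)
{
	const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
	struct bio_vec *bv;
	int i;

	/* Walk every segment the block layer completed for this bio. */
	bio_for_each_segment_all(bv, bio, i) {
		struct page *page = bv->bv_page;

		if (uptodate) {
			SetPageUptodate(page);
		} else {
			/* Read failed: mark the page bad so readers can retry. */
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}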
73 : :
74 : : static struct bio *mpage_bio_submit(int rw, struct bio *bio)
75 : : {
76 : 43549 : bio->bi_end_io = mpage_end_io;
77 : 314076 : submit_bio(rw, bio);
78 : : return NULL;
79 : : }
80 : :
81 : : static struct bio *
82 : 0 : mpage_alloc(struct block_device *bdev,
83 : : sector_t first_sector, int nr_vecs,
84 : : gfp_t gfp_flags)
85 : : {
86 : : struct bio *bio;
87 : :
88 : : bio = bio_alloc(gfp_flags, nr_vecs);
89 : :
90 [ - + ][ # # ]: 43549 : if (bio == NULL && (current->flags & PF_MEMALLOC)) {
91 [ - - ][ # # ]: 43547 : while (!bio && (nr_vecs /= 2))
92 : : bio = bio_alloc(gfp_flags, nr_vecs);
93 : : }
94 : :
95 [ + - ]: 43549 : if (bio) {
96 : 43549 : bio->bi_bdev = bdev;
97 : 43549 : bio->bi_sector = first_sector;
98 : : }
99 : 43549 : return bio;
100 : : }
101 : :
102 : : /*
 103 : : * Support function for mpage_readpages. The fs-supplied get_block might
 104 : : * return an up-to-date buffer. This is used to map that buffer into
105 : : * the page, which allows readpage to avoid triggering a duplicate call
106 : : * to get_block.
107 : : *
108 : : * The idea is to avoid adding buffers to pages that don't already have
109 : : * them. So when the buffer is up to date and the page size == block size,
110 : : * this marks the page up to date instead of adding new buffers.
111 : : */
112 : : static void
113 : 0 : map_buffer_to_page(struct page *page, struct buffer_head *bh, int page_block)
114 : : {
115 : 0 : struct inode *inode = page->mapping->host;
116 : : struct buffer_head *page_bh, *head;
117 : : int block = 0;
118 : :
119 [ # # ]: 0 : if (!page_has_buffers(page)) {
120 : : /*
121 : : * don't make any buffers if there is only one buffer on
122 : : * the page and the page just needs to be set up to date
123 : : */
124 [ # # ][ # # ]: 0 : if (inode->i_blkbits == PAGE_CACHE_SHIFT &&
125 : : buffer_uptodate(bh)) {
126 : : SetPageUptodate(page);
127 : 0 : return;
128 : : }
129 : 0 : create_empty_buffers(page, 1 << inode->i_blkbits, 0);
130 : : }
131 [ # # ]: 0 : head = page_buffers(page);
132 : : page_bh = head;
133 : : do {
134 [ # # ]: 0 : if (block == page_block) {
135 : 0 : page_bh->b_state = bh->b_state;
136 : 0 : page_bh->b_bdev = bh->b_bdev;
137 : 0 : page_bh->b_blocknr = bh->b_blocknr;
138 : 0 : break;
139 : : }
140 : 0 : page_bh = page_bh->b_this_page;
141 : 0 : block++;
142 [ # # ]: 0 : } while (page_bh != head);
143 : : }
144 : :
145 : : /*
 146 : : * This is the worker routine which does all the work of mapping the disk
 147 : : * blocks and constructing the largest possible BIOs, submitting a BIO
 148 : : * whenever the blocks stop being contiguous on disk.
149 : : *
150 : : * We pass a buffer_head back and forth and use its buffer_mapped() flag to
151 : : * represent the validity of its disk mapping and to decide when to do the next
152 : : * get_block() call.
153 : : */
154 : : static struct bio *
155 : 0 : do_mpage_readpage(struct bio *bio, struct page *page, unsigned nr_pages,
156 : : sector_t *last_block_in_bio, struct buffer_head *map_bh,
157 : : unsigned long *first_logical_block, get_block_t get_block)
158 : : {
159 : 470896 : struct inode *inode = page->mapping->host;
160 : 470896 : const unsigned blkbits = inode->i_blkbits;
161 : 470896 : const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
162 : 470896 : const unsigned blocksize = 1 << blkbits;
163 : : sector_t block_in_file;
164 : : sector_t last_block;
165 : : sector_t last_block_in_file;
166 : : sector_t blocks[MAX_BUF_PER_PAGE];
167 : : unsigned page_block;
168 : : unsigned first_hole = blocks_per_page;
169 : : struct block_device *bdev = NULL;
170 : : int length;
171 : : int fully_mapped = 1;
172 : : unsigned nblocks;
173 : : unsigned relative_block;
174 : :
175 [ + + ]: 470896 : if (page_has_buffers(page))
176 : : goto confused;
177 : :
178 : 469806 : block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
179 : 469806 : last_block = block_in_file + nr_pages * blocks_per_page;
180 : 469428 : last_block_in_file = (i_size_read(inode) + blocksize - 1) >> blkbits;
181 [ + + ]: 940324 : if (last_block > last_block_in_file)
182 : : last_block = last_block_in_file;
183 : : page_block = 0;
184 : :
185 : : /*
186 : : * Map blocks using the result from the previous get_blocks call first.
187 : : */
188 : 469428 : nblocks = map_bh->b_size >> blkbits;
189 [ + + ][ + - ]: 469428 : if (buffer_mapped(map_bh) && block_in_file > *first_logical_block &&
[ + + ]
190 : 232621 : block_in_file < (*first_logical_block + nblocks)) {
191 : 232535 : unsigned map_offset = block_in_file - *first_logical_block;
192 : 232535 : unsigned last = nblocks - map_offset;
193 : :
194 : 232533 : for (relative_block = 0; ; relative_block++) {
195 [ + + ]: 465068 : if (relative_block == last) {
196 : : clear_buffer_mapped(map_bh);
197 : : break;
198 : : }
199 [ + + ]: 442666 : if (page_block == blocks_per_page)
200 : : break;
201 : 232533 : blocks[page_block] = map_bh->b_blocknr + map_offset +
202 : : relative_block;
203 : 232533 : page_block++;
204 : 232533 : block_in_file++;
205 : 232533 : }
206 : 232534 : bdev = map_bh->b_bdev;
207 : : }
208 : :
209 : : /*
210 : : * Then do more get_blocks calls until we are done with this page.
211 : : */
212 : 469427 : map_bh->b_page = page;
213 [ + + ]: 706735 : while (page_block < blocks_per_page) {
214 : 236949 : map_bh->b_state = 0;
215 : 236949 : map_bh->b_size = 0;
216 : :
217 [ + + ]: 236949 : if (block_in_file < last_block) {
218 : 223950 : map_bh->b_size = (last_block-block_in_file) << blkbits;
219 [ + ]: 223950 : if (get_block(inode, block_in_file, map_bh, 0))
220 : : goto confused;
221 : 224309 : *first_logical_block = block_in_file;
222 : : }
223 : :
224 [ + + ]: 237308 : if (!buffer_mapped(map_bh)) {
225 : : fully_mapped = 0;
226 [ + ]: 197922 : if (first_hole == blocks_per_page)
227 : : first_hole = page_block;
228 : 197922 : page_block++;
229 : 197922 : block_in_file++;
230 : 197922 : continue;
231 : : }
232 : :
 233 : : /* Some filesystems will copy data into the page during
 234 : : * the get_block call, in which case we don't want to
 235 : : * read it again. map_buffer_to_page copies the data
 236 : : * we just collected from get_block into the page's buffers
 237 : : * so readpage doesn't have to repeat the get_block call.
238 : : */
239 [ - + ]: 39386 : if (buffer_uptodate(map_bh)) {
240 : 0 : map_buffer_to_page(page, map_bh, page_block);
241 : 0 : goto confused;
242 : : }
243 : :
244 [ + - ]: 39386 : if (first_hole != blocks_per_page)
245 : : goto confused; /* hole -> non-hole */
246 : :
247 : : /* Contiguous blocks? */
248 [ - + ][ # # ]: 39386 : if (page_block && blocks[page_block-1] != map_bh->b_blocknr-1)
249 : : goto confused;
250 : 39387 : nblocks = map_bh->b_size >> blkbits;
251 : 39385 : for (relative_block = 0; ; relative_block++) {
252 [ + + ]: 78772 : if (relative_block == nblocks) {
253 : : clear_buffer_mapped(map_bh);
254 : : break;
255 [ + + ]: 61880 : } else if (page_block == blocks_per_page)
256 : : break;
257 : 39385 : blocks[page_block] = map_bh->b_blocknr+relative_block;
258 : 39385 : page_block++;
259 : 39385 : block_in_file++;
260 : 39385 : }
261 : 237308 : bdev = map_bh->b_bdev;
262 : : }
263 : :
264 [ + + ]: 469786 : if (first_hole != blocks_per_page) {
265 : 197866 : zero_user_segment(page, first_hole << blkbits, PAGE_CACHE_SIZE);
266 [ + ]: 197948 : if (first_hole == 0) {
267 : : SetPageUptodate(page);
268 : 197930 : unlock_page(page);
269 : 197866 : goto out;
270 : : }
271 [ + - ]: 271920 : } else if (fully_mapped) {
272 : : SetPageMappedToDisk(page);
273 : : }
274 : :
275 [ + ]: 271919 : if (fully_mapped && blocks_per_page == 1 && !PageUptodate(page) &&
276 : : cleancache_get_page(page) == 0) {
277 : : SetPageUptodate(page);
278 : : goto confused;
279 : : }
280 : :
281 : : /*
282 : : * This page will go to BIO. Do we need to send this BIO off first?
283 : : */
284 [ + + ][ + + ]: 271919 : if (bio && (*last_block_in_bio != blocks[0] - 1))
285 : : bio = mpage_bio_submit(READ, bio);
286 : :
287 : : alloc_new:
288 [ + + ]: 275956 : if (bio == NULL) {
289 : 43548 : bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
290 : 43548 : min_t(int, nr_pages, bio_get_nr_vecs(bdev)),
291 : : GFP_KERNEL);
292 [ + + ]: 43548 : if (bio == NULL)
293 : : goto confused;
294 : : }
295 : :
296 : 275908 : length = first_hole << blkbits;
297 [ + + ]: 275908 : if (bio_add_page(bio, page, length, 0) < length) {
298 : : bio = mpage_bio_submit(READ, bio);
299 : 4037 : goto alloc_new;
300 : : }
301 : :
302 : 271918 : relative_block = block_in_file - *first_logical_block;
303 : 271918 : nblocks = map_bh->b_size >> blkbits;
304 [ - + ][ # # ]: 271918 : if ((buffer_boundary(map_bh) && relative_block == nblocks) ||
[ - + ]
305 : : (first_hole != blocks_per_page))
306 : 0 : bio = mpage_bio_submit(READ, bio);
307 : : else
308 : 271918 : *last_block_in_bio = blocks[blocks_per_page - 1];
309 : : out:
310 : 470863 : return bio;
311 : :
312 : : confused:
313 [ - + ]: 1079 : if (bio)
314 : : bio = mpage_bio_submit(READ, bio);
315 [ + - ]: 1079 : if (!PageUptodate(page))
316 : 1079 : block_read_full_page(page, get_block);
317 : : else
318 : 0 : unlock_page(page);
319 : : goto out;
320 : : }
321 : :
322 : : /**
323 : : * mpage_readpages - populate an address space with some pages & start reads against them
324 : : * @mapping: the address_space
325 : : * @pages: The address of a list_head which contains the target pages. These
326 : : * pages have their ->index populated and are otherwise uninitialised.
327 : : * The page at @pages->prev has the lowest file offset, and reads should be
328 : : * issued in @pages->prev to @pages->next order.
329 : : * @nr_pages: The number of pages at *@pages
330 : : * @get_block: The filesystem's block mapper function.
331 : : *
332 : : * This function walks the pages and the blocks within each page, building and
333 : : * emitting large BIOs.
334 : : *
335 : : * If anything unusual happens, such as:
336 : : *
337 : : * - encountering a page which has buffers
338 : : * - encountering a page which has a non-hole after a hole
339 : : * - encountering a page with non-contiguous blocks
340 : : *
341 : : * then this code just gives up and calls the buffer_head-based read function.
342 : : * It does handle a page which has holes at the end - that is a common case:
343 : : * the end-of-file on blocksize < PAGE_CACHE_SIZE setups.
344 : : *
345 : : * BH_Boundary explanation:
346 : : *
347 : : * There is a problem. The mpage read code assembles several pages, gets all
348 : : * their disk mappings, and then submits them all. That's fine, but obtaining
349 : : * the disk mappings may require I/O. Reads of indirect blocks, for example.
350 : : *
351 : : * So an mpage read of the first 16 blocks of an ext2 file will cause I/O to be
352 : : * submitted in the following order:
353 : : * 12 0 1 2 3 4 5 6 7 8 9 10 11 13 14 15 16
354 : : *
355 : : * because the indirect block has to be read to get the mappings of blocks
356 : : * 13,14,15,16. Obviously, this impacts performance.
357 : : *
 358 : : * So what we do is allow the filesystem's get_block() function to set
359 : : * BH_Boundary when it maps block 11. BH_Boundary says: mapping of the block
360 : : * after this one will require I/O against a block which is probably close to
361 : : * this one. So you should push what I/O you have currently accumulated.
362 : : *
 363 : : * This all causes the disk requests to be issued in the correct order.
 : : * (A typical caller is sketched after this function.)
364 : : */
365 : : int
366 : 0 : mpage_readpages(struct address_space *mapping, struct list_head *pages,
367 : : unsigned nr_pages, get_block_t get_block)
368 : : {
369 : : struct bio *bio = NULL;
370 : : unsigned page_idx;
371 : 121790 : sector_t last_block_in_bio = 0;
372 : : struct buffer_head map_bh;
373 : 121790 : unsigned long first_logical_block = 0;
374 : :
375 : 121790 : map_bh.b_state = 0;
376 : 121790 : map_bh.b_size = 0;
377 [ + + ]: 578656 : for (page_idx = 0; page_idx < nr_pages; page_idx++) {
378 : 456743 : struct page *page = list_entry(pages->prev, struct page, lru);
379 : :
380 : 456743 : prefetchw(&page->flags);
381 : : list_del(&page->lru);
382 [ + + ]: 456759 : if (!add_to_page_cache_lru(page, mapping,
383 : : page->index, GFP_KERNEL)) {
384 : 456750 : bio = do_mpage_readpage(bio, page,
385 : : nr_pages - page_idx,
386 : : &last_block_in_bio, &map_bh,
387 : : &first_logical_block,
388 : : get_block);
389 : : }
390 : 456807 : page_cache_release(page);
391 : : }
392 [ - + ]: 121913 : BUG_ON(!list_empty(pages));
393 [ + + ]: 121913 : if (bio)
394 : : mpage_bio_submit(READ, bio);
395 : 121913 : return 0;
396 : : }
397 : : EXPORT_SYMBOL(mpage_readpages);
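/*
 * Typical caller, as found in fs/ext2/inode.c: a filesystem's ->readpages
 * method simply forwards to mpage_readpages() with its own get_block.  The
 * get_block implementation cooperates with the BH_Boundary scheme described
 * above by calling set_buffer_boundary() on the result when mapping the next
 * block would require a metadata read:
 */
static int ext2_readpages(struct file *file, struct address_space *mapping,
			  struct list_head *pages, unsigned nr_pages)
{
	return mpage_readpages(mapping, pages, nr_pages, ext2_get_block);
}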
398 : :
399 : : /*
400 : : * This isn't called much at all
401 : : */
402 : 0 : int mpage_readpage(struct page *page, get_block_t get_block)
403 : : {
404 : : struct bio *bio = NULL;
405 : 14210 : sector_t last_block_in_bio = 0;
406 : : struct buffer_head map_bh;
407 : 14210 : unsigned long first_logical_block = 0;
408 : :
409 : 14210 : map_bh.b_state = 0;
410 : 14210 : map_bh.b_size = 0;
411 : 14210 : bio = do_mpage_readpage(bio, page, 1, &last_block_in_bio,
412 : : &map_bh, &first_logical_block, get_block);
413 [ + + ]: 14213 : if (bio)
414 : : mpage_bio_submit(READ, bio);
415 : 3 : return 0;
416 : : }
417 : : EXPORT_SYMBOL(mpage_readpage);
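/*
 * The single-page variant is wired up the same way; again from
 * fs/ext2/inode.c:
 */
static int ext2_readpage(struct file *file, struct page *page)
{
	return mpage_readpage(page, ext2_get_block);
}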
418 : :
419 : : /*
420 : : * Writing is not so simple.
421 : : *
422 : : * If the page has buffers then they will be used for obtaining the disk
423 : : * mapping. We only support pages which are fully mapped-and-dirty, with a
424 : : * special case for pages which are unmapped at the end: end-of-file.
425 : : *
426 : : * If the page has no buffers (preferred) then the page is mapped here.
427 : : *
428 : : * If all blocks are found to be contiguous then the page can go into the
429 : : * BIO. Otherwise fall back to the mapping's writepage().
430 : : *
431 : : * FIXME: This code wants an estimate of how many pages are still to be
432 : : * written, so it can intelligently allocate a suitably-sized BIO. For now,
433 : : * just allocate full-size (16-page) BIOs.
434 : : */
435 : :
436 : : struct mpage_data {
437 : : struct bio *bio;
438 : : sector_t last_block_in_bio;
439 : : get_block_t *get_block;
440 : : unsigned use_writepage;
441 : : };
442 : :
443 : 0 : static int __mpage_writepage(struct page *page, struct writeback_control *wbc,
444 : : void *data)
445 : : {
446 : : struct mpage_data *mpd = data;
447 : 0 : struct bio *bio = mpd->bio;
448 : 0 : struct address_space *mapping = page->mapping;
449 : 0 : struct inode *inode = page->mapping->host;
450 : 0 : const unsigned blkbits = inode->i_blkbits;
451 : : unsigned long end_index;
452 : 0 : const unsigned blocks_per_page = PAGE_CACHE_SIZE >> blkbits;
453 : : sector_t last_block;
454 : : sector_t block_in_file;
455 : : sector_t blocks[MAX_BUF_PER_PAGE];
456 : : unsigned page_block;
457 : : unsigned first_unmapped = blocks_per_page;
458 : : struct block_device *bdev = NULL;
459 : : int boundary = 0;
460 : : sector_t boundary_block = 0;
461 : : struct block_device *boundary_bdev = NULL;
462 : : int length;
463 : : struct buffer_head map_bh;
464 : : loff_t i_size = i_size_read(inode);
465 : : int ret = 0;
466 : :
467 [ # # ]: 0 : if (page_has_buffers(page)) {
468 [ # # ]: 0 : struct buffer_head *head = page_buffers(page);
469 : : struct buffer_head *bh = head;
470 : :
471 : : /* If they're all mapped and dirty, do it */
472 : : page_block = 0;
473 : : do {
474 [ # # ]: 0 : BUG_ON(buffer_locked(bh));
475 [ # # ]: 0 : if (!buffer_mapped(bh)) {
476 : : /*
477 : : * unmapped dirty buffers are created by
478 : : * __set_page_dirty_buffers -> mmapped data
479 : : */
480 [ # # ]: 0 : if (buffer_dirty(bh))
481 : : goto confused;
482 [ # # ]: 0 : if (first_unmapped == blocks_per_page)
483 : : first_unmapped = page_block;
484 : 0 : continue;
485 : : }
486 : :
487 [ # # ]: 0 : if (first_unmapped != blocks_per_page)
488 : : goto confused; /* hole -> non-hole */
489 : :
490 [ # # ][ # # ]: 0 : if (!buffer_dirty(bh) || !buffer_uptodate(bh))
491 : : goto confused;
492 [ # # ]: 0 : if (page_block) {
493 [ # # ]: 0 : if (bh->b_blocknr != blocks[page_block-1] + 1)
494 : : goto confused;
495 : : }
496 : 0 : blocks[page_block++] = bh->b_blocknr;
497 : : boundary = buffer_boundary(bh);
498 [ # # ]: 0 : if (boundary) {
499 : : boundary_block = bh->b_blocknr;
500 : 0 : boundary_bdev = bh->b_bdev;
501 : : }
502 : 0 : bdev = bh->b_bdev;
503 [ # # ]: 0 : } while ((bh = bh->b_this_page) != head);
504 : :
505 [ # # ]: 0 : if (first_unmapped)
506 : : goto page_is_mapped;
507 : :
508 : : /*
509 : : * Page has buffers, but they are all unmapped. The page was
510 : : * created by pagein or read over a hole which was handled by
511 : : * block_read_full_page(). If this address_space is also
 512 : : * using mpage_readpages then this rarely happens.
513 : : */
514 : : goto confused;
515 : : }
516 : :
517 : : /*
518 : : * The page has no buffers: map it to disk
519 : : */
520 [ # # ]: 0 : BUG_ON(!PageUptodate(page));
521 : 0 : block_in_file = (sector_t)page->index << (PAGE_CACHE_SHIFT - blkbits);
522 : 0 : last_block = (i_size - 1) >> blkbits;
523 : 0 : map_bh.b_page = page;
524 [ # # ]: 0 : for (page_block = 0; page_block < blocks_per_page; ) {
525 : :
526 : 0 : map_bh.b_state = 0;
527 : 0 : map_bh.b_size = 1 << blkbits;
528 [ # # ]: 0 : if (mpd->get_block(inode, block_in_file, &map_bh, 1))
529 : : goto confused;
530 [ # # ]: 0 : if (buffer_new(&map_bh))
531 : 0 : unmap_underlying_metadata(map_bh.b_bdev,
532 : : map_bh.b_blocknr);
533 [ # # ]: 0 : if (buffer_boundary(&map_bh)) {
534 : 0 : boundary_block = map_bh.b_blocknr;
535 : 0 : boundary_bdev = map_bh.b_bdev;
536 : : }
537 [ # # ]: 0 : if (page_block) {
538 [ # # ]: 0 : if (map_bh.b_blocknr != blocks[page_block-1] + 1)
539 : : goto confused;
540 : : }
541 : 0 : blocks[page_block++] = map_bh.b_blocknr;
542 : : boundary = buffer_boundary(&map_bh);
543 : 0 : bdev = map_bh.b_bdev;
544 [ # # ]: 0 : if (block_in_file == last_block)
545 : : break;
546 : 0 : block_in_file++;
547 : : }
548 [ # # ]: 0 : BUG_ON(page_block == 0);
549 : :
550 : : first_unmapped = page_block;
551 : :
552 : : page_is_mapped:
553 : 0 : end_index = i_size >> PAGE_CACHE_SHIFT;
554 [ # # ]: 0 : if (page->index >= end_index) {
555 : : /*
556 : : * The page straddles i_size. It must be zeroed out on each
557 : : * and every writepage invocation because it may be mmapped.
558 : : * "A file is mapped in multiples of the page size. For a file
559 : : * that is not a multiple of the page size, the remaining memory
560 : : * is zeroed when mapped, and writes to that region are not
561 : : * written out to the file."
562 : : */
563 : 0 : unsigned offset = i_size & (PAGE_CACHE_SIZE - 1);
564 : :
565 [ # # ][ # # ]: 0 : if (page->index > end_index || !offset)
566 : : goto confused;
567 : : zero_user_segment(page, offset, PAGE_CACHE_SIZE);
568 : : }
569 : :
570 : : /*
571 : : * This page will go to BIO. Do we need to send this BIO off first?
572 : : */
573 [ # # ][ # # ]: 0 : if (bio && mpd->last_block_in_bio != blocks[0] - 1)
574 : : bio = mpage_bio_submit(WRITE, bio);
575 : :
576 : : alloc_new:
577 [ # # ]: 0 : if (bio == NULL) {
578 : 0 : bio = mpage_alloc(bdev, blocks[0] << (blkbits - 9),
579 : : bio_get_nr_vecs(bdev), GFP_NOFS|__GFP_HIGH);
580 [ # # ]: 0 : if (bio == NULL)
581 : : goto confused;
582 : : }
583 : :
584 : : /*
585 : : * Must try to add the page before marking the buffer clean or
586 : : * the confused fail path above (OOM) will be very confused when
587 : : * it finds all bh marked clean (i.e. it will not write anything)
588 : : */
589 : 0 : length = first_unmapped << blkbits;
590 [ # # ]: 0 : if (bio_add_page(bio, page, length, 0) < length) {
591 : : bio = mpage_bio_submit(WRITE, bio);
592 : 0 : goto alloc_new;
593 : : }
594 : :
595 : : /*
596 : : * OK, we have our BIO, so we can now mark the buffers clean. Make
597 : : * sure to only clean buffers which we know we'll be writing.
598 : : */
599 [ # # ]: 0 : if (page_has_buffers(page)) {
600 [ # # ]: 0 : struct buffer_head *head = page_buffers(page);
601 : : struct buffer_head *bh = head;
602 : : unsigned buffer_counter = 0;
603 : :
604 : : do {
605 [ # # ]: 0 : if (buffer_counter++ == first_unmapped)
606 : : break;
607 : : clear_buffer_dirty(bh);
608 : 0 : bh = bh->b_this_page;
609 [ # # ]: 0 : } while (bh != head);
610 : :
611 : : /*
 612 : : * We cannot drop the bh if the page is not uptodate, or a
 613 : : * concurrent readpage would fail to serialize with the bh and
 614 : : * would read from disk before our data reaches the platter.
615 : : */
616 [ # # ][ # # ]: 0 : if (buffer_heads_over_limit && PageUptodate(page))
617 : 0 : try_to_free_buffers(page);
618 : : }
619 : :
620 [ # # ]: 0 : BUG_ON(PageWriteback(page));
621 : : set_page_writeback(page);
622 : 0 : unlock_page(page);
623 [ # # ]: 0 : if (boundary || (first_unmapped != blocks_per_page)) {
624 : : bio = mpage_bio_submit(WRITE, bio);
625 [ # # ]: 0 : if (boundary_block) {
626 : 0 : write_boundary_block(boundary_bdev,
627 : 0 : boundary_block, 1 << blkbits);
628 : : }
629 : : } else {
630 : 0 : mpd->last_block_in_bio = blocks[blocks_per_page - 1];
631 : : }
632 : : goto out;
633 : :
634 : : confused:
635 [ # # ]: 0 : if (bio)
636 : : bio = mpage_bio_submit(WRITE, bio);
637 : :
638 [ # # ]: 0 : if (mpd->use_writepage) {
639 : 0 : ret = mapping->a_ops->writepage(page, wbc);
640 : : } else {
641 : : ret = -EAGAIN;
642 : : goto out;
643 : : }
644 : : /*
645 : : * The caller has a ref on the inode, so *mapping is stable
646 : : */
647 : : mapping_set_error(mapping, ret);
648 : : out:
649 : 0 : mpd->bio = bio;
650 : 0 : return ret;
651 : : }
652 : :
653 : : /**
654 : : * mpage_writepages - walk the list of dirty pages of the given address space & writepage() all of them
655 : : * @mapping: address space structure to write
656 : : * @wbc: subtract the number of written pages from *@wbc->nr_to_write
657 : : * @get_block: the filesystem's block mapper function.
658 : : * If this is NULL then use a_ops->writepage. Otherwise, go
659 : : * direct-to-BIO.
660 : : *
661 : : * This is a library function, which implements the writepages()
662 : : * address_space_operation.
663 : : *
664 : : * If a page is already under I/O, generic_writepages() skips it, even
665 : : * if it's dirty. This is desirable behaviour for memory-cleaning writeback,
666 : : * but it is INCORRECT for data-integrity system calls such as fsync(). fsync()
667 : : * and msync() need to guarantee that all the data which was dirty at the time
668 : : * the call was made get new I/O started against them. If wbc->sync_mode is
669 : : * WB_SYNC_ALL then we were called for data integrity and we must wait for
670 : : * existing IO to complete.
671 : : */
672 : : int
673 : 0 : mpage_writepages(struct address_space *mapping,
674 : : struct writeback_control *wbc, get_block_t get_block)
675 : : {
676 : : struct blk_plug plug;
677 : : int ret;
678 : :
679 : 0 : blk_start_plug(&plug);
680 : :
681 [ # # ]: 0 : if (!get_block)
682 : 0 : ret = generic_writepages(mapping, wbc);
683 : : else {
684 : 0 : struct mpage_data mpd = {
685 : : .bio = NULL,
686 : : .last_block_in_bio = 0,
687 : : .get_block = get_block,
688 : : .use_writepage = 1,
689 : : };
690 : :
691 : 0 : ret = write_cache_pages(mapping, wbc, __mpage_writepage, &mpd);
692 [ # # ]: 0 : if (mpd.bio)
693 : : mpage_bio_submit(WRITE, mpd.bio);
694 : : }
695 : 0 : blk_finish_plug(&plug);
696 : 0 : return ret;
697 : : }
698 : : EXPORT_SYMBOL(mpage_writepages);
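/*
 * Typical caller, as found in fs/ext2/inode.c: passing a non-NULL get_block
 * takes the direct-to-BIO path through __mpage_writepage() rather than
 * generic_writepages():
 */
static int ext2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	return mpage_writepages(mapping, wbc, ext2_get_block);
}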
699 : :
700 : 0 : int mpage_writepage(struct page *page, get_block_t get_block,
701 : : struct writeback_control *wbc)
702 : : {
703 : 0 : struct mpage_data mpd = {
704 : : .bio = NULL,
705 : : .last_block_in_bio = 0,
706 : : .get_block = get_block,
707 : : .use_writepage = 0,
708 : : };
709 : 0 : int ret = __mpage_writepage(page, wbc, &mpd);
710 [ # # ]: 0 : if (mpd.bio)
711 : : mpage_bio_submit(WRITE, mpd.bio);
712 : 0 : return ret;
713 : : }
714 : : EXPORT_SYMBOL(mpage_writepage);
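/*
 * Editorial sketch: because use_writepage is 0 here, a "confused" page (hole
 * followed by data, unmapped dirty buffers, or BIO allocation failure) comes
 * back as -EAGAIN instead of being pushed through ->writepage.  A
 * hypothetical caller (the myfs_* names are assumptions, not symbols from
 * this file) would supply the fallback itself, e.g. via the buffer_head path:
 */
static int myfs_writepage(struct page *page, struct writeback_control *wbc)
{
	int ret = mpage_writepage(page, myfs_get_block, wbc);

	if (ret == -EAGAIN)	/* page is still locked; fall back */
		ret = block_write_full_page(page, myfs_get_block, wbc);
	return ret;
}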