Branch data Line data Source code
1 : : /*
2 : : * linux/fs/ext4/inode.c
3 : : *
4 : : * Copyright (C) 1992, 1993, 1994, 1995
5 : : * Remy Card (card@masi.ibp.fr)
6 : : * Laboratoire MASI - Institut Blaise Pascal
7 : : * Universite Pierre et Marie Curie (Paris VI)
8 : : *
9 : : * from
10 : : *
11 : : * linux/fs/minix/inode.c
12 : : *
13 : : * Copyright (C) 1991, 1992 Linus Torvalds
14 : : *
15 : : * 64-bit file support on 64-bit platforms by Jakub Jelinek
16 : : * (jj@sunsite.ms.mff.cuni.cz)
17 : : *
18 : : * Assorted race fixes, rewrite of ext4_get_block() by Al Viro, 2000
19 : : */
20 : :
21 : : #include <linux/fs.h>
22 : : #include <linux/time.h>
23 : : #include <linux/jbd2.h>
24 : : #include <linux/highuid.h>
25 : : #include <linux/pagemap.h>
26 : : #include <linux/quotaops.h>
27 : : #include <linux/string.h>
28 : : #include <linux/buffer_head.h>
29 : : #include <linux/writeback.h>
30 : : #include <linux/pagevec.h>
31 : : #include <linux/mpage.h>
32 : : #include <linux/namei.h>
33 : : #include <linux/uio.h>
34 : : #include <linux/bio.h>
35 : : #include <linux/workqueue.h>
36 : : #include <linux/kernel.h>
37 : : #include <linux/printk.h>
38 : : #include <linux/slab.h>
39 : : #include <linux/ratelimit.h>
40 : : #include <linux/aio.h>
41 : : #include <linux/bitops.h>
42 : :
43 : : #include "ext4_jbd2.h"
44 : : #include "xattr.h"
45 : : #include "acl.h"
46 : : #include "truncate.h"
47 : :
48 : : #include <trace/events/ext4.h>
49 : :
50 : : #define MPAGE_DA_EXTENT_TAIL 0x01
51 : :
52 : 0 : static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
53 : : struct ext4_inode_info *ei)
54 : : {
55 : 0 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
56 : : __u16 csum_lo;
57 : : __u16 csum_hi = 0;
58 : : __u32 csum;
59 : :
60 : 0 : csum_lo = le16_to_cpu(raw->i_checksum_lo);
61 : 0 : raw->i_checksum_lo = 0;
62 [ # # ][ # # ]: 0 : if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
63 : 0 : EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
64 : 0 : csum_hi = le16_to_cpu(raw->i_checksum_hi);
65 : 0 : raw->i_checksum_hi = 0;
66 : : }
67 : :
68 : 0 : csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
69 : 0 : EXT4_INODE_SIZE(inode->i_sb));
70 : :
71 : 0 : raw->i_checksum_lo = cpu_to_le16(csum_lo);
72 [ # # ][ # # ]: 0 : if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
73 : 0 : EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
74 : 0 : raw->i_checksum_hi = cpu_to_le16(csum_hi);
75 : :
76 : 0 : return csum;
77 : : }
78 : :
79 : 0 : static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
80 : : struct ext4_inode_info *ei)
81 : : {
82 : : __u32 provided, calculated;
83 : :
84 [ + - ]: 8710 : if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
85 [ - + ]: 8710 : cpu_to_le32(EXT4_OS_LINUX) ||
86 : 8710 : !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
87 : : EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
88 : : return 1;
89 : :
90 : 0 : provided = le16_to_cpu(raw->i_checksum_lo);
91 : 0 : calculated = ext4_inode_csum(inode, raw, ei);
92 [ # # ][ # # ]: 8710 : if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
93 : 0 : EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
94 : 0 : provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
95 : : else
96 : 0 : calculated &= 0xFFFF;
97 : :
98 : 0 : return provided == calculated;
99 : : }
100 : :
101 : 0 : static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
102 : : struct ext4_inode_info *ei)
103 : : {
104 : : __u32 csum;
105 : :
106 [ + ]: 8413792 : if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
107 [ - + ]: 8416108 : cpu_to_le32(EXT4_OS_LINUX) ||
108 : 8416108 : !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
109 : : EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
110 : 0 : return;
111 : :
112 : 0 : csum = ext4_inode_csum(inode, raw, ei);
113 : 0 : raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
114 [ # # ][ # # ]: 0 : if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
115 : 0 : EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
116 : 0 : raw->i_checksum_hi = cpu_to_le16(csum >> 16);
117 : : }
118 : :
119 : : static inline int ext4_begin_ordered_truncate(struct inode *inode,
120 : : loff_t new_size)
121 : : {
122 : : trace_ext4_begin_ordered_truncate(inode, new_size);
123 : : /*
124 : : * If jinode is zero, then we never opened the file for
125 : : * writing, so there's no need to call
126 : : * jbd2_journal_begin_ordered_truncate() since there's no
127 : : * outstanding writes we need to flush.
128 : : */
129 [ + - ][ + + ]: 220269 : if (!EXT4_I(inode)->jinode)
130 : : return 0;
131 : 220147 : return jbd2_journal_begin_ordered_truncate(EXT4_JOURNAL(inode),
132 : : EXT4_I(inode)->jinode,
133 : : new_size);
134 : : }
135 : :
136 : : static void ext4_invalidatepage(struct page *page, unsigned int offset,
137 : : unsigned int length);
138 : : static int __ext4_journalled_writepage(struct page *page, unsigned int len);
139 : : static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh);
140 : : static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
141 : : int pextents);
142 : :
143 : : /*
144 : : * Test whether an inode is a fast symlink.
145 : : */
146 : 0 : static int ext4_inode_is_fast_symlink(struct inode *inode)
147 : : {
148 [ - + ]: 3188 : int ea_blocks = EXT4_I(inode)->i_file_acl ?
149 : 0 : EXT4_CLUSTER_SIZE(inode->i_sb) >> 9 : 0;
150 : :
151 [ + ][ + ]: 3188 : return (S_ISLNK(inode->i_mode) && inode->i_blocks - ea_blocks == 0);
152 : : }
153 : :
154 : : /*
155 : : * Restart the transaction associated with *handle. This does a commit,
156 : : * so before we call here everything must be consistently dirtied against
157 : : * this transaction.
158 : : */
159 : 0 : int ext4_truncate_restart_trans(handle_t *handle, struct inode *inode,
160 : : int nblocks)
161 : : {
162 : : int ret;
163 : :
164 : : /*
165 : : * Drop i_data_sem to avoid deadlock with ext4_map_blocks. At this
166 : : * moment, get_block can be called only for blocks inside i_size since
167 : : * page cache has been already dropped and writes are blocked by
168 : : * i_mutex. So we can safely drop the i_data_sem here.
169 : : */
170 [ - + ]: 9 : BUG_ON(EXT4_JOURNAL(inode) == NULL);
171 : : jbd_debug(2, "restarting handle %p\n", handle);
172 : 9 : up_write(&EXT4_I(inode)->i_data_sem);
173 : : ret = ext4_journal_restart(handle, nblocks);
174 : 9 : down_write(&EXT4_I(inode)->i_data_sem);
175 : 9 : ext4_discard_preallocations(inode);
176 : :
177 : 9 : return ret;
178 : : }
179 : :
180 : : /*
181 : : * Called at the last iput() if i_nlink is zero.
182 : : */
183 : 0 : void ext4_evict_inode(struct inode *inode)
184 : : {
185 : : handle_t *handle;
186 : : int err;
187 : :
188 : : trace_ext4_evict_inode(inode);
189 : :
190 [ + + ]: 275039 : if (inode->i_nlink) {
191 : : /*
192 : : * When journalling data dirty buffers are tracked only in the
193 : : * journal. So although mm thinks everything is clean and
194 : : * ready for reaping the inode might still have some pages to
195 : : * write in the running transaction or waiting to be
196 : : * checkpointed. Thus calling jbd2_journal_invalidatepage()
197 : : * (via truncate_inode_pages()) to discard these buffers can
198 : : * cause data loss. Also even if we did not discard these
199 : : * buffers, we would have no way to find them after the inode
200 : : * is reaped and thus user could see stale data if he tries to
201 : : * read them before the transaction is checkpointed. So be
202 : : * careful and force everything to disk here... We use
203 : : * ei->i_datasync_tid to store the newest transaction
204 : : * containing inode's data.
205 : : *
206 : : * Note that directories do not have this problem because they
207 : : * don't use page cache.
208 : : */
209 [ + + ][ + + ]: 3945 : if (ext4_should_journal_data(inode) &&
210 [ + - ]: 102 : (S_ISLNK(inode->i_mode) || S_ISREG(inode->i_mode)) &&
211 : 102 : inode->i_ino != EXT4_JOURNAL_INO) {
212 : : journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
213 : 102 : tid_t commit_tid = EXT4_I(inode)->i_datasync_tid;
214 : :
215 : 102 : jbd2_complete_transaction(journal, commit_tid);
216 : 102 : filemap_write_and_wait(&inode->i_data);
217 : : }
218 : 3945 : truncate_inode_pages(&inode->i_data, 0);
219 : :
220 [ - + ]: 3945 : WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
221 : : goto no_delete;
222 : : }
223 : :
224 [ + + ]: 271094 : if (!is_bad_inode(inode))
225 : 271093 : dquot_initialize(inode);
226 : :
227 [ + + ]: 271095 : if (ext4_should_order_data(inode))
228 : : ext4_begin_ordered_truncate(inode, 0);
229 : 271095 : truncate_inode_pages(&inode->i_data, 0);
230 : :
231 [ - + ]: 271090 : WARN_ON(atomic_read(&EXT4_I(inode)->i_ioend_count));
232 [ + + ]: 271090 : if (is_bad_inode(inode))
233 : : goto no_delete;
234 : :
235 : : /*
236 : : * Protect us against freezing - iput() caller didn't have to have any
237 : : * protection against it
238 : : */
239 : 271089 : sb_start_intwrite(inode->i_sb);
240 : 0 : handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE,
241 : : ext4_blocks_for_truncate(inode)+3);
242 [ - + ]: 271087 : if (IS_ERR(handle)) {
243 [ # # ]: 0 : ext4_std_error(inode->i_sb, PTR_ERR(handle));
244 : : /*
245 : : * If we're going to skip the normal cleanup, we still need to
246 : : * make sure that the in-core orphan linked list is properly
247 : : * cleaned up.
248 : : */
249 : 0 : ext4_orphan_del(NULL, inode);
250 : 0 : sb_end_intwrite(inode->i_sb);
251 : : goto no_delete;
252 : : }
253 : :
254 [ + ][ + + ]: 271087 : if (IS_SYNC(inode))
255 : : ext4_handle_sync(handle);
256 : 271087 : inode->i_size = 0;
257 : 271087 : err = ext4_mark_inode_dirty(handle, inode);
258 [ - + ]: 271048 : if (err) {
259 : 0 : ext4_warning(inode->i_sb,
260 : : "couldn't mark inode dirty (err %d)", err);
261 : 0 : goto stop_handle;
262 : : }
263 [ + + ]: 271048 : if (inode->i_blocks)
264 : 40141 : ext4_truncate(inode);
265 : :
266 : : /*
267 : : * ext4_ext_truncate() doesn't reserve any slop when it
268 : : * restarts journal transactions; therefore there may not be
269 : : * enough credits left in the handle to remove the inode from
270 : : * the orphan list and set the dtime field.
271 : : */
272 [ - + ]: 271075 : if (!ext4_handle_has_enough_credits(handle, 3)) {
273 : : err = ext4_journal_extend(handle, 3);
274 [ # # ]: 0 : if (err > 0)
275 : : err = ext4_journal_restart(handle, 3);
276 [ # # ]: 0 : if (err != 0) {
277 : 0 : ext4_warning(inode->i_sb,
278 : : "couldn't extend journal (err %d)", err);
279 : : stop_handle:
280 : 0 : ext4_journal_stop(handle);
281 : 0 : ext4_orphan_del(NULL, inode);
282 : 0 : sb_end_intwrite(inode->i_sb);
283 : : goto no_delete;
284 : : }
285 : : }
286 : :
287 : : /*
288 : : * Kill off the orphan record which ext4_truncate created.
289 : : * AKPM: I think this can be inside the above `if'.
290 : : * Note that ext4_orphan_del() has to be able to cope with the
291 : : * deletion of a non-existent orphan - this is because we don't
292 : : * know if ext4_truncate() actually created an orphan record.
293 : : * (Well, we could do this if we need to, but heck - it works)
294 : : */
295 : 271075 : ext4_orphan_del(handle, inode);
296 : 271095 : EXT4_I(inode)->i_dtime = get_seconds();
297 : :
298 : : /*
299 : : * One subtle ordering requirement: if anything has gone wrong
300 : : * (transaction abort, IO errors, whatever), then we can still
301 : : * do these next steps (the fs will already have been marked as
302 : : * having errors), but we can't free the inode if the mark_dirty
303 : : * fails.
304 : : */
305 [ - + ]: 271095 : if (ext4_mark_inode_dirty(handle, inode))
306 : : /* If that failed, just do the required in-core inode clear. */
307 : 0 : ext4_clear_inode(inode);
308 : : else
309 : 271095 : ext4_free_inode(handle, inode);
310 : 271091 : ext4_journal_stop(handle);
311 : 271092 : sb_end_intwrite(inode->i_sb);
312 : 275040 : return;
313 : : no_delete:
314 : 3947 : ext4_clear_inode(inode); /* We must guarantee clearing of inode... */
315 : : }
316 : :
317 : : #ifdef CONFIG_QUOTA
318 : 0 : qsize_t *ext4_get_reserved_space(struct inode *inode)
319 : : {
320 : 2993882 : return &EXT4_I(inode)->i_reserved_quota;
321 : : }
322 : : #endif
323 : :
324 : : /*
325 : : * Calculate the number of metadata blocks need to reserve
326 : : * to allocate a block located at @lblock
327 : : */
328 : 0 : static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
329 : : {
330 [ + - ]: 1761914 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
331 : 1761914 : return ext4_ext_calc_metadata_amount(inode, lblock);
332 : :
333 : 0 : return ext4_ind_calc_metadata_amount(inode, lblock);
334 : : }
335 : :
336 : : /*
337 : : * Called with i_data_sem down, which is important since we can call
338 : : * ext4_discard_preallocations() from here.
339 : : */
340 : 0 : void ext4_da_update_reserve_space(struct inode *inode,
341 : : int used, int quota_claim)
342 : : {
343 : 86043 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
344 : : struct ext4_inode_info *ei = EXT4_I(inode);
345 : :
346 : : spin_lock(&ei->i_block_reservation_lock);
347 : : trace_ext4_da_update_reserve_space(inode, used, quota_claim);
348 [ - + ]: 86030 : if (unlikely(used > ei->i_reserved_data_blocks)) {
349 : 0 : ext4_warning(inode->i_sb, "%s: ino %lu, used %d "
350 : : "with only %d reserved data blocks",
351 : : __func__, inode->i_ino, used,
352 : : ei->i_reserved_data_blocks);
353 : 0 : WARN_ON(1);
354 : 0 : used = ei->i_reserved_data_blocks;
355 : : }
356 : :
357 [ - + ]: 86030 : if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
358 : 0 : ext4_warning(inode->i_sb, "ino %lu, allocated %d "
359 : : "with only %d reserved metadata blocks "
360 : : "(releasing %d blocks with reserved %d data blocks)",
361 : : inode->i_ino, ei->i_allocated_meta_blocks,
362 : : ei->i_reserved_meta_blocks, used,
363 : : ei->i_reserved_data_blocks);
364 : 0 : WARN_ON(1);
365 : 0 : ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
366 : : }
367 : :
368 : : /* Update per-inode reservations */
369 : 86030 : ei->i_reserved_data_blocks -= used;
370 : 86030 : ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
371 : 172060 : percpu_counter_sub(&sbi->s_dirtyclusters_counter,
372 : 86030 : used + ei->i_allocated_meta_blocks);
373 : 86049 : ei->i_allocated_meta_blocks = 0;
374 : :
375 [ + + ]: 86049 : if (ei->i_reserved_data_blocks == 0) {
376 : : /*
377 : : * We can release all of the reserved metadata blocks
378 : : * only when we have written all of the delayed
379 : : * allocation blocks.
380 : : */
381 : 12450 : percpu_counter_sub(&sbi->s_dirtyclusters_counter,
382 : 12450 : ei->i_reserved_meta_blocks);
383 : 12450 : ei->i_reserved_meta_blocks = 0;
384 : 12450 : ei->i_da_metadata_calc_len = 0;
385 : : }
386 : : spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
387 : :
388 : : /* Update quota subsystem for data blocks */
389 [ + - ]: 86050 : if (quota_claim)
390 : 86050 : dquot_claim_block(inode, EXT4_C2B(sbi, used));
391 : : else {
392 : : /*
393 : : * We did fallocate with an offset that is already delayed
394 : : * allocated. So on delayed allocated writeback we should
395 : : * not re-claim the quota for fallocated blocks.
396 : : */
397 : 0 : dquot_release_reservation_block(inode, EXT4_C2B(sbi, used));
398 : : }
399 : :
400 : : /*
401 : : * If we have done all the pending block allocations and if
402 : : * there aren't any writers on the inode, we can discard the
403 : : * inode's preallocations.
404 : : */
405 [ + + ][ + + ]: 86027 : if ((ei->i_reserved_data_blocks == 0) &&
406 : 12450 : (atomic_read(&inode->i_writecount) == 0))
407 : 1989 : ext4_discard_preallocations(inode);
408 : 86027 : }
409 : :
410 : 0 : static int __check_block_validity(struct inode *inode, const char *func,
411 : : unsigned int line,
412 : : struct ext4_map_blocks *map)
413 : : {
414 [ - + ]: 1631852 : if (!ext4_data_block_valid(EXT4_SB(inode->i_sb), map->m_pblk,
415 : : map->m_len)) {
416 : 0 : ext4_error_inode(inode, func, line, map->m_pblk,
417 : : "lblock %lu mapped to illegal pblock "
418 : : "(length %d)", (unsigned long) map->m_lblk,
419 : : map->m_len);
420 : 0 : return -EIO;
421 : : }
422 : : return 0;
423 : : }
424 : :
425 : : #define check_block_validity(inode, map) \
426 : : __check_block_validity((inode), __func__, __LINE__, (map))
427 : :
428 : : #ifdef ES_AGGRESSIVE_TEST
429 : : static void ext4_map_blocks_es_recheck(handle_t *handle,
430 : : struct inode *inode,
431 : : struct ext4_map_blocks *es_map,
432 : : struct ext4_map_blocks *map,
433 : : int flags)
434 : : {
435 : : int retval;
436 : :
437 : : map->m_flags = 0;
438 : : /*
439 : : * There is a race window that the result is not the same.
440 : : * e.g. xfstests #223 when dioread_nolock enables. The reason
441 : : * is that we lookup a block mapping in extent status tree with
442 : : * out taking i_data_sem. So at the time the unwritten extent
443 : : * could be converted.
444 : : */
445 : : if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
446 : : down_read((&EXT4_I(inode)->i_data_sem));
447 : : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
448 : : retval = ext4_ext_map_blocks(handle, inode, map, flags &
449 : : EXT4_GET_BLOCKS_KEEP_SIZE);
450 : : } else {
451 : : retval = ext4_ind_map_blocks(handle, inode, map, flags &
452 : : EXT4_GET_BLOCKS_KEEP_SIZE);
453 : : }
454 : : if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
455 : : up_read((&EXT4_I(inode)->i_data_sem));
456 : : /*
457 : : * Clear EXT4_MAP_FROM_CLUSTER and EXT4_MAP_BOUNDARY flag
458 : : * because it shouldn't be marked in es_map->m_flags.
459 : : */
460 : : map->m_flags &= ~(EXT4_MAP_FROM_CLUSTER | EXT4_MAP_BOUNDARY);
461 : :
462 : : /*
463 : : * We don't check m_len because extent will be collpased in status
464 : : * tree. So the m_len might not equal.
465 : : */
466 : : if (es_map->m_lblk != map->m_lblk ||
467 : : es_map->m_flags != map->m_flags ||
468 : : es_map->m_pblk != map->m_pblk) {
469 : : printk("ES cache assertion failed for inode: %lu "
470 : : "es_cached ex [%d/%d/%llu/%x] != "
471 : : "found ex [%d/%d/%llu/%x] retval %d flags %x\n",
472 : : inode->i_ino, es_map->m_lblk, es_map->m_len,
473 : : es_map->m_pblk, es_map->m_flags, map->m_lblk,
474 : : map->m_len, map->m_pblk, map->m_flags,
475 : : retval, flags);
476 : : }
477 : : }
478 : : #endif /* ES_AGGRESSIVE_TEST */
479 : :
480 : : /*
481 : : * The ext4_map_blocks() function tries to look up the requested blocks,
482 : : * and returns if the blocks are already mapped.
483 : : *
484 : : * Otherwise it takes the write lock of the i_data_sem and allocate blocks
485 : : * and store the allocated blocks in the result buffer head and mark it
486 : : * mapped.
487 : : *
488 : : * If file type is extents based, it will call ext4_ext_map_blocks(),
489 : : * Otherwise, call with ext4_ind_map_blocks() to handle indirect mapping
490 : : * based files
491 : : *
492 : : * On success, it returns the number of blocks being mapped or allocate.
493 : : * if create==0 and the blocks are pre-allocated and uninitialized block,
494 : : * the result buffer head is unmapped. If the create ==1, it will make sure
495 : : * the buffer head is mapped.
496 : : *
497 : : * It returns 0 if plain look up failed (blocks have not been allocated), in
498 : : * that case, buffer head is unmapped
499 : : *
500 : : * It returns the error in case of allocation failure.
501 : : */
502 : 0 : int ext4_map_blocks(handle_t *handle, struct inode *inode,
503 : : struct ext4_map_blocks *map, int flags)
504 : : {
505 : : struct extent_status es;
506 : : int retval;
507 : : #ifdef ES_AGGRESSIVE_TEST
508 : : struct ext4_map_blocks orig_map;
509 : :
510 : : memcpy(&orig_map, map, sizeof(*map));
511 : : #endif
512 : :
513 : 1816113 : map->m_flags = 0;
514 : : ext_debug("ext4_map_blocks(): inode %lu, flag %d, max_blocks %u,"
515 : : "logical block %lu\n", inode->i_ino, flags, map->m_len,
516 : : (unsigned long) map->m_lblk);
517 : :
518 : : /* Lookup extent status tree firstly */
519 [ + + ]: 1816113 : if (ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
520 : 1620051 : ext4_es_lru_add(inode);
521 [ + + ][ + + ]: 1619314 : if (ext4_es_is_written(&es) || ext4_es_is_unwritten(&es)) {
522 : 2775394 : map->m_pblk = ext4_es_pblock(&es) +
523 : 2775394 : map->m_lblk - es.es_lblk;
524 [ + + ]: 1387697 : map->m_flags |= ext4_es_is_written(&es) ?
525 : : EXT4_MAP_MAPPED : EXT4_MAP_UNWRITTEN;
526 : 1387697 : retval = es.es_len - (map->m_lblk - es.es_lblk);
527 [ + + ]: 1387697 : if (retval > map->m_len)
528 : 352016 : retval = map->m_len;
529 : 1387697 : map->m_len = retval;
530 [ + + ][ - + ]: 231617 : } else if (ext4_es_is_delayed(&es) || ext4_es_is_hole(&es)) {
531 : : retval = 0;
532 : : } else {
533 : 0 : BUG_ON(1);
534 : : }
535 : : #ifdef ES_AGGRESSIVE_TEST
536 : : ext4_map_blocks_es_recheck(handle, inode, map,
537 : : &orig_map, flags);
538 : : #endif
539 : : goto found;
540 : : }
541 : :
542 : : /*
543 : : * Try to see if we can get the block without requesting a new
544 : : * file system block.
545 : : */
546 [ + + ]: 196884 : if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
547 : 196798 : down_read((&EXT4_I(inode)->i_data_sem));
548 [ + - ]: 196873 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
549 : 196873 : retval = ext4_ext_map_blocks(handle, inode, map, flags &
550 : : EXT4_GET_BLOCKS_KEEP_SIZE);
551 : : } else {
552 : 0 : retval = ext4_ind_map_blocks(handle, inode, map, flags &
553 : : EXT4_GET_BLOCKS_KEEP_SIZE);
554 : : }
555 [ + + ]: 2013802 : if (retval > 0) {
556 : : int ret;
557 : : unsigned int status;
558 : :
559 [ - + ]: 49664 : if (unlikely(retval != map->m_len)) {
560 : 0 : ext4_warning(inode->i_sb,
561 : : "ES len assertion failed for inode "
562 : : "%lu: retval %d != map->m_len %d",
563 : : inode->i_ino, retval, map->m_len);
564 : 0 : WARN_ON(1);
565 : : }
566 : :
567 [ + - ]: 49664 : status = map->m_flags & EXT4_MAP_UNWRITTEN ?
568 : : EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
569 [ + - - + ]: 99328 : if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
570 : 49664 : ext4_find_delalloc_range(inode, map->m_lblk,
571 : 49664 : map->m_lblk + map->m_len - 1))
572 : 0 : status |= EXTENT_STATUS_DELAYED;
573 : 49664 : ret = ext4_es_insert_extent(inode, map->m_lblk,
574 : : map->m_len, map->m_pblk, status);
575 [ - + ]: 49664 : if (ret < 0)
576 : : retval = ret;
577 : : }
578 [ + + ]: 197689 : if (!(flags & EXT4_GET_BLOCKS_NO_LOCK))
579 : 196889 : up_read((&EXT4_I(inode)->i_data_sem));
580 : :
581 : : found:
582 [ + + ][ + ]: 1817015 : if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
583 : 1438038 : int ret = check_block_validity(inode, map);
584 [ + ]: 1437718 : if (ret != 0)
585 : : return ret;
586 : : }
587 : :
588 : : /* If it is only a block(s) look up */
589 [ + + ]: 1816720 : if ((flags & EXT4_GET_BLOCKS_CREATE) == 0)
590 : : return retval;
591 : :
592 : : /*
593 : : * Returns if the blocks have already allocated
594 : : *
595 : : * Note that if blocks have been preallocated
596 : : * ext4_ext_get_block() returns the create = 0
597 : : * with buffer head unmapped.
598 : : */
599 [ + + ][ + + ]: 338850 : if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED)
600 : : return retval;
601 : :
602 : : /*
603 : : * Here we clear m_flags because after allocating an new extent,
604 : : * it will be set again.
605 : : */
606 : 194078 : map->m_flags &= ~EXT4_MAP_FLAGS;
607 : :
608 : : /*
609 : : * New blocks allocate and/or writing to uninitialized extent
610 : : * will possibly result in updating i_data, so we take
611 : : * the write lock of i_data_sem, and call get_blocks()
612 : : * with create == 1 flag.
613 : : */
614 : 194078 : down_write((&EXT4_I(inode)->i_data_sem));
615 : :
616 : : /*
617 : : * if the caller is from delayed allocation writeout path
618 : : * we have already reserved fs blocks for allocation
619 : : * let the underlying get_block() function know to
620 : : * avoid double accounting
621 : : */
622 [ + + ]: 194104 : if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
623 : : ext4_set_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
624 : : /*
625 : : * We need to check for EXT4 here because migrate
626 : : * could have changed the inode type in between
627 : : */
628 [ + - ]: 194069 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
629 : 194069 : retval = ext4_ext_map_blocks(handle, inode, map, flags);
630 : : } else {
631 : 0 : retval = ext4_ind_map_blocks(handle, inode, map, flags);
632 : :
633 [ # # ][ # # ]: 0 : if (retval > 0 && map->m_flags & EXT4_MAP_NEW) {
634 : : /*
635 : : * We allocated new blocks which will result in
636 : : * i_data's format changing. Force the migrate
637 : : * to fail by clearing migrate flags
638 : : */
639 : : ext4_clear_inode_state(inode, EXT4_STATE_EXT_MIGRATE);
640 : : }
641 : :
642 : : /*
643 : : * Update reserved blocks/metadata blocks after successful
644 : : * block allocation which had been deferred till now. We don't
645 : : * support fallocate for non extent files. So we can update
646 : : * reserve space here.
647 : : */
648 [ # # ][ # # ]: 0 : if ((retval > 0) &&
649 : : (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE))
650 : 0 : ext4_da_update_reserve_space(inode, retval, 1);
651 : : }
652 [ + + ]: 194060 : if (flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE)
653 : : ext4_clear_inode_state(inode, EXT4_STATE_DELALLOC_RESERVED);
654 : :
655 [ + - ]: 194039 : if (retval > 0) {
656 : : int ret;
657 : : unsigned int status;
658 : :
659 [ - + ]: 194039 : if (unlikely(retval != map->m_len)) {
660 : 0 : ext4_warning(inode->i_sb,
661 : : "ES len assertion failed for inode "
662 : : "%lu: retval %d != map->m_len %d",
663 : : inode->i_ino, retval, map->m_len);
664 : 0 : WARN_ON(1);
665 : : }
666 : :
667 : : /*
668 : : * If the extent has been zeroed out, we don't need to update
669 : : * extent status tree.
670 : : */
671 [ - + # # ]: 194083 : if ((flags & EXT4_GET_BLOCKS_PRE_IO) &&
672 : 0 : ext4_es_lookup_extent(inode, map->m_lblk, &es)) {
673 [ # # ]: 0 : if (ext4_es_is_written(&es))
674 : : goto has_zeroout;
675 : : }
676 [ + + ]: 194083 : status = map->m_flags & EXT4_MAP_UNWRITTEN ?
677 : : EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
678 [ + + + + ]: 302136 : if (!(flags & EXT4_GET_BLOCKS_DELALLOC_RESERVE) &&
679 : 108051 : ext4_find_delalloc_range(inode, map->m_lblk,
680 : 108051 : map->m_lblk + map->m_len - 1))
681 : 5 : status |= EXTENT_STATUS_DELAYED;
682 : 194085 : ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
683 : : map->m_pblk, status);
684 [ - + ]: 194017 : if (ret < 0)
685 : : retval = ret;
686 : : }
687 : :
688 : : has_zeroout:
689 : 194017 : up_write((&EXT4_I(inode)->i_data_sem));
690 [ + ][ + ]: 194069 : if (retval > 0 && map->m_flags & EXT4_MAP_MAPPED) {
691 : 194094 : int ret = check_block_validity(inode, map);
692 [ + ]: 194046 : if (ret != 0)
693 : : return ret;
694 : : }
695 : 194062 : return retval;
696 : : }
697 : :
698 : : /* Maximum number of blocks we map for direct IO at once. */
699 : : #define DIO_MAX_BLOCKS 4096
700 : :
701 : 0 : static int _ext4_get_block(struct inode *inode, sector_t iblock,
702 : : struct buffer_head *bh, int flags)
703 : : {
704 : : handle_t *handle = ext4_journal_current_handle();
705 : : struct ext4_map_blocks map;
706 : : int ret = 0, started = 0;
707 : : int dio_credits;
708 : :
709 [ + + ]: 708679 : if (ext4_has_inline_data(inode))
710 : : return -ERANGE;
711 : :
712 : 708743 : map.m_lblk = iblock;
713 : 708743 : map.m_len = bh->b_size >> inode->i_blkbits;
714 : :
715 [ + + ][ + - ]: 708743 : if (flags && !(flags & EXT4_GET_BLOCKS_NO_LOCK) && !handle) {
[ + + ]
716 : : /* Direct IO write... */
717 [ - + ]: 214625 : if (map.m_len > DIO_MAX_BLOCKS)
718 : 0 : map.m_len = DIO_MAX_BLOCKS;
719 : 214625 : dio_credits = ext4_chunk_trans_blocks(inode, map.m_len);
720 : : handle = ext4_journal_start(inode, EXT4_HT_MAP_BLOCKS,
721 : : dio_credits);
722 [ - + ]: 214625 : if (IS_ERR(handle)) {
723 : : ret = PTR_ERR(handle);
724 : 0 : return ret;
725 : : }
726 : : started = 1;
727 : : }
728 : :
729 : 708743 : ret = ext4_map_blocks(handle, inode, &map, flags);
730 [ + + ]: 708853 : if (ret > 0) {
731 : : ext4_io_end_t *io_end = ext4_inode_aio(inode);
732 : :
733 : 524073 : map_bh(bh, inode->i_sb, map.m_pblk);
734 : 524073 : bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
735 [ - + ][ # # ]: 524073 : if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
736 : : set_buffer_defer_completion(bh);
737 : 524072 : bh->b_size = inode->i_sb->s_blocksize * map.m_len;
738 : : ret = 0;
739 : : }
740 [ + ]: 708852 : if (started)
741 : 214625 : ext4_journal_stop(handle);
742 : 708823 : return ret;
743 : : }
744 : :
745 : 0 : int ext4_get_block(struct inode *inode, sector_t iblock,
746 : : struct buffer_head *bh, int create)
747 : : {
748 : 563883 : return _ext4_get_block(inode, iblock, bh,
749 : : create ? EXT4_GET_BLOCKS_CREATE : 0);
750 : : }
751 : :
752 : : /*
753 : : * `handle' can be NULL if create is zero
754 : : */
755 : 0 : struct buffer_head *ext4_getblk(handle_t *handle, struct inode *inode,
756 : : ext4_lblk_t block, int create, int *errp)
757 : : {
758 : : struct ext4_map_blocks map;
759 : : struct buffer_head *bh;
760 : : int fatal = 0, err;
761 : :
762 [ - + ]: 1021474 : J_ASSERT(handle != NULL || create == 0);
763 : :
764 : 1021474 : map.m_lblk = block;
765 : 1021474 : map.m_len = 1;
766 : 1021474 : err = ext4_map_blocks(handle, inode, &map,
767 : : create ? EXT4_GET_BLOCKS_CREATE : 0);
768 : :
769 : : /* ensure we send some value back into *errp */
770 : 1021803 : *errp = 0;
771 : :
772 [ - + ]: 1021803 : if (create && err == 0)
773 : : err = -ENOSPC; /* should never happen */
774 [ - + ]: 1021803 : if (err < 0)
775 : 0 : *errp = err;
776 [ + ]: 1021803 : if (err <= 0)
777 : : return NULL;
778 : :
779 : 1021982 : bh = sb_getblk(inode->i_sb, map.m_pblk);
780 [ - + ]: 1021889 : if (unlikely(!bh)) {
781 : 0 : *errp = -ENOMEM;
782 : 0 : return NULL;
783 : : }
784 [ + + ]: 1021889 : if (map.m_flags & EXT4_MAP_NEW) {
785 [ - + ]: 35213 : J_ASSERT(create != 0);
786 [ - + ]: 35213 : J_ASSERT(handle != NULL);
787 : :
788 : : /*
789 : : * Now that we do not always journal data, we should
790 : : * keep in mind whether this should always journal the
791 : : * new buffer as metadata. For now, regular file
792 : : * writes use ext4_get_block instead, so it's not a
793 : : * problem.
794 : : */
795 : : lock_buffer(bh);
796 : : BUFFER_TRACE(bh, "call get_create_access");
797 : 35167 : fatal = ext4_journal_get_create_access(handle, bh);
798 [ + - ][ + + ]: 35236 : if (!fatal && !buffer_uptodate(bh)) {
799 [ + ]: 20188 : memset(bh->b_data, 0, inode->i_sb->s_blocksize);
800 : : set_buffer_uptodate(bh);
801 : : }
802 : 35235 : unlock_buffer(bh);
803 : : BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
804 : 35219 : err = ext4_handle_dirty_metadata(handle, inode, bh);
805 [ + - ]: 35241 : if (!fatal)
806 : : fatal = err;
807 : : } else {
808 : : BUFFER_TRACE(bh, "not a new buffer");
809 : : }
810 [ - + ]: 1021917 : if (fatal) {
811 : 0 : *errp = fatal;
812 : : brelse(bh);
813 : : bh = NULL;
814 : : }
815 : 1021836 : return bh;
816 : : }
817 : :
818 : 0 : struct buffer_head *ext4_bread(handle_t *handle, struct inode *inode,
819 : : ext4_lblk_t block, int create, int *err)
820 : : {
821 : : struct buffer_head *bh;
822 : :
823 : 402807 : bh = ext4_getblk(handle, inode, block, create, err);
824 [ + + ]: 403039 : if (!bh)
825 : : return bh;
826 [ + + ]: 403033 : if (buffer_uptodate(bh))
827 : : return bh;
828 : 290 : ll_rw_block(READ | REQ_META | REQ_PRIO, 1, &bh);
829 : 290 : wait_on_buffer(bh);
830 [ - + ]: 290 : if (buffer_uptodate(bh))
831 : : return bh;
832 : : put_bh(bh);
833 : 0 : *err = -EIO;
834 : 0 : return NULL;
835 : : }
836 : :
837 : 0 : int ext4_walk_page_buffers(handle_t *handle,
838 : : struct buffer_head *head,
839 : : unsigned from,
840 : : unsigned to,
841 : : int *partial,
842 : : int (*fn)(handle_t *handle,
843 : : struct buffer_head *bh))
844 : : {
845 : : struct buffer_head *bh;
846 : : unsigned block_start, block_end;
847 : 50518 : unsigned blocksize = head->b_size;
848 : : int err, ret = 0;
849 : : struct buffer_head *next;
850 : :
851 [ + + ]: 101036 : for (bh = head, block_start = 0;
852 [ + + ]: 57967 : ret == 0 && (bh != head || !block_start);
853 : : block_start = block_end, bh = next) {
854 : 50518 : next = bh->b_this_page;
855 : 50518 : block_end = block_start + blocksize;
856 [ - + ]: 50518 : if (block_end <= from || block_start >= to) {
857 [ # # ][ # # ]: 0 : if (partial && !buffer_uptodate(bh))
858 : 0 : *partial = 1;
859 : 0 : continue;
860 : : }
861 : 50518 : err = (*fn)(handle, bh);
862 [ + - ]: 50518 : if (!ret)
863 : : ret = err;
864 : : }
865 : 50518 : return ret;
866 : : }
867 : :
868 : : /*
869 : : * To preserve ordering, it is essential that the hole instantiation and
870 : : * the data write be encapsulated in a single transaction. We cannot
871 : : * close off a transaction and start a new one between the ext4_get_block()
872 : : * and the commit_write(). So doing the jbd2_journal_start at the start of
873 : : * prepare_write() is the right place.
874 : : *
875 : : * Also, this function can nest inside ext4_writepage(). In that case, we
876 : : * *know* that ext4_writepage() has generated enough buffer credits to do the
877 : : * whole page. So we won't block on the journal in that case, which is good,
878 : : * because the caller may be PF_MEMALLOC.
879 : : *
880 : : * By accident, ext4 can be reentered when a transaction is open via
881 : : * quota file writes. If we were to commit the transaction while thus
882 : : * reentered, there can be a deadlock - we would be holding a quota
883 : : * lock, and the commit would never complete if another thread had a
884 : : * transaction open and was blocking on the quota lock - a ranking
885 : : * violation.
886 : : *
887 : : * So what we do is to rely on the fact that jbd2_journal_stop/journal_start
888 : : * will _not_ run commit under these circumstances because handle->h_ref
889 : : * is elevated. We'll still have enough credits for the tiny quotafile
890 : : * write.
891 : : */
892 : 0 : int do_journal_get_write_access(handle_t *handle,
893 : : struct buffer_head *bh)
894 : : {
895 : : int dirty = buffer_dirty(bh);
896 : : int ret;
897 : :
898 [ + - ][ + - ]: 2961 : if (!buffer_mapped(bh) || buffer_freed(bh))
899 : : return 0;
900 : : /*
901 : : * __block_write_begin() could have dirtied some buffers. Clean
902 : : * the dirty bit as jbd2_journal_get_write_access() could complain
903 : : * otherwise about fs integrity issues. Setting of the dirty bit
904 : : * by __block_write_begin() isn't a real problem here as we clear
905 : : * the bit before releasing a page lock and thus writeback cannot
906 : : * ever write the buffer.
907 : : */
908 [ - + ]: 2961 : if (dirty)
909 : : clear_buffer_dirty(bh);
910 : 2961 : ret = ext4_journal_get_write_access(handle, bh);
911 [ - + ]: 2961 : if (!ret && dirty)
912 : 0 : ret = ext4_handle_dirty_metadata(handle, NULL, bh);
913 : 2961 : return ret;
914 : : }
915 : :
916 : : static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
917 : : struct buffer_head *bh_result, int create);
918 : 0 : static int ext4_write_begin(struct file *file, struct address_space *mapping,
919 : : loff_t pos, unsigned len, unsigned flags,
920 : : struct page **pagep, void **fsdata)
921 : : {
922 : 5922 : struct inode *inode = mapping->host;
923 : : int ret, needed_blocks;
924 : : handle_t *handle;
925 : 2961 : int retries = 0;
926 : : struct page *page;
927 : : pgoff_t index;
928 : : unsigned from, to;
929 : :
930 : : trace_ext4_write_begin(inode, pos, len, flags);
931 : : /*
932 : : * Reserve one block more for addition to orphan list in case
933 : : * we allocate blocks but write fails for some reason
934 : : */
935 : 2961 : needed_blocks = ext4_writepage_trans_blocks(inode) + 1;
936 : 2961 : index = pos >> PAGE_CACHE_SHIFT;
937 : 2961 : from = pos & (PAGE_CACHE_SIZE - 1);
938 : 2961 : to = from + len;
939 : :
940 [ + - ]: 2961 : if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
941 : 0 : ret = ext4_try_to_write_inline_data(mapping, inode, pos, len,
942 : : flags, pagep);
943 [ # # ]: 0 : if (ret < 0)
944 : : return ret;
945 [ # # ]: 0 : if (ret == 1)
946 : : return 0;
947 : : }
948 : :
949 : : /*
950 : : * grab_cache_page_write_begin() can take a long time if the
951 : : * system is thrashing due to memory pressure, or if the page
952 : : * is being written back. So grab it first before we start
953 : : * the transaction handle. This also allows us to allocate
954 : : * the page (if needed) without using GFP_NOFS.
955 : : */
956 : : retry_grab:
957 : 2961 : page = grab_cache_page_write_begin(mapping, index, flags);
958 [ + - ]: 2961 : if (!page)
959 : : return -ENOMEM;
960 : 2961 : unlock_page(page);
961 : :
962 : : retry_journal:
963 : : handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, needed_blocks);
964 [ - + ]: 2961 : if (IS_ERR(handle)) {
965 : 0 : page_cache_release(page);
966 : 0 : return PTR_ERR(handle);
967 : : }
968 : :
969 : : lock_page(page);
970 [ - + ]: 2961 : if (page->mapping != mapping) {
971 : : /* The page got truncated from under us */
972 : 0 : unlock_page(page);
973 : 0 : page_cache_release(page);
974 : 0 : ext4_journal_stop(handle);
975 : 0 : goto retry_grab;
976 : : }
977 : : /* In case writeback began while the page was unlocked */
978 : 2961 : wait_for_stable_page(page);
979 : :
980 [ - + ]: 2961 : if (ext4_should_dioread_nolock(inode))
981 : 0 : ret = __block_write_begin(page, pos, len, ext4_get_block_write);
982 : : else
983 : 2961 : ret = __block_write_begin(page, pos, len, ext4_get_block);
984 : :
985 [ + - ][ + - ]: 8883 : if (!ret && ext4_should_journal_data(inode)) {
986 [ - + ]: 2961 : ret = ext4_walk_page_buffers(handle, page_buffers(page),
987 : : from, to, NULL,
988 : : do_journal_get_write_access);
989 : : }
990 : :
991 [ - + ]: 2961 : if (ret) {
992 : 0 : unlock_page(page);
993 : : /*
994 : : * __block_write_begin may have instantiated a few blocks
995 : : * outside i_size. Trim these off again. Don't need
996 : : * i_size_read because we hold i_mutex.
997 : : *
998 : : * Add inode to orphan list in case we crash before
999 : : * truncate finishes
1000 : : */
1001 [ # # ][ # # ]: 0 : if (pos + len > inode->i_size && ext4_can_truncate(inode))
1002 : 0 : ext4_orphan_add(handle, inode);
1003 : :
1004 : 0 : ext4_journal_stop(handle);
1005 [ # # ]: 0 : if (pos + len > inode->i_size) {
1006 : : ext4_truncate_failed_write(inode);
1007 : : /*
1008 : : * If truncate failed early the inode might
1009 : : * still be on the orphan list; we need to
1010 : : * make sure the inode is removed from the
1011 : : * orphan list in that case.
1012 : : */
1013 [ # # ]: 0 : if (inode->i_nlink)
1014 : 0 : ext4_orphan_del(NULL, inode);
1015 : : }
1016 : :
1017 [ # # # # ]: 0 : if (ret == -ENOSPC &&
1018 : 0 : ext4_should_retry_alloc(inode->i_sb, &retries))
1019 : : goto retry_journal;
1020 : 0 : page_cache_release(page);
1021 : 0 : return ret;
1022 : : }
1023 : 2961 : *pagep = page;
1024 : 2961 : return ret;
1025 : : }
1026 : :
1027 : : /* For write_end() in data=journal mode */
1028 : 0 : static int write_end_fn(handle_t *handle, struct buffer_head *bh)
1029 : : {
1030 : : int ret;
1031 [ + - ][ + - ]: 2961 : if (!buffer_mapped(bh) || buffer_freed(bh))
1032 : : return 0;
1033 : : set_buffer_uptodate(bh);
1034 : 2961 : ret = ext4_handle_dirty_metadata(handle, NULL, bh);
1035 : : clear_buffer_meta(bh);
1036 : : clear_buffer_prio(bh);
1037 : 2961 : return ret;
1038 : : }
1039 : :
1040 : : /*
1041 : : * We need to pick up the new inode size which generic_commit_write gave us
1042 : : * `file' can be NULL - eg, when called from page_symlink().
1043 : : *
1044 : : * ext4 never places buffers on inode->i_mapping->private_list. metadata
1045 : : * buffers are managed internally.
1046 : : */
1047 : 0 : static int ext4_write_end(struct file *file,
1048 : : struct address_space *mapping,
1049 : : loff_t pos, unsigned len, unsigned copied,
1050 : : struct page *page, void *fsdata)
1051 : : {
1052 : : handle_t *handle = ext4_journal_current_handle();
1053 : 0 : struct inode *inode = mapping->host;
1054 : : int ret = 0, ret2;
1055 : : int i_size_changed = 0;
1056 : :
1057 : : trace_ext4_write_end(inode, pos, len, copied);
1058 [ # # ]: 0 : if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE)) {
1059 : : ret = ext4_jbd2_file_inode(handle, inode);
1060 [ # # ]: 0 : if (ret) {
1061 : 0 : unlock_page(page);
1062 : 0 : page_cache_release(page);
1063 : 0 : goto errout;
1064 : : }
1065 : : }
1066 : :
1067 [ # # ]: 0 : if (ext4_has_inline_data(inode)) {
1068 : 0 : ret = ext4_write_inline_data_end(inode, pos, len,
1069 : : copied, page);
1070 [ # # ]: 0 : if (ret < 0)
1071 : : goto errout;
1072 : 0 : copied = ret;
1073 : : } else
1074 : 0 : copied = block_write_end(file, mapping, pos,
1075 : : len, copied, page, fsdata);
1076 : :
1077 : : /*
1078 : : * No need to use i_size_read() here, the i_size
1079 : : * cannot change under us because we hole i_mutex.
1080 : : *
1081 : : * But it's important to update i_size while still holding page lock:
1082 : : * page writeout could otherwise come in and zero beyond i_size.
1083 : : */
1084 [ # # ]: 0 : if (pos + copied > inode->i_size) {
1085 : : i_size_write(inode, pos + copied);
1086 : : i_size_changed = 1;
1087 : : }
1088 : :
1089 [ # # ]: 0 : if (pos + copied > EXT4_I(inode)->i_disksize) {
1090 : : /* We need to mark inode dirty even if
1091 : : * new_i_size is less that inode->i_size
1092 : : * but greater than i_disksize. (hint delalloc)
1093 : : */
1094 : : ext4_update_i_disksize(inode, (pos + copied));
1095 : : i_size_changed = 1;
1096 : : }
1097 : 0 : unlock_page(page);
1098 : 0 : page_cache_release(page);
1099 : :
1100 : : /*
1101 : : * Don't mark the inode dirty under page lock. First, it unnecessarily
1102 : : * makes the holding time of page lock longer. Second, it forces lock
1103 : : * ordering of page lock and transaction start for journaling
1104 : : * filesystems.
1105 : : */
1106 [ # # ]: 0 : if (i_size_changed)
1107 : 0 : ext4_mark_inode_dirty(handle, inode);
1108 : :
1109 [ # # ][ # # ]: 0 : if (pos + len > inode->i_size && ext4_can_truncate(inode))
1110 : : /* if we have allocated more blocks and copied
1111 : : * less. We will have blocks allocated outside
1112 : : * inode->i_size. So truncate them
1113 : : */
1114 : 0 : ext4_orphan_add(handle, inode);
1115 : : errout:
1116 : 0 : ret2 = ext4_journal_stop(handle);
1117 [ # # ]: 0 : if (!ret)
1118 : : ret = ret2;
1119 : :
1120 [ # # ]: 0 : if (pos + len > inode->i_size) {
1121 : : ext4_truncate_failed_write(inode);
1122 : : /*
1123 : : * If truncate failed early the inode might still be
1124 : : * on the orphan list; we need to make sure the inode
1125 : : * is removed from the orphan list in that case.
1126 : : */
1127 [ # # ]: 0 : if (inode->i_nlink)
1128 : 0 : ext4_orphan_del(NULL, inode);
1129 : : }
1130 : :
1131 [ # # ]: 0 : return ret ? ret : copied;
1132 : : }
1133 : :
1134 : 0 : static int ext4_journalled_write_end(struct file *file,
1135 : : struct address_space *mapping,
1136 : : loff_t pos, unsigned len, unsigned copied,
1137 : : struct page *page, void *fsdata)
1138 : : {
1139 : : handle_t *handle = ext4_journal_current_handle();
1140 : 2961 : struct inode *inode = mapping->host;
1141 : : int ret = 0, ret2;
1142 : 2961 : int partial = 0;
1143 : : unsigned from, to;
1144 : : loff_t new_i_size;
1145 : :
1146 : : trace_ext4_journalled_write_end(inode, pos, len, copied);
1147 : 2961 : from = pos & (PAGE_CACHE_SIZE - 1);
1148 : 2961 : to = from + len;
1149 : :
1150 [ - + ]: 2961 : BUG_ON(!ext4_handle_valid(handle));
1151 : :
1152 [ - + ]: 2961 : if (ext4_has_inline_data(inode))
1153 : 0 : copied = ext4_write_inline_data_end(inode, pos, len,
1154 : : copied, page);
1155 : : else {
1156 [ - + ]: 2961 : if (copied < len) {
1157 [ # # ]: 0 : if (!PageUptodate(page))
1158 : : copied = 0;
1159 : 0 : page_zero_new_buffers(page, from+copied, to);
1160 : : }
1161 : :
1162 [ - + ]: 2961 : ret = ext4_walk_page_buffers(handle, page_buffers(page), from,
1163 : : to, &partial, write_end_fn);
1164 [ + - ]: 2961 : if (!partial)
1165 : : SetPageUptodate(page);
1166 : : }
1167 : 2961 : new_i_size = pos + copied;
1168 [ + - ]: 2961 : if (new_i_size > inode->i_size)
1169 : : i_size_write(inode, pos+copied);
1170 : : ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1171 : 2961 : EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1172 [ + - ]: 2961 : if (new_i_size > EXT4_I(inode)->i_disksize) {
1173 : : ext4_update_i_disksize(inode, new_i_size);
1174 : 2961 : ret2 = ext4_mark_inode_dirty(handle, inode);
1175 [ + - ]: 2961 : if (!ret)
1176 : : ret = ret2;
1177 : : }
1178 : :
1179 : 2961 : unlock_page(page);
1180 : 2961 : page_cache_release(page);
1181 [ - + ][ # # ]: 2961 : if (pos + len > inode->i_size && ext4_can_truncate(inode))
1182 : : /* if we have allocated more blocks and copied
1183 : : * less. We will have blocks allocated outside
1184 : : * inode->i_size. So truncate them
1185 : : */
1186 : 0 : ext4_orphan_add(handle, inode);
1187 : :
1188 : 2961 : ret2 = ext4_journal_stop(handle);
1189 [ + - ]: 2961 : if (!ret)
1190 : : ret = ret2;
1191 [ - + ]: 2961 : if (pos + len > inode->i_size) {
1192 : : ext4_truncate_failed_write(inode);
1193 : : /*
1194 : : * If truncate failed early the inode might still be
1195 : : * on the orphan list; we need to make sure the inode
1196 : : * is removed from the orphan list in that case.
1197 : : */
1198 [ # # ]: 0 : if (inode->i_nlink)
1199 : 0 : ext4_orphan_del(NULL, inode);
1200 : : }
1201 : :
1202 [ + - ]: 2961 : return ret ? ret : copied;
1203 : : }
1204 : :
1205 : : /*
1206 : : * Reserve a metadata for a single block located at lblock
1207 : : */
1208 : 0 : static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
1209 : : {
1210 : 0 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1211 : : struct ext4_inode_info *ei = EXT4_I(inode);
1212 : : unsigned int md_needed;
1213 : : ext4_lblk_t save_last_lblock;
1214 : : int save_len;
1215 : :
1216 : : /*
1217 : : * recalculate the amount of metadata blocks to reserve
1218 : : * in order to allocate nrblocks
1219 : : * worse case is one extent per block
1220 : : */
1221 : : spin_lock(&ei->i_block_reservation_lock);
1222 : : /*
1223 : : * ext4_calc_metadata_amount() has side effects, which we have
1224 : : * to be prepared undo if we fail to claim space.
1225 : : */
1226 : 0 : save_len = ei->i_da_metadata_calc_len;
1227 : 0 : save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1228 : 0 : md_needed = EXT4_NUM_B2C(sbi,
1229 : : ext4_calc_metadata_amount(inode, lblock));
1230 : 0 : trace_ext4_da_reserve_space(inode, md_needed);
1231 : :
1232 : : /*
1233 : : * We do still charge estimated metadata to the sb though;
1234 : : * we cannot afford to run out of free blocks.
1235 : : */
1236 [ # # ]: 0 : if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
1237 : 0 : ei->i_da_metadata_calc_len = save_len;
1238 : 0 : ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1239 : : spin_unlock(&ei->i_block_reservation_lock);
1240 : 0 : return -ENOSPC;
1241 : : }
1242 : 0 : ei->i_reserved_meta_blocks += md_needed;
1243 : : spin_unlock(&ei->i_block_reservation_lock);
1244 : :
1245 : 0 : return 0; /* success */
1246 : : }
1247 : :
1248 : : /*
1249 : : * Reserve a single cluster located at lblock
1250 : : */
1251 : 0 : static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
1252 : : {
1253 : 1760018 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1254 : : struct ext4_inode_info *ei = EXT4_I(inode);
1255 : : unsigned int md_needed;
1256 : : int ret;
1257 : : ext4_lblk_t save_last_lblock;
1258 : : int save_len;
1259 : :
1260 : : /*
1261 : : * We will charge metadata quota at writeout time; this saves
1262 : : * us from metadata over-estimation, though we may go over by
1263 : : * a small amount in the end. Here we just reserve for data.
1264 : : */
1265 : 1760018 : ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
1266 [ + ]: 1764065 : if (ret)
1267 : : return ret;
1268 : :
1269 : : /*
1270 : : * recalculate the amount of metadata blocks to reserve
1271 : : * in order to allocate nrblocks
1272 : : * worse case is one extent per block
1273 : : */
1274 : : spin_lock(&ei->i_block_reservation_lock);
1275 : : /*
1276 : : * ext4_calc_metadata_amount() has side effects, which we have
1277 : : * to be prepared undo if we fail to claim space.
1278 : : */
1279 : 1763236 : save_len = ei->i_da_metadata_calc_len;
1280 : 1763236 : save_last_lblock = ei->i_da_metadata_calc_last_lblock;
1281 : 1763236 : md_needed = EXT4_NUM_B2C(sbi,
1282 : : ext4_calc_metadata_amount(inode, lblock));
1283 : 1761349 : trace_ext4_da_reserve_space(inode, md_needed);
1284 : :
1285 : : /*
1286 : : * We do still charge estimated metadata to the sb though;
1287 : : * we cannot afford to run out of free blocks.
1288 : : */
1289 [ - + ]: 1761349 : if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
1290 : 0 : ei->i_da_metadata_calc_len = save_len;
1291 : 0 : ei->i_da_metadata_calc_last_lblock = save_last_lblock;
1292 : : spin_unlock(&ei->i_block_reservation_lock);
1293 : 0 : dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
1294 : 0 : return -ENOSPC;
1295 : : }
1296 : 1762601 : ei->i_reserved_data_blocks++;
1297 : 1762601 : ei->i_reserved_meta_blocks += md_needed;
1298 : : spin_unlock(&ei->i_block_reservation_lock);
1299 : :
1300 : 1763443 : return 0; /* success */
1301 : : }
1302 : :
1303 : 0 : static void ext4_da_release_space(struct inode *inode, int to_free)
1304 : : {
1305 : 1147491 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1306 : : struct ext4_inode_info *ei = EXT4_I(inode);
1307 : :
1308 [ + + ]: 1147491 : if (!to_free)
1309 : 1147494 : return; /* Nothing to release, exit */
1310 : :
1311 : : spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
1312 : :
1313 : : trace_ext4_da_release_space(inode, to_free);
1314 [ - + ]: 2294978 : if (unlikely(to_free > ei->i_reserved_data_blocks)) {
1315 : : /*
1316 : : * if there aren't enough reserved blocks, then the
1317 : : * counter is messed up somewhere. Since this
1318 : : * function is called from invalidate page, it's
1319 : : * harmless to return without any action.
1320 : : */
1321 : 0 : ext4_warning(inode->i_sb, "ext4_da_release_space: "
1322 : : "ino %lu, to_free %d with only %d reserved "
1323 : : "data blocks", inode->i_ino, to_free,
1324 : : ei->i_reserved_data_blocks);
1325 : 0 : WARN_ON(1);
1326 : 0 : to_free = ei->i_reserved_data_blocks;
1327 : : }
1328 : 1147487 : ei->i_reserved_data_blocks -= to_free;
1329 : :
1330 [ + + ]: 1147487 : if (ei->i_reserved_data_blocks == 0) {
1331 : : /*
1332 : : * We can release all of the reserved metadata blocks
1333 : : * only when we have written all of the delayed
1334 : : * allocation blocks.
1335 : : * Note that in case of bigalloc, i_reserved_meta_blocks,
1336 : : * i_reserved_data_blocks, etc. refer to number of clusters.
1337 : : */
1338 : 43078 : percpu_counter_sub(&sbi->s_dirtyclusters_counter,
1339 : 21539 : ei->i_reserved_meta_blocks);
1340 : 21539 : ei->i_reserved_meta_blocks = 0;
1341 : 21539 : ei->i_da_metadata_calc_len = 0;
1342 : : }
1343 : :
1344 : : /* update fs dirty data blocks counter */
1345 : 1147487 : percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
1346 : :
1347 : : spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
1348 : :
1349 : 1147491 : dquot_release_reservation_block(inode, EXT4_C2B(sbi, to_free));
1350 : : }
1351 : :
1352 : 0 : static void ext4_da_page_release_reservation(struct page *page,
1353 : : unsigned int offset,
1354 : : unsigned int length)
1355 : : {
1356 : : int to_release = 0;
1357 : : struct buffer_head *head, *bh;
1358 : : unsigned int curr_off = 0;
1359 : 1726434 : struct inode *inode = page->mapping->host;
1360 : 1726434 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1361 : 1726434 : unsigned int stop = offset + length;
1362 : : int num_clusters;
1363 : : ext4_fsblk_t lblk;
1364 : :
1365 [ - + ]: 1726434 : BUG_ON(stop > PAGE_CACHE_SIZE || stop < length);
1366 : :
1367 [ - + ]: 1726434 : head = page_buffers(page);
1368 : : bh = head;
1369 : : do {
1370 : 1726390 : unsigned int next_off = curr_off + bh->b_size;
1371 : :
1372 [ + + ]: 3452824 : if (next_off > stop)
1373 : : break;
1374 : :
1375 [ + + ][ + + ]: 1726378 : if ((offset <= curr_off) && (buffer_delay(bh))) {
1376 : 1147489 : to_release++;
1377 : : clear_buffer_delay(bh);
1378 : : }
1379 : : curr_off = next_off;
1380 [ + + ]: 3452758 : } while ((bh = bh->b_this_page) != head);
1381 : :
1382 [ + + ]: 1726380 : if (to_release) {
1383 : 1147491 : lblk = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1384 : 1147491 : ext4_es_remove_extent(inode, lblk, to_release);
1385 : : }
1386 : :
1387 : : /* If we have released all the blocks belonging to a cluster, then we
1388 : : * need to release the reserved space for that cluster. */
1389 : 1726378 : num_clusters = EXT4_NUM_B2C(sbi, to_release);
1390 [ + + ]: 2873870 : while (num_clusters > 0) {
1391 : 2294978 : lblk = (page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits)) +
1392 : 1147489 : ((num_clusters - 1) << sbi->s_cluster_bits);
1393 [ - + # # ]: 1147489 : if (sbi->s_cluster_ratio == 1 ||
1394 : 0 : !ext4_find_delalloc_cluster(inode, lblk))
1395 : 1147489 : ext4_da_release_space(inode, 1);
1396 : :
1397 : : num_clusters--;
1398 : : }
1399 : 1726381 : }
1400 : :
1401 : : /*
1402 : : * Delayed allocation stuff
1403 : : */
1404 : :
1405 : : struct mpage_da_data {
1406 : : struct inode *inode;
1407 : : struct writeback_control *wbc;
1408 : :
1409 : : pgoff_t first_page; /* The first page to write */
1410 : : pgoff_t next_page; /* Current page to examine */
1411 : : pgoff_t last_page; /* Last page to examine */
1412 : : /*
1413 : : * Extent to map - this can be after first_page because that can be
1414 : : * fully mapped. We somewhat abuse m_flags to store whether the extent
1415 : : * is delalloc or unwritten.
1416 : : */
1417 : : struct ext4_map_blocks map;
1418 : : struct ext4_io_submit io_submit; /* IO submission data */
1419 : : };
1420 : :
1421 : 0 : static void mpage_release_unused_pages(struct mpage_da_data *mpd,
1422 : : bool invalidate)
1423 : : {
1424 : : int nr_pages, i;
1425 : : pgoff_t index, end;
1426 : : struct pagevec pvec;
1427 : 128362 : struct inode *inode = mpd->inode;
1428 : 128362 : struct address_space *mapping = inode->i_mapping;
1429 : :
1430 : : /* This is necessary when next_page == 0. */
1431 [ + + ]: 128362 : if (mpd->first_page >= mpd->next_page)
1432 : 126604 : return;
1433 : :
1434 : : index = mpd->first_page;
1435 : 1758 : end = mpd->next_page - 1;
1436 [ - + ]: 1758 : if (invalidate) {
1437 : : ext4_lblk_t start, last;
1438 : 0 : start = index << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1439 : 0 : last = end << (PAGE_CACHE_SHIFT - inode->i_blkbits);
1440 : 0 : ext4_es_remove_extent(inode, start, last - start + 1);
1441 : : }
1442 : :
1443 : : pagevec_init(&pvec, 0);
1444 [ + + ]: 5082 : while (index <= end) {
1445 : 3324 : nr_pages = pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE);
1446 [ + - ]: 3324 : if (nr_pages == 0)
1447 : : break;
1448 [ + + ]: 27380 : for (i = 0; i < nr_pages; i++) {
1449 : 25753 : struct page *page = pvec.pages[i];
1450 [ + + ]: 25753 : if (page->index > end)
1451 : : break;
1452 [ - + ]: 24057 : BUG_ON(!PageLocked(page));
1453 [ - + ]: 24057 : BUG_ON(PageWriteback(page));
1454 [ - + ]: 24057 : if (invalidate) {
1455 : 0 : block_invalidatepage(page, 0, PAGE_CACHE_SIZE);
1456 : : ClearPageUptodate(page);
1457 : : }
1458 : 24057 : unlock_page(page);
1459 : : }
1460 : 3323 : index = pvec.pages[nr_pages - 1]->index + 1;
1461 : : pagevec_release(&pvec);
1462 : : }
1463 : : }
1464 : :
1465 : 0 : static void ext4_print_free_blocks(struct inode *inode)
1466 : : {
1467 : 0 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
1468 : 0 : struct super_block *sb = inode->i_sb;
1469 : : struct ext4_inode_info *ei = EXT4_I(inode);
1470 : :
1471 : 0 : ext4_msg(sb, KERN_CRIT, "Total free blocks count %lld",
1472 : : EXT4_C2B(EXT4_SB(inode->i_sb),
1473 : : ext4_count_free_clusters(sb)));
1474 : 0 : ext4_msg(sb, KERN_CRIT, "Free/Dirty block details");
1475 : 0 : ext4_msg(sb, KERN_CRIT, "free_blocks=%lld",
1476 : : (long long) EXT4_C2B(EXT4_SB(sb),
1477 : : percpu_counter_sum(&sbi->s_freeclusters_counter)));
1478 : 0 : ext4_msg(sb, KERN_CRIT, "dirty_blocks=%lld",
1479 : : (long long) EXT4_C2B(EXT4_SB(sb),
1480 : : percpu_counter_sum(&sbi->s_dirtyclusters_counter)));
1481 : 0 : ext4_msg(sb, KERN_CRIT, "Block reservation details");
1482 : 0 : ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
1483 : : ei->i_reserved_data_blocks);
1484 : 0 : ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
1485 : : ei->i_reserved_meta_blocks);
1486 : 0 : ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
1487 : : ei->i_allocated_meta_blocks);
1488 : 0 : return;
1489 : : }
1490 : :
1491 : 0 : static int ext4_bh_delay_or_unwritten(handle_t *handle, struct buffer_head *bh)
1492 : : {
1493 [ + + ][ + ]: 44596 : return (buffer_delay(bh) || buffer_unwritten(bh)) && buffer_dirty(bh);
[ - + ]
1494 : : }
1495 : :
1496 : : /*
1497 : : * This function is grabs code from the very beginning of
1498 : : * ext4_map_blocks, but assumes that the caller is from delayed write
1499 : : * time. This function looks up the requested blocks and sets the
1500 : : * buffer delay bit under the protection of i_data_sem.
1501 : : */
1502 : 0 : static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
1503 : : struct ext4_map_blocks *map,
1504 : : struct buffer_head *bh)
1505 : : {
1506 : : struct extent_status es;
1507 : : int retval;
1508 : : sector_t invalid_block = ~((sector_t) 0xffff);
1509 : : #ifdef ES_AGGRESSIVE_TEST
1510 : : struct ext4_map_blocks orig_map;
1511 : :
1512 : : memcpy(&orig_map, map, sizeof(*map));
1513 : : #endif
1514 : :
1515 [ - + ]: 1760553 : if (invalid_block < ext4_blocks_count(EXT4_SB(inode->i_sb)->s_es))
1516 : : invalid_block = ~0;
1517 : :
1518 : 1760553 : map->m_flags = 0;
1519 : : ext_debug("ext4_da_map_blocks(): inode %lu, max_blocks %u,"
1520 : : "logical block %lu\n", inode->i_ino, map->m_len,
1521 : : (unsigned long) map->m_lblk);
1522 : :
1523 : : /* Lookup extent status tree firstly */
1524 [ + + ]: 1760553 : if (ext4_es_lookup_extent(inode, iblock, &es)) {
1525 : 71651 : ext4_es_lru_add(inode);
1526 [ + + ]: 1832093 : if (ext4_es_is_hole(&es)) {
1527 : : retval = 0;
1528 : 71531 : down_read((&EXT4_I(inode)->i_data_sem));
1529 : 71661 : goto add_delayed;
1530 : : }
1531 : :
1532 : : /*
1533 : : * Delayed extent could be allocated by fallocate.
1534 : : * So we need to check it.
1535 : : */
1536 [ - + ][ # # ]: 1760562 : if (ext4_es_is_delayed(&es) && !ext4_es_is_unwritten(&es)) {
1537 : 0 : map_bh(bh, inode->i_sb, invalid_block);
1538 : : set_buffer_new(bh);
1539 : : set_buffer_delay(bh);
1540 : 0 : return 0;
1541 : : }
1542 : :
1543 : 9 : map->m_pblk = ext4_es_pblock(&es) + iblock - es.es_lblk;
1544 : 9 : retval = es.es_len - (iblock - es.es_lblk);
1545 [ - + ]: 9 : if (retval > map->m_len)
1546 : 0 : retval = map->m_len;
1547 : 9 : map->m_len = retval;
1548 [ + + ]: 9 : if (ext4_es_is_written(&es))
1549 : 7 : map->m_flags |= EXT4_MAP_MAPPED;
1550 [ + - ]: 2 : else if (ext4_es_is_unwritten(&es))
1551 : 2 : map->m_flags |= EXT4_MAP_UNWRITTEN;
1552 : : else
1553 : 0 : BUG_ON(1);
1554 : :
1555 : : #ifdef ES_AGGRESSIVE_TEST
1556 : : ext4_map_blocks_es_recheck(NULL, inode, map, &orig_map, 0);
1557 : : #endif
1558 : 9 : return retval;
1559 : : }
1560 : :
1561 : : /*
1562 : : * Try to see if we can get the block without requesting a new
1563 : : * file system block.
1564 : : */
1565 : 1692696 : down_read((&EXT4_I(inode)->i_data_sem));
1566 [ - + ]: 1690652 : if (ext4_has_inline_data(inode)) {
1567 : : /*
1568 : : * We will soon create blocks for this page, and let
1569 : : * us pretend as if the blocks aren't allocated yet.
1570 : : * In case of clusters, we have to handle the work
1571 : : * of mapping from cluster so that the reserved space
1572 : : * is calculated properly.
1573 : : */
1574 [ # # # # ]: 0 : if ((EXT4_SB(inode->i_sb)->s_cluster_ratio > 1) &&
1575 : 0 : ext4_find_delalloc_cluster(inode, map->m_lblk))
1576 : 0 : map->m_flags |= EXT4_MAP_FROM_CLUSTER;
1577 : : retval = 0;
1578 [ + - ]: 1692478 : } else if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
1579 : 1692478 : retval = ext4_ext_map_blocks(NULL, inode, map,
1580 : : EXT4_GET_BLOCKS_NO_PUT_HOLE);
1581 : : else
1582 : 0 : retval = ext4_ind_map_blocks(NULL, inode, map,
1583 : : EXT4_GET_BLOCKS_NO_PUT_HOLE);
1584 : :
1585 : : add_delayed:
1586 [ + + ]: 1759157 : if (retval == 0) {
1587 : : int ret;
1588 : : /*
1589 : : * XXX: __block_prepare_write() unmaps passed block,
1590 : : * is it OK?
1591 : : */
1592 : : /*
1593 : : * If the block was allocated from previously allocated cluster,
1594 : : * then we don't need to reserve it again. However we still need
1595 : : * to reserve metadata for every block we're going to write.
1596 : : */
1597 [ + - ]: 1759144 : if (!(map->m_flags & EXT4_MAP_FROM_CLUSTER)) {
1598 : 1759144 : ret = ext4_da_reserve_space(inode, iblock);
1599 [ + - ]: 1764052 : if (ret) {
1600 : : /* not enough space to reserve */
1601 : : retval = ret;
1602 : : goto out_unlock;
1603 : : }
1604 : : } else {
1605 : 0 : ret = ext4_da_reserve_metadata(inode, iblock);
1606 [ # # ]: 0 : if (ret) {
1607 : : /* not enough space to reserve */
1608 : : retval = ret;
1609 : : goto out_unlock;
1610 : : }
1611 : : }
1612 : :
1613 : 1764052 : ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1614 : : ~0, EXTENT_STATUS_DELAYED);
1615 [ + + ]: 1764181 : if (ret) {
1616 : : retval = ret;
1617 : : goto out_unlock;
1618 : : }
1619 : :
1620 : : /* Clear EXT4_MAP_FROM_CLUSTER flag since its purpose is served
1621 : : * and it should not appear on the bh->b_state.
1622 : : */
1623 : 1762091 : map->m_flags &= ~EXT4_MAP_FROM_CLUSTER;
1624 : :
1625 : 1762091 : map_bh(bh, inode->i_sb, invalid_block);
1626 : : set_buffer_new(bh);
1627 : : set_buffer_delay(bh);
1628 [ + - ]: 13 : } else if (retval > 0) {
1629 : : int ret;
1630 : : unsigned int status;
1631 : :
1632 [ - + ]: 13 : if (unlikely(retval != map->m_len)) {
1633 : 0 : ext4_warning(inode->i_sb,
1634 : : "ES len assertion failed for inode "
1635 : : "%lu: retval %d != map->m_len %d",
1636 : : inode->i_ino, retval, map->m_len);
1637 : 0 : WARN_ON(1);
1638 : : }
1639 : :
1640 [ + - ]: 13 : status = map->m_flags & EXT4_MAP_UNWRITTEN ?
1641 : : EXTENT_STATUS_UNWRITTEN : EXTENT_STATUS_WRITTEN;
1642 : 13 : ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
1643 : : map->m_pblk, status);
1644 [ - + ]: 13 : if (ret != 0)
1645 : : retval = ret;
1646 : : }
1647 : :
1648 : : out_unlock:
1649 : 1763796 : up_read((&EXT4_I(inode)->i_data_sem));
1650 : :
1651 : 1764129 : return retval;
1652 : : }
1653 : :
1654 : : /*
1655 : : * This is a special get_blocks_t callback which is used by
1656 : : * ext4_da_write_begin(). It will either return mapped block or
1657 : : * reserve space for a single block.
1658 : : *
1659 : : * For delayed buffer_head we have BH_Mapped, BH_New, BH_Delay set.
1660 : : * We also have b_blocknr = -1 and b_bdev initialized properly
1661 : : *
1662 : : * For unwritten buffer_head we have BH_Mapped, BH_New, BH_Unwritten set.
1663 : : * We also have b_blocknr = physicalblock mapping unwritten extent and b_bdev
1664 : : * initialized properly.
1665 : : */
1666 : 0 : int ext4_da_get_block_prep(struct inode *inode, sector_t iblock,
1667 : : struct buffer_head *bh, int create)
1668 : : {
1669 : : struct ext4_map_blocks map;
1670 : : int ret = 0;
1671 : :
1672 [ - + ]: 1760106 : BUG_ON(create == 0);
1673 [ - + ]: 1760106 : BUG_ON(bh->b_size != inode->i_sb->s_blocksize);
1674 : :
1675 : 1760106 : map.m_lblk = iblock;
1676 : 1760106 : map.m_len = 1;
1677 : :
1678 : : /*
1679 : : * first, we need to know whether the block is allocated already
1680 : : * preallocated blocks are unmapped but should treated
1681 : : * the same as allocated blocks.
1682 : : */
1683 : 1760106 : ret = ext4_da_map_blocks(inode, iblock, &map, bh);
1684 [ + + ]: 1763629 : if (ret <= 0)
1685 : : return ret;
1686 : :
1687 : 22 : map_bh(bh, inode->i_sb, map.m_pblk);
1688 : 22 : bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
1689 : :
1690 [ + + ]: 22 : if (buffer_unwritten(bh)) {
1691 : : /* A delayed write to unwritten bh should be marked
1692 : : * new and mapped. Mapped ensures that we don't do
1693 : : * get_block multiple times when we write to the same
1694 : : * offset and new ensures that we do proper zero out
1695 : : * for partial write.
1696 : : */
1697 : : set_buffer_new(bh);
1698 : : set_buffer_mapped(bh);
1699 : : }
1700 : : return 0;
1701 : : }
1702 : :
1703 : 0 : static int bget_one(handle_t *handle, struct buffer_head *bh)
1704 : : {
1705 : : get_bh(bh);
1706 : 0 : return 0;
1707 : : }
1708 : :
1709 : 0 : static int bput_one(handle_t *handle, struct buffer_head *bh)
1710 : : {
1711 : : put_bh(bh);
1712 : 0 : return 0;
1713 : : }
1714 : :
1715 : 0 : static int __ext4_journalled_writepage(struct page *page,
1716 : : unsigned int len)
1717 : : {
1718 : 0 : struct address_space *mapping = page->mapping;
1719 : 0 : struct inode *inode = mapping->host;
1720 : : struct buffer_head *page_bufs = NULL;
1721 : : handle_t *handle = NULL;
1722 : : int ret = 0, err = 0;
1723 : 0 : int inline_data = ext4_has_inline_data(inode);
1724 : : struct buffer_head *inode_bh = NULL;
1725 : :
1726 : : ClearPageChecked(page);
1727 : :
1728 [ # # ]: 0 : if (inline_data) {
1729 [ # # ]: 0 : BUG_ON(page->index != 0);
1730 [ # # ]: 0 : BUG_ON(len > ext4_get_max_inline_size(inode));
1731 : 0 : inode_bh = ext4_journalled_write_inline_data(inode, len, page);
1732 [ # # ]: 0 : if (inode_bh == NULL)
1733 : : goto out;
1734 : : } else {
1735 [ # # ]: 0 : page_bufs = page_buffers(page);
1736 [ # # ]: 0 : if (!page_bufs) {
1737 : 0 : BUG();
1738 : : goto out;
1739 : : }
1740 : 0 : ext4_walk_page_buffers(handle, page_bufs, 0, len,
1741 : : NULL, bget_one);
1742 : : }
1743 : : /* As soon as we unlock the page, it can go away, but we have
1744 : : * references to buffers so we are safe */
1745 : 0 : unlock_page(page);
1746 : :
1747 : 0 : handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
1748 : : ext4_writepage_trans_blocks(inode));
1749 [ # # ]: 0 : if (IS_ERR(handle)) {
1750 : : ret = PTR_ERR(handle);
1751 : 0 : goto out;
1752 : : }
1753 : :
1754 [ # # ]: 0 : BUG_ON(!ext4_handle_valid(handle));
1755 : :
1756 [ # # ]: 0 : if (inline_data) {
1757 : 0 : ret = ext4_journal_get_write_access(handle, inode_bh);
1758 : :
1759 : 0 : err = ext4_handle_dirty_metadata(handle, inode, inode_bh);
1760 : :
1761 : : } else {
1762 : 0 : ret = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1763 : : do_journal_get_write_access);
1764 : :
1765 : 0 : err = ext4_walk_page_buffers(handle, page_bufs, 0, len, NULL,
1766 : : write_end_fn);
1767 : : }
1768 [ # # ]: 0 : if (ret == 0)
1769 : : ret = err;
1770 : 0 : EXT4_I(inode)->i_datasync_tid = handle->h_transaction->t_tid;
1771 : 0 : err = ext4_journal_stop(handle);
1772 [ # # ]: 0 : if (!ret)
1773 : : ret = err;
1774 : :
1775 [ # # ]: 0 : if (!ext4_has_inline_data(inode))
1776 : 0 : ext4_walk_page_buffers(NULL, page_bufs, 0, len,
1777 : : NULL, bput_one);
1778 : : ext4_set_inode_state(inode, EXT4_STATE_JDATA);
1779 : : out:
1780 : : brelse(inode_bh);
1781 : 0 : return ret;
1782 : : }
1783 : :
1784 : : /*
1785 : : * Note that we don't need to start a transaction unless we're journaling data
1786 : : * because we should have holes filled from ext4_page_mkwrite(). We even don't
1787 : : * need to file the inode to the transaction's list in ordered mode because if
1788 : : * we are writing back data added by write(), the inode is already there and if
1789 : : * we are writing back data modified via mmap(), no one guarantees in which
1790 : : * transaction the data will hit the disk. In case we are journaling data, we
1791 : : * cannot start transaction directly because transaction start ranks above page
1792 : : * lock so we have to do some magic.
1793 : : *
1794 : : * This function can get called via...
1795 : : * - ext4_writepages after taking page lock (have journal handle)
1796 : : * - journal_submit_inode_data_buffers (no journal handle)
1797 : : * - shrink_page_list via the kswapd/direct reclaim (no journal handle)
1798 : : * - grab_page_cache when doing write_begin (have journal handle)
1799 : : *
1800 : : * We don't do any block allocation in this function. If we have page with
1801 : : * multiple blocks we need to write those buffer_heads that are mapped. This
1802 : : * is important for mmaped based write. So if we do with blocksize 1K
1803 : : * truncate(f, 1024);
1804 : : * a = mmap(f, 0, 4096);
1805 : : * a[0] = 'a';
1806 : : * truncate(f, 4096);
1807 : : * we have in the page first buffer_head mapped via page_mkwrite call back
1808 : : * but other buffer_heads would be unmapped but dirty (dirty done via the
1809 : : * do_wp_page). So writepage should write the first block. If we modify
1810 : : * the mmap area beyond 1024 we will again get a page_fault and the
1811 : : * page_mkwrite callback will do the block allocation and mark the
1812 : : * buffer_heads mapped.
1813 : : *
1814 : : * We redirty the page if we have any buffer_heads that is either delay or
1815 : : * unwritten in the page.
1816 : : *
1817 : : * We can get recursively called as show below.
1818 : : *
1819 : : * ext4_writepage() -> kmalloc() -> __alloc_pages() -> page_launder() ->
1820 : : * ext4_writepage()
1821 : : *
1822 : : * But since we don't do any block allocation we should not deadlock.
1823 : : * Page also have the dirty flag cleared so we don't get recurive page_lock.
1824 : : */
1825 : 0 : static int ext4_writepage(struct page *page,
1826 : : struct writeback_control *wbc)
1827 : : {
1828 : : int ret = 0;
1829 : : loff_t size;
1830 : : unsigned int len;
1831 : : struct buffer_head *page_bufs = NULL;
1832 : 44596 : struct inode *inode = page->mapping->host;
1833 : : struct ext4_io_submit io_submit;
1834 : :
1835 : : trace_ext4_writepage(page);
1836 : : size = i_size_read(inode);
1837 [ + + ]: 44596 : if (page->index == size >> PAGE_CACHE_SHIFT)
1838 : 1784 : len = size & ~PAGE_CACHE_MASK;
1839 : : else
1840 : : len = PAGE_CACHE_SIZE;
1841 : :
1842 [ - + ]: 44596 : page_bufs = page_buffers(page);
1843 : : /*
1844 : : * We cannot do block allocation or other extent handling in this
1845 : : * function. If there are buffers needing that, we have to redirty
1846 : : * the page. But we may reach here when we do a journal commit via
1847 : : * journal_submit_inode_data_buffers() and in that case we must write
1848 : : * allocated buffers to achieve data=ordered mode guarantees.
1849 : : */
1850 [ + + ]: 44596 : if (ext4_walk_page_buffers(NULL, page_bufs, 0, len, NULL,
1851 : : ext4_bh_delay_or_unwritten)) {
1852 : 43069 : redirty_page_for_writepage(wbc, page);
1853 [ + + ]: 43069 : if (current->flags & PF_MEMALLOC) {
1854 : : /*
1855 : : * For memory cleaning there's no point in writing only
1856 : : * some buffers. So just bail out. Warn if we came here
1857 : : * from direct reclaim.
1858 : : */
1859 [ - + ][ # # ]: 4 : WARN_ON_ONCE((current->flags & (PF_MEMALLOC|PF_KSWAPD))
[ # # ]
1860 : : == PF_MEMALLOC);
1861 : 4 : unlock_page(page);
1862 : 4 : return 0;
1863 : : }
1864 : : }
1865 : :
1866 [ - + ][ # # ]: 44592 : if (PageChecked(page) && ext4_should_journal_data(inode))
1867 : : /*
1868 : : * It's mmapped pagecache. Add buffers and journal it. There
1869 : : * doesn't seem much point in redirtying the page here.
1870 : : */
1871 : 0 : return __ext4_journalled_writepage(page, len);
1872 : :
1873 : 44592 : ext4_io_submit_init(&io_submit, wbc);
1874 : 44592 : io_submit.io_end = ext4_init_io_end(inode, GFP_NOFS);
1875 [ - + ]: 44592 : if (!io_submit.io_end) {
1876 : 0 : redirty_page_for_writepage(wbc, page);
1877 : 0 : unlock_page(page);
1878 : 0 : return -ENOMEM;
1879 : : }
1880 : 44592 : ret = ext4_bio_write_page(&io_submit, page, len, wbc);
1881 : 44592 : ext4_io_submit(&io_submit);
1882 : : /* Drop io_end reference we got from init */
1883 : 44592 : ext4_put_io_end_defer(io_submit.io_end);
1884 : 44592 : return ret;
1885 : : }
1886 : :
1887 : 0 : static int mpage_submit_page(struct mpage_da_data *mpd, struct page *page)
1888 : : {
1889 : : int len;
1890 : 902751 : loff_t size = i_size_read(mpd->inode);
1891 : : int err;
1892 : :
1893 [ - + ]: 890026 : BUG_ON(page->index != mpd->first_page);
1894 [ + + ]: 890026 : if (page->index == size >> PAGE_CACHE_SHIFT)
1895 : 9810 : len = size & ~PAGE_CACHE_MASK;
1896 : : else
1897 : : len = PAGE_CACHE_SIZE;
1898 : 890026 : clear_page_dirty_for_io(page);
1899 : 890030 : err = ext4_bio_write_page(&mpd->io_submit, page, len, mpd->wbc);
1900 [ + ]: 890001 : if (!err)
1901 : 890025 : mpd->wbc->nr_to_write--;
1902 : 890001 : mpd->first_page++;
1903 : :
1904 : 890001 : return err;
1905 : : }
1906 : :
1907 : : #define BH_FLAGS ((1 << BH_Unwritten) | (1 << BH_Delay))
1908 : :
1909 : : /*
1910 : : * mballoc gives us at most this number of blocks...
1911 : : * XXX: That seems to be only a limitation of ext4_mb_normalize_request().
1912 : : * The rest of mballoc seems to handle chunks up to full group size.
1913 : : */
1914 : : #define MAX_WRITEPAGES_EXTENT_LEN 2048
1915 : :
1916 : : /*
1917 : : * mpage_add_bh_to_extent - try to add bh to extent of blocks to map
1918 : : *
1919 : : * @mpd - extent of blocks
1920 : : * @lblk - logical number of the block in the file
1921 : : * @bh - buffer head we want to add to the extent
1922 : : *
1923 : : * The function is used to collect contig. blocks in the same state. If the
1924 : : * buffer doesn't require mapping for writeback and we haven't started the
1925 : : * extent of buffers to map yet, the function returns 'true' immediately - the
1926 : : * caller can write the buffer right away. Otherwise the function returns true
1927 : : * if the block has been added to the extent, false if the block couldn't be
1928 : : * added.
1929 : : */
1930 : 0 : static bool mpage_add_bh_to_extent(struct mpage_da_data *mpd, ext4_lblk_t lblk,
1931 : : struct buffer_head *bh)
1932 : : {
1933 : : struct ext4_map_blocks *map = &mpd->map;
1934 : :
1935 : : /* Buffer that doesn't need mapping for writeback? */
1936 [ + ][ + ]: 913637 : if (!buffer_dirty(bh) || !buffer_mapped(bh) ||
[ + + ]
1937 [ + + ]: 274804 : (!buffer_delay(bh) && !buffer_unwritten(bh))) {
1938 : : /* So far no extent to map => we write the buffer right away */
1939 [ + ]: 274664 : if (map->m_len == 0)
1940 : : return true;
1941 : 1630 : return false;
1942 : : }
1943 : :
1944 : : /* First block in the extent? */
1945 [ + + ]: 638973 : if (map->m_len == 0) {
1946 : 86039 : map->m_lblk = lblk;
1947 : 86039 : map->m_len = 1;
1948 : 86039 : map->m_flags = bh->b_state & BH_FLAGS;
1949 : 86039 : return true;
1950 : : }
1951 : :
1952 : : /* Don't go larger than mballoc is willing to allocate */
1953 [ + + ]: 552934 : if (map->m_len >= MAX_WRITEPAGES_EXTENT_LEN)
1954 : : return false;
1955 : :
1956 : : /* Can we merge the block to our big extent? */
1957 [ + ][ + ]: 552906 : if (lblk == map->m_lblk + map->m_len &&
1958 : 552962 : (bh->b_state & BH_FLAGS) == map->m_flags) {
1959 : 552978 : map->m_len++;
1960 : 552978 : return true;
1961 : : }
1962 : : return false;
1963 : : }
1964 : :
1965 : : /*
1966 : : * mpage_process_page_bufs - submit page buffers for IO or add them to extent
1967 : : *
1968 : : * @mpd - extent of blocks for mapping
1969 : : * @head - the first buffer in the page
1970 : : * @bh - buffer we should start processing from
1971 : : * @lblk - logical number of the block in the file corresponding to @bh
1972 : : *
1973 : : * Walk through page buffers from @bh upto @head (exclusive) and either submit
1974 : : * the page for IO if all buffers in this page were mapped and there's no
1975 : : * accumulated extent of buffers to map or add buffers in the page to the
1976 : : * extent of buffers to map. The function returns 1 if the caller can continue
1977 : : * by processing the next page, 0 if it should stop adding buffers to the
1978 : : * extent to map because we cannot extend it anymore. It can also return value
1979 : : * < 0 in case of error during IO submission.
1980 : : */
1981 : 0 : static int mpage_process_page_bufs(struct mpage_da_data *mpd,
1982 : : struct buffer_head *head,
1983 : : struct buffer_head *bh,
1984 : : ext4_lblk_t lblk)
1985 : : {
1986 : 953355 : struct inode *inode = mpd->inode;
1987 : : int err;
1988 : 913917 : ext4_lblk_t blocks = (i_size_read(inode) + (1 << inode->i_blkbits) - 1)
1989 : 913917 : >> inode->i_blkbits;
1990 : :
1991 : : do {
1992 [ - + ]: 913804 : BUG_ON(buffer_locked(bh));
1993 : :
1994 [ + + ][ + + ]: 913804 : if (lblk >= blocks || !mpage_add_bh_to_extent(mpd, lblk, bh)) {
1995 : : /* Found extent to map? */
1996 [ + ]: 1682 : if (mpd->map.m_len)
1997 : : return 0;
1998 : : /* Everything mapped so far and we hit EOF */
1999 : : break;
2000 : : }
2001 [ + ]: 911936 : } while (lblk++, (bh = bh->b_this_page) != head);
2002 : : /* So far everything mapped? Submit the page for IO. */
2003 [ + + ]: 1825975 : if (mpd->map.m_len == 0) {
2004 : 273217 : err = mpage_submit_page(mpd, head->b_page);
2005 [ + ]: 273232 : if (err < 0)
2006 : : return err;
2007 : : }
2008 : 912147 : return lblk < blocks;
2009 : : }
2010 : :
2011 : : /*
2012 : : * mpage_map_buffers - update buffers corresponding to changed extent and
2013 : : * submit fully mapped pages for IO
2014 : : *
2015 : : * @mpd - description of extent to map, on return next extent to map
2016 : : *
2017 : : * Scan buffers corresponding to changed extent (we expect corresponding pages
2018 : : * to be already locked) and update buffer state according to new extent state.
2019 : : * We map delalloc buffers to their physical location, clear unwritten bits,
2020 : : * and mark buffers as uninit when we perform writes to uninitialized extents
2021 : : * and do extent conversion after IO is finished. If the last page is not fully
2022 : : * mapped, we update @map to the next extent in the last page that needs
2023 : : * mapping. Otherwise we submit the page for IO.
2024 : : */
2025 : 0 : static int mpage_map_and_submit_buffers(struct mpage_da_data *mpd)
2026 : : {
2027 : : struct pagevec pvec;
2028 : : int nr_pages, i;
2029 : 86015 : struct inode *inode = mpd->inode;
2030 : : struct buffer_head *head, *bh;
2031 : 86015 : int bpp_bits = PAGE_CACHE_SHIFT - inode->i_blkbits;
2032 : : pgoff_t start, end;
2033 : : ext4_lblk_t lblk;
2034 : : sector_t pblock;
2035 : : int err;
2036 : :
2037 : 86015 : start = mpd->map.m_lblk >> bpp_bits;
2038 : 86015 : end = (mpd->map.m_lblk + mpd->map.m_len - 1) >> bpp_bits;
2039 : 86015 : lblk = start << bpp_bits;
2040 : 86015 : pblock = mpd->map.m_pblk;
2041 : :
2042 : : pagevec_init(&pvec, 0);
2043 [ + + ]: 207462 : while (start <= end) {
2044 : 121433 : nr_pages = pagevec_lookup(&pvec, inode->i_mapping, start,
2045 : : PAGEVEC_SIZE);
2046 [ + + ]: 121450 : if (nr_pages == 0)
2047 : : break;
2048 [ + + ]: 738248 : for (i = 0; i < nr_pages; i++) {
2049 : 692008 : struct page *page = pvec.pages[i];
2050 : :
2051 [ + + ]: 692008 : if (page->index > end)
2052 : : break;
2053 : : /* Up to 'end' pages must be contiguous */
2054 [ - + ]: 616806 : BUG_ON(page->index != start);
2055 [ - + ]: 616806 : bh = head = page_buffers(page);
2056 : : do {
2057 [ - + ]: 616811 : if (lblk < mpd->map.m_lblk)
2058 : 0 : continue;
2059 [ - + ]: 616811 : if (lblk >= mpd->map.m_lblk + mpd->map.m_len) {
2060 : : /*
2061 : : * Buffer after end of mapped extent.
2062 : : * Find next buffer in the page to map.
2063 : : */
2064 : 0 : mpd->map.m_len = 0;
2065 : 0 : mpd->map.m_flags = 0;
2066 : : /*
2067 : : * FIXME: If dioread_nolock supports
2068 : : * blocksize < pagesize, we need to make
2069 : : * sure we add size mapped so far to
2070 : : * io_end->size as the following call
2071 : : * can submit the page for IO.
2072 : : */
2073 : 0 : err = mpage_process_page_bufs(mpd, head,
2074 : : bh, lblk);
2075 : : pagevec_release(&pvec);
2076 [ # # ]: 0 : if (err > 0)
2077 : : err = 0;
2078 : 0 : return err;
2079 : : }
2080 [ + + ]: 616811 : if (buffer_delay(bh)) {
2081 : : clear_buffer_delay(bh);
2082 : 616816 : bh->b_blocknr = pblock++;
2083 : : }
2084 : : clear_buffer_unwritten(bh);
2085 [ + + ]: 616815 : } while (lblk++, (bh = bh->b_this_page) != head);
2086 : :
2087 : : /*
2088 : : * FIXME: This is going to break if dioread_nolock
2089 : : * supports blocksize < pagesize as we will try to
2090 : : * convert potentially unmapped parts of inode.
2091 : : */
2092 : 616810 : mpd->io_submit.io_end->size += PAGE_CACHE_SIZE;
2093 : : /* Page fully mapped - let IO run! */
2094 : 616810 : err = mpage_submit_page(mpd, page);
2095 [ - + ]: 616799 : if (err < 0) {
2096 : : pagevec_release(&pvec);
2097 : 0 : return err;
2098 : : }
2099 : 616799 : start++;
2100 : : }
2101 : : pagevec_release(&pvec);
2102 : : }
2103 : : /* Extent fully mapped and matches with page boundary. We are done. */
2104 : 86030 : mpd->map.m_len = 0;
2105 : 86030 : mpd->map.m_flags = 0;
2106 : 86030 : return 0;
2107 : : }
2108 : :
2109 : 0 : static int mpage_map_one_extent(handle_t *handle, struct mpage_da_data *mpd)
2110 : : {
2111 : 86041 : struct inode *inode = mpd->inode;
2112 : 86041 : struct ext4_map_blocks *map = &mpd->map;
2113 : : int get_blocks_flags;
2114 : : int err;
2115 : :
2116 : : trace_ext4_da_write_pages_extent(inode, map);
2117 : : /*
2118 : : * Call ext4_map_blocks() to allocate any delayed allocation blocks, or
2119 : : * to convert an uninitialized extent to be initialized (in the case
2120 : : * where we have written into one or more preallocated blocks). It is
2121 : : * possible that we're going to need more metadata blocks than
2122 : : * previously reserved. However we must not fail because we're in
2123 : : * writeback and there is nothing we can do about it so it might result
2124 : : * in data loss. So use reserved blocks to allocate metadata if
2125 : : * possible.
2126 : : *
2127 : : * We pass in the magic EXT4_GET_BLOCKS_DELALLOC_RESERVE if the blocks
2128 : : * in question are delalloc blocks. This affects functions in many
2129 : : * different parts of the allocation call path. This flag exists
2130 : : * primarily because we don't want to change *many* call functions, so
2131 : : * ext4_map_blocks() will set the EXT4_STATE_DELALLOC_RESERVED flag
2132 : : * once the inode's allocation semaphore is taken.
2133 : : */
2134 : : get_blocks_flags = EXT4_GET_BLOCKS_CREATE |
2135 : : EXT4_GET_BLOCKS_METADATA_NOFAIL;
2136 [ - + ]: 86045 : if (ext4_should_dioread_nolock(inode))
2137 : : get_blocks_flags |= EXT4_GET_BLOCKS_IO_CREATE_EXT;
2138 [ + - ]: 86045 : if (map->m_flags & (1 << BH_Delay))
2139 : 86045 : get_blocks_flags |= EXT4_GET_BLOCKS_DELALLOC_RESERVE;
2140 : :
2141 : 86045 : err = ext4_map_blocks(handle, inode, map, get_blocks_flags);
2142 [ + + ]: 86044 : if (err < 0)
2143 : : return err;
2144 [ - + ]: 86034 : if (map->m_flags & EXT4_MAP_UNINIT) {
2145 [ # # ][ # # ]: 0 : if (!mpd->io_submit.io_end->handle &&
2146 : : ext4_handle_valid(handle)) {
2147 : 0 : mpd->io_submit.io_end->handle = handle->h_rsv_handle;
2148 : 0 : handle->h_rsv_handle = NULL;
2149 : : }
2150 : 0 : ext4_set_io_unwritten_flag(inode, mpd->io_submit.io_end);
2151 : : }
2152 : :
2153 [ - + ]: 86037 : BUG_ON(map->m_len == 0);
2154 [ + + ]: 86037 : if (map->m_flags & EXT4_MAP_NEW) {
2155 : 86029 : struct block_device *bdev = inode->i_sb->s_bdev;
2156 : : int i;
2157 : :
2158 [ + + ]: 702453 : for (i = 0; i < map->m_len; i++)
2159 : 616466 : unmap_underlying_metadata(bdev, map->m_pblk + i);
2160 : : }
2161 : : return 0;
2162 : : }
2163 : :
2164 : : /*
2165 : : * mpage_map_and_submit_extent - map extent starting at mpd->lblk of length
2166 : : * mpd->len and submit pages underlying it for IO
2167 : : *
2168 : : * @handle - handle for journal operations
2169 : : * @mpd - extent to map
2170 : : * @give_up_on_write - we set this to true iff there is a fatal error and there
2171 : : * is no hope of writing the data. The caller should discard
2172 : : * dirty pages to avoid infinite loops.
2173 : : *
2174 : : * The function maps extent starting at mpd->lblk of length mpd->len. If it is
2175 : : * delayed, blocks are allocated, if it is unwritten, we may need to convert
2176 : : * them to initialized or split the described range from larger unwritten
2177 : : * extent. Note that we need not map all the described range since allocation
2178 : : * can return less blocks or the range is covered by more unwritten extents. We
2179 : : * cannot map more because we are limited by reserved transaction credits. On
2180 : : * the other hand we always make sure that the last touched page is fully
2181 : : * mapped so that it can be written out (and thus forward progress is
2182 : : * guaranteed). After mapping we submit all mapped pages for IO.
2183 : : */
2184 : 0 : static int mpage_map_and_submit_extent(handle_t *handle,
2185 : : struct mpage_da_data *mpd,
2186 : : bool *give_up_on_write)
2187 : : {
2188 : 86042 : struct inode *inode = mpd->inode;
2189 : : struct ext4_map_blocks *map = &mpd->map;
2190 : : int err;
2191 : : loff_t disksize;
2192 : :
2193 : 86042 : mpd->io_submit.io_end->offset =
2194 : 86042 : ((loff_t)map->m_lblk) << inode->i_blkbits;
2195 : : do {
2196 : 86043 : err = mpage_map_one_extent(handle, mpd);
2197 [ - + ]: 86019 : if (err < 0) {
2198 : 0 : struct super_block *sb = inode->i_sb;
2199 : :
2200 [ # # ]: 0 : if (EXT4_SB(sb)->s_mount_flags & EXT4_MF_FS_ABORTED)
2201 : : goto invalidate_dirty_pages;
2202 : : /*
2203 : : * Let the uper layers retry transient errors.
2204 : : * In the case of ENOSPC, if ext4_count_free_blocks()
2205 : : * is non-zero, a commit should free up blocks.
2206 : : */
2207 [ # # ][ # # ]: 0 : if ((err == -ENOMEM) ||
2208 [ # # ]: 0 : (err == -ENOSPC && ext4_count_free_clusters(sb)))
2209 : 0 : return err;
2210 : 0 : ext4_msg(sb, KERN_CRIT,
2211 : : "Delayed block allocation failed for "
2212 : : "inode %lu at logical offset %llu with"
2213 : : " max blocks %u with error %d",
2214 : : inode->i_ino,
2215 : : (unsigned long long)map->m_lblk,
2216 : : (unsigned)map->m_len, -err);
2217 : 0 : ext4_msg(sb, KERN_CRIT,
2218 : : "This should not happen!! Data will "
2219 : : "be lost\n");
2220 [ # # ]: 0 : if (err == -ENOSPC)
2221 : 0 : ext4_print_free_blocks(inode);
2222 : : invalidate_dirty_pages:
2223 : 0 : *give_up_on_write = true;
2224 : 0 : return err;
2225 : : }
2226 : : /*
2227 : : * Update buffer state, submit mapped pages, and get us new
2228 : : * extent to map
2229 : : */
2230 : 86019 : err = mpage_map_and_submit_buffers(mpd);
2231 [ + ]: 86043 : if (err < 0)
2232 : : return err;
2233 [ + + ]: 86049 : } while (map->m_len);
2234 : :
2235 : : /* Update on-disk size after IO is submitted */
2236 : 86048 : disksize = ((loff_t)mpd->first_page) << PAGE_CACHE_SHIFT;
2237 [ + + ]: 86048 : if (disksize > EXT4_I(inode)->i_disksize) {
2238 : : int err2;
2239 : :
2240 : : ext4_wb_update_i_disksize(inode, disksize);
2241 : 33188 : err2 = ext4_mark_inode_dirty(handle, inode);
2242 [ - + ]: 33187 : if (err2)
2243 : 0 : ext4_error(inode->i_sb,
2244 : : "Failed to mark inode %lu dirty",
2245 : : inode->i_ino);
2246 [ + + ]: 33189 : if (!err)
2247 : : err = err2;
2248 : : }
2249 : 86049 : return err;
2250 : : }
2251 : :
2252 : : /*
2253 : : * Calculate the total number of credits to reserve for one writepages
2254 : : * iteration. This is called from ext4_writepages(). We map an extent of
2255 : : * up to MAX_WRITEPAGES_EXTENT_LEN blocks and then we go on and finish mapping
2256 : : * the last partial page. So in total we can map MAX_WRITEPAGES_EXTENT_LEN +
2257 : : * bpp - 1 blocks in bpp different extents.
2258 : : */
2259 : 0 : static int ext4_da_writepages_trans_blocks(struct inode *inode)
2260 : : {
2261 : : int bpp = ext4_journal_blocks_per_page(inode);
2262 : :
2263 : 128351 : return ext4_meta_trans_blocks(inode,
2264 : : MAX_WRITEPAGES_EXTENT_LEN + bpp - 1, bpp);
2265 : : }
2266 : :
2267 : : /*
2268 : : * mpage_prepare_extent_to_map - find & lock contiguous range of dirty pages
2269 : : * and underlying extent to map
2270 : : *
2271 : : * @mpd - where to look for pages
2272 : : *
2273 : : * Walk dirty pages in the mapping. If they are fully mapped, submit them for
2274 : : * IO immediately. When we find a page which isn't mapped we start accumulating
2275 : : * extent of buffers underlying these pages that needs mapping (formed by
2276 : : * either delayed or unwritten buffers). We also lock the pages containing
2277 : : * these buffers. The extent found is returned in @mpd structure (starting at
2278 : : * mpd->lblk with length mpd->len blocks).
2279 : : *
2280 : : * Note that this function can attach bios to one io_end structure which are
2281 : : * neither logically nor physically contiguous. Although it may seem as an
2282 : : * unnecessary complication, it is actually inevitable in blocksize < pagesize
2283 : : * case as we need to track IO to all buffers underlying a page in one io_end.
2284 : : */
2285 : 0 : static int mpage_prepare_extent_to_map(struct mpage_da_data *mpd)
2286 : : {
2287 : 128363 : struct address_space *mapping = mpd->inode->i_mapping;
2288 : : struct pagevec pvec;
2289 : : unsigned int nr_pages;
2290 : 128363 : long left = mpd->wbc->nr_to_write;
2291 : 128363 : pgoff_t index = mpd->first_page;
2292 : 128363 : pgoff_t end = mpd->last_page;
2293 : : int tag;
2294 : : int i, err = 0;
2295 : 128363 : int blkbits = mpd->inode->i_blkbits;
2296 : : ext4_lblk_t lblk;
2297 : : struct buffer_head *head;
2298 : :
2299 [ + + ][ + + ]: 128363 : if (mpd->wbc->sync_mode == WB_SYNC_ALL || mpd->wbc->tagged_writepages)
2300 : : tag = PAGECACHE_TAG_TOWRITE;
2301 : : else
2302 : : tag = PAGECACHE_TAG_DIRTY;
2303 : :
2304 : : pagevec_init(&pvec, 0);
2305 : 128363 : mpd->map.m_len = 0;
2306 : 128363 : mpd->next_page = index;
2307 [ + + ]: 191364 : while (index <= end) {
2308 : 186819 : nr_pages = pagevec_lookup_tag(&pvec, mapping, &index, tag,
2309 : 186819 : min(end - index, (pgoff_t)PAGEVEC_SIZE-1) + 1);
2310 [ + + ]: 186827 : if (nr_pages == 0)
2311 : : goto out;
2312 : :
2313 [ + + ]: 1086255 : for (i = 0; i < nr_pages; i++) {
2314 : 1023245 : struct page *page = pvec.pages[i];
2315 : :
2316 : : /*
2317 : : * At this point, the page may be truncated or
2318 : : * invalidated (changing page->mapping to NULL), or
2319 : : * even swizzled back from swapper_space to tmpfs file
2320 : : * mapping. However, page->index will not change
2321 : : * because we have a reference on the page.
2322 : : */
2323 [ + + ]: 1023245 : if (page->index > end)
2324 : : goto out;
2325 : :
2326 : : /*
2327 : : * Accumulated enough dirty pages? This doesn't apply
2328 : : * to WB_SYNC_ALL mode. For integrity sync we have to
2329 : : * keep going because someone may be concurrently
2330 : : * dirtying pages, and we might have synced a lot of
2331 : : * newly appeared dirty pages, but have not synced all
2332 : : * of the old dirty pages.
2333 : : */
2334 [ + + ][ + + ]: 1023131 : if (mpd->wbc->sync_mode == WB_SYNC_NONE && left <= 0)
2335 : : goto out;
2336 : :
2337 : : /* If we can't merge this page, we are done. */
2338 [ + + ][ + + ]: 1022603 : if (mpd->map.m_len > 0 && mpd->next_page != page->index)
2339 : : goto out;
2340 : :
2341 : : lock_page(page);
2342 : : /*
2343 : : * If the page is no longer dirty, or its mapping no
2344 : : * longer corresponds to inode we are writing (which
2345 : : * means it has been truncated or invalidated), or the
2346 : : * page is already under writeback and we are not doing
2347 : : * a data integrity writeback, skip the page
2348 : : */
2349 [ + + ][ + + ]: 951188 : if (!PageDirty(page) ||
2350 [ + + ]: 5410 : (PageWriteback(page) &&
2351 [ - + ]: 913867 : (mpd->wbc->sync_mode == WB_SYNC_NONE)) ||
2352 : 913867 : unlikely(page->mapping != mapping)) {
2353 : 37321 : unlock_page(page);
2354 : 37442 : continue;
2355 : : }
2356 : :
2357 : : wait_on_page_writeback(page);
2358 [ - + ]: 913793 : BUG_ON(PageWriteback(page));
2359 : :
2360 [ + + ]: 913793 : if (mpd->map.m_len == 0)
2361 : 359257 : mpd->first_page = page->index;
2362 : 913793 : mpd->next_page = page->index + 1;
2363 : : /* Add all dirty buffers to mpd */
2364 : 1827586 : lblk = ((ext4_lblk_t)page->index) <<
2365 : 913793 : (PAGE_CACHE_SHIFT - blkbits);
2366 [ - + ]: 913793 : head = page_buffers(page);
2367 : 913793 : err = mpage_process_page_bufs(mpd, head, head, lblk);
2368 [ + + ]: 913799 : if (err <= 0)
2369 : : goto out;
2370 : : err = 0;
2371 : 882677 : left--;
2372 : : }
2373 : : pagevec_release(&pvec);
2374 : 63002 : cond_resched();
2375 : : }
2376 : : return 0;
2377 : : out:
2378 : : pagevec_release(&pvec);
2379 : 123794 : return err;
2380 : : }
2381 : :
2382 : 0 : static int __writepage(struct page *page, struct writeback_control *wbc,
2383 : : void *data)
2384 : : {
2385 : : struct address_space *mapping = data;
2386 : 1354 : int ret = ext4_writepage(page, wbc);
2387 : : mapping_set_error(mapping, ret);
2388 : 0 : return ret;
2389 : : }
2390 : :
2391 : 0 : static int ext4_writepages(struct address_space *mapping,
2392 : : struct writeback_control *wbc)
2393 : : {
2394 : : pgoff_t writeback_index = 0;
2395 : 88781 : long nr_to_write = wbc->nr_to_write;
2396 : : int range_whole = 0;
2397 : : int cycled = 1;
2398 : : handle_t *handle = NULL;
2399 : : struct mpage_da_data mpd;
2400 : 217145 : struct inode *inode = mapping->host;
2401 : : int needed_blocks, rsv_blocks = 0, ret = 0;
2402 : 88781 : struct ext4_sb_info *sbi = EXT4_SB(mapping->host->i_sb);
2403 : : bool done;
2404 : : struct blk_plug plug;
2405 : 88781 : bool give_up_on_write = false;
2406 : :
2407 : : trace_ext4_writepages(inode, wbc);
2408 : :
2409 : : /*
2410 : : * No pages to write? This is mainly a kludge to avoid starting
2411 : : * a transaction for special inodes like journal inode on last iput()
2412 : : * because that could violate lock ordering on umount
2413 : : */
2414 [ + + ][ + + ]: 88780 : if (!mapping->nrpages || !mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
2415 : : goto out_writepages;
2416 : :
2417 [ + + ]: 43506 : if (ext4_should_journal_data(inode)) {
2418 : : struct blk_plug plug;
2419 : :
2420 : 1354 : blk_start_plug(&plug);
2421 : 1354 : ret = write_cache_pages(mapping, wbc, __writepage, mapping);
2422 : 1354 : blk_finish_plug(&plug);
2423 : : goto out_writepages;
2424 : : }
2425 : :
2426 : : /*
2427 : : * If the filesystem has aborted, it is read-only, so return
2428 : : * right away instead of dumping stack traces later on that
2429 : : * will obscure the real source of the problem. We test
2430 : : * EXT4_MF_FS_ABORTED instead of sb->s_flag's MS_RDONLY because
2431 : : * the latter could be true if the filesystem is mounted
2432 : : * read-only, and in that case, ext4_writepages should
2433 : : * *never* be called, so if that ever happens, we would want
2434 : : * the stack trace.
2435 : : */
2436 [ + ]: 42152 : if (unlikely(sbi->s_mount_flags & EXT4_MF_FS_ABORTED)) {
2437 : : ret = -EROFS;
2438 : : goto out_writepages;
2439 : : }
2440 : :
2441 [ - + ]: 42155 : if (ext4_should_dioread_nolock(inode)) {
2442 : : /*
2443 : : * We may need to convert up to one extent per block in
2444 : : * the page and we may dirty the inode.
2445 : : */
2446 : 0 : rsv_blocks = 1 + (PAGE_CACHE_SIZE >> inode->i_blkbits);
2447 : : }
2448 : :
2449 : : /*
2450 : : * If we have inline data and arrive here, it means that
2451 : : * we will soon create the block for the 1st page, so
2452 : : * we'd better clear the inline data here.
2453 : : */
2454 [ - + ]: 42155 : if (ext4_has_inline_data(inode)) {
2455 : : /* Just inode will be modified... */
2456 : : handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
2457 [ # # ]: 0 : if (IS_ERR(handle)) {
2458 : : ret = PTR_ERR(handle);
2459 : 0 : goto out_writepages;
2460 : : }
2461 [ # # ]: 0 : BUG_ON(ext4_test_inode_state(inode,
2462 : : EXT4_STATE_MAY_INLINE_DATA));
2463 : 0 : ext4_destroy_inline_data(handle, inode);
2464 : 0 : ext4_journal_stop(handle);
2465 : : }
2466 : :
2467 [ + + ][ + + ]: 130932 : if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
2468 : : range_whole = 1;
2469 : :
2470 [ + + ]: 42151 : if (wbc->range_cyclic) {
2471 : 2497 : writeback_index = mapping->writeback_index;
2472 [ + + ]: 2497 : if (writeback_index)
2473 : : cycled = 0;
2474 : 2497 : mpd.first_page = writeback_index;
2475 : 2497 : mpd.last_page = -1;
2476 : : } else {
2477 : 39654 : mpd.first_page = wbc->range_start >> PAGE_CACHE_SHIFT;
2478 : 39654 : mpd.last_page = wbc->range_end >> PAGE_CACHE_SHIFT;
2479 : : }
2480 : :
2481 : 42151 : mpd.inode = inode;
2482 : 42151 : mpd.wbc = wbc;
2483 : 42151 : ext4_io_submit_init(&mpd.io_submit, wbc);
2484 : : retry:
2485 [ + + ][ + + ]: 42594 : if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
2486 : 32901 : tag_pages_for_writeback(mapping, mpd.first_page, mpd.last_page);
2487 : : done = false;
2488 : 42595 : blk_start_plug(&plug);
2489 [ + + ][ + + ]: 170929 : while (!done && mpd.first_page <= mpd.last_page) {
2490 : : /* For each extent of pages we use new io_end */
2491 : 128365 : mpd.io_submit.io_end = ext4_init_io_end(inode, GFP_KERNEL);
2492 [ + + ]: 128375 : if (!mpd.io_submit.io_end) {
2493 : : ret = -ENOMEM;
2494 : : break;
2495 : : }
2496 : :
2497 : : /*
2498 : : * We have two constraints: We find one extent to map and we
2499 : : * must always write out whole page (makes a difference when
2500 : : * blocksize < pagesize) so that we don't block on IO when we
2501 : : * try to write out the rest of the page. Journalled mode is
2502 : : * not supported by delalloc.
2503 : : */
2504 [ - + ]: 128349 : BUG_ON(ext4_should_journal_data(inode));
2505 : 128349 : needed_blocks = ext4_da_writepages_trans_blocks(inode);
2506 : :
2507 : : /* start a new transaction */
2508 : : handle = ext4_journal_start_with_reserve(inode,
2509 : : EXT4_HT_WRITE_PAGE, needed_blocks, rsv_blocks);
2510 [ - + ]: 128373 : if (IS_ERR(handle)) {
2511 : : ret = PTR_ERR(handle);
2512 : 0 : ext4_msg(inode->i_sb, KERN_CRIT, "%s: jbd2_start: "
2513 : : "%ld pages, ino %lu; err %d", __func__,
2514 : : wbc->nr_to_write, inode->i_ino, ret);
2515 : : /* Release allocated io_end */
2516 : 0 : ext4_put_io_end(mpd.io_submit.io_end);
2517 : 0 : break;
2518 : : }
2519 : :
2520 : 128373 : trace_ext4_da_write_pages(inode, mpd.first_page, mpd.wbc);
2521 : 128373 : ret = mpage_prepare_extent_to_map(&mpd);
2522 [ + - ]: 128373 : if (!ret) {
2523 [ + + ]: 128373 : if (mpd.map.m_len)
2524 : 86042 : ret = mpage_map_and_submit_extent(handle, &mpd,
2525 : : &give_up_on_write);
2526 : : else {
2527 : : /*
2528 : : * We scanned the whole range (or exhausted
2529 : : * nr_to_write), submitted what was mapped and
2530 : : * didn't find anything needing mapping. We are
2531 : : * done.
2532 : : */
2533 : : done = true;
2534 : : }
2535 : : }
2536 : 128372 : ext4_journal_stop(handle);
2537 : : /* Submit prepared bio */
2538 : 128379 : ext4_io_submit(&mpd.io_submit);
2539 : : /* Unlock pages we didn't use */
2540 : 128343 : mpage_release_unused_pages(&mpd, give_up_on_write);
2541 : : /* Drop our io_end reference we got from init */
2542 : 128238 : ext4_put_io_end(mpd.io_submit.io_end);
2543 : :
2544 [ - + ][ # # ]: 128291 : if (ret == -ENOSPC && sbi->s_journal) {
2545 : : /*
2546 : : * Commit the transaction which would
2547 : : * free blocks released in the transaction
2548 : : * and try again
2549 : : */
2550 : 0 : jbd2_journal_force_commit_nested(sbi->s_journal);
2551 : : ret = 0;
2552 : 0 : continue;
2553 : : }
2554 : : /* Fatal error - ENOMEM, EIO... */
2555 [ + ]: 128291 : if (ret)
2556 : : break;
2557 : : }
2558 : 42538 : blk_finish_plug(&plug);
2559 [ + + ][ + + ]: 42596 : if (!ret && !cycled && wbc->nr_to_write > 0) {
2560 : : cycled = 1;
2561 : 439 : mpd.last_page = writeback_index - 1;
2562 : 439 : mpd.first_page = 0;
2563 : 439 : goto retry;
2564 : : }
2565 : :
2566 : : /* Update index */
2567 [ + + ][ + + ]: 42157 : if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
[ + - ]
2568 : : /*
2569 : : * Set the writeback_index so that range_cyclic
2570 : : * mode will write it back later
2571 : : */
2572 : 27875 : mapping->writeback_index = mpd.first_page;
2573 : :
2574 : : out_writepages:
2575 : : trace_ext4_writepages_result(inode, wbc, ret,
2576 : 88782 : nr_to_write - wbc->nr_to_write);
2577 : 88782 : return ret;
2578 : : }
2579 : :
2580 : 0 : static int ext4_nonda_switch(struct super_block *sb)
2581 : : {
2582 : : s64 free_clusters, dirty_clusters;
2583 : : struct ext4_sb_info *sbi = EXT4_SB(sb);
2584 : :
2585 : : /*
2586 : : * switch to non delalloc mode if we are running low
2587 : : * on free block. The free block accounting via percpu
2588 : : * counters can get slightly wrong with percpu_counter_batch getting
2589 : : * accumulated on each CPU without updating global counters
2590 : : * Delalloc need an accurate free block accounting. So switch
2591 : : * to non delalloc when we are near to error range.
2592 : : */
2593 : : free_clusters =
2594 : 6469296 : percpu_counter_read_positive(&sbi->s_freeclusters_counter);
2595 : : dirty_clusters =
2596 : 6469661 : percpu_counter_read_positive(&sbi->s_dirtyclusters_counter);
2597 : : /*
2598 : : * Start pushing delalloc when 1/2 of free blocks are dirty.
2599 : : */
2600 [ + + ][ - + ]: 6471825 : if (dirty_clusters && (free_clusters < 2 * dirty_clusters))
2601 : 0 : try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);
2602 : :
2603 [ + - ][ + ]: 6469519 : if (2 * free_clusters < 3 * dirty_clusters ||
2604 : 6469519 : free_clusters < (dirty_clusters + EXT4_FREECLUSTERS_WATERMARK)) {
2605 : : /*
2606 : : * free block count is less than 150% of dirty blocks
2607 : : * or free blocks is less than watermark
2608 : : */
2609 : : return 1;
2610 : : }
2611 : 6469710 : return 0;
2612 : : }
2613 : :
2614 : 0 : static int ext4_da_write_begin(struct file *file, struct address_space *mapping,
2615 : : loff_t pos, unsigned len, unsigned flags,
2616 : : struct page **pagep, void **fsdata)
2617 : : {
2618 : 6264849 : int ret, retries = 0;
2619 : : struct page *page;
2620 : : pgoff_t index;
2621 : 12527255 : struct inode *inode = mapping->host;
2622 : : handle_t *handle;
2623 : :
2624 : 6264849 : index = pos >> PAGE_CACHE_SHIFT;
2625 : :
2626 [ - + ]: 6264849 : if (ext4_nonda_switch(inode->i_sb)) {
2627 : 0 : *fsdata = (void *)FALL_BACK_TO_NONDELALLOC;
2628 : 0 : return ext4_write_begin(file, mapping, pos,
2629 : : len, flags, pagep, fsdata);
2630 : : }
2631 : 6264363 : *fsdata = (void *)0;
2632 : : trace_ext4_da_write_begin(inode, pos, len, flags);
2633 : :
2634 [ + - ]: 6263743 : if (ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA)) {
2635 : 0 : ret = ext4_da_write_inline_data_begin(mapping, inode,
2636 : : pos, len, flags,
2637 : : pagep, fsdata);
2638 [ # # ]: 0 : if (ret < 0)
2639 : : return ret;
2640 [ # # ]: 0 : if (ret == 1)
2641 : : return 0;
2642 : : }
2643 : :
2644 : : /*
2645 : : * grab_cache_page_write_begin() can take a long time if the
2646 : : * system is thrashing due to memory pressure, or if the page
2647 : : * is being written back. So grab it first before we start
2648 : : * the transaction handle. This also allows us to allocate
2649 : : * the page (if needed) without using GFP_NOFS.
2650 : : */
2651 : : retry_grab:
2652 : 6263743 : page = grab_cache_page_write_begin(mapping, index, flags);
2653 [ + ]: 6263146 : if (!page)
2654 : : return -ENOMEM;
2655 : 6265347 : unlock_page(page);
2656 : :
2657 : : /*
2658 : : * With delayed allocation, we don't log the i_disksize update
2659 : : * if there is delayed block allocation. But we still need
2660 : : * to journalling the i_disksize update if writes to the end
2661 : : * of file which has an already mapped buffer.
2662 : : */
2663 : : retry_journal:
2664 : : handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE, 1);
2665 [ - + ]: 6265404 : if (IS_ERR(handle)) {
2666 : 0 : page_cache_release(page);
2667 : 0 : return PTR_ERR(handle);
2668 : : }
2669 : :
2670 : : lock_page(page);
2671 [ - + ]: 6263588 : if (page->mapping != mapping) {
2672 : : /* The page got truncated from under us */
2673 : 0 : unlock_page(page);
2674 : 0 : page_cache_release(page);
2675 : 0 : ext4_journal_stop(handle);
2676 : 0 : goto retry_grab;
2677 : : }
2678 : : /* In case writeback began while the page was unlocked */
2679 : 6263588 : wait_for_stable_page(page);
2680 : :
2681 : 6265596 : ret = __block_write_begin(page, pos, len, ext4_da_get_block_prep);
2682 [ - + ]: 12527609 : if (ret < 0) {
2683 : 0 : unlock_page(page);
2684 : 0 : ext4_journal_stop(handle);
2685 : : /*
2686 : : * block_write_begin may have instantiated a few blocks
2687 : : * outside i_size. Trim these off again. Don't need
2688 : : * i_size_read because we hold i_mutex.
2689 : : */
2690 [ # # ]: 0 : if (pos + len > inode->i_size)
2691 : : ext4_truncate_failed_write(inode);
2692 : :
2693 [ # # # # ]: 0 : if (ret == -ENOSPC &&
2694 : 0 : ext4_should_retry_alloc(inode->i_sb, &retries))
2695 : : goto retry_journal;
2696 : :
2697 : 0 : page_cache_release(page);
2698 : 0 : return ret;
2699 : : }
2700 : :
2701 : 6262760 : *pagep = page;
2702 : 6262760 : return ret;
2703 : : }
2704 : :
2705 : : /*
2706 : : * Check if we should update i_disksize
2707 : : * when write to the end of file but not require block allocation
2708 : : */
2709 : 0 : static int ext4_da_should_update_i_disksize(struct page *page,
2710 : : unsigned long offset)
2711 : : {
2712 : : struct buffer_head *bh;
2713 : 4981620 : struct inode *inode = page->mapping->host;
2714 : : unsigned int idx;
2715 : : int i;
2716 : :
2717 [ - + ]: 4981620 : bh = page_buffers(page);
2718 : 4981620 : idx = offset >> inode->i_blkbits;
2719 : :
2720 [ - + ]: 4981620 : for (i = 0; i < idx; i++)
2721 : 0 : bh = bh->b_this_page;
2722 : :
2723 [ + ][ + + ]: 4981620 : if (!buffer_mapped(bh) || (buffer_delay(bh)) || buffer_unwritten(bh))
[ + + ]
2724 : : return 0;
2725 : : return 1;
2726 : : }
2727 : :
2728 : 0 : static int ext4_da_write_end(struct file *file,
2729 : : struct address_space *mapping,
2730 : : loff_t pos, unsigned len, unsigned copied,
2731 : : struct page *page, void *fsdata)
2732 : : {
2733 : 6266081 : struct inode *inode = mapping->host;
2734 : : int ret = 0, ret2;
2735 : : handle_t *handle = ext4_journal_current_handle();
2736 : : loff_t new_i_size;
2737 : : unsigned long start, end;
2738 : 6266081 : int write_mode = (int)(unsigned long)fsdata;
2739 : :
2740 [ - + ]: 6266081 : if (write_mode == FALL_BACK_TO_NONDELALLOC)
2741 : 0 : return ext4_write_end(file, mapping, pos,
2742 : : len, copied, page, fsdata);
2743 : :
2744 : : trace_ext4_da_write_end(inode, pos, len, copied);
2745 : 6265716 : start = pos & (PAGE_CACHE_SIZE - 1);
2746 : 6265716 : end = start + copied - 1;
2747 : :
2748 : : /*
2749 : : * generic_write_end() will run mark_inode_dirty() if i_size
2750 : : * changes. So let's piggyback the i_disksize mark_inode_dirty
2751 : : * into that.
2752 : : */
2753 : 6265716 : new_i_size = pos + copied;
2754 [ + + ][ + + ]: 6265716 : if (copied && new_i_size > EXT4_I(inode)->i_disksize) {
2755 [ + + + + ]: 9963958 : if (ext4_has_inline_data(inode) ||
2756 : 4982063 : ext4_da_should_update_i_disksize(page, end)) {
2757 : 2921 : down_write(&EXT4_I(inode)->i_data_sem);
2758 [ + - ]: 2870 : if (new_i_size > EXT4_I(inode)->i_disksize)
2759 : 2870 : EXT4_I(inode)->i_disksize = new_i_size;
2760 : 2870 : up_write(&EXT4_I(inode)->i_data_sem);
2761 : : /* We need to mark inode dirty even if
2762 : : * new_i_size is less that inode->i_size
2763 : : * bu greater than i_disksize.(hint delalloc)
2764 : : */
2765 : 2870 : ext4_mark_inode_dirty(handle, inode);
2766 : : }
2767 : : }
2768 : :
2769 [ + - ][ - + ]: 6266061 : if (write_mode != CONVERT_INLINE_DATA &&
2770 [ # # ]: 0 : ext4_test_inode_state(inode, EXT4_STATE_MAY_INLINE_DATA) &&
2771 : 0 : ext4_has_inline_data(inode))
2772 : 0 : ret2 = ext4_da_write_inline_data_end(inode, pos, len, copied,
2773 : : page);
2774 : : else
2775 : 6266061 : ret2 = generic_write_end(file, mapping, pos, len, copied,
2776 : : page, fsdata);
2777 : :
2778 : : copied = ret2;
2779 [ - + ]: 6261593 : if (ret2 < 0)
2780 : : ret = ret2;
2781 : 6261593 : ret2 = ext4_journal_stop(handle);
2782 [ + + ]: 12532505 : if (!ret)
2783 : : ret = ret2;
2784 : :
2785 [ - + ]: 6266424 : return ret ? ret : copied;
2786 : : }
2787 : :
2788 : 0 : static void ext4_da_invalidatepage(struct page *page, unsigned int offset,
2789 : : unsigned int length)
2790 : : {
2791 : : /*
2792 : : * Drop reserved blocks
2793 : : */
2794 [ - + ]: 1726430 : BUG_ON(!PageLocked(page));
2795 [ + ]: 1726430 : if (!page_has_buffers(page))
2796 : : goto out;
2797 : :
2798 : 1726441 : ext4_da_page_release_reservation(page, offset, length);
2799 : :
2800 : : out:
2801 : 1726354 : ext4_invalidatepage(page, offset, length);
2802 : :
2803 : 1726426 : return;
2804 : : }
2805 : :
2806 : : /*
2807 : : * Force all delayed allocation blocks to be allocated for a given inode.
2808 : : */
2809 : 0 : int ext4_alloc_da_blocks(struct inode *inode)
2810 : : {
2811 : : trace_ext4_alloc_da_blocks(inode);
2812 : :
2813 [ + + ][ - + ]: 155889 : if (!EXT4_I(inode)->i_reserved_data_blocks &&
2814 : 152841 : !EXT4_I(inode)->i_reserved_meta_blocks)
2815 : : return 0;
2816 : :
2817 : : /*
2818 : : * We do something simple for now. The filemap_flush() will
2819 : : * also start triggering a write of the data blocks, which is
2820 : : * not strictly speaking necessary (and for users of
2821 : : * laptop_mode, not even desirable). However, to do otherwise
2822 : : * would require replicating code paths in:
2823 : : *
2824 : : * ext4_writepages() ->
2825 : : * write_cache_pages() ---> (via passed in callback function)
2826 : : * __mpage_da_writepage() -->
2827 : : * mpage_add_bh_to_extent()
2828 : : * mpage_da_map_blocks()
2829 : : *
2830 : : * The problem is that write_cache_pages(), located in
2831 : : * mm/page-writeback.c, marks pages clean in preparation for
2832 : : * doing I/O, which is not desirable if we're not planning on
2833 : : * doing I/O at all.
2834 : : *
2835 : : * We could call write_cache_pages(), and then redirty all of
2836 : : * the pages by calling redirty_page_for_writepage() but that
2837 : : * would be ugly in the extreme. So instead we would need to
2838 : : * replicate parts of the code in the above functions,
2839 : : * simplifying them because we wouldn't actually intend to
2840 : : * write out the pages, but rather only collect contiguous
2841 : : * logical block extents, call the multi-block allocator, and
2842 : : * then update the buffer heads with the block allocations.
2843 : : *
2844 : : * For now, though, we'll cheat by calling filemap_flush(),
2845 : : * which will map the blocks, and start the I/O, but not
2846 : : * actually wait for the I/O to complete.
2847 : : */
2848 : 3048 : return filemap_flush(inode->i_mapping);
2849 : : }
2850 : :
2851 : : /*
2852 : : * bmap() is special. It gets used by applications such as lilo and by
2853 : : * the swapper to find the on-disk block of a specific piece of data.
2854 : : *
2855 : : * Naturally, this is dangerous if the block concerned is still in the
2856 : : * journal. If somebody makes a swapfile on an ext4 data-journaling
2857 : : * filesystem and enables swap, then they may get a nasty shock when the
2858 : : * data getting swapped to that swapfile suddenly gets overwritten by
2859 : : * the original zero's written out previously to the journal and
2860 : : * awaiting writeback in the kernel's buffer cache.
2861 : : *
2862 : : * So, if we see any bmap calls here on a modified, data-journaled file,
2863 : : * take extra steps to flush any blocks which might be in the cache.
2864 : : */
2865 : 0 : static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
2866 : : {
2867 : 123426 : struct inode *inode = mapping->host;
2868 : : journal_t *journal;
2869 : : int err;
2870 : :
2871 : : /*
2872 : : * We can get here for an inline file via the FIBMAP ioctl
2873 : : */
2874 [ + - ]: 123426 : if (ext4_has_inline_data(inode))
2875 : : return 0;
2876 : :
2877 [ - + ][ # # ]: 123426 : if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) &&
2878 : 123426 : test_opt(inode->i_sb, DELALLOC)) {
2879 : : /*
2880 : : * With delalloc we want to sync the file
2881 : : * so that we can make sure we allocate
2882 : : * blocks for file
2883 : : */
2884 : 0 : filemap_write_and_wait(mapping);
2885 : : }
2886 : :
2887 [ + - ][ - + ]: 123426 : if (EXT4_JOURNAL(inode) &&
2888 : : ext4_test_inode_state(inode, EXT4_STATE_JDATA)) {
2889 : : /*
2890 : : * This is a REALLY heavyweight approach, but the use of
2891 : : * bmap on dirty files is expected to be extremely rare:
2892 : : * only if we run lilo or swapon on a freshly made file
2893 : : * do we expect this to happen.
2894 : : *
2895 : : * (bmap requires CAP_SYS_RAWIO so this does not
2896 : : * represent an unprivileged user DOS attack --- we'd be
2897 : : * in trouble if mortal users could trigger this path at
2898 : : * will.)
2899 : : *
2900 : : * NB. EXT4_STATE_JDATA is not set on files other than
2901 : : * regular files. If somebody wants to bmap a directory
2902 : : * or symlink and gets confused because the buffer
2903 : : * hasn't yet been flushed to disk, they deserve
2904 : : * everything they get.
2905 : : */
2906 : :
2907 : : ext4_clear_inode_state(inode, EXT4_STATE_JDATA);
2908 : 0 : journal = EXT4_JOURNAL(inode);
2909 : 0 : jbd2_journal_lock_updates(journal);
2910 : 0 : err = jbd2_journal_flush(journal);
2911 : 0 : jbd2_journal_unlock_updates(journal);
2912 : :
2913 [ # # ]: 0 : if (err)
2914 : : return 0;
2915 : : }
2916 : :
2917 : 123426 : return generic_block_bmap(mapping, block, ext4_get_block);
2918 : : }
2919 : :
2920 : 0 : static int ext4_readpage(struct file *file, struct page *page)
2921 : : {
2922 : : int ret = -EAGAIN;
2923 : 12741 : struct inode *inode = page->mapping->host;
2924 : :
2925 : : trace_ext4_readpage(page);
2926 : :
2927 [ - + ]: 12741 : if (ext4_has_inline_data(inode))
2928 : 0 : ret = ext4_readpage_inline(inode, page);
2929 : :
2930 [ + - ]: 12741 : if (ret == -EAGAIN)
2931 : 12741 : return mpage_readpage(page, ext4_get_block);
2932 : :
2933 : : return ret;
2934 : : }
2935 : :
2936 : : static int
2937 : 0 : ext4_readpages(struct file *file, struct address_space *mapping,
2938 : : struct list_head *pages, unsigned nr_pages)
2939 : : {
2940 : 118639 : struct inode *inode = mapping->host;
2941 : :
2942 : : /* If the file has inline data, no need to do readpages. */
2943 [ + + ]: 118639 : if (ext4_has_inline_data(inode))
2944 : : return 0;
2945 : :
2946 : 118715 : return mpage_readpages(mapping, pages, nr_pages, ext4_get_block);
2947 : : }
2948 : :
2949 : 0 : static void ext4_invalidatepage(struct page *page, unsigned int offset,
2950 : : unsigned int length)
2951 : : {
2952 : : trace_ext4_invalidatepage(page, offset, length);
2953 : :
2954 : : /* No journalling happens on data buffers when this function is used */
2955 [ + + ][ - + ]: 3452842 : WARN_ON(page_has_buffers(page) && buffer_jbd(page_buffers(page)));
[ + ][ - + ]
2956 : :
2957 : 1726427 : block_invalidatepage(page, offset, length);
2958 : 1726418 : }
2959 : :
2960 : 0 : static int __ext4_journalled_invalidatepage(struct page *page,
2961 : : unsigned int offset,
2962 : : unsigned int length)
2963 : : {
2964 : 2618 : journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2965 : :
2966 : : trace_ext4_journalled_invalidatepage(page, offset, length);
2967 : :
2968 : : /*
2969 : : * If it's a full truncate we just forget about the pending dirtying
2970 : : */
2971 [ + - ]: 2618 : if (offset == 0 && length == PAGE_CACHE_SIZE)
2972 : : ClearPageChecked(page);
2973 : :
2974 : 2618 : return jbd2_journal_invalidatepage(journal, page, offset, length);
2975 : : }
2976 : :
2977 : : /* Wrapper for aops... */
2978 : 0 : static void ext4_journalled_invalidatepage(struct page *page,
2979 : : unsigned int offset,
2980 : : unsigned int length)
2981 : : {
2982 [ - + ]: 2618 : WARN_ON(__ext4_journalled_invalidatepage(page, offset, length) < 0);
2983 : 0 : }
2984 : :
2985 : 0 : static int ext4_releasepage(struct page *page, gfp_t wait)
2986 : : {
2987 : 1763653 : journal_t *journal = EXT4_JOURNAL(page->mapping->host);
2988 : :
2989 : : trace_ext4_releasepage(page);
2990 : :
2991 : : /* Page has dirty journalled data -> cannot release */
2992 [ + - ]: 3527290 : if (PageChecked(page))
2993 : : return 0;
2994 [ + - ]: 1763637 : if (journal)
2995 : 1763637 : return jbd2_journal_try_to_free_buffers(journal, page, wait);
2996 : : else
2997 : 0 : return try_to_free_buffers(page);
2998 : : }
2999 : :
3000 : : /*
3001 : : * ext4_get_block used when preparing for a DIO write or buffer write.
3002 : : * We allocate an uinitialized extent if blocks haven't been allocated.
3003 : : * The extent will be converted to initialized after the IO is complete.
3004 : : */
3005 : 0 : int ext4_get_block_write(struct inode *inode, sector_t iblock,
3006 : : struct buffer_head *bh_result, int create)
3007 : : {
3008 : : ext4_debug("ext4_get_block_write: inode %lu, create flag %d\n",
3009 : : inode->i_ino, create);
3010 : 144772 : return _ext4_get_block(inode, iblock, bh_result,
3011 : : EXT4_GET_BLOCKS_IO_CREATE_EXT);
3012 : : }
3013 : :
3014 : 0 : static int ext4_get_block_write_nolock(struct inode *inode, sector_t iblock,
3015 : : struct buffer_head *bh_result, int create)
3016 : : {
3017 : : ext4_debug("ext4_get_block_write_nolock: inode %lu, create flag %d\n",
3018 : : inode->i_ino, create);
3019 : 0 : return _ext4_get_block(inode, iblock, bh_result,
3020 : : EXT4_GET_BLOCKS_NO_LOCK);
3021 : : }
3022 : :
3023 : 0 : static void ext4_end_io_dio(struct kiocb *iocb, loff_t offset,
3024 : : ssize_t size, void *private)
3025 : : {
3026 : 29442 : ext4_io_end_t *io_end = iocb->private;
3027 : :
3028 : : /* if not async direct IO just return */
3029 [ - + ]: 29442 : if (!io_end)
3030 : 0 : return;
3031 : :
3032 : : ext_debug("ext4_end_io_dio(): io_end 0x%p "
3033 : : "for inode %lu, iocb 0x%p, offset %llu, size %zd\n",
3034 : : iocb->private, io_end->inode->i_ino, iocb, offset,
3035 : : size);
3036 : :
3037 : 0 : iocb->private = NULL;
3038 : 0 : io_end->offset = offset;
3039 : 0 : io_end->size = size;
3040 : 0 : ext4_put_io_end(io_end);
3041 : : }
3042 : :
3043 : : /*
3044 : : * For ext4 extent files, ext4 will do direct-io write to holes,
3045 : : * preallocated extents, and those write extend the file, no need to
3046 : : * fall back to buffered IO.
3047 : : *
3048 : : * For holes, we fallocate those blocks, mark them as uninitialized
3049 : : * If those blocks were preallocated, we mark sure they are split, but
3050 : : * still keep the range to write as uninitialized.
3051 : : *
3052 : : * The unwritten extents will be converted to written when DIO is completed.
3053 : : * For async direct IO, since the IO may still pending when return, we
3054 : : * set up an end_io call back function, which will do the conversion
3055 : : * when async direct IO completed.
3056 : : *
3057 : : * If the O_DIRECT write will extend the file then add this inode to the
3058 : : * orphan list. So recovery will truncate it back to the original size
3059 : : * if the machine crashes during the write.
3060 : : *
3061 : : */
3062 : 0 : static ssize_t ext4_ext_direct_IO(int rw, struct kiocb *iocb,
3063 : : const struct iovec *iov, loff_t offset,
3064 : : unsigned long nr_segs)
3065 : : {
3066 : 130650 : struct file *file = iocb->ki_filp;
3067 : 130650 : struct inode *inode = file->f_mapping->host;
3068 : : ssize_t ret;
3069 : : size_t count = iov_length(iov, nr_segs);
3070 : : int overwrite = 0;
3071 : : get_block_t *get_block_func = NULL;
3072 : : int dio_flags = 0;
3073 : 130650 : loff_t final_size = offset + count;
3074 : : ext4_io_end_t *io_end = NULL;
3075 : :
3076 : : /* Use the old path for reads and writes beyond i_size. */
3077 [ + + ][ + + ]: 130650 : if (rw != WRITE || final_size > inode->i_size)
3078 : 101202 : return ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3079 : :
3080 [ - + ]: 29448 : BUG_ON(iocb->private == NULL);
3081 : :
3082 : : /*
3083 : : * Make all waiters for direct IO properly wait also for extent
3084 : : * conversion. This also disallows race between truncate() and
3085 : : * overwrite DIO as i_dio_count needs to be incremented under i_mutex.
3086 : : */
3087 [ + - ]: 29448 : if (rw == WRITE)
3088 : 29448 : atomic_inc(&inode->i_dio_count);
3089 : :
3090 : : /* If we do a overwrite dio, i_mutex locking can be released */
3091 : 29448 : overwrite = *((int *)iocb->private);
3092 : :
3093 [ - + ]: 160098 : if (overwrite) {
3094 : 0 : down_read(&EXT4_I(inode)->i_data_sem);
3095 : 0 : mutex_unlock(&inode->i_mutex);
3096 : : }
3097 : :
3098 : : /*
3099 : : * We could direct write to holes and fallocate.
3100 : : *
3101 : : * Allocated blocks to fill the hole are marked as
3102 : : * uninitialized to prevent parallel buffered read to expose
3103 : : * the stale data before DIO complete the data IO.
3104 : : *
3105 : : * As to previously fallocated extents, ext4 get_block will
3106 : : * just simply mark the buffer mapped but still keep the
3107 : : * extents uninitialized.
3108 : : *
3109 : : * For non AIO case, we will convert those unwritten extents
3110 : : * to written after return back from blockdev_direct_IO.
3111 : : *
3112 : : * For async DIO, the conversion needs to be deferred when the
3113 : : * IO is completed. The ext4 end_io callback function will be
3114 : : * called to take care of the conversion work. Here for async
3115 : : * case, we allocate an io_end structure to hook to the iocb.
3116 : : */
3117 : 29448 : iocb->private = NULL;
3118 : : ext4_inode_aio_set(inode, NULL);
3119 [ - + ]: 29448 : if (!is_sync_kiocb(iocb)) {
3120 : 0 : io_end = ext4_init_io_end(inode, GFP_NOFS);
3121 [ # # ]: 0 : if (!io_end) {
3122 : : ret = -ENOMEM;
3123 : : goto retake_lock;
3124 : : }
3125 : : /*
3126 : : * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
3127 : : */
3128 : 0 : iocb->private = ext4_get_io_end(io_end);
3129 : : /*
3130 : : * we save the io structure for current async direct
3131 : : * IO, so that later ext4_map_blocks() could flag the
3132 : : * io structure whether there is a unwritten extents
3133 : : * needs to be converted when IO is completed.
3134 : : */
3135 : : ext4_inode_aio_set(inode, io_end);
3136 : : }
3137 : :
3138 [ + - ]: 29448 : if (overwrite) {
3139 : : get_block_func = ext4_get_block_write_nolock;
3140 : : } else {
3141 : : get_block_func = ext4_get_block_write;
3142 : : dio_flags = DIO_LOCKING;
3143 : : }
3144 : 29448 : ret = __blockdev_direct_IO(rw, iocb, inode,
3145 : 29448 : inode->i_sb->s_bdev, iov,
3146 : : offset, nr_segs,
3147 : : get_block_func,
3148 : : ext4_end_io_dio,
3149 : : NULL,
3150 : : dio_flags);
3151 : :
3152 : : /*
3153 : : * Put our reference to io_end. This can free the io_end structure e.g.
3154 : : * in sync IO case or in case of error. It can even perform extent
3155 : : * conversion if all bios we submitted finished before we got here.
3156 : : * Note that in that case iocb->private can be already set to NULL
3157 : : * here.
3158 : : */
3159 [ - + ]: 29448 : if (io_end) {
3160 : : ext4_inode_aio_set(inode, NULL);
3161 : 0 : ext4_put_io_end(io_end);
3162 : : /*
3163 : : * When no IO was submitted ext4_end_io_dio() was not
3164 : : * called so we have to put iocb's reference.
3165 : : */
3166 [ # # ][ # # ]: 0 : if (ret <= 0 && ret != -EIOCBQUEUED && iocb->private) {
3167 [ # # ]: 0 : WARN_ON(iocb->private != io_end);
3168 [ # # ]: 0 : WARN_ON(io_end->flag & EXT4_IO_END_UNWRITTEN);
3169 : 0 : ext4_put_io_end(io_end);
3170 : 0 : iocb->private = NULL;
3171 : : }
3172 : : }
3173 [ + + ][ - + ]: 29448 : if (ret > 0 && !overwrite && ext4_test_inode_state(inode,
3174 : : EXT4_STATE_DIO_UNWRITTEN)) {
3175 : : int err;
3176 : : /*
3177 : : * for non AIO case, since the IO is already
3178 : : * completed, we could do the conversion right here
3179 : : */
3180 : 0 : err = ext4_convert_unwritten_extents(NULL, inode,
3181 : : offset, ret);
3182 [ # # ]: 0 : if (err < 0)
3183 : : ret = err;
3184 : : ext4_clear_inode_state(inode, EXT4_STATE_DIO_UNWRITTEN);
3185 : : }
3186 : :
3187 : : retake_lock:
3188 [ + - ]: 29448 : if (rw == WRITE)
3189 : 29448 : inode_dio_done(inode);
3190 : : /* take i_mutex locking again if we do a ovewrite dio */
3191 [ - + ]: 29448 : if (overwrite) {
3192 : 0 : up_read(&EXT4_I(inode)->i_data_sem);
3193 : 0 : mutex_lock(&inode->i_mutex);
3194 : : }
3195 : :
3196 : 29448 : return ret;
3197 : : }
3198 : :
3199 : 0 : static ssize_t ext4_direct_IO(int rw, struct kiocb *iocb,
3200 : : const struct iovec *iov, loff_t offset,
3201 : : unsigned long nr_segs)
3202 : : {
3203 : 130650 : struct file *file = iocb->ki_filp;
3204 : 130650 : struct inode *inode = file->f_mapping->host;
3205 : : ssize_t ret;
3206 : :
3207 : : /*
3208 : : * If we are doing data journalling we don't support O_DIRECT
3209 : : */
3210 [ + - ]: 130650 : if (ext4_should_journal_data(inode))
3211 : : return 0;
3212 : :
3213 : : /* Let buffer I/O handle the inline data case. */
3214 [ + - ]: 130650 : if (ext4_has_inline_data(inode))
3215 : : return 0;
3216 : :
3217 : : trace_ext4_direct_IO_enter(inode, offset, iov_length(iov, nr_segs), rw);
3218 [ + - ]: 130650 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3219 : 130650 : ret = ext4_ext_direct_IO(rw, iocb, iov, offset, nr_segs);
3220 : : else
3221 : 130650 : ret = ext4_ind_direct_IO(rw, iocb, iov, offset, nr_segs);
3222 : : trace_ext4_direct_IO_exit(inode, offset,
3223 : : iov_length(iov, nr_segs), rw, ret);
3224 : 130650 : return ret;
3225 : : }
3226 : :
3227 : : /*
3228 : : * Pages can be marked dirty completely asynchronously from ext4's journalling
3229 : : * activity. By filemap_sync_pte(), try_to_unmap_one(), etc. We cannot do
3230 : : * much here because ->set_page_dirty is called under VFS locks. The page is
3231 : : * not necessarily locked.
3232 : : *
3233 : : * We cannot just dirty the page and leave attached buffers clean, because the
3234 : : * buffers' dirty state is "definitive". We cannot just set the buffers dirty
3235 : : * or jbddirty because all the journalling code will explode.
3236 : : *
3237 : : * So what we do is to mark the page "pending dirty" and next time writepage
3238 : : * is called, propagate that into the buffers appropriately.
3239 : : */
3240 : 0 : static int ext4_journalled_set_page_dirty(struct page *page)
3241 : : {
3242 : : SetPageChecked(page);
3243 : 0 : return __set_page_dirty_nobuffers(page);
3244 : : }
3245 : :
3246 : : static const struct address_space_operations ext4_aops = {
3247 : : .readpage = ext4_readpage,
3248 : : .readpages = ext4_readpages,
3249 : : .writepage = ext4_writepage,
3250 : : .writepages = ext4_writepages,
3251 : : .write_begin = ext4_write_begin,
3252 : : .write_end = ext4_write_end,
3253 : : .bmap = ext4_bmap,
3254 : : .invalidatepage = ext4_invalidatepage,
3255 : : .releasepage = ext4_releasepage,
3256 : : .direct_IO = ext4_direct_IO,
3257 : : .migratepage = buffer_migrate_page,
3258 : : .is_partially_uptodate = block_is_partially_uptodate,
3259 : : .error_remove_page = generic_error_remove_page,
3260 : : };
3261 : :
3262 : : static const struct address_space_operations ext4_journalled_aops = {
3263 : : .readpage = ext4_readpage,
3264 : : .readpages = ext4_readpages,
3265 : : .writepage = ext4_writepage,
3266 : : .writepages = ext4_writepages,
3267 : : .write_begin = ext4_write_begin,
3268 : : .write_end = ext4_journalled_write_end,
3269 : : .set_page_dirty = ext4_journalled_set_page_dirty,
3270 : : .bmap = ext4_bmap,
3271 : : .invalidatepage = ext4_journalled_invalidatepage,
3272 : : .releasepage = ext4_releasepage,
3273 : : .direct_IO = ext4_direct_IO,
3274 : : .is_partially_uptodate = block_is_partially_uptodate,
3275 : : .error_remove_page = generic_error_remove_page,
3276 : : };
3277 : :
3278 : : static const struct address_space_operations ext4_da_aops = {
3279 : : .readpage = ext4_readpage,
3280 : : .readpages = ext4_readpages,
3281 : : .writepage = ext4_writepage,
3282 : : .writepages = ext4_writepages,
3283 : : .write_begin = ext4_da_write_begin,
3284 : : .write_end = ext4_da_write_end,
3285 : : .bmap = ext4_bmap,
3286 : : .invalidatepage = ext4_da_invalidatepage,
3287 : : .releasepage = ext4_releasepage,
3288 : : .direct_IO = ext4_direct_IO,
3289 : : .migratepage = buffer_migrate_page,
3290 : : .is_partially_uptodate = block_is_partially_uptodate,
3291 : : .error_remove_page = generic_error_remove_page,
3292 : : };
3293 : :
3294 : 0 : void ext4_set_aops(struct inode *inode)
3295 : : {
3296 [ + - + - ]: 196474 : switch (ext4_inode_journal_mode(inode)) {
3297 : : case EXT4_INODE_ORDERED_DATA_MODE:
3298 : : ext4_set_inode_state(inode, EXT4_STATE_ORDERED_MODE);
3299 : : break;
3300 : : case EXT4_INODE_WRITEBACK_DATA_MODE:
3301 : : ext4_clear_inode_state(inode, EXT4_STATE_ORDERED_MODE);
3302 : : break;
3303 : : case EXT4_INODE_JOURNAL_DATA_MODE:
3304 : 2961 : inode->i_mapping->a_ops = &ext4_journalled_aops;
3305 : 2961 : return;
3306 : : default:
3307 : 0 : BUG();
3308 : : }
3309 [ + - ]: 389893 : if (test_opt(inode->i_sb, DELALLOC))
3310 : 193419 : inode->i_mapping->a_ops = &ext4_da_aops;
3311 : : else
3312 : 0 : inode->i_mapping->a_ops = &ext4_aops;
3313 : : }
3314 : :
3315 : : /*
3316 : : * ext4_block_truncate_page() zeroes out a mapping from file offset `from'
3317 : : * up to the end of the block which corresponds to `from'.
3318 : : * This required during truncate. We need to physically zero the tail end
3319 : : * of that block so it doesn't yield old data if the file is later grown.
3320 : : */
3321 : 0 : int ext4_block_truncate_page(handle_t *handle,
3322 : : struct address_space *mapping, loff_t from)
3323 : : {
3324 : 21088 : unsigned offset = from & (PAGE_CACHE_SIZE-1);
3325 : : unsigned length;
3326 : : unsigned blocksize;
3327 : 21088 : struct inode *inode = mapping->host;
3328 : :
3329 : 21088 : blocksize = inode->i_sb->s_blocksize;
3330 : 21088 : length = blocksize - (offset & (blocksize - 1));
3331 : :
3332 : 21088 : return ext4_block_zero_page_range(handle, mapping, from, length);
3333 : : }
3334 : :
3335 : : /*
3336 : : * ext4_block_zero_page_range() zeros out a mapping of length 'length'
3337 : : * starting from file offset 'from'. The range to be zero'd must
3338 : : * be contained with in one block. If the specified range exceeds
3339 : : * the end of the block it will be shortened to end of the block
3340 : : * that cooresponds to 'from'
3341 : : */
3342 : 0 : int ext4_block_zero_page_range(handle_t *handle,
3343 : 21088 : struct address_space *mapping, loff_t from, loff_t length)
3344 : : {
3345 : 21088 : ext4_fsblk_t index = from >> PAGE_CACHE_SHIFT;
3346 : 21088 : unsigned offset = from & (PAGE_CACHE_SIZE-1);
3347 : : unsigned blocksize, max, pos;
3348 : : ext4_lblk_t iblock;
3349 : 21088 : struct inode *inode = mapping->host;
3350 : : struct buffer_head *bh;
3351 : : struct page *page;
3352 : : int err = 0;
3353 : :
3354 : 21088 : page = find_or_create_page(mapping, from >> PAGE_CACHE_SHIFT,
3355 : : mapping_gfp_mask(mapping) & ~__GFP_FS);
3356 [ + - ]: 21088 : if (!page)
3357 : : return -ENOMEM;
3358 : :
3359 : 21088 : blocksize = inode->i_sb->s_blocksize;
3360 : 21088 : max = blocksize - (offset & (blocksize - 1));
3361 : :
3362 : : /*
3363 : : * correct length if it does not fall between
3364 : : * 'from' and the end of the block
3365 : : */
3366 [ + - ][ - + ]: 21088 : if (length > max || length < 0)
3367 : : length = max;
3368 : :
3369 : 21088 : iblock = index << (PAGE_CACHE_SHIFT - inode->i_sb->s_blocksize_bits);
3370 : :
3371 [ + + ]: 21088 : if (!page_has_buffers(page))
3372 : 5586 : create_empty_buffers(page, blocksize, 0);
3373 : :
3374 : : /* Find the buffer that contains "offset" */
3375 [ - + ]: 21088 : bh = page_buffers(page);
3376 : : pos = blocksize;
3377 [ - + ]: 21088 : while (offset >= pos) {
3378 : 0 : bh = bh->b_this_page;
3379 : 0 : iblock++;
3380 : 0 : pos += blocksize;
3381 : : }
3382 [ + + ]: 21088 : if (buffer_freed(bh)) {
3383 : : BUFFER_TRACE(bh, "freed: skip");
3384 : : goto unlock;
3385 : : }
3386 [ + + ]: 21087 : if (!buffer_mapped(bh)) {
3387 : : BUFFER_TRACE(bh, "unmapped");
3388 : 5662 : ext4_get_block(inode, iblock, bh, 0);
3389 : : /* unmapped? It's a hole - nothing to do */
3390 [ + ]: 5662 : if (!buffer_mapped(bh)) {
3391 : : BUFFER_TRACE(bh, "still unmapped");
3392 : : goto unlock;
3393 : : }
3394 : : }
3395 : :
3396 : : /* Ok, it's mapped. Make sure it's up-to-date */
3397 [ + - ]: 15426 : if (PageUptodate(page))
3398 : 15426 : set_buffer_uptodate(bh);
3399 : :
3400 [ - + ]: 15426 : if (!buffer_uptodate(bh)) {
3401 : : err = -EIO;
3402 : 0 : ll_rw_block(READ, 1, &bh);
3403 : 0 : wait_on_buffer(bh);
3404 : : /* Uhhuh. Read error. Complain and punt. */
3405 [ # # ]: 0 : if (!buffer_uptodate(bh))
3406 : : goto unlock;
3407 : : }
3408 [ - + ]: 15426 : if (ext4_should_journal_data(inode)) {
3409 : : BUFFER_TRACE(bh, "get write access");
3410 : 0 : err = ext4_journal_get_write_access(handle, bh);
3411 [ # # ]: 0 : if (err)
3412 : : goto unlock;
3413 : : }
3414 : 15426 : zero_user(page, offset, length);
3415 : : BUFFER_TRACE(bh, "zeroed end of block");
3416 : :
3417 [ - + ]: 15426 : if (ext4_should_journal_data(inode)) {
3418 : 0 : err = ext4_handle_dirty_metadata(handle, inode, bh);
3419 : : } else {
3420 : : err = 0;
3421 : 15426 : mark_buffer_dirty(bh);
3422 [ + - ]: 15426 : if (ext4_test_inode_state(inode, EXT4_STATE_ORDERED_MODE))
3423 : : err = ext4_jbd2_file_inode(handle, inode);
3424 : : }
3425 : :
3426 : : unlock:
3427 : 0 : unlock_page(page);
3428 : 21088 : page_cache_release(page);
3429 : 21088 : return err;
3430 : : }
3431 : :
3432 : 0 : int ext4_zero_partial_blocks(handle_t *handle, struct inode *inode,
3433 : : loff_t lstart, loff_t length)
3434 : : {
3435 : 36 : struct super_block *sb = inode->i_sb;
3436 : 36 : struct address_space *mapping = inode->i_mapping;
3437 : : unsigned partial_start, partial_end;
3438 : : ext4_fsblk_t start, end;
3439 : 36 : loff_t byte_end = (lstart + length - 1);
3440 : : int err = 0;
3441 : :
3442 : 36 : partial_start = lstart & (sb->s_blocksize - 1);
3443 : 36 : partial_end = byte_end & (sb->s_blocksize - 1);
3444 : :
3445 : 36 : start = lstart >> sb->s_blocksize_bits;
3446 : 36 : end = byte_end >> sb->s_blocksize_bits;
3447 : :
3448 : : /* Handle partial zero within the single block */
3449 [ - + ][ # # ]: 36 : if (start == end &&
3450 [ # # ]: 0 : (partial_start || (partial_end != sb->s_blocksize - 1))) {
3451 : 0 : err = ext4_block_zero_page_range(handle, mapping,
3452 : : lstart, length);
3453 : 0 : return err;
3454 : : }
3455 : : /* Handle partial zero out on the start of the range */
3456 [ - + ]: 72 : if (partial_start) {
3457 : 0 : err = ext4_block_zero_page_range(handle, mapping,
3458 : : lstart, sb->s_blocksize);
3459 [ # # ]: 0 : if (err)
3460 : : return err;
3461 : : }
3462 : : /* Handle partial zero out on the end of the range */
3463 [ - + ]: 36 : if (partial_end != sb->s_blocksize - 1)
3464 : 0 : err = ext4_block_zero_page_range(handle, mapping,
3465 : : byte_end - partial_end,
3466 : 0 : partial_end + 1);
3467 : 36 : return err;
3468 : : }
3469 : :
3470 : 0 : int ext4_can_truncate(struct inode *inode)
3471 : : {
3472 [ + + ]: 77533 : if (S_ISREG(inode->i_mode))
3473 : : return 1;
3474 [ + + ]: 37780 : if (S_ISDIR(inode->i_mode))
3475 : : return 1;
3476 [ + - ]: 2618 : if (S_ISLNK(inode->i_mode))
3477 : 2618 : return !ext4_inode_is_fast_symlink(inode);
3478 : : return 0;
3479 : : }
3480 : :
3481 : : /*
3482 : : * ext4_punch_hole: punches a hole in a file by releaseing the blocks
3483 : : * associated with the given offset and length
3484 : : *
3485 : : * @inode: File inode
3486 : : * @offset: The offset where the hole will begin
3487 : : * @len: The length of the hole
3488 : : *
3489 : : * Returns: 0 on success or negative on failure
3490 : : */
3491 : :
3492 : 0 : int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
3493 : : {
3494 : 36 : struct super_block *sb = inode->i_sb;
3495 : : ext4_lblk_t first_block, stop_block;
3496 : 36 : struct address_space *mapping = inode->i_mapping;
3497 : : loff_t first_block_offset, last_block_offset;
3498 : : handle_t *handle;
3499 : : unsigned int credits;
3500 : : int ret = 0;
3501 : :
3502 [ + - ]: 36 : if (!S_ISREG(inode->i_mode))
3503 : : return -EOPNOTSUPP;
3504 : :
3505 : : trace_ext4_punch_hole(inode, offset, length);
3506 : :
3507 : : /*
3508 : : * Write out all dirty pages to avoid race conditions
3509 : : * Then release them.
3510 : : */
3511 [ + + ][ + + ]: 36 : if (mapping->nrpages && mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
3512 : 14 : ret = filemap_write_and_wait_range(mapping, offset,
3513 : 14 : offset + length - 1);
3514 [ + - ]: 14 : if (ret)
3515 : : return ret;
3516 : : }
3517 : :
3518 : 36 : mutex_lock(&inode->i_mutex);
3519 : : /* It's not possible punch hole on append only file */
3520 [ + - ]: 36 : if (IS_APPEND(inode) || IS_IMMUTABLE(inode)) {
3521 : : ret = -EPERM;
3522 : : goto out_mutex;
3523 : : }
3524 [ + - ]: 36 : if (IS_SWAPFILE(inode)) {
3525 : : ret = -ETXTBSY;
3526 : : goto out_mutex;
3527 : : }
3528 : :
3529 : : /* No need to punch hole beyond i_size */
3530 [ + - ]: 36 : if (offset >= inode->i_size)
3531 : : goto out_mutex;
3532 : :
3533 : : /*
3534 : : * If the hole extends beyond i_size, set the hole
3535 : : * to end after the page that contains i_size
3536 : : */
3537 [ - + ]: 36 : if (offset + length > inode->i_size) {
3538 : 0 : length = inode->i_size +
3539 : 0 : PAGE_CACHE_SIZE - (inode->i_size & (PAGE_CACHE_SIZE - 1)) -
3540 : : offset;
3541 : : }
3542 : :
3543 [ + - ][ - + ]: 36 : if (offset & (sb->s_blocksize - 1) ||
3544 : 36 : (offset + length) & (sb->s_blocksize - 1)) {
3545 : : /*
3546 : : * Attach jinode to inode for jbd2 if we do any zeroing of
3547 : : * partial block
3548 : : */
3549 : 0 : ret = ext4_inode_attach_jinode(inode);
3550 [ # # ]: 0 : if (ret < 0)
3551 : : goto out_mutex;
3552 : :
3553 : : }
3554 : :
3555 : 36 : first_block_offset = round_up(offset, sb->s_blocksize);
3556 : 36 : last_block_offset = round_down((offset + length), sb->s_blocksize) - 1;
3557 : :
3558 : : /* Now release the pages and zero block aligned part of pages*/
3559 [ + - ]: 36 : if (last_block_offset > first_block_offset)
3560 : 36 : truncate_pagecache_range(inode, first_block_offset,
3561 : : last_block_offset);
3562 : :
3563 : : /* Wait all existing dio workers, newcomers will block on i_mutex */
3564 : : ext4_inode_block_unlocked_dio(inode);
3565 : 36 : inode_dio_wait(inode);
3566 : :
3567 [ + - ]: 72 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3568 : 36 : credits = ext4_writepage_trans_blocks(inode);
3569 : : else
3570 : : credits = ext4_blocks_for_truncate(inode);
3571 : 36 : handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3572 [ - + ]: 36 : if (IS_ERR(handle)) {
3573 : : ret = PTR_ERR(handle);
3574 [ # # ]: 0 : ext4_std_error(sb, ret);
3575 : : goto out_dio;
3576 : : }
3577 : :
3578 : 36 : ret = ext4_zero_partial_blocks(handle, inode, offset,
3579 : : length);
3580 [ + - ]: 36 : if (ret)
3581 : : goto out_stop;
3582 : :
3583 : 72 : first_block = (offset + sb->s_blocksize - 1) >>
3584 : 36 : EXT4_BLOCK_SIZE_BITS(sb);
3585 : 36 : stop_block = (offset + length) >> EXT4_BLOCK_SIZE_BITS(sb);
3586 : :
3587 : : /* If there are no blocks to remove, return now */
3588 [ + - ]: 36 : if (first_block >= stop_block)
3589 : : goto out_stop;
3590 : :
3591 : 36 : down_write(&EXT4_I(inode)->i_data_sem);
3592 : 36 : ext4_discard_preallocations(inode);
3593 : :
3594 : 36 : ret = ext4_es_remove_extent(inode, first_block,
3595 : : stop_block - first_block);
3596 [ - + ]: 36 : if (ret) {
3597 : 0 : up_write(&EXT4_I(inode)->i_data_sem);
3598 : 0 : goto out_stop;
3599 : : }
3600 : :
3601 [ + - ]: 36 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3602 : 36 : ret = ext4_ext_remove_space(inode, first_block,
3603 : : stop_block - 1);
3604 : : else
3605 : 0 : ret = ext4_free_hole_blocks(handle, inode, first_block,
3606 : : stop_block);
3607 : :
3608 : 36 : ext4_discard_preallocations(inode);
3609 : 36 : up_write(&EXT4_I(inode)->i_data_sem);
3610 [ + - ][ - + ]: 36 : if (IS_SYNC(inode))
3611 : : ext4_handle_sync(handle);
3612 : 36 : inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3613 : 36 : ext4_mark_inode_dirty(handle, inode);
3614 : : out_stop:
3615 : 36 : ext4_journal_stop(handle);
3616 : : out_dio:
3617 : : ext4_inode_resume_unlocked_dio(inode);
3618 : : out_mutex:
3619 : 36 : mutex_unlock(&inode->i_mutex);
3620 : 36 : return ret;
3621 : : }
3622 : :
3623 : 0 : int ext4_inode_attach_jinode(struct inode *inode)
3624 : : {
3625 : : struct ext4_inode_info *ei = EXT4_I(inode);
3626 : : struct jbd2_inode *jinode;
3627 : :
3628 [ + + ][ + + ]: 1500831 : if (ei->jinode || !EXT4_SB(inode->i_sb)->s_journal)
3629 : : return 0;
3630 : :
3631 : : jinode = jbd2_alloc_inode(GFP_KERNEL);
3632 : : spin_lock(&inode->i_lock);
3633 [ + ]: 185506 : if (!ei->jinode) {
3634 [ - + ]: 185509 : if (!jinode) {
3635 : : spin_unlock(&inode->i_lock);
3636 : 0 : return -ENOMEM;
3637 : : }
3638 : 185509 : ei->jinode = jinode;
3639 : 185509 : jbd2_journal_init_jbd_inode(ei->jinode, inode);
3640 : : jinode = NULL;
3641 : : }
3642 : : spin_unlock(&inode->i_lock);
3643 [ - + ]: 185632 : if (unlikely(jinode != NULL))
3644 : : jbd2_free_inode(jinode);
3645 : : return 0;
3646 : : }
3647 : :
3648 : : /*
3649 : : * ext4_truncate()
3650 : : *
3651 : : * We block out ext4_get_block() block instantiations across the entire
3652 : : * transaction, and VFS/VM ensures that ext4_truncate() cannot run
3653 : : * simultaneously on behalf of the same inode.
3654 : : *
3655 : : * As we work through the truncate and commit bits of it to the journal there
3656 : : * is one core, guiding principle: the file's tree must always be consistent on
3657 : : * disk. We must be able to restart the truncate after a crash.
3658 : : *
3659 : : * The file's tree may be transiently inconsistent in memory (although it
3660 : : * probably isn't), but whenever we close off and commit a journal transaction,
3661 : : * the contents of (the filesystem + the journal) must be consistent and
3662 : : * restartable. It's pretty simple, really: bottom up, right to left (although
3663 : : * left-to-right works OK too).
3664 : : *
3665 : : * Note that at recovery time, journal replay occurs *before* the restart of
3666 : : * truncate against the orphan inode list.
3667 : : *
3668 : : * The committed inode has the new, desired i_size (which is the same as
3669 : : * i_disksize in this case). After a crash, ext4_orphan_cleanup() will see
3670 : : * that this inode's truncate did not complete and it will again call
3671 : : * ext4_truncate() to have another go. So there will be instantiated blocks
3672 : : * to the right of the truncation point in a crashed ext4 filesystem. But
3673 : : * that's fine - as long as they are linked from the inode, the post-crash
3674 : : * ext4_truncate() run will find them and release them.
3675 : : */
3676 : 0 : void ext4_truncate(struct inode *inode)
3677 : : {
3678 : : struct ext4_inode_info *ei = EXT4_I(inode);
3679 : : unsigned int credits;
3680 : : handle_t *handle;
3681 : 77534 : struct address_space *mapping = inode->i_mapping;
3682 : :
3683 : : /*
3684 : : * There is a possibility that we're either freeing the inode
3685 : : * or it completely new indode. In those cases we might not
3686 : : * have i_mutex locked because it's not necessary.
3687 : : */
3688 [ + + ]: 77534 : if (!(inode->i_state & (I_NEW|I_FREEING)))
3689 [ - + ]: 37393 : WARN_ON(!mutex_is_locked(&inode->i_mutex));
3690 : : trace_ext4_truncate_enter(inode);
3691 : :
3692 [ + - ]: 77533 : if (!ext4_can_truncate(inode))
3693 : : return;
3694 : :
3695 : : ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
3696 : :
3697 [ + + ][ + - ]: 77533 : if (inode->i_size == 0 && !test_opt(inode->i_sb, NO_AUTO_DA_ALLOC))
3698 : : ext4_set_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE);
3699 : :
3700 [ - + ]: 77532 : if (ext4_has_inline_data(inode)) {
3701 : 0 : int has_inline = 1;
3702 : :
3703 : 0 : ext4_inline_data_truncate(inode, &has_inline);
3704 [ # # ]: 0 : if (has_inline)
3705 : 0 : return;
3706 : : }
3707 : :
3708 : : /* If we zero-out tail of the page, we have to create jinode for jbd2 */
3709 [ + + ]: 77533 : if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
3710 [ + ]: 21087 : if (ext4_inode_attach_jinode(inode) < 0)
3711 : : return;
3712 : : }
3713 : :
3714 [ + - ]: 155067 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3715 : 77533 : credits = ext4_writepage_trans_blocks(inode);
3716 : : else
3717 : : credits = ext4_blocks_for_truncate(inode);
3718 : :
3719 : 77531 : handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
3720 [ - + ]: 77533 : if (IS_ERR(handle)) {
3721 [ # # ]: 0 : ext4_std_error(inode->i_sb, PTR_ERR(handle));
3722 : : return;
3723 : : }
3724 : :
3725 [ + + ]: 77533 : if (inode->i_size & (inode->i_sb->s_blocksize - 1))
3726 : 21088 : ext4_block_truncate_page(handle, mapping, inode->i_size);
3727 : :
3728 : : /*
3729 : : * We add the inode to the orphan list, so that if this
3730 : : * truncate spans multiple transactions, and we crash, we will
3731 : : * resume the truncate when the filesystem recovers. It also
3732 : : * marks the inode dirty, to catch the new size.
3733 : : *
3734 : : * Implication: the file must always be in a sane, consistent
3735 : : * truncatable state while each transaction commits.
3736 : : */
3737 [ + - ]: 77532 : if (ext4_orphan_add(handle, inode))
3738 : : goto out_stop;
3739 : :
3740 : 77534 : down_write(&EXT4_I(inode)->i_data_sem);
3741 : :
3742 : 77534 : ext4_discard_preallocations(inode);
3743 : :
3744 [ + - ]: 77534 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
3745 : 77534 : ext4_ext_truncate(handle, inode);
3746 : : else
3747 : 0 : ext4_ind_truncate(handle, inode);
3748 : :
3749 : 77481 : up_write(&ei->i_data_sem);
3750 : :
3751 [ + ][ + + ]: 77530 : if (IS_SYNC(inode))
3752 : : ext4_handle_sync(handle);
3753 : :
3754 : : out_stop:
3755 : : /*
3756 : : * If this was a simple ftruncate() and the file will remain alive,
3757 : : * then we need to clear up the orphan record which we created above.
3758 : : * However, if this was a real unlink then we were called by
3759 : : * ext4_delete_inode(), and we allow that function to clean up the
3760 : : * orphan info for us.
3761 : : */
3762 [ + + ]: 77530 : if (inode->i_nlink)
3763 : 37392 : ext4_orphan_del(handle, inode);
3764 : :
3765 : 77482 : inode->i_mtime = inode->i_ctime = ext4_current_time(inode);
3766 : 77482 : ext4_mark_inode_dirty(handle, inode);
3767 : 77514 : ext4_journal_stop(handle);
3768 : :
3769 : : trace_ext4_truncate_exit(inode);
3770 : : }
3771 : :
3772 : : /*
3773 : : * ext4_get_inode_loc returns with an extra refcount against the inode's
3774 : : * underlying buffer_head on success. If 'in_mem' is true, we have all
3775 : : * data in memory that is needed to recreate the on-disk version of this
3776 : : * inode.
3777 : : */
3778 : 0 : static int __ext4_get_inode_loc(struct inode *inode,
3779 : : struct ext4_iloc *iloc, int in_mem)
3780 : : {
3781 : : struct ext4_group_desc *gdp;
3782 : : struct buffer_head *bh;
3783 : 42099712 : struct super_block *sb = inode->i_sb;
3784 : : ext4_fsblk_t block;
3785 : : int inodes_per_block, inode_offset;
3786 : :
3787 : 8420687 : iloc->bh = NULL;
3788 [ + ]: 8420687 : if (!ext4_valid_inum(sb, inode->i_ino))
3789 : : return -EIO;
3790 : :
3791 : 16844802 : iloc->block_group = (inode->i_ino - 1) / EXT4_INODES_PER_GROUP(sb);
3792 : 8422401 : gdp = ext4_get_group_desc(sb, iloc->block_group, NULL);
3793 [ + ]: 8411551 : if (!gdp)
3794 : : return -EIO;
3795 : :
3796 : : /*
3797 : : * Figure out the offset within the block group inode table
3798 : : */
3799 : 8412465 : inodes_per_block = EXT4_SB(sb)->s_inodes_per_block;
3800 : 16824930 : inode_offset = ((inode->i_ino - 1) %
3801 : 8412465 : EXT4_INODES_PER_GROUP(sb));
3802 : 8412465 : block = ext4_inode_table(sb, gdp) + (inode_offset / inodes_per_block);
3803 : 16836870 : iloc->offset = (inode_offset % inodes_per_block) * EXT4_INODE_SIZE(sb);
3804 : :
3805 : : bh = sb_getblk(sb, block);
3806 [ + ]: 8423931 : if (unlikely(!bh))
3807 : : return -ENOMEM;
3808 [ + + ]: 8424229 : if (!buffer_uptodate(bh)) {
3809 : : lock_buffer(bh);
3810 : :
3811 : : /*
3812 : : * If the buffer has the write error flag, we have failed
3813 : : * to write out another inode in the same block. In this
3814 : : * case, we don't have to read the block because we may
3815 : : * read the old inode data successfully.
3816 : : */
3817 [ - + ][ # # ]: 615 : if (buffer_write_io_error(bh) && !buffer_uptodate(bh))
3818 : : set_buffer_uptodate(bh);
3819 : :
3820 [ + + ]: 615 : if (buffer_uptodate(bh)) {
3821 : : /* someone brought it uptodate while we waited */
3822 : 38 : unlock_buffer(bh);
3823 : 38 : goto has_buffer;
3824 : : }
3825 : :
3826 : : /*
3827 : : * If we have all information of the inode in memory and this
3828 : : * is the only valid inode in the block, we need not read the
3829 : : * block.
3830 : : */
3831 [ + + ]: 577 : if (in_mem) {
3832 : : struct buffer_head *bitmap_bh;
3833 : : int i, start;
3834 : :
3835 : 410 : start = inode_offset & ~(inodes_per_block - 1);
3836 : :
3837 : : /* Is the inode bitmap in cache? */
3838 : 410 : bitmap_bh = sb_getblk(sb, ext4_inode_bitmap(sb, gdp));
3839 [ + - ]: 410 : if (unlikely(!bitmap_bh))
3840 : : goto make_io;
3841 : :
3842 : : /*
3843 : : * If the inode bitmap isn't in cache then the
3844 : : * optimisation may end up performing two reads instead
3845 : : * of one, so skip it.
3846 : : */
3847 [ + + ]: 410 : if (!buffer_uptodate(bitmap_bh)) {
3848 : : brelse(bitmap_bh);
3849 : : goto make_io;
3850 : : }
3851 [ + + ]: 6421 : for (i = start; i < start + inodes_per_block; i++) {
3852 [ + + ]: 6045 : if (i == inode_offset)
3853 : 382 : continue;
3854 [ + + ]: 5663 : if (ext4_test_bit(i, bitmap_bh->b_data))
3855 : : break;
3856 : : }
3857 : : brelse(bitmap_bh);
3858 [ + + ]: 399 : if (i == start + inodes_per_block) {
3859 : : /* all other inodes are free, so skip I/O */
3860 [ + - ]: 376 : memset(bh->b_data, 0, bh->b_size);
3861 : : set_buffer_uptodate(bh);
3862 : 376 : unlock_buffer(bh);
3863 : 376 : goto has_buffer;
3864 : : }
3865 : : }
3866 : :
3867 : : make_io:
3868 : : /*
3869 : : * If we need to do any I/O, try to pre-readahead extra
3870 : : * blocks from the inode table.
3871 : : */
3872 [ + - ]: 201 : if (EXT4_SB(sb)->s_inode_readahead_blks) {
3873 : : ext4_fsblk_t b, end, table;
3874 : : unsigned num;
3875 : : __u32 ra_blks = EXT4_SB(sb)->s_inode_readahead_blks;
3876 : :
3877 : 201 : table = ext4_inode_table(sb, gdp);
3878 : : /* s_inode_readahead_blks is always a power of 2 */
3879 : 201 : b = block & ~((ext4_fsblk_t) ra_blks - 1);
3880 [ + + ]: 8420888 : if (table > b)
3881 : : b = table;
3882 : 201 : end = b + ra_blks;
3883 : 201 : num = EXT4_INODES_PER_GROUP(sb);
3884 [ + - ]: 201 : if (ext4_has_group_desc_csum(sb))
3885 : 201 : num -= ext4_itable_unused_count(sb, gdp);
3886 : 201 : table += num / inodes_per_block;
3887 [ + + ]: 201 : if (end > table)
3888 : : end = table;
3889 [ + + ]: 6678 : while (b <= end)
3890 : 6477 : sb_breadahead(sb, b++);
3891 : : }
3892 : :
3893 : : /*
3894 : : * There are other valid inodes in the buffer, this inode
3895 : : * has in-inode xattrs, or we don't have this inode in memory.
3896 : : * Read the block from disk.
3897 : : */
3898 : : trace_ext4_load_inode(inode);
3899 : : get_bh(bh);
3900 : 201 : bh->b_end_io = end_buffer_read_sync;
3901 : 201 : submit_bh(READ | REQ_META | REQ_PRIO, bh);
3902 : : wait_on_buffer(bh);
3903 [ - ]: 0 : if (!buffer_uptodate(bh)) {
3904 : 0 : EXT4_ERROR_INODE_BLOCK(inode, block,
3905 : : "unable to read itable block");
3906 : : brelse(bh);
3907 : : return -EIO;
3908 : : }
3909 : : }
3910 : : has_buffer:
3911 : 8421293 : iloc->bh = bh;
3912 : 8421293 : return 0;
3913 : : }
3914 : :
3915 : 0 : int ext4_get_inode_loc(struct inode *inode, struct ext4_iloc *iloc)
3916 : : {
3917 : : /* We have all inode data except xattrs in memory here. */
3918 : 8412188 : return __ext4_get_inode_loc(inode, iloc,
3919 : : !ext4_test_inode_state(inode, EXT4_STATE_XATTR));
3920 : : }
3921 : :
3922 : 0 : void ext4_set_inode_flags(struct inode *inode)
3923 : : {
3924 : 280565 : unsigned int flags = EXT4_I(inode)->i_flags;
3925 : : unsigned int new_fl = 0;
3926 : :
3927 [ - + ]: 280565 : if (flags & EXT4_SYNC_FL)
3928 : : new_fl |= S_SYNC;
3929 [ + + ]: 280565 : if (flags & EXT4_APPEND_FL)
3930 : 30 : new_fl |= S_APPEND;
3931 [ + + ]: 280565 : if (flags & EXT4_IMMUTABLE_FL)
3932 : 24 : new_fl |= S_IMMUTABLE;
3933 [ - + ]: 280565 : if (flags & EXT4_NOATIME_FL)
3934 : 0 : new_fl |= S_NOATIME;
3935 [ - + ]: 280565 : if (flags & EXT4_DIRSYNC_FL)
3936 : 280565 : new_fl |= S_DIRSYNC;
3937 [ - + ]: 561042 : set_mask_bits(&inode->i_flags,
3938 : : S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC, new_fl);
3939 : 280477 : }
3940 : :
3941 : : /* Propagate flags from i_flags to EXT4_I(inode)->i_flags */
3942 : 8407090 : void ext4_get_inode_flags(struct ext4_inode_info *ei)
3943 : : {
3944 : : unsigned int vfs_fl;
3945 : : unsigned long old_fl, new_fl;
3946 : :
3947 : : do {
3948 : 8407090 : vfs_fl = ei->vfs_inode.i_flags;
3949 : 8407090 : old_fl = ei->i_flags;
3950 : 8407090 : new_fl = old_fl & ~(EXT4_SYNC_FL|EXT4_APPEND_FL|
3951 : : EXT4_IMMUTABLE_FL|EXT4_NOATIME_FL|
3952 : : EXT4_DIRSYNC_FL);
3953 [ - + ]: 8407090 : if (vfs_fl & S_SYNC)
3954 : 0 : new_fl |= EXT4_SYNC_FL;
3955 [ + + ]: 8407090 : if (vfs_fl & S_APPEND)
3956 : 96 : new_fl |= EXT4_APPEND_FL;
3957 [ + + ]: 8407090 : if (vfs_fl & S_IMMUTABLE)
3958 : 72 : new_fl |= EXT4_IMMUTABLE_FL;
3959 [ - + ]: 8407090 : if (vfs_fl & S_NOATIME)
3960 : 0 : new_fl |= EXT4_NOATIME_FL;
3961 [ # # ]: 8407090 : if (vfs_fl & S_DIRSYNC)
3962 : 0 : new_fl |= EXT4_DIRSYNC_FL;
3963 [ - + ]: 8410676 : } while (cmpxchg(&ei->i_flags, old_fl, new_fl) != old_fl);
3964 : 8410676 : }
3965 : :
3966 : : static blkcnt_t ext4_inode_blocks(struct ext4_inode *raw_inode,
3967 : : struct ext4_inode_info *ei)
3968 : : {
3969 : : blkcnt_t i_blocks ;
3970 : : struct inode *inode = &(ei->vfs_inode);
3971 : 8710 : struct super_block *sb = inode->i_sb;
3972 : :
3973 [ + - ]: 8710 : if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3974 : : EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
3975 : : /* we are using combined 48 bit field */
3976 : 8710 : i_blocks = ((u64)le16_to_cpu(raw_inode->i_blocks_high)) << 32 |
3977 : : le32_to_cpu(raw_inode->i_blocks_lo);
3978 [ - + ]: 8710 : if (ext4_test_inode_flag(inode, EXT4_INODE_HUGE_FILE)) {
3979 : : /* i_blocks represent file system block size */
3980 : 0 : return i_blocks << (inode->i_blkbits - 9);
3981 : : } else {
3982 : : return i_blocks;
3983 : : }
3984 : : } else {
3985 : 0 : return le32_to_cpu(raw_inode->i_blocks_lo);
3986 : : }
3987 : : }
3988 : :
3989 : : static inline void ext4_iget_extra_inode(struct inode *inode,
3990 : : struct ext4_inode *raw_inode,
3991 : : struct ext4_inode_info *ei)
3992 : : {
3993 : 8710 : __le32 *magic = (void *)raw_inode +
3994 : 8710 : EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize;
3995 [ - + ]: 8710 : if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) {
3996 : : ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3997 : 0 : ext4_find_inline_data_nolock(inode);
3998 : : } else
3999 : 8710 : EXT4_I(inode)->i_inline_off = 0;
4000 : : }
4001 : :
4002 : 0 : struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
4003 : : {
4004 : : struct ext4_iloc iloc;
4005 : 8710 : struct ext4_inode *raw_inode;
4006 : 8710 : struct ext4_inode_info *ei;
4007 : : struct inode *inode;
4008 : 9224 : journal_t *journal = EXT4_SB(sb)->s_journal;
4009 : : long ret;
4010 : : int block;
4011 : : uid_t i_uid;
4012 : : gid_t i_gid;
4013 : :
4014 : 9224 : inode = iget_locked(sb, ino);
4015 [ + - ]: 9224 : if (!inode)
4016 : : return ERR_PTR(-ENOMEM);
4017 [ + + ]: 9224 : if (!(inode->i_state & I_NEW))
4018 : : return inode;
4019 : :
4020 : 8710 : ei = EXT4_I(inode);
4021 : 8710 : iloc.bh = NULL;
4022 : :
4023 : 8710 : ret = __ext4_get_inode_loc(inode, &iloc, 0);
4024 [ + - ]: 8710 : if (ret < 0)
4025 : : goto bad_inode;
4026 : 8710 : raw_inode = ext4_raw_inode(&iloc);
4027 : :
4028 [ + - ]: 8710 : if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4029 : 8710 : ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
4030 [ - + ]: 8710 : if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
4031 : 8710 : EXT4_INODE_SIZE(inode->i_sb)) {
4032 : 0 : EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
4033 : : EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
4034 : : EXT4_INODE_SIZE(inode->i_sb));
4035 : : ret = -EIO;
4036 : 0 : goto bad_inode;
4037 : : }
4038 : : } else
4039 : 0 : ei->i_extra_isize = 0;
4040 : :
4041 : : /* Precompute checksum seed for inode metadata */
4042 [ - + ]: 8710 : if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
4043 : : EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
4044 : 0 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4045 : : __u32 csum;
4046 : 0 : __le32 inum = cpu_to_le32(inode->i_ino);
4047 : 0 : __le32 gen = raw_inode->i_generation;
4048 : 0 : csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
4049 : : sizeof(inum));
4050 : 0 : ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
4051 : : sizeof(gen));
4052 : : }
4053 : :
4054 [ - + ]: 8710 : if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
4055 : 0 : EXT4_ERROR_INODE(inode, "checksum invalid");
4056 : : ret = -EIO;
4057 : 0 : goto bad_inode;
4058 : : }
4059 : :
4060 : 8710 : inode->i_mode = le16_to_cpu(raw_inode->i_mode);
4061 : 8710 : i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
4062 : 8710 : i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
4063 [ + - ]: 8710 : if (!(test_opt(inode->i_sb, NO_UID32))) {
4064 : 8710 : i_uid |= le16_to_cpu(raw_inode->i_uid_high) << 16;
4065 : 8710 : i_gid |= le16_to_cpu(raw_inode->i_gid_high) << 16;
4066 : : }
4067 : : i_uid_write(inode, i_uid);
4068 : : i_gid_write(inode, i_gid);
4069 : 8710 : set_nlink(inode, le16_to_cpu(raw_inode->i_links_count));
4070 : :
4071 : : ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
4072 : 8710 : ei->i_inline_off = 0;
4073 : 8710 : ei->i_dir_start_lookup = 0;
4074 : 8710 : ei->i_dtime = le32_to_cpu(raw_inode->i_dtime);
4075 : : /* We now have enough fields to check if the inode was active or not.
4076 : : * This is needed because nfsd might try to access dead inodes
4077 : : * the test is that same one that e2fsck uses
4078 : : * NeilBrown 1999oct15
4079 : : */
4080 [ - + ]: 8710 : if (inode->i_nlink == 0) {
4081 [ # # ][ # # ]: 0 : if ((inode->i_mode == 0 ||
4082 [ # # ]: 0 : !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) &&
4083 : : ino != EXT4_BOOT_LOADER_INO) {
4084 : : /* this inode is deleted */
4085 : : ret = -ESTALE;
4086 : : goto bad_inode;
4087 : : }
4088 : : /* The only unlinked inodes we let through here have
4089 : : * valid i_mode and are being read by the orphan
4090 : : * recovery code: that's fine, we're about to complete
4091 : : * the process of deleting those.
4092 : : * OR it is the EXT4_BOOT_LOADER_INO which is
4093 : : * not initialized on a new filesystem. */
4094 : : }
4095 : 8710 : ei->i_flags = le32_to_cpu(raw_inode->i_flags);
4096 : 8710 : inode->i_blocks = ext4_inode_blocks(raw_inode, ei);
4097 : 8710 : ei->i_file_acl = le32_to_cpu(raw_inode->i_file_acl_lo);
4098 [ - + ]: 8710 : if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT))
4099 : 0 : ei->i_file_acl |=
4100 : 0 : ((__u64)le16_to_cpu(raw_inode->i_file_acl_high)) << 32;
4101 : 8710 : inode->i_size = ext4_isize(raw_inode);
4102 : 8710 : ei->i_disksize = inode->i_size;
4103 : : #ifdef CONFIG_QUOTA
4104 : 8710 : ei->i_reserved_quota = 0;
4105 : : #endif
4106 : 8710 : inode->i_generation = le32_to_cpu(raw_inode->i_generation);
4107 : 8710 : ei->i_block_group = iloc.block_group;
4108 : 8710 : ei->i_last_alloc_group = ~0;
4109 : : /*
4110 : : * NOTE! The in-memory inode i_data array is in little-endian order
4111 : : * even on big-endian machines: we do NOT byteswap the block numbers!
4112 : : */
4113 [ + + ]: 139360 : for (block = 0; block < EXT4_N_BLOCKS; block++)
4114 : 130650 : ei->i_data[block] = raw_inode->i_block[block];
4115 : 8710 : INIT_LIST_HEAD(&ei->i_orphan);
4116 : :
4117 : : /*
4118 : : * Set transaction id's of transactions that have to be committed
4119 : : * to finish f[data]sync. We set them to currently running transaction
4120 : : * as we cannot be sure that the inode or some of its metadata isn't
4121 : : * part of the transaction - the inode could have been reclaimed and
4122 : : * now it is reread from disk.
4123 : : */
4124 [ + - ]: 8710 : if (journal) {
4125 : : transaction_t *transaction;
4126 : : tid_t tid;
4127 : :
4128 : 8710 : read_lock(&journal->j_state_lock);
4129 [ + + ]: 8710 : if (journal->j_running_transaction)
4130 : : transaction = journal->j_running_transaction;
4131 : : else
4132 : 91 : transaction = journal->j_committing_transaction;
4133 [ + + ]: 8710 : if (transaction)
4134 : 8656 : tid = transaction->t_tid;
4135 : : else
4136 : 54 : tid = journal->j_commit_sequence;
4137 : : read_unlock(&journal->j_state_lock);
4138 : 8710 : ei->i_sync_tid = tid;
4139 : 8710 : ei->i_datasync_tid = tid;
4140 : : }
4141 : :
4142 [ + - ]: 8710 : if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4143 [ - + ]: 8710 : if (ei->i_extra_isize == 0) {
4144 : : /* The extra space is currently unused. Use it. */
4145 : 0 : ei->i_extra_isize = sizeof(struct ext4_inode) -
4146 : : EXT4_GOOD_OLD_INODE_SIZE;
4147 : : } else {
4148 : : ext4_iget_extra_inode(inode, raw_inode, ei);
4149 : : }
4150 : : }
4151 : :
4152 [ + - ]: 17934 : EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
4153 [ + - ]: 8710 : EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
4154 [ + - ]: 8710 : EXT4_INODE_GET_XTIME(i_atime, inode, raw_inode);
4155 [ + - ][ + - ]: 8710 : EXT4_EINODE_GET_XTIME(i_crtime, ei, raw_inode);
4156 : :
4157 : 8710 : inode->i_version = le32_to_cpu(raw_inode->i_disk_version);
4158 [ + - ]: 8710 : if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
4159 [ + - ]: 8710 : if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4160 : 8710 : inode->i_version |=
4161 : 8710 : (__u64)(le32_to_cpu(raw_inode->i_version_hi)) << 32;
4162 : : }
4163 : :
4164 : : ret = 0;
4165 [ - + # # ]: 8710 : if (ei->i_file_acl &&
4166 : 0 : !ext4_data_block_valid(EXT4_SB(sb), ei->i_file_acl, 1)) {
4167 : 0 : EXT4_ERROR_INODE(inode, "bad extended attribute block %llu",
4168 : : ei->i_file_acl);
4169 : : ret = -EIO;
4170 : 0 : goto bad_inode;
4171 [ + - ]: 8710 : } else if (!ext4_has_inline_data(inode)) {
4172 [ + + ]: 8710 : if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)) {
4173 [ - + ][ # # ]: 8425 : if ((S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4174 [ # # ]: 0 : (S_ISLNK(inode->i_mode) &&
4175 : 0 : !ext4_inode_is_fast_symlink(inode))))
4176 : : /* Validate extent which is part of inode */
4177 : 8425 : ret = ext4_ext_check_inode(inode);
4178 [ + - ][ + - ]: 285 : } else if (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
4179 [ - + ]: 285 : (S_ISLNK(inode->i_mode) &&
4180 : 285 : !ext4_inode_is_fast_symlink(inode))) {
4181 : : /* Validate block references which are part of inode */
4182 : 0 : ret = ext4_ind_check_inode(inode);
4183 : : }
4184 : : }
4185 [ + - ]: 8710 : if (ret)
4186 : : goto bad_inode;
4187 : :
4188 [ + + ]: 8710 : if (S_ISREG(inode->i_mode)) {
4189 : 7937 : inode->i_op = &ext4_file_inode_operations;
4190 : 7937 : inode->i_fop = &ext4_file_operations;
4191 : 7937 : ext4_set_aops(inode);
4192 [ + + ]: 773 : } else if (S_ISDIR(inode->i_mode)) {
4193 : 488 : inode->i_op = &ext4_dir_inode_operations;
4194 : 488 : inode->i_fop = &ext4_dir_operations;
4195 [ + - ]: 285 : } else if (S_ISLNK(inode->i_mode)) {
4196 [ + - ]: 285 : if (ext4_inode_is_fast_symlink(inode)) {
4197 : 285 : inode->i_op = &ext4_fast_symlink_inode_operations;
4198 : 285 : nd_terminate_link(ei->i_data, inode->i_size,
4199 : : sizeof(ei->i_data) - 1);
4200 : : } else {
4201 : 0 : inode->i_op = &ext4_symlink_inode_operations;
4202 : 0 : ext4_set_aops(inode);
4203 : : }
4204 [ # # ][ # # ]: 0 : } else if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode) ||
4205 [ # # ]: 0 : S_ISFIFO(inode->i_mode) || S_ISSOCK(inode->i_mode)) {
4206 : 0 : inode->i_op = &ext4_special_inode_operations;
4207 [ # # ]: 0 : if (raw_inode->i_block[0])
4208 : 0 : init_special_inode(inode, inode->i_mode,
4209 : : old_decode_dev(le32_to_cpu(raw_inode->i_block[0])));
4210 : : else
4211 : 0 : init_special_inode(inode, inode->i_mode,
4212 : : new_decode_dev(le32_to_cpu(raw_inode->i_block[1])));
4213 [ # # ]: 0 : } else if (ino == EXT4_BOOT_LOADER_INO) {
4214 : 0 : make_bad_inode(inode);
4215 : : } else {
4216 : : ret = -EIO;
4217 : 0 : EXT4_ERROR_INODE(inode, "bogus i_mode (%o)", inode->i_mode);
4218 : 0 : goto bad_inode;
4219 : : }
4220 : 8710 : brelse(iloc.bh);
4221 : 8710 : ext4_set_inode_flags(inode);
4222 : 8710 : unlock_new_inode(inode);
4223 : 8710 : return inode;
4224 : :
4225 : : bad_inode:
4226 : 0 : brelse(iloc.bh);
4227 : 0 : iget_failed(inode);
4228 : 0 : return ERR_PTR(ret);
4229 : : }
4230 : :
4231 : 8413388 : static int ext4_inode_blocks_set(handle_t *handle,
4232 : : struct ext4_inode *raw_inode,
4233 : : struct ext4_inode_info *ei)
4234 : : {
4235 : : struct inode *inode = &(ei->vfs_inode);
4236 : 8413388 : u64 i_blocks = inode->i_blocks;
4237 : 8413388 : struct super_block *sb = inode->i_sb;
4238 : :
4239 [ + - ]: 8413388 : if (i_blocks <= ~0U) {
4240 : : /*
4241 : : * i_blocks can be represented in a 32 bit variable
4242 : : * as multiple of 512 bytes
4243 : : */
4244 : 8413388 : raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4245 : 8413388 : raw_inode->i_blocks_high = 0;
4246 : : ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4247 : : return 0;
4248 : : }
4249 [ # # ]: 0 : if (!EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE))
4250 : : return -EFBIG;
4251 : :
4252 [ # # ]: 0 : if (i_blocks <= 0xffffffffffffULL) {
4253 : : /*
4254 : : * i_blocks can be represented in a 48 bit variable
4255 : : * as multiple of 512 bytes
4256 : : */
4257 : 0 : raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4258 : 0 : raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4259 : : ext4_clear_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4260 : : } else {
4261 : : ext4_set_inode_flag(inode, EXT4_INODE_HUGE_FILE);
4262 : : /* i_block is stored in file system block size */
4263 : 0 : i_blocks = i_blocks >> (inode->i_blkbits - 9);
4264 : 0 : raw_inode->i_blocks_lo = cpu_to_le32(i_blocks);
4265 : 0 : raw_inode->i_blocks_high = cpu_to_le16(i_blocks >> 32);
4266 : : }
4267 : : return 0;
4268 : : }
4269 : :
4270 : : /*
4271 : : * Post the struct inode info into an on-disk inode location in the
4272 : : * buffer-cache. This gobbles the caller's reference to the
4273 : : * buffer_head in the inode location struct.
4274 : : *
4275 : : * The caller must have write access to iloc->bh.
4276 : : */
4277 : 0 : static int ext4_do_update_inode(handle_t *handle,
4278 : : struct inode *inode,
4279 : 8412828 : struct ext4_iloc *iloc)
4280 : : {
4281 : : struct ext4_inode *raw_inode = ext4_raw_inode(iloc);
4282 : 8412828 : struct ext4_inode_info *ei = EXT4_I(inode);
4283 : : struct buffer_head *bh = iloc->bh;
4284 : : int err = 0, rc, block;
4285 : : int need_datasync = 0;
4286 : : uid_t i_uid;
4287 : : gid_t i_gid;
4288 : :
4289 : : /* For fields not not tracking in the in-memory inode,
4290 : : * initialise them to zero for new inodes. */
4291 [ + + ]: 8412828 : if (ext4_test_inode_state(inode, EXT4_STATE_NEW))
4292 [ + ]: 271517 : memset(raw_inode, 0, EXT4_SB(inode->i_sb)->s_inode_size);
4293 : :
4294 : 8412786 : ext4_get_inode_flags(ei);
4295 : 8408485 : raw_inode->i_mode = cpu_to_le16(inode->i_mode);
4296 : : i_uid = i_uid_read(inode);
4297 : : i_gid = i_gid_read(inode);
4298 [ + - ]: 8408485 : if (!(test_opt(inode->i_sb, NO_UID32))) {
4299 : 8408485 : raw_inode->i_uid_low = cpu_to_le16(low_16_bits(i_uid));
4300 : 8408485 : raw_inode->i_gid_low = cpu_to_le16(low_16_bits(i_gid));
4301 : : /*
4302 : : * Fix up interoperability with old kernels. Otherwise, old inodes get
4303 : : * re-used with the upper 16 bits of the uid/gid intact
4304 : : */
4305 [ + + ]: 8408485 : if (!ei->i_dtime) {
4306 : 8054602 : raw_inode->i_uid_high =
4307 : 8054602 : cpu_to_le16(high_16_bits(i_uid));
4308 : 8054602 : raw_inode->i_gid_high =
4309 : 8054602 : cpu_to_le16(high_16_bits(i_gid));
4310 : : } else {
4311 : 353883 : raw_inode->i_uid_high = 0;
4312 : 353883 : raw_inode->i_gid_high = 0;
4313 : : }
4314 : : } else {
4315 [ # # ]: 0 : raw_inode->i_uid_low = cpu_to_le16(fs_high2lowuid(i_uid));
4316 [ # # ]: 0 : raw_inode->i_gid_low = cpu_to_le16(fs_high2lowgid(i_gid));
4317 : 0 : raw_inode->i_uid_high = 0;
4318 : 0 : raw_inode->i_gid_high = 0;
4319 : : }
4320 : 8408485 : raw_inode->i_links_count = cpu_to_le16(inode->i_nlink);
4321 : :
4322 [ + ]: 8408485 : EXT4_INODE_SET_XTIME(i_ctime, inode, raw_inode);
4323 [ + ]: 8408485 : EXT4_INODE_SET_XTIME(i_mtime, inode, raw_inode);
4324 [ + ]: 8408485 : EXT4_INODE_SET_XTIME(i_atime, inode, raw_inode);
4325 [ + ][ + ]: 8408485 : EXT4_EINODE_SET_XTIME(i_crtime, ei, raw_inode);
4326 : :
4327 [ + ]: 8408485 : if (ext4_inode_blocks_set(handle, raw_inode, ei))
4328 : : goto out_brelse;
4329 : 8413332 : raw_inode->i_dtime = cpu_to_le32(ei->i_dtime);
4330 : 8413332 : raw_inode->i_flags = cpu_to_le32(ei->i_flags & 0xFFFFFFFF);
4331 [ + + ]: 8413332 : if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
4332 : : cpu_to_le32(EXT4_OS_HURD))
4333 : 8413325 : raw_inode->i_file_acl_high =
4334 : 8413325 : cpu_to_le16(ei->i_file_acl >> 32);
4335 : 8413332 : raw_inode->i_file_acl_lo = cpu_to_le32(ei->i_file_acl);
4336 [ + + ]: 8413332 : if (ei->i_disksize != ext4_isize(raw_inode)) {
4337 : : ext4_isize_set(raw_inode, ei->i_disksize);
4338 : : need_datasync = 1;
4339 : : }
4340 [ - + ]: 8413332 : if (ei->i_disksize > 0x7fffffffULL) {
4341 : 0 : struct super_block *sb = inode->i_sb;
4342 [ # # ]: 0 : if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
4343 [ # # ]: 0 : EXT4_FEATURE_RO_COMPAT_LARGE_FILE) ||
4344 : 0 : EXT4_SB(sb)->s_es->s_rev_level ==
4345 : : cpu_to_le32(EXT4_GOOD_OLD_REV)) {
4346 : : /* If this is the first large file
4347 : : * created, add a flag to the superblock.
4348 : : */
4349 : 5798 : err = ext4_journal_get_write_access(handle,
4350 : : EXT4_SB(sb)->s_sbh);
4351 [ # # ]: 0 : if (err)
4352 : : goto out_brelse;
4353 : 0 : ext4_update_dynamic_rev(sb);
4354 : 0 : EXT4_SET_RO_COMPAT_FEATURE(sb,
4355 : : EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
4356 : : ext4_handle_sync(handle);
4357 : 0 : err = ext4_handle_dirty_super(handle, sb);
4358 : : }
4359 : : }
4360 : 8407534 : raw_inode->i_generation = cpu_to_le32(inode->i_generation);
4361 [ + + ]: 8407534 : if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
4362 [ + - ]: 28 : if (old_valid_dev(inode->i_rdev)) {
4363 : 28 : raw_inode->i_block[0] =
4364 : 28 : cpu_to_le32(old_encode_dev(inode->i_rdev));
4365 : 28 : raw_inode->i_block[1] = 0;
4366 : : } else {
4367 : 0 : raw_inode->i_block[0] = 0;
4368 : 0 : raw_inode->i_block[1] =
4369 : 0 : cpu_to_le32(new_encode_dev(inode->i_rdev));
4370 : 0 : raw_inode->i_block[2] = 0;
4371 : : }
4372 [ + + ]: 8407506 : } else if (!ext4_has_inline_data(inode)) {
4373 [ + + ]: 134563529 : for (block = 0; block < EXT4_N_BLOCKS; block++)
4374 : 126150407 : raw_inode->i_block[block] = ei->i_data[block];
4375 : : }
4376 : :
4377 : 8416020 : raw_inode->i_disk_version = cpu_to_le32(inode->i_version);
4378 [ + + ]: 8416020 : if (ei->i_extra_isize) {
4379 [ + ]: 8412681 : if (EXT4_FITS_IN_INODE(raw_inode, ei, i_version_hi))
4380 : 8413136 : raw_inode->i_version_hi =
4381 : 8413136 : cpu_to_le32(inode->i_version >> 32);
4382 : 8412681 : raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
4383 : : }
4384 : :
4385 : 8416020 : ext4_inode_csum_set(inode, raw_inode, ei);
4386 : :
4387 : : BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
4388 : 8414057 : rc = ext4_handle_dirty_metadata(handle, NULL, bh);
4389 [ + ]: 8409684 : if (!err)
4390 : : err = rc;
4391 : : ext4_clear_inode_state(inode, EXT4_STATE_NEW);
4392 : :
4393 : : ext4_update_inode_fsync_trans(handle, inode, need_datasync);
4394 : : out_brelse:
4395 : : brelse(bh);
4396 [ - + ]: 8414058 : ext4_std_error(inode->i_sb, err);
4397 : 8414058 : return err;
4398 : : }
4399 : :
4400 : : /*
4401 : : * ext4_write_inode()
4402 : : *
4403 : : * We are called from a few places:
4404 : : *
4405 : : * - Within generic_file_write() for O_SYNC files.
4406 : : * Here, there will be no transaction running. We wait for any running
4407 : : * transaction to commit.
4408 : : *
4409 : : * - Within sys_sync(), kupdate and such.
4410 : : * We wait on commit, if tol to.
4411 : : *
4412 : : * - Within prune_icache() (PF_MEMALLOC == true)
4413 : : * Here we simply return. We can't afford to block kswapd on the
4414 : : * journal commit.
4415 : : *
4416 : : * In all cases it is actually safe for us to return without doing anything,
4417 : : * because the inode has been copied into a raw inode buffer in
4418 : : * ext4_mark_inode_dirty(). This is a correctness thing for O_SYNC and for
4419 : : * knfsd.
4420 : : *
4421 : : * Note that we are absolutely dependent upon all inode dirtiers doing the
4422 : : * right thing: they *must* call mark_inode_dirty() after dirtying info in
4423 : : * which we are interested.
4424 : : *
4425 : : * It would be a bug for them to not do this. The code:
4426 : : *
4427 : : * mark_inode_dirty(inode)
4428 : : * stuff();
4429 : : * inode->i_size = expr;
4430 : : *
4431 : : * is in error because a kswapd-driven write_inode() could occur while
4432 : : * `stuff()' is running, and the new i_size will be lost. Plus the inode
4433 : : * will no longer be on the superblock's dirty inode list.
4434 : : */
4435 : 0 : int ext4_write_inode(struct inode *inode, struct writeback_control *wbc)
4436 : : {
4437 : : int err;
4438 : :
4439 [ + - ]: 14788 : if (current->flags & PF_MEMALLOC)
4440 : : return 0;
4441 : :
4442 [ + - ]: 14788 : if (EXT4_SB(inode->i_sb)->s_journal) {
4443 [ - + ]: 14788 : if (ext4_journal_current_handle()) {
4444 : : jbd_debug(1, "called recursively, non-PF_MEMALLOC!\n");
4445 : 0 : dump_stack();
4446 : 0 : return -EIO;
4447 : : }
4448 : :
4449 [ + + ]: 14788 : if (wbc->sync_mode != WB_SYNC_ALL)
4450 : : return 0;
4451 : :
4452 : 307 : err = ext4_force_commit(inode->i_sb);
4453 : : } else {
4454 : : struct ext4_iloc iloc;
4455 : :
4456 : 0 : err = __ext4_get_inode_loc(inode, &iloc, 0);
4457 [ # # ]: 0 : if (err)
4458 : 0 : return err;
4459 [ # # ]: 0 : if (wbc->sync_mode == WB_SYNC_ALL)
4460 : 0 : sync_dirty_buffer(iloc.bh);
4461 [ # # ][ # # ]: 14788 : if (buffer_req(iloc.bh) && !buffer_uptodate(iloc.bh)) {
4462 : 0 : EXT4_ERROR_INODE_BLOCK(inode, iloc.bh->b_blocknr,
4463 : : "IO error syncing inode");
4464 : : err = -EIO;
4465 : : }
4466 : 0 : brelse(iloc.bh);
4467 : : }
4468 : 307 : return err;
4469 : : }
4470 : :
4471 : : /*
4472 : : * In data=journal mode ext4_journalled_invalidatepage() may fail to invalidate
4473 : : * buffers that are attached to a page stradding i_size and are undergoing
4474 : : * commit. In that case we have to wait for commit to finish and try again.
4475 : : */
4476 : 0 : static void ext4_wait_for_tail_page_commit(struct inode *inode)
4477 : : {
4478 : : struct page *page;
4479 : : unsigned offset;
4480 : 0 : journal_t *journal = EXT4_SB(inode->i_sb)->s_journal;
4481 : : tid_t commit_tid = 0;
4482 : : int ret;
4483 : :
4484 : 0 : offset = inode->i_size & (PAGE_CACHE_SIZE - 1);
4485 : : /*
4486 : : * All buffers in the last page remain valid? Then there's nothing to
4487 : : * do. We do the check mainly to optimize the common PAGE_CACHE_SIZE ==
4488 : : * blocksize case
4489 : : */
4490 [ # # ]: 0 : if (offset > PAGE_CACHE_SIZE - (1 << inode->i_blkbits))
4491 : : return;
4492 : : while (1) {
4493 : 0 : page = find_lock_page(inode->i_mapping,
4494 : 0 : inode->i_size >> PAGE_CACHE_SHIFT);
4495 [ # # ]: 0 : if (!page)
4496 : : return;
4497 : 0 : ret = __ext4_journalled_invalidatepage(page, offset,
4498 : 0 : PAGE_CACHE_SIZE - offset);
4499 : 0 : unlock_page(page);
4500 : 0 : page_cache_release(page);
4501 [ # # ]: 0 : if (ret != -EBUSY)
4502 : : return;
4503 : : commit_tid = 0;
4504 : 0 : read_lock(&journal->j_state_lock);
4505 [ # # ]: 0 : if (journal->j_committing_transaction)
4506 : 0 : commit_tid = journal->j_committing_transaction->t_tid;
4507 : : read_unlock(&journal->j_state_lock);
4508 [ # # ]: 0 : if (commit_tid)
4509 : 0 : jbd2_log_wait_commit(journal, commit_tid);
4510 : : }
4511 : : }
4512 : :
4513 : : /*
4514 : : * ext4_setattr()
4515 : : *
4516 : : * Called from notify_change.
4517 : : *
4518 : : * We want to trap VFS attempts to truncate the file as soon as
4519 : : * possible. In particular, we want to make sure that when the VFS
4520 : : * shrinks i_size, we put the inode on the orphan list and modify
4521 : : * i_disksize immediately, so that during the subsequent flushing of
4522 : : * dirty pages and freeing of disk blocks, we can guarantee that any
4523 : : * commit will leave the blocks being flushed in an unused state on
4524 : : * disk. (On recovery, the inode will get truncated and the blocks will
4525 : : * be freed, so we have a strong guarantee that no future commit will
4526 : : * leave these blocks visible to the user.)
4527 : : *
4528 : : * Another thing we have to assure is that if we are in ordered mode
4529 : : * and inode is still attached to the committing transaction, we must
4530 : : * we start writeout of all the dirty pages which are being truncated.
4531 : : * This way we are sure that all the data written in the previous
4532 : : * transaction are already on disk (truncate waits for pages under
4533 : : * writeback).
4534 : : *
4535 : : * Called with inode->i_mutex down.
4536 : : */
4537 : 0 : int ext4_setattr(struct dentry *dentry, struct iattr *attr)
4538 : : {
4539 : 78839 : struct inode *inode = dentry->d_inode;
4540 : : int error, rc = 0;
4541 : : int orphan = 0;
4542 : 43838 : const unsigned int ia_valid = attr->ia_valid;
4543 : :
4544 : 43838 : error = inode_change_ok(inode, attr);
4545 [ + + ]: 43836 : if (error)
4546 : : return error;
4547 : :
4548 [ + + ]: 43819 : if (is_quota_modification(inode, attr))
4549 : 37148 : dquot_initialize(inode);
4550 [ + + ][ + + ]: 87660 : if ((ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
[ + + ]
4551 [ + + ]: 1308 : (ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
4552 : : handle_t *handle;
4553 : :
4554 : : /* (user+group)*(old+new) structure, inode write (sb,
4555 : : * inode block, ? - but truncate inode update has it) */
4556 [ + - ][ - + ]: 166 : handle = ext4_journal_start(inode, EXT4_HT_QUOTA,
[ # # ][ + - ]
[ - + ]
4557 : : (EXT4_MAXQUOTAS_INIT_BLOCKS(inode->i_sb) +
4558 : : EXT4_MAXQUOTAS_DEL_BLOCKS(inode->i_sb)) + 3);
4559 [ - + ]: 166 : if (IS_ERR(handle)) {
4560 : : error = PTR_ERR(handle);
4561 : 0 : goto err_out;
4562 : : }
4563 : 166 : error = dquot_transfer(inode, attr);
4564 [ - + ]: 166 : if (error) {
4565 : 0 : ext4_journal_stop(handle);
4566 : 0 : return error;
4567 : : }
4568 : : /* Update corresponding info in inode so that everything is in
4569 : : * one transaction */
4570 [ + + ]: 166 : if (attr->ia_valid & ATTR_UID)
4571 : 154 : inode->i_uid = attr->ia_uid;
4572 [ + + ]: 166 : if (attr->ia_valid & ATTR_GID)
4573 : 136 : inode->i_gid = attr->ia_gid;
4574 : 166 : error = ext4_mark_inode_dirty(handle, inode);
4575 : 166 : ext4_journal_stop(handle);
4576 : : }
4577 : :
4578 [ + + ][ + + ]: 43822 : if (attr->ia_valid & ATTR_SIZE && attr->ia_size != inode->i_size) {
4579 : : handle_t *handle;
4580 : :
4581 [ - + ]: 36982 : if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) {
4582 : 0 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4583 : :
4584 [ # # ]: 0 : if (attr->ia_size > sbi->s_bitmap_maxbytes)
4585 : : return -EFBIG;
4586 : : }
4587 : :
4588 [ - + ][ # # ]: 36978 : if (IS_I_VERSION(inode) && attr->ia_size != inode->i_size)
4589 : : inode_inc_iversion(inode);
4590 : :
4591 [ + - ][ + + ]: 36978 : if (S_ISREG(inode->i_mode) &&
4592 : 36978 : (attr->ia_size < inode->i_size)) {
4593 [ + + ]: 34994 : if (ext4_should_order_data(inode)) {
4594 : : error = ext4_begin_ordered_truncate(inode,
4595 : : attr->ia_size);
4596 [ + - ]: 34991 : if (error)
4597 : : goto err_out;
4598 : : }
4599 : : handle = ext4_journal_start(inode, EXT4_HT_INODE, 3);
4600 [ - + ]: 34994 : if (IS_ERR(handle)) {
4601 : : error = PTR_ERR(handle);
4602 : 0 : goto err_out;
4603 : : }
4604 [ + ]: 34994 : if (ext4_handle_valid(handle)) {
4605 : 34995 : error = ext4_orphan_add(handle, inode);
4606 : : orphan = 1;
4607 : : }
4608 : 34994 : down_write(&EXT4_I(inode)->i_data_sem);
4609 : 34995 : EXT4_I(inode)->i_disksize = attr->ia_size;
4610 : 34995 : rc = ext4_mark_inode_dirty(handle, inode);
4611 [ + - ]: 34995 : if (!error)
4612 : : error = rc;
4613 : : /*
4614 : : * We have to update i_size under i_data_sem together
4615 : : * with i_disksize to avoid races with writeback code
4616 : : * running ext4_wb_update_i_disksize().
4617 : : */
4618 [ + - ]: 34995 : if (!error)
4619 : 34995 : i_size_write(inode, attr->ia_size);
4620 : 34995 : up_write(&EXT4_I(inode)->i_data_sem);
4621 : 34995 : ext4_journal_stop(handle);
4622 [ - + ]: 34995 : if (error) {
4623 : 0 : ext4_orphan_del(NULL, inode);
4624 : 0 : goto err_out;
4625 : : }
4626 : : } else
4627 : 1984 : i_size_write(inode, attr->ia_size);
4628 : :
4629 : : /*
4630 : : * Blocks are going to be removed from the inode. Wait
4631 : : * for dio in flight. Temporarily disable
4632 : : * dioread_nolock to prevent livelock.
4633 : : */
4634 [ + + ]: 36981 : if (orphan) {
4635 [ + - ]: 34994 : if (!ext4_should_journal_data(inode)) {
4636 : : ext4_inode_block_unlocked_dio(inode);
4637 : 34995 : inode_dio_wait(inode);
4638 : : ext4_inode_resume_unlocked_dio(inode);
4639 : : } else
4640 : 0 : ext4_wait_for_tail_page_commit(inode);
4641 : : }
4642 : : /*
4643 : : * Truncate pagecache after we've waited for commit
4644 : : * in data=journal mode to make pages freeable.
4645 : : */
4646 : 36982 : truncate_pagecache(inode, inode->i_size);
4647 : : }
4648 : : /*
4649 : : * We want to call ext4_truncate() even if attr->ia_size ==
4650 : : * inode->i_size for cases like truncation of fallocated space
4651 : : */
4652 [ + + ]: 43822 : if (attr->ia_valid & ATTR_SIZE)
4653 : 37393 : ext4_truncate(inode);
4654 : :
4655 [ + - ]: 43822 : if (!rc) {
4656 : 43822 : setattr_copy(inode, attr);
4657 : : mark_inode_dirty(inode);
4658 : : }
4659 : :
4660 : : /*
4661 : : * If the call to ext4_truncate failed to get a transaction handle at
4662 : : * all, we need to clean up the in-core orphan list manually.
4663 : : */
4664 [ + + ][ + - ]: 43822 : if (orphan && inode->i_nlink)
4665 : 34995 : ext4_orphan_del(NULL, inode);
4666 : :
4667 [ + - ][ + + ]: 43822 : if (!rc && (ia_valid & ATTR_MODE))
4668 : 3615 : rc = posix_acl_chmod(inode, inode->i_mode);
4669 : :
4670 : : err_out:
4671 [ - + ]: 43822 : ext4_std_error(inode->i_sb, error);
4672 [ + - ]: 43822 : if (!error)
4673 : : error = rc;
4674 : 43822 : return error;
4675 : : }
4676 : :
4677 : 0 : int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4678 : : struct kstat *stat)
4679 : : {
4680 : : struct inode *inode;
4681 : : unsigned long long delalloc_blocks;
4682 : :
4683 : 3973341 : inode = dentry->d_inode;
4684 : 3973341 : generic_fillattr(inode, stat);
4685 : :
4686 : : /*
4687 : : * If there is inline data in the inode, the inode will normally not
4688 : : * have data blocks allocated (it may have an external xattr block).
4689 : : * Report at least one sector for such files, so tools like tar, rsync,
4690 : : * others doen't incorrectly think the file is completely sparse.
4691 : : */
4692 [ - + ]: 3973329 : if (unlikely(ext4_has_inline_data(inode)))
4693 : 0 : stat->blocks += (stat->size + 511) >> 9;
4694 : :
4695 : : /*
4696 : : * We can't update i_blocks if the block allocation is delayed
4697 : : * otherwise in the case of system crash before the real block
4698 : : * allocation is done, we will have i_blocks inconsistent with
4699 : : * on-disk file blocks.
4700 : : * We always keep i_blocks updated together with real
4701 : : * allocation. But to not confuse with user, stat
4702 : : * will return the blocks that include the delayed allocation
4703 : : * blocks for this file.
4704 : : */
4705 : 32 : delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4706 : : EXT4_I(inode)->i_reserved_data_blocks);
4707 : 32 : stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits - 9);
4708 : 32 : return 0;
4709 : : }
4710 : :
4711 : 0 : static int ext4_index_trans_blocks(struct inode *inode, int lblocks,
4712 : : int pextents)
4713 : : {
4714 [ - + ]: 423504 : if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
4715 : 0 : return ext4_ind_trans_blocks(inode, lblocks);
4716 : 423504 : return ext4_ext_index_trans_blocks(inode, pextents);
4717 : : }
4718 : :
4719 : : /*
4720 : : * Account for index blocks, block groups bitmaps and block group
4721 : : * descriptor blocks if modify datablocks and index blocks
4722 : : * worse case, the indexs blocks spread over different block groups
4723 : : *
4724 : : * If datablocks are discontiguous, they are possible to spread over
4725 : : * different block groups too. If they are contiguous, with flexbg,
4726 : : * they could still across block group boundary.
4727 : : *
4728 : : * Also account for superblock, inode, quota and xattr blocks
4729 : : */
4730 : 0 : static int ext4_meta_trans_blocks(struct inode *inode, int lblocks,
4731 : : int pextents)
4732 : : {
4733 : 847042 : ext4_group_t groups, ngroups = ext4_get_groups_count(inode->i_sb);
4734 : : int gdpblocks;
4735 : : int idxblocks;
4736 : : int ret = 0;
4737 : :
4738 : : /*
4739 : : * How many index blocks need to touch to map @lblocks logical blocks
4740 : : * to @pextents physical extents?
4741 : : */
4742 : 423504 : idxblocks = ext4_index_trans_blocks(inode, lblocks, pextents);
4743 : :
4744 : : ret = idxblocks;
4745 : :
4746 : : /*
4747 : : * Now let's see how many group bitmaps and group descriptors need
4748 : : * to account
4749 : : */
4750 : 423520 : groups = idxblocks + pextents;
4751 : : gdpblocks = groups;
4752 [ - + ]: 423520 : if (groups > ngroups)
4753 : : groups = ngroups;
4754 [ + + ]: 423520 : if (groups > EXT4_SB(inode->i_sb)->s_gdb_count)
4755 : 158093 : gdpblocks = EXT4_SB(inode->i_sb)->s_gdb_count;
4756 : :
4757 : : /* bitmaps and block group descriptor blocks */
4758 : 0 : ret += groups + gdpblocks;
4759 : :
4760 : : /* Blocks for super block, inode, quota and xattr blocks */
4761 [ + ][ + ]: 423520 : ret += EXT4_META_TRANS_BLOCKS(inode->i_sb);
4762 : :
4763 : 0 : return ret;
4764 : : }
4765 : :
4766 : : /*
4767 : : * Calculate the total number of credits to reserve to fit
4768 : : * the modification of a single pages into a single transaction,
4769 : : * which may include multiple chunks of block allocations.
4770 : : *
4771 : : * This could be called via ext4_write_begin()
4772 : : *
4773 : : * We need to consider the worse case, when
4774 : : * one new block per extent.
4775 : : */
4776 : 0 : int ext4_writepage_trans_blocks(struct inode *inode)
4777 : : {
4778 : : int bpp = ext4_journal_blocks_per_page(inode);
4779 : : int ret;
4780 : :
4781 : 80530 : ret = ext4_meta_trans_blocks(inode, bpp, bpp);
4782 : :
4783 : : /* Account for data blocks for journalled mode */
4784 [ + + ]: 80531 : if (ext4_should_journal_data(inode))
4785 : 40741 : ret += bpp;
4786 : 1 : return ret;
4787 : : }
4788 : :
4789 : : /*
4790 : : * Calculate the journal credits for a chunk of data modification.
4791 : : *
4792 : : * This is called from DIO, fallocate or whoever calling
4793 : : * ext4_map_blocks() to map/allocate a chunk of contiguous disk blocks.
4794 : : *
4795 : : * journal buffers for data blocks are not included here, as DIO
4796 : : * and fallocate do no need to journal data buffers.
4797 : : */
4798 : 0 : int ext4_chunk_trans_blocks(struct inode *inode, int nrblocks)
4799 : : {
4800 : 214636 : return ext4_meta_trans_blocks(inode, nrblocks, 1);
4801 : : }
4802 : :
4803 : : /*
4804 : : * The caller must have previously called ext4_reserve_inode_write().
4805 : : * Give this, we know that the caller already has write access to iloc->bh.
4806 : : */
4807 : 0 : int ext4_mark_iloc_dirty(handle_t *handle,
4808 : : struct inode *inode, struct ext4_iloc *iloc)
4809 : : {
4810 : : int err = 0;
4811 : :
4812 [ - + ]: 8410814 : if (IS_I_VERSION(inode))
4813 : : inode_inc_iversion(inode);
4814 : :
4815 : : /* the do_update_inode consumes one bh->b_count */
4816 : 0 : get_bh(iloc->bh);
4817 : :
4818 : : /* ext4_do_update_inode() does jbd2_journal_dirty_metadata */
4819 : 8412725 : err = ext4_do_update_inode(handle, inode, iloc);
4820 : 8412809 : put_bh(iloc->bh);
4821 : 8415303 : return err;
4822 : : }
4823 : :
4824 : : /*
4825 : : * On success, We end up with an outstanding reference count against
4826 : : * iloc->bh. This _must_ be cleaned up later.
4827 : : */
4828 : :
4829 : : int
4830 : 0 : ext4_reserve_inode_write(handle_t *handle, struct inode *inode,
4831 : : struct ext4_iloc *iloc)
4832 : : {
4833 : : int err;
4834 : :
4835 : : err = ext4_get_inode_loc(inode, iloc);
4836 [ + ]: 8413769 : if (!err) {
4837 : : BUFFER_TRACE(iloc->bh, "get_write_access");
4838 : 8415469 : err = ext4_journal_get_write_access(handle, iloc->bh);
4839 [ - + ]: 8409874 : if (err) {
4840 : 0 : brelse(iloc->bh);
4841 : 0 : iloc->bh = NULL;
4842 : : }
4843 : : }
4844 [ - + ]: 8408174 : ext4_std_error(inode->i_sb, err);
4845 : 0 : return err;
4846 : : }
4847 : :
4848 : : /*
4849 : : * Expand an inode by new_extra_isize bytes.
4850 : : * Returns 0 on success or negative error number on failure.
4851 : : */
4852 : 0 : static int ext4_expand_extra_isize(struct inode *inode,
4853 : : unsigned int new_extra_isize,
4854 : : struct ext4_iloc iloc,
4855 : : handle_t *handle)
4856 : : {
4857 : : struct ext4_inode *raw_inode;
4858 : : struct ext4_xattr_ibody_header *header;
4859 : :
4860 [ # # ]: 0 : if (EXT4_I(inode)->i_extra_isize >= new_extra_isize)
4861 : : return 0;
4862 : :
4863 : 0 : raw_inode = ext4_raw_inode(&iloc);
4864 : :
4865 : 0 : header = IHDR(inode, raw_inode);
4866 : :
4867 : : /* No extended attributes present */
4868 [ # # ][ # # ]: 0 : if (!ext4_test_inode_state(inode, EXT4_STATE_XATTR) ||
4869 : 0 : header->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC)) {
4870 [ # # ]: 0 : memset((void *)raw_inode + EXT4_GOOD_OLD_INODE_SIZE, 0,
4871 : : new_extra_isize);
4872 : 0 : EXT4_I(inode)->i_extra_isize = new_extra_isize;
4873 : : return 0;
4874 : : }
4875 : :
4876 : : /* try to expand with EAs present */
4877 : 0 : return ext4_expand_extra_isize_ea(inode, new_extra_isize,
4878 : : raw_inode, handle);
4879 : : }
4880 : :
4881 : : /*
4882 : : * What we do here is to mark the in-core inode as clean with respect to inode
4883 : : * dirtiness (it may still be data-dirty).
4884 : : * This means that the in-core inode may be reaped by prune_icache
4885 : : * without having to perform any I/O. This is a very good thing,
4886 : : * because *any* task may call prune_icache - even ones which
4887 : : * have a transaction open against a different journal.
4888 : : *
4889 : : * Is this cheating? Not really. Sure, we haven't written the
4890 : : * inode out, but prune_icache isn't a user-visible syncing function.
4891 : : * Whenever the user wants stuff synced (sys_sync, sys_msync, sys_fsync)
4892 : : * we start and wait on commits.
4893 : : */
4894 : 0 : int ext4_mark_inode_dirty(handle_t *handle, struct inode *inode)
4895 : : {
4896 : 0 : struct ext4_iloc iloc;
4897 : 7640684 : struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
4898 : : static unsigned int mnt_count;
4899 : : int err, ret;
4900 : :
4901 : : might_sleep();
4902 : 7640684 : trace_ext4_mark_inode_dirty(inode, _RET_IP_);
4903 : 7640722 : err = ext4_reserve_inode_write(handle, inode, &iloc);
4904 [ + ][ - + ]: 7640146 : if (ext4_handle_valid(handle) &&
4905 [ # # ]: 0 : EXT4_I(inode)->i_extra_isize < sbi->s_want_extra_isize &&
4906 : : !ext4_test_inode_state(inode, EXT4_STATE_NO_EXPAND)) {
4907 : : /*
4908 : : * We need extra buffer credits since we may write into EA block
4909 : : * with this same handle. If journal_extend fails, then it will
4910 : : * only result in a minor loss of functionality for that inode.
4911 : : * If this is felt to be critical, then e2fsck should be run to
4912 : : * force a large enough s_min_extra_isize.
4913 : : */
4914 [ # # ]: 0 : if ((jbd2_journal_extend(handle,
4915 [ # # ][ # # ]: 0 : EXT4_DATA_TRANS_BLOCKS(inode->i_sb))) == 0) {
[ # # ]
4916 : 0 : ret = ext4_expand_extra_isize(inode,
4917 : : sbi->s_want_extra_isize,
4918 : : iloc, handle);
4919 [ # # ]: 0 : if (ret) {
4920 : : ext4_set_inode_state(inode,
4921 : : EXT4_STATE_NO_EXPAND);
4922 [ # # ]: 0 : if (mnt_count !=
4923 : 0 : le16_to_cpu(sbi->s_es->s_mnt_count)) {
4924 : 0 : ext4_warning(inode->i_sb,
4925 : : "Unable to expand inode %lu. Delete"
4926 : : " some EAs or run e2fsck.",
4927 : : inode->i_ino);
4928 : 0 : mnt_count =
4929 : 0 : le16_to_cpu(sbi->s_es->s_mnt_count);
4930 : : }
4931 : : }
4932 : : }
4933 : : }
4934 [ + ]: 7640146 : if (!err)
4935 : 7641563 : err = ext4_mark_iloc_dirty(handle, inode, &iloc);
4936 : 1233 : return err;
4937 : : }
4938 : :
4939 : : /*
4940 : : * ext4_dirty_inode() is called from __mark_inode_dirty()
4941 : : *
4942 : : * We're really interested in the case where a file is being extended.
4943 : : * i_size has been changed by generic_commit_write() and we thus need
4944 : : * to include the updated inode in the current transaction.
4945 : : *
4946 : : * Also, dquot_alloc_block() will always dirty the inode when blocks
4947 : : * are allocated to the file.
4948 : : *
4949 : : * If the inode is marked synchronous, we don't honour that here - doing
4950 : : * so would cause a commit on atime updates, which we don't bother doing.
4951 : : * We handle synchronous inodes at the highest possible level.
4952 : : */
4953 : 0 : void ext4_dirty_inode(struct inode *inode, int flags)
4954 : : {
4955 : : handle_t *handle;
4956 : :
4957 : : handle = ext4_journal_start(inode, EXT4_HT_INODE, 2);
4958 [ + ]: 4707903 : if (IS_ERR(handle))
4959 : : goto out;
4960 : :
4961 : 4708098 : ext4_mark_inode_dirty(handle, inode);
4962 : :
4963 : 4711010 : ext4_journal_stop(handle);
4964 : : out:
4965 : 0 : return;
4966 : : }
4967 : :
4968 : : #if 0
4969 : : /*
4970 : : * Bind an inode's backing buffer_head into this transaction, to prevent
4971 : : * it from being flushed to disk early. Unlike
4972 : : * ext4_reserve_inode_write, this leaves behind no bh reference and
4973 : : * returns no iloc structure, so the caller needs to repeat the iloc
4974 : : * lookup to mark the inode dirty later.
4975 : : */
4976 : : static int ext4_pin_inode(handle_t *handle, struct inode *inode)
4977 : : {
4978 : : struct ext4_iloc iloc;
4979 : :
4980 : : int err = 0;
4981 : : if (handle) {
4982 : : err = ext4_get_inode_loc(inode, &iloc);
4983 : : if (!err) {
4984 : : BUFFER_TRACE(iloc.bh, "get_write_access");
4985 : : err = jbd2_journal_get_write_access(handle, iloc.bh);
4986 : : if (!err)
4987 : : err = ext4_handle_dirty_metadata(handle,
4988 : : NULL,
4989 : : iloc.bh);
4990 : : brelse(iloc.bh);
4991 : : }
4992 : : }
4993 : : ext4_std_error(inode->i_sb, err);
4994 : : return err;
4995 : : }
4996 : : #endif
4997 : :
4998 : 0 : int ext4_change_inode_journal_flag(struct inode *inode, int val)
4999 : : {
5000 : 0 : journal_t *journal;
5001 : : handle_t *handle;
5002 : : int err;
5003 : :
5004 : : /*
5005 : : * We have to be very careful here: changing a data block's
5006 : : * journaling status dynamically is dangerous. If we write a
5007 : : * data block to the journal, change the status and then delete
5008 : : * that block, we risk forgetting to revoke the old log record
5009 : : * from the journal and so a subsequent replay can corrupt data.
5010 : : * So, first we make sure that the journal is empty and that
5011 : : * nobody is changing anything.
5012 : : */
5013 : :
5014 : 0 : journal = EXT4_JOURNAL(inode);
5015 [ # # ]: 0 : if (!journal)
5016 : : return 0;
5017 [ # # ]: 0 : if (is_journal_aborted(journal))
5018 : : return -EROFS;
5019 : : /* We have to allocate physical blocks for delalloc blocks
5020 : : * before flushing journal. otherwise delalloc blocks can not
5021 : : * be allocated any more. even more truncate on delalloc blocks
5022 : : * could trigger BUG by flushing delalloc blocks in journal.
5023 : : * There is no delalloc block in non-journal data mode.
5024 : : */
5025 [ # # ][ # # ]: 0 : if (val && test_opt(inode->i_sb, DELALLOC)) {
5026 : 0 : err = ext4_alloc_da_blocks(inode);
5027 [ # # ]: 0 : if (err < 0)
5028 : : return err;
5029 : : }
5030 : :
5031 : : /* Wait for all existing dio workers */
5032 : : ext4_inode_block_unlocked_dio(inode);
5033 : 0 : inode_dio_wait(inode);
5034 : :
5035 : 0 : jbd2_journal_lock_updates(journal);
5036 : :
5037 : : /*
5038 : : * OK, there are no updates running now, and all cached data is
5039 : : * synced to disk. We are now in a completely consistent state
5040 : : * which doesn't have anything in the journal, and we know that
5041 : : * no filesystem updates are running, so it is safe to modify
5042 : : * the inode's in-core data-journaling state flag now.
5043 : : */
5044 : :
5045 [ # # ]: 0 : if (val)
5046 : : ext4_set_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5047 : : else {
5048 : 0 : jbd2_journal_flush(journal);
5049 : : ext4_clear_inode_flag(inode, EXT4_INODE_JOURNAL_DATA);
5050 : : }
5051 : 0 : ext4_set_aops(inode);
5052 : :
5053 : 0 : jbd2_journal_unlock_updates(journal);
5054 : : ext4_inode_resume_unlocked_dio(inode);
5055 : :
5056 : : /* Finally we can mark the inode as dirty. */
5057 : :
5058 : : handle = ext4_journal_start(inode, EXT4_HT_INODE, 1);
5059 [ # # ]: 0 : if (IS_ERR(handle))
5060 : 0 : return PTR_ERR(handle);
5061 : :
5062 : 0 : err = ext4_mark_inode_dirty(handle, inode);
5063 : : ext4_handle_sync(handle);
5064 : 0 : ext4_journal_stop(handle);
5065 [ # # ]: 0 : ext4_std_error(inode->i_sb, err);
5066 : :
5067 : 0 : return err;
5068 : : }
5069 : :
5070 : 0 : static int ext4_bh_unmapped(handle_t *handle, struct buffer_head *bh)
5071 : : {
5072 : 0 : return !buffer_mapped(bh);
5073 : : }
5074 : :
5075 : 0 : int ext4_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
5076 : : {
5077 : 205379 : struct page *page = vmf->page;
5078 : : loff_t size;
5079 : : unsigned long len;
5080 : : int ret;
5081 : 205379 : struct file *file = vma->vm_file;
5082 : 0 : struct inode *inode = file_inode(file);
5083 : 205379 : struct address_space *mapping = inode->i_mapping;
5084 : : handle_t *handle;
5085 : : get_block_t *get_block;
5086 : 205379 : int retries = 0;
5087 : :
5088 : 410180 : sb_start_pagefault(inode->i_sb);
5089 : 205197 : file_update_time(vma->vm_file);
5090 : : /* Delalloc case is easy... */
5091 [ + ][ + ]: 409790 : if (test_opt(inode->i_sb, DELALLOC) &&
5092 [ + ]: 205113 : !ext4_should_journal_data(inode) &&
5093 : 205347 : !ext4_nonda_switch(inode->i_sb)) {
5094 : : do {
5095 : 205176 : ret = __block_page_mkwrite(vma, vmf,
5096 : : ext4_da_get_block_prep);
5097 [ # # ]: 0 : } while (ret == -ENOSPC &&
5098 [ - + ]: 205594 : ext4_should_retry_alloc(inode->i_sb, &retries));
5099 : : goto out_ret;
5100 : : }
5101 : :
5102 : : lock_page(page);
5103 : : size = i_size_read(inode);
5104 : : /* Page got truncated from under us? */
5105 [ # # ][ # # ]: 0 : if (page->mapping != mapping || page_offset(page) > size) {
5106 : 0 : unlock_page(page);
5107 : : ret = VM_FAULT_NOPAGE;
5108 : 0 : goto out;
5109 : : }
5110 : :
5111 [ # # ]: 0 : if (page->index == size >> PAGE_CACHE_SHIFT)
5112 : 0 : len = size & ~PAGE_CACHE_MASK;
5113 : : else
5114 : : len = PAGE_CACHE_SIZE;
5115 : : /*
5116 : : * Return if we have all the buffers mapped. This avoids the need to do
5117 : : * journal_start/journal_stop which can block and take a long time
5118 : : */
5119 [ # # ]: 0 : if (page_has_buffers(page)) {
5120 [ # # ][ # # ]: 0 : if (!ext4_walk_page_buffers(NULL, page_buffers(page),
5121 : : 0, len, NULL,
5122 : : ext4_bh_unmapped)) {
5123 : : /* Wait so that we don't change page under IO */
5124 : 0 : wait_for_stable_page(page);
5125 : : ret = VM_FAULT_LOCKED;
5126 : 0 : goto out;
5127 : : }
5128 : : }
5129 : 0 : unlock_page(page);
5130 : : /* OK, we need to fill the hole... */
5131 [ # # ]: 0 : if (ext4_should_dioread_nolock(inode))
5132 : : get_block = ext4_get_block_write;
5133 : : else
5134 : : get_block = ext4_get_block;
5135 : : retry_alloc:
5136 : 0 : handle = ext4_journal_start(inode, EXT4_HT_WRITE_PAGE,
5137 : : ext4_writepage_trans_blocks(inode));
5138 [ # # ]: 0 : if (IS_ERR(handle)) {
5139 : : ret = VM_FAULT_SIGBUS;
5140 : : goto out;
5141 : : }
5142 : 0 : ret = __block_page_mkwrite(vma, vmf, get_block);
5143 [ # # ][ # # ]: 0 : if (!ret && ext4_should_journal_data(inode)) {
5144 [ # # ][ # # ]: 0 : if (ext4_walk_page_buffers(handle, page_buffers(page), 0,
5145 : : PAGE_CACHE_SIZE, NULL, do_journal_get_write_access)) {
5146 : 0 : unlock_page(page);
5147 : : ret = VM_FAULT_SIGBUS;
5148 : 0 : ext4_journal_stop(handle);
5149 : 0 : goto out;
5150 : : }
5151 : : ext4_set_inode_state(inode, EXT4_STATE_JDATA);
5152 : : }
5153 : 0 : ext4_journal_stop(handle);
5154 [ # # ][ # # ]: 0 : if (ret == -ENOSPC && ext4_should_retry_alloc(inode->i_sb, &retries))
5155 : : goto retry_alloc;
5156 : : out_ret:
5157 : : ret = block_page_mkwrite_return(ret);
5158 : : out:
5159 : 205596 : sb_end_pagefault(inode->i_sb);
5160 : 205596 : return ret;
5161 : : }
|