LCOV - code coverage report
Current view: top level - fs - fs-writeback.c (source / functions)
Test: coverage.info                         Date: 2014-02-18

                     Hit      Total    Coverage
        Lines:       265        390      67.9 %
        Functions:    31         41      75.6 %
        Branches:    173        274      63.1 %

           Branch data     Line data    Source code
       1                 :            : /*
       2                 :            :  * fs/fs-writeback.c
       3                 :            :  *
       4                 :            :  * Copyright (C) 2002, Linus Torvalds.
       5                 :            :  *
       6                 :            :  * Contains all the functions related to writing back and waiting
       7                 :            :  * upon dirty inodes against superblocks, and writing back dirty
       8                 :            :  * pages against inodes.  i.e. data writeback.  Writeout of the
       9                 :            :  * inode itself is not handled here.
      10                 :            :  *
      11                 :            :  * 10Apr2002    Andrew Morton
      12                 :            :  *              Split out of fs/inode.c
      13                 :            :  *              Additions for address_space-based writeback
      14                 :            :  */
      15                 :            : 
      16                 :            : #include <linux/kernel.h>
      17                 :            : #include <linux/export.h>
      18                 :            : #include <linux/spinlock.h>
      19                 :            : #include <linux/slab.h>
      20                 :            : #include <linux/sched.h>
      21                 :            : #include <linux/fs.h>
      22                 :            : #include <linux/mm.h>
      23                 :            : #include <linux/pagemap.h>
      24                 :            : #include <linux/kthread.h>
      25                 :            : #include <linux/writeback.h>
      26                 :            : #include <linux/blkdev.h>
      27                 :            : #include <linux/backing-dev.h>
      28                 :            : #include <linux/tracepoint.h>
      29                 :            : #include <linux/device.h>
      30                 :            : #include "internal.h"
      31                 :            : 
      32                 :            : /*
      33                 :            :  * 4MB minimal write chunk size
      34                 :            :  */
      35                 :            : #define MIN_WRITEBACK_PAGES     (4096UL >> (PAGE_CACHE_SHIFT - 10))
      36                 :            : 
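A worked expansion of MIN_WRITEBACK_PAGES may help here; the arithmetic below
is a sketch assuming the common 4 KB page size, not code from this file:

        /*
         * Sketch, assuming PAGE_CACHE_SHIFT == 12 (4 KB pages).  4096UL is
         * the chunk size in KB; the shift converts KB to pages:
         *
         *     4096 >> (12 - 10) = 4096 >> 2 = 1024 pages = 4 MB
         *
         * With 64 KB pages (PAGE_CACHE_SHIFT == 16): 4096 >> 6 = 64 pages,
         * which is still 4 MB.
         */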
      37                 :            : /*
      38                 :            :  * Passed into wb_writeback(), essentially a subset of writeback_control
      39                 :            :  */
      40                 :            : struct wb_writeback_work {
      41                 :            :         long nr_pages;
      42                 :            :         struct super_block *sb;
      43                 :            :         /*
      44                 :            :          * Write only inodes dirtied before this time. Don't forget to set
      45                 :            :          * older_than_this_is_set when you set this.
      46                 :            :          */
      47                 :            :         unsigned long older_than_this;
      48                 :            :         enum writeback_sync_modes sync_mode;
      49                 :            :         unsigned int tagged_writepages:1;
      50                 :            :         unsigned int for_kupdate:1;
      51                 :            :         unsigned int range_cyclic:1;
      52                 :            :         unsigned int for_background:1;
      53                 :            :         unsigned int for_sync:1;        /* sync(2) WB_SYNC_ALL writeback */
      54                 :            :         unsigned int older_than_this_is_set:1;
      55                 :            :         enum wb_reason reason;          /* why was writeback initiated? */
      56                 :            : 
      57                 :            :         struct list_head list;          /* pending work list */
      58                 :            :         struct completion *done;        /* set if the caller waits */
      59                 :            : };
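As a rough illustration of how this structure gets filled in, a minimal sketch
of queuing a data-integrity (sync(2)-style) work item for one superblock
follows; the helper name is invented and the field values are assumptions
modeled on the flags documented above:

        /* Hypothetical sketch: queue WB_SYNC_ALL writeback for one sb. */
        static void example_queue_sync_work(struct backing_dev_info *bdi,
                                            struct super_block *sb,
                                            struct completion *done)
        {
                struct wb_writeback_work *work;

                work = kzalloc(sizeof(*work), GFP_KERNEL);
                if (!work)
                        return;         /* real callers need a fallback */

                work->sb                     = sb;
                work->sync_mode              = WB_SYNC_ALL;
                work->nr_pages               = LONG_MAX;
                work->for_sync               = 1;
                work->older_than_this        = jiffies;
                work->older_than_this_is_set = 1; /* as the comment demands */
                work->reason                 = WB_REASON_SYNC;
                work->done                   = done; /* caller waits on this */

                bdi_queue_work(bdi, work);   /* defined later in this file */
        }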
      60                 :            : 
      61                 :            : /**
      62                 :            :  * writeback_in_progress - determine whether there is writeback in progress
      63                 :            :  * @bdi: the device's backing_dev_info structure.
      64                 :            :  *
      65                 :            :  * Determine whether there is writeback waiting to be handled against a
      66                 :            :  * backing device.
      67                 :            :  */
      68                 :          0 : int writeback_in_progress(struct backing_dev_info *bdi)
      69                 :            : {
      70                 :      28538 :         return test_bit(BDI_writeback_running, &bdi->state);
      71                 :            : }
      72                 :            : EXPORT_SYMBOL(writeback_in_progress);
      73                 :            : 
      74                 :            : static inline struct backing_dev_info *inode_to_bdi(struct inode *inode)
      75                 :            : {
      76                 :            :         struct super_block *sb = inode->i_sb;
      77                 :            : 
       78 [ #  # ][ #  # ]:     199593 :         if (sb_is_blkdev_sb(sb))
          [ +  + ][ -  + ]
          [ #  # ][ #  # ]
          [ #  # ][ #  # ]
      79                 :       1268 :                 return inode->i_mapping->backing_dev_info;
      80                 :            : 
      81                 :     198339 :         return sb->s_bdi;
      82                 :            : }
      83                 :            : 
      84                 :            : static inline struct inode *wb_inode(struct list_head *head)
      85                 :            : {
      86                 :            :         return list_entry(head, struct inode, i_wb_list);
      87                 :            : }
      88                 :            : 
      89                 :            : /*
      90                 :            :  * Include the creation of the trace points after defining the
      91                 :            :  * wb_writeback_work structure and inline functions so that the definition
      92                 :            :  * remains local to this file.
      93                 :            :  */
      94                 :            : #define CREATE_TRACE_POINTS
      95                 :            : #include <trace/events/writeback.h>
      96                 :            : 
      97                 :          0 : static void bdi_queue_work(struct backing_dev_info *bdi,
      98                 :            :                            struct wb_writeback_work *work)
      99                 :            : {
     100                 :            :         trace_writeback_queue(bdi, work);
     101                 :            : 
     102                 :            :         spin_lock_bh(&bdi->wb_lock);
     103                 :       5901 :         list_add_tail(&work->list, &bdi->work_list);
     104                 :            :         spin_unlock_bh(&bdi->wb_lock);
     105                 :            : 
     106                 :       5896 :         mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
     107                 :       5901 : }
     108                 :            : 
     109                 :            : static void
     110                 :          0 : __bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
     111                 :            :                       bool range_cyclic, enum wb_reason reason)
     112                 :            : {
     113                 :            :         struct wb_writeback_work *work;
     114                 :            : 
     115                 :            :         /*
     116                 :            :          * This is WB_SYNC_NONE writeback, so if allocation fails just
     117                 :            :          * wakeup the thread for old dirty data writeback
     118                 :            :          */
     119                 :            :         work = kzalloc(sizeof(*work), GFP_ATOMIC);
     120         [ -  + ]:       2939 :         if (!work) {
     121                 :            :                 trace_writeback_nowork(bdi);
     122                 :          0 :                 mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
     123                 :       2939 :                 return;
     124                 :            :         }
     125                 :            : 
     126                 :       2939 :         work->sync_mode      = WB_SYNC_NONE;
     127                 :       2939 :         work->nr_pages       = nr_pages;
     128                 :       2939 :         work->range_cyclic = range_cyclic;
     129                 :       2939 :         work->reason = reason;
     130                 :            : 
     131                 :       2939 :         bdi_queue_work(bdi, work);
     132                 :            : }
     133                 :            : 
     134                 :            : /**
     135                 :            :  * bdi_start_writeback - start writeback
     136                 :            :  * @bdi: the backing device to write from
     137                 :            :  * @nr_pages: the number of pages to write
     138                 :            :  * @reason: reason why some writeback work was initiated
     139                 :            :  *
     140                 :            :  * Description:
      141                 :            :  *   This does WB_SYNC_NONE opportunistic writeback. The IO is merely
      142                 :            :  *   started by the time this function returns; we make no guarantees
      143                 :            :  *   about completion. The caller need not hold the sb s_umount semaphore.
     144                 :            :  *
     145                 :            :  */
     146                 :          0 : void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
     147                 :            :                         enum wb_reason reason)
     148                 :            : {
     149                 :          0 :         __bdi_start_writeback(bdi, nr_pages, true, reason);
     150                 :          0 : }
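A minimal sketch of a typical caller follows; the helper name and the
1024-page batch are invented, while writeback_in_progress() and the
WB_REASON_TRY_TO_FREE_PAGES value come from the existing writeback API:

        /* Hypothetical caller: nudge the flusher when memory gets tight. */
        static void example_kick_writeback(struct backing_dev_info *bdi)
        {
                if (writeback_in_progress(bdi))
                        return;         /* flusher is already running */

                bdi_start_writeback(bdi, 1024, WB_REASON_TRY_TO_FREE_PAGES);
        }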
     151                 :            : 
     152                 :            : /**
     153                 :            :  * bdi_start_background_writeback - start background writeback
     154                 :            :  * @bdi: the backing device to write from
     155                 :            :  *
     156                 :            :  * Description:
     157                 :            :  *   This makes sure WB_SYNC_NONE background writeback happens. When
     158                 :            :  *   this function returns, it is only guaranteed that for given BDI
     159                 :            :  *   some IO is happening if we are over background dirty threshold.
     160                 :            :  *   Caller need not hold sb s_umount semaphore.
     161                 :            :  */
     162                 :          0 : void bdi_start_background_writeback(struct backing_dev_info *bdi)
     163                 :            : {
     164                 :            :         /*
     165                 :            :          * We just wake up the flusher thread. It will perform background
     166                 :            :          * writeback as soon as there is no other work to do.
     167                 :            :          */
     168                 :            :         trace_writeback_wake_background(bdi);
     169                 :          0 :         mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
     170                 :         30 : }
     171                 :            : 
     172                 :            : /*
     173                 :            :  * Remove the inode from the writeback list it is on.
     174                 :            :  */
     175                 :          0 : void inode_wb_list_del(struct inode *inode)
     176                 :            : {
     177                 :            :         struct backing_dev_info *bdi = inode_to_bdi(inode);
     178                 :            : 
     179                 :            :         spin_lock(&bdi->wb.list_lock);
     180                 :      79995 :         list_del_init(&inode->i_wb_list);
     181                 :            :         spin_unlock(&bdi->wb.list_lock);
     182                 :      79995 : }
     183                 :            : 
     184                 :            : /*
     185                 :            :  * Redirty an inode: set its when-it-was dirtied timestamp and move it to the
     186                 :            :  * furthest end of its superblock's dirty-inode list.
     187                 :            :  *
     188                 :            :  * Before stamping the inode's ->dirtied_when, we check to see whether it is
     189                 :            :  * already the most-recently-dirtied inode on the b_dirty list.  If that is
     190                 :            :  * the case then the inode must have been redirtied while it was being written
     191                 :            :  * out and we don't reset its dirtied_when.
     192                 :            :  */
     193                 :          0 : static void redirty_tail(struct inode *inode, struct bdi_writeback *wb)
     194                 :            : {
     195         [ -  + ]:       1280 :         assert_spin_locked(&wb->list_lock);
     196         [ +  + ]:       1280 :         if (!list_empty(&wb->b_dirty)) {
     197                 :            :                 struct inode *tail;
     198                 :            : 
     199                 :            :                 tail = wb_inode(wb->b_dirty.next);
     200         [ +  + ]:        383 :                 if (time_before(inode->dirtied_when, tail->dirtied_when))
     201                 :        277 :                         inode->dirtied_when = jiffies;
     202                 :            :         }
     203                 :          0 :         list_move(&inode->i_wb_list, &wb->b_dirty);
     204                 :          0 : }
     205                 :            : 
     206                 :            : /*
     207                 :            :  * requeue inode for re-scanning after bdi->b_io list is exhausted.
     208                 :            :  */
     209                 :          0 : static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
     210                 :            : {
     211         [ -  + ]:        253 :         assert_spin_locked(&wb->list_lock);
     212                 :        253 :         list_move(&inode->i_wb_list, &wb->b_more_io);
     213                 :        253 : }
     214                 :            : 
     215                 :          0 : static void inode_sync_complete(struct inode *inode)
     216                 :            : {
     217                 :      40782 :         inode->i_state &= ~I_SYNC;
      218                 :            :         /* If inode is clean and unused, put it into LRU now... */
     219                 :      40782 :         inode_add_lru(inode);
     220                 :            :         /* Waiters must see I_SYNC cleared before being woken up */
     221                 :      40782 :         smp_mb();
     222                 :      40782 :         wake_up_bit(&inode->i_state, __I_SYNC);
     223                 :      40782 : }
     224                 :            : 
     225                 :            : static bool inode_dirtied_after(struct inode *inode, unsigned long t)
     226                 :            : {
     227         [ +  + ]:      44498 :         bool ret = time_after(inode->dirtied_when, t);
     228                 :            : #ifndef CONFIG_64BIT
     229                 :            :         /*
     230                 :            :          * For inodes being constantly redirtied, dirtied_when can get stuck.
     231                 :            :          * It _appears_ to be in the future, but is actually in distant past.
     232                 :            :          * This test is necessary to prevent such wrapped-around relative times
     233                 :            :          * from permanently stopping the whole bdi writeback.
     234                 :            :          */
     235 [ +  + ][ -  + ]:      44498 :         ret = ret && time_before_eq(inode->dirtied_when, jiffies);
     236                 :            : #endif
     237                 :            :         return ret;
     238                 :            : }
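The #ifndef CONFIG_64BIT guard above is subtle; a worked example with invented
32-bit values shows the wrap-around it defends against:

        /*
         * Illustrative 32-bit values only.  A constantly-redirtied inode can
         * keep a dirtied_when stamp from more than half a jiffies wrap ago:
         *
         *     inode->dirtied_when = 0x00000010;   ancient stamp
         *     t = jiffies         = 0x90000000;   now
         *
         *     time_after(0x00000010, 0x90000000)     -> true: the signed
         *         difference wraps, so the ancient stamp _appears_ future
         *     time_before_eq(0x00000010, 0x90000000) -> false: rejects the
         *         bogus "future" stamp and forces ret to false
         *
         * so the inode is still treated as expired and writeback proceeds.
         */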
     239                 :            : 
     240                 :            : /*
     241                 :            :  * Move expired (dirtied before work->older_than_this) dirty inodes from
     242                 :            :  * @delaying_queue to @dispatch_queue.
     243                 :            :  */
     244                 :          0 : static int move_expired_inodes(struct list_head *delaying_queue,
     245                 :            :                                struct list_head *dispatch_queue,
     246                 :            :                                struct wb_writeback_work *work)
     247                 :            : {
     248                 :      12072 :         LIST_HEAD(tmp);
     249                 :            :         struct list_head *pos, *node;
     250                 :            :         struct super_block *sb = NULL;
     251                 :      44498 :         struct inode *inode;
     252                 :            :         int do_sb_sort = 0;
     253                 :            :         int moved = 0;
     254                 :            : 
     255 [ -  + ][ #  # ]:      12072 :         WARN_ON_ONCE(!work->older_than_this_is_set);
                 [ #  # ]
     256         [ +  + ]:      52635 :         while (!list_empty(delaying_queue)) {
     257                 :      44498 :                 inode = wb_inode(delaying_queue->prev);
     258         [ +  + ]:      44498 :                 if (inode_dirtied_after(inode, work->older_than_this))
     259                 :            :                         break;
     260                 :      40563 :                 list_move(&inode->i_wb_list, &tmp);
     261                 :      40563 :                 moved++;
     262         [ +  + ]:      40563 :                 if (sb_is_blkdev_sb(inode->i_sb))
     263                 :       1285 :                         continue;
     264 [ +  + ][ -  + ]:      39278 :                 if (sb && sb != inode->i_sb)
     265                 :            :                         do_sb_sort = 1;
     266                 :      40563 :                 sb = inode->i_sb;
     267                 :            :         }
     268                 :            : 
     269                 :            :         /* just one sb in list, splice to dispatch_queue and we're done */
     270         [ +  - ]:      12072 :         if (!do_sb_sort) {
     271                 :            :                 list_splice(&tmp, dispatch_queue);
     272                 :            :                 goto out;
     273                 :            :         }
     274                 :            : 
     275                 :            :         /* Move inodes from one superblock together */
     276         [ #  # ]:          0 :         while (!list_empty(&tmp)) {
     277                 :          0 :                 sb = wb_inode(tmp.prev)->i_sb;
     278         [ #  # ]:          0 :                 list_for_each_prev_safe(pos, node, &tmp) {
     279                 :            :                         inode = wb_inode(pos);
     280         [ #  # ]:          0 :                         if (inode->i_sb == sb)
     281                 :          0 :                                 list_move(&inode->i_wb_list, dispatch_queue);
     282                 :            :                 }
     283                 :            :         }
     284                 :            : out:
     285                 :      12072 :         return moved;
     286                 :            : }
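A short worked trace of the expiry scan, with invented dirtied_when values
(b_dirty keeps the most recently dirtied inodes at the head, so the loop walks
from the tail):

        /*
         * Worked trace, invented timestamps:
         *
         *     work->older_than_this = 1000
         *     b_dirty (head .. tail):  C(dirtied 1500)  B(900)  A(400)
         *
         *     A: inode_dirtied_after(400, 1000)?  no  -> moved, moved = 1
         *     B: inode_dirtied_after(900, 1000)?  no  -> moved, moved = 2
         *     C: inode_dirtied_after(1500, 1000)? yes -> break
         *
         * A and B are spliced onto dispatch_queue; C stays on b_dirty, and
         * the scan can stop there because inodes nearer the head are newer.
         */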
     287                 :            : 
     288                 :            : /*
     289                 :            :  * Queue all expired dirty inodes for io, eldest first.
     290                 :            :  * Before
     291                 :            :  *         newly dirtied     b_dirty    b_io    b_more_io
     292                 :            :  *         =============>    gf         edc     BA
     293                 :            :  * After
     294                 :            :  *         newly dirtied     b_dirty    b_io    b_more_io
     295                 :            :  *         =============>    g          fBAedc
     296                 :            :  *                                           |
     297                 :            :  *                                           +--> dequeue for IO
     298                 :            :  */
     299                 :          0 : static void queue_io(struct bdi_writeback *wb, struct wb_writeback_work *work)
     300                 :            : {
     301                 :            :         int moved;
     302         [ -  + ]:      12072 :         assert_spin_locked(&wb->list_lock);
     303                 :      12072 :         list_splice_init(&wb->b_more_io, &wb->b_io);
     304                 :      12072 :         moved = move_expired_inodes(&wb->b_dirty, &wb->b_io, work);
     305                 :            :         trace_writeback_queue_io(wb, work, moved);
     306                 :          0 : }
     307                 :            : 
     308                 :          0 : static int write_inode(struct inode *inode, struct writeback_control *wbc)
     309                 :            : {
     310                 :            :         int ret;
     311                 :            : 
     312 [ +  - ][ +  - ]:      38757 :         if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode)) {
     313                 :            :                 trace_writeback_write_inode_start(inode, wbc);
     314                 :      38757 :                 ret = inode->i_sb->s_op->write_inode(inode, wbc);
     315                 :            :                 trace_writeback_write_inode(inode, wbc);
     316                 :      38757 :                 return ret;
     317                 :            :         }
     318                 :            :         return 0;
     319                 :            : }
     320                 :            : 
     321                 :            : /*
     322                 :            :  * Wait for writeback on an inode to complete. Called with i_lock held.
     323                 :            :  * Caller must make sure inode cannot go away when we drop i_lock.
     324                 :            :  */
     325                 :          0 : static void __inode_wait_for_writeback(struct inode *inode)
     326                 :            :         __releases(inode->i_lock)
     327                 :            :         __acquires(inode->i_lock)
     328                 :            : {
     329                 :    3966362 :         DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
     330                 :            :         wait_queue_head_t *wqh;
     331                 :            : 
     332                 :    1983181 :         wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
     333         [ +  + ]:    1983167 :         while (inode->i_state & I_SYNC) {
     334                 :            :                 spin_unlock(&inode->i_lock);
     335                 :         17 :                 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
     336                 :            :                 spin_lock(&inode->i_lock);
     337                 :            :         }
     338                 :    1983150 : }
     339                 :            : 
     340                 :            : /*
     341                 :            :  * Wait for writeback on an inode to complete. Caller must have inode pinned.
     342                 :            :  */
     343                 :          0 : void inode_wait_for_writeback(struct inode *inode)
     344                 :            : {
     345                 :            :         spin_lock(&inode->i_lock);
     346                 :    1983178 :         __inode_wait_for_writeback(inode);
     347                 :            :         spin_unlock(&inode->i_lock);
     348                 :    1983182 : }
     349                 :            : 
     350                 :            : /*
     351                 :            :  * Sleep until I_SYNC is cleared. This function must be called with i_lock
      352                 :            :  * held and drops it. It is aimed at callers not holding any inode reference
     353                 :            :  * so once i_lock is dropped, inode can go away.
     354                 :            :  */
     355                 :          0 : static void inode_sleep_on_writeback(struct inode *inode)
     356                 :            :         __releases(inode->i_lock)
     357                 :            : {
     358                 :          0 :         DEFINE_WAIT(wait);
     359                 :          0 :         wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
     360                 :            :         int sleep;
     361                 :            : 
     362                 :          0 :         prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
     363                 :          0 :         sleep = inode->i_state & I_SYNC;
     364                 :            :         spin_unlock(&inode->i_lock);
     365         [ #  # ]:          0 :         if (sleep)
     366                 :          0 :                 schedule();
     367                 :          0 :         finish_wait(wqh, &wait);
     368                 :          0 : }
     369                 :            : 
     370                 :            : /*
     371                 :            :  * Find proper writeback list for the inode depending on its current state and
     372                 :            :  * possibly also change of its state while we were doing writeback.  Here we
     373                 :            :  * handle things such as livelock prevention or fairness of writeback among
      374                 :            :  * inodes. This function can be called only by the flusher thread - no one else
     375                 :            :  * processes all inodes in writeback lists and requeueing inodes behind flusher
     376                 :            :  * thread's back can have unexpected consequences.
     377                 :            :  */
     378                 :          0 : static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
     379                 :            :                           struct writeback_control *wbc)
     380                 :            : {
     381         [ +  + ]:      40782 :         if (inode->i_state & I_FREEING)
     382                 :            :                 return;
     383                 :            : 
     384                 :            :         /*
     385                 :            :          * Sync livelock prevention. Each inode is tagged and synced in one
     386                 :            :          * shot. If still dirty, it will be redirty_tail()'ed below.  Update
     387                 :            :          * the dirty time to prevent enqueue and sync it again.
     388                 :            :          */
     389 [ +  + ][ +  + ]:      40765 :         if ((inode->i_state & I_DIRTY) &&
     390         [ -  + ]:       1479 :             (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
     391                 :         26 :                 inode->dirtied_when = jiffies;
     392                 :            : 
     393         [ +  + ]:      40765 :         if (wbc->pages_skipped) {
     394                 :            :                 /*
     395                 :            :                  * writeback is not making progress due to locked
     396                 :            :                  * buffers. Skip this inode for now.
     397                 :            :                  */
     398                 :          4 :                 redirty_tail(inode, wb);
     399                 :          4 :                 return;
     400                 :            :         }
     401                 :            : 
     402         [ +  + ]:      40761 :         if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
     403                 :            :                 /*
     404                 :            :                  * We didn't write back all the pages.  nfs_writepages()
      405                 :            :  * sometimes bails out without doing anything.
     406                 :            :                  */
     407         [ +  + ]:       1501 :                 if (wbc->nr_to_write <= 0) {
     408                 :            :                         /* Slice used up. Queue for next turn. */
     409                 :        253 :                         requeue_io(inode, wb);
     410                 :            :                 } else {
     411                 :            :                         /*
     412                 :            :                          * Writeback blocked by something other than
     413                 :            :                          * congestion. Delay the inode for some time to
     414                 :            :                          * avoid spinning on the CPU (100% iowait)
     415                 :            :                          * retrying writeback of the dirty page/inode
     416                 :            :                          * that cannot be performed immediately.
     417                 :            :                          */
     418                 :       1248 :                         redirty_tail(inode, wb);
     419                 :            :                 }
     420         [ +  + ]:      39260 :         } else if (inode->i_state & I_DIRTY) {
     421                 :            :                 /*
     422                 :            :                  * Filesystems can dirty the inode during writeback operations,
     423                 :            :                  * such as delayed allocation during submission or metadata
     424                 :            :                  * updates after data IO completion.
     425                 :            :                  */
     426                 :          4 :                 redirty_tail(inode, wb);
     427                 :            :         } else {
     428                 :            :                 /* The inode is clean. Remove from writeback lists. */
     429                 :      39256 :                 list_del_init(&inode->i_wb_list);
     430                 :            :         }
     431                 :            : }
     432                 :            : 
     433                 :            : /*
     434                 :            :  * Write out an inode and its dirty pages. Do not update the writeback list
     435                 :            :  * linkage. That is left to the caller. The caller is also responsible for
     436                 :            :  * setting I_SYNC flag and calling inode_sync_complete() to clear it.
     437                 :            :  */
     438                 :            : static int
     439                 :          0 : __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
     440                 :            : {
     441                 :      40782 :         struct address_space *mapping = inode->i_mapping;
     442                 :      40782 :         long nr_to_write = wbc->nr_to_write;
     443                 :            :         unsigned dirty;
     444                 :            :         int ret;
     445                 :            : 
     446         [ -  + ]:      40782 :         WARN_ON(!(inode->i_state & I_SYNC));
     447                 :            : 
     448                 :      40782 :         trace_writeback_single_inode_start(inode, wbc, nr_to_write);
     449                 :            : 
     450                 :      40782 :         ret = do_writepages(mapping, wbc);
     451                 :            : 
     452                 :            :         /*
     453                 :            :          * Make sure to wait on the data before writing out the metadata.
     454                 :            :          * This is important for filesystems that modify metadata on data
     455                 :            :          * I/O completion. We don't do it for sync(2) writeback because it has a
     456                 :            :          * separate, external IO completion path and ->sync_fs for guaranteeing
     457                 :            :          * inode metadata is written back correctly.
     458                 :            :          */
     459 [ +  + ][ -  + ]:      40782 :         if (wbc->sync_mode == WB_SYNC_ALL && !wbc->for_sync) {
     460                 :          0 :                 int err = filemap_fdatawait(mapping);
     461         [ #  # ]:          0 :                 if (ret == 0)
     462                 :            :                         ret = err;
     463                 :            :         }
     464                 :            : 
     465                 :            :         /*
     466                 :            :          * Some filesystems may redirty the inode during the writeback
     467                 :            :          * due to delalloc, clear dirty metadata flags right before
     468                 :            :          * write_inode()
     469                 :            :          */
     470                 :            :         spin_lock(&inode->i_lock);
     471                 :            :         /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
     472         [ +  + ]:      40782 :         if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
     473                 :      39278 :                 inode->i_state &= ~I_DIRTY_PAGES;
     474                 :      40782 :         dirty = inode->i_state & I_DIRTY;
     475                 :      40782 :         inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
     476                 :            :         spin_unlock(&inode->i_lock);
     477                 :            :         /* Don't write the inode if only I_DIRTY_PAGES was set */
     478         [ +  + ]:      40782 :         if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
     479                 :      38757 :                 int err = write_inode(inode, wbc);
     480         [ +  - ]:      38757 :                 if (ret == 0)
     481                 :            :                         ret = err;
     482                 :            :         }
     483                 :            :         trace_writeback_single_inode(inode, wbc, nr_to_write);
     484                 :          0 :         return ret;
     485                 :            : }
     486                 :            : 
     487                 :            : /*
     488                 :            :  * Write out an inode's dirty pages. Either the caller has an active reference
     489                 :            :  * on the inode or the inode has I_WILL_FREE set.
     490                 :            :  *
      491                 :            :  * This function is designed to be called for writing back one inode, e.g.
      492                 :            :  * from filesystem code. The flusher thread uses __writeback_single_inode()
      493                 :            :  * and does more thorough writeback list handling in writeback_sb_inodes().
     494                 :            :  */
     495                 :            : static int
     496                 :          0 : writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
     497                 :            :                        struct writeback_control *wbc)
     498                 :            : {
     499                 :            :         int ret = 0;
     500                 :            : 
     501                 :            :         spin_lock(&inode->i_lock);
     502         [ #  # ]:          0 :         if (!atomic_read(&inode->i_count))
     503         [ #  # ]:          0 :                 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
     504                 :            :         else
     505         [ #  # ]:          0 :                 WARN_ON(inode->i_state & I_WILL_FREE);
     506                 :            : 
     507         [ #  # ]:          0 :         if (inode->i_state & I_SYNC) {
     508         [ #  # ]:          0 :                 if (wbc->sync_mode != WB_SYNC_ALL)
     509                 :            :                         goto out;
     510                 :            :                 /*
     511                 :            :                  * It's a data-integrity sync. We must wait. Since callers hold
     512                 :            :                  * inode reference or inode has I_WILL_FREE set, it cannot go
     513                 :            :                  * away under us.
     514                 :            :                  */
     515                 :          0 :                 __inode_wait_for_writeback(inode);
     516                 :            :         }
     517         [ #  # ]:          0 :         WARN_ON(inode->i_state & I_SYNC);
     518                 :            :         /*
     519                 :            :          * Skip inode if it is clean and we have no outstanding writeback in
     520                 :            :          * WB_SYNC_ALL mode. We don't want to mess with writeback lists in this
     521                 :            :          * function since flusher thread may be doing for example sync in
      522                 :            :  * function since the flusher thread may, for example, be doing sync in
      523                 :            :  * parallel; if we moved the inode, it could get skipped. So here we
     524                 :            :          * we have completely cleaned the inode.
     525                 :            :          */
     526 [ #  # ][ #  # ]:          0 :         if (!(inode->i_state & I_DIRTY) &&
     527         [ #  # ]:          0 :             (wbc->sync_mode != WB_SYNC_ALL ||
     528                 :          0 :              !mapping_tagged(inode->i_mapping, PAGECACHE_TAG_WRITEBACK)))
     529                 :            :                 goto out;
     530                 :          0 :         inode->i_state |= I_SYNC;
     531                 :            :         spin_unlock(&inode->i_lock);
     532                 :            : 
     533                 :          0 :         ret = __writeback_single_inode(inode, wbc);
     534                 :            : 
     535                 :            :         spin_lock(&wb->list_lock);
     536                 :            :         spin_lock(&inode->i_lock);
     537                 :            :         /*
     538                 :            :          * If inode is clean, remove it from writeback lists. Otherwise don't
     539                 :            :          * touch it. See comment above for explanation.
     540                 :            :          */
     541         [ #  # ]:          0 :         if (!(inode->i_state & I_DIRTY))
     542                 :          0 :                 list_del_init(&inode->i_wb_list);
     543                 :            :         spin_unlock(&wb->list_lock);
     544                 :          0 :         inode_sync_complete(inode);
     545                 :            : out:
     546                 :            :         spin_unlock(&inode->i_lock);
     547                 :          0 :         return ret;
     548                 :            : }
     549                 :            : 
     550                 :            : static long writeback_chunk_size(struct backing_dev_info *bdi,
     551                 :            :                                  struct wb_writeback_work *work)
     552                 :            : {
     553                 :            :         long pages;
     554                 :            : 
     555                 :            :         /*
     556                 :            :          * WB_SYNC_ALL mode does livelock avoidance by syncing dirty
     557                 :            :          * inodes/pages in one big loop. Setting wbc.nr_to_write=LONG_MAX
     558                 :            :          * here avoids calling into writeback_inodes_wb() more than once.
     559                 :            :          *
     560                 :            :          * The intended call sequence for WB_SYNC_ALL writeback is:
     561                 :            :          *
     562                 :            :          *      wb_writeback()
     563                 :            :          *          writeback_sb_inodes()       <== called only once
     564                 :            :          *              write_cache_pages()     <== called once for each inode
     565                 :            :          *                   (quickly) tag currently dirty pages
     566                 :            :          *                   (maybe slowly) sync all tagged pages
     567                 :            :          */
     568 [ +  + ][ +  + ]:      40782 :         if (work->sync_mode == WB_SYNC_ALL || work->tagged_writepages)
     569                 :            :                 pages = LONG_MAX;
     570                 :            :         else {
     571                 :      40586 :                 pages = min(bdi->avg_write_bandwidth / 2,
     572                 :            :                             global_dirty_limit / DIRTY_SCOPE);
     573                 :      40586 :                 pages = min(pages, work->nr_pages);
     574                 :      40586 :                 pages = round_down(pages + MIN_WRITEBACK_PAGES,
     575                 :            :                                    MIN_WRITEBACK_PAGES);
     576                 :            :         }
     577                 :            : 
     578                 :            :         return pages;
     579                 :            : }
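To make the WB_SYNC_NONE branch concrete, a worked example with assumed inputs
(4 KB pages, so MIN_WRITEBACK_PAGES == 1024):

        /*
         * Worked example, assumed inputs:
         *
         *     bdi->avg_write_bandwidth / 2     = 12800 pages (~100 MB/s disk)
         *     global_dirty_limit / DIRTY_SCOPE =  6400 pages
         *     work->nr_pages                   =  5000 pages
         *
         *     pages = min(12800, 6400)              = 6400
         *     pages = min(6400, 5000)               = 5000
         *     pages = round_down(5000 + 1024, 1024) = 5120   (5 * 1024)
         *
         * Adding MIN_WRITEBACK_PAGES before rounding down guarantees a chunk
         * of at least 4 MB even for very small inputs.
         */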
     580                 :            : 
     581                 :            : /*
     582                 :            :  * Write a portion of b_io inodes which belong to @sb.
     583                 :            :  *
     584                 :            :  * Return the number of pages and/or inodes written.
     585                 :            :  */
     586                 :          0 : static long writeback_sb_inodes(struct super_block *sb,
     587                 :            :                                 struct bdi_writeback *wb,
     588                 :            :                                 struct wb_writeback_work *work)
     589                 :            : {
     590                 :      59717 :         struct writeback_control wbc = {
     591                 :       8531 :                 .sync_mode              = work->sync_mode,
     592                 :       8531 :                 .tagged_writepages      = work->tagged_writepages,
     593                 :       8531 :                 .for_kupdate            = work->for_kupdate,
     594                 :       8531 :                 .for_background         = work->for_background,
     595                 :       8531 :                 .for_sync               = work->for_sync,
     596                 :       8531 :                 .range_cyclic           = work->range_cyclic,
     597                 :            :                 .range_start            = 0,
     598                 :            :                 .range_end              = LLONG_MAX,
     599                 :            :         };
     600                 :       8531 :         unsigned long start_time = jiffies;
     601                 :            :         long write_chunk;
     602                 :            :         long wrote = 0;  /* count both pages and inodes */
     603                 :            : 
     604         [ +  + ]:      48958 :         while (!list_empty(&wb->b_io)) {
     605                 :      42007 :                 struct inode *inode = wb_inode(wb->b_io.prev);
     606                 :            : 
     607         [ +  + ]:      42007 :                 if (inode->i_sb != sb) {
     608         [ +  + ]:       1214 :                         if (work->sb) {
     609                 :            :                                 /*
     610                 :            :                                  * We only want to write back data for this
     611                 :            :                                  * superblock, move all inodes not belonging
     612                 :            :                                  * to it back onto the dirty list.
     613                 :            :                                  */
     614                 :         13 :                                 redirty_tail(inode, wb);
     615                 :         13 :                                 continue;
     616                 :            :                         }
     617                 :            : 
     618                 :            :                         /*
     619                 :            :                          * The inode belongs to a different superblock.
     620                 :            :                          * Bounce back to the caller to unpin this and
     621                 :            :                          * pin the next superblock.
     622                 :            :                          */
     623                 :            :                         break;
     624                 :            :                 }
     625                 :            : 
     626                 :            :                 /*
      627                 :            :  * Don't bother with new inodes or inodes being freed: the first
     628                 :            :                  * kind does not need periodic writeout yet, and for the latter
     629                 :            :                  * kind writeout is handled by the freer.
     630                 :            :                  */
     631                 :            :                 spin_lock(&inode->i_lock);
     632         [ +  + ]:      40793 :                 if (inode->i_state & (I_NEW | I_FREEING | I_WILL_FREE)) {
     633                 :            :                         spin_unlock(&inode->i_lock);
     634                 :         11 :                         redirty_tail(inode, wb);
     635                 :         11 :                         continue;
     636                 :            :                 }
     637 [ -  + ][ #  # ]:      40782 :                 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
     638                 :            :                         /*
     639                 :            :                          * If this inode is locked for writeback and we are not
     640                 :            :                          * doing writeback-for-data-integrity, move it to
     641                 :            :                          * b_more_io so that writeback can proceed with the
     642                 :            :                          * other inodes on s_io.
     643                 :            :                          *
     644                 :            :                          * We'll have another go at writing back this inode
      645                 :            :  * when we have completed a full scan of b_io.
     646                 :            :                          */
     647                 :            :                         spin_unlock(&inode->i_lock);
     648                 :          0 :                         requeue_io(inode, wb);
     649                 :            :                         trace_writeback_sb_inodes_requeue(inode);
     650                 :          0 :                         continue;
     651                 :            :                 }
     652                 :            :                 spin_unlock(&wb->list_lock);
     653                 :            : 
     654                 :            :                 /*
     655                 :            :                  * We already requeued the inode if it had I_SYNC set and we
     656                 :            :                  * are doing WB_SYNC_NONE writeback. So this catches only the
     657                 :            :                  * WB_SYNC_ALL case.
     658                 :            :                  */
     659         [ -  + ]:      40782 :                 if (inode->i_state & I_SYNC) {
     660                 :            :                         /* Wait for I_SYNC. This function drops i_lock... */
     661                 :          0 :                         inode_sleep_on_writeback(inode);
     662                 :            :                         /* Inode may be gone, start again */
     663                 :            :                         spin_lock(&wb->list_lock);
     664                 :          0 :                         continue;
     665                 :            :                 }
     666                 :      40782 :                 inode->i_state |= I_SYNC;
     667                 :            :                 spin_unlock(&inode->i_lock);
     668                 :            : 
     669                 :      40782 :                 write_chunk = writeback_chunk_size(wb->bdi, work);
     670                 :      40782 :                 wbc.nr_to_write = write_chunk;
     671                 :      40782 :                 wbc.pages_skipped = 0;
     672                 :            : 
     673                 :            :                 /*
     674                 :            :                  * We use I_SYNC to pin the inode in memory. While it is set
     675                 :            :                  * evict_inode() will wait so the inode cannot be freed.
     676                 :            :                  */
     677                 :      40782 :                 __writeback_single_inode(inode, &wbc);
     678                 :            : 
     679                 :      40782 :                 work->nr_pages -= write_chunk - wbc.nr_to_write;
     680                 :      40782 :                 wrote += write_chunk - wbc.nr_to_write;
     681                 :            :                 spin_lock(&wb->list_lock);
     682                 :            :                 spin_lock(&inode->i_lock);
     683         [ +  + ]:      40782 :                 if (!(inode->i_state & I_DIRTY))
     684                 :      39268 :                         wrote++;
     685                 :      40782 :                 requeue_inode(inode, wb, &wbc);
     686                 :      40782 :                 inode_sync_complete(inode);
     687                 :            :                 spin_unlock(&inode->i_lock);
     688                 :      40782 :                 cond_resched_lock(&wb->list_lock);
     689                 :            :                 /*
     690                 :            :                  * bail out to wb_writeback() often enough to check
     691                 :            :                  * background threshold and other termination conditions.
     692                 :            :                  */
     693         [ +  + ]:      40782 :                 if (wrote) {
     694         [ +  + ]:      40548 :                         if (time_is_before_jiffies(start_time + HZ / 10UL))
     695                 :            :                                 break;
     696         [ +  - ]:      40427 :                         if (work->nr_pages <= 0)
     697                 :            :                                 break;
     698                 :            :                 }
     699                 :            :         }
     700                 :          0 :         return wrote;
     701                 :            : }
     702                 :            : 
     703                 :          0 : static long __writeback_inodes_wb(struct bdi_writeback *wb,
     704                 :            :                                   struct wb_writeback_work *work)
     705                 :            : {
     706                 :       9130 :         unsigned long start_time = jiffies;
     707                 :            :         long wrote = 0;
     708                 :            : 
     709         [ +  + ]:      14186 :         while (!list_empty(&wb->b_io)) {
     710                 :       5397 :                 struct inode *inode = wb_inode(wb->b_io.prev);
     711                 :       5397 :                 struct super_block *sb = inode->i_sb;
     712                 :            : 
     713         [ -  + ]:       5397 :                 if (!grab_super_passive(sb)) {
     714                 :            :                         /*
     715                 :            :                          * grab_super_passive() may fail consistently due to
     716                 :            :                          * s_umount being grabbed by someone else. Don't use
     717                 :            :                          * requeue_io() to avoid busy retrying the inode/sb.
     718                 :            :                          */
     719                 :          0 :                         redirty_tail(inode, wb);
     720                 :          0 :                         continue;
     721                 :            :                 }
     722                 :       5397 :                 wrote += writeback_sb_inodes(sb, wb, work);
     723                 :       5397 :                 drop_super(sb);
     724                 :            : 
     725                 :            :                 /* refer to the same tests at the end of writeback_sb_inodes */
     726         [ +  + ]:       5397 :                 if (wrote) {
     727         [ +  + ]:       5175 :                         if (time_is_before_jiffies(start_time + HZ / 10UL))
     728                 :            :                                 break;
     729         [ +  - ]:       5056 :                         if (work->nr_pages <= 0)
     730                 :            :                                 break;
     731                 :            :                 }
     732                 :            :         }
     733                 :            :         /* Leave any unwritten inodes on b_io */
     734                 :          0 :         return wrote;
     735                 :            : }
     736                 :            : 
     737                 :          0 : static long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
     738                 :            :                                 enum wb_reason reason)
     739                 :            : {
     740                 :          0 :         struct wb_writeback_work work = {
     741                 :            :                 .nr_pages       = nr_pages,
     742                 :            :                 .sync_mode      = WB_SYNC_NONE,
     743                 :            :                 .range_cyclic   = 1,
     744                 :            :                 .reason         = reason,
     745                 :            :                 .older_than_this = jiffies,
     746                 :            :                 .older_than_this_is_set = 1,
     747                 :            :         };
     748                 :            : 
     749                 :            :         spin_lock(&wb->list_lock);
     750         [ #  # ]:          0 :         if (list_empty(&wb->b_io))
     751                 :          0 :                 queue_io(wb, &work);
     752                 :          0 :         __writeback_inodes_wb(wb, &work);
     753                 :            :         spin_unlock(&wb->list_lock);
     754                 :            : 
     755                 :          0 :         return nr_pages - work.nr_pages;
     756                 :            : }
     757                 :            : 
     758                 :          0 : static bool over_bground_thresh(struct backing_dev_info *bdi)
     759                 :            : {
     760                 :            :         unsigned long background_thresh, dirty_thresh;
     761                 :            : 
     762                 :       4608 :         global_dirty_limits(&background_thresh, &dirty_thresh);
     763                 :            : 
     764         [ +  + ]:       4608 :         if (global_page_state(NR_FILE_DIRTY) +
     765                 :       4608 :             global_page_state(NR_UNSTABLE_NFS) > background_thresh)
     766                 :            :                 return true;
     767                 :            : 
     768         [ +  + ]:       4458 :         if (bdi_stat(bdi, BDI_RECLAIMABLE) >
     769                 :       4458 :                                 bdi_dirty_limit(bdi, background_thresh))
     770                 :            :                 return true;
     771                 :            : 
     772                 :       4373 :         return false;
     773                 :            : }
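
over_bground_thresh() layers two tests: a system-wide one (dirty plus unstable-NFS pages against background_thresh) and a per-device one (this bdi's reclaimable pages against its proportional share of the threshold, computed by bdi_dirty_limit()). A minimal standalone sketch of that shape, with hypothetical inputs rather than the kernel's counters:

        #include <stdbool.h>

        /* Sketch only: both levels matter, because one device can exceed
         * its share while the system as a whole is still under threshold. */
        static bool over_thresh_sketch(unsigned long global_dirty,
                                       unsigned long background_thresh,
                                       unsigned long bdi_reclaimable,
                                       unsigned long bdi_share)
        {
                if (global_dirty > background_thresh)   /* system-wide */
                        return true;
                if (bdi_reclaimable > bdi_share)        /* this device alone */
                        return true;
                return false;
        }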
     774                 :            : 
     775                 :            : /*
     776                 :            :  * Called under wb->list_lock. If there are multiple wb per bdi,
      777                 :            :  * only the flusher working on the first wb should update the bandwidth.
     778                 :            :  */
     779                 :            : static void wb_update_bandwidth(struct bdi_writeback *wb,
     780                 :            :                                 unsigned long start_time)
     781                 :            : {
     782                 :      12264 :         __bdi_update_bandwidth(wb->bdi, 0, 0, 0, 0, 0, start_time);
     783                 :            : }
     784                 :            : 
     785                 :            : /*
     786                 :            :  * Explicit flushing or periodic writeback of "old" data.
     787                 :            :  *
     788                 :            :  * Define "old": the first time one of an inode's pages is dirtied, we mark the
     789                 :            :  * dirtying-time in the inode's address_space.  So this periodic writeback code
     790                 :            :  * just walks the superblock inode list, writing back any inodes which are
     791                 :            :  * older than a specific point in time.
     792                 :            :  *
     793                 :            :  * Try to run once per dirty_writeback_interval.  But if a writeback event
      794                 :            :  * takes longer than one dirty_writeback_interval, then leave a
     795                 :            :  * one-second gap.
     796                 :            :  *
     797                 :            :  * older_than_this takes precedence over nr_to_write.  So we'll only write back
     798                 :            :  * all dirty pages if they are all attached to "old" mappings.
     799                 :            :  */
     800                 :          0 : static long wb_writeback(struct bdi_writeback *wb,
     801                 :            :                          struct wb_writeback_work *work)
     802                 :            : {
     803                 :       8162 :         unsigned long wb_start = jiffies;
     804                 :       8162 :         long nr_pages = work->nr_pages;
     805                 :            :         struct inode *inode;
     806                 :            :         long progress;
     807                 :            : 
     808         [ +  + ]:       8162 :         if (!work->older_than_this_is_set) {
     809                 :       5204 :                 work->older_than_this = jiffies;
     810                 :       5204 :                 work->older_than_this_is_set = 1;
     811                 :            :         }
     812                 :            : 
     813                 :            :         spin_lock(&wb->list_lock);
     814                 :            :         for (;;) {
     815                 :            :                 /*
     816                 :            :                  * Stop writeback when nr_pages has been consumed
     817                 :            :                  */
     818         [ +  + ]:      12308 :                 if (work->nr_pages <= 0)
     819                 :            :                         break;
     820                 :            : 
     821                 :            :                 /*
     822                 :            :                  * Background writeout and kupdate-style writeback may
     823                 :            :                  * run forever. Stop them if there is other work to do
     824                 :            :                  * so that e.g. sync can proceed. They'll be restarted
      825                 :            :                  * after the other work items are all done.
     826                 :            :                  */
     827 [ +  + ][ +  - ]:      12306 :                 if ((work->for_background || work->for_kupdate) &&
     828                 :       3689 :                     !list_empty(&wb->bdi->work_list))
     829                 :            :                         break;
     830                 :            : 
     831                 :            :                 /*
     832                 :            :                  * For background writeout, stop when we are below the
     833                 :            :                  * background dirty threshold
     834                 :            :                  */
     835 [ +  + ][ +  + ]:      12306 :                 if (work->for_background && !over_bground_thresh(wb->bdi))
     836                 :            :                         break;
     837                 :            : 
     838                 :            :                 /*
      839                 :            :                  * Kupdate and background work items are special: we want
      840                 :            :                  * them to include all inodes that need writing. Livelock is
      841                 :            :                  * avoided because these items yield to any other queued
      842                 :            :                  * work, so we are safe.
     843                 :            :                  */
     844         [ +  + ]:      12264 :                 if (work->for_kupdate) {
     845                 :       3471 :                         work->older_than_this = jiffies -
     846                 :       3471 :                                 msecs_to_jiffies(dirty_expire_interval * 10);
     847         [ +  + ]:       8793 :                 } else if (work->for_background)
     848                 :        176 :                         work->older_than_this = jiffies;
     849                 :            : 
     850                 :      12264 :                 trace_writeback_start(wb->bdi, work);
     851         [ +  + ]:      12264 :                 if (list_empty(&wb->b_io))
     852                 :      12072 :                         queue_io(wb, work);
     853         [ +  + ]:      12264 :                 if (work->sb)
     854                 :       3134 :                         progress = writeback_sb_inodes(work->sb, wb, work);
     855                 :            :                 else
     856                 :       9130 :                         progress = __writeback_inodes_wb(wb, work);
     857                 :      12264 :                 trace_writeback_written(wb->bdi, work);
     858                 :            : 
     859                 :            :                 wb_update_bandwidth(wb, wb_start);
     860                 :            : 
     861                 :            :                 /*
     862                 :            :                  * Did we write something? Try for more
     863                 :            :                  *
     864                 :            :                  * Dirty inodes are moved to b_io for writeback in batches.
     865                 :            :                  * The completion of the current batch does not necessarily
     866                 :            :                  * mean the overall work is done. So we keep looping as long
      867                 :            :                  * as we made some progress on cleaning pages or inodes.
     868                 :            :                  */
     869         [ +  + ]:      12264 :                 if (progress)
     870                 :       4146 :                         continue;
     871                 :            :                 /*
     872                 :            :                  * No more inodes for IO, bail
     873                 :            :                  */
     874         [ -  + ]:       8118 :                 if (list_empty(&wb->b_more_io))
     875                 :            :                         break;
     876                 :            :                 /*
     877                 :            :                  * Nothing written. Wait for some inode to
     878                 :            :                  * become available for writeback. Otherwise
     879                 :            :                  * we'll just busyloop.
     880                 :            :                  */
     881         [ #  # ]:          0 :                 if (!list_empty(&wb->b_more_io))  {
     882                 :          0 :                         trace_writeback_wait(wb->bdi, work);
     883                 :          0 :                         inode = wb_inode(wb->b_more_io.prev);
     884                 :            :                         spin_lock(&inode->i_lock);
     885                 :            :                         spin_unlock(&wb->list_lock);
     886                 :            :                         /* This function drops i_lock... */
     887                 :          0 :                         inode_sleep_on_writeback(inode);
     888                 :            :                         spin_lock(&wb->list_lock);
     889                 :            :                 }
     890                 :            :         }
     891                 :            :         spin_unlock(&wb->list_lock);
     892                 :            : 
     893                 :       8162 :         return nr_pages - work->nr_pages;
     894                 :            : }
     895                 :            : 
     896                 :            : /*
     897                 :            :  * Return the next wb_writeback_work struct that hasn't been processed yet.
     898                 :            :  */
     899                 :            : static struct wb_writeback_work *
     900                 :          0 : get_next_work_item(struct backing_dev_info *bdi)
     901                 :            : {
     902                 :            :         struct wb_writeback_work *work = NULL;
     903                 :            : 
     904                 :            :         spin_lock_bh(&bdi->wb_lock);
     905         [ +  + ]:      10291 :         if (!list_empty(&bdi->work_list)) {
     906                 :       5901 :                 work = list_entry(bdi->work_list.next,
     907                 :            :                                   struct wb_writeback_work, list);
     908                 :       5901 :                 list_del_init(&work->list);
     909                 :            :         }
     910                 :            :         spin_unlock_bh(&bdi->wb_lock);
     911                 :      10291 :         return work;
     912                 :            : }
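
For context, the producer that feeds this consumer is bdi_queue_work() earlier in this file; a simplified sketch of that pairing (tracepoint omitted, locking order slightly condensed):

        static void bdi_queue_work_sketch(struct backing_dev_info *bdi,
                                          struct wb_writeback_work *work)
        {
                spin_lock_bh(&bdi->wb_lock);
                list_add_tail(&work->list, &bdi->work_list);
                spin_unlock_bh(&bdi->wb_lock);
                /* kick the per-bdi flusher; it runs bdi_writeback_workfn() */
                mod_delayed_work(bdi_wq, &bdi->wb.dwork, 0);
        }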
     913                 :            : 
     914                 :            : /*
     915                 :            :  * Add in the number of potentially dirty inodes, because each inode
     916                 :            :  * write can dirty pagecache in the underlying blockdev.
     917                 :            :  */
     918                 :          0 : static unsigned long get_nr_dirty_pages(void)
     919                 :            : {
     920                 :       5163 :         return global_page_state(NR_FILE_DIRTY) +
     921                 :       5163 :                 global_page_state(NR_UNSTABLE_NFS) +
     922                 :       5163 :                 get_nr_dirty_inodes();
     923                 :            : }
     924                 :            : 
     925                 :          0 : static long wb_check_background_flush(struct bdi_writeback *wb)
     926                 :            : {
     927         [ +  + ]:       4390 :         if (over_bground_thresh(wb->bdi)) {
     928                 :            : 
     929                 :         59 :                 struct wb_writeback_work work = {
     930                 :            :                         .nr_pages       = LONG_MAX,
     931                 :            :                         .sync_mode      = WB_SYNC_NONE,
     932                 :            :                         .for_background = 1,
     933                 :            :                         .range_cyclic   = 1,
     934                 :            :                         .reason         = WB_REASON_BACKGROUND,
     935                 :            :                 };
     936                 :            : 
     937                 :         59 :                 return wb_writeback(wb, &work);
     938                 :            :         }
     939                 :            : 
     940                 :            :         return 0;
     941                 :            : }
     942                 :            : 
     943                 :          0 : static long wb_check_old_data_flush(struct bdi_writeback *wb)
     944                 :            : {
     945                 :            :         unsigned long expired;
     946                 :            :         long nr_pages;
     947                 :            : 
     948                 :            :         /*
     949                 :            :          * When set to zero, disable periodic writeback
     950                 :            :          */
     951         [ +  - ]:       4390 :         if (!dirty_writeback_interval)
     952                 :            :                 return 0;
     953                 :            : 
     954                 :       8780 :         expired = wb->last_old_flush +
     955                 :       4390 :                         msecs_to_jiffies(dirty_writeback_interval * 10);
     956         [ +  + ]:       4390 :         if (time_before(jiffies, expired))
     957                 :            :                 return 0;
     958                 :            : 
     959                 :       2202 :         wb->last_old_flush = jiffies;
     960                 :       2202 :         nr_pages = get_nr_dirty_pages();
     961                 :            : 
     962         [ +  - ]:       2202 :         if (nr_pages) {
     963                 :       2202 :                 struct wb_writeback_work work = {
     964                 :            :                         .nr_pages       = nr_pages,
     965                 :            :                         .sync_mode      = WB_SYNC_NONE,
     966                 :            :                         .for_kupdate    = 1,
     967                 :            :                         .range_cyclic   = 1,
     968                 :            :                         .reason         = WB_REASON_PERIODIC,
     969                 :            :                 };
     970                 :            : 
     971                 :       2202 :                 return wb_writeback(wb, &work);
     972                 :            :         }
     973                 :            : 
     974                 :            :         return 0;
     975                 :            : }
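
Both sysctls involved here (dirty_writeback_interval above, dirty_expire_interval in wb_writeback()) are expressed in centiseconds, hence the multiplication by 10 before msecs_to_jiffies(). A worked example with the usual defaults of 500 and 3000 centiseconds:

        /* dirty_writeback_interval = 500 -> wake the flusher every 5 s */
        unsigned long next_flush = wb->last_old_flush +
                                   msecs_to_jiffies(500 * 10);

        /* dirty_expire_interval = 3000 -> inodes dirtied more than 30 s
         * ago count as "old" for kupdate-style writeback */
        unsigned long cutoff = jiffies - msecs_to_jiffies(3000 * 10);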
     976                 :            : 
     977                 :            : /*
     978                 :            :  * Retrieve work items and do the writeback they describe
     979                 :            :  */
     980                 :          0 : static long wb_do_writeback(struct bdi_writeback *wb)
     981                 :            : {
     982                 :       4390 :         struct backing_dev_info *bdi = wb->bdi;
     983                 :            :         struct wb_writeback_work *work;
     984                 :            :         long wrote = 0;
     985                 :            : 
     986                 :       4390 :         set_bit(BDI_writeback_running, &wb->bdi->state);
     987         [ +  + ]:      10291 :         while ((work = get_next_work_item(bdi)) != NULL) {
     988                 :            : 
     989                 :            :                 trace_writeback_exec(bdi, work);
     990                 :            : 
     991                 :       5901 :                 wrote += wb_writeback(wb, work);
     992                 :            : 
     993                 :            :                 /*
     994                 :            :                  * Notify the caller of completion if this is a synchronous
     995                 :            :                  * work item, otherwise just free it.
     996                 :            :                  */
     997         [ +  + ]:       5901 :                 if (work->done)
     998                 :       2962 :                         complete(work->done);
     999                 :            :                 else
    1000                 :       5901 :                         kfree(work);
    1001                 :            :         }
    1002                 :            : 
    1003                 :            :         /*
    1004                 :            :          * Check for periodic writeback, kupdated() style
    1005                 :            :          */
    1006                 :       4390 :         wrote += wb_check_old_data_flush(wb);
    1007                 :       4390 :         wrote += wb_check_background_flush(wb);
    1008                 :       4390 :         clear_bit(BDI_writeback_running, &wb->bdi->state);
    1009                 :            : 
    1010                 :       4390 :         return wrote;
    1011                 :            : }
    1012                 :            : 
    1013                 :            : /*
    1014                 :            :  * Handle writeback of dirty data for the device backed by this bdi. Also
     1015                 :            :  * reschedules periodically and does kupdate-style flushing.
    1016                 :            :  */
    1017                 :          0 : void bdi_writeback_workfn(struct work_struct *work)
    1018                 :            : {
    1019                 :       4390 :         struct bdi_writeback *wb = container_of(to_delayed_work(work),
    1020                 :            :                                                 struct bdi_writeback, dwork);
    1021                 :       4390 :         struct backing_dev_info *bdi = wb->bdi;
    1022                 :            :         long pages_written;
    1023                 :            : 
    1024                 :       4390 :         set_worker_desc("flush-%s", dev_name(bdi->dev));
    1025                 :       4390 :         current->flags |= PF_SWAPWRITE;
    1026                 :            : 
    1027 [ -  + ][ #  # ]:       4390 :         if (likely(!current_is_workqueue_rescuer() ||
    1028                 :            :                    list_empty(&bdi->bdi_list))) {
    1029                 :            :                 /*
    1030                 :            :                  * The normal path.  Keep writing back @bdi until its
    1031                 :            :                  * work_list is empty.  Note that this path is also taken
    1032                 :            :                  * if @bdi is shutting down even when we're running off the
     1033                 :            :                  * rescuer, since the work_list needs to be drained.
    1034                 :            :                  */
    1035                 :            :                 do {
    1036                 :       4390 :                         pages_written = wb_do_writeback(wb);
    1037                 :            :                         trace_writeback_pages_written(pages_written);
    1038         [ -  + ]:       4390 :                 } while (!list_empty(&bdi->work_list));
    1039                 :            :         } else {
    1040                 :            :                 /*
    1041                 :            :                  * bdi_wq can't get enough workers and we're running off
    1042                 :            :                  * the emergency worker.  Don't hog it.  Hopefully, 1024 is
    1043                 :            :                  * enough for efficient IO.
    1044                 :            :                  */
    1045                 :          0 :                 pages_written = writeback_inodes_wb(&bdi->wb, 1024,
    1046                 :            :                                                     WB_REASON_FORKER_THREAD);
    1047                 :            :                 trace_writeback_pages_written(pages_written);
    1048                 :            :         }
    1049                 :            : 
    1050 [ +  - ][ +  + ]:       8780 :         if (!list_empty(&bdi->work_list) ||
    1051         [ +  - ]:       2105 :             (wb_has_dirty_io(wb) && dirty_writeback_interval))
    1052                 :       2105 :                 queue_delayed_work(bdi_wq, &wb->dwork,
    1053                 :            :                         msecs_to_jiffies(dirty_writeback_interval * 10));
    1054                 :            : 
    1055                 :       4390 :         current->flags &= ~PF_SWAPWRITE;
    1056                 :       4390 : }
    1057                 :            : 
    1058                 :            : /*
    1059                 :            :  * Start writeback of `nr_pages' pages.  If `nr_pages' is zero, write back
    1060                 :            :  * the whole world.
    1061                 :            :  */
    1062                 :          0 : void wakeup_flusher_threads(long nr_pages, enum wb_reason reason)
    1063                 :            : {
    1064                 :            :         struct backing_dev_info *bdi;
    1065                 :            : 
    1066         [ +  + ]:       2959 :         if (!nr_pages)
    1067                 :       2954 :                 nr_pages = get_nr_dirty_pages();
    1068                 :            : 
    1069                 :            :         rcu_read_lock();
    1070         [ +  + ]:      68057 :         list_for_each_entry_rcu(bdi, &bdi_list, bdi_list) {
    1071         [ +  + ]:      62139 :                 if (!bdi_has_dirty_io(bdi))
    1072                 :      59198 :                         continue;
    1073                 :       2939 :                 __bdi_start_writeback(bdi, nr_pages, false, reason);
    1074                 :            :         }
    1075                 :            :         rcu_read_unlock();
    1076                 :       2959 : }
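
Typical call sites, sketched rather than quoted: sync(2) passes zero so that everything is written back, while memory reclaim asks for a bounded number of pages; the reason codes only feed the writeback tracepoints.

        /* from sys_sync(): write back the whole world */
        wakeup_flusher_threads(0, WB_REASON_SYNC);

        /* from page reclaim: bounded request (nr_pages is illustrative) */
        wakeup_flusher_threads(nr_pages, WB_REASON_TRY_TO_FREE_PAGES);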
    1077                 :            : 
    1078                 :          0 : static noinline void block_dump___mark_inode_dirty(struct inode *inode)
    1079                 :            : {
    1080 [ #  # ][ #  # ]:          0 :         if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev")) {
    1081                 :            :                 struct dentry *dentry;
    1082                 :            :                 const char *name = "?";
    1083                 :            : 
    1084                 :          0 :                 dentry = d_find_alias(inode);
    1085         [ #  # ]:          0 :                 if (dentry) {
    1086                 :            :                         spin_lock(&dentry->d_lock);
    1087                 :          0 :                         name = (const char *) dentry->d_name.name;
    1088                 :            :                 }
    1089                 :          0 :                 printk(KERN_DEBUG
    1090                 :            :                        "%s(%d): dirtied inode %lu (%s) on %s\n",
    1091                 :          0 :                        current->comm, task_pid_nr(current), inode->i_ino,
    1092                 :          0 :                        name, inode->i_sb->s_id);
    1093         [ #  # ]:          0 :                 if (dentry) {
    1094                 :            :                         spin_unlock(&dentry->d_lock);
    1095                 :          0 :                         dput(dentry);
    1096                 :            :                 }
    1097                 :            :         }
    1098                 :          0 : }
    1099                 :            : 
    1100                 :            : /**
    1101                 :            :  *      __mark_inode_dirty -    internal function
    1102                 :            :  *      @inode: inode to mark
     1103                 :            :  *      @flags: what kind of dirty (e.g. I_DIRTY_SYNC)
    1104                 :            :  *      Mark an inode as dirty. Callers should use mark_inode_dirty or
    1105                 :            :  *      mark_inode_dirty_sync.
    1106                 :            :  *
    1107                 :            :  * Put the inode on the super block's dirty list.
    1108                 :            :  *
    1109                 :            :  * CAREFUL! We mark it dirty unconditionally, but move it onto the
    1110                 :            :  * dirty list only if it is hashed or if it refers to a blockdev.
    1111                 :            :  * If it was not hashed, it will never be added to the dirty list
    1112                 :            :  * even if it is later hashed, as it will have been marked dirty already.
    1113                 :            :  *
    1114                 :            :  * In short, make sure you hash any inodes _before_ you start marking
    1115                 :            :  * them dirty.
    1116                 :            :  *
    1117                 :            :  * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
    1118                 :            :  * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
    1119                 :            :  * the kernel-internal blockdev inode represents the dirtying time of the
    1120                 :            :  * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
    1121                 :            :  * page->mapping->host, so the page-dirtying time is recorded in the internal
    1122                 :            :  * blockdev inode.
    1123                 :            :  */
    1124                 :          0 : void __mark_inode_dirty(struct inode *inode, int flags)
    1125                 :            : {
    1126                 :    7117899 :         struct super_block *sb = inode->i_sb;
    1127                 :     119614 :         struct backing_dev_info *bdi = NULL;
    1128                 :            : 
    1129                 :            :         /*
    1130                 :            :          * Don't do this for I_DIRTY_PAGES - that doesn't actually
    1131                 :            :          * dirty the inode itself
    1132                 :            :          */
    1133         [ +  + ]:    7117899 :         if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
    1134                 :            :                 trace_writeback_dirty_inode_start(inode, flags);
    1135                 :            : 
    1136         [ +  + ]:    4936849 :                 if (sb->s_op->dirty_inode)
    1137                 :    4882397 :                         sb->s_op->dirty_inode(inode, flags);
    1138                 :            : 
    1139                 :            :                 trace_writeback_dirty_inode(inode, flags);
    1140                 :            :         }
    1141                 :            : 
    1142                 :            :         /*
    1143                 :            :          * make sure that changes are seen by all cpus before we test i_state
    1144                 :            :          * -- mikulas
    1145                 :            :          */
    1146                 :    7119044 :         smp_mb();
    1147                 :            : 
    1148                 :            :         /* avoid the locking if we can */
    1149         [ +  + ]:    7118232 :         if ((inode->i_state & flags) == flags)
    1150                 :            :                 return;
    1151                 :            : 
    1152         [ -  + ]:     189747 :         if (unlikely(block_dump > 1))
    1153                 :          0 :                 block_dump___mark_inode_dirty(inode);
    1154                 :            : 
    1155                 :            :         spin_lock(&inode->i_lock);
    1156         [ +  + ]:     189761 :         if ((inode->i_state & flags) != flags) {
    1157                 :     189756 :                 const int was_dirty = inode->i_state & I_DIRTY;
    1158                 :            : 
    1159                 :     189756 :                 inode->i_state |= flags;
    1160                 :            : 
    1161                 :            :                 /*
    1162                 :            :                  * If the inode is being synced, just update its dirty state.
    1163                 :            :                  * The unlocker will place the inode on the appropriate
    1164                 :            :                  * superblock list, based upon its state.
    1165                 :            :                  */
    1166         [ +  + ]:     189756 :                 if (inode->i_state & I_SYNC)
    1167                 :            :                         goto out_unlock_inode;
    1168                 :            : 
    1169                 :            :                 /*
    1170                 :            :                  * Only add valid (hashed) inodes to the superblock's
    1171                 :            :                  * dirty list.  Add blockdev inodes as well.
    1172                 :            :                  */
    1173         [ +  + ]:     189006 :                 if (!S_ISBLK(inode->i_mode)) {
    1174         [ +  + ]:     187751 :                         if (inode_unhashed(inode))
    1175                 :            :                                 goto out_unlock_inode;
    1176                 :            :                 }
    1177         [ +  + ]:     186914 :                 if (inode->i_state & I_FREEING)
    1178                 :            :                         goto out_unlock_inode;
    1179                 :            : 
    1180                 :            :                 /*
    1181                 :            :                  * If the inode was already on b_dirty/b_io/b_more_io, don't
    1182                 :            :                  * reposition it (that would break b_dirty time-ordering).
    1183                 :            :                  */
    1184         [ +  + ]:     175307 :                 if (!was_dirty) {
    1185                 :            :                         bool wakeup_bdi = false;
    1186                 :            :                         bdi = inode_to_bdi(inode);
    1187                 :            : 
    1188                 :            :                         spin_unlock(&inode->i_lock);
    1189                 :            :                         spin_lock(&bdi->wb.list_lock);
    1190         [ +  + ]:     119614 :                         if (bdi_cap_writeback_dirty(bdi)) {
    1191         [ -  + ]:     119613 :                                 WARN(!test_bit(BDI_registered, &bdi->state),
    1192                 :            :                                      "bdi-%s not registered\n", bdi->name);
    1193                 :            : 
    1194                 :            :                                 /*
    1195                 :            :                                  * If this is the first dirty inode for this
    1196                 :            :                                  * bdi, we have to wake-up the corresponding
    1197                 :            :                                  * bdi thread to make sure background
    1198                 :            :                                  * write-back happens later.
    1199                 :            :                                  */
    1200         [ +  + ]:     119613 :                                 if (!wb_has_dirty_io(&bdi->wb))
    1201                 :            :                                         wakeup_bdi = true;
    1202                 :            :                         }
    1203                 :            : 
    1204                 :     119614 :                         inode->dirtied_when = jiffies;
    1205                 :     119614 :                         list_move(&inode->i_wb_list, &bdi->wb.b_dirty);
    1206                 :            :                         spin_unlock(&bdi->wb.list_lock);
    1207                 :            : 
    1208         [ +  + ]:     119614 :                         if (wakeup_bdi)
    1209                 :       1551 :                                 bdi_wakeup_thread_delayed(bdi);
    1210                 :            :                         return;
    1211                 :            :                 }
    1212                 :            :         }
    1213                 :            : out_unlock_inode:
    1214                 :            :         spin_unlock(&inode->i_lock);
    1215                 :            : 
    1216                 :            : }
    1217                 :            : EXPORT_SYMBOL(__mark_inode_dirty);
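
The wrappers the comment above refers to are trivial inlines from include/linux/fs.h, shown here with a sketched call site (new_size is hypothetical):

        static inline void mark_inode_dirty(struct inode *inode)
        {
                __mark_inode_dirty(inode, I_DIRTY);
        }

        static inline void mark_inode_dirty_sync(struct inode *inode)
        {
                __mark_inode_dirty(inode, I_DIRTY_SYNC);
        }

        /* e.g. in a filesystem after extending a file: */
        i_size_write(inode, new_size);
        mark_inode_dirty(inode);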
    1218                 :            : 
    1219                 :          0 : static void wait_sb_inodes(struct super_block *sb)
    1220                 :            : {
    1221                 :            :         struct inode *inode, *old_inode = NULL;
    1222                 :            : 
    1223                 :            :         /*
    1224                 :            :          * We need to be protected against the filesystem going from
    1225                 :            :          * r/o to r/w or vice versa.
    1226                 :            :          */
    1227         [ -  + ]:       2957 :         WARN_ON(!rwsem_is_locked(&sb->s_umount));
    1228                 :            : 
    1229                 :            :         spin_lock(&inode_sb_list_lock);
    1230                 :            : 
    1231                 :            :         /*
     1232                 :            :          * Data integrity sync: we must wait for all pages under writeback,
     1233                 :            :          * because there may be pages dirtied before our sync call whose
     1234                 :            :          * writeout started before we could write them out.  In that case
     1235                 :            :          * the inode may no longer be on the dirty list, but we still
     1236                 :            :          * have to wait for that writeout.
    1237                 :            :          */
    1238         [ +  + ]:   49360817 :         list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
    1239                 :   49354902 :                 struct address_space *mapping = inode->i_mapping;
    1240                 :            : 
    1241                 :            :                 spin_lock(&inode->i_lock);
    1242 [ +  + ][ +  + ]:   49354902 :                 if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
    1243                 :   49354892 :                     (mapping->nrpages == 0)) {
    1244                 :            :                         spin_unlock(&inode->i_lock);
    1245                 :   47473985 :                         continue;
    1246                 :            :                 }
    1247                 :    1880917 :                 __iget(inode);
    1248                 :            :                 spin_unlock(&inode->i_lock);
    1249                 :            :                 spin_unlock(&inode_sb_list_lock);
    1250                 :            : 
    1251                 :            :                 /*
    1252                 :            :                  * We hold a reference to 'inode' so it couldn't have been
    1253                 :            :                  * removed from s_inodes list while we dropped the
    1254                 :            :                  * inode_sb_list_lock.  We cannot iput the inode now as we can
    1255                 :            :                  * be holding the last reference and we cannot iput it under
    1256                 :            :                  * inode_sb_list_lock. So we keep the reference and iput it
    1257                 :            :                  * later.
    1258                 :            :                  */
    1259                 :    1880861 :                 iput(old_inode);
    1260                 :            :                 old_inode = inode;
    1261                 :            : 
    1262                 :    1880883 :                 filemap_fdatawait(mapping);
    1263                 :            : 
    1264                 :    1879052 :                 cond_resched();
    1265                 :            : 
    1266                 :            :                 spin_lock(&inode_sb_list_lock);
    1267                 :            :         }
    1268                 :            :         spin_unlock(&inode_sb_list_lock);
    1269                 :       2958 :         iput(old_inode);
    1270                 :       2958 : }
    1271                 :            : 
    1272                 :            : /**
    1273                 :            :  * writeback_inodes_sb_nr -     writeback dirty inodes from given super_block
    1274                 :            :  * @sb: the superblock
    1275                 :            :  * @nr: the number of pages to write
     1276                 :            :  * @reason: reason why some writeback work was initiated
    1277                 :            :  *
    1278                 :            :  * Start writeback on some inodes on this super_block. No guarantees are made
    1279                 :            :  * on how many (if any) will be written, and this function does not wait
    1280                 :            :  * for IO completion of submitted IO.
    1281                 :            :  */
    1282                 :          0 : void writeback_inodes_sb_nr(struct super_block *sb,
    1283                 :            :                             unsigned long nr,
    1284                 :            :                             enum wb_reason reason)
    1285                 :            : {
    1286                 :          7 :         DECLARE_COMPLETION_ONSTACK(done);
    1287                 :          7 :         struct wb_writeback_work work = {
    1288                 :            :                 .sb                     = sb,
    1289                 :            :                 .sync_mode              = WB_SYNC_NONE,
    1290                 :            :                 .tagged_writepages      = 1,
    1291                 :            :                 .done                   = &done,
    1292                 :            :                 .nr_pages               = nr,
    1293                 :            :                 .reason                 = reason,
    1294                 :            :         };
    1295                 :            : 
    1296         [ +  + ]:          7 :         if (sb->s_bdi == &noop_backing_dev_info)
    1297                 :          3 :                 return;
    1298         [ -  + ]:          4 :         WARN_ON(!rwsem_is_locked(&sb->s_umount));
    1299                 :          4 :         bdi_queue_work(sb->s_bdi, &work);
    1300                 :          4 :         wait_for_completion(&done);
    1301                 :            : }
    1302                 :            : EXPORT_SYMBOL(writeback_inodes_sb_nr);
    1303                 :            : 
    1304                 :            : /**
    1305                 :            :  * writeback_inodes_sb  -       writeback dirty inodes from given super_block
    1306                 :            :  * @sb: the superblock
    1307                 :            :  * @reason: reason why some writeback work was initiated
    1308                 :            :  *
    1309                 :            :  * Start writeback on some inodes on this super_block. No guarantees are made
    1310                 :            :  * on how many (if any) will be written, and this function does not wait
    1311                 :            :  * for IO completion of submitted IO.
    1312                 :            :  */
    1313                 :          0 : void writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
    1314                 :            : {
    1315                 :          7 :         return writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
    1316                 :            : }
    1317                 :            : EXPORT_SYMBOL(writeback_inodes_sb);
    1318                 :            : 
    1319                 :            : /**
    1320                 :            :  * try_to_writeback_inodes_sb_nr - try to start writeback if none underway
    1321                 :            :  * @sb: the superblock
    1322                 :            :  * @nr: the number of pages to write
     1323                 :            :  * @reason: reason why some writeback work was initiated
    1324                 :            :  *
     1325                 :            :  * Invoke writeback_inodes_sb_nr() if no writeback is currently underway.
     1326                 :            :  * Returns 1 if writeback was started or already in progress, 0 if not.
    1327                 :            :  */
    1328                 :          0 : int try_to_writeback_inodes_sb_nr(struct super_block *sb,
    1329                 :            :                                   unsigned long nr,
    1330                 :            :                                   enum wb_reason reason)
    1331                 :            : {
    1332         [ #  # ]:          0 :         if (writeback_in_progress(sb->s_bdi))
    1333                 :            :                 return 1;
    1334                 :            : 
    1335         [ #  # ]:          0 :         if (!down_read_trylock(&sb->s_umount))
    1336                 :            :                 return 0;
    1337                 :            : 
    1338                 :          0 :         writeback_inodes_sb_nr(sb, nr, reason);
    1339                 :          0 :         up_read(&sb->s_umount);
    1340                 :          0 :         return 1;
    1341                 :            : }
    1342                 :            : EXPORT_SYMBOL(try_to_writeback_inodes_sb_nr);
    1343                 :            : 
    1344                 :            : /**
    1345                 :            :  * try_to_writeback_inodes_sb - try to start writeback if none underway
    1346                 :            :  * @sb: the superblock
    1347                 :            :  * @reason: reason why some writeback work was initiated
    1348                 :            :  *
     1349                 :            :  * Implemented via try_to_writeback_inodes_sb_nr().
     1350                 :            :  * Returns 1 if writeback was started or already in progress, 0 if not.
    1351                 :            :  */
    1352                 :          0 : int try_to_writeback_inodes_sb(struct super_block *sb, enum wb_reason reason)
    1353                 :            : {
    1354                 :          0 :         return try_to_writeback_inodes_sb_nr(sb, get_nr_dirty_pages(), reason);
    1355                 :            : }
    1356                 :            : EXPORT_SYMBOL(try_to_writeback_inodes_sb);
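
A usage sketch: a filesystem running low on free space can opportunistically push dirty data out without risking a deadlock on s_umount (sbi->free_blocks and needed are hypothetical names):

        if (sbi->free_blocks < needed)
                try_to_writeback_inodes_sb(sb, WB_REASON_FS_FREE_SPACE);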
    1357                 :            : 
    1358                 :            : /**
    1359                 :            :  * sync_inodes_sb       -       sync sb inode pages
    1360                 :            :  * @sb:                 the superblock
     1361                 :            :  * @older_than_this:    only sync inodes dirtied before this time (jiffies)
    1362                 :            :  *
    1363                 :            :  * This function writes and waits on any dirty inode belonging to this
    1364                 :            :  * superblock that has been dirtied before given timestamp.
     1365                 :            :  * superblock that has been dirtied before the given timestamp.
    1366                 :          0 : void sync_inodes_sb(struct super_block *sb, unsigned long older_than_this)
    1367                 :            : {
    1368                 :      64984 :         DECLARE_COMPLETION_ONSTACK(done);
    1369                 :      64984 :         struct wb_writeback_work work = {
    1370                 :            :                 .sb             = sb,
    1371                 :            :                 .sync_mode      = WB_SYNC_ALL,
    1372                 :            :                 .nr_pages       = LONG_MAX,
    1373                 :            :                 .older_than_this = older_than_this,
    1374                 :            :                 .older_than_this_is_set = 1,
    1375                 :            :                 .range_cyclic   = 0,
    1376                 :            :                 .done           = &done,
    1377                 :            :                 .reason         = WB_REASON_SYNC,
    1378                 :            :                 .for_sync       = 1,
    1379                 :            :         };
    1380                 :            : 
    1381                 :            :         /* Nothing to do? */
    1382         [ +  + ]:      64984 :         if (sb->s_bdi == &noop_backing_dev_info)
    1383                 :      62026 :                 return;
    1384         [ -  + ]:       2958 :         WARN_ON(!rwsem_is_locked(&sb->s_umount));
    1385                 :            : 
    1386                 :       2958 :         bdi_queue_work(sb->s_bdi, &work);
    1387                 :       2958 :         wait_for_completion(&done);
    1388                 :            : 
    1389                 :       2958 :         wait_sb_inodes(sb);
    1390                 :            : }
    1391                 :            : EXPORT_SYMBOL(sync_inodes_sb);
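
A sketch of the sync(2) path that drives this function: sys_sync() samples jiffies once and hands it down as @older_than_this, so pages dirtied after the sync started need not be waited for.

        static void sync_inodes_one_sb(struct super_block *sb, void *arg)
        {
                if (!(sb->s_flags & MS_RDONLY))
                        sync_inodes_sb(sb, *(unsigned long *)arg);
        }

        /* in sys_sync(), roughly: */
        unsigned long start = jiffies;
        iterate_supers(sync_inodes_one_sb, &start);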
    1392                 :            : 
    1393                 :            : /**
    1394                 :            :  * write_inode_now      -       write an inode to disk
    1395                 :            :  * @inode: inode to write to disk
    1396                 :            :  * @sync: whether the write should be synchronous or not
    1397                 :            :  *
    1398                 :            :  * This function commits an inode to disk immediately if it is dirty. This is
    1399                 :            :  * primarily needed by knfsd.
    1400                 :            :  *
    1401                 :            :  * The caller must either have a ref on the inode or must have set I_WILL_FREE.
    1402                 :            :  */
    1403                 :          0 : int write_inode_now(struct inode *inode, int sync)
    1404                 :            : {
    1405                 :          0 :         struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
    1406                 :          0 :         struct writeback_control wbc = {
    1407                 :            :                 .nr_to_write = LONG_MAX,
    1408                 :          0 :                 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
    1409                 :            :                 .range_start = 0,
    1410                 :            :                 .range_end = LLONG_MAX,
    1411                 :            :         };
    1412                 :            : 
    1413         [ #  # ]:          0 :         if (!mapping_cap_writeback_dirty(inode->i_mapping))
    1414                 :          0 :                 wbc.nr_to_write = 0;
    1415                 :            : 
    1416                 :            :         might_sleep();
    1417                 :          0 :         return writeback_single_inode(inode, wb, &wbc);
    1418                 :            : }
    1419                 :            : EXPORT_SYMBOL(write_inode_now);
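
A usage sketch: commit one dirty inode synchronously before relying on its on-disk state, where sync == 1 selects WB_SYNC_ALL:

        int err = write_inode_now(inode, 1);    /* write and wait */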
    1420                 :            : 
    1421                 :            : /**
    1422                 :            :  * sync_inode - write an inode and its pages to disk.
    1423                 :            :  * @inode: the inode to sync
    1424                 :            :  * @wbc: controls the writeback mode
    1425                 :            :  *
    1426                 :            :  * sync_inode() will write an inode and its pages to disk.  It will also
    1427                 :            :  * correctly update the inode on its superblock's dirty inode lists and will
    1428                 :            :  * update inode->i_state.
    1429                 :            :  *
    1430                 :            :  * The caller must have a ref on the inode.
    1431                 :            :  */
    1432                 :          0 : int sync_inode(struct inode *inode, struct writeback_control *wbc)
    1433                 :            : {
    1434                 :          0 :         return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
    1435                 :            : }
    1436                 :            : EXPORT_SYMBOL(sync_inode);
    1437                 :            : 
    1438                 :            : /**
    1439                 :            :  * sync_inode_metadata - write an inode to disk
    1440                 :            :  * @inode: the inode to sync
    1441                 :            :  * @wait: wait for I/O to complete.
    1442                 :            :  *
    1443                 :            :  * Write an inode to disk and adjust its dirty state after completion.
    1444                 :            :  *
    1445                 :            :  * Note: only writes the actual inode, no associated data or other metadata.
    1446                 :            :  */
    1447                 :          0 : int sync_inode_metadata(struct inode *inode, int wait)
    1448                 :            : {
    1449                 :          0 :         struct writeback_control wbc = {
    1450                 :          0 :                 .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
    1451                 :            :                 .nr_to_write = 0, /* metadata-only */
    1452                 :            :         };
    1453                 :            : 
    1454                 :          0 :         return sync_inode(inode, &wbc);
    1455                 :            : }
    1456                 :            : EXPORT_SYMBOL(sync_inode_metadata);
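
A usage sketch: an ->fsync implementation that has already flushed the data range and now only needs the inode itself made stable (error handling elided):

        err = filemap_write_and_wait_range(inode->i_mapping, start, end);
        if (!err)
                err = sync_inode_metadata(inode, 1);    /* wait == 1 */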

Generated by: LCOV version 1.9