/*
 * Copyright (C) 2002, Linus Torvalds.
 *
 * Contains all the functions related to writing back and waiting
 * upon dirty inodes against superblocks, and writing back dirty
 * pages against inodes.  i.e.: data writeback.  Writeout of the
 * inode itself is not handled here.
 *
 * 10Apr2002	Andrew Morton
 *		Split out of fs/inode.c
 *		Additions for address_space-based writeback
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/buffer_head.h>
#include "internal.h"

/**
 * writeback_acquire - attempt to get exclusive writeback access to a device
 * @bdi: the device's backing_dev_info structure
 *
 * It is a waste of resources to have more than one pdflush thread blocked on
 * a single request queue.  Exclusion at the request_queue level is obtained
 * via a flag in the request_queue's backing_dev_info.state.
 *
 * Non-request_queue-backed address_spaces will share default_backing_dev_info,
 * unless they implement their own, which is somewhat inefficient, as this
 * may prevent concurrent writeback against multiple devices.
 */
static int writeback_acquire(struct backing_dev_info *bdi)
{
	return !test_and_set_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_in_progress - determine whether there is writeback in progress
 * @bdi: the device's backing_dev_info structure.
 *
 * Determine whether there is writeback in progress against a backing device.
 */
int writeback_in_progress(struct backing_dev_info *bdi)
{
	return test_bit(BDI_pdflush, &bdi->state);
}

/**
 * writeback_release - relinquish exclusive writeback access against a device.
 * @bdi: the device's backing_dev_info structure
 */
static void writeback_release(struct backing_dev_info *bdi)
{
	BUG_ON(!writeback_in_progress(bdi));
	clear_bit(BDI_pdflush, &bdi->state);
}

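/*
 * Illustrative sketch (not part of the original file): a pdflush-style
 * flusher would bracket its work with the two helpers above, so that at
 * most one thread writes back a given device at a time:
 *
 *	if (writeback_acquire(bdi)) {
 *		... write back dirty data against bdi ...
 *		writeback_release(bdi);
 *	}
 */
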
/**
 * enable_pwb - enable periodic write-back after an inode was marked as dirty.
 * @inode: the inode which was marked as dirty
 *
 * This is a helper function for '__mark_inode_dirty()' which enables the
 * periodic write-back, unless:
 *   * the backing device @inode belongs to does not support write-back;
 *   * periodic write-back is already enabled.
 */
static void enable_pwb(struct inode *inode)
{
	struct backing_dev_info *bdi = inode->i_mapping->backing_dev_info;

	if (bdi_cap_writeback_dirty(bdi) &&
	    atomic_add_unless(&periodic_wb_enabled, 1, 1))
		enable_periodic_wb();
}

/**
 * __mark_inode_dirty - internal function
 * @inode: inode to mark
 * @flags: what kind of dirty (i.e. I_DIRTY_SYNC)
 *
 * Mark an inode as dirty.  Callers should use mark_inode_dirty() or
 * mark_inode_dirty_sync().
 *
 * Put the inode on the super block's dirty list.
 *
 * CAREFUL!  We mark it dirty unconditionally, but move it onto the
 * dirty list only if it is hashed or if it refers to a blockdev.
 * If it was not hashed, it will never be added to the dirty list
 * even if it is later hashed, as it will have been marked dirty already.
 *
 * In short, make sure you hash any inodes _before_ you start marking
 * them dirty.
 *
 * This function *must* be atomic for the I_DIRTY_PAGES case -
 * set_page_dirty() is called under spinlock in several places.
 *
 * Note that for blockdevs, inode->dirtied_when represents the dirtying time of
 * the block-special inode (/dev/hda1) itself.  And the ->dirtied_when field of
 * the kernel-internal blockdev inode represents the dirtying time of the
 * blockdev's pages.  This is why for I_DIRTY_PAGES we always use
 * page->mapping->host, so the page-dirtying time is recorded in the internal
 * blockdev inode.
 */
void __mark_inode_dirty(struct inode *inode, int flags)
{
	struct super_block *sb = inode->i_sb;

	/*
	 * Don't do this for I_DIRTY_PAGES - that doesn't actually
	 * dirty the inode itself
	 */
	if (flags & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		if (sb->s_op->dirty_inode)
			sb->s_op->dirty_inode(inode);
	}

	/*
	 * make sure that changes are seen by all cpus before we test i_state
	 */
	smp_mb();

	/* avoid the locking if we can */
	if ((inode->i_state & flags) == flags)
		return;

	if (unlikely(block_dump)) {
		struct dentry *dentry = NULL;
		const char *name = "?";

		if (!list_empty(&inode->i_dentry)) {
			dentry = list_entry(inode->i_dentry.next,
					    struct dentry, d_alias);
			if (dentry && dentry->d_name.name)
				name = (const char *) dentry->d_name.name;
		}

		if (inode->i_ino || strcmp(inode->i_sb->s_id, "bdev"))
			printk(KERN_DEBUG
			       "%s(%d): dirtied inode %lu (%s) on %s\n",
			       current->comm, task_pid_nr(current), inode->i_ino,
			       name, inode->i_sb->s_id);
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & flags) != flags) {
		const int was_dirty = inode->i_state & I_DIRTY;

		inode->i_state |= flags;

		/*
		 * If the inode is being synced, just update its dirty state.
		 * The unlocker will place the inode on the appropriate
		 * superblock list, based upon its state.
		 */
		if (inode->i_state & I_SYNC)
			goto out;

		/*
		 * Only add valid (hashed) inodes to the superblock's
		 * dirty list.  Add blockdev inodes as well.
		 */
		if (!S_ISBLK(inode->i_mode)) {
			if (hlist_unhashed(&inode->i_hash))
				goto out;
		}
		if (inode->i_state & (I_FREEING|I_CLEAR))
			goto out;

		/*
		 * If the inode was already on s_dirty/s_io/s_more_io, don't
		 * reposition it (that would break s_dirty time-ordering).
		 */
		if (!was_dirty) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
			/* Make sure the periodic write-back is running */
			enable_pwb(inode);
		}
	}
out:
	spin_unlock(&inode_lock);
}
EXPORT_SYMBOL(__mark_inode_dirty);

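/*
 * Illustrative usage (assumed, not part of the original file): a filesystem
 * that has just updated in-core inode fields would typically go through the
 * mark_inode_dirty() wrapper, which expands to __mark_inode_dirty(inode,
 * I_DIRTY):
 *
 *	inode->i_mtime = CURRENT_TIME;
 *	mark_inode_dirty(inode);
 */
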
/**
 * mark_sb_dirty - mark super block as dirty.
 * @sb: the super block to mark as dirty
 *
 * This function marks super block @sb as dirty and enables the periodic
 * write-back, unless it is already enabled.  Note, VFS does not serialize the
 * super block clean/dirty (@sb->s_dirt) state changes, and each FS is
 * responsible for doing its own serialization.
 */
void mark_sb_dirty(struct super_block *sb)
{
	sb->s_dirt = 1;

	/*
	 * If 'periodic_wb_enabled' is 0, set it to 1 and enable the periodic
	 * write-back.
	 */
	if (atomic_add_unless(&periodic_wb_enabled, 1, 1))
		enable_periodic_wb();
}
EXPORT_SYMBOL(mark_sb_dirty);

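/*
 * Illustrative usage (assumed): a filesystem that has modified an in-core
 * superblock field would mark the superblock dirty so that the periodic
 * write-back eventually invokes its ->write_super().  'sbi' here is a
 * hypothetical fs-private superblock structure:
 *
 *	lock_super(sb);
 *	sbi->s_free_blocks--;
 *	mark_sb_dirty(sb);
 *	unlock_super(sb);
 */
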
static int write_inode(struct inode *inode, int sync)
{
	if (inode->i_sb->s_op->write_inode && !is_bad_inode(inode))
		return inode->i_sb->s_op->write_inode(inode, sync);
	return 0;
}

/*
 * Redirty an inode: set its when-it-was-dirtied timestamp and move it to the
 * furthest end of its superblock's dirty-inode list.
 *
 * Before stamping the inode's ->dirtied_when, we check to see whether it is
 * already the most-recently-dirtied inode on the s_dirty list.  If that is
 * the case then the inode must have been redirtied while it was being written
 * out and we don't reset its dirtied_when.
 */
static void redirty_tail(struct inode *inode)
{
	struct super_block *sb = inode->i_sb;

	if (!list_empty(&sb->s_dirty)) {
		struct inode *tail_inode;

		tail_inode = list_entry(sb->s_dirty.next, struct inode, i_list);
		if (!time_after_eq(inode->dirtied_when,
				   tail_inode->dirtied_when))
			inode->dirtied_when = jiffies;
	}
	list_move(&inode->i_list, &sb->s_dirty);
}

/*
 * requeue inode for re-scanning after sb->s_io list is exhausted.
 */
static void requeue_io(struct inode *inode)
{
	list_move(&inode->i_list, &inode->i_sb->s_more_io);
}

static void inode_sync_complete(struct inode *inode)
{
	/*
	 * Prevent speculative execution through spin_unlock(&inode_lock);
	 */
	smp_mb();
	wake_up_bit(&inode->i_state, __I_SYNC);
}

/*
 * Move expired dirty inodes from @delaying_queue to @dispatch_queue.
 */
static void move_expired_inodes(struct list_head *delaying_queue,
				struct list_head *dispatch_queue,
				unsigned long *older_than_this)
{
	while (!list_empty(delaying_queue)) {
		struct inode *inode = list_entry(delaying_queue->prev,
						 struct inode, i_list);
		if (older_than_this &&
		    time_after(inode->dirtied_when, *older_than_this))
			break;
		list_move(&inode->i_list, dispatch_queue);
	}
}

/*
 * Queue all expired dirty inodes for io, eldest first.
 */
static void queue_io(struct super_block *sb,
		     unsigned long *older_than_this)
{
	list_splice_init(&sb->s_more_io, sb->s_io.prev);
	move_expired_inodes(&sb->s_dirty, &sb->s_io, older_than_this);
}

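/*
 * Illustrative example (assumed): a kupdate-style caller expires only inodes
 * that have been dirty longer than some interval, by passing a cut-off time:
 *
 *	unsigned long oldest_jif = jiffies - 30 * HZ;
 *	queue_io(sb, &oldest_jif);
 *
 * The 30-second interval here is hypothetical; the real cut-off would come
 * from the caller's dirty-expire tunable.
 */
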
int sb_has_dirty_inodes(struct super_block *sb)
{
	return !list_empty(&sb->s_dirty) ||
	       !list_empty(&sb->s_io) ||
	       !list_empty(&sb->s_more_io);
}
EXPORT_SYMBOL(sb_has_dirty_inodes);

/*
 * Write a single inode's dirty pages and inode data out to disk.
 * If `wait' is set, wait on the writeout.
 *
 * The whole writeout design is quite complex and fragile.  We want to avoid
 * starvation of particular inodes when others are being redirtied, prevent
 * livelocks, etc.
 *
 * Called under inode_lock.
 */
static int
__sync_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	unsigned dirty;
	struct address_space *mapping = inode->i_mapping;
	int wait = wbc->sync_mode == WB_SYNC_ALL;
	int ret;

	BUG_ON(inode->i_state & I_SYNC);

	/* Set I_SYNC, reset I_DIRTY */
	dirty = inode->i_state & I_DIRTY;
	inode->i_state |= I_SYNC;
	inode->i_state &= ~I_DIRTY;

	spin_unlock(&inode_lock);

	ret = do_writepages(mapping, wbc);

	/* Don't write the inode if only I_DIRTY_PAGES was set */
	if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
		int err = write_inode(inode, wait);
		if (ret == 0)
			ret = err;
	}

	if (wait) {
		int err = filemap_fdatawait(mapping);
		if (ret == 0)
			ret = err;
	}

	spin_lock(&inode_lock);
	inode->i_state &= ~I_SYNC;
	if (!(inode->i_state & I_FREEING)) {
		if (!(inode->i_state & I_DIRTY) &&
		    mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
			/*
			 * We didn't write back all the pages.  nfs_writepages()
			 * sometimes bales out without doing anything.  Redirty
			 * the inode; Move it from s_io onto s_more_io/s_dirty.
			 */
			/*
			 * akpm: if the caller was the kupdate function we put
			 * this inode at the head of s_dirty so it gets first
			 * consideration.  Otherwise, move it to the tail, for
			 * the reasons described there.  I'm not really sure
			 * how much sense this makes.  Presumably I had good
			 * reasons for doing it this way, and I'd rather not
			 * muck with it at present.
			 */
			if (wbc->for_kupdate) {
				/*
				 * For the kupdate function we move the inode
				 * to s_more_io so it will get more writeout as
				 * soon as the queue becomes uncongested.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				if (wbc->nr_to_write <= 0) {
					/*
					 * slice used up: queue for next turn
					 */
					requeue_io(inode);
				} else {
					/*
					 * somehow blocked: retry later
					 */
					redirty_tail(inode);
				}
			} else {
				/*
				 * Otherwise fully redirty the inode so that
				 * other inodes on this superblock will get some
				 * writeout.  Otherwise heavy writing to one
				 * file would indefinitely suspend writeout of
				 * all the other files.
				 */
				inode->i_state |= I_DIRTY_PAGES;
				redirty_tail(inode);
			}
		} else if (inode->i_state & I_DIRTY) {
			/*
			 * Someone redirtied the inode while we were writing
			 * back the pages.
			 */
			redirty_tail(inode);
		} else if (atomic_read(&inode->i_count)) {
			/*
			 * The inode is clean, in use
			 */
			list_move(&inode->i_list, &inode_in_use);
		} else {
			/*
			 * The inode is clean, unused
			 */
			list_move(&inode->i_list, &inode_unused);
		}
	}
	inode_sync_complete(inode);
	return ret;
}

/*
 * Write out an inode's dirty pages.  Called under inode_lock.  Either the
 * caller has a ref on the inode (either via __iget or via syscall against
 * an fd) or the inode has I_WILL_FREE set (via generic_forget_inode).
 */
static int
__writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
{
	wait_queue_head_t *wqh;

	if (!atomic_read(&inode->i_count))
		WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
	else
		WARN_ON(inode->i_state & I_WILL_FREE);

	if ((wbc->sync_mode != WB_SYNC_ALL) && (inode->i_state & I_SYNC)) {
		/*
		 * We're skipping this inode because it's locked, and we're not
		 * doing writeback-for-data-integrity.  Move it to s_more_io so
		 * that writeback can proceed with the other inodes on s_io.
		 * We'll have another go at writing back this inode when we've
		 * completed a full scan of s_io.
		 */
		requeue_io(inode);
		return 0;
	}

	/*
	 * It's a data-integrity sync.  We must wait.
	 */
	if (inode->i_state & I_SYNC) {
		DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);

		wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
		do {
			spin_unlock(&inode_lock);
			__wait_on_bit(wqh, &wq, inode_wait,
				      TASK_UNINTERRUPTIBLE);
			spin_lock(&inode_lock);
		} while (inode->i_state & I_SYNC);
	}
	return __sync_single_inode(inode, wbc);
}

/*
 * Write out a superblock's list of dirty inodes.  A wait will be performed
 * upon no inodes, all inodes or the final one, depending upon sync_mode.
 *
 * If older_than_this is non-NULL, then only write out inodes which
 * had their first dirtying at a time earlier than *older_than_this.
 *
 * If we're a pdflush thread, then implement pdflush collision avoidance
 * against the entire list.
 *
 * WB_SYNC_HOLD is a hack for sys_sync(): reattach the inode to sb->s_dirty so
 * that it can be located for waiting on in __writeback_single_inode().
 *
 * If `bdi' is non-zero then we're being asked to writeback a specific queue.
 * This function assumes that the blockdev superblock's inodes are backed by
 * a variety of queues, so all inodes are searched.  For other superblocks,
 * assume that all inodes are backed by the same queue.
 *
 * FIXME: this linear search could get expensive with many filesystems.  But
 * how to fix?  We need to go from an address_space to all inodes which share
 * a queue with that address_space.  (Easy: have a global "dirty superblocks"
 * list).
 *
 * The inodes to be written are parked on sb->s_io.  They are moved back onto
 * sb->s_dirty as they are selected for writing.  This way, none can be missed
 * on the writer throttling path, and we get decent balancing between many
 * throttled threads: we don't want them all piling up on inode_sync_wait.
 */
void generic_sync_sb_inodes(struct super_block *sb,
			    struct writeback_control *wbc)
{
	const unsigned long start = jiffies;	/* livelock avoidance */

	spin_lock(&inode_lock);
	if (!wbc->for_kupdate || list_empty(&sb->s_io))
		queue_io(sb, wbc->older_than_this);

	while (!list_empty(&sb->s_io)) {
		struct inode *inode = list_entry(sb->s_io.prev,
						 struct inode, i_list);
		struct address_space *mapping = inode->i_mapping;
		struct backing_dev_info *bdi = mapping->backing_dev_info;
		long pages_skipped;

		if (!bdi_cap_writeback_dirty(bdi)) {
			redirty_tail(inode);
			if (sb_is_blkdev_sb(sb)) {
				/*
				 * Dirty memory-backed blockdev: the ramdisk
				 * driver does this.  Skip just this inode
				 */
				continue;
			}
			/*
			 * Dirty memory-backed inode against a filesystem other
			 * than the kernel-internal bdev filesystem.  Skip the
			 * entire superblock.
			 */
			break;
		}

		if (wbc->nonblocking && bdi_write_congested(bdi)) {
			wbc->encountered_congestion = 1;
			if (!sb_is_blkdev_sb(sb))
				break;		/* Skip a congested fs */
			requeue_io(inode);
			continue;		/* Skip a congested blockdev */
		}

		if (wbc->bdi && bdi != wbc->bdi) {
			if (!sb_is_blkdev_sb(sb))
				break;		/* fs has the wrong queue */
			requeue_io(inode);
			continue;		/* blockdev has wrong queue */
		}

		/* Was this inode dirtied after sync_sb_inodes was called? */
		if (time_after(inode->dirtied_when, start))
			break;

		/* Is another pdflush already flushing this queue? */
		if (current_is_pdflush() && !writeback_acquire(bdi))
			break;

		BUG_ON(inode->i_state & I_FREEING);
		__iget(inode);
		pages_skipped = wbc->pages_skipped;
		__writeback_single_inode(inode, wbc);
		if (wbc->sync_mode == WB_SYNC_HOLD) {
			inode->dirtied_when = jiffies;
			list_move(&inode->i_list, &sb->s_dirty);
		}
		if (current_is_pdflush())
			writeback_release(bdi);
		if (wbc->pages_skipped != pages_skipped) {
			/*
			 * writeback is not making progress due to locked
			 * buffers.  Skip this inode for now.
			 */
			redirty_tail(inode);
		}
		spin_unlock(&inode_lock);
		iput(inode);
		cond_resched();
		spin_lock(&inode_lock);
		if (wbc->nr_to_write <= 0) {
			wbc->more_io = 1;
			break;
		}
		if (!list_empty(&sb->s_more_io))
			wbc->more_io = 1;
	}
	spin_unlock(&inode_lock);
	return;		/* Leave any unwritten inodes on s_io */
}
EXPORT_SYMBOL_GPL(generic_sync_sb_inodes);

static void sync_sb_inodes(struct super_block *sb,
			   struct writeback_control *wbc)
{
	generic_sync_sb_inodes(sb, wbc);
}

/*
 * Start writeback of dirty pagecache data against all unlocked inodes.
 *
 * Note:
 * We don't need to grab a reference to the superblock here.  If it has a
 * non-empty ->s_dirty it hadn't been killed yet and kill_super() won't proceed
 * past sync_inodes_sb() until the ->s_dirty/s_io/s_more_io lists are all
 * empty.  Since __sync_single_inode() regains inode_lock before it finally
 * moves the inode from the superblock lists we are OK.
 *
 * If `older_than_this' is non-zero then only flush inodes which have a
 * flushtime older than *older_than_this.
 *
 * If `bdi' is non-zero then we will scan the first inode against each
 * superblock until we find the matching ones.  One group will be the dirty
 * inodes against a filesystem.  Then when we hit the dummy blockdev superblock,
 * sync_sb_inodes will seek out the blockdev which matches `bdi'.  Maybe not
 * super-efficient but we're about to do a ton of I/O...
 */
void
writeback_inodes(struct writeback_control *wbc)
{
	struct super_block *sb;

	might_sleep();
	spin_lock(&sb_lock);
restart:
	list_for_each_entry_reverse(sb, &super_blocks, s_list) {
		if (sb_has_dirty_inodes(sb)) {
			/* we're making our own get_super here */
			sb->s_count++;
			spin_unlock(&sb_lock);
			/*
			 * If we can't get the readlock, there's no sense in
			 * waiting around, most of the time the FS is going to
			 * be unmounted by the time it is released.
			 */
			if (down_read_trylock(&sb->s_umount)) {
				if (sb->s_root)
					sync_sb_inodes(sb, wbc);
				up_read(&sb->s_umount);
			}
			spin_lock(&sb_lock);
			if (__put_super_and_need_restart(sb))
				goto restart;
		}
		if (wbc->nr_to_write <= 0)
			break;
	}
	spin_unlock(&sb_lock);
}

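/*
 * Illustrative sketch (assumed): callers drive writeback_inodes() with a
 * writeback_control that bounds the work, e.g. to write at most 1024 pages
 * against any queue without blocking on congestion:
 *
 *	struct writeback_control wbc = {
 *		.bdi		= NULL,
 *		.sync_mode	= WB_SYNC_NONE,
 *		.older_than_this = NULL,
 *		.nr_to_write	= 1024,
 *		.nonblocking	= 1,
 *	};
 *	writeback_inodes(&wbc);
 */
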
/*
 * writeback and wait upon the filesystem's dirty inodes.  The caller will
 * do this in two passes - one to write, and one to wait.  WB_SYNC_HOLD is
 * used to park the written inodes on sb->s_dirty for the wait pass.
 *
 * A finite limit is set on the number of pages which will be written, to
 * prevent infinite livelock of sys_sync().
 *
 * We add in the number of potentially dirty inodes, because each inode write
 * can dirty pagecache in the underlying blockdev.
 */
void sync_inodes_sb(struct super_block *sb, int wait)
{
	struct writeback_control wbc = {
		.sync_mode	= wait ? WB_SYNC_ALL : WB_SYNC_HOLD,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};
	unsigned long nr_dirty = global_page_state(NR_FILE_DIRTY);
	unsigned long nr_unstable = global_page_state(NR_UNSTABLE_NFS);

	wbc.nr_to_write = nr_dirty + nr_unstable +
			(inodes_stat.nr_inodes - inodes_stat.nr_unused) +
			nr_dirty + nr_unstable;
	wbc.nr_to_write += wbc.nr_to_write / 2;		/* Bit more for luck */
	sync_sb_inodes(sb, &wbc);
}

/*
 * Rather lame livelock avoidance.
 */
static void set_sb_syncing(int val)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
	list_for_each_entry_reverse(sb, &super_blocks, s_list)
		sb->s_syncing = val;
	spin_unlock(&sb_lock);
}

/**
 * sync_inodes - writes all inodes to disk
 * @wait: wait for completion
 *
 * sync_inodes() goes through each super block's dirty inode list, writes the
 * inodes out, waits on the writeout and puts the inodes back on the normal
 * list.
 *
 * This is for sys_sync().  fsync_dev() uses the same algorithm.  The subtle
 * part of the sync functions is that the blockdev "superblock" is processed
 * last.  This is because the write_inode() function of a typical fs will
 * perform no I/O, but will mark buffers in the blockdev mapping as dirty.
 * What we want to do is to perform all that dirtying first, and then write
 * back all those inode blocks via the blockdev mapping in one sweep.  So the
 * additional (somewhat redundant) sync_blockdev() calls here are to make
 * sure that really happens.  Because if we call sync_inodes_sb(wait=1) with
 * outstanding dirty inodes, the writeback goes block-at-a-time within the
 * filesystem's write_inode().  This is extremely slow.
 */
static void __sync_inodes(int wait)
{
	struct super_block *sb;

	spin_lock(&sb_lock);
restart:
	list_for_each_entry(sb, &super_blocks, s_list) {
		if (sb->s_syncing)
			continue;
		sb->s_syncing = 1;
		sb->s_count++;
		spin_unlock(&sb_lock);
		down_read(&sb->s_umount);
		if (sb->s_root) {
			sync_inodes_sb(sb, wait);
			sync_blockdev(sb->s_bdev);
		}
		up_read(&sb->s_umount);
		spin_lock(&sb_lock);
		if (__put_super_and_need_restart(sb))
			goto restart;
	}
	spin_unlock(&sb_lock);
}

void sync_inodes(int wait)
{
	set_sb_syncing(0);
	__sync_inodes(0);

	if (wait) {
		set_sb_syncing(0);
		__sync_inodes(1);
	}
}

/**
 * write_inode_now - write an inode to disk
 * @inode: inode to write to disk
 * @sync: whether the write should be synchronous or not
 *
 * This function commits an inode to disk immediately if it is dirty.  This is
 * primarily needed by knfsd.
 *
 * The caller must either have a ref on the inode or must have set I_WILL_FREE.
 */
int write_inode_now(struct inode *inode, int sync)
{
	int ret;
	struct writeback_control wbc = {
		.nr_to_write	= LONG_MAX,
		.sync_mode	= sync ? WB_SYNC_ALL : WB_SYNC_NONE,
		.range_start	= 0,
		.range_end	= LLONG_MAX,
	};

	if (!mapping_cap_writeback_dirty(inode->i_mapping))
		wbc.nr_to_write = 0;

	might_sleep();
	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, &wbc);
	spin_unlock(&inode_lock);
	if (sync)
		inode_sync_wait(inode);
	return ret;
}
EXPORT_SYMBOL(write_inode_now);

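/*
 * Illustrative usage (assumed): knfsd-style code that must have an inode
 * safely on disk before replying to a client would issue a synchronous
 * commit:
 *
 *	err = write_inode_now(inode, 1);
 */
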
/**
 * sync_inode - write an inode and its pages to disk.
 * @inode: the inode to sync
 * @wbc: controls the writeback mode
 *
 * sync_inode() will write an inode and its pages to disk.  It will also
 * correctly update the inode on its superblock's dirty inode lists and will
 * update inode->i_state.
 *
 * The caller must have a ref on the inode.
 */
int sync_inode(struct inode *inode, struct writeback_control *wbc)
{
	int ret;

	spin_lock(&inode_lock);
	ret = __writeback_single_inode(inode, wbc);
	spin_unlock(&inode_lock);
	return ret;
}
EXPORT_SYMBOL(sync_inode);

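/*
 * Illustrative usage (assumed): a caller that wants data-integrity writeback
 * of one inode, bounded only by the page count, could do:
 *
 *	struct writeback_control wbc = {
 *		.sync_mode	= WB_SYNC_ALL,
 *		.nr_to_write	= LONG_MAX,
 *		.range_start	= 0,
 *		.range_end	= LLONG_MAX,
 *	};
 *	err = sync_inode(inode, &wbc);
 */
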
/**
 * generic_osync_inode - flush all dirty data for a given inode to disk
 * @inode: inode to write
 * @mapping: the address_space that should be flushed
 * @what: what to write and wait upon
 *
 * This can be called by file_write functions for files which have the
 * O_SYNC flag set, to flush dirty writes to disk.
 *
 * @what is a bitmask, specifying which part of the inode's data should be
 * written and waited upon.
 *
 * OSYNC_DATA:     i_mapping's dirty data
 * OSYNC_METADATA: the buffers at i_mapping->private_list
 * OSYNC_INODE:    the inode itself
 */

int generic_osync_inode(struct inode *inode, struct address_space *mapping, int what)
{
	int err = 0;
	int need_write_inode_now = 0;
	int err2;

	if (what & OSYNC_DATA)
		err = filemap_fdatawrite(mapping);
	if (what & (OSYNC_METADATA|OSYNC_DATA)) {
		err2 = sync_mapping_buffers(mapping);
		if (!err)
			err = err2;
	}
	if (what & OSYNC_DATA) {
		err2 = filemap_fdatawait(mapping);
		if (!err)
			err = err2;
	}

	spin_lock(&inode_lock);
	if ((inode->i_state & I_DIRTY) &&
	    ((what & OSYNC_INODE) || (inode->i_state & I_DIRTY_DATASYNC)))
		need_write_inode_now = 1;
	spin_unlock(&inode_lock);

	if (need_write_inode_now) {
		err2 = write_inode_now(inode, 1);
		if (!err)
			err = err2;
	} else
		inode_sync_wait(inode);

	return err;
}
EXPORT_SYMBOL(generic_osync_inode);

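/*
 * Illustrative usage (assumed): an O_SYNC write path would flush both data
 * and metadata after copying in the user's bytes:
 *
 *	err = generic_osync_inode(inode, mapping,
 *				  OSYNC_METADATA | OSYNC_DATA);
 */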