From: Ingo Molnar

Semaphore to mutex conversion.

The conversion was generated via scripts, and the result was validated
automatically via a script as well.

Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 fs/dquot.c         |    6 +++---
 fs/inode.c         |   16 ++++++++--------
 fs/inotify.c       |    6 +++---
 include/linux/fs.h |    2 +-
 4 files changed, 15 insertions(+), 15 deletions(-)

diff -puN fs/dquot.c~sem2mutex-iprune fs/dquot.c
--- devel/fs/dquot.c~sem2mutex-iprune	2006-01-23 16:41:35.000000000 -0800
+++ devel-akpm/fs/dquot.c	2006-01-23 16:41:35.000000000 -0800
@@ -118,7 +118,7 @@
  * spinlock to internal buffers before writing.
  *
  * Lock ordering (including related VFS locks) is the following:
- *   i_mutex > dqonoff_mutex > iprune_sem > journal_lock > dqptr_sem >
+ *   i_mutex > dqonoff_mutex > iprune_mutex > journal_lock > dqptr_sem >
  *   > dquot->dq_lock > dqio_mutex
  * i_mutex on quota files is special (it's below dqio_mutex)
  */
@@ -734,11 +734,11 @@ static void drop_dquot_ref(struct super_

 	/* We need to be guarded against prune_icache to reach all the
 	 * inodes - otherwise some can be on the local list of prune_icache */
-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	down_write(&sb_dqopt(sb)->dqptr_sem);
 	remove_dquot_ref(sb, type, &tofree_head);
 	up_write(&sb_dqopt(sb)->dqptr_sem);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);
 	put_dquot_list(&tofree_head);
 }
diff -puN fs/inode.c~sem2mutex-iprune fs/inode.c
--- devel/fs/inode.c~sem2mutex-iprune	2006-01-23 16:41:35.000000000 -0800
+++ devel-akpm/fs/inode.c	2006-01-23 16:41:35.000000000 -0800
@@ -84,14 +84,14 @@ static struct hlist_head *inode_hashtabl
 DEFINE_SPINLOCK(inode_lock);

 /*
- * iprune_sem provides exclusion between the kswapd or try_to_free_pages
+ * iprune_mutex provides exclusion between the kswapd or try_to_free_pages
  * icache shrinking path, and the umount path. Without this exclusion,
  * by the time prune_icache calls iput for the inode whose pages it has
  * been invalidating, or by the time it calls clear_inode & destroy_inode
  * from its final dispose_list, the struct super_block they refer to
  * (for inode->i_sb->s_op) may already have been freed and reused.
  */
-DECLARE_MUTEX(iprune_sem);
+DEFINE_MUTEX(iprune_mutex);

 /*
  * Statistics gathering..
@@ -319,7 +319,7 @@ static int invalidate_list(struct list_h
 		/*
 		 * We can reschedule here without worrying about the list's
 		 * consistency because the per-sb list of inodes must not
-		 * change during umount anymore, and because iprune_sem keeps
+		 * change during umount anymore, and because iprune_mutex keeps
 		 * shrink_icache_memory() away.
 		 */
 		cond_resched_lock(&inode_lock);
@@ -355,14 +355,14 @@ int invalidate_inodes(struct super_block
 	int busy;
 	LIST_HEAD(throw_away);

-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	inotify_unmount_inodes(&sb->s_inodes);
 	busy = invalidate_list(&sb->s_inodes, &throw_away);
 	spin_unlock(&inode_lock);

 	dispose_list(&throw_away);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);

 	return busy;
 }
@@ -377,7 +377,7 @@ int __invalidate_device(struct block_dev
 	if (sb) {
 		/*
 		 * no need to lock the super, get_super holds the
-		 * read semaphore so the filesystem cannot go away
+		 * read mutex so the filesystem cannot go away
 		 * under us (->put_super runs with the write lock
 		 * hold).
 		 */
@@ -423,7 +423,7 @@ static void prune_icache(int nr_to_scan)
 	int nr_scanned;
 	unsigned long reap = 0;

-	down(&iprune_sem);
+	mutex_lock(&iprune_mutex);
 	spin_lock(&inode_lock);
 	for (nr_scanned = 0; nr_scanned < nr_to_scan; nr_scanned++) {
 		struct inode *inode;
@@ -459,7 +459,7 @@ static void prune_icache(int nr_to_scan)
 	spin_unlock(&inode_lock);

 	dispose_list(&freeable);
-	up(&iprune_sem);
+	mutex_unlock(&iprune_mutex);

 	if (current_is_kswapd())
 		mod_page_state(kswapd_inodesteal, reap);
diff -puN fs/inotify.c~sem2mutex-iprune fs/inotify.c
--- devel/fs/inotify.c~sem2mutex-iprune	2006-01-23 16:41:35.000000000 -0800
+++ devel-akpm/fs/inotify.c	2006-01-23 16:41:35.000000000 -0800
@@ -54,7 +54,7 @@ int inotify_max_queued_events;
  * Lock ordering:
  *
  * dentry->d_lock (used to keep d_move() away from dentry->d_parent)
- * iprune_sem (synchronize shrink_icache_memory())
+ * iprune_mutex (synchronize shrink_icache_memory())
  * inode_lock (protects the super_block->s_inodes list)
  * inode->inotify_mutex (protects inode->inotify_watches and watches->i_list)
  * inotify_dev->mutex (protects inotify_device and watches->d_list)
@@ -569,7 +569,7 @@ EXPORT_SYMBOL_GPL(inotify_get_cookie);
  * @list: list of inodes being unmounted (sb->s_inodes)
  *
  * Called with inode_lock held, protecting the unmounting super block's list
- * of inodes, and with iprune_sem held, keeping shrink_icache_memory() at bay.
+ * of inodes, and with iprune_mutex held, keeping shrink_icache_memory() at bay.
  * We temporarily drop inode_lock, however, and CAN block.
  */
 void inotify_unmount_inodes(struct list_head *list)
@@ -618,7 +618,7 @@ void inotify_unmount_inodes(struct list_
 		/*
 		 * We can safely drop inode_lock here because we hold
 		 * references on both inode and next_i. Also no new inodes
 		 * will be added since the umount has begun. Finally,
-		 * iprune_sem keeps shrink_icache_memory() away.
+		 * iprune_mutex keeps shrink_icache_memory() away.
 		 */
 		spin_unlock(&inode_lock);
diff -puN include/linux/fs.h~sem2mutex-iprune include/linux/fs.h
--- devel/include/linux/fs.h~sem2mutex-iprune	2006-01-23 16:41:35.000000000 -0800
+++ devel-akpm/include/linux/fs.h	2006-01-23 16:41:35.000000000 -0800
@@ -1538,7 +1538,7 @@ extern void destroy_inode(struct inode *
 extern struct inode *new_inode(struct super_block *);
 extern int remove_suid(struct dentry *);
 extern void remove_dquot_ref(struct super_block *, int, struct list_head *);
-extern struct semaphore iprune_sem;
+extern struct mutex iprune_mutex;
 extern void __insert_inode_hash(struct inode *, unsigned long hashval);
 extern void remove_inode_hash(struct inode *);
_
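
For readers without the mutex API in front of them, here is a minimal,
self-contained sketch of the conversion pattern this patch applies: a
semaphore that was only ever used for mutual exclusion (declared with
DECLARE_MUTEX, i.e. a semaphore initialized to 1, and paired down()/up()
calls) becomes a struct mutex taken with mutex_lock()/mutex_unlock().
Only DEFINE_MUTEX, mutex_lock and mutex_unlock are used, exactly as in
the diff above; the function name example_prune_path() is illustrative
and does not appear in the patch.

#include <linux/mutex.h>

/* After the conversion: a dedicated mutex replaces the semaphore that
 * was declared with DECLARE_MUTEX() and locked via down()/up(). */
static DEFINE_MUTEX(iprune_mutex);

/* Illustrative caller, not taken from the patch. */
static void example_prune_path(void)
{
	mutex_lock(&iprune_mutex);	/* was: down(&iprune_sem);	*/
	/* ... section that must exclude the icache shrinker ... */
	mutex_unlock(&iprune_mutex);	/* was: up(&iprune_sem);	*/
}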