The gap between checking d_bucket and sampling d_move_count looks like a bug
to me: a concurrent rename can slip into that window undetected.

It feels safer to check d_bucket after taking the lock, when we know that
it is stable.

And it's a little faster to check d_bucket after the hash check rather than
before it: most entries on the chain fail the cheap hash comparison, so they
never reach the d_bucket test at all.
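
As a minimal sketch of the pattern (userspace pthreads with made-up names,
not the dcache code): a field that a concurrent writer can rewrite is only
trustworthy when it is sampled while holding the object's lock.

	#include <pthread.h>

	struct entry {
		pthread_mutex_t lock;
		void *bucket;		/* which hash chain this entry is on */
	};

	/* Racy: bucket can change between this check and taking the lock. */
	int lookup_racy(struct entry *e, void *head)
	{
		if (e->bucket != head)	/* sampled without the lock held */
			return 0;
		pthread_mutex_lock(&e->lock);
		/* e may have been moved to another chain by now */
		pthread_mutex_unlock(&e->lock);
		return 1;
	}

	/* Stable: the writer also takes e->lock, so bucket is fixed here. */
	int lookup_stable(struct entry *e, void *head)
	{
		int found = 0;

		pthread_mutex_lock(&e->lock);
		if (e->bucket == head)
			found = 1;
		pthread_mutex_unlock(&e->lock);
		return found;
	}

That is what the patch below does: the d_bucket test moves inside
dentry->d_lock, next to the existing recheck for a concurrent d_move.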


---

 25-akpm/fs/dcache.c |   14 ++++++++------
 1 files changed, 8 insertions(+), 6 deletions(-)

diff -puN fs/dcache.c~dentry-d_bucket-fix fs/dcache.c
--- 25/fs/dcache.c~dentry-d_bucket-fix	2004-05-09 14:06:51.816554824 -0700
+++ 25-akpm/fs/dcache.c	2004-05-09 14:07:41.932935976 -0700
@@ -975,12 +975,6 @@ struct dentry * __d_lookup(struct dentry
 		smp_read_barrier_depends();
 		dentry = hlist_entry(node, struct dentry, d_hash);
 
-		/* if lookup ends up in a different bucket 
-		 * due to concurrent rename, fail it
-		 */
-		if (unlikely(dentry->d_bucket != head))
-			break;
-
 		smp_rmb();
 
 		if (dentry->d_name.hash != hash)
@@ -991,6 +985,13 @@ struct dentry * __d_lookup(struct dentry
 		spin_lock(&dentry->d_lock);
 
 		/*
+		 * If lookup ends up in a different bucket due to concurrent
+		 * rename, fail it
+		 */
+		if (unlikely(dentry->d_bucket != head))
+			goto terminate;
+
+		/*
 		 * Recheck the dentry after taking the lock - d_move may have
 		 * changed things.  Don't bother checking the hash because we're
 		 * about to compare the whole name anyway.
@@ -1014,6 +1015,7 @@ struct dentry * __d_lookup(struct dentry
 			atomic_inc(&dentry->d_count);
 			found = dentry;
 		}
+terminate:
 		spin_unlock(&dentry->d_lock);
 		break;
 next:

_