In 4e57f6dd3a156e35ccb587fc5c003805dd73ecb7, I accidentally
introduced a new way for duplicate directory entries to be returned
from readdir(). That patch fails to properly decrement the nlupgs
counter when breaking out of the inner for loop. This accounting
error causes an extra iteration of the inner for loop when processing
the next cfs page, and a bad ldp_hash_end value is then saved in the
lu_dirpage. To fix this, always decrement the nlupgs counter on
entry into the inner loop.
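
To make the accounting difference concrete, the following standalone
sketch (illustrative only; it is not the lmv_adjust_dirpages() code,
and LU_PAGES_PER_CFS_PAGE is an assumed constant) simulates both loop
forms and shows how breaking out of the old form leaves nlupgs one
too high:

	/*
	 * Standalone illustration; this is not Lustre code.  nlupgs starts
	 * as the total number of lu_dirpages and should reach 0 once every
	 * page has been accounted for.  LU_PAGES_PER_CFS_PAGE is assumed.
	 */
	#include <stdio.h>

	#define LU_PAGES_PER_CFS_PAGE	4	/* assumed pages walked before breaking */

	/* Old form: the decrement only happens when the loop condition is
	 * re-evaluated, so breaking out skips it for the page just processed. */
	static int count_old(int nlupgs)
	{
		int walked = 0;

		for (; nlupgs > 1; nlupgs--) {
			if (++walked == LU_PAGES_PER_CFS_PAGE)
				break;		/* nlupgs is not decremented here */
		}
		return nlupgs;
	}

	/* Fixed form: each lu_dirpage is charged on entry, so the counter is
	 * already correct when the break happens. */
	static int count_new(int nlupgs)
	{
		int walked = 0;

		while (--nlupgs > 0) {
			if (++walked == LU_PAGES_PER_CFS_PAGE)
				break;		/* this page was already counted */
		}
		return nlupgs;
	}

	int main(void)
	{
		/* After walking 4 of 8 pages, 4 should remain uncounted. */
		printf("old form leaves nlupgs = %d\n", count_old(8));	/* 5 */
		printf("new form leaves nlupgs = %d\n", count_new(8));	/* 4 */
		return 0;
	}

That leftover count is exactly the kind of drift the LASSERTF(nlupgs == 0, ...)
added in the hunk below is meant to catch.
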
Note: this bug only affects architectures with pages larger than 4 KB,
e.g. PowerPC.
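
For scale (the 4 KB lu_dirpage size and the 64 KB PowerPC page size
below are illustrative assumptions, not values taken from the patch),
a quick arithmetic check of how many lu_dirpages share one CFS page:

	/*
	 * Illustrative arithmetic only; the sizes are assumed rather than
	 * read from the Lustre headers.  On 4 KB-page architectures there is
	 * one lu_dirpage per CFS page and lmv_adjust_dirpages() is compiled
	 * out entirely (see the #else stub in the hunk below), so the bad
	 * accounting never runs; on 64 KB-page machines such as PowerPC many
	 * lu_dirpages share a CFS page and the error becomes visible.
	 */
	#include <stdio.h>

	#define LU_PAGE_SIZE 4096UL	/* assumed size of one lu_dirpage */

	int main(void)
	{
		unsigned long cfs_page_sizes[] = { 4096UL, 65536UL };
		unsigned int i;

		for (i = 0; i < 2; i++)
			printf("%6lu-byte CFS page -> %2lu lu_dirpage(s)\n",
			       cfs_page_sizes[i],
			       cfs_page_sizes[i] / LU_PAGE_SIZE);
		return 0;
	}
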
Signed-off-by: Ned Bass <bass6@llnl.gov>
Change-Id: I9fa0fd8f34081f834f1d8b4df633d6e08926dcc9
Reviewed-on: http://review.whamcloud.com/6405
Reviewed-by: Fan Yong <fan.yong@intel.com>
Reviewed-by: Andreas Dilger <andreas.dilger@intel.com>
Reviewed-by: Bobi Jam <bobijam@gmail.com>
Tested-by: Hudson
Tested-by: Maloo <whamcloud.maloo@gmail.com>
__u64 hash_end = dp->ldp_hash_end;
__u32 flags = dp->ldp_flags;
- for (; nlupgs > 1; nlupgs--) {
+ while (--nlupgs > 0) {
ent = lu_dirent_start(dp);
for (end_dirent = ent; ent != NULL;
end_dirent = ent, ent = lu_dirent_next(ent));
kunmap(pages[i]);
}
+ LASSERTF(nlupgs == 0, "left = %d", nlupgs);
}
#else
#define lmv_adjust_dirpages(pages, ncfspgs, nlupgs) do {} while (0)
}
run_test 24z "rename one remote dir to another remote dir should fail"
+test_24A() { # LU-3182
+ local NFILES=5000
+
+ mkdir -p $DIR/$tdir
+ createmany -m $DIR/$tdir/$tfile $NFILES
+ local t=`ls $DIR/$tdir | wc -l`
+ local u=`ls $DIR/$tdir | sort -u | wc -l`
+ if [ $t -ne $NFILES -o $u -ne $NFILES ] ; then
+ error "Expected $NFILES files, got $t ($u unique)"
+ fi
+
+ rm -rf $DIR/$tdir || error "Can not delete directories"
+}
+run_test 24A "readdir() returns correct number of entries."
+
test_25a() {
echo '== symlink sanity ============================================='