* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2014, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_FLD
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/math64.h>
-#else /* __KERNEL__ */
-# include <liblustre.h>
-# include <libcfs/list.h>
-#endif
+#include <libcfs/libcfs.h>
+#include <linux/module.h>
+#include <linux/math64.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_mdc.h>
#include "fld_internal.h"
-static int fld_rrb_hash(struct lu_client_fld *fld,
- seqno_t seq)
+static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
{
- LASSERT(fld->lcf_count > 0);
- return do_div(seq, fld->lcf_count);
+ LASSERT(fld->lcf_count > 0);
+ return do_div(seq, fld->lcf_count);
}
static struct lu_fld_target *
-fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
+fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
{
struct lu_fld_target *target;
int hash;
.fh_scan_func = fld_rrb_scan
},
{
- 0,
+ NULL,
}
};
static struct lu_fld_target *
-fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
+fld_client_get_target(struct lu_client_fld *fld, u64 seq)
{
struct lu_fld_target *target;
ENTRY;
}
EXIT;
}
-#else
+#else /* LPROCFS */
static int fld_client_proc_init(struct lu_client_fld *fld)
{
return 0;
{
return;
}
-#endif
+#endif /* !LPROCFS */
EXPORT_SYMBOL(fld_client_proc_fini);
obd_get_request_slot(&exp->exp_obd->u.cli);
rc = ptlrpc_queue_wait(req);
obd_put_request_slot(&exp->exp_obd->u.cli);
+
+	if (rc == -ENOENT) {
+		/* Don't loop forever on non-existent FID sequences. */
+		GOTO(out_req, rc);
+	}
+ }
+
if (rc != 0) {
- if (rc == -EWOULDBLOCK || rc == -ESHUTDOWN) {
- /* For no_delay req(see above), EWOULDBLOCK and
- * ESHUTDOWN means the connection is being evicted,
- * but this seq lookup should not return error,
- * since it would cause unecessary failure of the
- * application, instead it should retry here */
+ if (imp->imp_state != LUSTRE_IMP_CLOSED) {
+			/* Since LWP is not replayable, it will keep
+			 * trying until umount happens; otherwise it would
+			 * cause unnecessary failure of the application. */
ptlrpc_req_finished(req);
rc = 0;
goto again;
return rc;
}
-int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
+int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
__u32 flags, const struct lu_env *env)
{
struct lu_seq_range res = { 0 };
res.lsr_start = seq;
fld_range_set_type(&res, flags);
-#if defined(__KERNEL__) && defined(HAVE_SERVER_SUPPORT)
+#ifdef HAVE_SERVER_SUPPORT
if (target->ft_srv != NULL) {
LASSERT(env != NULL);
rc = fld_server_lookup(env, target->ft_srv, seq, &res);
} else
-#endif
+#endif /* HAVE_SERVER_SUPPORT */
{
rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL);
}
}
EXPORT_SYMBOL(fld_client_flush);
-#ifdef __KERNEL__
struct proc_dir_entry *fld_type_proc_dir;
#ifdef HAVE_SERVER_SUPPORT
fld_server_mod_init();
-#endif
+#endif /* HAVE_SERVER_SUPPORT */
return 0;
}
{
#ifdef HAVE_SERVER_SUPPORT
fld_server_mod_exit();
-#endif
+#endif /* HAVE_SERVER_SUPPORT */
if (fld_type_proc_dir != NULL && !IS_ERR(fld_type_proc_dir)) {
lprocfs_remove(&fld_type_proc_dir);
MODULE_LICENSE("GPL");
cfs_module(mdd, LUSTRE_VERSION_STRING, fld_mod_init, fld_mod_exit);
-#endif /* __KERNEL__ */