#define DEBUG_SUBSYSTEM S_FLD
-#ifdef __KERNEL__
-# include <libcfs/libcfs.h>
-# include <linux/module.h>
-# include <linux/math64.h>
-#else /* __KERNEL__ */
-# include <liblustre.h>
-# include <libcfs/list.h>
-#endif
+#include <libcfs/libcfs.h>
+#include <linux/module.h>
+#include <linux/math64.h>
#include <obd.h>
#include <obd_class.h>
#include <lustre_mdc.h>
#include "fld_internal.h"
+/*
+ * Round-robin hash: map a sequence number to a target slot.
+ *
+ * Returns seq % lcf_count.  Note do_div() returns the remainder and
+ * writes the quotient back into its first argument; 'seq' is a local
+ * by-value copy here, so clobbering it is harmless.  Requires at least
+ * one configured target (lcf_count > 0), asserted below.
+ */
-static int fld_rrb_hash(struct lu_client_fld *fld,
- seqno_t seq)
+static int fld_rrb_hash(struct lu_client_fld *fld, u64 seq)
 {
- LASSERT(fld->lcf_count > 0);
- return do_div(seq, fld->lcf_count);
+ LASSERT(fld->lcf_count > 0);
+ return do_div(seq, fld->lcf_count);
 }
static struct lu_fld_target *
-fld_rrb_scan(struct lu_client_fld *fld, seqno_t seq)
+fld_rrb_scan(struct lu_client_fld *fld, u64 seq)
{
struct lu_fld_target *target;
int hash;
else
hash = 0;
- cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+again:
+ list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == hash)
RETURN(target);
}
+ if (hash != 0) {
+ /* It is possible that the remote target (MDT) is not yet
+ * connected to the client, so retry the lookup against
+ * MDT0, which should have been connected during mount. */
+ hash = 0;
+ goto again;
+ }
+
+
CERROR("%s: Can't find target by hash %d (seq "LPX64"). "
"Targets (%d):\n", fld->lcf_name, hash, seq,
fld->lcf_count);
- cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+ list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
const char *srv_name = target->ft_srv != NULL ?
target->ft_srv->lsf_name : "<null>";
const char *exp_name = target->ft_exp != NULL ?
};
static struct lu_fld_target *
-fld_client_get_target(struct lu_client_fld *fld, seqno_t seq)
+fld_client_get_target(struct lu_client_fld *fld, u64 seq)
{
struct lu_fld_target *target;
ENTRY;
RETURN(-ENOMEM);
spin_lock(&fld->lcf_lock);
- cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
+ list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
if (tmp->ft_idx == tar->ft_idx) {
spin_unlock(&fld->lcf_lock);
OBD_FREE_PTR(target);
target->ft_srv = tar->ft_srv;
target->ft_idx = tar->ft_idx;
- cfs_list_add_tail(&target->ft_chain,
- &fld->lcf_targets);
+ list_add_tail(&target->ft_chain, &fld->lcf_targets);
fld->lcf_count++;
spin_unlock(&fld->lcf_lock);
ENTRY;
spin_lock(&fld->lcf_lock);
- cfs_list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == idx) {
fld->lcf_count--;
- cfs_list_del(&target->ft_chain);
+ list_del(&target->ft_chain);
spin_unlock(&fld->lcf_lock);
if (target->ft_exp != NULL)
}
EXIT;
}
-#else
+#else /* LPROCFS */
static int fld_client_proc_init(struct lu_client_fld *fld)
{
return 0;
{
return;
}
-#endif
+#endif /* !LPROCFS */
EXPORT_SYMBOL(fld_client_proc_fini);
spin_lock_init(&fld->lcf_lock);
fld->lcf_hash = &fld_hash[hash];
fld->lcf_flags = LUSTRE_FLD_INIT;
- CFS_INIT_LIST_HEAD(&fld->lcf_targets);
+ INIT_LIST_HEAD(&fld->lcf_targets);
cache_size = FLD_CLIENT_CACHE_SIZE /
sizeof(struct fld_cache_entry);
ENTRY;
spin_lock(&fld->lcf_lock);
- cfs_list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
fld->lcf_count--;
- cfs_list_del(&target->ft_chain);
+ list_del(&target->ft_chain);
if (target->ft_exp != NULL)
class_export_put(target->ft_exp);
OBD_FREE_PTR(target);
rc = ptlrpc_queue_wait(req);
obd_put_request_slot(&exp->exp_obd->u.cli);
if (rc != 0) {
- if (rc == -EWOULDBLOCK) {
- /* For no_delay req(see above), EWOULDBLOCK means the
- * connection is being evicted, but this seq lookup
- * should not return error, since it would cause
- * unecessary failure of the application, instead
- * it should retry here */
+ if (imp->imp_state != LUSTRE_IMP_CLOSED) {
+ /* Since LWP is not replayable, it will keep
+ * trying until umount happens; otherwise a
+ * transient error would cause unnecessary
+ * failure of the application. */
 ptlrpc_req_finished(req);
 rc = 0;
 goto again;
return rc;
}
-int fld_client_lookup(struct lu_client_fld *fld, seqno_t seq, mdsno_t *mds,
+int fld_client_lookup(struct lu_client_fld *fld, u64 seq, u32 *mds,
__u32 flags, const struct lu_env *env)
{
struct lu_seq_range res = { 0 };
res.lsr_start = seq;
fld_range_set_type(&res, flags);
-#if defined(__KERNEL__) && defined(HAVE_SERVER_SUPPORT)
+#ifdef HAVE_SERVER_SUPPORT
if (target->ft_srv != NULL) {
LASSERT(env != NULL);
rc = fld_server_lookup(env, target->ft_srv, seq, &res);
} else
-#endif
+#endif /* HAVE_SERVER_SUPPORT */
{
rc = fld_client_rpc(target->ft_exp, &res, FLD_QUERY, NULL);
}
}
EXPORT_SYMBOL(fld_client_flush);
-#ifdef __KERNEL__
struct proc_dir_entry *fld_type_proc_dir;
#ifdef HAVE_SERVER_SUPPORT
fld_server_mod_init();
-#endif
+#endif /* HAVE_SERVER_SUPPORT */
return 0;
}
{
#ifdef HAVE_SERVER_SUPPORT
fld_server_mod_exit();
-#endif
+#endif /* HAVE_SERVER_SUPPORT */
if (fld_type_proc_dir != NULL && !IS_ERR(fld_type_proc_dir)) {
lprocfs_remove(&fld_type_proc_dir);
MODULE_LICENSE("GPL");
cfs_module(mdd, LUSTRE_VERSION_STRING, fld_mod_init, fld_mod_exit);
-#endif /* __KERNEL__ */