#ifdef __KERNEL__
# include <libcfs/libcfs.h>
# include <linux/module.h>
-# include <asm/div64.h>
+# include <linux/math64.h>
#else /* __KERNEL__ */
# include <liblustre.h>
# include <libcfs/list.h>
#include <lustre_mdc.h>
#include "fld_internal.h"
-/* TODO: these 3 functions are copies of flow-control code from mdc_lib.c
- * It should be common thing. The same about mdc RPC lock */
-static int fld_req_avail(struct client_obd *cli, struct mdc_cache_waiter *mcw)
-{
- int rc;
- ENTRY;
- client_obd_list_lock(&cli->cl_loi_list_lock);
- rc = cfs_list_empty(&mcw->mcw_entry);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- RETURN(rc);
-};
-
-static void fld_enter_request(struct client_obd *cli)
-{
- struct mdc_cache_waiter mcw;
- struct l_wait_info lwi = { 0 };
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- cfs_list_add_tail(&mcw.mcw_entry, &cli->cl_cache_waiters);
- init_waitqueue_head(&mcw.mcw_waitq);
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- l_wait_event(mcw.mcw_waitq, fld_req_avail(cli, &mcw), &lwi);
- } else {
- cli->cl_r_in_flight++;
- client_obd_list_unlock(&cli->cl_loi_list_lock);
- }
-}
-
-static void fld_exit_request(struct client_obd *cli)
-{
- cfs_list_t *l, *tmp;
- struct mdc_cache_waiter *mcw;
-
- client_obd_list_lock(&cli->cl_loi_list_lock);
- cli->cl_r_in_flight--;
- cfs_list_for_each_safe(l, tmp, &cli->cl_cache_waiters) {
-
- if (cli->cl_r_in_flight >= cli->cl_max_rpcs_in_flight) {
- /* No free request slots anymore */
- break;
- }
-
- mcw = cfs_list_entry(l, struct mdc_cache_waiter, mcw_entry);
- cfs_list_del_init(&mcw->mcw_entry);
- cli->cl_r_in_flight++;
- wake_up(&mcw->mcw_waitq);
- }
- client_obd_list_unlock(&cli->cl_loi_list_lock);
-}
-
static int fld_rrb_hash(struct lu_client_fld *fld,
seqno_t seq)
{
else
hash = 0;
- cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+again:
+ list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == hash)
RETURN(target);
}
+ if (hash != 0) {
+ /* It is possible the remote target (MDT) is not yet connected to
+ * the client, so we will redirect this lookup to MDT0, which should
+ * have been connected during mount */
+ hash = 0;
+ goto again;
+ }
+
CERROR("%s: Can't find target by hash %d (seq "LPX64"). "
"Targets (%d):\n", fld->lcf_name, hash, seq,
fld->lcf_count);
- cfs_list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
+ list_for_each_entry(target, &fld->lcf_targets, ft_chain) {
const char *srv_name = target->ft_srv != NULL ?
target->ft_srv->lsf_name : "<null>";
const char *exp_name = target->ft_exp != NULL ?
RETURN(-ENOMEM);
spin_lock(&fld->lcf_lock);
- cfs_list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
+ list_for_each_entry(tmp, &fld->lcf_targets, ft_chain) {
if (tmp->ft_idx == tar->ft_idx) {
spin_unlock(&fld->lcf_lock);
OBD_FREE_PTR(target);
target->ft_srv = tar->ft_srv;
target->ft_idx = tar->ft_idx;
- cfs_list_add_tail(&target->ft_chain,
- &fld->lcf_targets);
+ list_add_tail(&target->ft_chain, &fld->lcf_targets);
fld->lcf_count++;
spin_unlock(&fld->lcf_lock);
ENTRY;
spin_lock(&fld->lcf_lock);
- cfs_list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
if (target->ft_idx == idx) {
fld->lcf_count--;
- cfs_list_del(&target->ft_chain);
+ list_del(&target->ft_chain);
spin_unlock(&fld->lcf_lock);
if (target->ft_exp != NULL)
spin_lock_init(&fld->lcf_lock);
fld->lcf_hash = &fld_hash[hash];
fld->lcf_flags = LUSTRE_FLD_INIT;
- CFS_INIT_LIST_HEAD(&fld->lcf_targets);
+ INIT_LIST_HEAD(&fld->lcf_targets);
cache_size = FLD_CLIENT_CACHE_SIZE /
sizeof(struct fld_cache_entry);
ENTRY;
spin_lock(&fld->lcf_lock);
- cfs_list_for_each_entry_safe(target, tmp,
- &fld->lcf_targets, ft_chain) {
+ list_for_each_entry_safe(target, tmp, &fld->lcf_targets, ft_chain) {
fld->lcf_count--;
- cfs_list_del(&target->ft_chain);
+ list_del(&target->ft_chain);
if (target->ft_exp != NULL)
class_export_put(target->ft_exp);
OBD_FREE_PTR(target);
req->rq_reply_portal = MDC_REPLY_PORTAL;
ptlrpc_at_set_req_timeout(req);
- fld_enter_request(&exp->exp_obd->u.cli);
+ obd_get_request_slot(&exp->exp_obd->u.cli);
rc = ptlrpc_queue_wait(req);
- fld_exit_request(&exp->exp_obd->u.cli);
+ obd_put_request_slot(&exp->exp_obd->u.cli);
if (rc != 0) {
- if (rc == -EWOULDBLOCK) {
- /* For no_delay req(see above), EWOULDBLOCK means the
- * connection is being evicted, but this seq lookup
- * should not return error, since it would cause
- * unecessary failure of the application, instead
- * it should retry here */
+ if (imp->imp_state != LUSTRE_IMP_CLOSED) {
+ /* Since LWP is not replayable, it will keep
+ * trying unless umount happens; otherwise it would
+ * cause unnecessary failure of the application. */
ptlrpc_req_finished(req);
rc = 0;
goto again;