LU-4379 procfs: don't always check max_pages_per_rpc alignment
fs/lustre-release.git: lustre/ldlm/ldlm_lib.c
index fc570c3..47279ba 100644
@@ -270,34 +270,41 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
         char *name = obddev->obd_type->typ_name;
         ldlm_ns_type_t ns_type = LDLM_NS_TYPE_UNKNOWN;
         int rc;
-       char    *cli_name = lustre_cfg_buf(lcfg, 0);
         ENTRY;
 
         /* In a more perfect world, we would hang a ptlrpc_client off of
          * obd_type and just use the values from there. */
-       if (!strcmp(name, LUSTRE_OSC_NAME) ||
-           (!(strcmp(name, LUSTRE_OSP_NAME)) &&
-            (is_osp_on_mdt(cli_name) &&
-              strstr(lustre_cfg_buf(lcfg, 1), "OST") != NULL))) {
-               /* OSC or OSP_on_MDT for OSTs */
-                rq_portal = OST_REQUEST_PORTAL;
-                rp_portal = OSC_REPLY_PORTAL;
-                connect_op = OST_CONNECT;
-                cli->cl_sp_me = LUSTRE_SP_CLI;
-                cli->cl_sp_to = LUSTRE_SP_OST;
-                ns_type = LDLM_NS_TYPE_OSC;
+       if (!strcmp(name, LUSTRE_OSC_NAME)) {
+               rq_portal = OST_REQUEST_PORTAL;
+               rp_portal = OSC_REPLY_PORTAL;
+               connect_op = OST_CONNECT;
+               cli->cl_sp_me = LUSTRE_SP_CLI;
+               cli->cl_sp_to = LUSTRE_SP_OST;
+               ns_type = LDLM_NS_TYPE_OSC;
        } else if (!strcmp(name, LUSTRE_MDC_NAME) ||
-                  !strcmp(name, LUSTRE_LWP_NAME) ||
-                  (!strcmp(name, LUSTRE_OSP_NAME) &&
-                   (is_osp_on_mdt(cli_name) &&
-                    strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL))) {
-               /* MDC or OSP_on_MDT for other MDTs */
-                rq_portal = MDS_REQUEST_PORTAL;
-                rp_portal = MDC_REPLY_PORTAL;
-                connect_op = MDS_CONNECT;
-                cli->cl_sp_me = LUSTRE_SP_CLI;
-                cli->cl_sp_to = LUSTRE_SP_MDT;
-                ns_type = LDLM_NS_TYPE_MDC;
+                  !strcmp(name, LUSTRE_LWP_NAME)) {
+               rq_portal = MDS_REQUEST_PORTAL;
+               rp_portal = MDC_REPLY_PORTAL;
+               connect_op = MDS_CONNECT;
+               cli->cl_sp_me = LUSTRE_SP_CLI;
+               cli->cl_sp_to = LUSTRE_SP_MDT;
+               ns_type = LDLM_NS_TYPE_MDC;
+       } else if (!strcmp(name, LUSTRE_OSP_NAME)) {
+               if (strstr(lustre_cfg_buf(lcfg, 1), "OST") == NULL) {
+                       /* OSP_on_MDT for other MDTs */
+                       connect_op = MDS_CONNECT;
+                       cli->cl_sp_to = LUSTRE_SP_MDT;
+                       ns_type = LDLM_NS_TYPE_MDC;
+                       rq_portal = OUT_PORTAL;
+               } else {
+                       /* OSP on MDT for OST */
+                       connect_op = OST_CONNECT;
+                       cli->cl_sp_to = LUSTRE_SP_OST;
+                       ns_type = LDLM_NS_TYPE_OSC;
+                       rq_portal = OST_REQUEST_PORTAL;
+               }
+               rp_portal = OSC_REPLY_PORTAL;
+               cli->cl_sp_me = LUSTRE_SP_CLI;
         } else if (!strcmp(name, LUSTRE_MGC_NAME)) {
                 rq_portal = MGS_REQUEST_PORTAL;
                 rp_portal = MGC_REPLY_PORTAL;
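
The restructured branch above splits OSP handling out of the OSC and MDC tests: rather than calling is_osp_on_mdt() in two places, the code branches once on the device type and then inspects the target name in lcfg buffer 1. Note the resulting portal asymmetry: an MDT-facing OSP now sends on OUT_PORTAL while an OST-facing one keeps OST_REQUEST_PORTAL, and both reply on OSC_REPLY_PORTAL. A minimal sketch of the decision, with a hypothetical helper name:

```c
/* Sketch only; the helper name is hypothetical.  Mirrors the strstr()
 * test in the OSP branch above: an OSP whose target name contains
 * "OST" connects to an OST (OSC-like portals), otherwise it connects
 * to another MDT. */
static int osp_target_is_ost(struct lustre_cfg *lcfg)
{
	return strstr(lustre_cfg_buf(lcfg, 1), "OST") != NULL;
}
```
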
@@ -343,8 +350,8 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
         cli->cl_avail_grant = 0;
        /* FIXME: Should limit this for the sum of all cl_dirty_max. */
        cli->cl_dirty_max = OSC_MAX_DIRTY_DEFAULT * 1024 * 1024;
-       if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > num_physpages / 8)
-               cli->cl_dirty_max = num_physpages << (PAGE_CACHE_SHIFT - 3);
+       if (cli->cl_dirty_max >> PAGE_CACHE_SHIFT > totalram_pages / 8)
+               cli->cl_dirty_max = totalram_pages << (PAGE_CACHE_SHIFT - 3);
         CFS_INIT_LIST_HEAD(&cli->cl_cache_waiters);
         CFS_INIT_LIST_HEAD(&cli->cl_loi_ready_list);
         CFS_INIT_LIST_HEAD(&cli->cl_loi_hp_ready_list);
@@ -370,6 +377,7 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
        cfs_atomic_set(&cli->cl_lru_in_list, 0);
        CFS_INIT_LIST_HEAD(&cli->cl_lru_list);
        client_obd_list_lock_init(&cli->cl_lru_list_lock);
+       cfs_atomic_set(&cli->cl_unstable_count, 0);
 
        init_waitqueue_head(&cli->cl_destroy_waitq);
        cfs_atomic_set(&cli->cl_destroy_in_flight, 0);
@@ -392,13 +400,17 @@ int client_obd_setup(struct obd_device *obddev, struct lustre_cfg *lcfg)
        cli->cl_max_pages_per_rpc = min_t(int, PTLRPC_MAX_BRW_PAGES,
                                          LNET_MTU >> PAGE_CACHE_SHIFT);
 
+       /* set cl_chunkbits default value to PAGE_CACHE_SHIFT,
+        * it will be updated at OSC connection time. */
+       cli->cl_chunkbits = PAGE_CACHE_SHIFT;
+
        if (!strcmp(name, LUSTRE_MDC_NAME)) {
                cli->cl_max_rpcs_in_flight = MDC_MAX_RIF_DEFAULT;
-       } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
+       } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 128 /* MB */) {
                cli->cl_max_rpcs_in_flight = 2;
-       } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
+       } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 256 /* MB */) {
                cli->cl_max_rpcs_in_flight = 3;
-       } else if (num_physpages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
+       } else if (totalram_pages >> (20 - PAGE_CACHE_SHIFT) <= 512 /* MB */) {
                cli->cl_max_rpcs_in_flight = 4;
        } else {
                if (osc_on_mdt(obddev->obd_name))
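
The tiers above size cl_max_rpcs_in_flight from total RAM; shifting totalram_pages right by (20 - PAGE_CACHE_SHIFT) converts pages to megabytes. A sketch of the selection (the default for machines above 512 MB comes from the else branch, which this hunk truncates, so it is omitted):

```c
/* Sketch only.  With 4 KiB pages (PAGE_CACHE_SHIFT = 12), pages are
 * converted to MB by >> 8: e.g. 65536 pages -> 256 MB -> 3 RPCs. */
static int pick_max_rpcs_in_flight(unsigned long ram_mb)
{
	if (ram_mb <= 128)
		return 2;
	if (ram_mb <= 256)
		return 3;
	if (ram_mb <= 512)
		return 4;
	return 0;	/* larger default set in the else branch (not shown) */
}
```
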
@@ -472,15 +484,16 @@ EXPORT_SYMBOL(client_obd_setup);
 
 int client_obd_cleanup(struct obd_device *obddev)
 {
-        ENTRY;
+       ENTRY;
 
-        ldlm_namespace_free_post(obddev->obd_namespace);
-        obddev->obd_namespace = NULL;
+       ldlm_namespace_free_post(obddev->obd_namespace);
+       obddev->obd_namespace = NULL;
 
-        LASSERT(obddev->u.cli.cl_import == NULL);
+       obd_cleanup_client_import(obddev);
+       LASSERT(obddev->u.cli.cl_import == NULL);
 
-        ldlm_put_ref();
-        RETURN(0);
+       ldlm_put_ref();
+       RETURN(0);
 }
 EXPORT_SYMBOL(client_obd_cleanup);
 
@@ -794,7 +807,9 @@ int target_handle_connect(struct ptlrpc_request *req)
        if (!target) {
                deuuidify(str, NULL, &target_start, &target_len);
                LCONSOLE_ERROR_MSG(0x137, "%s: not available for connect "
-                                  "from %s (no target)\n", str,
+                                  "from %s (no target). If you are running "
+                                  "an HA pair check that the target is "
+                                  "mounted on the other server.\n", str,
                                   libcfs_nid2str(req->rq_peer.nid));
                GOTO(out, rc = -ENODEV);
        }
@@ -824,7 +839,7 @@ int target_handle_connect(struct ptlrpc_request *req)
        /* Make sure the target isn't cleaned up while we're here. Yes,
         * there's still a race between the above check and our incref here.
         * Really, class_uuid2obd should take the ref. */
-        targref = class_incref(target, __FUNCTION__, cfs_current());
+       targref = class_incref(target, __FUNCTION__, current);
 
        target->obd_conn_inprogress++;
        spin_unlock(&target->obd_dev_lock);
@@ -972,21 +987,6 @@ no_export:
                               libcfs_nid2str(req->rq_peer.nid),
                               cfs_atomic_read(&export->exp_refcount));
                 GOTO(out, rc = -EBUSY);
-        } else if (req->rq_export != NULL &&
-                   (cfs_atomic_read(&export->exp_rpc_count) > 1)) {
-               /* The current connect RPC has increased exp_rpc_count. */
-                LCONSOLE_WARN("%s: Client %s (at %s) refused reconnection, "
-                              "still busy with %d active RPCs\n",
-                              target->obd_name, cluuid.uuid,
-                              libcfs_nid2str(req->rq_peer.nid),
-                              cfs_atomic_read(&export->exp_rpc_count) - 1);
-               spin_lock(&export->exp_lock);
-               if (req->rq_export->exp_conn_cnt <
-                   lustre_msg_get_conn_cnt(req->rq_reqmsg))
-                       /* try to abort active requests */
-                       req->rq_export->exp_abort_active_req = 1;
-               spin_unlock(&export->exp_lock);
-               GOTO(out, rc = -EBUSY);
         } else if (lustre_msg_get_conn_cnt(req->rq_reqmsg) == 1) {
                 if (!strstr(cluuid.uuid, "mdt"))
                         LCONSOLE_WARN("%s: Rejecting reconnect from the "
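
This hunk stops refusing reconnects from clients that still have RPCs in flight; the exp_abort_active_req flag that path relied on is deleted throughout the patch. Staleness is instead detected where it matters, in target_bulk_io() below, by comparing connect counts. A sketch of that replacement test, with a hypothetical wrapper name:

```c
/* Hypothetical wrapper around the check used in target_bulk_io():
 * the bulk's request carries the connect count it was sent under, and
 * a larger exp_conn_cnt means the client has reconnected since. */
static bool bulk_peer_gone(struct obd_export *exp, struct ptlrpc_request *req)
{
	return exp->exp_failed ||
	       exp->exp_conn_cnt > lustre_msg_get_conn_cnt(req->rq_reqmsg);
}
```
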
@@ -1009,7 +1009,7 @@ no_export:
               export, (long)cfs_time_current_sec(),
               export ? (long)export->exp_last_request_time : 0);
 
-        /* If this is the first time a client connects, reset the recovery
+       /* If this is the first time a client connects, reset the recovery
         * timer. Discard lightweight connections which might be local. */
        if (!lw_client && rc == 0 && target->obd_recovering)
                check_and_start_recovery_timer(target, req, export == NULL);
@@ -1128,7 +1128,6 @@ dont_check_exports:
         }
         LASSERT(lustre_msg_get_conn_cnt(req->rq_reqmsg) > 0);
         export->exp_conn_cnt = lustre_msg_get_conn_cnt(req->rq_reqmsg);
-        export->exp_abort_active_req = 0;
 
        /* Don't evict liblustre clients for not pinging. */
         if (lustre_msg_get_op_flags(req->rq_reqmsg) & MSG_CONNECT_LIBCLIENT) {
@@ -1269,7 +1268,7 @@ out:
                target->obd_conn_inprogress--;
                spin_unlock(&target->obd_dev_lock);
 
-               class_decref(targref, __func__, cfs_current());
+               class_decref(targref, __func__, current);
        }
        if (rc)
                req->rq_status = rc;
@@ -1910,7 +1909,7 @@ static int handle_recovery_req(struct ptlrpc_thread *thread,
         if (req->rq_export->exp_disconnected)
                 GOTO(reqcopy_put, rc = 0);
 
-        rc = lu_context_init(&req->rq_recov_session, LCT_SESSION);
+        rc = lu_context_init(&req->rq_recov_session, LCT_SERVER_SESSION);
         if (rc) {
                 CERROR("Failure to initialize session: %d\n", rc);
                 GOTO(reqcopy_put, rc);
@@ -1993,8 +1992,8 @@ static int target_recovery_thread(void *arg)
         thread->t_env = env;
         thread->t_id = -1; /* force filter_iobuf_get/put to use local buffers */
         env->le_ctx.lc_thread = thread;
-        thread->t_data = NULL;
-        thread->t_watchdog = NULL;
+       tgt_io_thread_init(thread); /* init thread_big_cache for IO requests */
+       thread->t_watchdog = NULL;
 
        CDEBUG(D_HA, "%s: started recovery thread pid %d\n", obd->obd_name,
               current_pid());
@@ -2089,9 +2088,10 @@ static int target_recovery_thread(void *arg)
         trd->trd_processing_task = 0;
        complete(&trd->trd_finishing);
 
-        OBD_FREE_PTR(thread);
-        OBD_FREE_PTR(env);
-        RETURN(rc);
+       tgt_io_thread_done(thread);
+       OBD_FREE_PTR(thread);
+       OBD_FREE_PTR(env);
+       RETURN(rc);
 }
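
The recovery thread now pairs an init with a teardown: tgt_io_thread_init() replaces the old `thread->t_data = NULL` to set up a per-thread buffer cache for replayed IO requests, and tgt_io_thread_done() must run before the thread is freed. The implied lifecycle, as a comment-level sketch:

```c
/* Lifecycle sketch; the function bodies live elsewhere in the tree. */
tgt_io_thread_init(thread);	/* at thread start: big cache for IO replay */
/* ... drain the recovery request queues ... */
tgt_io_thread_done(thread);	/* before OBD_FREE_PTR(thread) */
```
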
 
 static int target_start_recovery_thread(struct lu_target *lut,
@@ -2638,89 +2638,91 @@ static inline char *bulk2type(struct ptlrpc_bulk_desc *desc)
 int target_bulk_io(struct obd_export *exp, struct ptlrpc_bulk_desc *desc,
                    struct l_wait_info *lwi)
 {
-        struct ptlrpc_request *req = desc->bd_req;
-        int rc = 0;
-        ENTRY;
+       struct ptlrpc_request   *req = desc->bd_req;
+       time_t                   start = cfs_time_current_sec();
+       int                      rc = 0;
+
+       ENTRY;
 
        /* If there is eviction in progress, wait for it to finish. */
-        if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
-                *lwi = LWI_INTR(NULL, NULL);
-                rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
-                                  !cfs_atomic_read(&exp->exp_obd->
-                                                   obd_evict_inprogress),
-                                  lwi);
-        }
+       if (unlikely(cfs_atomic_read(&exp->exp_obd->obd_evict_inprogress))) {
+               *lwi = LWI_INTR(NULL, NULL);
+               rc = l_wait_event(exp->exp_obd->obd_evict_inprogress_waitq,
+                                 !cfs_atomic_read(&exp->exp_obd->
+                                                  obd_evict_inprogress),
+                                 lwi);
+       }
 
-       /* Check if client was evicted or tried to reconnect already. */
-        if (exp->exp_failed || exp->exp_abort_active_req) {
-                rc = -ENOTCONN;
-        } else {
-                if (desc->bd_type == BULK_PUT_SINK)
-                        rc = sptlrpc_svc_wrap_bulk(req, desc);
-                if (rc == 0)
-                        rc = ptlrpc_start_bulk_transfer(desc);
-        }
-
-        if (rc == 0 && OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
-                ptlrpc_abort_bulk(desc);
-        } else if (rc == 0) {
-                time_t start = cfs_time_current_sec();
-                do {
-                        long timeoutl = req->rq_deadline - cfs_time_current_sec();
-                        cfs_duration_t timeout = timeoutl <= 0 ?
-                                CFS_TICK : cfs_time_seconds(timeoutl);
-                        *lwi = LWI_TIMEOUT_INTERVAL(timeout,
-                                                    cfs_time_seconds(1),
-                                                   target_bulk_timeout,
-                                                   desc);
-                        rc = l_wait_event(desc->bd_waitq,
-                                          !ptlrpc_server_bulk_active(desc) ||
-                                          exp->exp_failed ||
-                                          exp->exp_abort_active_req,
-                                          lwi);
-                        LASSERT(rc == 0 || rc == -ETIMEDOUT);
-                       /* Wait again if we changed deadline. */
-                } while ((rc == -ETIMEDOUT) &&
-                         (req->rq_deadline > cfs_time_current_sec()));
-
-                if (rc == -ETIMEDOUT) {
-                        DEBUG_REQ(D_ERROR, req,
-                                  "timeout on bulk %s after %ld%+lds",
-                                  bulk2type(desc),
-                                  req->rq_deadline - start,
-                                  cfs_time_current_sec() -
-                                  req->rq_deadline);
-                        ptlrpc_abort_bulk(desc);
-                } else if (exp->exp_failed) {
-                        DEBUG_REQ(D_ERROR, req, "Eviction on bulk %s",
-                                  bulk2type(desc));
-                        rc = -ENOTCONN;
-                        ptlrpc_abort_bulk(desc);
-                } else if (exp->exp_abort_active_req) {
-                        DEBUG_REQ(D_ERROR, req, "Reconnect on bulk %s",
-                                  bulk2type(desc));
-                       /* We don't reply anyway. */
-                        rc = -ETIMEDOUT;
-                        ptlrpc_abort_bulk(desc);
-               } else if (desc->bd_failure ||
-                          desc->bd_nob_transferred != desc->bd_nob) {
-                       DEBUG_REQ(D_ERROR, req, "%s bulk %s %d(%d)",
-                                 desc->bd_failure ?
-                                 "network error on" : "truncated",
-                                 bulk2type(desc),
-                                 desc->bd_nob_transferred,
-                                 desc->bd_nob);
-                       /* XXX Should this be a different errno? */
-                       rc = -ETIMEDOUT;
-                } else if (desc->bd_type == BULK_GET_SINK) {
-                        rc = sptlrpc_svc_unwrap_bulk(req, desc);
-                }
-        } else {
-                DEBUG_REQ(D_ERROR, req, "bulk %s failed: rc %d",
-                          bulk2type(desc), rc);
-        }
+       /* Check if client was evicted or reconnected already. */
+       if (exp->exp_failed ||
+           exp->exp_conn_cnt > lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
+               rc = -ENOTCONN;
+       } else {
+               if (desc->bd_type == BULK_PUT_SINK)
+                       rc = sptlrpc_svc_wrap_bulk(req, desc);
+               if (rc == 0)
+                       rc = ptlrpc_start_bulk_transfer(desc);
+       }
 
-        RETURN(rc);
+       if (rc < 0) {
+               DEBUG_REQ(D_ERROR, req, "bulk %s failed: rc %d",
+                         bulk2type(desc), rc);
+               RETURN(rc);
+       }
+
+       if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
+               ptlrpc_abort_bulk(desc);
+               RETURN(0);
+       }
+
+       do {
+               long timeoutl = req->rq_deadline - cfs_time_current_sec();
+               cfs_duration_t timeout = timeoutl <= 0 ?
+                                        CFS_TICK : cfs_time_seconds(timeoutl);
+
+               *lwi = LWI_TIMEOUT_INTERVAL(timeout, cfs_time_seconds(1),
+                                           target_bulk_timeout, desc);
+               rc = l_wait_event(desc->bd_waitq,
+                                 !ptlrpc_server_bulk_active(desc) ||
+                                 exp->exp_failed ||
+                                 exp->exp_conn_cnt >
+                                 lustre_msg_get_conn_cnt(req->rq_reqmsg),
+                                 lwi);
+               LASSERT(rc == 0 || rc == -ETIMEDOUT);
+               /* Wait again if we changed deadline. */
+       } while ((rc == -ETIMEDOUT) &&
+                (req->rq_deadline > cfs_time_current_sec()));
+
+       if (rc == -ETIMEDOUT) {
+               DEBUG_REQ(D_ERROR, req, "timeout on bulk %s after %ld%+lds",
+                         bulk2type(desc), req->rq_deadline - start,
+                         cfs_time_current_sec() - req->rq_deadline);
+               ptlrpc_abort_bulk(desc);
+       } else if (exp->exp_failed) {
+               DEBUG_REQ(D_ERROR, req, "Eviction on bulk %s",
+                         bulk2type(desc));
+               rc = -ENOTCONN;
+               ptlrpc_abort_bulk(desc);
+       } else if (exp->exp_conn_cnt >
+                  lustre_msg_get_conn_cnt(req->rq_reqmsg)) {
+               DEBUG_REQ(D_ERROR, req, "Reconnect on bulk %s",
+                         bulk2type(desc));
+               /* We don't reply anyway. */
+               rc = -ETIMEDOUT;
+               ptlrpc_abort_bulk(desc);
+       } else if (desc->bd_failure ||
+                  desc->bd_nob_transferred != desc->bd_nob) {
+               DEBUG_REQ(D_ERROR, req, "%s bulk %s %d(%d)",
+                         desc->bd_failure ? "network error on" : "truncated",
+                         bulk2type(desc), desc->bd_nob_transferred,
+                         desc->bd_nob);
+               /* XXX Should this be a different errno? */
+               rc = -ETIMEDOUT;
+       } else if (desc->bd_type == BULK_GET_SINK) {
+               rc = sptlrpc_svc_unwrap_bulk(req, desc);
+       }
+
+       RETURN(rc);
 }
 EXPORT_SYMBOL(target_bulk_io);
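
The rewrite also hoists `start` to the top of the function so the timeout message can report both the allotted transfer time and the overrun past the deadline, and it flattens the old nested-if structure into early returns for the setup-failure and OBD_FAIL_MDS_SENDPAGE cases. A runnable illustration of how that format string reads, using hypothetical timestamps and bulk type:

```c
#include <stdio.h>
#include <time.h>

int main(void)
{
	/* Hypothetical values for illustration only. */
	time_t start = 100, deadline = 130, now = 135;

	/* Same format as the DEBUG_REQ() above: elapsed budget, then the
	 * signed slip past the deadline ("%+ld" always prints the sign). */
	printf("timeout on bulk WRITE after %ld%+lds\n",
	       (long)(deadline - start), (long)(now - deadline));
	/* prints: timeout on bulk WRITE after 30+5s */
	return 0;
}
```
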