LU-8710 ptlrpc: use current CPU instead of hardcoded 0
[fs/lustre-release.git] / lustre / ptlrpc / service.c
index 693391d..4b57b9e 100644
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2010, 2015, Intel Corporation.
+ * Copyright (c) 2010, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
 
 /* The following are visible and mutable through /sys/module/ptlrpc */
 int test_req_buffer_pressure = 0;
-CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
-                "set non-zero to put pressure on request buffer pools");
-CFS_MODULE_PARM(at_min, "i", int, 0644,
-                "Adaptive timeout minimum (sec)");
-CFS_MODULE_PARM(at_max, "i", int, 0644,
-                "Adaptive timeout maximum (sec)");
-CFS_MODULE_PARM(at_history, "i", int, 0644,
-                "Adaptive timeouts remember the slowest event that took place "
-                "within this period (sec)");
-CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
-                "How soon before an RPC deadline to send an early reply");
-CFS_MODULE_PARM(at_extra, "i", int, 0644,
-                "How much extra time to give with each early reply");
-
+module_param(test_req_buffer_pressure, int, 0444);
+MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
+module_param(at_min, int, 0644);
+MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
+module_param(at_max, int, 0644);
+MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
+module_param(at_history, int, 0644);
+MODULE_PARM_DESC(at_history,
+                "Adaptive timeouts remember the slowest event that took place within this period (sec)");
+module_param(at_early_margin, int, 0644);
+MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
+module_param(at_extra, int, 0644);
+MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
 
 /* forward ref */
 static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
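The hunk above drops the libcfs CFS_MODULE_PARM() wrapper in favour of the stock kernel macros. A minimal stand-alone sketch of the module_param()/MODULE_PARM_DESC() pattern being adopted; the example_* names and description strings are illustrative, not part of the patch:

#include <linux/module.h>
#include <linux/moduleparam.h>

/* 0444: readable under /sys/module/<module>/parameters/, not writable */
static int example_pressure;
module_param(example_pressure, int, 0444);
MODULE_PARM_DESC(example_pressure, "set non-zero to put pressure on buffer pools");

/* 0644: root may also change the value at runtime through sysfs */
static int example_at_min;
module_param(example_at_min, int, 0644);
MODULE_PARM_DESC(example_at_min, "adaptive timeout minimum (sec)");

MODULE_LICENSE("GPL");

As the comment in the hunk notes, the parameters stay visible (and, with mode 0644, writable) under /sys/module/ptlrpc/parameters/ and can still be given on the module command line at load time.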
@@ -564,7 +559,8 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
                 * have too many threads no matter how many cores/HTs
                 * there are.
                 */
-               if (cfs_cpu_ht_nsiblings(0) > 1) { /* weight is # of HTs */
+               if (cfs_cpu_ht_nsiblings(smp_processor_id()) > 1) {
+                       /* weight is # of HTs */
                        /* depress thread factor for hyper-thread */
                        factor = factor - (factor >> 1) + (factor >> 3);
                }
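For reference, the hyper-thread depression above keeps roughly 5/8 of the per-core thread factor. A stand-alone illustration of the arithmetic, using hypothetical factor values not taken from the patch:

#include <stdio.h>

/* factor - factor/2 + factor/8, i.e. about 5/8 of the original
 * (each shift rounds down) */
static int depress_for_ht(int factor)
{
	return factor - (factor >> 1) + (factor >> 3);
}

int main(void)
{
	int f;

	for (f = 4; f <= 32; f *= 2)
		printf("factor %2d -> %2d\n", f, depress_for_ht(f));
	return 0;
}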
@@ -651,7 +647,9 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
        if (array->paa_reqs_count == NULL)
                goto failed;
 
-       cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
+       setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
+                   (unsigned long)svcpt);
+
        /* At SOW, service time should be quick; 10s seems generous. If client
         * timeout is less than this, we'll be sending an early reply. */
        at_init(&svcpt->scp_at_estimate, 10, 0);
@@ -1144,7 +1142,7 @@ static int ptlrpc_check_req(struct ptlrpc_request *req)
         } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
                   !obd->obd_recovering) {
                         DEBUG_REQ(D_ERROR, req, "Invalid req with transno "
-                                  LPU64" without recovery",
+                                 "%llu without recovery",
                                   lustre_msg_get_transno(req->rq_reqmsg));
                         class_fail_export(req->rq_export);
                         rc = -ENODEV;
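LPU64/LPD64 were libcfs portability macros that expanded to a 64-bit printf specifier; this and the later hunks replace them with plain %llu/%lld. A minimal sketch of the resulting style, where example_log_xid is a hypothetical helper rather than a Lustre function:

#include <linux/kernel.h>
#include <linux/types.h>

static void example_log_xid(u64 xid)
{
	/* was: "got req x"LPU64"\n" with the macro supplying the specifier */
	pr_debug("got req x%llu\n", (unsigned long long)xid);
}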
@@ -1163,7 +1161,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
        __s32 next;
 
        if (array->paa_count == 0) {
-               cfs_timer_disarm(&svcpt->scp_at_timer);
+               del_timer(&svcpt->scp_at_timer);
                return;
        }
 
@@ -1173,7 +1171,7 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt)
        if (next <= 0) {
                ptlrpc_at_timer((unsigned long)svcpt);
        } else {
-               cfs_timer_arm(&svcpt->scp_at_timer, cfs_time_shift(next));
+               mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
                CDEBUG(D_INFO, "armed %s at %+ds\n",
                       svcpt->scp_service->srv_name, next);
        }
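Together with the setup_timer() change earlier in the patch, the cfs_timer_* wrappers give way to the kernel's own timer calls. A sketch of the classic (pre-4.15, unsigned-long callback) timer lifecycle being adopted; the example_* structure and functions are illustrative only:

#include <linux/timer.h>
#include <linux/jiffies.h>

struct example_part {
	struct timer_list ep_at_timer;
};

/* old-style callback: the timer passes back the cookie given to setup_timer() */
static void example_at_timeout(unsigned long data)
{
	struct example_part *part = (struct example_part *)data;

	/* handle expired adaptive-timeout work for this partition */
	(void)part;
}

static void example_part_init(struct example_part *part)
{
	setup_timer(&part->ep_at_timer, example_at_timeout, (unsigned long)part);
}

static void example_arm(struct example_part *part, long secs)
{
	/* mod_timer() starts or re-arms the timer for an absolute jiffies value */
	mod_timer(&part->ep_at_timer, jiffies + secs * HZ);
}

static void example_disarm(struct example_part *part)
{
	del_timer(&part->ep_at_timer);
}

On Linux, cfs_time_shift(next) in the hunk above evaluates to the same kind of absolute deadline (current jiffies plus next seconds), so mod_timer() can take its result directly.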
@@ -1584,7 +1582,14 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
                 * list otherwise it may hit swab race at LU-1044. */
                if (req->rq_ops->hpreq_check != NULL) {
                        rc = req->rq_ops->hpreq_check(req);
-                       LASSERT(rc <= 1); /* can only return error, 0, or 1 */
+                       if (rc == -ESTALE) {
+                               req->rq_status = rc;
+                               ptlrpc_error(req);
+                       }
+                       /** can only return error,
+                        * 0 for normal request,
+                        *  or 1 for high priority request */
+                       LASSERT(rc <= 1);
                }
        }
 
@@ -1878,7 +1883,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
                 rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
                 if (rc != 0) {
                         CERROR("error unpacking request: ptl %d from %s "
-                               "x"LPU64"\n", svc->srv_req_portal,
+                              "x%llu\n", svc->srv_req_portal,
                                libcfs_id2str(req->rq_peer), req->rq_xid);
                         goto err_req;
                 }
@@ -1887,14 +1892,14 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
         rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
         if (rc) {
                 CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
-                        LPU64"\n", svc->srv_req_portal,
+                       "%llu\n", svc->srv_req_portal,
                         libcfs_id2str(req->rq_peer), req->rq_xid);
                 goto err_req;
         }
 
         if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
             lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
-                CERROR("drop incoming rpc opc %u, x"LPU64"\n",
+               CERROR("drop incoming rpc opc %u, x%llu\n",
                        cfs_fail_val, req->rq_xid);
                 goto err_req;
         }
@@ -1920,7 +1925,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt,
                break;
        }
 
-        CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid);
+       CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid);
 
         req->rq_export = class_conn2export(
                 lustre_msg_get_handle(req->rq_reqmsg));
@@ -2061,7 +2066,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
         }
 
        CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
-              "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
+              "%s:%s+%d:%d:x%llu:%s:%d\n", current_comm(),
               (request->rq_export ?
                (char *)request->rq_export->exp_client_uuid.uuid : "0"),
               (request->rq_export ?
@@ -2073,7 +2078,7 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt,
         if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
                 CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
 
-       CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
+       CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
 
        /* re-assign request and sesson thread to the current one */
        request->rq_svc_thread = thread;
@@ -2100,8 +2105,8 @@ put_conn:
        do_gettimeofday(&work_end);
        timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
        CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
-              "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
-              "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
+              "%s:%s+%d:%d:x%llu:%s:%d Request procesed in "
+              "%ldus (%ldus total) trans %llu rc %d/%d\n",
                current_comm(),
                (request->rq_export ?
                 (char *)request->rq_export->exp_client_uuid.uuid : "0"),
@@ -2206,7 +2211,7 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs)
         if (nlocks == 0 && !been_handled) {
                 /* If we see this, we should already have seen the warning
                  * in mds_steal_ack_locks()  */
-               CDEBUG(D_HA, "All locks stolen from rs %p x"LPD64".t"LPD64
+               CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld"
                       " o%d NID %s\n",
                       rs,
                       rs->rs_xid, rs->rs_transno, rs->rs_opc,
@@ -2581,16 +2586,17 @@ static int ptlrpc_hr_main(void *arg)
        struct ptlrpc_hr_thread         *hrt = (struct ptlrpc_hr_thread *)arg;
        struct ptlrpc_hr_partition      *hrp = hrt->hrt_partition;
        struct list_head                replies;
-       char                            threadname[20];
        int                             rc;
 
        INIT_LIST_HEAD(&replies);
-       snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
-                hrp->hrp_cpt, hrt->hrt_id);
        unshare_fs_struct();
 
        rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
        if (rc != 0) {
+               char threadname[20];
+
+               snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
+                        hrp->hrp_cpt, hrt->hrt_id);
                CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
                      threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
        }
@@ -2906,7 +2912,7 @@ int ptlrpc_hr_init(void)
 
        init_waitqueue_head(&ptlrpc_hr.hr_waitq);
 
-       weight = cfs_cpu_ht_nsiblings(0);
+       weight = cfs_cpu_ht_nsiblings(smp_processor_id());
 
        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                hrp->hrp_cpt = i;
@@ -2915,9 +2921,11 @@ int ptlrpc_hr_init(void)
                atomic_set(&hrp->hrp_nstopped, 0);
 
                hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
+
                hrp->hrp_nthrs /= weight;
+               if (hrp->hrp_nthrs == 0)
+                       hrp->hrp_nthrs = 1;
 
-               LASSERT(hrp->hrp_nthrs > 0);
                OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
                              hrp->hrp_nthrs * sizeof(*hrt));
                if (hrp->hrp_thrs == NULL)
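The LASSERT(hrp->hrp_nthrs > 0) is replaced by a clamp because the integer division can legitimately round down to zero on a small CPT. A stand-alone illustration with hypothetical weights:

#include <stdio.h>

/* threads per partition = CPU weight of the CPT / hyper-thread siblings,
 * clamped to at least one instead of tripping an assertion */
static int hr_nthreads(int cpt_weight, int ht_siblings)
{
	int nthrs = cpt_weight / ht_siblings;

	return nthrs > 0 ? nthrs : 1;
}

int main(void)
{
	printf("weight 2, 4 siblings  -> %d thread(s)\n", hr_nthreads(2, 4));
	printf("weight 16, 2 siblings -> %d thread(s)\n", hr_nthreads(16, 2));
	return 0;
}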
@@ -2991,7 +2999,7 @@ ptlrpc_service_del_atimer(struct ptlrpc_service *svc)
        /* early disarm AT timer... */
        ptlrpc_service_for_each_part(svcpt, i, svc) {
                if (svcpt->scp_service != NULL)
-                       cfs_timer_disarm(&svcpt->scp_at_timer);
+                       del_timer(&svcpt->scp_at_timer);
        }
 }
 
@@ -3132,7 +3140,7 @@ ptlrpc_service_free(struct ptlrpc_service *svc)
                        break;
 
                /* In case somebody rearmed this in the meantime */
-               cfs_timer_disarm(&svcpt->scp_at_timer);
+               del_timer(&svcpt->scp_at_timer);
                array = &svcpt->scp_at_array;
 
                if (array->paa_reqs_array != NULL) {