diff --git a/lustre/ptlrpc/service.c b/lustre/ptlrpc/service.c
index 63fee6d..ecde6ab 100644
--- a/lustre/ptlrpc/service.c
+++ b/lustre/ptlrpc/service.c
@@ -15,11 +15,7 @@
  *
  * You should have received a copy of the GNU General Public License
  * version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
  *
  * GPL HEADER END
  */
@@ -27,7 +23,7 @@
  * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2010, 2012, Intel Corporation.
+ * Copyright (c) 2010, 2016, Intel Corporation.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -35,9 +31,7 @@
  */
 
 #define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
+#include <linux/kthread.h>
 #include <obd_support.h>
 #include <obd_class.h>
 #include <lustre_net.h>
@@ -47,20 +41,19 @@
 
 /* The following are visible and mutable through /sys/module/ptlrpc */
 int test_req_buffer_pressure = 0;
-CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
-		"set non-zero to put pressure on request buffer pools");
-CFS_MODULE_PARM(at_min, "i", int, 0644,
-		"Adaptive timeout minimum (sec)");
-CFS_MODULE_PARM(at_max, "i", int, 0644,
-		"Adaptive timeout maximum (sec)");
-CFS_MODULE_PARM(at_history, "i", int, 0644,
-		"Adaptive timeouts remember the slowest event that took place "
-		"within this period (sec)");
-CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
-		"How soon before an RPC deadline to send an early reply");
-CFS_MODULE_PARM(at_extra, "i", int, 0644,
-		"How much extra time to give with each early reply");
-
+module_param(test_req_buffer_pressure, int, 0444);
+MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
+module_param(at_min, int, 0644);
+MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
+module_param(at_max, int, 0644);
+MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
+module_param(at_history, int, 0644);
+MODULE_PARM_DESC(at_history,
+		 "Adaptive timeouts remember the slowest event that took place within this period (sec)");
+module_param(at_early_margin, int, 0644);
+MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
+module_param(at_extra, int, 0644);
+MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
 
 /* forward ref */
 static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
@@ -68,11 +61,11 @@ static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req);
 static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
 
 /** Holds a list of all PTLRPC services */
-CFS_LIST_HEAD(ptlrpc_all_services);
+struct list_head ptlrpc_all_services;
 /** Used to protect the \e ptlrpc_all_services list */
 struct mutex ptlrpc_all_services_mutex;
 
-struct ptlrpc_request_buffer_desc *
+static struct ptlrpc_request_buffer_desc *
 ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
 {
	struct ptlrpc_service *svc = svcpt->scp_service;
@@ -86,7 +79,7 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
 
	rqbd->rqbd_refcount = 
0;
	rqbd->rqbd_cbid.cbid_fn = request_in_callback;
	rqbd->rqbd_cbid.cbid_arg = rqbd;
-	CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs);
+	INIT_LIST_HEAD(&rqbd->rqbd_reqs);
	OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable,
			    svcpt->scp_cpt, svc->srv_buf_size);
	if (rqbd->rqbd_buffer == NULL) {
@@ -95,23 +88,23 @@ ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
	}
 
	spin_lock(&svcpt->scp_lock);
-	cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
+	list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
	svcpt->scp_nrqbds_total++;
	spin_unlock(&svcpt->scp_lock);
 
	return rqbd;
 }
 
-void
+static void
 ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
 {
	struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
 
	LASSERT(rqbd->rqbd_refcount == 0);
-	LASSERT(cfs_list_empty(&rqbd->rqbd_reqs));
+	LASSERT(list_empty(&rqbd->rqbd_reqs));
 
	spin_lock(&svcpt->scp_lock);
-	cfs_list_del(&rqbd->rqbd_list);
+	list_del(&rqbd->rqbd_list);
	svcpt->scp_nrqbds_total--;
	spin_unlock(&svcpt->scp_lock);
 
@@ -119,7 +112,7 @@ ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
	OBD_FREE_PTR(rqbd);
 }
 
-int
+static int
 ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
 {
	struct ptlrpc_service *svc = svcpt->scp_service;
@@ -183,44 +176,40 @@ ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
 * Puts a lock and its mode into reply state associated to request reply.
 */
 void
-ptlrpc_save_lock(struct ptlrpc_request *req,
-		 struct lustre_handle *lock, int mode, int no_ack)
+ptlrpc_save_lock(struct ptlrpc_request *req, struct lustre_handle *lock,
+		 int mode, bool no_ack, bool convert_lock)
 {
-	struct ptlrpc_reply_state *rs = req->rq_reply_state;
-	int idx;
+	struct ptlrpc_reply_state *rs = req->rq_reply_state;
+	int idx;
 
-	LASSERT(rs != NULL);
-	LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
+	LASSERT(rs != NULL);
+	LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
 
-	if (req->rq_export->exp_disconnected) {
-		ldlm_lock_decref(lock, mode);
-	} else {
-		idx = rs->rs_nlocks++;
-		rs->rs_locks[idx] = *lock;
-		rs->rs_modes[idx] = mode;
-		rs->rs_difficult = 1;
-		rs->rs_no_ack = !!no_ack;
-	}
+	idx = rs->rs_nlocks++;
+	rs->rs_locks[idx] = *lock;
+	rs->rs_modes[idx] = mode;
+	rs->rs_difficult = 1;
+	rs->rs_no_ack = no_ack;
+	rs->rs_convert_lock = convert_lock;
 }
 EXPORT_SYMBOL(ptlrpc_save_lock);
 
-#ifdef __KERNEL__
 
 struct ptlrpc_hr_partition;
 
 struct ptlrpc_hr_thread {
	int				hrt_id;		/* thread ID */
	spinlock_t			hrt_lock;
-	cfs_waitq_t			hrt_waitq;
-	cfs_list_t			hrt_queue;	/* RS queue */
+	wait_queue_head_t		hrt_waitq;
+	struct list_head		hrt_queue;
	struct ptlrpc_hr_partition	*hrt_partition;
 };
 
 struct ptlrpc_hr_partition {
	/* # of started threads */
-	cfs_atomic_t			hrp_nstarted;
+	atomic_t			hrp_nstarted;
	/* # of stopped threads */
-	cfs_atomic_t			hrp_nstopped;
+	atomic_t			hrp_nstopped;
	/* cpu partition id */
	int				hrp_cpt;
	/* round-robin rotor for choosing thread */
@@ -238,7 +227,7 @@ struct ptlrpc_hr_service {
	/* CPU partition table, it's just cfs_cpt_table for now */
	struct cfs_cpt_table		*hr_cpt_table;
	/** controller sleep waitq */
-	cfs_waitq_t			hr_waitq;
+	wait_queue_head_t		hr_waitq;
	unsigned int			hr_stopping;
	/** roundrobin rotor for non-affinity service */
	unsigned int			hr_rotor;
@@ -247,7 +236,7 @@ struct ptlrpc_hr_service {
 };
 
 struct rs_batch {
-	cfs_list_t			rsb_replies;
+	struct list_head		rsb_replies;
	unsigned int			rsb_n_replies;
	struct ptlrpc_service_part	*rsb_svcpt;
 };
@@ -267,8 +256,8 @@ static struct ptlrpc_hr_service ptlrpc_hr;
 */
 static void rs_batch_init(struct rs_batch *b)
 {
-	memset(b, 0, sizeof *b);
-	CFS_INIT_LIST_HEAD(&b->rsb_replies);
+	
memset(b, 0, sizeof *b); + INIT_LIST_HEAD(&b->rsb_replies); } /** @@ -310,10 +299,10 @@ static void rs_batch_dispatch(struct rs_batch *b) hrt = ptlrpc_hr_select(b->rsb_svcpt); spin_lock(&hrt->hrt_lock); - cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue); + list_splice_init(&b->rsb_replies, &hrt->hrt_queue); spin_unlock(&hrt->hrt_lock); - cfs_waitq_signal(&hrt->hrt_waitq); + wake_up(&hrt->hrt_waitq); b->rsb_n_replies = 0; } } @@ -340,7 +329,7 @@ static void rs_batch_add(struct rs_batch *b, struct ptlrpc_reply_state *rs) spin_lock(&rs->rs_lock); rs->rs_scheduled_ever = 1; if (rs->rs_scheduled == 0) { - cfs_list_move(&rs->rs_list, &b->rsb_replies); + list_move(&rs->rs_list, &b->rsb_replies); rs->rs_scheduled = 1; b->rsb_n_replies++; } @@ -365,14 +354,6 @@ static void rs_batch_fini(struct rs_batch *b) #define DECLARE_RS_BATCH(b) struct rs_batch b -#else /* __KERNEL__ */ - -#define rs_batch_init(b) do{}while(0) -#define rs_batch_fini(b) do{}while(0) -#define rs_batch_add(b, r) ptlrpc_schedule_difficult_reply(r) -#define DECLARE_RS_BATCH(b) - -#endif /* __KERNEL__ */ /** * Put reply state into a queue for processing because we received @@ -380,23 +361,19 @@ static void rs_batch_fini(struct rs_batch *b) */ void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs) { -#ifdef __KERNEL__ struct ptlrpc_hr_thread *hrt; ENTRY; - LASSERT(cfs_list_empty(&rs->rs_list)); + LASSERT(list_empty(&rs->rs_list)); hrt = ptlrpc_hr_select(rs->rs_svcpt); spin_lock(&hrt->hrt_lock); - cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue); + list_add_tail(&rs->rs_list, &hrt->hrt_queue); spin_unlock(&hrt->hrt_lock); - cfs_waitq_signal(&hrt->hrt_waitq); + wake_up(&hrt->hrt_waitq); EXIT; -#else - cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue); -#endif } void @@ -404,20 +381,20 @@ ptlrpc_schedule_difficult_reply(struct ptlrpc_reply_state *rs) { ENTRY; - LASSERT_SPIN_LOCKED(&rs->rs_svcpt->scp_rep_lock); - LASSERT_SPIN_LOCKED(&rs->rs_lock); - LASSERT (rs->rs_difficult); - rs->rs_scheduled_ever = 1; /* flag any notification attempt */ + assert_spin_locked(&rs->rs_svcpt->scp_rep_lock); + assert_spin_locked(&rs->rs_lock); + LASSERT (rs->rs_difficult); + rs->rs_scheduled_ever = 1; /* flag any notification attempt */ - if (rs->rs_scheduled) { /* being set up or already notified */ - EXIT; - return; - } + if (rs->rs_scheduled) { /* being set up or already notified */ + EXIT; + return; + } - rs->rs_scheduled = 1; - cfs_list_del_init(&rs->rs_list); - ptlrpc_dispatch_difficult_reply(rs); - EXIT; + rs->rs_scheduled = 1; + list_del_init(&rs->rs_list); + ptlrpc_dispatch_difficult_reply(rs); + EXIT; } EXPORT_SYMBOL(ptlrpc_schedule_difficult_reply); @@ -433,13 +410,13 @@ void ptlrpc_commit_replies(struct obd_export *exp) /* CAVEAT EMPTOR: spinlock ordering!!! 
*/ spin_lock(&exp->exp_uncommitted_replies_lock); - cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies, + list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies, rs_obd_list) { LASSERT (rs->rs_difficult); /* VBR: per-export last_committed */ LASSERT(rs->rs_export); if (rs->rs_transno <= exp->exp_last_committed) { - cfs_list_del_init(&rs->rs_obd_list); + list_del_init(&rs->rs_obd_list); rs_batch_add(&batch, rs); } } @@ -447,7 +424,6 @@ void ptlrpc_commit_replies(struct obd_export *exp) rs_batch_fini(&batch); EXIT; } -EXPORT_SYMBOL(ptlrpc_commit_replies); static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) @@ -459,19 +435,19 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) for (;;) { spin_lock(&svcpt->scp_lock); - if (cfs_list_empty(&svcpt->scp_rqbd_idle)) { + if (list_empty(&svcpt->scp_rqbd_idle)) { spin_unlock(&svcpt->scp_lock); return posted; } - rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next, + rqbd = list_entry(svcpt->scp_rqbd_idle.next, struct ptlrpc_request_buffer_desc, rqbd_list); - cfs_list_del(&rqbd->rqbd_list); + list_del(&rqbd->rqbd_list); /* assume we will post successfully */ svcpt->scp_nrqbds_posted++; - cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted); + list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted); spin_unlock(&svcpt->scp_lock); @@ -485,8 +461,8 @@ ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt) spin_lock(&svcpt->scp_lock); svcpt->scp_nrqbds_posted--; - cfs_list_del(&rqbd->rqbd_list); - cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); + list_del(&rqbd->rqbd_list); + list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle); /* Don't complain if no request buffers are posted right now; LNET * won't drop requests because we set the portal lazy! */ @@ -504,14 +480,13 @@ static void ptlrpc_at_timer(unsigned long castmeharder) svcpt->scp_at_check = 1; svcpt->scp_at_checktime = cfs_time_current(); - cfs_waitq_signal(&svcpt->scp_waitq); + wake_up(&svcpt->scp_waitq); } static void ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, struct ptlrpc_service_conf *conf) { -#ifdef __KERNEL__ struct ptlrpc_service_thr_conf *tc = &conf->psc_thr; unsigned init; unsigned total; @@ -571,7 +546,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, /* * User wants to increase number of threads with for - * each CPU core/HT, most likely the factor is larger then + * each CPU core/HT, most likely the factor is larger than * one thread/core because service threads are supposed to * be blocked by lock or wait for IO. */ @@ -581,13 +556,13 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc, * have too many threads no matter how many cores/HTs * there are. 
 */
-	if (cfs_cpu_ht_nsiblings(0) > 1) { /* weight is # of HTs */
+	if (cpumask_weight(topology_sibling_cpumask(smp_processor_id())) > 1) {
+		/* weight is # of HTs */
		/* depress thread factor for hyper-thread */
		factor = factor - (factor >> 1) + (factor >> 3);
	}
 
	weight = cfs_cpt_weight(svc->srv_cptable, 0);
-	LASSERT(weight > 0);
 
	for (; factor > 0 && weight > 0; factor--, weight -= fade)
		nthrs += min(weight, fade) * factor;
@@ -603,12 +578,11 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
	svc->srv_nthrs_cpt_init = init;
 
	if (nthrs * svc->srv_ncpts > tc->tc_nthrs_max) {
-		LCONSOLE_WARN("%s: This service may have more threads (%d) "
-			      "than the given soft limit (%d)\n",
-			      svc->srv_name, nthrs * svc->srv_ncpts,
-			      tc->tc_nthrs_max);
+		CDEBUG(D_OTHER, "%s: This service may have more threads (%d) "
+		       "than the given soft limit (%d)\n",
+		       svc->srv_name, nthrs * svc->srv_ncpts,
+		       tc->tc_nthrs_max);
	}
-#endif
 }
 
 /**
@@ -624,30 +598,27 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
	int			rc;
 
	svcpt->scp_cpt = cpt;
-	CFS_INIT_LIST_HEAD(&svcpt->scp_threads);
+	INIT_LIST_HEAD(&svcpt->scp_threads);
 
	/* rqbd and incoming request queue */
	spin_lock_init(&svcpt->scp_lock);
-	CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
-	CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
-	CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
-	cfs_waitq_init(&svcpt->scp_waitq);
+	INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
+	INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
+	INIT_LIST_HEAD(&svcpt->scp_req_incoming);
+	init_waitqueue_head(&svcpt->scp_waitq);
 
	/* history request & rqbd list */
-	CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
-	CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
+	INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
+	INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
 
	/* active requests and hp requests */
	spin_lock_init(&svcpt->scp_req_lock);
 
	/* reply states */
	spin_lock_init(&svcpt->scp_rep_lock);
-	CFS_INIT_LIST_HEAD(&svcpt->scp_rep_active);
-#ifndef __KERNEL__
-	CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
-#endif
-	CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
-	cfs_waitq_init(&svcpt->scp_rep_waitq);
-	cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
+	INIT_LIST_HEAD(&svcpt->scp_rep_active);
+	INIT_LIST_HEAD(&svcpt->scp_rep_idle);
+	init_waitqueue_head(&svcpt->scp_rep_waitq);
+	atomic_set(&svcpt->scp_nreps_difficult, 0);
 
	/* adaptive timeout */
	spin_lock_init(&svcpt->scp_at_lock);
@@ -660,19 +631,21 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
 
	/* allocate memory for scp_at_array (ptlrpc_at_array) */
	OBD_CPT_ALLOC(array->paa_reqs_array,
-		      svc->srv_cptable, cpt, sizeof(cfs_list_t) * size);
+		      svc->srv_cptable, cpt, sizeof(struct list_head) * size);
 
	if (array->paa_reqs_array == NULL)
		return -ENOMEM;
 
	for (index = 0; index < size; index++)
-		CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
+		INIT_LIST_HEAD(&array->paa_reqs_array[index]);
 
	OBD_CPT_ALLOC(array->paa_reqs_count,
		      svc->srv_cptable, cpt, sizeof(__u32) * size);
	if (array->paa_reqs_count == NULL)
		goto failed;
 
-	cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
+	setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
+		    (unsigned long)svcpt);
+
	/* At SOW, service time should be quick; 10s seems generous. If client
	 * timeout is less than this, we'll be sending an early reply.
	 */
	at_init(&svcpt->scp_at_estimate, 10, 0);
 
@@ -696,7 +669,7 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
 
	if (array->paa_reqs_array != NULL) {
		OBD_FREE(array->paa_reqs_array,
-			 sizeof(cfs_list_t) * array->paa_size);
+			 sizeof(struct list_head) * array->paa_size);
		array->paa_reqs_array = NULL;
	}
 
@@ -710,7 +683,8 @@ ptlrpc_service_part_init(struct ptlrpc_service *svc,
 */
 struct ptlrpc_service *
 ptlrpc_register_service(struct ptlrpc_service_conf *conf,
-			cfs_proc_dir_entry_t *proc_entry)
+			struct kset *parent,
+			struct proc_dir_entry *proc_entry)
 {
	struct ptlrpc_service_cpt_conf	*cconf = &conf->psc_cpt;
	struct ptlrpc_service		*service;
@@ -780,7 +754,7 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
	spin_lock_init(&service->srv_lock);
	service->srv_name		= conf->psc_name;
	service->srv_watchdog_factor	= conf->psc_watchdog_factor;
-	CFS_INIT_LIST_HEAD(&service->srv_list); /* for safty of cleanup */
+	INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
 
	/* buffer configuration */
	service->srv_nbuf_per_group	= test_req_buffer_pressure ?
@@ -824,9 +798,15 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
	LASSERT(rc == 0);
 
	mutex_lock(&ptlrpc_all_services_mutex);
-	cfs_list_add (&service->srv_list, &ptlrpc_all_services);
+	list_add(&service->srv_list, &ptlrpc_all_services);
	mutex_unlock(&ptlrpc_all_services_mutex);
 
+	if (parent) {
+		rc = ptlrpc_sysfs_register_service(parent, service);
+		if (rc)
+			GOTO(failed, rc);
+	}
+
	if (proc_entry != NULL)
		ptlrpc_lprocfs_register_service(proc_entry, service);
 
@@ -837,14 +817,12 @@ ptlrpc_register_service(struct ptlrpc_service_conf *conf,
	CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
	       service->srv_name, service->srv_req_portal);
 
-#ifdef __KERNEL__
	rc = ptlrpc_start_threads(service);
	if (rc != 0) {
		CERROR("Failed to start threads for service %s: %d\n",
		       service->srv_name, rc);
		GOTO(failed, rc);
	}
-#endif
 
	RETURN(service);
 failed:
@@ -859,21 +837,21 @@ EXPORT_SYMBOL(ptlrpc_register_service);
 */
 static void ptlrpc_server_free_request(struct ptlrpc_request *req)
 {
-	LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
-	LASSERT(cfs_list_empty(&req->rq_timed_list));
+	LASSERT(atomic_read(&req->rq_refcount) == 0);
+	LASSERT(list_empty(&req->rq_timed_list));
 
-	/* DEBUG_REQ() assumes the reply state of a request with a valid
-	 * ref will not be destroyed until that reference is dropped. */
-	ptlrpc_req_drop_rs(req);
+	/* DEBUG_REQ() assumes the reply state of a request with a valid
	 * ref will not be destroyed until that reference is dropped. */
+	ptlrpc_req_drop_rs(req);
 
-	sptlrpc_svc_ctx_decref(req);
+	sptlrpc_svc_ctx_decref(req);
 
-	if (req != &req->rq_rqbd->rqbd_req) {
-		/* NB request buffers use an embedded
-		 * req if the incoming req unlinked the
-		 * MD; this isn't one of them! */
-		OBD_FREE(req, sizeof(*req));
-	}
+	if (req != &req->rq_rqbd->rqbd_req) {
+		/* NB request buffers use an embedded
+		 * req if the incoming req unlinked the
+		 * MD; this isn't one of them! */
+		ptlrpc_request_cache_free(req);
+	}
 }
 
 /**
@@ -885,12 +863,17 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
	struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
	struct ptlrpc_service_part	  *svcpt = rqbd->rqbd_svcpt;
	struct ptlrpc_service		  *svc = svcpt->scp_service;
-	int				   refcount;
-	cfs_list_t			  *tmp;
-	cfs_list_t			  *nxt;
+	int				refcount;
+	struct list_head		*tmp;
+	struct list_head		*nxt;
 
-	if (!cfs_atomic_dec_and_test(&req->rq_refcount))
-		return;
+	if (!atomic_dec_and_test(&req->rq_refcount))
+		return;
+
+	if (req->rq_session.lc_state == LCS_ENTERED) {
+		lu_context_exit(&req->rq_session);
+		lu_context_fini(&req->rq_session);
+	}
 
	if (req->rq_at_linked) {
		spin_lock(&svcpt->scp_at_lock);
@@ -901,76 +884,86 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
		spin_unlock(&svcpt->scp_at_lock);
	}
 
-	LASSERT(cfs_list_empty(&req->rq_timed_list));
+	LASSERT(list_empty(&req->rq_timed_list));
 
-	/* finalize request */
-	if (req->rq_export) {
-		class_export_put(req->rq_export);
-		req->rq_export = NULL;
-	}
+	/* finalize request */
+	if (req->rq_export) {
+		class_export_put(req->rq_export);
+		req->rq_export = NULL;
+	}
 
	spin_lock(&svcpt->scp_lock);
 
-	cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
+	list_add(&req->rq_list, &rqbd->rqbd_reqs);
 
-	refcount = --(rqbd->rqbd_refcount);
-	if (refcount == 0) {
-		/* request buffer is now idle: add to history */
-		cfs_list_del(&rqbd->rqbd_list);
+	refcount = --(rqbd->rqbd_refcount);
+	if (refcount == 0) {
+		/* request buffer is now idle: add to history */
+		list_del(&rqbd->rqbd_list);
 
-		cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
+		list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
		svcpt->scp_hist_nrqbds++;
 
		/* cull some history?
		 * I expect only about 1 or 2 rqbds need to be recycled here */
		while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
-			rqbd = cfs_list_entry(svcpt->scp_hist_rqbds.next,
-					      struct ptlrpc_request_buffer_desc,
-					      rqbd_list);
+			rqbd = list_entry(svcpt->scp_hist_rqbds.next,
					  struct ptlrpc_request_buffer_desc,
					  rqbd_list);
 
-			cfs_list_del(&rqbd->rqbd_list);
+			list_del(&rqbd->rqbd_list);
			svcpt->scp_hist_nrqbds--;
 
-			/* remove rqbd's reqs from svc's req history while
-			 * I've got the service lock */
-			cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
-				req = cfs_list_entry(tmp, struct ptlrpc_request,
-						     rq_list);
-				/* Track the highest culled req seq */
+			/* remove rqbd's reqs from svc's req history while
			 * I've got the service lock */
+			list_for_each(tmp, &rqbd->rqbd_reqs) {
+				req = list_entry(tmp, struct ptlrpc_request,
						 rq_list);
+				/* Track the highest culled req seq */
				if (req->rq_history_seq >
				    svcpt->scp_hist_seq_culled) {
					svcpt->scp_hist_seq_culled =
						req->rq_history_seq;
				}
-				cfs_list_del(&req->rq_history_list);
+				list_del(&req->rq_history_list);
			}
 
			spin_unlock(&svcpt->scp_lock);
 
-			cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
-				req = cfs_list_entry(rqbd->rqbd_reqs.next,
-						     struct ptlrpc_request,
-						     rq_list);
-				cfs_list_del(&req->rq_list);
-				ptlrpc_server_free_request(req);
-			}
+			list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
+				req = list_entry(rqbd->rqbd_reqs.next,
						 struct ptlrpc_request,
						 rq_list);
+				list_del(&req->rq_list);
+				ptlrpc_server_free_request(req);
+			}
 
			spin_lock(&svcpt->scp_lock);
			/*
			 * now all reqs including the embedded req have been
-			 * disposed, schedule request buffer for re-use.
+			 * disposed, schedule request buffer for re-use
+			 * or free it to drain some of the excess.
			 */
-			LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
-				0);
-			cfs_list_add_tail(&rqbd->rqbd_list,
-					  &svcpt->scp_rqbd_idle);
+			LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
+			if (svcpt->scp_nrqbds_posted >=
+			    svc->srv_nbuf_per_group &&
+			    !test_req_buffer_pressure) {
+				/* like in ptlrpc_free_rqbd() */
+				svcpt->scp_nrqbds_total--;
+				OBD_FREE_LARGE(rqbd->rqbd_buffer,
+					       svc->srv_buf_size);
+				OBD_FREE_PTR(rqbd);
+			} else {
+				list_add_tail(&rqbd->rqbd_list,
					      &svcpt->scp_rqbd_idle);
+			}
		}
 
		spin_unlock(&svcpt->scp_lock);
	} else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
		/* If we are low on memory, we are not interested in history */
-		cfs_list_del(&req->rq_list);
-		cfs_list_del_init(&req->rq_history_list);
+		list_del(&req->rq_list);
+		list_del_init(&req->rq_history_list);
 
		/* Track the highest culled req seq */
		if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
@@ -984,15 +977,57 @@ void ptlrpc_server_drop_request(struct ptlrpc_request *req)
	}
 }
 
+/** Change request export and move hp request from old export to new */
+void ptlrpc_request_change_export(struct ptlrpc_request *req,
+				  struct obd_export *export)
+{
+	if (req->rq_export != NULL) {
+		LASSERT(!list_empty(&req->rq_exp_list));
+		/* remove rq_exp_list from last export */
+		spin_lock_bh(&req->rq_export->exp_rpc_lock);
+		list_del_init(&req->rq_exp_list);
+		spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+		/* export has one reference already, so it's safe to
+		 * add req to export queue here and get another
+		 * reference for request later */
+		spin_lock_bh(&export->exp_rpc_lock);
+		if (req->rq_ops != NULL) /* hp request */
+			list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
+		else
+			list_add(&req->rq_exp_list, &export->exp_reg_rpcs);
+		spin_unlock_bh(&export->exp_rpc_lock);
+
+		class_export_rpc_dec(req->rq_export);
+		class_export_put(req->rq_export);
+	}
+
+	/* request takes one export refcount */
+	req->rq_export = class_export_get(export);
+	class_export_rpc_inc(export);
+
+	return;
+}
+
 /**
 * to finish a request: stop sending more early replies, and release
- * the request. should be called after we finished handling the request.
+ * the request.
 */
 static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
					 struct ptlrpc_request *req)
 {
	ptlrpc_server_hpreq_fini(req);
 
+	ptlrpc_server_drop_request(req);
+}
+
+/**
+ * to finish an active request: stop sending more early replies, and release
+ * the request. should be called after we finished handling the request.
+ */
+static void ptlrpc_server_finish_active_request(
+					struct ptlrpc_service_part *svcpt,
+					struct ptlrpc_request *req)
+{
	spin_lock(&svcpt->scp_req_lock);
	ptlrpc_nrs_req_stop_nolock(req);
	svcpt->scp_nreqs_active--;
@@ -1002,7 +1037,10 @@ static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
 
	ptlrpc_nrs_req_finalize(req);
 
-	ptlrpc_server_drop_request(req);
+	if (req->rq_export != NULL)
+		class_export_rpc_dec(req->rq_export);
+
+	ptlrpc_server_finish_request(svcpt, req);
 }
 
 /**
@@ -1010,7 +1048,7 @@ static void ptlrpc_server_finish_request(struct ptlrpc_service_part *svcpt,
 * This function is only called when some export receives a message (i.e.,
 * the network is up.)
*/ -static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) +void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) { struct obd_export *oldest_exp; time_t oldest_time, new_time; @@ -1031,27 +1069,24 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) RETURN_EXIT; exp->exp_last_request_time = new_time; - CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n", - exp->exp_client_uuid.uuid, - exp->exp_last_request_time, exp); - /* exports may get disconnected from the chain even though the - export has references, so we must keep the spin lock while - manipulating the lists */ + /* exports may get disconnected from the chain even though the + export has references, so we must keep the spin lock while + manipulating the lists */ spin_lock(&exp->exp_obd->obd_dev_lock); - if (cfs_list_empty(&exp->exp_obd_chain_timed)) { + if (list_empty(&exp->exp_obd_chain_timed)) { /* this one is not timed */ spin_unlock(&exp->exp_obd->obd_dev_lock); - RETURN_EXIT; - } + RETURN_EXIT; + } - cfs_list_move_tail(&exp->exp_obd_chain_timed, - &exp->exp_obd->obd_exports_timed); + list_move_tail(&exp->exp_obd_chain_timed, + &exp->exp_obd->obd_exports_timed); - oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next, - struct obd_export, exp_obd_chain_timed); - oldest_time = oldest_exp->exp_last_request_time; + oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next, + struct obd_export, exp_obd_chain_timed); + oldest_time = oldest_exp->exp_last_request_time; spin_unlock(&exp->exp_obd->obd_dev_lock); if (exp->exp_obd->obd_recovering) { @@ -1071,8 +1106,8 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) * we better wait for 3. */ exp->exp_obd->obd_eviction_timer = cfs_time_current_sec() + 3 * PING_INTERVAL; - CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n", - exp->exp_obd->obd_name, + CDEBUG(D_HA, "%s: Think about evicting %s from %ld\n", + exp->exp_obd->obd_name, obd_export_nid2str(oldest_exp), oldest_time); } } else { @@ -1095,7 +1130,8 @@ static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay) */ static int ptlrpc_check_req(struct ptlrpc_request *req) { - int rc = 0; + struct obd_device *obd = req->rq_export->exp_obd; + int rc = 0; if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) < req->rq_export->exp_conn_cnt)) { @@ -1105,24 +1141,23 @@ static int ptlrpc_check_req(struct ptlrpc_request *req) req->rq_export->exp_conn_cnt); return -EEXIST; } - if (unlikely(req->rq_export->exp_obd && - req->rq_export->exp_obd->obd_fail)) { - /* Failing over, don't handle any more reqs, send - error response instead. */ - CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n", - req, req->rq_export->exp_obd->obd_name); + if (unlikely(obd == NULL || obd->obd_fail)) { + /* Failing over, don't handle any more reqs, + * send error response instead. */ + CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n", + req, (obd != NULL) ? 
obd->obd_name : "unknown"); rc = -ENODEV; } else if (lustre_msg_get_flags(req->rq_reqmsg) & (MSG_REPLAY | MSG_REQ_REPLAY_DONE) && - !(req->rq_export->exp_obd->obd_recovering)) { + !obd->obd_recovering) { DEBUG_REQ(D_ERROR, req, "Invalid replay without recovery"); class_fail_export(req->rq_export); rc = -ENODEV; } else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 && - !(req->rq_export->exp_obd->obd_recovering)) { + !obd->obd_recovering) { DEBUG_REQ(D_ERROR, req, "Invalid req with transno " - LPU64" without recovery", + "%llu without recovery", lustre_msg_get_transno(req->rq_reqmsg)); class_fail_export(req->rq_export); rc = -ENODEV; @@ -1141,17 +1176,17 @@ static void ptlrpc_at_set_timer(struct ptlrpc_service_part *svcpt) __s32 next; if (array->paa_count == 0) { - cfs_timer_disarm(&svcpt->scp_at_timer); + del_timer(&svcpt->scp_at_timer); return; } /* Set timer for closest deadline */ - next = (__s32)(array->paa_deadline - cfs_time_current_sec() - + next = (__s32)(array->paa_deadline - ktime_get_real_seconds() - at_early_margin); if (next <= 0) { ptlrpc_at_timer((unsigned long)svcpt); } else { - cfs_timer_arm(&svcpt->scp_at_timer, cfs_time_shift(next)); + mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next)); CDEBUG(D_INFO, "armed %s at %+ds\n", svcpt->scp_service->srv_name, next); } @@ -1175,17 +1210,17 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) return(-ENOSYS); spin_lock(&svcpt->scp_at_lock); - LASSERT(cfs_list_empty(&req->rq_timed_list)); + LASSERT(list_empty(&req->rq_timed_list)); - index = (unsigned long)req->rq_deadline % array->paa_size; + div_u64_rem(req->rq_deadline, array->paa_size, &index); if (array->paa_reqs_count[index] > 0) { /* latest rpcs will have the latest deadlines in the list, * so search backward. */ - cfs_list_for_each_entry_reverse(rq, + list_for_each_entry_reverse(rq, &array->paa_reqs_array[index], rq_timed_list) { if (req->rq_deadline >= rq->rq_deadline) { - cfs_list_add(&req->rq_timed_list, + list_add(&req->rq_timed_list, &rq->rq_timed_list); break; } @@ -1193,8 +1228,8 @@ static int ptlrpc_at_add_timed(struct ptlrpc_request *req) } /* Add the request at the head of the list */ - if (cfs_list_empty(&req->rq_timed_list)) - cfs_list_add(&req->rq_timed_list, + if (list_empty(&req->rq_timed_list)) + list_add(&req->rq_timed_list, &array->paa_reqs_array[index]); spin_lock(&req->rq_lock); @@ -1220,8 +1255,8 @@ ptlrpc_at_remove_timed(struct ptlrpc_request *req) array = &req->rq_rqbd->rqbd_svcpt->scp_at_array; /* NB: must call with hold svcpt::scp_at_lock */ - LASSERT(!cfs_list_empty(&req->rq_timed_list)); - cfs_list_del_init(&req->rq_timed_list); + LASSERT(!list_empty(&req->rq_timed_list)); + list_del_init(&req->rq_timed_list); spin_lock(&req->rq_lock); req->rq_at_linked = 0; @@ -1231,31 +1266,41 @@ ptlrpc_at_remove_timed(struct ptlrpc_request *req) array->paa_count--; } +/* + * Attempt to extend the request deadline by sending an early reply to the + * client. 
+ */
 static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
 {
	struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
-	struct ptlrpc_request *reqcopy;
-	struct lustre_msg *reqmsg;
-	cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
-	time_t	newdl;
-	int rc;
-	ENTRY;
+	struct ptlrpc_request *reqcopy;
+	struct lustre_msg *reqmsg;
+	time64_t olddl = req->rq_deadline - ktime_get_real_seconds();
+	time64_t newdl;
+	int rc;
+
+	ENTRY;
+
+	if (CFS_FAIL_CHECK(OBD_FAIL_TGT_REPLAY_RECONNECT)) {
+		/* don't send early reply */
+		RETURN(1);
+	}
 
	/* deadline is when the client expects us to reply, margin is the
	   difference between clients' and servers' expectations */
	DEBUG_REQ(D_ADAPTTO, req,
-		  "%ssending early reply (deadline %+lds, margin %+lds) for "
+		  "%ssending early reply (deadline %+llds, margin %+llds) for "
		  "%d+%d", AT_OFF ? "AT off - not " : "",
-		  olddl, olddl - at_get(&svcpt->scp_at_estimate),
+		  (s64)olddl, (s64)(olddl - at_get(&svcpt->scp_at_estimate)),
		  at_get(&svcpt->scp_at_estimate), at_extra);
 
	if (AT_OFF)
		RETURN(0);
 
	if (olddl < 0) {
-		DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
+		DEBUG_REQ(D_WARNING, req, "Already past deadline (%+llds), "
			  "not sending early reply. Consider increasing "
-			  "at_early_margin (%d)?", olddl, at_early_margin);
+			  "at_early_margin (%d)?", (s64)olddl, at_early_margin);
 
		/* Return an error so we're not re-added to the timed list. */
		RETURN(-ETIMEDOUT);
@@ -1267,46 +1312,54 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
		RETURN(-ENOSYS);
	}
 
-	if (req->rq_export &&
-	    lustre_msg_get_flags(req->rq_reqmsg) &
-	    (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
-		/* During recovery, we don't want to send too many early
-		 * replies, but on the other hand we want to make sure the
-		 * client has enough time to resend if the rpc is lost. So
-		 * during the recovery period send at least 4 early replies,
-		 * spacing them every at_extra if we can. at_estimate should
-		 * always equal this fixed value during recovery. */
-		at_measured(&svcpt->scp_at_estimate, min(at_extra,
-			    req->rq_export->exp_obd->obd_recovery_timeout / 4));
+	if (req->rq_export &&
+	    lustre_msg_get_flags(req->rq_reqmsg) &
+	    (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
+		struct obd_device *obd_exp = req->rq_export->exp_obd;
+
+		/* During recovery, we don't want to send too many early
+		 * replies, but on the other hand we want to make sure the
+		 * client has enough time to resend if the rpc is lost. So
+		 * during the recovery period send at least 4 early replies,
+		 * spacing them every at_extra if we can. at_estimate should
+		 * always equal this fixed value during recovery.
+		 */
+		/* Don't account request processing time into AT history
+		 * during recovery; it is not the service time we need, but
+		 * it also includes the waiting time of recovering clients
+		 */
+		newdl = min_t(time64_t, at_extra,
+			      obd_exp->obd_recovery_timeout / 4) +
+			ktime_get_real_seconds();
	} else {
-		/* Fake our processing time into the future to ask the clients
-		 * for some extra amount of time */
+		/* We want to extend the request deadline by at_extra seconds,
+		 * so we set our service estimate to reflect how much time has
+		 * passed since this request arrived plus an additional
+		 * at_extra seconds. The client will calculate the new deadline
+		 * based on this service estimate (plus some additional time to
		 * account for network latency). See ptlrpc_at_recv_early_reply
		 */
		at_measured(&svcpt->scp_at_estimate, at_extra +
-			    cfs_time_current_sec() -
+			    ktime_get_real_seconds() -
			    req->rq_arrival_time.tv_sec);
+		newdl = req->rq_arrival_time.tv_sec +
+			at_get(&svcpt->scp_at_estimate);
+	}
 
-	/* Check to see if we've actually increased the deadline -
-	 * we may be past adaptive_max */
-	if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
-	    at_get(&svcpt->scp_at_estimate)) {
-		DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
-			  "(%ld/%ld), not sending early reply\n",
-			  olddl, req->rq_arrival_time.tv_sec +
-			  at_get(&svcpt->scp_at_estimate) -
-			  cfs_time_current_sec());
-		RETURN(-ETIMEDOUT);
-	}
+	/* Check to see if we've actually increased the deadline -
+	 * we may be past adaptive_max */
+	if (req->rq_deadline >= newdl) {
+		DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%lld/%lld), not sending early reply\n",
+			  (s64)olddl, (s64)(newdl - ktime_get_real_seconds()));
+		RETURN(-ETIMEDOUT);
	}
 
-	newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
-	OBD_ALLOC(reqcopy, sizeof *reqcopy);
-	if (reqcopy == NULL)
-		RETURN(-ENOMEM);
-	OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
-	if (!reqmsg) {
-		OBD_FREE(reqcopy, sizeof *reqcopy);
-		RETURN(-ENOMEM);
-	}
+	reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
+	if (reqcopy == NULL)
+		RETURN(-ENOMEM);
+	OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
+	if (!reqmsg)
+		GOTO(out_free, rc = -ENOMEM);
 
	*reqcopy = *req;
	reqcopy->rq_reply_state = NULL;
@@ -1319,13 +1372,21 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
	reqcopy->rq_reqmsg = reqmsg;
	memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
 
-	LASSERT(cfs_atomic_read(&req->rq_refcount));
-	/** if it is last refcount then early reply isn't needed */
-	if (cfs_atomic_read(&req->rq_refcount) == 1) {
-		DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
-			  "abort sending early reply\n");
-		GOTO(out, rc = -EINVAL);
-	}
+	/*
+	 * tgt_brw_read() and tgt_brw_write() may have decided not to reply.
+	 * Without this check, we would fail the rq_no_reply assertion in
+	 * ptlrpc_send_reply().
+	 */
+	if (reqcopy->rq_no_reply)
+		GOTO(out, rc = -ETIMEDOUT);
+
+	LASSERT(atomic_read(&req->rq_refcount));
+	/** if it is last refcount then early reply isn't needed */
+	if (atomic_read(&req->rq_refcount) == 1) {
+		DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
+			  "abort sending early reply\n");
+		GOTO(out, rc = -EINVAL);
+	}
 
	/* Connection ref */
	reqcopy->rq_export = class_conn2export(
@@ -1334,7 +1395,7 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
		GOTO(out, rc = -ENODEV);
 
	/* RPC ref */
-	class_export_rpc_get(reqcopy->rq_export);
+	class_export_rpc_inc(reqcopy->rq_export);
	if (reqcopy->rq_export->exp_obd &&
	    reqcopy->rq_export->exp_obd->obd_fail)
		GOTO(out_put, rc = -ENODEV);
@@ -1345,26 +1406,27 @@ static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
 
	rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
 
-	if (!rc) {
-		/* Adjust our own deadline to what we told the client */
-		req->rq_deadline = newdl;
-		req->rq_early_count++; /* number sent, server side */
-	} else {
-		DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
-	}
+	if (!rc) {
+		/* Adjust our own deadline to what we told the client */
+		req->rq_deadline = newdl;
+		req->rq_early_count++; /* number sent, server side */
+	} else {
+		DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
+	}
 
	/* Free the (early) reply state from lustre_pack_reply.
	   (ptlrpc_send_reply takes its own rs ref, so this is safe here) */
	ptlrpc_req_drop_rs(reqcopy);
 
out_put:
-	class_export_rpc_put(reqcopy->rq_export);
-	class_export_put(reqcopy->rq_export);
+	class_export_rpc_dec(reqcopy->rq_export);
+	class_export_put(reqcopy->rq_export);
 
out:
-	sptlrpc_svc_ctx_decref(reqcopy);
-	OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
-	OBD_FREE(reqcopy, sizeof *reqcopy);
-	RETURN(rc);
+	sptlrpc_svc_ctx_decref(reqcopy);
+	OBD_FREE_LARGE(reqmsg, req->rq_reqlen);
+out_free:
+	ptlrpc_request_cache_free(reqcopy);
+	RETURN(rc);
 }
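
/*
 * Editor's aside, not part of the patch: a standalone sketch of the deadline
 * arithmetic in the non-recovery branch above. Assuming the service estimate
 * adopts the newly measured value (at_measured() in the real code keeps a
 * windowed maximum, so it can only be larger), the new deadline works out to
 * "now + at_extra". All names below are invented for illustration.
 */
#include <stdio.h>

static long long sketch_early_reply_newdl(long long arrival, long long now,
					  long long at_extra)
{
	/* the estimate covers the time already spent plus at_extra */
	long long estimate = (now - arrival) + at_extra;

	return arrival + estimate;	/* equals now + at_extra */
}

int main(void)
{
	/* request arrived at t=1000, it is now t=1025, at_extra=30:
	 * the deadline is pushed out to t=1055 */
	printf("%lld\n", sketch_early_reply_newdl(1000, 1025, 30));
	return 0;
}
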
 
 /* Send early replies to everybody expiring within at_early_margin
    asking for at_extra time */
@@ -1373,10 +1435,10 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
 {
	struct ptlrpc_at_array *array = &svcpt->scp_at_array;
	struct ptlrpc_request *rq, *n;
-	cfs_list_t work_list;
+	struct list_head work_list;
	__u32  index, count;
-	time_t deadline;
-	time_t now = cfs_time_current_sec();
+	time64_t deadline;
+	time64_t now = ktime_get_real_seconds();
	cfs_duration_t delay;
	int first, counter = 0;
	ENTRY;
@@ -1403,17 +1465,17 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
		RETURN(0);
	}
 
-	/* We're close to a timeout, and we don't know how much longer the
-	   server will take. Send early replies to everyone expiring soon. */
-	CFS_INIT_LIST_HEAD(&work_list);
-	deadline = -1;
-	index = (unsigned long)array->paa_deadline % array->paa_size;
-	count = array->paa_count;
-	while (count > 0) {
-		count -= array->paa_reqs_count[index];
-		cfs_list_for_each_entry_safe(rq, n,
-					     &array->paa_reqs_array[index],
-					     rq_timed_list) {
+	/* We're close to a timeout, and we don't know how much longer the
	   server will take. Send early replies to everyone expiring soon. */
+	INIT_LIST_HEAD(&work_list);
+	deadline = -1;
+	div_u64_rem(array->paa_deadline, array->paa_size, &index);
+	count = array->paa_count;
+	while (count > 0) {
+		count -= array->paa_reqs_count[index];
+		list_for_each_entry_safe(rq, n,
					 &array->paa_reqs_array[index],
					 rq_timed_list) {
			if (rq->rq_deadline > now + at_early_margin) {
				/* update the earliest deadline */
				if (deadline == -1 ||
@@ -1428,15 +1490,15 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
			 * refcount to 0 already. Let's check this and
			 * don't add entry to work_list
			 */
-			if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
-				cfs_list_add(&rq->rq_timed_list, &work_list);
+			if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
+				list_add(&rq->rq_timed_list, &work_list);
			counter++;
-		}
 
-		if (++index >= array->paa_size)
-			index = 0;
-	}
-	array->paa_deadline = deadline;
+		}
+
+		if (++index >= array->paa_size)
+			index = 0;
+	}
+	array->paa_deadline = deadline;
 
	/* we have a new earliest deadline, restart the timer */
	ptlrpc_at_set_timer(svcpt);
@@ -1450,8 +1512,7 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
		LCONSOLE_WARN("%s: This server is not able to keep up with "
			      "request traffic (cpu-bound).\n",
			      svcpt->scp_service->srv_name);
-		CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
-		      "delay="CFS_DURATION_T"(jiff)\n",
+		CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
		      counter, svcpt->scp_nreqs_incoming,
		      svcpt->scp_nreqs_active,
		      at_get(&svcpt->scp_at_estimate), delay);
@@ -1459,10 +1520,10 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
	/* we took additional refcount so entries can't be deleted from list, no
	 * locking is needed */
-	while (!cfs_list_empty(&work_list)) {
-		rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
+	while (!list_empty(&work_list)) {
+		rq = list_entry(work_list.next, struct ptlrpc_request,
				    rq_timed_list);
-		cfs_list_del_init(&rq->rq_timed_list);
+		list_del_init(&rq->rq_timed_list);
 
		if (ptlrpc_at_send_early_reply(rq) == 0)
			ptlrpc_at_add_timed(rq);
@@ -1473,9 +1534,51 @@ static int ptlrpc_at_check_timed(struct ptlrpc_service_part *svcpt)
	RETURN(1); /* return "did_something" for liblustre */
 }
 
+/* Check if we are already handling an earlier incarnation of this request.
+ * Called under &req->rq_export->exp_rpc_lock locked */
+static int ptlrpc_server_check_resend_in_progress(struct ptlrpc_request *req)
+{
+	struct ptlrpc_request	*tmp = NULL;
+
+	if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ||
+	    (atomic_read(&req->rq_export->exp_rpc_count) == 0))
+		return 0;
+
+	/* bulk requests are aborted upon reconnect, don't try to
+	 * find a match */
+	if (req->rq_bulk_write || req->rq_bulk_read)
+		return 0;
+
+	/* This list should not be longer than max_requests in
+	 * flight on the client, so it is not all that long.
+	 * Also we only hit this codepath for a resent request,
+	 * which makes it even more rarely hit */
+	list_for_each_entry(tmp, &req->rq_export->exp_reg_rpcs,
+			    rq_exp_list) {
+		/* Found duplicate one */
+		if (tmp->rq_xid == req->rq_xid)
+			goto found;
+	}
+	list_for_each_entry(tmp, &req->rq_export->exp_hp_rpcs,
+			    rq_exp_list) {
+		/* Found duplicate one */
+		if (tmp->rq_xid == req->rq_xid)
+			goto found;
+	}
+	return 0;
+
+found:
+	DEBUG_REQ(D_HA, req, "Found duplicate req in processing");
+	DEBUG_REQ(D_HA, tmp, "Request being processed");
+	return -EBUSY;
+}
+
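
/*
 * Editor's aside, not part of the patch: a minimal userspace model of the
 * duplicate-xid scan above. A resent request is matched against the export's
 * in-flight lists purely by xid; the lists stay short because they are
 * bounded by the client's requests in flight. All types and names here are
 * invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>

struct sketch_req {
	unsigned long long	 xid;
	struct sketch_req	*next;
};

static bool sketch_resend_in_progress(const struct sketch_req *inflight,
				      unsigned long long xid)
{
	/* linear scan, like list_for_each_entry() over exp_reg_rpcs
	 * and exp_hp_rpcs */
	for (; inflight != NULL; inflight = inflight->next)
		if (inflight->xid == xid)
			return true;	/* earlier incarnation still active */
	return false;
}

int main(void)
{
	struct sketch_req r2 = { 42, NULL };
	struct sketch_req r1 = { 41, &r2 };

	printf("%d\n", sketch_resend_in_progress(&r1, 42));	/* 1 */
	printf("%d\n", sketch_resend_in_progress(&r1, 43));	/* 0 */
	return 0;
}
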
 /**
- * Put the request to the export list if the request may become
- * a high priority one.
+ * Check if a request should be assigned with a high priority.
+ *
+ * \retval < 0: error occurred
+ *	     0: normal RPC request
+ *	    +1: high priority request
+ */
 static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
				    struct ptlrpc_request *req)
@@ -1483,57 +1586,46 @@ static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
	int rc = 0;
	ENTRY;
 
-	if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
+	if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) {
		rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
		if (rc < 0)
			RETURN(rc);
+
		LASSERT(rc == 0);
	}
-	if (req->rq_export && req->rq_ops) {
-		/* Perform request specific check. We should do this check
-		 * before the request is added into exp_hp_rpcs list otherwise
-		 * it may hit swab race at LU-1044. */
-		if (req->rq_ops->hpreq_check) {
+
+	if (req->rq_export != NULL && req->rq_ops != NULL) {
+		/* Perform request specific check. We should do this
+		 * check before the request is added into exp_hp_rpcs
+		 * list otherwise it may hit swab race at LU-1044. */
+		if (req->rq_ops->hpreq_check != NULL) {
			rc = req->rq_ops->hpreq_check(req);
-			/**
-			 * XXX: Out of all current
-			 * ptlrpc_hpreq_ops::hpreq_check(), only
-			 * ldlm_cancel_hpreq_check() can return an error code;
-			 * other functions assert in similar places, which seems
-			 * odd. What also does not seem right is that handlers
-			 * for those RPCs do not assert on the same checks, but
-			 * rather handle the error cases. e.g. see
-			 * ost_rw_hpreq_check(), and ost_brw_read(),
-			 * ost_brw_write().
-			 */
-			if (rc < 0)
-				RETURN(rc);
-			LASSERT(rc == 0 || rc == 1);
+			if (rc == -ESTALE) {
+				req->rq_status = rc;
+				ptlrpc_error(req);
+			}
+			/** can only return an error,
			 * 0 for a normal request,
			 * or 1 for a high priority request */
+			LASSERT(rc <= 1);
		}
-
-		spin_lock_bh(&req->rq_export->exp_rpc_lock);
-		cfs_list_add(&req->rq_exp_list,
-			     &req->rq_export->exp_hp_rpcs);
-		spin_unlock_bh(&req->rq_export->exp_rpc_lock);
	}
 
-	ptlrpc_nrs_req_initialize(svcpt, req, rc);
-
	RETURN(rc);
 }
 
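
/*
 * Editor's aside, not part of the patch: a hypothetical hpreq_check-style
 * callback showing the return-value contract documented above: negative for
 * an error, 0 for a normal request, 1 to queue the request as high priority.
 * The struct and field names are invented for illustration.
 */
#include <errno.h>
#include <stdbool.h>

struct sketch_rpc {
	bool stale;		/* refers to an object that no longer exists */
	bool unblocks_others;	/* e.g. a lock cancel other RPCs wait on */
};

static int sketch_hpreq_check(const struct sketch_rpc *rpc)
{
	if (rpc->stale)
		return -ESTALE;		/* caller sends an error reply */
	if (rpc->unblocks_others)
		return 1;		/* high priority queue */
	return 0;			/* normal queue */
}
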
 /** Remove the request from the export list. */
 static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
 {
-	ENTRY;
-	if (req->rq_export && req->rq_ops) {
-		/* refresh lock timeout again so that client has more
-		 * room to send lock cancel RPC. */
-		if (req->rq_ops->hpreq_fini)
-			req->rq_ops->hpreq_fini(req);
+	ENTRY;
+	if (req->rq_export) {
+		/* refresh lock timeout again so that client has more
+		 * room to send lock cancel RPC. */
+		if (req->rq_ops && req->rq_ops->hpreq_fini)
+			req->rq_ops->hpreq_fini(req);
 
		spin_lock_bh(&req->rq_export->exp_rpc_lock);
-		cfs_list_del_init(&req->rq_exp_list);
+		list_del_init(&req->rq_exp_list);
		spin_unlock_bh(&req->rq_export->exp_rpc_lock);
	}
	EXIT;
 }
@@ -1566,14 +1658,46 @@ EXPORT_SYMBOL(ptlrpc_hpreq_handler);
 static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
				     struct ptlrpc_request *req)
 {
-	int	rc;
+	int	rc;
+	bool	hp;
	ENTRY;
 
	rc = ptlrpc_server_hpreq_init(svcpt, req);
	if (rc < 0)
		RETURN(rc);
 
-	ptlrpc_nrs_req_add(svcpt, req, !!rc);
+	hp = rc > 0;
+	ptlrpc_nrs_req_initialize(svcpt, req, hp);
+
+	if (req->rq_export != NULL) {
+		struct obd_export *exp = req->rq_export;
+
+		/* do search for duplicated xid and the adding to the list
+		 * atomically */
+		spin_lock_bh(&exp->exp_rpc_lock);
+		rc = ptlrpc_server_check_resend_in_progress(req);
+		if (rc < 0) {
+			spin_unlock_bh(&exp->exp_rpc_lock);
+
+			ptlrpc_nrs_req_finalize(req);
+			RETURN(rc);
+		}
+
+		if (hp || req->rq_ops != NULL)
+			list_add(&req->rq_exp_list, &exp->exp_hp_rpcs);
+		else
+			list_add(&req->rq_exp_list, &exp->exp_reg_rpcs);
+		spin_unlock_bh(&exp->exp_rpc_lock);
+	}
+
+	/* The current thread is no longer the processing thread for this
+	 * request, but the request is on exp_hp_list and can be found there.
+	 * Remove all relations between the request and the old thread. */
+	req->rq_svc_thread->t_env->le_ses = NULL;
+	req->rq_svc_thread = NULL;
+	req->rq_session.lc_thread = NULL;
+
+	ptlrpc_nrs_req_add(svcpt, req, hp);
 
	RETURN(0);
 }
@@ -1594,6 +1718,9 @@ static bool ptlrpc_server_allow_high(struct ptlrpc_service_part *svcpt,
	if (force)
		return true;
 
+	if (ptlrpc_nrs_req_throttling_nolock(svcpt, true))
+		return false;
+
	if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
		     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
		/* leave just 1 thread for normal RPCs */
@@ -1632,10 +1759,6 @@ static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
					bool force)
 {
	int running = svcpt->scp_nthrs_running;
-#ifndef __KERNEL__
-	if (1) /* always allow to handle normal request for liblustre */
-		return true;
-#endif
	if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
		     CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
		/* leave just 1 thread for normal RPCs */
@@ -1644,8 +1767,13 @@ static bool ptlrpc_server_allow_normal(struct ptlrpc_service_part *svcpt,
		running += 1;
	}
 
-	if (force ||
-	    svcpt->scp_nreqs_active < running - 2)
+	if (force)
+		return true;
+
+	if (ptlrpc_nrs_req_throttling_nolock(svcpt, false))
+		return false;
+
+	if (svcpt->scp_nreqs_active < running - 2)
		return true;
 
	if (svcpt->scp_nreqs_active >= running - 1)
@@ -1684,14 +1812,16 @@ ptlrpc_server_request_pending(struct ptlrpc_service_part *svcpt, bool force)
 static struct ptlrpc_request *
 ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
 {
-	struct ptlrpc_request *req;
+	struct ptlrpc_request *req = NULL;
	ENTRY;
 
+	spin_lock(&svcpt->scp_req_lock);
+
	if (ptlrpc_server_high_pending(svcpt, force)) {
		req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
		if (req != NULL) {
			svcpt->scp_hreq_count++;
-			RETURN(req);
+			goto got_request;
		}
	}
 
@@ -1699,10 +1829,24 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force)
		req = ptlrpc_nrs_req_get_nolock(svcpt, false, force);
		if (req != NULL) {
			svcpt->scp_hreq_count = 0;
-			RETURN(req);
+			goto got_request;
		}
	}
+
+	spin_unlock(&svcpt->scp_req_lock);
	RETURN(NULL);
+
+got_request:
+	svcpt->scp_nreqs_active++;
+	if (req->rq_hp)
+		
svcpt->scp_nhreqs_active++; + + spin_unlock(&svcpt->scp_req_lock); + + if (likely(req->rq_export)) + class_export_rpc_inc(req->rq_export); + + RETURN(req); } /** @@ -1712,7 +1856,8 @@ ptlrpc_server_request_get(struct ptlrpc_service_part *svcpt, bool force) * ptlrpc_server_handle_req later on. */ static int -ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt) +ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt, + struct ptlrpc_thread *thread) { struct ptlrpc_service *svc = svcpt->scp_service; struct ptlrpc_request *req; @@ -1721,14 +1866,14 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt) ENTRY; spin_lock(&svcpt->scp_lock); - if (cfs_list_empty(&svcpt->scp_req_incoming)) { + if (list_empty(&svcpt->scp_req_incoming)) { spin_unlock(&svcpt->scp_lock); RETURN(0); } - req = cfs_list_entry(svcpt->scp_req_incoming.next, + req = list_entry(svcpt->scp_req_incoming.next, struct ptlrpc_request, rq_list); - cfs_list_del_init(&req->rq_list); + list_del_init(&req->rq_list); svcpt->scp_nreqs_incoming--; /* Consider this still a "queued" request as far as stats are * concerned */ @@ -1756,7 +1901,7 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt) rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen); if (rc != 0) { CERROR("error unpacking request: ptl %d from %s " - "x"LPU64"\n", svc->srv_req_portal, + "x%llu\n", svc->srv_req_portal, libcfs_id2str(req->rq_peer), req->rq_xid); goto err_req; } @@ -1765,14 +1910,14 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt) rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF); if (rc) { CERROR ("error unpacking ptlrpc body: ptl %d from %s x" - LPU64"\n", svc->srv_req_portal, + "%llu\n", svc->srv_req_portal, libcfs_id2str(req->rq_peer), req->rq_xid); goto err_req; } if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) && lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) { - CERROR("drop incoming rpc opc %u, x"LPU64"\n", + CERROR("drop incoming rpc opc %u, x%llu\n", cfs_fail_val, req->rq_xid); goto err_req; } @@ -1785,24 +1930,24 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt) goto err_req; } - switch(lustre_msg_get_opc(req->rq_reqmsg)) { - case MDS_WRITEPAGE: - case OST_WRITE: - req->rq_bulk_write = 1; - break; - case MDS_READPAGE: - case OST_READ: - case MGS_CONFIG_READ: - req->rq_bulk_read = 1; - break; - } + switch (lustre_msg_get_opc(req->rq_reqmsg)) { + case MDS_WRITEPAGE: + case OST_WRITE: + case OUT_UPDATE: + req->rq_bulk_write = 1; + break; + case MDS_READPAGE: + case OST_READ: + case MGS_CONFIG_READ: + req->rq_bulk_read = 1; + break; + } - CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid); + CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid); req->rq_export = class_conn2export( lustre_msg_get_handle(req->rq_reqmsg)); if (req->rq_export) { - class_export_rpc_get(req->rq_export); rc = ptlrpc_check_req(req); if (rc == 0) { rc = sptlrpc_target_export_check(req->rq_export, req); @@ -1817,39 +1962,54 @@ ptlrpc_server_handle_req_in(struct ptlrpc_service_part *svcpt) } /* req_in handling should/must be fast */ - if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5) - DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s", - cfs_time_sub(cfs_time_current_sec(), - req->rq_arrival_time.tv_sec)); + if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5) + DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds", + (s64)(ktime_get_real_seconds() - + req->rq_arrival_time.tv_sec)); /* Set rpc server deadline and add it to the timed list */ deadline = 
(lustre_msghdr_get_flags(req->rq_reqmsg) & MSGHDR_AT_SUPPORT) ? /* The max time the client expects us to take */ lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout; + req->rq_deadline = req->rq_arrival_time.tv_sec + deadline; if (unlikely(deadline == 0)) { DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout"); goto err_req; } - ptlrpc_at_add_timed(req); + /* Skip early reply */ + if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_RESEND)) + req->rq_deadline += obd_timeout; + + req->rq_svc_thread = thread; + if (thread != NULL) { + /* initialize request session, it is needed for request + * processing by target */ + rc = lu_context_init(&req->rq_session, LCT_SERVER_SESSION | + LCT_NOREF); + if (rc) { + CERROR("%s: failure to initialize session: rc = %d\n", + thread->t_name, rc); + goto err_req; + } + req->rq_session.lc_thread = thread; + lu_context_enter(&req->rq_session); + thread->t_env->le_ses = &req->rq_session; + } + + ptlrpc_at_add_timed(req); - /* Move it over to the request processing queue */ + /* Move it over to the request processing queue */ rc = ptlrpc_server_request_add(svcpt, req); - if (rc) { - ptlrpc_server_hpreq_fini(req); + if (rc) GOTO(err_req, rc); - } - cfs_waitq_signal(&svcpt->scp_waitq); + + wake_up(&svcpt->scp_waitq); RETURN(1); err_req: - if (req->rq_export) - class_export_rpc_put(req->rq_export); - spin_lock(&svcpt->scp_req_lock); - svcpt->scp_nreqs_active++; - spin_unlock(&svcpt->scp_req_lock); ptlrpc_server_finish_request(svcpt, req); RETURN(1); @@ -1864,28 +2024,19 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, struct ptlrpc_thread *thread) { struct ptlrpc_service *svc = svcpt->scp_service; - struct obd_export *export = NULL; - struct ptlrpc_request *request; - struct timeval work_start; - struct timeval work_end; - long timediff; - int rc; - int fail_opc = 0; - ENTRY; + struct ptlrpc_request *request; + ktime_t work_start; + ktime_t work_end; + ktime_t arrived; + s64 timediff_usecs; + s64 arrived_usecs; + int fail_opc = 0; + + ENTRY; - spin_lock(&svcpt->scp_req_lock); -#ifndef __KERNEL__ - /* !@%$# liblustre only has 1 thread */ - if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) { - spin_unlock(&svcpt->scp_req_lock); - RETURN(0); - } -#endif request = ptlrpc_server_request_get(svcpt, false); - if (request == NULL) { - spin_unlock(&svcpt->scp_req_lock); - RETURN(0); - } + if (request == NULL) + RETURN(0); if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT)) fail_opc = OBD_FAIL_PTLRPC_HPREQ_NOTIMEOUT; @@ -1893,30 +2044,21 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, fail_opc = OBD_FAIL_PTLRPC_HPREQ_TIMEOUT; if (unlikely(fail_opc)) { - if (request->rq_export && request->rq_ops) { - spin_unlock(&svcpt->scp_req_lock); - + if (request->rq_export && request->rq_ops) OBD_FAIL_TIMEOUT(fail_opc, 4); - - spin_lock(&svcpt->scp_req_lock); - } } - svcpt->scp_nreqs_active++; - if (request->rq_hp) - svcpt->scp_nhreqs_active++; - - spin_unlock(&svcpt->scp_req_lock); ptlrpc_rqphase_move(request, RQ_PHASE_INTERPRET); - if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG)) - libcfs_debug_dumplog(); + if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG)) + libcfs_debug_dumplog(); - cfs_gettimeofday(&work_start); - timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL); - if (likely(svc->srv_stats != NULL)) { + work_start = ktime_get_real(); + arrived = timespec64_to_ktime(request->rq_arrival_time); + timediff_usecs = ktime_us_delta(work_start, arrived); + if (likely(svc->srv_stats != NULL)) { lprocfs_counter_add(svc->srv_stats, 
PTLRPC_REQWAIT_CNTR, - timediff); + timediff_usecs); lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR, svcpt->scp_nreqs_incoming); lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR, @@ -1925,88 +2067,73 @@ ptlrpc_server_handle_request(struct ptlrpc_service_part *svcpt, at_get(&svcpt->scp_at_estimate)); } - export = request->rq_export; - rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF); - if (rc) { - CERROR("Failure to initialize session: %d\n", rc); - goto out_req; - } - request->rq_session.lc_thread = thread; - request->rq_session.lc_cookie = 0x5; - lu_context_enter(&request->rq_session); - - CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid); - - request->rq_svc_thread = thread; - if (thread) - request->rq_svc_thread->t_env->le_ses = &request->rq_session; - - if (likely(request->rq_export)) { + if (likely(request->rq_export)) { if (unlikely(ptlrpc_check_req(request))) goto put_conn; - ptlrpc_update_export_timer(request->rq_export, timediff >> 19); + ptlrpc_update_export_timer(request->rq_export, + timediff_usecs >> 19); } /* Discard requests queued for longer than the deadline. The deadline is increased if we send an early reply. */ - if (cfs_time_current_sec() > request->rq_deadline) { - DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s" - ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n", + if (ktime_get_real_seconds() > request->rq_deadline) { + DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n", libcfs_id2str(request->rq_peer), - cfs_time_sub(request->rq_deadline, - request->rq_arrival_time.tv_sec), - cfs_time_sub(cfs_time_current_sec(), - request->rq_deadline)); + request->rq_deadline - + request->rq_arrival_time.tv_sec, + ktime_get_real_seconds() - request->rq_deadline); goto put_conn; } - CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc " - "%s:%s+%d:%d:x"LPU64":%s:%d\n", cfs_curproc_comm(), - (request->rq_export ? - (char *)request->rq_export->exp_client_uuid.uuid : "0"), - (request->rq_export ? - cfs_atomic_read(&request->rq_export->exp_refcount) : -99), - lustre_msg_get_status(request->rq_reqmsg), request->rq_xid, - libcfs_id2str(request->rq_peer), - lustre_msg_get_opc(request->rq_reqmsg)); + CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc " + "%s:%s+%d:%d:x%llu:%s:%d\n", current_comm(), + (request->rq_export ? + (char *)request->rq_export->exp_client_uuid.uuid : "0"), + (request->rq_export ? 
+ atomic_read(&request->rq_export->exp_refcount) : -99), + lustre_msg_get_status(request->rq_reqmsg), request->rq_xid, + libcfs_id2str(request->rq_peer), + lustre_msg_get_opc(request->rq_reqmsg)); if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING) CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val); - rc = svc->srv_ops.so_req_handler(request); + CDEBUG(D_NET, "got req %llu\n", request->rq_xid); - ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE); + /* re-assign request and session thread to the current one */ + request->rq_svc_thread = thread; + if (thread != NULL) { + LASSERT(request->rq_session.lc_thread == NULL); + request->rq_session.lc_thread = thread; + thread->t_env->le_ses = &request->rq_session; + } + svc->srv_ops.so_req_handler(request); + + ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE); put_conn: - lu_context_exit(&request->rq_session); - lu_context_fini(&request->rq_session); - - if (unlikely(cfs_time_current_sec() > request->rq_deadline)) { - DEBUG_REQ(D_WARNING, request, "Request took longer " - "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);" - " client may timeout.", - cfs_time_sub(request->rq_deadline, - request->rq_arrival_time.tv_sec), - cfs_time_sub(cfs_time_current_sec(), - request->rq_deadline)); - } - - cfs_gettimeofday(&work_end); - timediff = cfs_timeval_sub(&work_end, &work_start, NULL); - CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc " - "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in " - "%ldus (%ldus total) trans "LPU64" rc %d/%d\n", - cfs_curproc_comm(), + if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) { + DEBUG_REQ(D_WARNING, request, "Request took longer than estimated (%lld:%llds); client may time out.", + request->rq_deadline - + request->rq_arrival_time.tv_sec, + ktime_get_real_seconds() - request->rq_deadline); + } + + work_end = ktime_get_real(); + timediff_usecs = ktime_us_delta(work_end, work_start); + arrived_usecs = ktime_us_delta(work_end, arrived); + CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request processed in %lldus (%lldus total) trans %llu rc %d/%d\n", + current_comm(), + (request->rq_export ? + (char *)request->rq_export->exp_client_uuid.uuid : "0"), (request->rq_export ? - (char *)request->rq_export->exp_client_uuid.uuid : "0"), - (request->rq_export ? - cfs_atomic_read(&request->rq_export->exp_refcount) : -99), + atomic_read(&request->rq_export->exp_refcount) : -99), lustre_msg_get_status(request->rq_reqmsg), request->rq_xid, libcfs_id2str(request->rq_peer), lustre_msg_get_opc(request->rq_reqmsg), - timediff, - cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL), + timediff_usecs, + arrived_usecs, (request->rq_repmsg ?
lustre_msg_get_transno(request->rq_repmsg) : request->rq_transno), @@ -2020,22 +2147,17 @@ put_conn: LASSERT(opc < LUSTRE_MAX_OPCODES); lprocfs_counter_add(svc->srv_stats, opc + EXTRA_MAX_OPCODES, - timediff); + timediff_usecs); } } if (unlikely(request->rq_early_count)) { DEBUG_REQ(D_ADAPTTO, request, - "sent %d early replies before finishing in " - CFS_DURATION_T"s", + "sent %d early replies before finishing in %llds", request->rq_early_count, - cfs_time_sub(work_end.tv_sec, - request->rq_arrival_time.tv_sec)); + arrived_usecs / USEC_PER_SEC); } -out_req: - if (export != NULL) - class_export_rpc_put(export); - ptlrpc_server_finish_request(svcpt, request); + ptlrpc_server_finish_active_request(svcpt, request); RETURN(1); } @@ -2048,69 +2170,115 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs) { struct ptlrpc_service_part *svcpt = rs->rs_svcpt; struct ptlrpc_service *svc = svcpt->scp_service; - struct obd_export *exp; - int nlocks; - int been_handled; - ENTRY; - - exp = rs->rs_export; + struct obd_export *exp; + int nlocks; + int been_handled; + ENTRY; - LASSERT (rs->rs_difficult); - LASSERT (rs->rs_scheduled); - LASSERT (cfs_list_empty(&rs->rs_list)); + exp = rs->rs_export; + + LASSERT(rs->rs_difficult); + LASSERT(rs->rs_scheduled); + LASSERT(list_empty(&rs->rs_list)); + + /* The disk commit callback holds exp_uncommitted_replies_lock while it + * iterates over newly committed replies, removing them from + * exp_uncommitted_replies. It then drops this lock and schedules the + * replies it found for handling here. + * + * We can avoid contention for exp_uncommitted_replies_lock between the + * HRT threads and further commit callbacks by checking rs_committed + * which is set in the commit callback while it holds both + * rs_lock and exp_uncommitted_replies_lock. + * + * If we see rs_committed clear, the commit callback _may_ not have + * handled this reply yet and we race with it to grab + * exp_uncommitted_replies_lock before removing the reply from + * exp_uncommitted_replies. Note that if we lose the race and the + * reply has already been removed, list_del_init() is a noop. + * + * If we see rs_committed set, we know the commit callback is handling, + * or has handled this reply, since store reordering might allow us to + * see rs_committed set out of sequence. But since this is done + * holding rs_lock, we can be sure it has all completed once we hold + * rs_lock, which we do right next. + */ + if (!rs->rs_committed) { + /* if rs was committed, no need to convert locks; don't check + * rs_committed here because rs may never be added into + * exp_uncommitted_replies and this flag may never be set, see + * target_send_reply() */ + if (rs->rs_convert_lock && + rs->rs_transno > exp->exp_last_committed) { + struct ldlm_lock *lock; + struct ldlm_lock *ack_locks[RS_MAX_LOCKS] = { NULL }; - spin_lock(&exp->exp_lock); - /* Noop if removed already */ - cfs_list_del_init (&rs->rs_exp_list); - spin_unlock(&exp->exp_lock); + spin_lock(&rs->rs_lock); + if (rs->rs_convert_lock && + rs->rs_transno > exp->exp_last_committed) { + nlocks = rs->rs_nlocks; + while (nlocks-- > 0) { + /* + * NB don't assume rs is always handled + * by the same service thread (see + * ptlrpc_hr_select()), so REP-ACK hr may + * race with trans commit, while the + * latter will release locks; get locks + * here early to convert to COS mode + * safely.
+ */ + lock = ldlm_handle2lock( + &rs->rs_locks[nlocks]); + LASSERT(lock); + ack_locks[nlocks] = lock; + rs->rs_modes[nlocks] = LCK_COS; + } + nlocks = rs->rs_nlocks; + rs->rs_convert_lock = 0; + /* clear rs_scheduled so that commit callback + * can schedule again */ + rs->rs_scheduled = 0; + spin_unlock(&rs->rs_lock); + + while (nlocks-- > 0) { + lock = ack_locks[nlocks]; + ldlm_lock_downgrade(lock, LCK_COS); + LDLM_LOCK_PUT(lock); + } + RETURN(0); + } + spin_unlock(&rs->rs_lock); + } - /* The disk commit callback holds exp_uncommitted_replies_lock while it - * iterates over newly committed replies, removing them from - * exp_uncommitted_replies. It then drops this lock and schedules the - * replies it found for handling here. - * - * We can avoid contention for exp_uncommitted_replies_lock between the - * HRT threads and further commit callbacks by checking rs_committed - * which is set in the commit callback while it holds both - * rs_lock and exp_uncommitted_reples. - * - * If we see rs_committed clear, the commit callback _may_ not have - * handled this reply yet and we race with it to grab - * exp_uncommitted_replies_lock before removing the reply from - * exp_uncommitted_replies. Note that if we lose the race and the - * reply has already been removed, list_del_init() is a noop. - * - * If we see rs_committed set, we know the commit callback is handling, - * or has handled this reply since store reordering might allow us to - * see rs_committed set out of sequence. But since this is done - * holding rs_lock, we can be sure it has all completed once we hold - * rs_lock, which we do right next. - */ - if (!rs->rs_committed) { spin_lock(&exp->exp_uncommitted_replies_lock); - cfs_list_del_init(&rs->rs_obd_list); + list_del_init(&rs->rs_obd_list); spin_unlock(&exp->exp_uncommitted_replies_lock); } + spin_lock(&exp->exp_lock); + /* Noop if removed already */ + list_del_init(&rs->rs_exp_list); + spin_unlock(&exp->exp_lock); + spin_lock(&rs->rs_lock); - been_handled = rs->rs_handled; - rs->rs_handled = 1; + been_handled = rs->rs_handled; + rs->rs_handled = 1; - nlocks = rs->rs_nlocks; /* atomic "steal", but */ - rs->rs_nlocks = 0; /* locks still on rs_locks! */ + nlocks = rs->rs_nlocks; /* atomic "steal", but */ + rs->rs_nlocks = 0; /* locks still on rs_locks! 
*/ - if (nlocks == 0 && !been_handled) { - /* If we see this, we should already have seen the warning - * in mds_steal_ack_locks() */ - CDEBUG(D_HA, "All locks stolen from rs %p x"LPD64".t"LPD64 + if (nlocks == 0 && !been_handled) { + /* If we see this, we should already have seen the warning + * in mds_steal_ack_locks() */ + CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld" " o%d NID %s\n", rs, rs->rs_xid, rs->rs_transno, rs->rs_opc, libcfs_nid2str(exp->exp_connection->c_peer.nid)); - } + } - if ((!been_handled && rs->rs_on_net) || nlocks > 0) { + if ((!been_handled && rs->rs_on_net) || nlocks > 0) { spin_unlock(&rs->rs_lock); if (!been_handled && rs->rs_on_net) { @@ -2126,17 +2294,18 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs) } rs->rs_scheduled = 0; + rs->rs_convert_lock = 0; if (!rs->rs_on_net) { /* Off the net */ spin_unlock(&rs->rs_lock); - class_export_put (exp); - rs->rs_export = NULL; - ptlrpc_rs_decref (rs); - if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) && + class_export_put (exp); + rs->rs_export = NULL; + ptlrpc_rs_decref(rs); + if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) && svc->srv_is_stopping) - cfs_waitq_broadcast(&svcpt->scp_waitq); + wake_up_all(&svcpt->scp_waitq); RETURN(1); } @@ -2145,81 +2314,6 @@ ptlrpc_handle_rs(struct ptlrpc_reply_state *rs) RETURN(1); } -#ifndef __KERNEL__ - -/** - * Check whether given service has a reply available for processing - * and process it. - * - * \param svc a ptlrpc service - * \retval 0 no replies processed - * \retval 1 one reply processed - */ -static int -ptlrpc_server_handle_reply(struct ptlrpc_service_part *svcpt) -{ - struct ptlrpc_reply_state *rs = NULL; - ENTRY; - - spin_lock(&svcpt->scp_rep_lock); - if (!cfs_list_empty(&svcpt->scp_rep_queue)) { - rs = cfs_list_entry(svcpt->scp_rep_queue.prev, - struct ptlrpc_reply_state, - rs_list); - cfs_list_del_init(&rs->rs_list); - } - spin_unlock(&svcpt->scp_rep_lock); - if (rs != NULL) - ptlrpc_handle_rs(rs); - RETURN(rs != NULL); -} - -/* FIXME make use of timeout later */ -int -liblustre_check_services (void *arg) -{ - int did_something = 0; - int rc; - cfs_list_t *tmp, *nxt; - ENTRY; - - /* I'm relying on being single threaded, not to have to lock - * ptlrpc_all_services etc */ - cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) { - struct ptlrpc_service *svc = - cfs_list_entry (tmp, struct ptlrpc_service, srv_list); - struct ptlrpc_service_part *svcpt; - - LASSERT(svc->srv_ncpts == 1); - svcpt = svc->srv_parts[0]; - - if (svcpt->scp_nthrs_running != 0) /* I've recursed */ - continue; - - /* service threads can block for bulk, so this limits us - * (arbitrarily) to recursing 1 stack frame per service. - * Note that the problem with recursion is that we have to - * unwind completely before our caller can resume. 
*/ - - svcpt->scp_nthrs_running++; - - do { - rc = ptlrpc_server_handle_req_in(svcpt); - rc |= ptlrpc_server_handle_reply(svcpt); - rc |= ptlrpc_at_check_timed(svcpt); - rc |= ptlrpc_server_handle_request(svcpt, NULL); - rc |= (ptlrpc_server_post_idle_rqbds(svcpt) > 0); - did_something |= rc; - } while (rc); - - svcpt->scp_nthrs_running--; - } - - RETURN(did_something); -} -#define ptlrpc_stop_all_threads(s) do {} while (0) - -#else /* __KERNEL__ */ static void ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt) @@ -2294,7 +2388,7 @@ ptlrpc_thread_stopping(struct ptlrpc_thread *thread) static inline int ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt) { - return !cfs_list_empty(&svcpt->scp_rqbd_idle) && + return !list_empty(&svcpt->scp_rqbd_idle) && svcpt->scp_rqbd_timeout == 0; } @@ -2312,7 +2406,7 @@ ptlrpc_at_check(struct ptlrpc_service_part *svcpt) static inline int ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt) { - return !cfs_list_empty(&svcpt->scp_req_incoming); + return !list_empty(&svcpt->scp_req_incoming); } static __attribute__((__noinline__)) int @@ -2325,7 +2419,7 @@ ptlrpc_wait_event(struct ptlrpc_service_part *svcpt, lc_watchdog_disable(thread->t_watchdog); - cfs_cond_resched(); + cond_resched(); l_wait_event_exclusive_head(svcpt->scp_waitq, ptlrpc_thread_stopping(thread) || @@ -2354,15 +2448,13 @@ static int ptlrpc_main(void *arg) struct ptlrpc_service_part *svcpt = thread->t_svcpt; struct ptlrpc_service *svc = svcpt->scp_service; struct ptlrpc_reply_state *rs; -#ifdef WITH_GROUP_INFO - cfs_group_info_t *ginfo = NULL; -#endif - struct lu_env *env; - int counter = 0, rc = 0; - ENTRY; + struct group_info *ginfo = NULL; + struct lu_env *env; + int counter = 0, rc = 0; + ENTRY; - thread->t_pid = cfs_curproc_pid(); - cfs_daemonize_ctxt(thread->t_name); + thread->t_pid = current_pid(); + unshare_fs_struct(); /* NB: we will call cfs_cpt_bind() for all threads, because we * might want to run lustre server only on a subset of system CPUs, @@ -2373,16 +2465,14 @@ static int ptlrpc_main(void *arg) svc->srv_name, thread->t_name, svcpt->scp_cpt); } -#ifdef WITH_GROUP_INFO - ginfo = cfs_groups_alloc(0); - if (!ginfo) { - rc = -ENOMEM; - goto out; - } + ginfo = groups_alloc(0); + if (!ginfo) { + rc = -ENOMEM; + goto out; + } - cfs_set_current_groups(ginfo); - cfs_put_group_info(ginfo); -#endif + set_current_groups(ginfo); + put_group_info(ginfo); if (svc->srv_ops.so_thr_init != NULL) { rc = svc->srv_ops.so_thr_init(thread); @@ -2405,7 +2495,7 @@ static int ptlrpc_main(void *arg) env->le_ctx.lc_thread = thread; env->le_ctx.lc_cookie = 0x6; - while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) { + while (!list_empty(&svcpt->scp_rqbd_idle)) { rc = ptlrpc_server_post_idle_rqbds(svcpt); if (rc >= 0) continue; @@ -2415,12 +2505,12 @@ static int ptlrpc_main(void *arg) goto out_srv_fini; } - /* Alloc reply state structure for this one */ - OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size); - if (!rs) { - rc = -ENOMEM; - goto out_srv_fini; - } + /* Alloc reply state structure for this one */ + OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size); + if (!rs) { + rc = -ENOMEM; + goto out_srv_fini; + } spin_lock(&svcpt->scp_lock); @@ -2439,14 +2529,14 @@ static int ptlrpc_main(void *arg) spin_unlock(&svcpt->scp_lock); /* wake up our creator in case he's still waiting. 
*/ - cfs_waitq_signal(&thread->t_ctl_waitq); + wake_up(&thread->t_ctl_waitq); thread->t_watchdog = lc_watchdog_add(ptlrpc_server_get_timeout(svcpt), NULL, NULL); spin_lock(&svcpt->scp_rep_lock); - cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle); - cfs_waitq_signal(&svcpt->scp_rep_waitq); + list_add(&rs->rs_list, &svcpt->scp_rep_idle); + wake_up(&svcpt->scp_rep_waitq); spin_unlock(&svcpt->scp_rep_lock); CDEBUG(D_NET, "service thread %d (#%d) started\n", thread->t_id, @@ -2464,9 +2554,14 @@ static int ptlrpc_main(void *arg) ptlrpc_start_thread(svcpt, 0); } + /* reset le_ses to initial state */ + env->le_ses = NULL; /* Process all incoming reqs before handling any */ if (ptlrpc_server_request_incoming(svcpt)) { - ptlrpc_server_handle_req_in(svcpt); + lu_context_enter(&env->le_ctx); + ptlrpc_server_handle_req_in(svcpt, thread); + lu_context_exit(&env->le_ctx); + /* but limit ourselves in case of flood */ if (counter++ < 100) continue; @@ -2523,21 +2618,21 @@ out: thread->t_id = rc; thread_add_flags(thread, SVC_STOPPED); - cfs_waitq_signal(&thread->t_ctl_waitq); + wake_up(&thread->t_ctl_waitq); spin_unlock(&svcpt->scp_lock); return rc; } static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt, - cfs_list_t *replies) + struct list_head *replies) { int result; spin_lock(&hrt->hrt_lock); - cfs_list_splice_init(&hrt->hrt_queue, replies); - result = ptlrpc_hr.hr_stopping || !cfs_list_empty(replies); + list_splice_init(&hrt->hrt_queue, replies); + result = ptlrpc_hr.hr_stopping || !list_empty(replies); spin_unlock(&hrt->hrt_lock); return result; @@ -2551,39 +2646,41 @@ static int ptlrpc_hr_main(void *arg) { struct ptlrpc_hr_thread *hrt = (struct ptlrpc_hr_thread *)arg; struct ptlrpc_hr_partition *hrp = hrt->hrt_partition; - CFS_LIST_HEAD (replies); - char threadname[20]; + struct list_head replies; int rc; - snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d", - hrp->hrp_cpt, hrt->hrt_id); - cfs_daemonize_ctxt(threadname); + INIT_LIST_HEAD(&replies); + unshare_fs_struct(); rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt); if (rc != 0) { + char threadname[20]; + + snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d", + hrp->hrp_cpt, hrt->hrt_id); CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n", threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc); } - cfs_atomic_inc(&hrp->hrp_nstarted); - cfs_waitq_signal(&ptlrpc_hr.hr_waitq); + atomic_inc(&hrp->hrp_nstarted); + wake_up(&ptlrpc_hr.hr_waitq); while (!ptlrpc_hr.hr_stopping) { l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies)); - while (!cfs_list_empty(&replies)) { - struct ptlrpc_reply_state *rs; + while (!list_empty(&replies)) { + struct ptlrpc_reply_state *rs; - rs = cfs_list_entry(replies.prev, - struct ptlrpc_reply_state, - rs_list); - cfs_list_del_init(&rs->rs_list); - ptlrpc_handle_rs(rs); - } - } + rs = list_entry(replies.prev, + struct ptlrpc_reply_state, + rs_list); + list_del_init(&rs->rs_list); + ptlrpc_handle_rs(rs); + } + } - cfs_atomic_inc(&hrp->hrp_nstopped); - cfs_waitq_signal(&ptlrpc_hr.hr_waitq); + atomic_inc(&hrp->hrp_nstopped); + wake_up(&ptlrpc_hr.hr_waitq); return 0; } @@ -2600,15 +2697,15 @@ static void ptlrpc_stop_hr_threads(void) if (hrp->hrp_thrs == NULL) continue; /* uninitialized */ for (j = 0; j < hrp->hrp_nthrs; j++) - cfs_waitq_broadcast(&hrp->hrp_thrs[j].hrt_waitq); + wake_up_all(&hrp->hrp_thrs[j].hrt_waitq); } cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { if (hrp->hrp_thrs == NULL) continue; /* uninitialized */ - cfs_wait_event(ptlrpc_hr.hr_waitq, - 
cfs_atomic_read(&hrp->hrp_nstopped) == - cfs_atomic_read(&hrp->hrp_nstarted)); + wait_event(ptlrpc_hr.hr_waitq, + atomic_read(&hrp->hrp_nstopped) == + atomic_read(&hrp->hrp_nstarted)); } } @@ -2623,22 +2720,31 @@ static int ptlrpc_start_hr_threads(void) int rc = 0; for (j = 0; j < hrp->hrp_nthrs; j++) { - rc = cfs_create_thread(ptlrpc_hr_main, - &hrp->hrp_thrs[j], - CLONE_VM | CLONE_FILES); - if (rc < 0) + struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j]; + struct task_struct *task; + + task = kthread_run(ptlrpc_hr_main, + &hrp->hrp_thrs[j], + "ptlrpc_hr%02d_%03d", + hrp->hrp_cpt, + hrt->hrt_id); + if (IS_ERR(task)) { + rc = PTR_ERR(task); break; + } } - cfs_wait_event(ptlrpc_hr.hr_waitq, - cfs_atomic_read(&hrp->hrp_nstarted) == j); - if (rc >= 0) - continue; - CERROR("Reply handling thread %d:%d Failed on starting: " - "rc = %d\n", i, j, rc); - ptlrpc_stop_hr_threads(); - RETURN(rc); + wait_event(ptlrpc_hr.hr_waitq, + atomic_read(&hrp->hrp_nstarted) == j); + + if (rc < 0) { + CERROR("cannot start reply handler thread %d:%d: " + "rc = %d\n", i, j, rc); + ptlrpc_stop_hr_threads(); + RETURN(rc); + } } + RETURN(0); } @@ -2646,13 +2752,14 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) { struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread; - CFS_LIST_HEAD (zombie); + struct list_head zombie; ENTRY; CDEBUG(D_INFO, "Stopping threads for service %s\n", svcpt->scp_service->srv_name); + INIT_LIST_HEAD(&zombie); spin_lock(&svcpt->scp_lock); /* let the thread know that we would like it to stop asap */ list_for_each_entry(thread, &svcpt->scp_threads, t_link) { @@ -2661,14 +2768,14 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) thread_add_flags(thread, SVC_STOPPING); } - cfs_waitq_broadcast(&svcpt->scp_waitq); + wake_up_all(&svcpt->scp_waitq); - while (!cfs_list_empty(&svcpt->scp_threads)) { - thread = cfs_list_entry(svcpt->scp_threads.next, + while (!list_empty(&svcpt->scp_threads)) { + thread = list_entry(svcpt->scp_threads.next, struct ptlrpc_thread, t_link); if (thread_is_stopped(thread)) { - cfs_list_del(&thread->t_link); - cfs_list_add(&thread->t_link, &zombie); + list_del(&thread->t_link); + list_add(&thread->t_link, &zombie); continue; } spin_unlock(&svcpt->scp_lock); @@ -2683,10 +2790,10 @@ static void ptlrpc_svcpt_stop_threads(struct ptlrpc_service_part *svcpt) spin_unlock(&svcpt->scp_lock); - while (!cfs_list_empty(&zombie)) { - thread = cfs_list_entry(zombie.next, + while (!list_empty(&zombie)) { + thread = list_entry(zombie.next, struct ptlrpc_thread, t_link); - cfs_list_del(&thread->t_link); + list_del(&thread->t_link); OBD_FREE_PTR(thread); } EXIT; @@ -2708,7 +2815,6 @@ void ptlrpc_stop_all_threads(struct ptlrpc_service *svc) EXIT; } -EXPORT_SYMBOL(ptlrpc_stop_all_threads); int ptlrpc_start_threads(struct ptlrpc_service *svc) { @@ -2740,13 +2846,13 @@ int ptlrpc_start_threads(struct ptlrpc_service *svc) ptlrpc_stop_all_threads(svc); RETURN(rc); } -EXPORT_SYMBOL(ptlrpc_start_threads); int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) { struct l_wait_info lwi = { 0 }; struct ptlrpc_thread *thread; struct ptlrpc_service *svc; + struct task_struct *task; int rc; ENTRY; @@ -2770,7 +2876,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) OBD_CPT_ALLOC_PTR(thread, svc->srv_cptable, svcpt->scp_cpt); if (thread == NULL) RETURN(-ENOMEM); - cfs_waitq_init(&thread->t_ctl_waitq); + init_waitqueue_head(&thread->t_ctl_waitq); spin_lock(&svcpt->scp_lock); if (!ptlrpc_threads_increasable(svcpt)) { 
@@ -2788,7 +2894,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) if (wait) { CDEBUG(D_INFO, "Waiting for creating thread %s #%d\n", svc->srv_thread_name, svcpt->scp_thr_nextid); - cfs_schedule(); + schedule(); goto again; } @@ -2802,7 +2908,7 @@ int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait) thread_add_flags(thread, SVC_STARTING); thread->t_svcpt = svcpt; - cfs_list_add(&thread->t_link, &svcpt->scp_threads); + list_add(&thread->t_link, &svcpt->scp_threads); spin_unlock(&svcpt->scp_lock); if (svcpt->scp_cpt >= 0) { @@ -2814,20 +2920,25 @@ } CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name); - /* - * CLONE_VM and CLONE_FILES just avoid a needless copy, because we - * just drop the VM and FILES in cfs_daemonize_ctxt() right away. - */ - rc = cfs_create_thread(ptlrpc_main, thread, CFS_DAEMON_FLAGS); - if (rc < 0) { - CERROR("cannot start thread '%s': rc %d\n", + task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name); + if (IS_ERR(task)) { + rc = PTR_ERR(task); + CERROR("cannot start thread '%s': rc = %d\n", thread->t_name, rc); spin_lock(&svcpt->scp_lock); - cfs_list_del(&thread->t_link); --svcpt->scp_nthrs_starting; - spin_unlock(&svcpt->scp_lock); - - OBD_FREE(thread, sizeof(*thread)); + if (thread_is_stopping(thread)) { + /* this ptlrpc_thread is being handled + * by ptlrpc_svcpt_stop_threads now + */ + thread_add_flags(thread, SVC_STOPPED); + wake_up(&thread->t_ctl_waitq); + spin_unlock(&svcpt->scp_lock); + } else { + list_del(&thread->t_link); + spin_unlock(&svcpt->scp_lock); + OBD_FREE_PTR(thread); + } RETURN(rc); } @@ -2847,8 +2958,9 @@ int ptlrpc_hr_init(void) struct ptlrpc_hr_partition *hrp; struct ptlrpc_hr_thread *hrt; int rc; + int cpt; int i; - int j; + int weight; ENTRY; memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr)); @@ -2859,31 +2971,34 @@ int ptlrpc_hr_init(void) if (ptlrpc_hr.hr_partitions == NULL) RETURN(-ENOMEM); - cfs_waitq_init(&ptlrpc_hr.hr_waitq); + init_waitqueue_head(&ptlrpc_hr.hr_waitq); - cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) { - hrp->hrp_cpt = i; + weight = cpumask_weight(topology_sibling_cpumask(smp_processor_id())); + + cfs_percpt_for_each(hrp, cpt, ptlrpc_hr.hr_partitions) { + hrp->hrp_cpt = cpt; - cfs_atomic_set(&hrp->hrp_nstarted, 0); - cfs_atomic_set(&hrp->hrp_nstopped, 0); + atomic_set(&hrp->hrp_nstarted, 0); + atomic_set(&hrp->hrp_nstopped, 0); - hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i); - hrp->hrp_nthrs /= cfs_cpu_ht_nsiblings(0); + hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, cpt); + hrp->hrp_nthrs /= weight; + if (hrp->hrp_nthrs == 0) + hrp->hrp_nthrs = 1; - LASSERT(hrp->hrp_nthrs > 0); - OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i, + OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, cpt, hrp->hrp_nthrs * sizeof(*hrt)); if (hrp->hrp_thrs == NULL) GOTO(out, rc = -ENOMEM); - for (j = 0; j < hrp->hrp_nthrs; j++) { - hrt = &hrp->hrp_thrs[j]; + for (i = 0; i < hrp->hrp_nthrs; i++) { + hrt = &hrp->hrp_thrs[i]; - hrt->hrt_id = j; + hrt->hrt_id = i; hrt->hrt_partition = hrp; - cfs_waitq_init(&hrt->hrt_waitq); + init_waitqueue_head(&hrt->hrt_waitq); spin_lock_init(&hrt->hrt_lock); - CFS_INIT_LIST_HEAD(&hrt->hrt_queue); + INIT_LIST_HEAD(&hrt->hrt_queue); } } @@ -2897,14 +3012,14 @@ out: void ptlrpc_hr_fini(void) { struct ptlrpc_hr_partition *hrp; - int i; + int cpt; if (ptlrpc_hr.hr_partitions == NULL) return; ptlrpc_stop_hr_threads(); - cfs_percpt_for_each(hrp, i,
ptlrpc_hr.hr_partitions) { + cfs_percpt_for_each(hrp, cpt, ptlrpc_hr.hr_partitions) { if (hrp->hrp_thrs != NULL) { OBD_FREE(hrp->hrp_thrs, hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0])); @@ -2915,7 +3030,6 @@ void ptlrpc_hr_fini(void) ptlrpc_hr.hr_partitions = NULL; } -#endif /* __KERNEL__ */ /** * Wait until all already scheduled replies are processed. @@ -2928,7 +3042,7 @@ static void ptlrpc_wait_replies(struct ptlrpc_service_part *svcpt) NULL, NULL); rc = l_wait_event(svcpt->scp_waitq, - cfs_atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi); + atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi); if (rc == 0) break; CWARN("Unexpectedly long timeout %s %p\n", @@ -2945,7 +3059,7 @@ ptlrpc_service_del_atimer(struct ptlrpc_service *svc) /* early disarm AT timer... */ ptlrpc_service_for_each_part(svcpt, i, svc) { if (svcpt->scp_service != NULL) - cfs_timer_disarm(&svcpt->scp_at_timer); + del_timer(&svcpt->scp_at_timer); } } @@ -2971,7 +3085,7 @@ ptlrpc_service_unlink_rqbd(struct ptlrpc_service *svc) /* Unlink all the request buffers. This forces a 'final' * event with its 'unlink' flag set for each posted rqbd */ - cfs_list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted, + list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted, rqbd_list) { rc = LNetMDUnlink(rqbd->rqbd_md_h); LASSERT(rc == 0 || rc == -ENOENT); @@ -3020,8 +3134,8 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) break; spin_lock(&svcpt->scp_rep_lock); - while (!cfs_list_empty(&svcpt->scp_rep_active)) { - rs = cfs_list_entry(svcpt->scp_rep_active.next, + while (!list_empty(&svcpt->scp_rep_active)) { + rs = list_entry(svcpt->scp_rep_active.next, struct ptlrpc_reply_state, rs_list); spin_lock(&rs->rs_lock); ptlrpc_schedule_difficult_reply(rs); @@ -3032,27 +3146,21 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) /* purge the request queue. NB No new replies (rqbds * all unlinked) and no service threads, so I'm the only * thread noodling the request queue now */ - while (!cfs_list_empty(&svcpt->scp_req_incoming)) { - req = cfs_list_entry(svcpt->scp_req_incoming.next, + while (!list_empty(&svcpt->scp_req_incoming)) { + req = list_entry(svcpt->scp_req_incoming.next, struct ptlrpc_request, rq_list); - cfs_list_del(&req->rq_list); + list_del(&req->rq_list); svcpt->scp_nreqs_incoming--; - svcpt->scp_nreqs_active++; ptlrpc_server_finish_request(svcpt, req); } while (ptlrpc_server_request_pending(svcpt, true)) { req = ptlrpc_server_request_get(svcpt, true); - svcpt->scp_nreqs_active++; - ptlrpc_server_hpreq_fini(req); - - if (req->rq_export != NULL) - class_export_rpc_put(req->rq_export); - ptlrpc_server_finish_request(svcpt, req); + ptlrpc_server_finish_active_request(svcpt, req); } - LASSERT(cfs_list_empty(&svcpt->scp_rqbd_posted)); + LASSERT(list_empty(&svcpt->scp_rqbd_posted)); LASSERT(svcpt->scp_nreqs_incoming == 0); LASSERT(svcpt->scp_nreqs_active == 0); /* history should have been culled by @@ -3062,19 +3170,19 @@ ptlrpc_service_purge_all(struct ptlrpc_service *svc) /* Now free all the request buffers since nothing * references them any more... 
*/ - while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) { - rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next, + while (!list_empty(&svcpt->scp_rqbd_idle)) { + rqbd = list_entry(svcpt->scp_rqbd_idle.next, struct ptlrpc_request_buffer_desc, rqbd_list); ptlrpc_free_rqbd(rqbd); } ptlrpc_wait_replies(svcpt); - while (!cfs_list_empty(&svcpt->scp_rep_idle)) { - rs = cfs_list_entry(svcpt->scp_rep_idle.next, + while (!list_empty(&svcpt->scp_rep_idle)) { + rs = list_entry(svcpt->scp_rep_idle.next, struct ptlrpc_reply_state, rs_list); - cfs_list_del(&rs->rs_list); + list_del(&rs->rs_list); OBD_FREE_LARGE(rs, svc->srv_max_reply_size); } } @@ -3092,12 +3200,12 @@ ptlrpc_service_free(struct ptlrpc_service *svc) break; /* In case somebody rearmed this in the meantime */ - cfs_timer_disarm(&svcpt->scp_at_timer); + del_timer(&svcpt->scp_at_timer); array = &svcpt->scp_at_array; if (array->paa_reqs_array != NULL) { OBD_FREE(array->paa_reqs_array, - sizeof(cfs_list_t) * array->paa_size); + sizeof(struct list_head) * array->paa_size); array->paa_reqs_array = NULL; } @@ -3127,7 +3235,7 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service) service->srv_is_stopping = 1; mutex_lock(&ptlrpc_all_services_mutex); - cfs_list_del_init(&service->srv_list); + list_del_init(&service->srv_list); mutex_unlock(&ptlrpc_all_services_mutex); ptlrpc_service_del_atimer(service); @@ -3138,6 +3246,7 @@ int ptlrpc_unregister_service(struct ptlrpc_service *service) ptlrpc_service_nrs_cleanup(service); ptlrpc_lprocfs_unregister_service(service); + ptlrpc_sysfs_unregister_service(service); ptlrpc_service_free(service); @@ -3151,16 +3260,16 @@ EXPORT_SYMBOL(ptlrpc_unregister_service); * Right now, it just checks to make sure that requests aren't languishing * in the queue. We'll use this health check to govern whether a node needs * to be shot, so it's intentionally non-aggressive. */ -int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt) +static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt) { - struct ptlrpc_request *request = NULL; - struct timeval right_now; - long timediff; + struct ptlrpc_request *request = NULL; + struct timespec64 right_now; + struct timespec64 timediff; - cfs_gettimeofday(&right_now); + ktime_get_real_ts64(&right_now); spin_lock(&svcpt->scp_req_lock); - /* How long has the next entry been waiting? */ + /* How long has the next entry been waiting? */ if (ptlrpc_server_high_pending(svcpt, true)) request = ptlrpc_nrs_req_peek_nolock(svcpt, true); else if (ptlrpc_server_normal_pending(svcpt, true)) @@ -3171,13 +3280,13 @@ int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt) return 0; } - timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL); + timediff = timespec64_sub(right_now, request->rq_arrival_time); spin_unlock(&svcpt->scp_req_lock); - if ((timediff / ONE_MILLION) > + if ((timediff.tv_sec) > (AT_OFF ? obd_timeout * 3 / 2 : at_max)) { - CERROR("%s: unhealthy - request has been waiting %lds\n", - svcpt->scp_service->srv_name, timediff / ONE_MILLION); + CERROR("%s: unhealthy - request has been waiting %llds\n", + svcpt->scp_service->srv_name, (s64)timediff.tv_sec); return -1; }
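The timing rework in ptlrpc_server_handle_request() above drops cfs_gettimeofday()/cfs_timeval_sub() in favour of ktime_get_real() and ktime_us_delta(), accounting separately for how long a request sat queued (arrival to work_start) and how long the handler ran (work_start to work_end). Below is a minimal userspace sketch of the same accounting pattern, with POSIX clock_gettime() standing in for the kernel ktime helpers; every name in it is illustrative, not part of Lustre:

#include <stdint.h>
#include <stdio.h>
#include <time.h>

/* Illustrative stand-in for ktime_us_delta(): microseconds from a to b. */
static int64_t us_delta(const struct timespec *b, const struct timespec *a)
{
	return (int64_t)(b->tv_sec - a->tv_sec) * 1000000 +
	       (b->tv_nsec - a->tv_nsec) / 1000;
}

int main(void)
{
	struct timespec arrived, work_start, work_end;

	clock_gettime(CLOCK_REALTIME, &arrived);	/* rq_arrival_time */
	/* ... request waits in the incoming queue ... */
	clock_gettime(CLOCK_REALTIME, &work_start);	/* handler picks it up */
	/* ... handler runs ... */
	clock_gettime(CLOCK_REALTIME, &work_end);

	printf("queued %lldus, handled %lldus (%lldus total)\n",
	       (long long)us_delta(&work_start, &arrived),
	       (long long)us_delta(&work_end, &work_start),
	       (long long)us_delta(&work_end, &arrived));
	return 0;
}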
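The new rs_convert_lock branch in ptlrpc_handle_rs() uses a test/lock/retest shape: the flag is checked cheaply without rs_lock, then rechecked under rs_lock before the work is claimed, because a commit callback may clear it concurrently. A userspace sketch of that pattern, with a pthread mutex standing in for the kernel spinlock; struct reply_state and maybe_convert() here are hypothetical stand-ins, not Lustre code:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical model of the rs_lock/rs_convert_lock fields above. */
struct reply_state {
	pthread_mutex_t rs_lock;
	bool rs_convert_lock;
	long rs_transno;
};

/* Cheap unlocked test first, then re-test under rs_lock before claiming
 * the conversion work, since another thread may clear the flag in between
 * (the same shape as the rs_convert_lock check above). */
static bool maybe_convert(struct reply_state *rs, long last_committed)
{
	bool claimed = false;

	if (!rs->rs_convert_lock || rs->rs_transno <= last_committed)
		return false;

	pthread_mutex_lock(&rs->rs_lock);
	if (rs->rs_convert_lock && rs->rs_transno > last_committed) {
		rs->rs_convert_lock = false;	/* claim the work */
		claimed = true;
	}
	pthread_mutex_unlock(&rs->rs_lock);

	if (claimed) {
		/* ... downgrade the acquired locks outside rs_lock ... */
	}
	return claimed;
}

int main(void)
{
	struct reply_state rs = {
		.rs_lock = PTHREAD_MUTEX_INITIALIZER,
		.rs_convert_lock = true,
		.rs_transno = 42,
	};

	printf("converted: %d\n", maybe_convert(&rs, 40)); /* 1 */
	printf("converted: %d\n", maybe_convert(&rs, 40)); /* 0: already claimed */
	return 0;
}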
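Both thread-start paths now go through kthread_run() instead of cfs_create_thread(). kthread_run() reports failure with an ERR_PTR()-encoded pointer, never NULL, which is why ptlrpc_start_thread() and ptlrpc_start_hr_threads() test IS_ERR() and recover the error code with PTR_ERR(). A bare-bones module sketch of that pattern; it builds only against kernel headers, and all names in it are made up for illustration:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/err.h>

static struct task_struct *worker;

static int worker_fn(void *arg)
{
	while (!kthread_should_stop())
		msleep(100);	/* stand-in for a service loop */
	return 0;
}

static int __init demo_init(void)
{
	/* kthread_run() returns an ERR_PTR() on failure, never NULL, so
	 * the result must be checked with IS_ERR()/PTR_ERR(). */
	worker = kthread_run(worker_fn, NULL, "demo_worker");
	if (IS_ERR(worker))
		return PTR_ERR(worker);
	return 0;
}

static void __exit demo_exit(void)
{
	kthread_stop(worker);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");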
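Finally, ptlrpc_hr_init() now sizes each partition's reply-handling pool as the CPT's CPU weight divided by the SMT sibling count, clamping the result to one thread instead of asserting the quotient is positive as the old code did. The same arithmetic, modelled in plain C with an illustrative hr_nthreads() helper:

#include <stdio.h>

/* Illustrative version of the hrp_nthrs computation above: roughly one
 * handler thread per physical core in the CPT, obtained by dividing the
 * CPT's CPU count by the SMT sibling count, never dropping to zero. */
static int hr_nthreads(int cpt_weight, int sibling_weight)
{
	int nthrs = cpt_weight / sibling_weight;

	return nthrs > 0 ? nthrs : 1;
}

int main(void)
{
	/* an 8-CPU partition with 2-way SMT gets 4 threads */
	printf("%d\n", hr_nthreads(8, 2));
	/* a 1-CPU partition with 2-way SMT still gets one thread,
	 * where the old code would have tripped its LASSERT */
	printf("%d\n", hr_nthreads(1, 2));
	return 0;
}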