*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2010, 2013, Intel Corporation.
+ * Copyright (c) 2010, 2016, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
*/
#define DEBUG_SUBSYSTEM S_RPC
-#ifndef __KERNEL__
-#include <liblustre.h>
-#endif
+#include <linux/kthread.h>
#include <obd_support.h>
#include <obd_class.h>
#include <lustre_net.h>
/* The following are visible and mutable through /sys/module/ptlrpc */
int test_req_buffer_pressure = 0;
-CFS_MODULE_PARM(test_req_buffer_pressure, "i", int, 0444,
- "set non-zero to put pressure on request buffer pools");
-CFS_MODULE_PARM(at_min, "i", int, 0644,
- "Adaptive timeout minimum (sec)");
-CFS_MODULE_PARM(at_max, "i", int, 0644,
- "Adaptive timeout maximum (sec)");
-CFS_MODULE_PARM(at_history, "i", int, 0644,
- "Adaptive timeouts remember the slowest event that took place "
- "within this period (sec)");
-CFS_MODULE_PARM(at_early_margin, "i", int, 0644,
- "How soon before an RPC deadline to send an early reply");
-CFS_MODULE_PARM(at_extra, "i", int, 0644,
- "How much extra time to give with each early reply");
-
+module_param(test_req_buffer_pressure, int, 0444);
+MODULE_PARM_DESC(test_req_buffer_pressure, "set non-zero to put pressure on request buffer pools");
+module_param(at_min, int, 0644);
+MODULE_PARM_DESC(at_min, "Adaptive timeout minimum (sec)");
+module_param(at_max, int, 0644);
+MODULE_PARM_DESC(at_max, "Adaptive timeout maximum (sec)");
+module_param(at_history, int, 0644);
+MODULE_PARM_DESC(at_history,
+ "Adaptive timeouts remember the slowest event that took place within this period (sec)");
+module_param(at_early_margin, int, 0644);
+MODULE_PARM_DESC(at_early_margin, "How soon before an RPC deadline to send an early reply");
+module_param(at_extra, int, 0644);
+MODULE_PARM_DESC(at_extra, "How much extra time to give with each early reply");
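+
+/*
+ * Note: the CFS_MODULE_PARM wrapper is replaced by the standard kernel
+ * pairing of module_param() and MODULE_PARM_DESC(). A minimal sketch of
+ * the same idiom for a hypothetical knob (illustrative only):
+ *
+ *	static int my_knob;
+ *	module_param(my_knob, int, 0644);
+ *	MODULE_PARM_DESC(my_knob, "an example tunable");
+ */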
/* forward ref */
static int ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt);
static void ptlrpc_at_remove_timed(struct ptlrpc_request *req);
/** Holds a list of all PTLRPC services */
-CFS_LIST_HEAD(ptlrpc_all_services);
+LIST_HEAD(ptlrpc_all_services);
/** Used to protect the \e ptlrpc_all_services list */
struct mutex ptlrpc_all_services_mutex;
-struct ptlrpc_request_buffer_desc *
+static struct ptlrpc_request_buffer_desc *
ptlrpc_alloc_rqbd(struct ptlrpc_service_part *svcpt)
{
struct ptlrpc_service *svc = svcpt->scp_service;
rqbd->rqbd_refcount = 0;
rqbd->rqbd_cbid.cbid_fn = request_in_callback;
rqbd->rqbd_cbid.cbid_arg = rqbd;
- CFS_INIT_LIST_HEAD(&rqbd->rqbd_reqs);
+ INIT_LIST_HEAD(&rqbd->rqbd_reqs);
OBD_CPT_ALLOC_LARGE(rqbd->rqbd_buffer, svc->srv_cptable,
svcpt->scp_cpt, svc->srv_buf_size);
if (rqbd->rqbd_buffer == NULL) {
}
spin_lock(&svcpt->scp_lock);
- cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
+ list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
svcpt->scp_nrqbds_total++;
spin_unlock(&svcpt->scp_lock);
return rqbd;
}
-void
+static void
ptlrpc_free_rqbd(struct ptlrpc_request_buffer_desc *rqbd)
{
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
LASSERT(rqbd->rqbd_refcount == 0);
- LASSERT(cfs_list_empty(&rqbd->rqbd_reqs));
+ LASSERT(list_empty(&rqbd->rqbd_reqs));
spin_lock(&svcpt->scp_lock);
- cfs_list_del(&rqbd->rqbd_list);
+ list_del(&rqbd->rqbd_list);
svcpt->scp_nrqbds_total--;
spin_unlock(&svcpt->scp_lock);
OBD_FREE_PTR(rqbd);
}
-int
+static int
ptlrpc_grow_req_bufs(struct ptlrpc_service_part *svcpt, int post)
{
struct ptlrpc_service *svc = svcpt->scp_service;
* Puts a lock and its mode into the reply state associated with the request reply.
*/
void
-ptlrpc_save_lock(struct ptlrpc_request *req,
- struct lustre_handle *lock, int mode, int no_ack)
+ptlrpc_save_lock(struct ptlrpc_request *req, struct lustre_handle *lock,
+ int mode, bool no_ack, bool convert_lock)
{
- struct ptlrpc_reply_state *rs = req->rq_reply_state;
- int idx;
+ struct ptlrpc_reply_state *rs = req->rq_reply_state;
+ int idx;
- LASSERT(rs != NULL);
- LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
+ LASSERT(rs != NULL);
+ LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
- if (req->rq_export->exp_disconnected) {
- ldlm_lock_decref(lock, mode);
- } else {
- idx = rs->rs_nlocks++;
- rs->rs_locks[idx] = *lock;
- rs->rs_modes[idx] = mode;
- rs->rs_difficult = 1;
- rs->rs_no_ack = !!no_ack;
- }
+ idx = rs->rs_nlocks++;
+ rs->rs_locks[idx] = *lock;
+ rs->rs_modes[idx] = mode;
+ rs->rs_difficult = 1;
+ rs->rs_no_ack = no_ack;
+ rs->rs_convert_lock = convert_lock;
}
EXPORT_SYMBOL(ptlrpc_save_lock);
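+
+/*
+ * Caller sketch (illustrative, not a real call site): with the widened
+ * signature, a handler saving a write lock on its reply would now call
+ *
+ *	ptlrpc_save_lock(req, &lockh, LCK_PW, false, false);
+ *
+ * passing convert_lock = true instead lets ptlrpc_handle_rs() downgrade
+ * the saved lock to LCK_COS (see the rs_convert_lock handling below).
+ */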
-#ifdef __KERNEL__
struct ptlrpc_hr_partition;
int hrt_id; /* thread ID */
spinlock_t hrt_lock;
wait_queue_head_t hrt_waitq;
- cfs_list_t hrt_queue; /* RS queue */
+	struct list_head		hrt_queue;	/* RS queue */
struct ptlrpc_hr_partition *hrt_partition;
};
struct ptlrpc_hr_partition {
/* # of started threads */
- cfs_atomic_t hrp_nstarted;
+ atomic_t hrp_nstarted;
/* # of stopped threads */
- cfs_atomic_t hrp_nstopped;
+ atomic_t hrp_nstopped;
/* cpu partition id */
int hrp_cpt;
/* round-robin rotor for choosing thread */
};
struct rs_batch {
- cfs_list_t rsb_replies;
+ struct list_head rsb_replies;
unsigned int rsb_n_replies;
struct ptlrpc_service_part *rsb_svcpt;
};
*/
static void rs_batch_init(struct rs_batch *b)
{
- memset(b, 0, sizeof *b);
- CFS_INIT_LIST_HEAD(&b->rsb_replies);
+ memset(b, 0, sizeof *b);
+ INIT_LIST_HEAD(&b->rsb_replies);
}
/**
hrt = ptlrpc_hr_select(b->rsb_svcpt);
spin_lock(&hrt->hrt_lock);
- cfs_list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
+ list_splice_init(&b->rsb_replies, &hrt->hrt_queue);
spin_unlock(&hrt->hrt_lock);
wake_up(&hrt->hrt_waitq);
spin_lock(&rs->rs_lock);
rs->rs_scheduled_ever = 1;
if (rs->rs_scheduled == 0) {
- cfs_list_move(&rs->rs_list, &b->rsb_replies);
+ list_move(&rs->rs_list, &b->rsb_replies);
rs->rs_scheduled = 1;
b->rsb_n_replies++;
}
#define DECLARE_RS_BATCH(b) struct rs_batch b
-#else /* __KERNEL__ */
-
-#define rs_batch_init(b) do{}while(0)
-#define rs_batch_fini(b) do{}while(0)
-#define rs_batch_add(b, r) ptlrpc_schedule_difficult_reply(r)
-#define DECLARE_RS_BATCH(b)
-
-#endif /* __KERNEL__ */
/**
* Put reply state into a queue for processing because we received
*/
void ptlrpc_dispatch_difficult_reply(struct ptlrpc_reply_state *rs)
{
-#ifdef __KERNEL__
struct ptlrpc_hr_thread *hrt;
ENTRY;
- LASSERT(cfs_list_empty(&rs->rs_list));
+ LASSERT(list_empty(&rs->rs_list));
hrt = ptlrpc_hr_select(rs->rs_svcpt);
spin_lock(&hrt->hrt_lock);
- cfs_list_add_tail(&rs->rs_list, &hrt->hrt_queue);
+ list_add_tail(&rs->rs_list, &hrt->hrt_queue);
spin_unlock(&hrt->hrt_lock);
wake_up(&hrt->hrt_waitq);
EXIT;
-#else
- cfs_list_add_tail(&rs->rs_list, &rs->rs_svcpt->scp_rep_queue);
-#endif
}
void
{
ENTRY;
- LASSERT(spin_is_locked(&rs->rs_svcpt->scp_rep_lock));
- LASSERT(spin_is_locked(&rs->rs_lock));
+ assert_spin_locked(&rs->rs_svcpt->scp_rep_lock);
+ assert_spin_locked(&rs->rs_lock);
LASSERT (rs->rs_difficult);
rs->rs_scheduled_ever = 1; /* flag any notification attempt */
}
rs->rs_scheduled = 1;
- cfs_list_del_init(&rs->rs_list);
+ list_del_init(&rs->rs_list);
ptlrpc_dispatch_difficult_reply(rs);
EXIT;
}
/* CAVEAT EMPTOR: spinlock ordering!!! */
spin_lock(&exp->exp_uncommitted_replies_lock);
- cfs_list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
+ list_for_each_entry_safe(rs, nxt, &exp->exp_uncommitted_replies,
rs_obd_list) {
LASSERT (rs->rs_difficult);
/* VBR: per-export last_committed */
LASSERT(rs->rs_export);
if (rs->rs_transno <= exp->exp_last_committed) {
- cfs_list_del_init(&rs->rs_obd_list);
+ list_del_init(&rs->rs_obd_list);
rs_batch_add(&batch, rs);
}
}
rs_batch_fini(&batch);
EXIT;
}
-EXPORT_SYMBOL(ptlrpc_commit_replies);
static int
ptlrpc_server_post_idle_rqbds(struct ptlrpc_service_part *svcpt)
for (;;) {
spin_lock(&svcpt->scp_lock);
- if (cfs_list_empty(&svcpt->scp_rqbd_idle)) {
+ if (list_empty(&svcpt->scp_rqbd_idle)) {
spin_unlock(&svcpt->scp_lock);
return posted;
}
- rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next,
+ rqbd = list_entry(svcpt->scp_rqbd_idle.next,
struct ptlrpc_request_buffer_desc,
rqbd_list);
- cfs_list_del(&rqbd->rqbd_list);
+ list_del(&rqbd->rqbd_list);
/* assume we will post successfully */
svcpt->scp_nrqbds_posted++;
- cfs_list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
+ list_add(&rqbd->rqbd_list, &svcpt->scp_rqbd_posted);
spin_unlock(&svcpt->scp_lock);
spin_lock(&svcpt->scp_lock);
svcpt->scp_nrqbds_posted--;
- cfs_list_del(&rqbd->rqbd_list);
- cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
+ list_del(&rqbd->rqbd_list);
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
/* Don't complain if no request buffers are posted right now; LNET
* won't drop requests because we set the portal lazy! */
ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
struct ptlrpc_service_conf *conf)
{
-#ifdef __KERNEL__
struct ptlrpc_service_thr_conf *tc = &conf->psc_thr;
unsigned init;
unsigned total;
/*
* User wants to increase the number of threads for
- * each CPU core/HT, most likely the factor is larger then
+ * each CPU core/HT, most likely the factor is larger than
* one thread/core because service threads are supposed to
* be blocked by lock or wait for IO.
*/
* have too many threads no matter how many cores/HTs
* there are.
*/
- if (cfs_cpu_ht_nsiblings(0) > 1) { /* weight is # of HTs */
+ if (cpumask_weight(topology_sibling_cpumask(smp_processor_id())) > 1) {
+ /* weight is # of HTs */
/* depress thread factor for hyper-thread */
factor = factor - (factor >> 1) + (factor >> 3);
}
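+	/*
+	 * Illustrative arithmetic: the depressed factor is
+	 * f - f/2 + f/8 = 0.625 * f (with integer truncation), e.g. a
+	 * factor of 8 becomes 8 - 4 + 1 = 5, so hyper-thread siblings
+	 * are weighted at roughly 5/8 of a full core.
+	 */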
weight = cfs_cpt_weight(svc->srv_cptable, 0);
- LASSERT(weight > 0);
for (; factor > 0 && weight > 0; factor--, weight -= fade)
nthrs += min(weight, fade) * factor;
svc->srv_name, nthrs * svc->srv_ncpts,
tc->tc_nthrs_max);
}
-#endif
}
/**
int rc;
svcpt->scp_cpt = cpt;
- CFS_INIT_LIST_HEAD(&svcpt->scp_threads);
+ INIT_LIST_HEAD(&svcpt->scp_threads);
/* rqbd and incoming request queue */
spin_lock_init(&svcpt->scp_lock);
- CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
- CFS_INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
- CFS_INIT_LIST_HEAD(&svcpt->scp_req_incoming);
+ INIT_LIST_HEAD(&svcpt->scp_rqbd_idle);
+ INIT_LIST_HEAD(&svcpt->scp_rqbd_posted);
+ INIT_LIST_HEAD(&svcpt->scp_req_incoming);
init_waitqueue_head(&svcpt->scp_waitq);
/* history request & rqbd list */
- CFS_INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
- CFS_INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
+ INIT_LIST_HEAD(&svcpt->scp_hist_reqs);
+ INIT_LIST_HEAD(&svcpt->scp_hist_rqbds);
/* active requests and hp requests */
spin_lock_init(&svcpt->scp_req_lock);
/* reply states */
spin_lock_init(&svcpt->scp_rep_lock);
- CFS_INIT_LIST_HEAD(&svcpt->scp_rep_active);
-#ifndef __KERNEL__
- CFS_INIT_LIST_HEAD(&svcpt->scp_rep_queue);
-#endif
- CFS_INIT_LIST_HEAD(&svcpt->scp_rep_idle);
+ INIT_LIST_HEAD(&svcpt->scp_rep_active);
+ INIT_LIST_HEAD(&svcpt->scp_rep_idle);
init_waitqueue_head(&svcpt->scp_rep_waitq);
- cfs_atomic_set(&svcpt->scp_nreps_difficult, 0);
+ atomic_set(&svcpt->scp_nreps_difficult, 0);
/* adaptive timeout */
spin_lock_init(&svcpt->scp_at_lock);
/* allocate memory for scp_at_array (ptlrpc_at_array) */
OBD_CPT_ALLOC(array->paa_reqs_array,
- svc->srv_cptable, cpt, sizeof(cfs_list_t) * size);
+ svc->srv_cptable, cpt, sizeof(struct list_head) * size);
if (array->paa_reqs_array == NULL)
return -ENOMEM;
for (index = 0; index < size; index++)
- CFS_INIT_LIST_HEAD(&array->paa_reqs_array[index]);
+ INIT_LIST_HEAD(&array->paa_reqs_array[index]);
OBD_CPT_ALLOC(array->paa_reqs_count,
svc->srv_cptable, cpt, sizeof(__u32) * size);
if (array->paa_reqs_count == NULL)
goto failed;
- cfs_timer_init(&svcpt->scp_at_timer, ptlrpc_at_timer, svcpt);
+ setup_timer(&svcpt->scp_at_timer, ptlrpc_at_timer,
+ (unsigned long)svcpt);
+
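+	/*
+	 * Note: setup_timer() is the pre-4.15 timer API, so the callback
+	 * takes an unsigned long, hence the cast of svcpt. A sketch of the
+	 * matching handler, consistent with how ptlrpc_at_timer() is
+	 * invoked elsewhere in this file:
+	 *
+	 *	static void ptlrpc_at_timer(unsigned long castmeharder)
+	 *	{
+	 *		struct ptlrpc_service_part *svcpt;
+	 *
+	 *		svcpt = (struct ptlrpc_service_part *)castmeharder;
+	 *		...
+	 *	}
+	 */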
/* At SOW, service time should be quick; 10s seems generous. If client
* timeout is less than this, we'll be sending an early reply. */
at_init(&svcpt->scp_at_estimate, 10, 0);
if (array->paa_reqs_array != NULL) {
OBD_FREE(array->paa_reqs_array,
- sizeof(cfs_list_t) * array->paa_size);
+ sizeof(struct list_head) * array->paa_size);
array->paa_reqs_array = NULL;
}
*/
struct ptlrpc_service *
ptlrpc_register_service(struct ptlrpc_service_conf *conf,
- cfs_proc_dir_entry_t *proc_entry)
+ struct kset *parent,
+ struct proc_dir_entry *proc_entry)
{
struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt;
struct ptlrpc_service *service;
spin_lock_init(&service->srv_lock);
service->srv_name = conf->psc_name;
service->srv_watchdog_factor = conf->psc_watchdog_factor;
- CFS_INIT_LIST_HEAD(&service->srv_list); /* for safty of cleanup */
+	INIT_LIST_HEAD(&service->srv_list); /* for safety of cleanup */
/* buffer configuration */
service->srv_nbuf_per_group = test_req_buffer_pressure ?
LASSERT(rc == 0);
mutex_lock(&ptlrpc_all_services_mutex);
- cfs_list_add (&service->srv_list, &ptlrpc_all_services);
+ list_add(&service->srv_list, &ptlrpc_all_services);
mutex_unlock(&ptlrpc_all_services_mutex);
+ if (parent) {
+ rc = ptlrpc_sysfs_register_service(parent, service);
+ if (rc)
+ GOTO(failed, rc);
+ }
+
if (proc_entry != NULL)
ptlrpc_lprocfs_register_service(proc_entry, service);
CDEBUG(D_NET, "%s: Started, listening on portal %d\n",
service->srv_name, service->srv_req_portal);
-#ifdef __KERNEL__
rc = ptlrpc_start_threads(service);
if (rc != 0) {
CERROR("Failed to start threads for service %s: %d\n",
service->srv_name, rc);
GOTO(failed, rc);
}
-#endif
RETURN(service);
failed:
*/
static void ptlrpc_server_free_request(struct ptlrpc_request *req)
{
- LASSERT(cfs_atomic_read(&req->rq_refcount) == 0);
- LASSERT(cfs_list_empty(&req->rq_timed_list));
+ LASSERT(atomic_read(&req->rq_refcount) == 0);
+ LASSERT(list_empty(&req->rq_timed_list));
- /* DEBUG_REQ() assumes the reply state of a request with a valid
- * ref will not be destroyed until that reference is dropped. */
- ptlrpc_req_drop_rs(req);
+ /* DEBUG_REQ() assumes the reply state of a request with a valid
+ * ref will not be destroyed until that reference is dropped. */
+ ptlrpc_req_drop_rs(req);
- sptlrpc_svc_ctx_decref(req);
+ sptlrpc_svc_ctx_decref(req);
- if (req != &req->rq_rqbd->rqbd_req) {
+ if (req != &req->rq_rqbd->rqbd_req) {
/* NB request buffers use an embedded
* req if the incoming req unlinked the
* MD; this isn't one of them! */
struct ptlrpc_request_buffer_desc *rqbd = req->rq_rqbd;
struct ptlrpc_service_part *svcpt = rqbd->rqbd_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
- int refcount;
- cfs_list_t *tmp;
- cfs_list_t *nxt;
+ int refcount;
+ struct list_head *tmp;
+ struct list_head *nxt;
- if (!cfs_atomic_dec_and_test(&req->rq_refcount))
- return;
+ if (!atomic_dec_and_test(&req->rq_refcount))
+ return;
+
+ if (req->rq_session.lc_state == LCS_ENTERED) {
+ lu_context_exit(&req->rq_session);
+ lu_context_fini(&req->rq_session);
+ }
if (req->rq_at_linked) {
spin_lock(&svcpt->scp_at_lock);
spin_unlock(&svcpt->scp_at_lock);
}
- LASSERT(cfs_list_empty(&req->rq_timed_list));
+ LASSERT(list_empty(&req->rq_timed_list));
- /* finalize request */
- if (req->rq_export) {
- class_export_put(req->rq_export);
- req->rq_export = NULL;
- }
+ /* finalize request */
+ if (req->rq_export) {
+ class_export_put(req->rq_export);
+ req->rq_export = NULL;
+ }
spin_lock(&svcpt->scp_lock);
- cfs_list_add(&req->rq_list, &rqbd->rqbd_reqs);
+ list_add(&req->rq_list, &rqbd->rqbd_reqs);
- refcount = --(rqbd->rqbd_refcount);
- if (refcount == 0) {
- /* request buffer is now idle: add to history */
- cfs_list_del(&rqbd->rqbd_list);
+ refcount = --(rqbd->rqbd_refcount);
+ if (refcount == 0) {
+ /* request buffer is now idle: add to history */
+ list_del(&rqbd->rqbd_list);
- cfs_list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
+ list_add_tail(&rqbd->rqbd_list, &svcpt->scp_hist_rqbds);
svcpt->scp_hist_nrqbds++;
/* cull some history?
* I expect only about 1 or 2 rqbds need to be recycled here */
while (svcpt->scp_hist_nrqbds > svc->srv_hist_nrqbds_cpt_max) {
- rqbd = cfs_list_entry(svcpt->scp_hist_rqbds.next,
- struct ptlrpc_request_buffer_desc,
- rqbd_list);
+ rqbd = list_entry(svcpt->scp_hist_rqbds.next,
+ struct ptlrpc_request_buffer_desc,
+ rqbd_list);
- cfs_list_del(&rqbd->rqbd_list);
+ list_del(&rqbd->rqbd_list);
svcpt->scp_hist_nrqbds--;
- /* remove rqbd's reqs from svc's req history while
- * I've got the service lock */
- cfs_list_for_each(tmp, &rqbd->rqbd_reqs) {
- req = cfs_list_entry(tmp, struct ptlrpc_request,
- rq_list);
- /* Track the highest culled req seq */
+ /* remove rqbd's reqs from svc's req history while
+ * I've got the service lock */
+ list_for_each(tmp, &rqbd->rqbd_reqs) {
+ req = list_entry(tmp, struct ptlrpc_request,
+ rq_list);
+ /* Track the highest culled req seq */
if (req->rq_history_seq >
svcpt->scp_hist_seq_culled) {
svcpt->scp_hist_seq_culled =
req->rq_history_seq;
}
- cfs_list_del(&req->rq_history_list);
+ list_del(&req->rq_history_list);
}
spin_unlock(&svcpt->scp_lock);
- cfs_list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
- req = cfs_list_entry(rqbd->rqbd_reqs.next,
- struct ptlrpc_request,
- rq_list);
- cfs_list_del(&req->rq_list);
- ptlrpc_server_free_request(req);
- }
+ list_for_each_safe(tmp, nxt, &rqbd->rqbd_reqs) {
+ req = list_entry(rqbd->rqbd_reqs.next,
+ struct ptlrpc_request,
+ rq_list);
+ list_del(&req->rq_list);
+ ptlrpc_server_free_request(req);
+ }
spin_lock(&svcpt->scp_lock);
/*
* now all reqs including the embedded req have been
- * disposed, schedule request buffer for re-use.
+ * disposed, schedule the request buffer for re-use
+ * or free it to drain some of the excess.
*/
- LASSERT(cfs_atomic_read(&rqbd->rqbd_req.rq_refcount) ==
- 0);
- cfs_list_add_tail(&rqbd->rqbd_list,
- &svcpt->scp_rqbd_idle);
+ LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
+ if (svcpt->scp_nrqbds_posted >=
+ svc->srv_nbuf_per_group &&
+ !test_req_buffer_pressure) {
+ /* like in ptlrpc_free_rqbd() */
+ svcpt->scp_nrqbds_total--;
+ OBD_FREE_LARGE(rqbd->rqbd_buffer,
+ svc->srv_buf_size);
+ OBD_FREE_PTR(rqbd);
+ } else {
+ list_add_tail(&rqbd->rqbd_list,
+ &svcpt->scp_rqbd_idle);
+ }
}
spin_unlock(&svcpt->scp_lock);
} else if (req->rq_reply_state && req->rq_reply_state->rs_prealloc) {
/* If we are low on memory, we are not interested in history */
- cfs_list_del(&req->rq_list);
- cfs_list_del_init(&req->rq_history_list);
+ list_del(&req->rq_list);
+ list_del_init(&req->rq_history_list);
/* Track the highest culled req seq */
if (req->rq_history_seq > svcpt->scp_hist_seq_culled)
struct obd_export *export)
{
if (req->rq_export != NULL) {
- if (!cfs_list_empty(&req->rq_exp_list)) {
- /* remove rq_exp_list from last export */
- spin_lock_bh(&req->rq_export->exp_rpc_lock);
- cfs_list_del_init(&req->rq_exp_list);
- spin_unlock_bh(&req->rq_export->exp_rpc_lock);
-
- /* export has one reference already, so it`s safe to
- * add req to export queue here and get another
- * reference for request later */
- spin_lock_bh(&export->exp_rpc_lock);
- cfs_list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
- spin_unlock_bh(&export->exp_rpc_lock);
- }
+ LASSERT(!list_empty(&req->rq_exp_list));
+ /* remove rq_exp_list from last export */
+ spin_lock_bh(&req->rq_export->exp_rpc_lock);
+ list_del_init(&req->rq_exp_list);
+ spin_unlock_bh(&req->rq_export->exp_rpc_lock);
+	/* export has one reference already, so it's safe to
+ * add req to export queue here and get another
+ * reference for request later */
+ spin_lock_bh(&export->exp_rpc_lock);
+ if (req->rq_ops != NULL) /* hp request */
+ list_add(&req->rq_exp_list, &export->exp_hp_rpcs);
+ else
+ list_add(&req->rq_exp_list, &export->exp_reg_rpcs);
+ spin_unlock_bh(&export->exp_rpc_lock);
+
class_export_rpc_dec(req->rq_export);
class_export_put(req->rq_export);
}
}
/**
- * to finish a active request: stop sending more early replies, and release
+ * to finish an active request: stop sending more early replies, and release
* the request. should be called after we finished handling the request.
*/
static void ptlrpc_server_finish_active_request(
* This function is only called when some export receives a message (i.e.,
* the network is up.)
*/
-static void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
+void ptlrpc_update_export_timer(struct obd_export *exp, long extra_delay)
{
struct obd_export *oldest_exp;
time_t oldest_time, new_time;
RETURN_EXIT;
exp->exp_last_request_time = new_time;
- CDEBUG(D_HA, "updating export %s at "CFS_TIME_T" exp %p\n",
- exp->exp_client_uuid.uuid,
- exp->exp_last_request_time, exp);
- /* exports may get disconnected from the chain even though the
- export has references, so we must keep the spin lock while
- manipulating the lists */
+ /* exports may get disconnected from the chain even though the
+ export has references, so we must keep the spin lock while
+ manipulating the lists */
spin_lock(&exp->exp_obd->obd_dev_lock);
- if (cfs_list_empty(&exp->exp_obd_chain_timed)) {
+ if (list_empty(&exp->exp_obd_chain_timed)) {
/* this one is not timed */
spin_unlock(&exp->exp_obd->obd_dev_lock);
- RETURN_EXIT;
- }
+ RETURN_EXIT;
+ }
- cfs_list_move_tail(&exp->exp_obd_chain_timed,
- &exp->exp_obd->obd_exports_timed);
+ list_move_tail(&exp->exp_obd_chain_timed,
+ &exp->exp_obd->obd_exports_timed);
- oldest_exp = cfs_list_entry(exp->exp_obd->obd_exports_timed.next,
- struct obd_export, exp_obd_chain_timed);
- oldest_time = oldest_exp->exp_last_request_time;
+ oldest_exp = list_entry(exp->exp_obd->obd_exports_timed.next,
+ struct obd_export, exp_obd_chain_timed);
+ oldest_time = oldest_exp->exp_last_request_time;
spin_unlock(&exp->exp_obd->obd_dev_lock);
if (exp->exp_obd->obd_recovering) {
* we better wait for 3. */
exp->exp_obd->obd_eviction_timer =
cfs_time_current_sec() + 3 * PING_INTERVAL;
- CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
- exp->exp_obd->obd_name,
+ CDEBUG(D_HA, "%s: Think about evicting %s from %ld\n",
+ exp->exp_obd->obd_name,
obd_export_nid2str(oldest_exp), oldest_time);
}
} else {
*/
static int ptlrpc_check_req(struct ptlrpc_request *req)
{
- int rc = 0;
+ struct obd_device *obd = req->rq_export->exp_obd;
+ int rc = 0;
if (unlikely(lustre_msg_get_conn_cnt(req->rq_reqmsg) <
req->rq_export->exp_conn_cnt)) {
req->rq_export->exp_conn_cnt);
return -EEXIST;
}
- if (unlikely(req->rq_export->exp_obd &&
- req->rq_export->exp_obd->obd_fail)) {
- /* Failing over, don't handle any more reqs, send
- error response instead. */
- CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
- req, req->rq_export->exp_obd->obd_name);
+ if (unlikely(obd == NULL || obd->obd_fail)) {
+ /* Failing over, don't handle any more reqs,
+ * send error response instead. */
+ CDEBUG(D_RPCTRACE, "Dropping req %p for failed obd %s\n",
+ req, (obd != NULL) ? obd->obd_name : "unknown");
rc = -ENODEV;
} else if (lustre_msg_get_flags(req->rq_reqmsg) &
(MSG_REPLAY | MSG_REQ_REPLAY_DONE) &&
- !(req->rq_export->exp_obd->obd_recovering)) {
+ !obd->obd_recovering) {
DEBUG_REQ(D_ERROR, req,
"Invalid replay without recovery");
class_fail_export(req->rq_export);
rc = -ENODEV;
} else if (lustre_msg_get_transno(req->rq_reqmsg) != 0 &&
- !(req->rq_export->exp_obd->obd_recovering)) {
+ !obd->obd_recovering) {
DEBUG_REQ(D_ERROR, req, "Invalid req with transno "
- LPU64" without recovery",
+ "%llu without recovery",
lustre_msg_get_transno(req->rq_reqmsg));
class_fail_export(req->rq_export);
rc = -ENODEV;
__s32 next;
if (array->paa_count == 0) {
- cfs_timer_disarm(&svcpt->scp_at_timer);
+ del_timer(&svcpt->scp_at_timer);
return;
}
/* Set timer for closest deadline */
- next = (__s32)(array->paa_deadline - cfs_time_current_sec() -
+ next = (__s32)(array->paa_deadline - ktime_get_real_seconds() -
at_early_margin);
if (next <= 0) {
ptlrpc_at_timer((unsigned long)svcpt);
} else {
- cfs_timer_arm(&svcpt->scp_at_timer, cfs_time_shift(next));
+ mod_timer(&svcpt->scp_at_timer, cfs_time_shift(next));
CDEBUG(D_INFO, "armed %s at %+ds\n",
svcpt->scp_service->srv_name, next);
}
return(-ENOSYS);
spin_lock(&svcpt->scp_at_lock);
- LASSERT(cfs_list_empty(&req->rq_timed_list));
+ LASSERT(list_empty(&req->rq_timed_list));
- index = (unsigned long)req->rq_deadline % array->paa_size;
+ div_u64_rem(req->rq_deadline, array->paa_size, &index);
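+	/* div_u64_rem() replaces a plain % here: rq_deadline is 64-bit and
+	 * a native 64-bit modulo is not available on 32-bit kernels, so the
+	 * helper computes the bucket index portably. */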
if (array->paa_reqs_count[index] > 0) {
/* latest rpcs will have the latest deadlines in the list,
* so search backward. */
- cfs_list_for_each_entry_reverse(rq,
+ list_for_each_entry_reverse(rq,
&array->paa_reqs_array[index],
rq_timed_list) {
if (req->rq_deadline >= rq->rq_deadline) {
- cfs_list_add(&req->rq_timed_list,
+ list_add(&req->rq_timed_list,
&rq->rq_timed_list);
break;
}
}
/* Add the request at the head of the list */
- if (cfs_list_empty(&req->rq_timed_list))
- cfs_list_add(&req->rq_timed_list,
+ if (list_empty(&req->rq_timed_list))
+ list_add(&req->rq_timed_list,
&array->paa_reqs_array[index]);
spin_lock(&req->rq_lock);
array = &req->rq_rqbd->rqbd_svcpt->scp_at_array;
/* NB: must be called with svcpt::scp_at_lock held */
- LASSERT(!cfs_list_empty(&req->rq_timed_list));
- cfs_list_del_init(&req->rq_timed_list);
+ LASSERT(!list_empty(&req->rq_timed_list));
+ list_del_init(&req->rq_timed_list);
spin_lock(&req->rq_lock);
req->rq_at_linked = 0;
array->paa_count--;
}
+/*
+ * Attempt to extend the request deadline by sending an early reply to the
+ * client.
+ */
static int ptlrpc_at_send_early_reply(struct ptlrpc_request *req)
{
struct ptlrpc_service_part *svcpt = req->rq_rqbd->rqbd_svcpt;
- struct ptlrpc_request *reqcopy;
- struct lustre_msg *reqmsg;
- cfs_duration_t olddl = req->rq_deadline - cfs_time_current_sec();
- time_t newdl;
- int rc;
- ENTRY;
+ struct ptlrpc_request *reqcopy;
+ struct lustre_msg *reqmsg;
+ time64_t olddl = req->rq_deadline - ktime_get_real_seconds();
+ time64_t newdl;
+ int rc;
+
+ ENTRY;
+
+ if (CFS_FAIL_CHECK(OBD_FAIL_TGT_REPLAY_RECONNECT)) {
+ /* don't send early reply */
+ RETURN(1);
+ }
/* deadline is when the client expects us to reply, margin is the
difference between clients' and servers' expectations */
DEBUG_REQ(D_ADAPTTO, req,
- "%ssending early reply (deadline %+lds, margin %+lds) for "
+ "%ssending early reply (deadline %+llds, margin %+llds) for "
"%d+%d", AT_OFF ? "AT off - not " : "",
- olddl, olddl - at_get(&svcpt->scp_at_estimate),
+ (s64)olddl, (s64)(olddl - at_get(&svcpt->scp_at_estimate)),
at_get(&svcpt->scp_at_estimate), at_extra);
if (AT_OFF)
RETURN(0);
if (olddl < 0) {
- DEBUG_REQ(D_WARNING, req, "Already past deadline (%+lds), "
+ DEBUG_REQ(D_WARNING, req, "Already past deadline (%+llds), "
"not sending early reply. Consider increasing "
- "at_early_margin (%d)?", olddl, at_early_margin);
+ "at_early_margin (%d)?", (s64)olddl, at_early_margin);
/* Return an error so we're not re-added to the timed list. */
RETURN(-ETIMEDOUT);
RETURN(-ENOSYS);
}
- if (req->rq_export &&
- lustre_msg_get_flags(req->rq_reqmsg) &
- (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
- /* During recovery, we don't want to send too many early
- * replies, but on the other hand we want to make sure the
- * client has enough time to resend if the rpc is lost. So
- * during the recovery period send at least 4 early replies,
- * spacing them every at_extra if we can. at_estimate should
- * always equal this fixed value during recovery. */
- at_measured(&svcpt->scp_at_estimate, min(at_extra,
- req->rq_export->exp_obd->obd_recovery_timeout / 4));
+ if (req->rq_export &&
+ lustre_msg_get_flags(req->rq_reqmsg) &
+ (MSG_REPLAY | MSG_REQ_REPLAY_DONE | MSG_LOCK_REPLAY_DONE)) {
+ struct obd_device *obd_exp = req->rq_export->exp_obd;
+
+ /* During recovery, we don't want to send too many early
+ * replies, but on the other hand we want to make sure the
+ * client has enough time to resend if the rpc is lost. So
+ * during the recovery period send at least 4 early replies,
+ * spacing them every at_extra if we can. at_estimate should
+ * always equal this fixed value during recovery.
+ */
+		/* Don't account request processing time into AT history
+		 * during recovery: it is not the service time we need,
+		 * since it also includes the waiting time of recovering
+		 * clients.
+		 */
+ newdl = min_t(time64_t, at_extra,
+ obd_exp->obd_recovery_timeout / 4) +
+ ktime_get_real_seconds();
} else {
- /* Fake our processing time into the future to ask the clients
- * for some extra amount of time */
+ /* We want to extend the request deadline by at_extra seconds,
+ * so we set our service estimate to reflect how much time has
+ * passed since this request arrived plus an additional
+ * at_extra seconds. The client will calculate the new deadline
+ * based on this service estimate (plus some additional time to
+ * account for network latency). See ptlrpc_at_recv_early_reply
+ */
at_measured(&svcpt->scp_at_estimate, at_extra +
- cfs_time_current_sec() -
+ ktime_get_real_seconds() -
req->rq_arrival_time.tv_sec);
+ newdl = req->rq_arrival_time.tv_sec +
+ at_get(&svcpt->scp_at_estimate);
+ }
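+	/*
+	 * Worked example with illustrative numbers: if the request arrived
+	 * at T = 1000s, at_extra = 30 and 25s have already elapsed, then
+	 * at_measured() is fed 30 + 25 = 55s, so the estimate becomes at
+	 * least 55s and newdl = 1000 + at_get(...) >= 1055s, i.e. roughly
+	 * at_extra seconds beyond "now".
+	 */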
- /* Check to see if we've actually increased the deadline -
- * we may be past adaptive_max */
- if (req->rq_deadline >= req->rq_arrival_time.tv_sec +
- at_get(&svcpt->scp_at_estimate)) {
- DEBUG_REQ(D_WARNING, req, "Couldn't add any time "
- "(%ld/%ld), not sending early reply\n",
- olddl, req->rq_arrival_time.tv_sec +
- at_get(&svcpt->scp_at_estimate) -
- cfs_time_current_sec());
- RETURN(-ETIMEDOUT);
- }
+ /* Check to see if we've actually increased the deadline -
+ * we may be past adaptive_max */
+ if (req->rq_deadline >= newdl) {
+ DEBUG_REQ(D_WARNING, req, "Couldn't add any time (%lld/%lld), not sending early reply\n",
+ (s64)olddl, (s64)(newdl - ktime_get_real_seconds()));
+ RETURN(-ETIMEDOUT);
}
- newdl = cfs_time_current_sec() + at_get(&svcpt->scp_at_estimate);
- reqcopy = ptlrpc_request_cache_alloc(__GFP_IO);
+ reqcopy = ptlrpc_request_cache_alloc(GFP_NOFS);
if (reqcopy == NULL)
RETURN(-ENOMEM);
OBD_ALLOC_LARGE(reqmsg, req->rq_reqlen);
reqcopy->rq_reqmsg = reqmsg;
memcpy(reqmsg, req->rq_reqmsg, req->rq_reqlen);
- LASSERT(cfs_atomic_read(&req->rq_refcount));
- /** if it is last refcount then early reply isn't needed */
- if (cfs_atomic_read(&req->rq_refcount) == 1) {
- DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
- "abort sending early reply\n");
- GOTO(out, rc = -EINVAL);
- }
+ /*
+ * tgt_brw_read() and tgt_brw_write() may have decided not to reply.
+ * Without this check, we would fail the rq_no_reply assertion in
+ * ptlrpc_send_reply().
+ */
+ if (reqcopy->rq_no_reply)
+ GOTO(out, rc = -ETIMEDOUT);
+
+ LASSERT(atomic_read(&req->rq_refcount));
+ /** if it is last refcount then early reply isn't needed */
+ if (atomic_read(&req->rq_refcount) == 1) {
+ DEBUG_REQ(D_ADAPTTO, reqcopy, "Normal reply already sent out, "
+ "abort sending early reply\n");
+ GOTO(out, rc = -EINVAL);
+ }
/* Connection ref */
reqcopy->rq_export = class_conn2export(
rc = ptlrpc_send_reply(reqcopy, PTLRPC_REPLY_EARLY);
- if (!rc) {
- /* Adjust our own deadline to what we told the client */
- req->rq_deadline = newdl;
- req->rq_early_count++; /* number sent, server side */
- } else {
- DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
- }
+ if (!rc) {
+ /* Adjust our own deadline to what we told the client */
+ req->rq_deadline = newdl;
+ req->rq_early_count++; /* number sent, server side */
+ } else {
+ DEBUG_REQ(D_ERROR, req, "Early reply send failed %d", rc);
+ }
/* Free the (early) reply state from lustre_pack_reply.
(ptlrpc_send_reply takes its own rs ref, so this is safe here) */
{
struct ptlrpc_at_array *array = &svcpt->scp_at_array;
struct ptlrpc_request *rq, *n;
- cfs_list_t work_list;
+ struct list_head work_list;
__u32 index, count;
- time_t deadline;
- time_t now = cfs_time_current_sec();
+ time64_t deadline;
+ time64_t now = ktime_get_real_seconds();
cfs_duration_t delay;
int first, counter = 0;
ENTRY;
RETURN(0);
}
- /* We're close to a timeout, and we don't know how much longer the
- server will take. Send early replies to everyone expiring soon. */
- CFS_INIT_LIST_HEAD(&work_list);
- deadline = -1;
- index = (unsigned long)array->paa_deadline % array->paa_size;
- count = array->paa_count;
- while (count > 0) {
- count -= array->paa_reqs_count[index];
- cfs_list_for_each_entry_safe(rq, n,
- &array->paa_reqs_array[index],
- rq_timed_list) {
+ /* We're close to a timeout, and we don't know how much longer the
+ server will take. Send early replies to everyone expiring soon. */
+ INIT_LIST_HEAD(&work_list);
+ deadline = -1;
+ div_u64_rem(array->paa_deadline, array->paa_size, &index);
+ count = array->paa_count;
+ while (count > 0) {
+ count -= array->paa_reqs_count[index];
+ list_for_each_entry_safe(rq, n,
+ &array->paa_reqs_array[index],
+ rq_timed_list) {
if (rq->rq_deadline > now + at_early_margin) {
/* update the earliest deadline */
if (deadline == -1 ||
* refcount to 0 already. Let's check this and
* don't add entry to work_list
*/
- if (likely(cfs_atomic_inc_not_zero(&rq->rq_refcount)))
- cfs_list_add(&rq->rq_timed_list, &work_list);
+ if (likely(atomic_inc_not_zero(&rq->rq_refcount)))
+ list_add(&rq->rq_timed_list, &work_list);
counter++;
- }
+ }
- if (++index >= array->paa_size)
- index = 0;
- }
- array->paa_deadline = deadline;
+ if (++index >= array->paa_size)
+ index = 0;
+ }
+ array->paa_deadline = deadline;
/* we have a new earliest deadline, restart the timer */
ptlrpc_at_set_timer(svcpt);
LCONSOLE_WARN("%s: This server is not able to keep up with "
"request traffic (cpu-bound).\n",
svcpt->scp_service->srv_name);
- CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, "
- "delay="CFS_DURATION_T"(jiff)\n",
+ CWARN("earlyQ=%d reqQ=%d recA=%d, svcEst=%d, delay=%ld(jiff)\n",
counter, svcpt->scp_nreqs_incoming,
svcpt->scp_nreqs_active,
at_get(&svcpt->scp_at_estimate), delay);
/* we took additional refcount so entries can't be deleted from list, no
* locking is needed */
- while (!cfs_list_empty(&work_list)) {
- rq = cfs_list_entry(work_list.next, struct ptlrpc_request,
+ while (!list_empty(&work_list)) {
+ rq = list_entry(work_list.next, struct ptlrpc_request,
rq_timed_list);
- cfs_list_del_init(&rq->rq_timed_list);
+ list_del_init(&rq->rq_timed_list);
if (ptlrpc_at_send_early_reply(rq) == 0)
ptlrpc_at_add_timed(rq);
RETURN(1); /* return "did_something" for liblustre */
}
+/* Check if we are already handling an earlier incarnation of this request.
+ * Called with &req->rq_export->exp_rpc_lock held */
+static int ptlrpc_server_check_resend_in_progress(struct ptlrpc_request *req)
+{
+ struct ptlrpc_request *tmp = NULL;
+
+ if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) ||
+ (atomic_read(&req->rq_export->exp_rpc_count) == 0))
+ return 0;
+
+	/* bulk requests are aborted upon reconnect, don't try to
+	 * find a match */
+ if (req->rq_bulk_write || req->rq_bulk_read)
+ return 0;
+
+	/* This list should not be longer than max_requests in
+	 * flight on the client, so it is not all that long.
+	 * Also we only hit this codepath for resent requests,
+	 * which makes it even more rarely hit */
+ list_for_each_entry(tmp, &req->rq_export->exp_reg_rpcs,
+ rq_exp_list) {
+ /* Found duplicate one */
+ if (tmp->rq_xid == req->rq_xid)
+ goto found;
+ }
+ list_for_each_entry(tmp, &req->rq_export->exp_hp_rpcs,
+ rq_exp_list) {
+ /* Found duplicate one */
+ if (tmp->rq_xid == req->rq_xid)
+ goto found;
+ }
+ return 0;
+
+found:
+ DEBUG_REQ(D_HA, req, "Found duplicate req in processing");
+ DEBUG_REQ(D_HA, tmp, "Request being processed");
+ return -EBUSY;
+}
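+
+/*
+ * Usage sketch (see ptlrpc_server_request_add() below): the duplicate
+ * lookup and the insertion of the new request must be atomic with
+ * respect to concurrent resends, so both run under exp_rpc_lock:
+ *
+ *	spin_lock_bh(&exp->exp_rpc_lock);
+ *	rc = ptlrpc_server_check_resend_in_progress(req);
+ *	if (rc == 0)
+ *		list_add(&req->rq_exp_list, &exp->exp_reg_rpcs);
+ *	spin_unlock_bh(&exp->exp_rpc_lock);
+ */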
+
/**
- * Put the request to the export list if the request may become
- * a high priority one.
+ * Check if a request should be assigned a high priority.
+ *
+ * \retval < 0: error occurred
+ * 0: normal RPC request
+ * +1: high priority request
*/
static int ptlrpc_server_hpreq_init(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req)
int rc = 0;
ENTRY;
- if (svcpt->scp_service->srv_ops.so_hpreq_handler) {
+ if (svcpt->scp_service->srv_ops.so_hpreq_handler != NULL) {
rc = svcpt->scp_service->srv_ops.so_hpreq_handler(req);
if (rc < 0)
RETURN(rc);
+
LASSERT(rc == 0);
}
- if (req->rq_export && req->rq_ops) {
- /* Perform request specific check. We should do this check
- * before the request is added into exp_hp_rpcs list otherwise
- * it may hit swab race at LU-1044. */
- if (req->rq_ops->hpreq_check) {
+
+ if (req->rq_export != NULL && req->rq_ops != NULL) {
+ /* Perform request specific check. We should do this
+ * check before the request is added into exp_hp_rpcs
+ * list otherwise it may hit swab race at LU-1044. */
+ if (req->rq_ops->hpreq_check != NULL) {
rc = req->rq_ops->hpreq_check(req);
- /**
- * XXX: Out of all current
- * ptlrpc_hpreq_ops::hpreq_check(), only
- * ldlm_cancel_hpreq_check() can return an error code;
- * other functions assert in similar places, which seems
- * odd. What also does not seem right is that handlers
- * for those RPCs do not assert on the same checks, but
- * rather handle the error cases. e.g. see
- * ost_rw_hpreq_check(), and ost_brw_read(),
- * ost_brw_write().
- */
- if (rc < 0)
- RETURN(rc);
- LASSERT(rc == 0 || rc == 1);
+ if (rc == -ESTALE) {
+ req->rq_status = rc;
+ ptlrpc_error(req);
+ }
+			/* can only return an error,
+			 * 0 for a normal request,
+			 * or 1 for a high priority request */
+ LASSERT(rc <= 1);
}
-
- spin_lock_bh(&req->rq_export->exp_rpc_lock);
- cfs_list_add(&req->rq_exp_list,
- &req->rq_export->exp_hp_rpcs);
- spin_unlock_bh(&req->rq_export->exp_rpc_lock);
}
- ptlrpc_nrs_req_initialize(svcpt, req, rc);
-
RETURN(rc);
}
/** Remove the request from the export list. */
static void ptlrpc_server_hpreq_fini(struct ptlrpc_request *req)
{
- ENTRY;
- if (req->rq_export && req->rq_ops) {
- /* refresh lock timeout again so that client has more
- * room to send lock cancel RPC. */
- if (req->rq_ops->hpreq_fini)
- req->rq_ops->hpreq_fini(req);
+ ENTRY;
+ if (req->rq_export) {
+ /* refresh lock timeout again so that client has more
+ * room to send lock cancel RPC. */
+ if (req->rq_ops && req->rq_ops->hpreq_fini)
+ req->rq_ops->hpreq_fini(req);
spin_lock_bh(&req->rq_export->exp_rpc_lock);
- cfs_list_del_init(&req->rq_exp_list);
+ list_del_init(&req->rq_exp_list);
spin_unlock_bh(&req->rq_export->exp_rpc_lock);
}
EXIT;
static int ptlrpc_server_request_add(struct ptlrpc_service_part *svcpt,
struct ptlrpc_request *req)
{
- int rc;
+ int rc;
+ bool hp;
ENTRY;
rc = ptlrpc_server_hpreq_init(svcpt, req);
if (rc < 0)
RETURN(rc);
- ptlrpc_nrs_req_add(svcpt, req, !!rc);
+ hp = rc > 0;
+ ptlrpc_nrs_req_initialize(svcpt, req, hp);
+
+ if (req->rq_export != NULL) {
+ struct obd_export *exp = req->rq_export;
+
+		/* search for a duplicated xid and add the request to the
+		 * list atomically */
+ spin_lock_bh(&exp->exp_rpc_lock);
+ rc = ptlrpc_server_check_resend_in_progress(req);
+ if (rc < 0) {
+ spin_unlock_bh(&exp->exp_rpc_lock);
+
+ ptlrpc_nrs_req_finalize(req);
+ RETURN(rc);
+ }
+
+ if (hp || req->rq_ops != NULL)
+ list_add(&req->rq_exp_list, &exp->exp_hp_rpcs);
+ else
+ list_add(&req->rq_exp_list, &exp->exp_reg_rpcs);
+ spin_unlock_bh(&exp->exp_rpc_lock);
+ }
+
+	/* from now on, the current thread is not the processing thread for
+	 * this request; the request is in exp_hp_list and can be found
+	 * there. Remove all relations between the request and the old
+	 * thread. */
+ req->rq_svc_thread->t_env->le_ses = NULL;
+ req->rq_svc_thread = NULL;
+ req->rq_session.lc_thread = NULL;
+
+ ptlrpc_nrs_req_add(svcpt, req, hp);
RETURN(0);
}
if (force)
return true;
+ if (ptlrpc_nrs_req_throttling_nolock(svcpt, true))
+ return false;
+
if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
/* leave just 1 thread for normal RPCs */
bool force)
{
int running = svcpt->scp_nthrs_running;
-#ifndef __KERNEL__
- if (1) /* always allow to handle normal request for liblustre */
- return true;
-#endif
if (unlikely(svcpt->scp_service->srv_req_portal == MDS_REQUEST_PORTAL &&
CFS_FAIL_PRECHECK(OBD_FAIL_PTLRPC_CANCEL_RESEND))) {
/* leave just 1 thread for normal RPCs */
running += 1;
}
- if (force ||
- svcpt->scp_nreqs_active < running - 2)
+ if (force)
+ return true;
+
+ if (ptlrpc_nrs_req_throttling_nolock(svcpt, false))
+ return false;
+
+ if (svcpt->scp_nreqs_active < running - 2)
return true;
if (svcpt->scp_nreqs_active >= running - 1)
ENTRY;
spin_lock(&svcpt->scp_req_lock);
-#ifndef __KERNEL__
- /* !@%$# liblustre only has 1 thread */
- if (cfs_atomic_read(&svcpt->scp_nreps_difficult) != 0) {
- spin_unlock(&svcpt->scp_req_lock);
- RETURN(NULL);
- }
-#endif
if (ptlrpc_server_high_pending(svcpt, force)) {
req = ptlrpc_nrs_req_get_nolock(svcpt, true, force);
ENTRY;
spin_lock(&svcpt->scp_lock);
- if (cfs_list_empty(&svcpt->scp_req_incoming)) {
+ if (list_empty(&svcpt->scp_req_incoming)) {
spin_unlock(&svcpt->scp_lock);
RETURN(0);
}
- req = cfs_list_entry(svcpt->scp_req_incoming.next,
+ req = list_entry(svcpt->scp_req_incoming.next,
struct ptlrpc_request, rq_list);
- cfs_list_del_init(&req->rq_list);
+ list_del_init(&req->rq_list);
svcpt->scp_nreqs_incoming--;
/* Consider this still a "queued" request as far as stats are
* concerned */
rc = ptlrpc_unpack_req_msg(req, req->rq_reqlen);
if (rc != 0) {
CERROR("error unpacking request: ptl %d from %s "
- "x"LPU64"\n", svc->srv_req_portal,
+ "x%llu\n", svc->srv_req_portal,
libcfs_id2str(req->rq_peer), req->rq_xid);
goto err_req;
}
rc = lustre_unpack_req_ptlrpc_body(req, MSG_PTLRPC_BODY_OFF);
if (rc) {
CERROR ("error unpacking ptlrpc body: ptl %d from %s x"
- LPU64"\n", svc->srv_req_portal,
+ "%llu\n", svc->srv_req_portal,
libcfs_id2str(req->rq_peer), req->rq_xid);
goto err_req;
}
if (OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DROP_REQ_OPC) &&
lustre_msg_get_opc(req->rq_reqmsg) == cfs_fail_val) {
- CERROR("drop incoming rpc opc %u, x"LPU64"\n",
+ CERROR("drop incoming rpc opc %u, x%llu\n",
cfs_fail_val, req->rq_xid);
goto err_req;
}
goto err_req;
}
- switch(lustre_msg_get_opc(req->rq_reqmsg)) {
- case MDS_WRITEPAGE:
- case OST_WRITE:
- req->rq_bulk_write = 1;
- break;
- case MDS_READPAGE:
- case OST_READ:
- case MGS_CONFIG_READ:
- req->rq_bulk_read = 1;
- break;
- }
+ switch (lustre_msg_get_opc(req->rq_reqmsg)) {
+ case MDS_WRITEPAGE:
+ case OST_WRITE:
+ case OUT_UPDATE:
+ req->rq_bulk_write = 1;
+ break;
+ case MDS_READPAGE:
+ case OST_READ:
+ case MGS_CONFIG_READ:
+ req->rq_bulk_read = 1;
+ break;
+ }
- CDEBUG(D_RPCTRACE, "got req x"LPU64"\n", req->rq_xid);
+ CDEBUG(D_RPCTRACE, "got req x%llu\n", req->rq_xid);
req->rq_export = class_conn2export(
lustre_msg_get_handle(req->rq_reqmsg));
}
/* req_in handling should/must be fast */
- if (cfs_time_current_sec() - req->rq_arrival_time.tv_sec > 5)
- DEBUG_REQ(D_WARNING, req, "Slow req_in handling "CFS_DURATION_T"s",
- cfs_time_sub(cfs_time_current_sec(),
- req->rq_arrival_time.tv_sec));
+ if (ktime_get_real_seconds() - req->rq_arrival_time.tv_sec > 5)
+ DEBUG_REQ(D_WARNING, req, "Slow req_in handling %llds",
+ (s64)(ktime_get_real_seconds() -
+ req->rq_arrival_time.tv_sec));
/* Set rpc server deadline and add it to the timed list */
deadline = (lustre_msghdr_get_flags(req->rq_reqmsg) &
MSGHDR_AT_SUPPORT) ?
/* The max time the client expects us to take */
lustre_msg_get_timeout(req->rq_reqmsg) : obd_timeout;
+
req->rq_deadline = req->rq_arrival_time.tv_sec + deadline;
if (unlikely(deadline == 0)) {
DEBUG_REQ(D_ERROR, req, "Dropping request with 0 timeout");
goto err_req;
}
- req->rq_svc_thread = thread;
+ /* Skip early reply */
+ if (OBD_FAIL_PRECHECK(OBD_FAIL_MDS_RESEND))
+ req->rq_deadline += obd_timeout;
+
+ req->rq_svc_thread = thread;
+ if (thread != NULL) {
+		/* initialize the request session; it is needed for request
+		 * processing by the target */
+ rc = lu_context_init(&req->rq_session, LCT_SERVER_SESSION |
+ LCT_NOREF);
+ if (rc) {
+ CERROR("%s: failure to initialize session: rc = %d\n",
+ thread->t_name, rc);
+ goto err_req;
+ }
+ req->rq_session.lc_thread = thread;
+ lu_context_enter(&req->rq_session);
+ thread->t_env->le_ses = &req->rq_session;
+ }
- ptlrpc_at_add_timed(req);
+ ptlrpc_at_add_timed(req);
- /* Move it over to the request processing queue */
+ /* Move it over to the request processing queue */
rc = ptlrpc_server_request_add(svcpt, req);
if (rc)
GOTO(err_req, rc);
struct ptlrpc_thread *thread)
{
struct ptlrpc_service *svc = svcpt->scp_service;
- struct ptlrpc_request *request;
- struct timeval work_start;
- struct timeval work_end;
- long timediff;
- int rc;
- int fail_opc = 0;
- ENTRY;
+ struct ptlrpc_request *request;
+ ktime_t work_start;
+ ktime_t work_end;
+ ktime_t arrived;
+ s64 timediff_usecs;
+ s64 arrived_usecs;
+ int fail_opc = 0;
+
+ ENTRY;
request = ptlrpc_server_request_get(svcpt, false);
if (request == NULL)
if(OBD_FAIL_CHECK(OBD_FAIL_PTLRPC_DUMP_LOG))
libcfs_debug_dumplog();
- do_gettimeofday(&work_start);
- timediff = cfs_timeval_sub(&work_start, &request->rq_arrival_time,NULL);
+ work_start = ktime_get_real();
+ arrived = timespec64_to_ktime(request->rq_arrival_time);
+ timediff_usecs = ktime_us_delta(work_start, arrived);
if (likely(svc->srv_stats != NULL)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
- timediff);
+ timediff_usecs);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQQDEPTH_CNTR,
svcpt->scp_nreqs_incoming);
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQACTIVE_CNTR,
at_get(&svcpt->scp_at_estimate));
}
- rc = lu_context_init(&request->rq_session, LCT_SESSION | LCT_NOREF);
- if (rc) {
- CERROR("Failure to initialize session: %d\n", rc);
- goto out_req;
- }
- request->rq_session.lc_thread = thread;
- request->rq_session.lc_cookie = 0x5;
- lu_context_enter(&request->rq_session);
-
- CDEBUG(D_NET, "got req "LPU64"\n", request->rq_xid);
-
- request->rq_svc_thread = thread;
- if (thread)
- request->rq_svc_thread->t_env->le_ses = &request->rq_session;
-
- if (likely(request->rq_export)) {
+ if (likely(request->rq_export)) {
if (unlikely(ptlrpc_check_req(request)))
goto put_conn;
- ptlrpc_update_export_timer(request->rq_export, timediff >> 19);
+ ptlrpc_update_export_timer(request->rq_export,
+ timediff_usecs >> 19);
}
/* Discard requests queued for longer than the deadline.
The deadline is increased if we send an early reply. */
- if (cfs_time_current_sec() > request->rq_deadline) {
- DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s"
- ": deadline "CFS_DURATION_T":"CFS_DURATION_T"s ago\n",
+ if (ktime_get_real_seconds() > request->rq_deadline) {
+ DEBUG_REQ(D_ERROR, request, "Dropping timed-out request from %s: deadline %lld:%llds ago\n",
libcfs_id2str(request->rq_peer),
- cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
- cfs_time_sub(cfs_time_current_sec(),
- request->rq_deadline));
+ request->rq_deadline -
+ request->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - request->rq_deadline);
goto put_conn;
}
CDEBUG(D_RPCTRACE, "Handling RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d\n", current_comm(),
+ "%s:%s+%d:%d:x%llu:%s:%d\n", current_comm(),
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
- cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
+ atomic_read(&request->rq_export->exp_refcount) : -99),
lustre_msg_get_status(request->rq_reqmsg), request->rq_xid,
libcfs_id2str(request->rq_peer),
lustre_msg_get_opc(request->rq_reqmsg));
if (lustre_msg_get_opc(request->rq_reqmsg) != OBD_PING)
CFS_FAIL_TIMEOUT_MS(OBD_FAIL_PTLRPC_PAUSE_REQ, cfs_fail_val);
- rc = svc->srv_ops.so_req_handler(request);
+ CDEBUG(D_NET, "got req %llu\n", request->rq_xid);
- ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
+	/* re-assign request and session thread to the current one */
+ request->rq_svc_thread = thread;
+ if (thread != NULL) {
+ LASSERT(request->rq_session.lc_thread == NULL);
+ request->rq_session.lc_thread = thread;
+ thread->t_env->le_ses = &request->rq_session;
+ }
+ svc->srv_ops.so_req_handler(request);
+
+ ptlrpc_rqphase_move(request, RQ_PHASE_COMPLETE);
put_conn:
- lu_context_exit(&request->rq_session);
- lu_context_fini(&request->rq_session);
-
- if (unlikely(cfs_time_current_sec() > request->rq_deadline)) {
- DEBUG_REQ(D_WARNING, request, "Request took longer "
- "than estimated ("CFS_DURATION_T":"CFS_DURATION_T"s);"
- " client may timeout.",
- cfs_time_sub(request->rq_deadline,
- request->rq_arrival_time.tv_sec),
- cfs_time_sub(cfs_time_current_sec(),
- request->rq_deadline));
- }
-
- do_gettimeofday(&work_end);
- timediff = cfs_timeval_sub(&work_end, &work_start, NULL);
- CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc "
- "%s:%s+%d:%d:x"LPU64":%s:%d Request procesed in "
- "%ldus (%ldus total) trans "LPU64" rc %d/%d\n",
+ if (unlikely(ktime_get_real_seconds() > request->rq_deadline)) {
+		DEBUG_REQ(D_WARNING, request, "Request took longer than estimated (%lld:%llds); client may time out.",
+ request->rq_deadline -
+ request->rq_arrival_time.tv_sec,
+ ktime_get_real_seconds() - request->rq_deadline);
+ }
+
+ work_end = ktime_get_real();
+ timediff_usecs = ktime_us_delta(work_end, work_start);
+ arrived_usecs = ktime_us_delta(work_end, arrived);
+ CDEBUG(D_RPCTRACE, "Handled RPC pname:cluuid+ref:pid:xid:nid:opc %s:%s+%d:%d:x%llu:%s:%d Request procesed in %lldus (%lldus total) trans %llu rc %d/%d\n",
current_comm(),
(request->rq_export ?
(char *)request->rq_export->exp_client_uuid.uuid : "0"),
(request->rq_export ?
- cfs_atomic_read(&request->rq_export->exp_refcount) : -99),
+ atomic_read(&request->rq_export->exp_refcount) : -99),
lustre_msg_get_status(request->rq_reqmsg),
request->rq_xid,
libcfs_id2str(request->rq_peer),
lustre_msg_get_opc(request->rq_reqmsg),
- timediff,
- cfs_timeval_sub(&work_end, &request->rq_arrival_time, NULL),
+ timediff_usecs,
+ arrived_usecs,
(request->rq_repmsg ?
lustre_msg_get_transno(request->rq_repmsg) :
request->rq_transno),
LASSERT(opc < LUSTRE_MAX_OPCODES);
lprocfs_counter_add(svc->srv_stats,
opc + EXTRA_MAX_OPCODES,
- timediff);
+ timediff_usecs);
}
}
if (unlikely(request->rq_early_count)) {
DEBUG_REQ(D_ADAPTTO, request,
- "sent %d early replies before finishing in "
- CFS_DURATION_T"s",
+ "sent %d early replies before finishing in %llds",
request->rq_early_count,
- cfs_time_sub(work_end.tv_sec,
- request->rq_arrival_time.tv_sec));
+ arrived_usecs / USEC_PER_SEC);
}
-out_req:
ptlrpc_server_finish_active_request(svcpt, request);
RETURN(1);
{
struct ptlrpc_service_part *svcpt = rs->rs_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
- struct obd_export *exp;
- int nlocks;
- int been_handled;
- ENTRY;
-
- exp = rs->rs_export;
+ struct obd_export *exp;
+ int nlocks;
+ int been_handled;
+ ENTRY;
- LASSERT (rs->rs_difficult);
- LASSERT (rs->rs_scheduled);
- LASSERT (cfs_list_empty(&rs->rs_list));
+ exp = rs->rs_export;
+
+ LASSERT(rs->rs_difficult);
+ LASSERT(rs->rs_scheduled);
+ LASSERT(list_empty(&rs->rs_list));
+
+ /* The disk commit callback holds exp_uncommitted_replies_lock while it
+ * iterates over newly committed replies, removing them from
+ * exp_uncommitted_replies. It then drops this lock and schedules the
+ * replies it found for handling here.
+ *
+ * We can avoid contention for exp_uncommitted_replies_lock between the
+ * HRT threads and further commit callbacks by checking rs_committed
+ * which is set in the commit callback while it holds both
+ * rs_lock and exp_uncommitted_reples.
+ *
+ * If we see rs_committed clear, the commit callback _may_ not have
+ * handled this reply yet and we race with it to grab
+ * exp_uncommitted_replies_lock before removing the reply from
+ * exp_uncommitted_replies. Note that if we lose the race and the
+ * reply has already been removed, list_del_init() is a noop.
+ *
+ * If we see rs_committed set, we know the commit callback is handling,
+ * or has handled this reply since store reordering might allow us to
+ * see rs_committed set out of sequence. But since this is done
+ * holding rs_lock, we can be sure it has all completed once we hold
+ * rs_lock, which we do right next.
+ */
+ if (!rs->rs_committed) {
+		/* if rs was committed, there is no need to convert locks; don't
+		 * check rs_committed here because rs may never be added into
+		 * exp_uncommitted_replies and this flag may never be set, see
+		 * target_send_reply() */
+ if (rs->rs_convert_lock &&
+ rs->rs_transno > exp->exp_last_committed) {
+ struct ldlm_lock *lock;
+ struct ldlm_lock *ack_locks[RS_MAX_LOCKS] = { NULL };
- spin_lock(&exp->exp_lock);
- /* Noop if removed already */
- cfs_list_del_init (&rs->rs_exp_list);
- spin_unlock(&exp->exp_lock);
+ spin_lock(&rs->rs_lock);
+ if (rs->rs_convert_lock &&
+ rs->rs_transno > exp->exp_last_committed) {
+ nlocks = rs->rs_nlocks;
+ while (nlocks-- > 0) {
+				/*
+				 * NB don't assume rs is always handled
+				 * by the same service thread (see
+				 * ptlrpc_hr_select()); REP-ACK hr may
+				 * race with the transaction commit, and
+				 * the latter will release the locks, so
+				 * take the locks here early to convert
+				 * them to COS mode safely.
+				 */
+ lock = ldlm_handle2lock(
+ &rs->rs_locks[nlocks]);
+ LASSERT(lock);
+ ack_locks[nlocks] = lock;
+ rs->rs_modes[nlocks] = LCK_COS;
+ }
+ nlocks = rs->rs_nlocks;
+ rs->rs_convert_lock = 0;
+				/* clear rs_scheduled so that the commit
+				 * callback can schedule again */
+ rs->rs_scheduled = 0;
+ spin_unlock(&rs->rs_lock);
+
+ while (nlocks-- > 0) {
+ lock = ack_locks[nlocks];
+ ldlm_lock_downgrade(lock, LCK_COS);
+ LDLM_LOCK_PUT(lock);
+ }
+ RETURN(0);
+ }
+ spin_unlock(&rs->rs_lock);
+ }
- /* The disk commit callback holds exp_uncommitted_replies_lock while it
- * iterates over newly committed replies, removing them from
- * exp_uncommitted_replies. It then drops this lock and schedules the
- * replies it found for handling here.
- *
- * We can avoid contention for exp_uncommitted_replies_lock between the
- * HRT threads and further commit callbacks by checking rs_committed
- * which is set in the commit callback while it holds both
- * rs_lock and exp_uncommitted_reples.
- *
- * If we see rs_committed clear, the commit callback _may_ not have
- * handled this reply yet and we race with it to grab
- * exp_uncommitted_replies_lock before removing the reply from
- * exp_uncommitted_replies. Note that if we lose the race and the
- * reply has already been removed, list_del_init() is a noop.
- *
- * If we see rs_committed set, we know the commit callback is handling,
- * or has handled this reply since store reordering might allow us to
- * see rs_committed set out of sequence. But since this is done
- * holding rs_lock, we can be sure it has all completed once we hold
- * rs_lock, which we do right next.
- */
- if (!rs->rs_committed) {
spin_lock(&exp->exp_uncommitted_replies_lock);
- cfs_list_del_init(&rs->rs_obd_list);
+ list_del_init(&rs->rs_obd_list);
spin_unlock(&exp->exp_uncommitted_replies_lock);
}
+ spin_lock(&exp->exp_lock);
+ /* Noop if removed already */
+ list_del_init(&rs->rs_exp_list);
+ spin_unlock(&exp->exp_lock);
+
spin_lock(&rs->rs_lock);
- been_handled = rs->rs_handled;
- rs->rs_handled = 1;
+ been_handled = rs->rs_handled;
+ rs->rs_handled = 1;
- nlocks = rs->rs_nlocks; /* atomic "steal", but */
- rs->rs_nlocks = 0; /* locks still on rs_locks! */
+ nlocks = rs->rs_nlocks; /* atomic "steal", but */
+ rs->rs_nlocks = 0; /* locks still on rs_locks! */
- if (nlocks == 0 && !been_handled) {
- /* If we see this, we should already have seen the warning
- * in mds_steal_ack_locks() */
- CDEBUG(D_HA, "All locks stolen from rs %p x"LPD64".t"LPD64
+ if (nlocks == 0 && !been_handled) {
+ /* If we see this, we should already have seen the warning
+ * in mds_steal_ack_locks() */
+ CDEBUG(D_HA, "All locks stolen from rs %p x%lld.t%lld"
" o%d NID %s\n",
rs,
rs->rs_xid, rs->rs_transno, rs->rs_opc,
libcfs_nid2str(exp->exp_connection->c_peer.nid));
- }
+ }
- if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
+ if ((!been_handled && rs->rs_on_net) || nlocks > 0) {
spin_unlock(&rs->rs_lock);
if (!been_handled && rs->rs_on_net) {
}
rs->rs_scheduled = 0;
+ rs->rs_convert_lock = 0;
if (!rs->rs_on_net) {
/* Off the net */
spin_unlock(&rs->rs_lock);
- class_export_put (exp);
- rs->rs_export = NULL;
- ptlrpc_rs_decref (rs);
- if (cfs_atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
+ class_export_put (exp);
+ rs->rs_export = NULL;
+ ptlrpc_rs_decref(rs);
+ if (atomic_dec_and_test(&svcpt->scp_nreps_difficult) &&
svc->srv_is_stopping)
wake_up_all(&svcpt->scp_waitq);
RETURN(1);
RETURN(1);
}
-#ifndef __KERNEL__
-
-/**
- * Check whether given service has a reply available for processing
- * and process it.
- *
- * \param svc a ptlrpc service
- * \retval 0 no replies processed
- * \retval 1 one reply processed
- */
-static int
-ptlrpc_server_handle_reply(struct ptlrpc_service_part *svcpt)
-{
- struct ptlrpc_reply_state *rs = NULL;
- ENTRY;
-
- spin_lock(&svcpt->scp_rep_lock);
- if (!cfs_list_empty(&svcpt->scp_rep_queue)) {
- rs = cfs_list_entry(svcpt->scp_rep_queue.prev,
- struct ptlrpc_reply_state,
- rs_list);
- cfs_list_del_init(&rs->rs_list);
- }
- spin_unlock(&svcpt->scp_rep_lock);
- if (rs != NULL)
- ptlrpc_handle_rs(rs);
- RETURN(rs != NULL);
-}
-
-/* FIXME make use of timeout later */
-int
-liblustre_check_services (void *arg)
-{
- int did_something = 0;
- int rc;
- cfs_list_t *tmp, *nxt;
- ENTRY;
-
- /* I'm relying on being single threaded, not to have to lock
- * ptlrpc_all_services etc */
- cfs_list_for_each_safe (tmp, nxt, &ptlrpc_all_services) {
- struct ptlrpc_service *svc =
- cfs_list_entry (tmp, struct ptlrpc_service, srv_list);
- struct ptlrpc_service_part *svcpt;
-
- LASSERT(svc->srv_ncpts == 1);
- svcpt = svc->srv_parts[0];
-
- if (svcpt->scp_nthrs_running != 0) /* I've recursed */
- continue;
-
- /* service threads can block for bulk, so this limits us
- * (arbitrarily) to recursing 1 stack frame per service.
- * Note that the problem with recursion is that we have to
- * unwind completely before our caller can resume. */
-
- svcpt->scp_nthrs_running++;
-
- do {
- rc = ptlrpc_server_handle_req_in(svcpt, NULL);
- rc |= ptlrpc_server_handle_reply(svcpt);
- rc |= ptlrpc_at_check_timed(svcpt);
- rc |= ptlrpc_server_handle_request(svcpt, NULL);
- rc |= (ptlrpc_server_post_idle_rqbds(svcpt) > 0);
- did_something |= rc;
- } while (rc);
-
- svcpt->scp_nthrs_running--;
- }
-
- RETURN(did_something);
-}
-#define ptlrpc_stop_all_threads(s) do {} while (0)
-
-#else /* __KERNEL__ */
static void
ptlrpc_check_rqbd_pool(struct ptlrpc_service_part *svcpt)
static inline int
ptlrpc_rqbd_pending(struct ptlrpc_service_part *svcpt)
{
- return !cfs_list_empty(&svcpt->scp_rqbd_idle) &&
+ return !list_empty(&svcpt->scp_rqbd_idle) &&
svcpt->scp_rqbd_timeout == 0;
}
static inline int
ptlrpc_server_request_incoming(struct ptlrpc_service_part *svcpt)
{
- return !cfs_list_empty(&svcpt->scp_req_incoming);
+ return !list_empty(&svcpt->scp_req_incoming);
}
static __attribute__((__noinline__)) int
struct ptlrpc_service_part *svcpt = thread->t_svcpt;
struct ptlrpc_service *svc = svcpt->scp_service;
struct ptlrpc_reply_state *rs;
-#ifdef WITH_GROUP_INFO
struct group_info *ginfo = NULL;
-#endif
struct lu_env *env;
int counter = 0, rc = 0;
ENTRY;
svc->srv_name, thread->t_name, svcpt->scp_cpt);
}
-#ifdef WITH_GROUP_INFO
ginfo = groups_alloc(0);
if (!ginfo) {
rc = -ENOMEM;
set_current_groups(ginfo);
put_group_info(ginfo);
-#endif
if (svc->srv_ops.so_thr_init != NULL) {
rc = svc->srv_ops.so_thr_init(thread);
env->le_ctx.lc_thread = thread;
env->le_ctx.lc_cookie = 0x6;
- while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) {
+ while (!list_empty(&svcpt->scp_rqbd_idle)) {
rc = ptlrpc_server_post_idle_rqbds(svcpt);
if (rc >= 0)
continue;
goto out_srv_fini;
}
- /* Alloc reply state structure for this one */
- OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
- if (!rs) {
- rc = -ENOMEM;
- goto out_srv_fini;
- }
+ /* Alloc reply state structure for this one */
+ OBD_ALLOC_LARGE(rs, svc->srv_max_reply_size);
+ if (!rs) {
+ rc = -ENOMEM;
+ goto out_srv_fini;
+ }
spin_lock(&svcpt->scp_lock);
NULL, NULL);
spin_lock(&svcpt->scp_rep_lock);
- cfs_list_add(&rs->rs_list, &svcpt->scp_rep_idle);
+ list_add(&rs->rs_list, &svcpt->scp_rep_idle);
wake_up(&svcpt->scp_rep_waitq);
spin_unlock(&svcpt->scp_rep_lock);
ptlrpc_start_thread(svcpt, 0);
}
+ /* reset le_ses to initial state */
+ env->le_ses = NULL;
/* Process all incoming reqs before handling any */
if (ptlrpc_server_request_incoming(svcpt)) {
lu_context_enter(&env->le_ctx);
- env->le_ses = NULL;
ptlrpc_server_handle_req_in(svcpt, thread);
lu_context_exit(&env->le_ctx);
}
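+/**
+ * Splice the replies queued on \a hrt onto the caller's private
+ * \a replies list and report whether the handler thread has work:
+ * it must stay awake while replies are pending or while the reply
+ * handling service is being stopped.
+ */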
static int hrt_dont_sleep(struct ptlrpc_hr_thread *hrt,
- cfs_list_t *replies)
+ struct list_head *replies)
{
int result;
spin_lock(&hrt->hrt_lock);
- cfs_list_splice_init(&hrt->hrt_queue, replies);
- result = ptlrpc_hr.hr_stopping || !cfs_list_empty(replies);
+ list_splice_init(&hrt->hrt_queue, replies);
+ result = ptlrpc_hr.hr_stopping || !list_empty(replies);
spin_unlock(&hrt->hrt_lock);
return result;
{
struct ptlrpc_hr_thread *hrt = (struct ptlrpc_hr_thread *)arg;
struct ptlrpc_hr_partition *hrp = hrt->hrt_partition;
- CFS_LIST_HEAD (replies);
- char threadname[20];
+ struct list_head replies;
int rc;
- snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
- hrp->hrp_cpt, hrt->hrt_id);
+ INIT_LIST_HEAD(&replies);
unshare_fs_struct();
rc = cfs_cpt_bind(ptlrpc_hr.hr_cpt_table, hrp->hrp_cpt);
if (rc != 0) {
+ char threadname[20];
+
+ snprintf(threadname, sizeof(threadname), "ptlrpc_hr%02d_%03d",
+ hrp->hrp_cpt, hrt->hrt_id);
CWARN("Failed to bind %s on CPT %d of CPT table %p: rc = %d\n",
threadname, hrp->hrp_cpt, ptlrpc_hr.hr_cpt_table, rc);
}
- cfs_atomic_inc(&hrp->hrp_nstarted);
+ atomic_inc(&hrp->hrp_nstarted);
wake_up(&ptlrpc_hr.hr_waitq);
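+	/* main loop: block in hrt_dont_sleep() until replies are queued (or
+	 * we are asked to stop), then handle the spliced-off batch */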
while (!ptlrpc_hr.hr_stopping) {
l_wait_condition(hrt->hrt_waitq, hrt_dont_sleep(hrt, &replies));
- while (!cfs_list_empty(&replies)) {
- struct ptlrpc_reply_state *rs;
+ while (!list_empty(&replies)) {
+ struct ptlrpc_reply_state *rs;
- rs = cfs_list_entry(replies.prev,
- struct ptlrpc_reply_state,
- rs_list);
- cfs_list_del_init(&rs->rs_list);
- ptlrpc_handle_rs(rs);
- }
- }
+ rs = list_entry(replies.prev,
+ struct ptlrpc_reply_state,
+ rs_list);
+ list_del_init(&rs->rs_list);
+ ptlrpc_handle_rs(rs);
+ }
+ }
- cfs_atomic_inc(&hrp->hrp_nstopped);
+ atomic_inc(&hrp->hrp_nstopped);
wake_up(&ptlrpc_hr.hr_waitq);
return 0;
if (hrp->hrp_thrs == NULL)
continue; /* uninitialized */
wait_event(ptlrpc_hr.hr_waitq,
- cfs_atomic_read(&hrp->hrp_nstopped) ==
- cfs_atomic_read(&hrp->hrp_nstarted));
+ atomic_read(&hrp->hrp_nstopped) ==
+ atomic_read(&hrp->hrp_nstarted));
}
}
int rc = 0;
for (j = 0; j < hrp->hrp_nthrs; j++) {
- struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
- rc = PTR_ERR(kthread_run(ptlrpc_hr_main,
- &hrp->hrp_thrs[j],
- "ptlrpc_hr%02d_%03d",
- hrp->hrp_cpt,
- hrt->hrt_id));
- if (IS_ERR_VALUE(rc))
+ struct ptlrpc_hr_thread *hrt = &hrp->hrp_thrs[j];
+ struct task_struct *task;
+
+ task = kthread_run(ptlrpc_hr_main,
+ &hrp->hrp_thrs[j],
+ "ptlrpc_hr%02d_%03d",
+ hrp->hrp_cpt,
+ hrt->hrt_id);
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
break;
+ }
}
+
wait_event(ptlrpc_hr.hr_waitq,
- cfs_atomic_read(&hrp->hrp_nstarted) == j);
- if (!IS_ERR_VALUE(rc))
- continue;
+ atomic_read(&hrp->hrp_nstarted) == j);
- CERROR("Reply handling thread %d:%d Failed on starting: "
- "rc = %d\n", i, j, rc);
- ptlrpc_stop_hr_threads();
- RETURN(rc);
+ if (rc < 0) {
+ CERROR("cannot start reply handler thread %d:%d: "
+ "rc = %d\n", i, j, rc);
+ ptlrpc_stop_hr_threads();
+ RETURN(rc);
+ }
}
+
RETURN(0);
}
{
struct l_wait_info lwi = { 0 };
struct ptlrpc_thread *thread;
- CFS_LIST_HEAD (zombie);
+ struct list_head zombie;
ENTRY;
CDEBUG(D_INFO, "Stopping threads for service %s\n",
svcpt->scp_service->srv_name);
+ INIT_LIST_HEAD(&zombie);
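+	/* threads that have already stopped are collected on a local zombie
+	 * list so they can be freed once scp_lock is dropped */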
spin_lock(&svcpt->scp_lock);
/* let the thread know that we would like it to stop asap */
list_for_each_entry(thread, &svcpt->scp_threads, t_link) {
wake_up_all(&svcpt->scp_waitq);
- while (!cfs_list_empty(&svcpt->scp_threads)) {
- thread = cfs_list_entry(svcpt->scp_threads.next,
+ while (!list_empty(&svcpt->scp_threads)) {
+ thread = list_entry(svcpt->scp_threads.next,
struct ptlrpc_thread, t_link);
if (thread_is_stopped(thread)) {
- cfs_list_del(&thread->t_link);
- cfs_list_add(&thread->t_link, &zombie);
+ list_del(&thread->t_link);
+ list_add(&thread->t_link, &zombie);
continue;
}
spin_unlock(&svcpt->scp_lock);
spin_unlock(&svcpt->scp_lock);
- while (!cfs_list_empty(&zombie)) {
- thread = cfs_list_entry(zombie.next,
+ while (!list_empty(&zombie)) {
+ thread = list_entry(zombie.next,
struct ptlrpc_thread, t_link);
- cfs_list_del(&thread->t_link);
+ list_del(&thread->t_link);
OBD_FREE_PTR(thread);
}
EXIT;
EXIT;
}
-EXPORT_SYMBOL(ptlrpc_stop_all_threads);
int ptlrpc_start_threads(struct ptlrpc_service *svc)
{
ptlrpc_stop_all_threads(svc);
RETURN(rc);
}
-EXPORT_SYMBOL(ptlrpc_start_threads);
int ptlrpc_start_thread(struct ptlrpc_service_part *svcpt, int wait)
{
struct l_wait_info lwi = { 0 };
struct ptlrpc_thread *thread;
struct ptlrpc_service *svc;
+ struct task_struct *task;
int rc;
ENTRY;
thread_add_flags(thread, SVC_STARTING);
thread->t_svcpt = svcpt;
- cfs_list_add(&thread->t_link, &svcpt->scp_threads);
+ list_add(&thread->t_link, &svcpt->scp_threads);
spin_unlock(&svcpt->scp_lock);
if (svcpt->scp_cpt >= 0) {
}
CDEBUG(D_RPCTRACE, "starting thread '%s'\n", thread->t_name);
- rc = PTR_ERR(kthread_run(ptlrpc_main, thread, thread->t_name));
- if (IS_ERR_VALUE(rc)) {
- CERROR("cannot start thread '%s': rc %d\n",
+ task = kthread_run(ptlrpc_main, thread, "%s", thread->t_name);
+ if (IS_ERR(task)) {
+ rc = PTR_ERR(task);
+ CERROR("cannot start thread '%s': rc = %d\n",
thread->t_name, rc);
spin_lock(&svcpt->scp_lock);
--svcpt->scp_nthrs_starting;
wake_up(&thread->t_ctl_waitq);
spin_unlock(&svcpt->scp_lock);
} else {
- cfs_list_del(&thread->t_link);
+ list_del(&thread->t_link);
spin_unlock(&svcpt->scp_lock);
OBD_FREE_PTR(thread);
}
struct ptlrpc_hr_partition *hrp;
struct ptlrpc_hr_thread *hrt;
int rc;
+ int cpt;
int i;
- int j;
+ int weight;
ENTRY;
memset(&ptlrpc_hr, 0, sizeof(ptlrpc_hr));
init_waitqueue_head(&ptlrpc_hr.hr_waitq);
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- hrp->hrp_cpt = i;
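+	/* number of hyper-thread siblings per core: used below to start
+	 * roughly one reply handler thread per physical core in each CPT */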
+ weight = cpumask_weight(topology_sibling_cpumask(smp_processor_id()));
+
+ cfs_percpt_for_each(hrp, cpt, ptlrpc_hr.hr_partitions) {
+ hrp->hrp_cpt = cpt;
- cfs_atomic_set(&hrp->hrp_nstarted, 0);
- cfs_atomic_set(&hrp->hrp_nstopped, 0);
+ atomic_set(&hrp->hrp_nstarted, 0);
+ atomic_set(&hrp->hrp_nstopped, 0);
- hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
- hrp->hrp_nthrs /= cfs_cpu_ht_nsiblings(0);
+ hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, cpt);
+ hrp->hrp_nthrs /= weight;
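+		/* the division can round down to zero on asymmetric
+		 * topologies; always run at least one thread per CPT */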
+ if (hrp->hrp_nthrs == 0)
+ hrp->hrp_nthrs = 1;
- LASSERT(hrp->hrp_nthrs > 0);
- OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
+ OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, cpt,
hrp->hrp_nthrs * sizeof(*hrt));
if (hrp->hrp_thrs == NULL)
GOTO(out, rc = -ENOMEM);
- for (j = 0; j < hrp->hrp_nthrs; j++) {
- hrt = &hrp->hrp_thrs[j];
+ for (i = 0; i < hrp->hrp_nthrs; i++) {
+ hrt = &hrp->hrp_thrs[i];
- hrt->hrt_id = j;
+ hrt->hrt_id = i;
hrt->hrt_partition = hrp;
init_waitqueue_head(&hrt->hrt_waitq);
spin_lock_init(&hrt->hrt_lock);
- CFS_INIT_LIST_HEAD(&hrt->hrt_queue);
+ INIT_LIST_HEAD(&hrt->hrt_queue);
}
}
void ptlrpc_hr_fini(void)
{
struct ptlrpc_hr_partition *hrp;
- int i;
+ int cpt;
if (ptlrpc_hr.hr_partitions == NULL)
return;
ptlrpc_stop_hr_threads();
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
+ cfs_percpt_for_each(hrp, cpt, ptlrpc_hr.hr_partitions) {
if (hrp->hrp_thrs != NULL) {
OBD_FREE(hrp->hrp_thrs,
hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0]));
ptlrpc_hr.hr_partitions = NULL;
}
-#endif /* __KERNEL__ */
/**
* Wait until all already scheduled replies are processed.
NULL, NULL);
rc = l_wait_event(svcpt->scp_waitq,
- cfs_atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
+ atomic_read(&svcpt->scp_nreps_difficult) == 0, &lwi);
if (rc == 0)
break;
CWARN("Unexpectedly long timeout %s %p\n",
/* early disarm AT timer... */
ptlrpc_service_for_each_part(svcpt, i, svc) {
if (svcpt->scp_service != NULL)
- cfs_timer_disarm(&svcpt->scp_at_timer);
+ del_timer(&svcpt->scp_at_timer);
}
}
/* Unlink all the request buffers. This forces a 'final'
* event with its 'unlink' flag set for each posted rqbd */
- cfs_list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
+ list_for_each_entry(rqbd, &svcpt->scp_rqbd_posted,
rqbd_list) {
rc = LNetMDUnlink(rqbd->rqbd_md_h);
LASSERT(rc == 0 || rc == -ENOENT);
break;
spin_lock(&svcpt->scp_rep_lock);
- while (!cfs_list_empty(&svcpt->scp_rep_active)) {
- rs = cfs_list_entry(svcpt->scp_rep_active.next,
+ while (!list_empty(&svcpt->scp_rep_active)) {
+ rs = list_entry(svcpt->scp_rep_active.next,
struct ptlrpc_reply_state, rs_list);
spin_lock(&rs->rs_lock);
ptlrpc_schedule_difficult_reply(rs);
/* purge the request queue. NB No new replies (rqbds
* all unlinked) and no service threads, so I'm the only
* thread noodling the request queue now */
- while (!cfs_list_empty(&svcpt->scp_req_incoming)) {
- req = cfs_list_entry(svcpt->scp_req_incoming.next,
+ while (!list_empty(&svcpt->scp_req_incoming)) {
+ req = list_entry(svcpt->scp_req_incoming.next,
struct ptlrpc_request, rq_list);
- cfs_list_del(&req->rq_list);
+ list_del(&req->rq_list);
svcpt->scp_nreqs_incoming--;
ptlrpc_server_finish_request(svcpt, req);
}
ptlrpc_server_finish_active_request(svcpt, req);
}
- LASSERT(cfs_list_empty(&svcpt->scp_rqbd_posted));
+ LASSERT(list_empty(&svcpt->scp_rqbd_posted));
LASSERT(svcpt->scp_nreqs_incoming == 0);
LASSERT(svcpt->scp_nreqs_active == 0);
/* history should have been culled by
/* Now free all the request buffers since nothing
* references them any more... */
- while (!cfs_list_empty(&svcpt->scp_rqbd_idle)) {
- rqbd = cfs_list_entry(svcpt->scp_rqbd_idle.next,
+ while (!list_empty(&svcpt->scp_rqbd_idle)) {
+ rqbd = list_entry(svcpt->scp_rqbd_idle.next,
struct ptlrpc_request_buffer_desc,
rqbd_list);
ptlrpc_free_rqbd(rqbd);
}
ptlrpc_wait_replies(svcpt);
- while (!cfs_list_empty(&svcpt->scp_rep_idle)) {
- rs = cfs_list_entry(svcpt->scp_rep_idle.next,
+ while (!list_empty(&svcpt->scp_rep_idle)) {
+ rs = list_entry(svcpt->scp_rep_idle.next,
struct ptlrpc_reply_state,
rs_list);
- cfs_list_del(&rs->rs_list);
+ list_del(&rs->rs_list);
OBD_FREE_LARGE(rs, svc->srv_max_reply_size);
}
}
break;
/* In case somebody rearmed this in the meantime */
- cfs_timer_disarm(&svcpt->scp_at_timer);
+ del_timer(&svcpt->scp_at_timer);
array = &svcpt->scp_at_array;
if (array->paa_reqs_array != NULL) {
OBD_FREE(array->paa_reqs_array,
- sizeof(cfs_list_t) * array->paa_size);
+ sizeof(struct list_head) * array->paa_size);
array->paa_reqs_array = NULL;
}
service->srv_is_stopping = 1;
mutex_lock(&ptlrpc_all_services_mutex);
- cfs_list_del_init(&service->srv_list);
+ list_del_init(&service->srv_list);
mutex_unlock(&ptlrpc_all_services_mutex);
ptlrpc_service_del_atimer(service);
ptlrpc_service_nrs_cleanup(service);
ptlrpc_lprocfs_unregister_service(service);
+ ptlrpc_sysfs_unregister_service(service);
ptlrpc_service_free(service);
* Right now, it just checks to make sure that requests aren't languishing
* in the queue. We'll use this health check to govern whether a node needs
* to be shot, so it's intentionally non-aggressive. */
-int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
+static int ptlrpc_svcpt_health_check(struct ptlrpc_service_part *svcpt)
{
- struct ptlrpc_request *request = NULL;
- struct timeval right_now;
- long timediff;
+ struct ptlrpc_request *request = NULL;
+ struct timespec64 right_now;
+ struct timespec64 timediff;
- do_gettimeofday(&right_now);
+ ktime_get_real_ts64(&right_now);
spin_lock(&svcpt->scp_req_lock);
/* How long has the next entry been waiting? */
return 0;
}
- timediff = cfs_timeval_sub(&right_now, &request->rq_arrival_time, NULL);
+ timediff = timespec64_sub(right_now, request->rq_arrival_time);
spin_unlock(&svcpt->scp_req_lock);
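+	/* unhealthy if the oldest queued request has been waiting longer
+	 * than the worst case: 1.5 * obd_timeout with adaptive timeouts
+	 * off, at_max otherwise */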
- if ((timediff / ONE_MILLION) >
+	if (timediff.tv_sec >
(AT_OFF ? obd_timeout * 3 / 2 : at_max)) {
- CERROR("%s: unhealthy - request has been waiting %lds\n",
- svcpt->scp_service->srv_name, timediff / ONE_MILLION);
+ CERROR("%s: unhealthy - request has been waiting %llds\n",
+ svcpt->scp_service->srv_name, (s64)timediff.tv_sec);
return -1;
}