LASSERT(rs != NULL);
LASSERT(rs->rs_nlocks < RS_MAX_LOCKS);
- if (req->rq_export->exp_disconnected) {
- ldlm_lock_decref(lock, mode);
- } else {
- idx = rs->rs_nlocks++;
- rs->rs_locks[idx] = *lock;
- rs->rs_modes[idx] = mode;
- rs->rs_difficult = 1;
- rs->rs_no_ack = no_ack;
- rs->rs_convert_lock = convert_lock;
- }
+ idx = rs->rs_nlocks++;
+ rs->rs_locks[idx] = *lock;
+ rs->rs_modes[idx] = mode;
+ rs->rs_difficult = 1;
+ rs->rs_no_ack = no_ack;
+ rs->rs_convert_lock = convert_lock;
}
EXPORT_SYMBOL(ptlrpc_save_lock);
}
weight = cfs_cpt_weight(svc->srv_cptable, 0);
- LASSERT(weight > 0);
for (; factor > 0 && weight > 0; factor--, weight -= fade)
nthrs += min(weight, fade) * factor;
*/
struct ptlrpc_service *
ptlrpc_register_service(struct ptlrpc_service_conf *conf,
+ struct kset *parent,
struct proc_dir_entry *proc_entry)
{
struct ptlrpc_service_cpt_conf *cconf = &conf->psc_cpt;
list_add(&service->srv_list, &ptlrpc_all_services);
mutex_unlock(&ptlrpc_all_services_mutex);
+ if (parent) {
+ rc = ptlrpc_sysfs_register_service(parent, service);
+ if (rc)
+ GOTO(failed, rc);
+ }
+
if (proc_entry != NULL)
ptlrpc_lprocfs_register_service(proc_entry, service);
spin_lock(&svcpt->scp_lock);
/*
* now all reqs including the embedded req has been
- * disposed, schedule request buffer for re-use.
+ * disposed, schedule request buffer for re-use
+ * or free it to drain some of the excess.
*/
LASSERT(atomic_read(&rqbd->rqbd_req.rq_refcount) == 0);
- list_add_tail(&rqbd->rqbd_list, &svcpt->scp_rqbd_idle);
+ if (svcpt->scp_nrqbds_posted >=
+ svc->srv_nbuf_per_group &&
+ !test_req_buffer_pressure) {
+ /* like in ptlrpc_free_rqbd() */
+ svcpt->scp_nrqbds_total--;
+ OBD_FREE_LARGE(rqbd->rqbd_buffer,
+ svc->srv_buf_size);
+ OBD_FREE_PTR(rqbd);
+ } else {
+ list_add_tail(&rqbd->rqbd_list,
+ &svcpt->scp_rqbd_idle);
+ }
}
spin_unlock(&svcpt->scp_lock);
* we better wait for 3. */
exp->exp_obd->obd_eviction_timer =
cfs_time_current_sec() + 3 * PING_INTERVAL;
- CDEBUG(D_HA, "%s: Think about evicting %s from "CFS_TIME_T"\n",
+ CDEBUG(D_HA, "%s: Think about evicting %s from %ld\n",
exp->exp_obd->obd_name,
obd_export_nid2str(oldest_exp), oldest_time);
}
work_start = ktime_get_real();
arrived = timespec64_to_ktime(request->rq_arrival_time);
- timediff_usecs = ktime_us_delta(arrived, work_start);
+ timediff_usecs = ktime_us_delta(work_start, arrived);
if (likely(svc->srv_stats != NULL)) {
lprocfs_counter_add(svc->srv_stats, PTLRPC_REQWAIT_CNTR,
timediff_usecs);
if (rs->rs_convert_lock &&
rs->rs_transno > exp->exp_last_committed) {
struct ldlm_lock *lock;
+ struct ldlm_lock *ack_locks[RS_MAX_LOCKS] = { NULL };
spin_lock(&rs->rs_lock);
if (rs->rs_convert_lock &&
rs->rs_transno > exp->exp_last_committed) {
nlocks = rs->rs_nlocks;
- while (nlocks-- > 0)
+ while (nlocks-- > 0) {
+ /*
+ * NB: don't assume rs is always handled
+ * by the same service thread (see
+ * ptlrpc_hr_select()), so REP-ACK hr may
+ * race with transaction commit; since the
+ * latter will release locks, take lock
+ * references here early to convert to COS
+ * mode safely.
+ */
+ lock = ldlm_handle2lock(
+ &rs->rs_locks[nlocks]);
+ LASSERT(lock);
+ ack_locks[nlocks] = lock;
rs->rs_modes[nlocks] = LCK_COS;
+ }
nlocks = rs->rs_nlocks;
rs->rs_convert_lock = 0;
/* clear rs_scheduled so that commit callback
spin_unlock(&rs->rs_lock);
while (nlocks-- > 0) {
- lock = ldlm_handle2lock(
- &rs->rs_locks[nlocks]);
- LASSERT(lock != NULL);
+ lock = ack_locks[nlocks];
ldlm_lock_downgrade(lock, LCK_COS);
LDLM_LOCK_PUT(lock);
}
struct ptlrpc_hr_partition *hrp;
struct ptlrpc_hr_thread *hrt;
int rc;
+ int cpt;
int i;
- int j;
int weight;
ENTRY;
weight = cpumask_weight(topology_sibling_cpumask(smp_processor_id()));
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
- hrp->hrp_cpt = i;
+ cfs_percpt_for_each(hrp, cpt, ptlrpc_hr.hr_partitions) {
+ hrp->hrp_cpt = cpt;
atomic_set(&hrp->hrp_nstarted, 0);
atomic_set(&hrp->hrp_nstopped, 0);
- hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, i);
-
+ hrp->hrp_nthrs = cfs_cpt_weight(ptlrpc_hr.hr_cpt_table, cpt);
hrp->hrp_nthrs /= weight;
if (hrp->hrp_nthrs == 0)
hrp->hrp_nthrs = 1;
- OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, i,
+ OBD_CPT_ALLOC(hrp->hrp_thrs, ptlrpc_hr.hr_cpt_table, cpt,
hrp->hrp_nthrs * sizeof(*hrt));
if (hrp->hrp_thrs == NULL)
GOTO(out, rc = -ENOMEM);
- for (j = 0; j < hrp->hrp_nthrs; j++) {
- hrt = &hrp->hrp_thrs[j];
+ for (i = 0; i < hrp->hrp_nthrs; i++) {
+ hrt = &hrp->hrp_thrs[i];
- hrt->hrt_id = j;
+ hrt->hrt_id = i;
hrt->hrt_partition = hrp;
init_waitqueue_head(&hrt->hrt_waitq);
spin_lock_init(&hrt->hrt_lock);
void ptlrpc_hr_fini(void)
{
struct ptlrpc_hr_partition *hrp;
- int i;
+ int cpt;
if (ptlrpc_hr.hr_partitions == NULL)
return;
ptlrpc_stop_hr_threads();
- cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
+ cfs_percpt_for_each(hrp, cpt, ptlrpc_hr.hr_partitions) {
if (hrp->hrp_thrs != NULL) {
OBD_FREE(hrp->hrp_thrs,
hrp->hrp_nthrs * sizeof(hrp->hrp_thrs[0]));
ptlrpc_service_nrs_cleanup(service);
ptlrpc_lprocfs_unregister_service(service);
+ ptlrpc_sysfs_unregister_service(service);
ptlrpc_service_free(service);