X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Fptlrpc%2Flproc_ptlrpc.c;h=ab7d146b4f14432fd9d50237735dc2508d5a7595;hp=b42c41b2109b221575d371cba03f58d09ff51afa;hb=d62c7a3a191ec2834a91621709e9d1973ed5d0da;hpb=b59c628da398cf4cf2f002e56d70b3b7d261d5c9 diff --git a/lustre/ptlrpc/lproc_ptlrpc.c b/lustre/ptlrpc/lproc_ptlrpc.c index b42c41b..ab7d146 100644 --- a/lustre/ptlrpc/lproc_ptlrpc.c +++ b/lustre/ptlrpc/lproc_ptlrpc.c @@ -1,6 +1,4 @@ -/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*- - * vim:expandtab:shiftwidth=8:tabstop=8: - * +/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. @@ -28,6 +26,8 @@ /* * Copyright (c) 2002, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. + * + * Copyright (c) 2011, 2012, Whamcloud, Inc. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -107,9 +107,11 @@ struct ll_rpc_opcode { { MGS_TARGET_REG, "mgs_target_reg" }, { MGS_TARGET_DEL, "mgs_target_del" }, { MGS_SET_INFO, "mgs_set_info" }, + { MGS_CONFIG_READ, "mgs_config_read" }, { OBD_PING, "obd_ping" }, { OBD_LOG_CANCEL, "llog_origin_handle_cancel" }, { OBD_QC_CALLBACK, "obd_quota_callback" }, + { OBD_IDX_READ, "dt_index_read" }, { LLOG_ORIGIN_HANDLE_CREATE, "llog_origin_handle_create" }, { LLOG_ORIGIN_HANDLE_NEXT_BLOCK, "llog_origin_handle_next_block" }, { LLOG_ORIGIN_HANDLE_READ_HEADER,"llog_origin_handle_read_header" }, @@ -247,33 +249,47 @@ void ptlrpc_lprocfs_register(struct proc_dir_entry *root, char *dir, static int ptlrpc_lprocfs_read_req_history_len(char *page, char **start, off_t off, - int count, int *eof, void *data) + int count, int *eof, void *data) { - struct ptlrpc_service *svc = data; + struct ptlrpc_service *svc = data; + struct ptlrpc_service_part *svcpt; + int total = 0; + int i; + + *eof = 1; + + ptlrpc_service_for_each_part(svcpt, i, svc) + total += svcpt->scp_hist_nrqbds; - *eof = 1; - return snprintf(page, count, "%d\n", svc->srv_n_history_rqbds); + return snprintf(page, count, "%d\n", total); } static int ptlrpc_lprocfs_read_req_history_max(char *page, char **start, off_t off, int count, int *eof, void *data) { - struct ptlrpc_service *svc = data; + struct ptlrpc_service *svc = data; + struct ptlrpc_service_part *svcpt; + int total = 0; + int i; + + *eof = 1; + ptlrpc_service_for_each_part(svcpt, i, svc) + total += svc->srv_hist_nrqbds_cpt_max; - *eof = 1; - return snprintf(page, count, "%d\n", svc->srv_max_history_rqbds); + return snprintf(page, count, "%d\n", total); } static int ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer, unsigned long count, void *data) { - struct ptlrpc_service *svc = data; - int bufpages; - int val; - int rc = lprocfs_write_helper(buffer, count, &val); + struct ptlrpc_service *svc = data; + int bufpages; + int val; + int rc; + rc = lprocfs_write_helper(buffer, count, &val); if (rc < 0) return rc; @@ -287,103 +303,124 @@ ptlrpc_lprocfs_write_req_history_max(struct file *file, const char *buffer, if (val > cfs_num_physpages/(2 * bufpages)) return -ERANGE; - cfs_spin_lock(&svc->srv_lock); - svc->srv_max_history_rqbds = val; - cfs_spin_unlock(&svc->srv_lock); + spin_lock(&svc->srv_lock); - return count; + if (val == 0) + svc->srv_hist_nrqbds_cpt_max = 0; + else + svc->srv_hist_nrqbds_cpt_max = max(1, (val / svc->srv_ncpts)); + + spin_unlock(&svc->srv_lock); + + return count; } static int ptlrpc_lprocfs_rd_threads_min(char *page, char **start, off_t off, - int count, int *eof, void *data) + 
int count, int *eof, void *data) { - struct ptlrpc_service *svc = data; + struct ptlrpc_service *svc = data; - return snprintf(page, count, "%d\n", svc->srv_threads_min); + return snprintf(page, count, "%d\n", + svc->srv_nthrs_cpt_init * svc->srv_ncpts); } static int ptlrpc_lprocfs_wr_threads_min(struct file *file, const char *buffer, unsigned long count, void *data) { - struct ptlrpc_service *svc = data; - int val; - int rc = lprocfs_write_helper(buffer, count, &val); + struct ptlrpc_service *svc = data; + int val; + int rc = lprocfs_write_helper(buffer, count, &val); - if (rc < 0) - return rc; + if (rc < 0) + return rc; - if (val < 2) - return -ERANGE; + if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT) + return -ERANGE; - if (val > svc->srv_threads_max) - return -ERANGE; + spin_lock(&svc->srv_lock); + if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts) { + spin_unlock(&svc->srv_lock); + return -ERANGE; + } - cfs_spin_lock(&svc->srv_lock); - svc->srv_threads_min = val; - cfs_spin_unlock(&svc->srv_lock); + svc->srv_nthrs_cpt_init = val / svc->srv_ncpts; - return count; + spin_unlock(&svc->srv_lock); + + return count; } static int ptlrpc_lprocfs_rd_threads_started(char *page, char **start, off_t off, - int count, int *eof, void *data) + int count, int *eof, void *data) { - struct ptlrpc_service *svc = data; + struct ptlrpc_service *svc = data; + struct ptlrpc_service_part *svcpt; + int total = 0; + int i; + + LASSERT(svc->srv_parts != NULL); + ptlrpc_service_for_each_part(svcpt, i, svc) + total += svcpt->scp_nthrs_running; - return snprintf(page, count, "%d\n", svc->srv_threads_running); + return snprintf(page, count, "%d\n", total); } static int ptlrpc_lprocfs_rd_threads_max(char *page, char **start, off_t off, - int count, int *eof, void *data) + int count, int *eof, void *data) { - struct ptlrpc_service *svc = data; + struct ptlrpc_service *svc = data; - return snprintf(page, count, "%d\n", svc->srv_threads_max); + return snprintf(page, count, "%d\n", + svc->srv_nthrs_cpt_limit * svc->srv_ncpts); } static int ptlrpc_lprocfs_wr_threads_max(struct file *file, const char *buffer, - unsigned long count, void *data) + unsigned long count, void *data) { - struct ptlrpc_service *svc = data; - int val; - int rc = lprocfs_write_helper(buffer, count, &val); + struct ptlrpc_service *svc = data; + int val; + int rc = lprocfs_write_helper(buffer, count, &val); - if (rc < 0) - return rc; + if (rc < 0) + return rc; - if (val < 2) - return -ERANGE; + if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT) + return -ERANGE; - if (val < svc->srv_threads_min) - return -ERANGE; + spin_lock(&svc->srv_lock); + if (val < svc->srv_nthrs_cpt_init * svc->srv_ncpts) { + spin_unlock(&svc->srv_lock); + return -ERANGE; + } - cfs_spin_lock(&svc->srv_lock); - svc->srv_threads_max = val; - cfs_spin_unlock(&svc->srv_lock); + svc->srv_nthrs_cpt_limit = val / svc->srv_ncpts; - return count; + spin_unlock(&svc->srv_lock); + + return count; } struct ptlrpc_srh_iterator { - __u64 srhi_seq; - struct ptlrpc_request *srhi_req; + int srhi_idx; + __u64 srhi_seq; + struct ptlrpc_request *srhi_req; }; int -ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc, - struct ptlrpc_srh_iterator *srhi, - __u64 seq) +ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service_part *svcpt, + struct ptlrpc_srh_iterator *srhi, + __u64 seq) { - cfs_list_t *e; - struct ptlrpc_request *req; + cfs_list_t *e; + struct ptlrpc_request *req; - if (srhi->srhi_req != NULL && - srhi->srhi_seq > svc->srv_request_max_cull_seq && + if (srhi->srhi_req != NULL && 
+ srhi->srhi_seq > svcpt->scp_hist_seq_culled && srhi->srhi_seq <= seq) { /* If srhi_req was set previously, hasn't been culled and * we're searching for a seq on or after it (i.e. more @@ -391,15 +428,22 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc, * Since the service history is LRU (i.e. culled reqs will * be near the head), we shouldn't have to do long * re-scans */ - LASSERT (srhi->srhi_seq == srhi->srhi_req->rq_history_seq); - LASSERT (!cfs_list_empty(&svc->srv_request_history)); - e = &srhi->srhi_req->rq_history_list; - } else { - /* search from start */ - e = svc->srv_request_history.next; - } - - while (e != &svc->srv_request_history) { + LASSERTF(srhi->srhi_seq == srhi->srhi_req->rq_history_seq, + "%s:%d: seek seq "LPU64", request seq "LPU64"\n", + svcpt->scp_service->srv_name, svcpt->scp_cpt, + srhi->srhi_seq, srhi->srhi_req->rq_history_seq); + LASSERTF(!cfs_list_empty(&svcpt->scp_hist_reqs), + "%s:%d: seek offset "LPU64", request seq "LPU64", " + "last culled "LPU64"\n", + svcpt->scp_service->srv_name, svcpt->scp_cpt, + seq, srhi->srhi_seq, svcpt->scp_hist_seq_culled); + e = &srhi->srhi_req->rq_history_list; + } else { + /* search from start */ + e = svcpt->scp_hist_reqs.next; + } + + while (e != &svcpt->scp_hist_reqs) { req = cfs_list_entry(e, struct ptlrpc_request, rq_history_list); if (req->rq_history_seq >= seq) { @@ -413,31 +457,83 @@ ptlrpc_lprocfs_svc_req_history_seek(struct ptlrpc_service *svc, return -ENOENT; } -static void * -ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos) -{ - struct ptlrpc_service *svc = s->private; - struct ptlrpc_srh_iterator *srhi; - int rc; +/* + * ptlrpc history sequence is used as "position" of seq_file, in some case, + * seq_read() will increase "position" to indicate reading the next + * element, however, low bits of history sequence are reserved for CPT id + * (check the details from comments before ptlrpc_req_add_history), which + * means seq_read() might change CPT id of history sequence and never + * finish reading of requests on a CPT. To make it work, we have to shift + * CPT id to high bits and timestamp to low bits, so seq_read() will only + * increase timestamp which can correctly indicate the next position. + */ - OBD_ALLOC(srhi, sizeof(*srhi)); - if (srhi == NULL) - return NULL; +/* convert seq_file pos to cpt */ +#define PTLRPC_REQ_POS2CPT(svc, pos) \ + ((svc)->srv_cpt_bits == 0 ? 0 : \ + (__u64)(pos) >> (64 - (svc)->srv_cpt_bits)) - srhi->srhi_seq = 0; - srhi->srhi_req = NULL; +/* make up seq_file pos from cpt */ +#define PTLRPC_REQ_CPT2POS(svc, cpt) \ + ((svc)->srv_cpt_bits == 0 ? 0 : \ + (cpt) << (64 - (svc)->srv_cpt_bits)) - cfs_spin_lock(&svc->srv_lock); - rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos); - cfs_spin_unlock(&svc->srv_lock); +/* convert sequence to position */ +#define PTLRPC_REQ_SEQ2POS(svc, seq) \ + ((svc)->srv_cpt_bits == 0 ? (seq) : \ + ((seq) >> (svc)->srv_cpt_bits) | \ + ((seq) << (64 - (svc)->srv_cpt_bits))) - if (rc == 0) { - *pos = srhi->srhi_seq; - return srhi; - } +/* convert position to sequence */ +#define PTLRPC_REQ_POS2SEQ(svc, pos) \ + ((svc)->srv_cpt_bits == 0 ? 
(pos) : \ + ((__u64)(pos) << (svc)->srv_cpt_bits) | \ + ((__u64)(pos) >> (64 - (svc)->srv_cpt_bits))) - OBD_FREE(srhi, sizeof(*srhi)); - return NULL; +static void * +ptlrpc_lprocfs_svc_req_history_start(struct seq_file *s, loff_t *pos) +{ + struct ptlrpc_service *svc = s->private; + struct ptlrpc_service_part *svcpt; + struct ptlrpc_srh_iterator *srhi; + unsigned int cpt; + int rc; + int i; + + if (sizeof(loff_t) != sizeof(__u64)) { /* can't support */ + CWARN("Failed to read request history because size of loff_t " + "%d can't match size of u64\n", (int)sizeof(loff_t)); + return NULL; + } + + OBD_ALLOC(srhi, sizeof(*srhi)); + if (srhi == NULL) + return NULL; + + srhi->srhi_seq = 0; + srhi->srhi_req = NULL; + + cpt = PTLRPC_REQ_POS2CPT(svc, *pos); + + ptlrpc_service_for_each_part(svcpt, i, svc) { + if (i < cpt) /* skip */ + continue; + if (i > cpt) /* make up the lowest position for this CPT */ + *pos = PTLRPC_REQ_CPT2POS(svc, i); + + spin_lock(&svcpt->scp_lock); + rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, + PTLRPC_REQ_POS2SEQ(svc, *pos)); + spin_unlock(&svcpt->scp_lock); + if (rc == 0) { + *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq); + srhi->srhi_idx = i; + return srhi; + } + } + + OBD_FREE(srhi, sizeof(*srhi)); + return NULL; } static void @@ -451,26 +547,40 @@ ptlrpc_lprocfs_svc_req_history_stop(struct seq_file *s, void *iter) static void * ptlrpc_lprocfs_svc_req_history_next(struct seq_file *s, - void *iter, loff_t *pos) + void *iter, loff_t *pos) { - struct ptlrpc_service *svc = s->private; - struct ptlrpc_srh_iterator *srhi = iter; - int rc; - - cfs_spin_lock(&svc->srv_lock); - rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, *pos + 1); - cfs_spin_unlock(&svc->srv_lock); - - if (rc != 0) { - OBD_FREE(srhi, sizeof(*srhi)); - return NULL; - } - - *pos = srhi->srhi_seq; - return srhi; + struct ptlrpc_service *svc = s->private; + struct ptlrpc_srh_iterator *srhi = iter; + struct ptlrpc_service_part *svcpt; + __u64 seq; + int rc; + int i; + + for (i = srhi->srhi_idx; i < svc->srv_ncpts; i++) { + svcpt = svc->srv_parts[i]; + + if (i > srhi->srhi_idx) { /* reset iterator for a new CPT */ + srhi->srhi_req = NULL; + seq = srhi->srhi_seq = 0; + } else { /* the next sequence */ + seq = srhi->srhi_seq + (1 << svc->srv_cpt_bits); + } + + spin_lock(&svcpt->scp_lock); + rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, seq); + spin_unlock(&svcpt->scp_lock); + if (rc == 0) { + *pos = PTLRPC_REQ_SEQ2POS(svc, srhi->srhi_seq); + srhi->srhi_idx = i; + return srhi; + } + } + + OBD_FREE(srhi, sizeof(*srhi)); + return NULL; } -/* common ost/mdt srv_req_printfn */ +/* common ost/mdt so_req_printer */ void target_print_req(void *seq_file, struct ptlrpc_request *req) { /* Called holding srv_lock with irqs disabled. 
@@ -502,14 +612,19 @@ EXPORT_SYMBOL(target_print_req); static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) { - struct ptlrpc_service *svc = s->private; - struct ptlrpc_srh_iterator *srhi = iter; - struct ptlrpc_request *req; - int rc; + struct ptlrpc_service *svc = s->private; + struct ptlrpc_srh_iterator *srhi = iter; + struct ptlrpc_service_part *svcpt; + struct ptlrpc_request *req; + int rc; - cfs_spin_lock(&svc->srv_lock); + LASSERT(srhi->srhi_idx < svc->srv_ncpts); - rc = ptlrpc_lprocfs_svc_req_history_seek(svc, srhi, srhi->srhi_seq); + svcpt = svc->srv_parts[srhi->srhi_idx]; + + spin_lock(&svcpt->scp_lock); + + rc = ptlrpc_lprocfs_svc_req_history_seek(svcpt, srhi, srhi->srhi_seq); if (rc == 0) { req = srhi->srhi_req; @@ -527,15 +642,14 @@ static int ptlrpc_lprocfs_svc_req_history_show(struct seq_file *s, void *iter) req->rq_arrival_time.tv_sec, req->rq_sent - req->rq_arrival_time.tv_sec, req->rq_sent - req->rq_deadline); - if (svc->srv_req_printfn == NULL) - seq_printf(s, "\n"); - else - svc->srv_req_printfn(s, srhi->srhi_req); + if (svc->srv_ops.so_req_printer == NULL) + seq_printf(s, "\n"); + else + svc->srv_ops.so_req_printer(s, srhi->srhi_req); } - cfs_spin_unlock(&svc->srv_lock); - - return rc; + spin_unlock(&svcpt->scp_lock); + return rc; } static int @@ -565,30 +679,59 @@ ptlrpc_lprocfs_svc_req_history_open(struct inode *inode, struct file *file) /* See also lprocfs_rd_timeouts */ static int ptlrpc_lprocfs_rd_timeouts(char *page, char **start, off_t off, - int count, int *eof, void *data) + int count, int *eof, void *data) { - struct ptlrpc_service *svc = data; - unsigned int cur, worst; - time_t worstt; - struct dhms ts; - int rc = 0; - - *eof = 1; - cur = at_get(&svc->srv_at_estimate); - worst = svc->srv_at_estimate.at_worst_ever; - worstt = svc->srv_at_estimate.at_worst_time; - s2dhms(&ts, cfs_time_current_sec() - worstt); - if (AT_OFF) - rc += snprintf(page + rc, count - rc, - "adaptive timeouts off, using obd_timeout %u\n", - obd_timeout); - rc += snprintf(page + rc, count - rc, - "%10s : cur %3u worst %3u (at %ld, "DHMS_FMT" ago) ", - "service", cur, worst, worstt, - DHMS_VARS(&ts)); - rc = lprocfs_at_hist_helper(page, count, rc, - &svc->srv_at_estimate); - return rc; + struct ptlrpc_service *svc = data; + struct ptlrpc_service_part *svcpt; + struct dhms ts; + time_t worstt; + unsigned int cur; + unsigned int worst; + int nob = 0; + int rc = 0; + int i; + + LASSERT(svc->srv_parts != NULL); + + if (AT_OFF) { + rc += snprintf(page + rc, count - rc, + "adaptive timeouts off, using obd_timeout %u\n", + obd_timeout); + return rc; + } + + ptlrpc_service_for_each_part(svcpt, i, svc) { + cur = at_get(&svcpt->scp_at_estimate); + worst = svcpt->scp_at_estimate.at_worst_ever; + worstt = svcpt->scp_at_estimate.at_worst_time; + s2dhms(&ts, cfs_time_current_sec() - worstt); + + nob = snprintf(page, count, + "%10s : cur %3u worst %3u (at %ld, " + DHMS_FMT" ago) ", "service", + cur, worst, worstt, DHMS_VARS(&ts)); + + nob = lprocfs_at_hist_helper(page, count, nob, + &svcpt->scp_at_estimate); + rc += nob; + page += nob; + count -= nob; + + /* + * NB: for lustre proc read, the read count must be less + * than PAGE_SIZE, please see details in lprocfs_fops_read. + * It's unlikely that we exceed PAGE_SIZE at here because + * it means the service has more than 50 partitions. 
+ */ + if (count <= 0) { + CWARN("Can't fit AT information of %s in one page, " + "please contact with developer to fix this.\n", + svc->srv_name); + break; + } + } + + return rc; } static int ptlrpc_lprocfs_rd_hp_ratio(char *page, char **start, off_t off, @@ -600,21 +743,24 @@ static int ptlrpc_lprocfs_rd_hp_ratio(char *page, char **start, off_t off, } static int ptlrpc_lprocfs_wr_hp_ratio(struct file *file, const char *buffer, - unsigned long count, void *data) + unsigned long count, void *data) { - struct ptlrpc_service *svc = data; - int rc, val; + struct ptlrpc_service *svc = data; + int rc; + int val; - rc = lprocfs_write_helper(buffer, count, &val); - if (rc < 0) - return rc; - if (val < 0) - return -ERANGE; + rc = lprocfs_write_helper(buffer, count, &val); + if (rc < 0) + return rc; - cfs_spin_lock(&svc->srv_lock); - svc->srv_hpreq_ratio = val; - cfs_spin_unlock(&svc->srv_lock); - return count; + if (val < 0) + return -ERANGE; + + spin_lock(&svc->srv_lock); + svc->srv_hpreq_ratio = val; + spin_unlock(&svc->srv_lock); + + return count; } void ptlrpc_lprocfs_register_service(struct proc_dir_entry *entry, @@ -743,12 +889,30 @@ void ptlrpc_lprocfs_unregister_obd(struct obd_device *obd) EXPORT_SYMBOL(ptlrpc_lprocfs_unregister_obd); +#define BUFLEN (UUID_MAX + 5) + int lprocfs_wr_evict_client(struct file *file, const char *buffer, unsigned long count, void *data) { struct obd_device *obd = data; - char tmpbuf[sizeof(struct obd_uuid)]; + char *kbuf; + char *tmpbuf; + + OBD_ALLOC(kbuf, BUFLEN); + if (kbuf == NULL) + return -ENOMEM; + /* + * OBD_ALLOC() will zero kbuf, but we only copy BUFLEN - 1 + * bytes into kbuf, to ensure that the string is NUL-terminated. + * UUID_MAX should include a trailing NUL already. + */ + if (cfs_copy_from_user(kbuf, buffer, + min_t(unsigned long, BUFLEN - 1, count))) { + count = -EFAULT; + goto out; + } + tmpbuf = cfs_firststr(kbuf, min_t(unsigned long, BUFLEN - 1, count)); /* Kludge code(deadlock situation): the lprocfs lock has been held * since the client is evicted by writting client's * uuid/nid to procfs "evict_client" entry. However, @@ -756,10 +920,9 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer, * the proc entries under the being destroyed export{}, so I have * to drop the lock at first here. * - jay, jxiong@clusterfs.com */ - class_incref(obd, __FUNCTION__, cfs_current()); LPROCFS_EXIT(); + class_incref(obd, __FUNCTION__, cfs_current()); - sscanf(buffer, "%40s", tmpbuf); if (strncmp(tmpbuf, "nid:", 4) == 0) obd_export_evict_by_nid(obd, tmpbuf + 4); else if (strncmp(tmpbuf, "uuid:", 5) == 0) @@ -767,13 +930,17 @@ int lprocfs_wr_evict_client(struct file *file, const char *buffer, else obd_export_evict_by_uuid(obd, tmpbuf); + class_decref(obd, __FUNCTION__, cfs_current()); LPROCFS_ENTRY(); - class_decref(obd, __FUNCTION__, cfs_current()); +out: + OBD_FREE(kbuf, BUFLEN); return count; } EXPORT_SYMBOL(lprocfs_wr_evict_client); +#undef BUFLEN + int lprocfs_wr_ping(struct file *file, const char *buffer, unsigned long count, void *data) { @@ -799,4 +966,111 @@ int lprocfs_wr_ping(struct file *file, const char *buffer, } EXPORT_SYMBOL(lprocfs_wr_ping); +/* Write the connection UUID to this file to attempt to connect to that node. + * The connection UUID is a node's primary NID. For example, + * "echo connection=192.168.0.1@tcp0::instance > .../import". 
+ */ +int lprocfs_wr_import(struct file *file, const char *buffer, + unsigned long count, void *data) +{ + struct obd_device *obd = data; + struct obd_import *imp = obd->u.cli.cl_import; + char *kbuf = NULL; + char *uuid; + char *ptr; + int do_reconn = 1; + const char prefix[] = "connection="; + const int prefix_len = sizeof(prefix) - 1; + + if (count > CFS_PAGE_SIZE - 1 || count <= prefix_len) + return -EINVAL; + + OBD_ALLOC(kbuf, count + 1); + if (kbuf == NULL) + return -ENOMEM; + + if (cfs_copy_from_user(kbuf, buffer, count)) + GOTO(out, count = -EFAULT); + + kbuf[count] = 0; + + /* only support connection=uuid::instance now */ + if (strncmp(prefix, kbuf, prefix_len) != 0) + GOTO(out, count = -EINVAL); + + uuid = kbuf + prefix_len; + ptr = strstr(uuid, "::"); + if (ptr) { + __u32 inst; + char *endptr; + + *ptr = 0; + do_reconn = 0; + ptr += strlen("::"); + inst = simple_strtol(ptr, &endptr, 10); + if (*endptr) { + CERROR("config: wrong instance # %s\n", ptr); + } else if (inst != imp->imp_connect_data.ocd_instance) { + CDEBUG(D_INFO, "IR: %s is connecting to an obsoleted " + "target(%u/%u), reconnecting...\n", + imp->imp_obd->obd_name, + imp->imp_connect_data.ocd_instance, inst); + do_reconn = 1; + } else { + CDEBUG(D_INFO, "IR: %s has already been connecting to " + "new target(%u)\n", + imp->imp_obd->obd_name, inst); + } + } + + if (do_reconn) + ptlrpc_recover_import(imp, uuid, 1); + +out: + OBD_FREE(kbuf, count + 1); + return count; +} +EXPORT_SYMBOL(lprocfs_wr_import); + +int lprocfs_rd_pinger_recov(char *page, char **start, off_t off, + int count, int *eof, void *data) +{ + struct obd_device *obd = data; + struct obd_import *imp = obd->u.cli.cl_import; + int rc; + + LPROCFS_CLIMP_CHECK(obd); + rc = snprintf(page, count, "%d\n", !imp->imp_no_pinger_recover); + LPROCFS_CLIMP_EXIT(obd); + + return rc; +} +EXPORT_SYMBOL(lprocfs_rd_pinger_recov); + +int lprocfs_wr_pinger_recov(struct file *file, const char *buffer, + unsigned long count, void *data) +{ + struct obd_device *obd = data; + struct client_obd *cli = &obd->u.cli; + struct obd_import *imp = cli->cl_import; + int rc, val; + + rc = lprocfs_write_helper(buffer, count, &val); + if (rc < 0) + return rc; + + if (val != 0 && val != 1) + return -ERANGE; + + LPROCFS_CLIMP_CHECK(obd); + spin_lock(&imp->imp_lock); + imp->imp_no_pinger_recover = !val; + spin_unlock(&imp->imp_lock); + LPROCFS_CLIMP_EXIT(obd); + + return count; + +} +EXPORT_SYMBOL(lprocfs_wr_pinger_recov); + #endif /* LPROCFS */
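
Note: the seq_file position encoding described in the comment block above (and implemented by the PTLRPC_REQ_POS2CPT/CPT2POS/SEQ2POS/POS2SEQ macros) can be checked with a minimal user-space sketch. Everything below -- CPT_BITS, seq2pos(), pos2seq(), pos2cpt() -- is a hypothetical stand-in for illustration, not part of the patch:

        /*
         * Sketch of the position encoding: the low srv_cpt_bits of a history
         * sequence hold the CPT id, so the seq_file position rotates them into
         * the high bits and the timestamp into the low bits, letting
         * seq_read()'s "pos + 1" advance only the timestamp part.
         */
        #include <stdio.h>
        #include <stdint.h>

        #define CPT_BITS 2      /* stand-in for svc->srv_cpt_bits */

        /* sequence -> position: rotate the CPT id (low bits) into the high bits */
        static uint64_t seq2pos(uint64_t seq)
        {
                return CPT_BITS == 0 ? seq :
                       (seq >> CPT_BITS) | (seq << (64 - CPT_BITS));
        }

        /* position -> sequence: the inverse rotation */
        static uint64_t pos2seq(uint64_t pos)
        {
                return CPT_BITS == 0 ? pos :
                       (pos << CPT_BITS) | (pos >> (64 - CPT_BITS));
        }

        /* position -> CPT id: the CPT id now lives in the high bits */
        static uint64_t pos2cpt(uint64_t pos)
        {
                return CPT_BITS == 0 ? 0 : pos >> (64 - CPT_BITS);
        }

        int main(void)
        {
                /* request #5 on CPT 3: the low CPT_BITS hold the CPT id */
                uint64_t seq = (5ULL << CPT_BITS) | 3;
                uint64_t pos = seq2pos(seq);

                printf("seq=%#llx pos=%#llx cpt=%llu roundtrip=%#llx next_seq=%#llx\n",
                       (unsigned long long)seq, (unsigned long long)pos,
                       (unsigned long long)pos2cpt(pos),
                       (unsigned long long)pos2seq(pos),
                       (unsigned long long)pos2seq(pos + 1));
                return 0;
        }

Running it shows that pos + 1 advances only the timestamp part of the sequence (23 becomes 27 here) while the CPT id stays in the high bits of the position, which is what lets seq_read() walk one CPT's history to completion before moving to the next.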
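
Similarly, the rewritten threads_min/threads_max handlers expose service-wide totals through procfs while the service stores per-CPT values. A small sketch of that arithmetic, under the assumption of made-up stand-ins (struct fake_svc, set_threads_min(), and the PTLRPC_NTHRS_INIT value used here):

        #include <stdio.h>

        #define PTLRPC_NTHRS_INIT 2     /* stand-in value for illustration only */

        struct fake_svc {
                int srv_ncpts;
                int srv_nthrs_cpt_init;         /* per-CPT "threads_min" */
                int srv_nthrs_cpt_limit;        /* per-CPT "threads_max" */
        };

        /* mimics ptlrpc_lprocfs_wr_threads_min(): val is the requested total */
        static int set_threads_min(struct fake_svc *svc, int val)
        {
                if (val / svc->srv_ncpts < PTLRPC_NTHRS_INIT)
                        return -1;              /* -ERANGE in the patch */
                if (val > svc->srv_nthrs_cpt_limit * svc->srv_ncpts)
                        return -1;
                svc->srv_nthrs_cpt_init = val / svc->srv_ncpts;
                return 0;
        }

        int main(void)
        {
                struct fake_svc svc = {
                        .srv_ncpts = 4,
                        .srv_nthrs_cpt_init = 2,
                        .srv_nthrs_cpt_limit = 16,
                };

                if (set_threads_min(&svc, 12) == 0)     /* 12 total -> 3 per CPT */
                        printf("threads_min shown as %d (per CPT %d)\n",
                               svc.srv_nthrs_cpt_init * svc.srv_ncpts,
                               svc.srv_nthrs_cpt_init);
                return 0;
        }

Because of the integer division, writing a total that is not a multiple of srv_ncpts rounds down silently: writing 13 in this example also stores 3 per CPT and reads back as 12.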
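
Finally, the "connection=uuid::instance" format accepted by lprocfs_wr_import() can be exercised outside the kernel with the following sketch; parse_import() and cur_instance are hypothetical, but the prefix handling and the reconnect decision mirror the handler above:

        #include <stdio.h>
        #include <stdlib.h>
        #include <string.h>

        /* mirrors the decision logic of lprocfs_wr_import() in the patch */
        static void parse_import(char *kbuf, unsigned int cur_instance)
        {
                const char prefix[] = "connection=";
                const int prefix_len = sizeof(prefix) - 1;
                char *uuid, *ptr, *endptr;
                int do_reconn = 1;

                /* only "connection=uuid::instance" is supported */
                if (strncmp(prefix, kbuf, prefix_len) != 0) {
                        printf("invalid input: %s\n", kbuf);
                        return;
                }

                uuid = kbuf + prefix_len;
                ptr = strstr(uuid, "::");
                if (ptr != NULL) {
                        unsigned long inst;

                        *ptr = '\0';            /* terminate the uuid part */
                        do_reconn = 0;
                        ptr += 2;
                        inst = strtoul(ptr, &endptr, 10);
                        if (*endptr != '\0')
                                printf("wrong instance '%s'\n", ptr);
                        else if (inst != cur_instance)
                                do_reconn = 1;  /* stale instance: reconnect */
                }

                printf("uuid=%s reconnect=%d\n", uuid, do_reconn);
        }

        int main(void)
        {
                char stale[]   = "connection=192.168.0.1@tcp0::6";
                char current[] = "connection=192.168.0.1@tcp0::7";

                parse_import(stale, 7);         /* instance differs: reconnect */
                parse_import(current, 7);       /* same instance: no reconnect */
                return 0;
        }

With no "::instance" suffix the handler always attempts the reconnect; with a suffix it reconnects only when the instance number parses cleanly and differs from the currently connected one.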