From 2e27af9b3bd15f1e9ffaa397375253ebf60c7d8a Mon Sep 17 00:00:00 2001
From: isaac
Date: Wed, 9 Sep 2009 16:36:52 +0000
Subject: [PATCH] b=20703,i=rread:

- fixes typos and outdated comments. (comment-only changes)
---
 lustre/ldlm/ldlm_lib.c   |  4 ++--
 lustre/ldlm/ldlm_lockd.c | 14 +++++++-------
 lustre/mdt/mdt_handler.c |  6 +++---
 lustre/mdt/mdt_open.c    |  2 +-
 lustre/ptlrpc/client.c   |  2 +-
 lustre/ptlrpc/events.c   |  6 +++---
 lustre/ptlrpc/import.c   |  8 ++++----
 lustre/ptlrpc/sec.c      |  4 ++--
 lustre/ptlrpc/service.c  |  4 ++--
 9 files changed, 25 insertions(+), 25 deletions(-)

diff --git a/lustre/ldlm/ldlm_lib.c b/lustre/ldlm/ldlm_lib.c
index e0d3cba..c4150c8 100644
--- a/lustre/ldlm/ldlm_lib.c
+++ b/lustre/ldlm/ldlm_lib.c
@@ -2176,7 +2176,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
 exp = class_export_get (req->rq_export);
 obd = exp->exp_obd;
- /* disable reply scheduling onto srv_reply_queue while I'm setting up */
+ /* disable reply scheduling while I'm setting up */
 rs->rs_scheduled = 1;
 rs->rs_on_net = 1;
 rs->rs_xid = req->rq_xid;
@@ -2206,7 +2206,7 @@ void target_send_reply(struct ptlrpc_request *req, int rc, int fail_id)
 if (netrc != 0) {
 /* error sending: reply is off the net. Also we need +1
- * reply ref until ptlrpc_server_handle_reply() is done
+ * reply ref until ptlrpc_handle_rs() is done
 * with the reply state (if the send was successful, there
 * would have been +1 ref for the net, which
 * reply_out_callback leaves alone) */
diff --git a/lustre/ldlm/ldlm_lockd.c b/lustre/ldlm/ldlm_lockd.c
index 14a628a..76f2923 100644
--- a/lustre/ldlm/ldlm_lockd.c
+++ b/lustre/ldlm/ldlm_lockd.c
@@ -634,7 +634,7 @@ static int ldlm_cb_interpret(const struct lu_env *env,
 LASSERT(lock != NULL);
 if (rc != 0) {
 /* If client canceled the lock but the cancel has not
- * been recieved yet, we need to update lvbo to have the
+ * been received yet, we need to update lvbo to have the
 * proper attributes cached. */
 if (rc == -EINVAL && arg->type == LDLM_BL_CALLBACK)
 ldlm_res_lvbo_update(lock->l_resource, NULL,
@@ -779,7 +779,7 @@ int ldlm_server_blocking_ast(struct ldlm_lock *lock,
 }
 req->rq_send_state = LUSTRE_IMP_FULL;
- /* ptlrpc_prep_req already set timeout */
+ /* ptlrpc_request_alloc_pack already set timeout */
 if (AT_OFF)
 req->rq_timeout = ldlm_get_rq_timeout();
@@ -865,7 +865,7 @@ int ldlm_server_completion_ast(struct ldlm_lock *lock, int flags, void *data)
 ptlrpc_request_set_replen(req);
 req->rq_send_state = LUSTRE_IMP_FULL;
- /* ptlrpc_prep_req already set timeout */
+ /* ptlrpc_request_pack already set timeout */
 if (AT_OFF)
 req->rq_timeout = ldlm_get_rq_timeout();
@@ -934,7 +934,7 @@ int ldlm_server_glimpse_ast(struct ldlm_lock *lock, void *data)
 req->rq_send_state = LUSTRE_IMP_FULL;
- /* ptlrpc_prep_req already set timeout */
+ /* ptlrpc_request_alloc_pack already set timeout */
 if (AT_OFF)
 req->rq_timeout = ldlm_get_rq_timeout();
@@ -1173,9 +1173,9 @@ existing_lock:
 if (lock->l_granted_mode == lock->l_req_mode) {
 /*
 * Only cancel lock if it was granted, because it would
- * be destroyed immediatelly and would never be granted
+ * be destroyed immediately and would never be granted
 * in the future, causing timeouts on client. Not
- * granted lock will be cancelled immediatelly after
+ * granted lock will be cancelled immediately after
 * sending completion AST.
 */
 if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
@@ -1859,7 +1859,7 @@ static int ldlm_callback_handler(struct ptlrpc_request *req)
 lock_res_and_lock(lock);
 lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
 if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
- /* If somebody cancels lock and cache is already droped,
+ /* If somebody cancels lock and cache is already dropped,
 * or lock is failed before cp_ast received on client,
 * we can tell the server we have no lock. Otherwise, we
 * should send cancel after dropping the cache. */
diff --git a/lustre/mdt/mdt_handler.c b/lustre/mdt/mdt_handler.c
index 41fe562..8d24afe 100644
--- a/lustre/mdt/mdt_handler.c
+++ b/lustre/mdt/mdt_handler.c
@@ -5758,7 +5758,7 @@ struct md_ucred *mdt_ucred(const struct mdt_thread_info *info)
 }
 /**
- * Enable/disable COS.
+ * Enable/disable COS (Commit On Sharing).
 *
 * Set/Clear the COS flag in mdt options.
 *
@@ -5782,9 +5782,9 @@ void mdt_enable_cos(struct mdt_device *mdt, int val)
 }
 /**
- * Check COS status.
+ * Check COS (Commit On Sharing) status.
 *
- * Return COS flag status/
+ * Return COS flag status.
 *
 * \param mdt mdt device
 */
diff --git a/lustre/mdt/mdt_open.c b/lustre/mdt/mdt_open.c
index bd976a3..99a391a 100644
--- a/lustre/mdt/mdt_open.c
+++ b/lustre/mdt/mdt_open.c
@@ -343,7 +343,7 @@ static void mdt_empty_transno(struct mdt_thread_info* info)
 struct ptlrpc_request *req = mdt_info_req(info);
 ENTRY;
- /* transaction is occured already */
+ /* transaction has occurred already */
 if (lustre_msg_get_transno(req->rq_repmsg) != 0) {
 EXIT;
 return;
diff --git a/lustre/ptlrpc/client.c b/lustre/ptlrpc/client.c
index cec226d..40500cf 100644
--- a/lustre/ptlrpc/client.c
+++ b/lustre/ptlrpc/client.c
@@ -2389,7 +2389,7 @@ after_send:
 /* success so far. Note that anything going wrong
 * with bulk now, is EXTREMELY strange, since the
 * server must have believed that the bulk
- * tranferred OK before she replied with success to
+ * transferred OK before she replied with success to
 * me. */
 lwi = LWI_TIMEOUT(timeout, NULL, NULL);
 brc = l_wait_event(req->rq_reply_waitq,
diff --git a/lustre/ptlrpc/events.c b/lustre/ptlrpc/events.c
index 92cdd7b..c08beb2 100644
--- a/lustre/ptlrpc/events.c
+++ b/lustre/ptlrpc/events.c
@@ -336,8 +336,8 @@ void reply_out_callback(lnet_event_t *ev)
 LASSERT (rs->rs_on_net);
 if (ev->unlinked) {
- /* Last network callback. The net's ref on 'rs' stays put
- * until ptlrpc_server_handle_reply() is done with it */
+ /* Last network callback. The net's ref on 'rs' stays put
+ * until ptlrpc_handle_rs() is done with it */
 spin_lock(&svc->srv_lock);
 spin_lock(&rs->rs_lock);
 rs->rs_on_net = 0;
@@ -625,7 +625,7 @@ liblustre_check_events (int timeout)
 LASSERT (rc == -EOVERFLOW || rc == 1);
- /* liblustre: no asynch callback so we can't affort to miss any
+ /* liblustre: no asynch callback so we can't afford to miss any
 * events... */
 if (rc == -EOVERFLOW) {
 CERROR ("Dropped an event!!!\n");
diff --git a/lustre/ptlrpc/import.c b/lustre/ptlrpc/import.c
index 73ae6c3..be0264d 100644
--- a/lustre/ptlrpc/import.c
+++ b/lustre/ptlrpc/import.c
@@ -478,7 +478,7 @@ static int import_select_connection(struct obd_import *imp)
 continue;
 }
- /* If we have not tried this connection since the
+ /* If we have not tried this connection since the
 last successful attempt, go with this one */
 if ((conn->oic_last_attempt == 0) ||
 cfs_time_beforeq_64(conn->oic_last_attempt,
@@ -766,7 +766,7 @@ static void ptlrpc_maybe_ping_import_soon(struct obd_import *imp)
 wake_pinger = 1;
 }
 #else
- /* liblustre has no pinger thead, so we wakup pinger anyway */
+ /* liblustre has no pinger thread, so we wake up the pinger anyway */
 wake_pinger = 1;
 #endif
@@ -1070,7 +1070,7 @@ finish:
 /* Reset ns_connect_flags only for initial connect. It might be
 * changed in while using FS and if we reset it in reconnect
- * this leads to lossing user settings done before such as
+ * this leads to losing user settings done before, such as
 * disable lru_resize, etc. */
 if (old_connect_flags != exp->exp_connect_flags ||
 aa->pcaa_initial_connect) {
@@ -1277,7 +1277,7 @@ int ptlrpc_import_recovery_state_machine(struct obd_import *imp)
 #ifdef __KERNEL__
 /* bug 17802: XXX client_disconnect_export vs connect request
 * race. if client will evicted at this time, we start
- * invalidate thread without referece to import and import can
+ * invalidate thread without reference to import and import can
 * be freed at same time. */
 class_import_get(imp);
 rc = cfs_kernel_thread(ptlrpc_invalidate_import_thread, imp,
diff --git a/lustre/ptlrpc/sec.c b/lustre/ptlrpc/sec.c
index e97dd78..4447b43 100644
--- a/lustre/ptlrpc/sec.c
+++ b/lustre/ptlrpc/sec.c
@@ -686,9 +686,9 @@ again:
 * Commonly the original context should be uptodate because we
 * have a expiry nice time; And server will keep their half part
 * context because we at least hold a ref of old context which
- * prevent the context detroy RPC be sent. So server still can
+ * prevents the context destroy RPC from being sent. So server still can
 * accept the request and finish RPC. Two cases:
- * 1. If server side context has been trimed, a NO_CONTEXT will
+ * 1. If server side context has been trimmed, a NO_CONTEXT will
 * be returned, gss_cli_ctx_verify/unseal will switch to new
 * context by force.
 * 2. Current context never be refreshed, then we are fine: we
diff --git a/lustre/ptlrpc/service.c b/lustre/ptlrpc/service.c
index 799b462..76bcc24 100644
--- a/lustre/ptlrpc/service.c
+++ b/lustre/ptlrpc/service.c
@@ -231,7 +231,7 @@ static void rs_batch_init(struct rs_batch *b)
 /**
 * Dispatch all replies accumulated in the batch to one from
- * dedicated reply handing threads.
+ * dedicated reply handling threads.
 *
 * \param b batch
 */
@@ -1740,7 +1740,7 @@ ptlrpc_handle_rs (struct ptlrpc_reply_state *rs)
 * and process it.
 *
 * \param svc a ptlrpc service
- * \retval 0 no replies processes
+ * \retval 0 no replies processed
 * \retval 1 one reply processed
 */
 static int
-- 
1.8.3.1