- Fixes typos and outdated comments (comment-only changes).
exp = class_export_get (req->rq_export);
obd = exp->exp_obd;
exp = class_export_get (req->rq_export);
obd = exp->exp_obd;
- /* disable reply scheduling onto srv_reply_queue while I'm setting up */
+ /* disable reply scheduling while I'm setting up */
rs->rs_scheduled = 1;
rs->rs_on_net = 1;
rs->rs_xid = req->rq_xid;
rs->rs_scheduled = 1;
rs->rs_on_net = 1;
rs->rs_xid = req->rq_xid;
if (netrc != 0) {
/* error sending: reply is off the net. Also we need +1
if (netrc != 0) {
/* error sending: reply is off the net. Also we need +1
- * reply ref until ptlrpc_server_handle_reply() is done
+ * reply ref until ptlrpc_handle_rs() is done
* with the reply state (if the send was successful, there
* would have been +1 ref for the net, which
* reply_out_callback leaves alone) */
* with the reply state (if the send was successful, there
* would have been +1 ref for the net, which
* reply_out_callback leaves alone) */
LASSERT(lock != NULL);
if (rc != 0) {
/* If client canceled the lock but the cancel has not
LASSERT(lock != NULL);
if (rc != 0) {
/* If client canceled the lock but the cancel has not
- * been recieved yet, we need to update lvbo to have the
+ * been received yet, we need to update lvbo to have the
* proper attributes cached. */
if (rc == -EINVAL && arg->type == LDLM_BL_CALLBACK)
ldlm_res_lvbo_update(lock->l_resource, NULL,
* proper attributes cached. */
if (rc == -EINVAL && arg->type == LDLM_BL_CALLBACK)
ldlm_res_lvbo_update(lock->l_resource, NULL,
}
req->rq_send_state = LUSTRE_IMP_FULL;
}
req->rq_send_state = LUSTRE_IMP_FULL;
- /* ptlrpc_prep_req already set timeout */
+ /* ptlrpc_request_alloc_pack already set timeout */
if (AT_OFF)
req->rq_timeout = ldlm_get_rq_timeout();
if (AT_OFF)
req->rq_timeout = ldlm_get_rq_timeout();
ptlrpc_request_set_replen(req);
req->rq_send_state = LUSTRE_IMP_FULL;
ptlrpc_request_set_replen(req);
req->rq_send_state = LUSTRE_IMP_FULL;
- /* ptlrpc_prep_req already set timeout */
+ /* ptlrpc_request_pack already set timeout */
if (AT_OFF)
req->rq_timeout = ldlm_get_rq_timeout();
if (AT_OFF)
req->rq_timeout = ldlm_get_rq_timeout();
req->rq_send_state = LUSTRE_IMP_FULL;
req->rq_send_state = LUSTRE_IMP_FULL;
- /* ptlrpc_prep_req already set timeout */
+ /* ptlrpc_request_alloc_pack already set timeout */
if (AT_OFF)
req->rq_timeout = ldlm_get_rq_timeout();
if (AT_OFF)
req->rq_timeout = ldlm_get_rq_timeout();
if (lock->l_granted_mode == lock->l_req_mode) {
/*
* Only cancel lock if it was granted, because it would
if (lock->l_granted_mode == lock->l_req_mode) {
/*
* Only cancel lock if it was granted, because it would
- * be destroyed immediatelly and would never be granted
+ * be destroyed immediately and would never be granted
* in the future, causing timeouts on client. Not
* in the future, causing timeouts on client. Not
- * granted lock will be cancelled immediatelly after
+ * granted lock will be cancelled immediately after
* sending completion AST.
*/
if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
* sending completion AST.
*/
if (dlm_rep->lock_flags & LDLM_FL_CANCEL_ON_BLOCK) {
lock_res_and_lock(lock);
lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
lock_res_and_lock(lock);
lock->l_flags |= (dlm_req->lock_flags & LDLM_AST_FLAGS);
if (lustre_msg_get_opc(req->rq_reqmsg) == LDLM_BL_CALLBACK) {
- /* If somebody cancels lock and cache is already droped,
+ /* If somebody cancels lock and cache is already dropped,
* or lock is failed before cp_ast received on client,
* we can tell the server we have no lock. Otherwise, we
* should send cancel after dropping the cache. */
* or lock is failed before cp_ast received on client,
* we can tell the server we have no lock. Otherwise, we
* should send cancel after dropping the cache. */
+ * Enable/disable COS (Commit On Sharing).
*
* Set/Clear the COS flag in mdt options.
*
*
* Set/Clear the COS flag in mdt options.
*
+ * Check COS (Commit On Sharing) status.
- * Return COS flag status/
+ * Return COS flag status.
*
* \param mdt mdt device
*/
*
* \param mdt mdt device
*/
struct ptlrpc_request *req = mdt_info_req(info);
ENTRY;
struct ptlrpc_request *req = mdt_info_req(info);
ENTRY;
- /* transaction is occured already */
+ /* transaction has occurred already */
if (lustre_msg_get_transno(req->rq_repmsg) != 0) {
EXIT;
return;
if (lustre_msg_get_transno(req->rq_repmsg) != 0) {
EXIT;
return;
/* success so far. Note that anything going wrong
* with bulk now, is EXTREMELY strange, since the
* server must have believed that the bulk
/* success so far. Note that anything going wrong
* with bulk now, is EXTREMELY strange, since the
* server must have believed that the bulk
- * tranferred OK before she replied with success to
+ * transferred OK before she replied with success to
* me. */
lwi = LWI_TIMEOUT(timeout, NULL, NULL);
brc = l_wait_event(req->rq_reply_waitq,
* me. */
lwi = LWI_TIMEOUT(timeout, NULL, NULL);
brc = l_wait_event(req->rq_reply_waitq,
LASSERT (rs->rs_on_net);
if (ev->unlinked) {
LASSERT (rs->rs_on_net);
if (ev->unlinked) {
- /* Last network callback. The net's ref on 'rs' stays put
- * until ptlrpc_server_handle_reply() is done with it */
+ /* Last network callback. The net's ref on 'rs' stays put
+ * until ptlrpc_handle_rs() is done with it */
spin_lock(&svc->srv_lock);
spin_lock(&rs->rs_lock);
rs->rs_on_net = 0;
spin_lock(&svc->srv_lock);
spin_lock(&rs->rs_lock);
rs->rs_on_net = 0;
LASSERT (rc == -EOVERFLOW || rc == 1);
LASSERT (rc == -EOVERFLOW || rc == 1);
- /* liblustre: no asynch callback so we can't affort to miss any
+ /* liblustre: no async callback so we can't afford to miss any
* events... */
if (rc == -EOVERFLOW) {
CERROR ("Dropped an event!!!\n");
* events... */
if (rc == -EOVERFLOW) {
CERROR ("Dropped an event!!!\n");
- /* If we have not tried this connection since the
+ /* If we have not tried this connection since
the last successful attempt, go with this one */
if ((conn->oic_last_attempt == 0) ||
cfs_time_beforeq_64(conn->oic_last_attempt,
the last successful attempt, go with this one */
if ((conn->oic_last_attempt == 0) ||
cfs_time_beforeq_64(conn->oic_last_attempt,
- /* liblustre has no pinger thead, so we wakup pinger anyway */
+ /* liblustre has no pinger thread, so we wake up the pinger anyway */
/* Reset ns_connect_flags only for initial connect. It might be
* changed in while using FS and if we reset it in reconnect
/* Reset ns_connect_flags only for initial connect. It might be
* changed in while using FS and if we reset it in reconnect
- * this leads to lossing user settings done before such as
+ * this leads to losing user settings done before such as
* disable lru_resize, etc. */
if (old_connect_flags != exp->exp_connect_flags ||
aa->pcaa_initial_connect) {
* disable lru_resize, etc. */
if (old_connect_flags != exp->exp_connect_flags ||
aa->pcaa_initial_connect) {
#ifdef __KERNEL__
/* bug 17802: XXX client_disconnect_export vs connect request
* race. if client will evicted at this time, we start
#ifdef __KERNEL__
/* bug 17802: XXX client_disconnect_export vs connect request
* race. if client will evicted at this time, we start
- * invalidate thread without referece to import and import can
+ * invalidate thread without reference to import and import can
* be freed at same time. */
class_import_get(imp);
rc = cfs_kernel_thread(ptlrpc_invalidate_import_thread, imp,
* be freed at same time. */
class_import_get(imp);
rc = cfs_kernel_thread(ptlrpc_invalidate_import_thread, imp,
* Commonly the original context should be uptodate because we
* have a expiry nice time; And server will keep their half part
* context because we at least hold a ref of old context which
* Commonly the original context should be uptodate because we
* have a expiry nice time; And server will keep their half part
* context because we at least hold a ref of old context which
- * prevent the context detroy RPC be sent. So server still can
+ * prevent the context destroy RPC from being sent. So server still can
* accept the request and finish RPC. Two cases:
* accept the request and finish RPC. Two cases:
- * 1. If server side context has been trimed, a NO_CONTEXT will
+ * 1. If server side context has been trimmed, a NO_CONTEXT will
* be returned, gss_cli_ctx_verify/unseal will switch to new
* context by force.
* 2. Current context never be refreshed, then we are fine: we
* be returned, gss_cli_ctx_verify/unseal will switch to new
* context by force.
* 2. Current context never be refreshed, then we are fine: we
/**
* Dispatch all replies accumulated in the batch to one from
/**
* Dispatch all replies accumulated in the batch to one from
- * dedicated reply handing threads.
+ * dedicated reply handling threads.
* and process it.
*
* \param svc a ptlrpc service
* and process it.
*
* \param svc a ptlrpc service
- * \retval 0 no replies processes
+ * \retval 0 no replies processed
* \retval 1 one reply processed
*/
static int
* \retval 1 one reply processed
*/
static int