# go away and the target just specify the $RPMSMPTYPE
[ -z "$RPMSMPTYPE" ] && set_rpm_smp_type
- # CC might have been overwriten in TARGET_FILE
+ # CC might have been overwritten in TARGET_FILE
if [[ $CC != ccache\ * ]] && which "$CCACHE" &>/dev/null; then
export CCACHE && export CC="ccache $CC"
fi
- OST: mkfs.lustre --fsname spfs --ost --mgsnode=$mgsnode@tcp0 $device
Additional options are:
- --reformat - this partition was previously formated with lustre, reformat it
- --param="failover.mode=failout" - don't hang the complete fs if a ost isn't available.
+ --reformat - this partition was previously formatted with lustre, reformat it
+ --param="failover.mode=failout" - don't hang the entire fs if an ost isn't available.
- Now mount the partitons on the servers:
- first the MDT/MGS Server:
if (down_read_trylock(&mm->mmap_sem) == 0)
return -EDEADLK;
- /* ignore errors, just check how much was sucessfully transfered */
+ /* ignore errors, just check how much was successfully transferred */
while (len) {
int bytes, rc, offset;
void *maddr;
Put request
-----------
-1. NAL notices that there is a incoming message header on the network
+1. NAL notices that there is an incoming message header on the network
and reads an ptl_hdr_t in from the wire.
2. It may store additional NAL specific data that provides context
\layout Standard
\noindent
-On succesfful return, this location will hold the Portal index where the
+On successful return, this location will hold the Portal index where the
match list has been attached.
\end_inset
</cell>
* cdm_hndl Communication Domain Handle
*
* Returns:
- * GNI_RC_SUCCESS - The operation completioned succesffully
+ * GNI_RC_SUCCESS - The operation completed successfully
* GNI_RC_INVALID_PARAM - Caller specified an invalid Communication Domain Handle
*
* Description:
* cdm_handle Communication Domain Handle
*
* Returns:
- * GNI_RC_SUCCESS - The operation completioned succesffully
+ * GNI_RC_SUCCESS - The operation completed successfully
* GNI_RC_INVALID_PARAM - Caller specified an invalid Communication Domain Handle
*
* Description:
* cdm_handle Communication Domain Handle
*
* Returns:
- * GNI_RC_SUCCESS - The operation completioned succesffully
+ * GNI_RC_SUCCESS - The operation completed successfully
* GNI_RC_INVALID_PARAM - Caller specified an invalid Communication Domain Handle
*
* Description:
/**
* GNI_SmsgBufferSizeNeeded - Return amount of memory required for short message
- * resources given parameters in a inut short message
- * attributes structure
+ * resources given parameters in an input short
+ * message attributes structure
* IN
* smsg_attr pointer to short message attributes structure
*
* Description:
*
* The remote PE address provided is assigned to an SMSG control structure and
- * mailbox for use in a inter-node connection. An attribute structure
+ * mailbox for use in an inter-node connection. An attribute structure
* describing the assigned resources is then returned. The attributes must be
* traded with the remote end-point to establish the connection.
**/
);
/**
- * gni_smsg_buff_size_needed - Return amount of memory required for short message
- * resources given parameters in a inut short message
- * attributes structure
+ * gni_smsg_buff_size_needed - Return amount of memory required for short
+ * message resources given parameters in an input
+ * short message attributes structure
* IN
* local_smsg_attr parameters for short messaging
*
* == 0: reschedule if someone marked him WANTS_SCHED
* > 0 : force a reschedule */
/* Return code 0 means it did not schedule the conn, 1
- * means it succesfully scheduled the conn.
+ * means it successfully scheduled the conn.
*/
int
rc = kgnilnd_map_buffer(tx);
}
- /* rc should be 0 if we mapped succesfully here, if non-zero we are queueing */
+ /* rc should be 0 if we mapped successfully here, if non-zero
+ * we are queueing */
if (rc != 0) {
/* if try_map_if_full set, they handle requeuing */
if (unlikely(try_map_if_full)) {
static void
brw_server_rpc_done(srpc_server_rpc_t *rpc)
{
- srpc_bulk_t *blk = rpc->srpc_bulk;
+ srpc_bulk_t *blk = rpc->srpc_bulk;
- if (blk == NULL) return;
+ if (blk == NULL)
+ return;
- if (rpc->srpc_status != 0)
- CERROR ("Bulk transfer %s %s has failed: %d\n",
- blk->bk_sink ? "from" : "to",
- libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
- else
- CDEBUG (D_NET, "Transfered %d pages bulk data %s %s\n",
- blk->bk_niov, blk->bk_sink ? "from" : "to",
- libcfs_id2str(rpc->srpc_peer));
+ if (rpc->srpc_status != 0)
+ CERROR("Bulk transfer %s %s has failed: %d\n",
+ blk->bk_sink ? "from" : "to",
+ libcfs_id2str(rpc->srpc_peer), rpc->srpc_status);
+ else
+ CDEBUG(D_NET, "Transferred %d pages bulk data %s %s\n",
+ blk->bk_niov, blk->bk_sink ? "from" : "to",
+ libcfs_id2str(rpc->srpc_peer));
- sfw_free_pages(rpc);
+ sfw_free_pages(rpc);
}
static int
if (rpc->crpc_timeout == 0)
return;
- /* timer sucessfully defused */
+ /* timer successfully defused */
if (stt_del_timer(&rpc->crpc_timer))
return;
/*
* cYAML_build_tree
- * Build a tree representation of the YAML formated text passed in.
+ * Build a tree representation of the YAML formatted text passed in.
*
* yaml_file - YAML file to parse and build tree representation
* yaml_blk - blk of YAML. yaml_file takes precedence if both
The script must be customised according to the components under test and
where it should keep its working files. Customization variables are
described clearly at Customization variables Section in the script.
-Please see maximum suported value ranges for customization variables
-in the srcipt.
+Please see maximum supported value ranges for customization variables
+in the script.
To run against a local disk:
---------------------------
Severity : enhancement
Bugzilla : 4900
Description: Async OSC create to avoid the blocking unnecessarily.
-Details : If a OST has no remain object, system will block on the creating
- when need to create a new object on this OST. Now, ways use
- pre-created objects when available, instead of blocking on an
- empty osc while others are not empty. If we must block, we block
- for the shortest possible period of time.
+Details : If an OST has no remaining object, system will block on the
+ creation when it needs to create a new object on this OST. Now,
+	     always use pre-created objects when available, instead of blocking on
+ an empty osc while others are not empty. If we must block, we
+ block for the shortest possible period of time.
Severity : major
Bugzilla : 11710
Severity : enhancement
Bugzilla : 4900
Description: Async OSC create to avoid the blocking unnecessarily.
-Details : If a OST has no remain object, system will block on the creating
+Details : If an OST has no remaining object, system will block on the creating
when need to create a new object on this OST. Now, ways use
pre-created objects when available, instead of blocking on an
empty osc while others are not empty. If we must block, we block
Bugzilla : 9489, 3273
Description: First write from each client to each OST was only 4kB in size,
to initialize client writeback cache, which caused sub-optimal
- RPCs and poor layout on disk for the first writen file.
+ RPCs and poor layout on disk for the first written file.
Details : Clients now request an initial cache grant at (re)connect time
and so that they can start streaming writes to the cache right
away and always do full-sized RPCs if there is enough data.
Simply type ./wsbuild on a node that match one of the disribution specified
above to create the RPMs for wirkshark and the associated Lustre/LNet plugins.
-Upon successs the build results can be found in directory
+Upon success the build results can be found in directory
'wireshark-<version>/packaging/rpm/RPMS/x86_64' relative to this directory.
The RPMs are:
IO contexts are owned by a thread (or, potentially a group of threads) doing
IO, and need neither reference counting nor indexing. Similarly, transfer
-requests are owned by a OSC device, and their lifetime is from RPC creation
+requests are owned by an OSC device, and their lifetime is from RPC creation
until completion notification.
1.7. State Machines
*/
struct cl_req {
enum cl_req_type crq_type;
- /** A list of pages being transfered */
+ /** A list of pages being transferred */
struct list_head crq_pages;
/** Number of pages in cl_req::crq_pages */
unsigned crq_nrpages;
/**
* Check if a fid is igif or not.
* \param fid the fid to be tested.
- * \return true if the fid is a igif; otherwise false.
+ * \return true if the fid is an igif; otherwise false.
*/
static inline bool fid_seq_is_igif(__u64 seq)
{
/**
* Check if a fid is idif or not.
* \param fid the fid to be tested.
- * \return true if the fid is a idif; otherwise false.
+ * \return true if the fid is an idif; otherwise false.
*/
static inline bool fid_seq_is_idif(__u64 seq)
{
}
/**
- * Get inode number from a igif.
- * \param fid a igif to get inode number from.
+ * Get inode number from an igif.
+ * \param fid an igif to get inode number from.
* \return inode number for the igif.
*/
static inline ino_t lu_igif_ino(const struct lu_fid *fid)
extern void lustre_swab_ost_id(struct ost_id *oid);
/**
- * Get inode generation from a igif.
- * \param fid a igif to get inode generation from.
+ * Get inode generation from an igif.
+ * \param fid an igif to get inode generation from.
* \return inode generation for the igif.
*/
static inline __u32 lu_igif_gen(const struct lu_fid *fid)
* List of hr_flags (bit field)
*/
#define HSM_FORCE_ACTION 0x0001
-/* used by CT, connot be set by user */
+/* used by CT, cannot be set by user */
#define HSM_GHOST_COPY 0x0002
/**
struct hsm_action_item {
__u32 hai_len; /* valid size of this struct */
__u32 hai_action; /* hsm_copytool_action, but use known size */
- lustre_fid hai_fid; /* Lustre FID to operated on */
+ lustre_fid hai_fid; /* Lustre FID to operate on */
lustre_fid hai_dfid; /* fid used for data access */
struct hsm_extent hai_extent; /* byte range to operate on */
__u64 hai_cookie; /* action cookie from coordinator */
/**
* Update Lock Value Block Operations (LVBO) on a resource taking into account
- * data from reqest \a r
+ * data from request \a r
*/
static inline int ldlm_res_lvbo_update(struct ldlm_resource *res,
struct ptlrpc_request *r, int increase)
struct nid_stat *exp_nid_stats;
/** Active connetion */
struct ptlrpc_connection *exp_connection;
- /** Connection count value from last succesful reconnect rpc */
- __u32 exp_conn_cnt;
- /** Hash list of all ldlm locks granted on this export */
- cfs_hash_t *exp_lock_hash;
+ /** Connection count value from last successful reconnect rpc */
+ __u32 exp_conn_cnt;
+ /** Hash list of all ldlm locks granted on this export */
+ cfs_hash_t *exp_lock_hash;
/**
* Hash list for Posix lock deadlock detection, added with
* ldlm_lock::l_exp_flock_hash.
char lsf_name[80];
/**
- * Just reformated or upgraded, and this flag is being
+ * Just reformatted or upgraded, and this flag is being
* used to check whether the local FLDB is needs to be
* synced with global FLDB(in MDT0), and it is only needed
* if the MDT is upgraded from < 2.6 to 2.6, i.e. when the
struct lustre_handle imp_remote_handle;
/** When to perform next ping. time in jiffies. */
cfs_time_t imp_next_ping;
- /** When we last succesfully connected. time in 64bit jiffies */
+ /** When we last successfully connected. time in 64bit jiffies */
__u64 imp_last_success_conn;
/** List of all possible connection for import. */
dlm = req_capsule_client_get(pill, &RMF_DLM_REQ);
LASSERT(dlm);
/* Skip first lock handler in ldlm_request_pack(),
- * this method will incrment @lock_count according
+ * this method will increment @lock_count according
* to the lock handle amount actually written to
* the buffer. */
dlm->lock_count = canceloff;
};
/*
- * Check whether file has possible unwriten pages.
+ * Check whether file has possible unwritten pages.
*
* \retval 1 file is mmap-ed or has dirty pages
* 0 otherwise
* failure, then the layer above LOD sends this defined striping
* using ->do_xattr_set(), so LOD uses this method to replay creation
* of the stripes. Notice the original information for the striping
- * (#stripes, FIDs, etc) was transfered in declare path.
+ * (#stripes, FIDs, etc) was transferred in declare path.
*
* \param[in] env execution environment
* \param[in] dt the striped object
* No permission check is needed.
*
* returns 1: if fid is ancestor of @mo;
- * returns 0: if fid is not a ancestor of @mo;
+ * returns 0: if fid is not an ancestor of @mo;
*
* returns EREMOTE if remote object is found, fid of remote object is saved to
* @fid;
cdt->cdt_max_requests)
break;
- /* first search if the request if known in the list we have
- * build and if there is room in the request vector */
+ /* first search whether the request is found in the list we
+ * have built and if there is room in the request vector */
empty_slot = -1;
found = -1;
for (i = 0; i < hsd->max_requests &&
struct cdt_restore_handle *crh;
/* restore in data FID done, we swap the layouts
- * only if restore is successfull */
+ * only if restore is successful */
if (pgs->hpk_errval == 0) {
rc = hsm_swap_layouts(mti, &car->car_hai->hai_fid,
&car->car_hai->hai_dfid, &mh);
}
/**
- * check if a request is comptaible with file status
+ * check if a request is compatible with file status
* \param hai [IN] request description
* \param hal_an [IN] request archive number (not used)
* \param rq_flags [IN] request flags
*
* Archive number is changed iif the value is not 0.
* The new flagset that will be computed should result in a coherent state.
- * This function checks that are flags are compatible.
+ * This function checks that flags are compatible.
*
* This is MDS_HSM_STATE_SET RPC handler.
*/
RETURN(ERR_PTR(-ENOENT));
}
- CDEBUG(D_HSM, "llog succesfully initialized, start from "LPD64"\n",
+ CDEBUG(D_HSM, "llog successfully initialized, start from "LPD64"\n",
*pos);
/* first call = rewind */
if (*pos == 0) {
crp->crp_max = 0;
}
-/** Allocate/init a agent request and its sub-structures.
+/** Allocate/init an agent request and its sub-structures.
*
* \param compound_id [IN]
* \param archive_id [IN]
}
/**
- * Free a agent request and its sub-structures.
+ * Free an agent request and its sub-structures.
*
* \param car [IN] Request to be freed.
*/
/* LU-2275, simulate broken behaviour (esp. prevalent in
* pre-2.4 servers where a very strange reply is sent on error
- * that looks like it was actually almost succesful and a failure at the
- * same time */
+ * that looks like it was actually almost successful and a
+ * failure at the same time.) */
if (OBD_FAIL_CHECK(OBD_FAIL_MDS_NEGATIVE_POSITIVE)) {
mdt_set_disposition(info, rep, DISP_OPEN_OPEN |
DISP_LOOKUP_NEG |
/**
* Version of cl_page_delete() that can be called for not fully constructed
- * pages, e.g,. in a error handling cl_page_find()->cl_page_delete0()
+ * pages, e.g. in an error handling cl_page_find()->cl_page_delete0()
* path. Doesn't check page invariant.
*/
static void cl_page_delete0(const struct lu_env *env, struct cl_page *pg)
EXPORT_SYMBOL(dt_locate_at);
/**
- * find a object named \a entry in given \a dfh->dfh_o directory.
+ * find an object named \a entry in given \a dfh->dfh_o directory.
*/
static int dt_find_entry(const struct lu_env *env, const char *entry, void *data)
{
* is safe to take the lu_sites_guard lock.
*
* Ideally we should accurately return the remaining number of cached
- * objects without taking the lu_sites_guard lock, but this is not
+ * objects without taking the lu_sites_guard lock, but this is not
* possible in the current implementation.
*/
static int lu_cache_shrink(SHRINKER_ARGS(sc, nr_to_scan, gfp_mask))
*
* - ldlm calls dlmlock->l_blocking_ast(..., LDLM_CB_BLOCKING) to notify
* us that dlmlock conflicts with another lock that some client is
- * enqueing. Lock is canceled.
+ * enqueuing. Lock is canceled.
*
* - cl_lock_cancel() is called. osc_lock_cancel() calls
* ldlm_cli_cancel() that calls
*/
static void osc_lru_use(struct client_obd *cli, struct osc_page *opg)
{
- /* If page is being transfered for the first time,
+ /* If page is being transferred for the first time,
* ops_lru should be empty */
if (opg->ops_in_lru && !list_empty(&opg->ops_lru)) {
spin_lock(&cli->cl_lru_list_lock);
*
* \param[in] obj pointer to the OSP object
* \param[in,out] poxe double pointer to the OSP object extended attribute
- * entry: the new extended attribute entry is transfered
+ * entry: the new extended attribute entry is transferred
* via such pointer target, and if old the extended
* attribute entry exists, then it will be returned back
* via such pointer target.
* Get more records for the iteration from peer.
*
* The new records will be filled in an array of pages. The OSP side
- * allows 1MB bulk data to be transfered.
+ * allows 1MB bulk data to be transferred.
*
* \param[in] env pointer to the thread context
* \param[in] it pointer to the iteration structure
*
* An interpretation callback called by ptlrpc for OST_STATFS RPC when it is
* replied by the target. It's used to maintain statfs cache for the target.
- * The function fills data from the reply if succesfull and schedules another
+ * The function fills data from the reply if successful and schedules another
* update.
*
* \param[in] env LU environment provided by the caller
}
/*
- * Set size regular attribute on a object
+ * Set size regular attribute on an object
*
* When a striping is created late, it's possible that size is already
* initialized on the file. Then the new striping should inherit size
* The callback is called by ptlrpc when RPC is replied. Now we have to decide
* whether we should:
* - put request on a special list to wait until it's committed by the target,
- * if the request is succesful
+ * if the request is successful
* - schedule llog record cancel if no target object is found
* - try later (essentially after reboot) in case of unexpected error
*
* OUT RPC.
*
* For the asynchronous idempotent operations, such as get_(x)attr, related
- * RPCs will be inserted into a osp_device based shared asynchronous request
+ * RPCs will be inserted into an osp_device based shared asynchronous request
* queue - osp_device::opd_async_requests. When the queue is full, all the
* requests in the queue will be packaged into a single OUT RPC and given to
* the ptlrpcd daemon (for sending), then the queue is purged and other new
static int __ptlrpc_req_finished(struct ptlrpc_request *request, int locked);
/**
* Drop one request reference. Must be called with import imp_lock held.
- * When reference count drops to zero, reuqest is freed.
+ * When reference count drops to zero, request is freed.
*/
void ptlrpc_req_finished_with_imp_lock(struct ptlrpc_request *request)
{
/**
* Callback used for replayed requests reply processing.
- * In case of succesful reply calls registeresd request replay callback.
+ * In case of successful reply calls registered request replay callback.
* In case of error restart replay process.
*/
static int ptlrpc_replay_interpret(const struct lu_env *env,
if (p != end)
goto out_err;
- CDEBUG(D_SEC, "succesfully imported rfc1964 context\n");
+ CDEBUG(D_SEC, "successfully imported rfc1964 context\n");
return 0;
out_err:
return GSS_S_FAILURE;
if (get_keyblock(&p, end, &kctx->kc_keyc, keysize))
goto out_err;
- CDEBUG(D_SEC, "succesfully imported v2 context\n");
+ CDEBUG(D_SEC, "successfully imported v2 context\n");
return 0;
out_err:
return GSS_S_FAILURE;
goto out_err;
gctx_new->internal_ctx_id = knew;
- CDEBUG(D_SEC, "succesfully copied reverse context\n");
+ CDEBUG(D_SEC, "successfully copied reverse context\n");
return GSS_S_COMPLETE;
out_err:
return GSS_S_FAILURE;
gss_context->internal_ctx_id = null_context;
- CDEBUG(D_SEC, "succesfully imported null context\n");
+ CDEBUG(D_SEC, "successfully imported null context\n");
return GSS_S_COMPLETE;
}
null_context_old = gss_context_old->internal_ctx_id;
memcpy(null_context_new, null_context_old, sizeof(*null_context_new));
gss_context_new->internal_ctx_id = null_context_new;
- CDEBUG(D_SEC, "succesfully copied reverse null context\n");
+ CDEBUG(D_SEC, "successfully copied reverse null context\n");
return GSS_S_COMPLETE;
}
return GSS_S_FAILURE;
gss_context->internal_ctx_id = sk_context;
- CDEBUG(D_SEC, "succesfully imported sk context\n");
+ CDEBUG(D_SEC, "successfully imported sk context\n");
return GSS_S_COMPLETE;
}
sk_context_old = gss_context_old->internal_ctx_id;
memcpy(sk_context_new, sk_context_old, sizeof(*sk_context_new));
gss_context_new->internal_ctx_id = sk_context_new;
- CDEBUG(D_SEC, "succesfully copied reverse sk context\n");
+ CDEBUG(D_SEC, "successfully copied reverse sk context\n");
return GSS_S_COMPLETE;
}
rawobj_free(&gctx->gc_handle);
}
-/*
+/**
* Based on sequence number algorithm as specified in RFC 2203.
*
- * modified for our own problem: arriving request has valid sequence number,
+ * Modified for our own problem: arriving request has valid sequence number,
* but unwrapping request might cost a long time, after that its sequence
* are not valid anymore (fall behind the window). It rarely happen, mostly
* under extreme load.
*
- * note we should not check sequence before verify the integrity of incoming
+ * Note we should not check sequence before verifying the integrity of incoming
* request, because just one attacking request with high sequence number might
- * cause all following request be dropped.
+ * cause all following requests to be dropped.
*
- * so here we use a multi-phase approach: prepare 2 sequence windows,
+ * So here we use a multi-phase approach: prepare 2 sequence windows,
* "main window" for normal sequence and "back window" for fall behind sequence.
* and 3-phase checking mechanism:
- * 0 - before integrity verification, perform a initial sequence checking in
- * main window, which only try and don't actually set any bits. if the
- * sequence is high above the window or fit in the window and the bit
+ * 0 - before integrity verification, perform an initial sequence checking in
+ * main window, which only tries and doesn't actually set any bits. if the
+ * sequence is high above the window or fits in the window and the bit
* is 0, then accept and proceed to integrity verification. otherwise
* reject this sequence.
* 1 - after integrity verification, check in main window again. if this
- * sequence is high above the window or fit in the window and the bit
- * is 0, then set the bit and accept; if it fit in the window but bit
- * already set, then reject; if it fall behind the window, then proceed
+ * sequence is high above the window or fits in the window and the bit
+ * is 0, then set the bit and accept; if it fits in the window but bit
+ * already set, then reject; if it falls behind the window, then proceed
* to phase 2.
- * 2 - check in back window. if it is high above the window or fit in the
+ * 2 - check in back window. if it is high above the window or fits in the
* window and the bit is 0, then set the bit and accept. otherwise reject.
*
- * return value:
- * 1: looks like a replay
- * 0: is ok
- * -1: is a replay
+ * \return 1: looks like a replay
+ * \return 0: is ok
+ * \return -1: is a replay
*
- * note phase 0 is necessary, because otherwise replay attacking request of
+ * Note phase 0 is necessary, because otherwise replay attacking request of
* sequence which between the 2 windows can't be detected.
*
- * this mechanism can't totally solve the problem, but could help much less
+ * This mechanism can't totally solve the problem, but could help reduce the
+ * number of valid requests being dropped.
*/
static
/* Report actual service time for client latency calc */
lustre_msg_set_service_time(req->rq_repmsg, service_time);
/* Report service time estimate for future client reqs, but report 0
- * (to be ignored by client) if it's a error reply during recovery.
+ * (to be ignored by client) if it's an error reply during recovery.
* (bz15815) */
if (req->rq_type == PTL_RPC_MSG_ERR &&
(req->rq_export == NULL || req->rq_export->exp_obd->obd_recovering))
/**
* Send request reply from request \a req reply buffer.
* \a flags defines reply types
- * Returns 0 on sucess or error code
+ * Returns 0 on success or error code
*/
int ptlrpc_send_reply(struct ptlrpc_request *req, int flags)
{
struct timeout_item *item;
cfs_time_t timeout = PING_INTERVAL;
- /* The timeout list is a increase order sorted list */
+ /* This list is sorted in increasing timeout order */
mutex_lock(&pinger_mutex);
list_for_each_entry(item, &timeout_list, ti_chain) {
- int ti_timeout = item->ti_timeout;
- if (timeout > ti_timeout)
- timeout = ti_timeout;
- break;
- }
+ int ti_timeout = item->ti_timeout;
+ if (timeout > ti_timeout)
+ timeout = ti_timeout;
+ break;
+ }
mutex_unlock(&pinger_mutex);
return cfs_time_sub(cfs_time_add(time, cfs_time_seconds(timeout)),
}
/**
- * Given a \a req, find or allocate a appropriate context for it.
+ * Given a \a req, find or allocate an appropriate context for it.
* \pre req->rq_cli_ctx == NULL.
*
* \retval 0 succeed, and req->rq_cli_ctx is set.
* it for reply reconstruction.
*
* Commonly the original context should be uptodate because we
- * have a expiry nice time; server will keep its context because
+ * have an expiry nice time; server will keep its context because
* we at least hold a ref of old context which prevent context
- * destroying RPC being sent. So server still can accept the request
- * and finish the RPC. But if that's not the case:
+	 * destroying RPC from being sent. So server still can accept the
+ * request and finish the RPC. But if that's not the case:
* 1. If server side context has been trimmed, a NO_CONTEXT will
* be returned, gss_cli_ctx_verify/unseal will switch to new
* context by force.
/**
* Used by ptlrpc server, to perform transformation upon request message of
- * incoming \a req. This must be the first thing to do with a incoming
+ * incoming \a req. This must be the first thing to do with an incoming
* request in ptlrpc layer.
*
* \retval SECSVC_OK success, and req->rq_reqmsg point to request message in
}
/**
- * to finish a active request: stop sending more early replies, and release
+ * to finish an active request: stop sending more early replies, and release
* the request. should be called after we finished handling the request.
*/
static void ptlrpc_server_finish_active_request(
* required and create the index file on disk if
* it does not exist.
* - lquota_disk_for_each_slv: iterate over all existing slave index files
- * - lquota_disk_read: read quota settings from a index file
+ * - lquota_disk_read: read quota settings from an index file
* - lquota_disk_declare_write: reserve credits to update a record in an index
* file
* - lquota_disk_write: update a record in an index file
lqe->lqe_adjust_time = defer ?
cfs_time_shift_64(QSD_WB_INTERVAL) :
cfs_time_current_64();
- /* lqe reference transfered to list */
+ /* lqe reference transferred to list */
if (defer)
list_add_tail(&lqe->lqe_link,
&qsd->qsd_adjust_list);
}
# Usage: mount_one_device <label> <successflag> [devtype]
-# Remove <succesflag> on error (trick to detect errors after parallel runs).
+# Remove <successflag> on error (trick to detect errors after parallel runs).
mount_one_device ()
{
local label=$1
{ skip "Need MDS version 2.5.4+ or 2.5.26+ or 2.6.52+"; return; }
cleanup
- # MDT concurent start
+ # MDT concurrent start
#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
do_facet $SINGLEMDS "$LCTL set_param fail_loc=0x703"
start mds1 $(mdsdevname 1) $MDS_MOUNT_OPTS &
echo "2nd MDT start succeed"
else
stop mds1 -f
- error "unexpected concurent MDT mounts result, rc=$rc rc2=$rc2"
+ error "unexpected concurrent MDT mounts result, rc=$rc rc2=$rc2"
fi
- # OST concurent start
+ # OST concurrent start
#define OBD_FAIL_TGT_DELAY_CONNECT 0x703
do_facet ost1 "$LCTL set_param fail_loc=0x703"
start ost1 $(ostdevname 1) $OST_MOUNT_OPTS &
else
stop mds1 -f
stop ost1 -f
- error "unexpected concurent OST mounts result, rc=$rc rc2=$rc2"
+ error "unexpected concurrent OST mounts result, rc=$rc rc2=$rc2"
fi
# cleanup
stop mds1 -f
fi
cleanup
}
-run_test 41c "concurent mounts of MDT/OST should all fail but one"
+run_test 41c "concurrent mounts of MDT/OST should all fail but one"
test_42() { #bug 14693
setup
copytool_cleanup
}
-run_test 9 "Use of explict archive number, with dedicated copytool"
+run_test 9 "Use of explicit archive number, with dedicated copytool"
test_9a() {
needclients 3 || return 0
copytool_setup
mkdir -p $DIR/$tdir
- # Check that root can do HSM actions on a ordinary user's file.
+ # Check that root can do HSM actions on a regular user's file.
rm -f $file
fid=$(make_small $file)
sum0=$(md5sum $file)
resetquota $1 $TSTUSR2
}
+# b=18081
test_25() {
log "run for chown case"
test_25_sub -u
log "run for chgrp case"
test_25_sub -g
}
-run_test_with_stat 25 "test whether quota usage is transfered when chown/chgrp (18081) ==========="
+run_test_with_stat 25 "test if quota usage is transferred on chown/chgrp"
test_26() {
mkdir -p $DIR/$tdir
local ref2=$dir0/ref2
local file1=$dir0/file1
local file2=$dir0/file2
- # create a file large enough for the concurent test
+ # create a file large enough for the concurrent test
dd if=/dev/urandom of=$ref1 bs=1M count=$((RANDOM % 50 + 20))
dd if=/dev/urandom of=$ref2 bs=1M count=$((RANDOM % 50 + 20))
echo "ref file size: ref1($(stat -c %s $ref1))," \
done
myList="${myList%* }";
- # We can select an object at a offset in the list
+ # We can select an object at an offset in the list
[ $# -eq 2 ] && {
cnt=0
for item in $myList; do
eval VDEVPTR="";;
zfs )
#if $OSTDEVn isn't defined, default is $OSTDEVBASE{n}
- # Device formated by zfs
+ # Device formatted by zfs
DEVNAME=OSTDEV$num
eval VDEVPTR=${!DEVNAME:=${OSTDEVBASE}${num}};;
* )
echo -n $VDEVPTR
}
-# Logical device formated for lustre
+# Logical device formatted for lustre
mdsdevname() {
local num=$1
local DEVNAME=MDSDEV$num
eval VDEVPTR="";;
zfs )
# if $MDSDEVn isn't defined, default is $MDSDEVBASE{n}
- # Device formated by ZFS
+ # Device formatted by ZFS
local DEVNAME=MDSDEV$num
eval VDEVPTR=${!DEVNAME:=${MDSDEVBASE}${num}};;
* )
* might use to perform mount operations.
*
* Returns:
- * 0 => Sucess
+ * 0 => Success
* nonzero => Error
*/
static int
goto out_kt;
}
- /* iterate keytab to find proper a entry */
+	/* iterate keytab to find a proper entry */
do {
krb5_data *princname;
"detach a lustre regular file from a virtual block device\n"
"usage: blockdev_detach <device_name>"},
{"blockdev_info", jt_blockdev_info, 0,
- "get the device info of a attached file\n"
+ "get the device info of an attached file\n"
"usage: blockdev_info <device_name>"},
/* Pool commands */
{"swap_layouts", lfs_swap_layouts, 0, "Swap layouts between 2 files.\n"
"usage: swap_layouts <path1> <path2>"},
{"migrate", lfs_setstripe, 0, "migrate file from one OST layout to "
- "another (may be not safe with concurent writes).\n"
+ "another (may be not safe with concurrent writes).\n"
MIGRATE_USAGE},
{"mv", lfs_mv, 0,
"To move directories between MDTs.\n"
gid = random();
while (gid == 0);
if (migration_flags & MIGRATION_BLOCKS) {
- /* take group lock to limit concurent access
+ /* take group lock to limit concurrent access
* this will be no more needed when exclusive access will
* be implemented (see LU-2919) */
/* group lock is taken after data version read because it
if (line)
free(line);
- /* return 0 if all rebinds were sucessful */
+ /* return 0 if all rebinds were successful */
CT_TRACE("%u lines read from '%s', %u rebind successful", nl, list, ok);
return ok == nl ? 0 : -1;
* the value of the index will be ignored. The pathname will return data if
* the pathname is located on a lustre mount. Index is used to pick which
* mount point you want in the case of multiple mounted lustre file systems.
- * See function lfs_osts in lfs.c for a example of the index use.
+ * See function lfs_osts in lfs.c for an example of the index use.
*/
int llapi_search_mounts(const char *pathname, int index, char *mntdir,
char *fsname)
(param->fp_mdt_uuid && param->fp_mdt_index == OBD_NOT_FOUND))
goto decided;
- /* If a OST or MDT UUID is given, and some OST matches,
+ /* If an OST or MDT UUID is given, and some OST matches,
* check it here. */
if (param->fp_obd_index != OBD_NOT_FOUND ||
param->fp_mdt_index != OBD_NOT_FOUND) {
rc = -EINVAL;
goto out_err;
}
- /* in the list we have a all archive wildcard
+ /* in the list we have an all archive wildcard
* so move to all archives mode
*/
if (archives[rc] == 0) {
goto out;
}
- /* Stand alone MGS doesn't need a index */
+ /* Stand alone MGS doesn't need an index */
if (!IS_MDT(ldd) && IS_MGS(ldd)) {
#ifndef TUNEFS /* mkfs.lustre */
/* But if --index was specified flag an error */
MAX-ACCESS read-only
STATUS current
DESCRIPTION
- "The number of Object Storage Devices on a OST system."
+ "The number of Object Storage Devices on an OST system."
::= { objectStorageTargets 1 }
osdTable OBJECT-TYPE
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
- "A table listing the Object Storage Devices available on a OST system.
+ "A table listing the Object Storage Devices available on an OST system.
The number of entries in this table is available in osdNumber."
::= { objectStorageTargets 2 }
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
- "Table entry with information an Object Storage Device on a OST
+ "Table entry with information an Object Storage Device on an OST
system."
INDEX { osdIndex }
::= { osdTable 1 }
MAX-ACCESS not-accessible
STATUS current
DESCRIPTION
- "Index into the table of Object Storage Devices on a OST system."
+ "Index into the table of Object Storage Devices on an OST system."
::= { osdEntry 1 }
osdUUID OBJECT-TYPE
*/
var_trap[1].next_variable = NULL;
- /*The "name" is the OID of the portals trap reason strong*/
+ /* The "name" is the OID of the portals trap reason string */
var_trap[1].name = lustre_portals_trap_string;
var_trap[1].name_length = sizeof(lustre_portals_trap_string) / sizeof(oid);
- /*And the data is a octet string, that contains the actually reason string*/
+	/* And the data is an octet string that contains the actual reason
+	 * string */
var_trap[1].type = ASN_OCTET_STR;
var_trap[1].val.string = (unsigned char *)reason_string;
var_trap[1].val_len = strlen(reason_string);
*/
var_trap[1].next_variable = &var_trap[2];;
- /*The "name" is the OID of the portals trap reason strong*/
+ /* The "name" is the OID of the portals trap reason string */
var_trap[1].name = lustre_unhealthy_trap_device_name_string;
var_trap[1].name_length = sizeof(lustre_unhealthy_trap_device_name_string) / sizeof(oid);
- /*And the data is a octet string, that contains the actually reason strong*/
+ /* And the data is an octet string, that contains the actual reason
+ * string */
var_trap[1].type = ASN_OCTET_STR;
var_trap[1].val.string = (unsigned char *)obd_name;
var_trap[1].val_len = strlen(obd_name);
- /*
- * Setup the third variable in the trap data.
+ /*
+ * Setup the third variable in the trap data.
* It is the last in the chain so set next to NULL
*/
var_trap[2].next_variable = NULL;
- /*The "name" is the OID of the portals trap reason strong*/
+ /* The "name" is the OID of the portals trap reason string */
var_trap[2].name = lustre_unhealthy_trap_reason_string;
var_trap[2].name_length = sizeof(lustre_unhealthy_trap_reason_string) / sizeof(oid);
- /*And the data is a octet string, that contains the actually reason strong*/
+ /* And the data is an octet string, that contains the actual reason
+ * string */
var_trap[2].type = ASN_OCTET_STR;
var_trap[2].val.string = (unsigned char *)reason_string;
var_trap[2].val_len = strlen(reason_string);