removed cwd "./" (refer to Bugzilla 14399).
* File join has been disabled in this release, refer to Bugzilla 16929.
+Severity : normal
+Frequency : Create a symlink file with a very long name
+Bugzilla : 16578
+Description: ldlm_cancel_pack()) ASSERTION(max >= dlm->lock_count + count)
+Details : If there is no extra space in the request for early cancels,
+ ldlm_req_handles_avail() returns 0 instead of a negative value.
+
Severity : enhancement
Bugzilla : 1819
Description: Add /proc entry for import status
Severity : normal
Bugzilla : 16450
Description: Add lu_ref support to ldlm_lock
-Details : lu_ref support for ldlm_lock and ldlm_resource. See lu_ref patch.
- lu_ref fields ->l_reference and ->lr_reference are added to ldlm_lock
- and ldlm_resource. LDLM interface has to be changed, because code that
+Details : lu_ref support for ldlm_lock and ldlm_resource. See lu_ref patch.
+ lu_ref fields ->l_reference and ->lr_reference are added to ldlm_lock
+ and ldlm_resource. LDLM interface has to be changed, because code that
releases a reference on a lock, has to "know" what reference this is.
In the most frequent case
...
LDLM_LOCK_PUT(lock);
- no changes are required. When any other reference (received _not_ from
- ldlm_handle2lock()) is released, LDLM_LOCK_RELEASE() has to be called
+ no changes are required. When any other reference (received _not_ from
+ ldlm_handle2lock()) is released, LDLM_LOCK_RELEASE() has to be called
instead of LDLM_LOCK_PUT().
Arguably, changes are pervasive, and interface requires some discipline
- for proper use. On the other hand, it was very instrumental in finding
+ for proper use. On the other hand, it was very instrumental in finding
a few leaked lock references.
Severity : normal
Severity : normal
Bugzilla : 16450
Description: Add ldlm_weigh_callback().
-Details : Add new ->l_weigh_ast() call-back to ldlm_lock. It is called
+Details : Add new ->l_weigh_ast() call-back to ldlm_lock. It is called
by ldlm_cancel_shrink_policy() to estimate lock "value", instead of
hard-coded `number of pages' logic.
Severity : normal
Bugzilla : 16450
Description: Add start and stop methods to lu_device_type_operations.
-Details : Introduce two new methods in lu_device_type_operations, that are
- invoked when first instance of a given type is created and last one
+Details : Introduce two new methods in lu_device_type_operations, that are
+ invoked when first instance of a given type is created and last one
is destroyed respectively. This is need by CLIO.
Severity : normal
Bugzilla : 16450
Description: Introduce struct md_site and move meta-data specific parts of
struct lu_site here.
-Details : Move md-specific fields out of struct lu_site into special struct
+Details : Move md-specific fields out of struct lu_site into special struct
md_site, so that lu_site can be used on a client.
Severity : minor
Bugzilla : 17197
Description: (rw.c:1323:ll_read_ahead_pages()) ASSERTION(page_idx > ria->ria_stoff) failed
Details : Once the unmatched stride IO mode is detected, shrink the stride-ahead
- window to 0. If it does hit cache miss, and read-pattern is still
- stride-io mode, does not reset the stride window, but also does not
+ window to 0. If it does hit cache miss, and read-pattern is still
+ stride-io mode, does not reset the stride window, but also does not
increase the stride window length in this case.
--------------------------------------------------------------------------------
int avail;
avail = min_t(int, LDLM_MAXREQSIZE, CFS_PAGE_SIZE - 512) - req_size;
- avail /= sizeof(struct lustre_handle);
+ if (likely(avail >= 0))
+ avail /= (int)sizeof(struct lustre_handle);
+ else
+ avail = 0;
avail += LDLM_LOCKREQ_HANDLES - off;
return avail;
bufcount = req_capsule_filled_sizes(pill, RCL_CLIENT);
avail = ldlm_capsule_handles_avail(pill, RCL_CLIENT, canceloff);
- flags = ns_connect_lru_resize(ns) ?
+ flags = ns_connect_lru_resize(ns) ?
LDLM_CANCEL_LRUR : LDLM_CANCEL_AGED;
to_free = !ns_connect_lru_resize(ns) &&
opc == LDLM_ENQUEUE ? 1 : 0;
- /* Cancel lru locks here _only_ if the server supports
+ /* Cancel lru locks here _only_ if the server supports
* EARLY_CANCEL. Otherwise we have to send extra CANCEL
* rpc, what will make us slower. */
if (avail > count)
{
int rc = LDLM_FL_LOCAL_ONLY;
ENTRY;
-
+
if (lock->l_conn_export) {
int local_only;
LASSERT(dlm != NULL);
/* Check the room in the request buffer. */
- max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
+ max = req_capsule_get_size(&req->rq_pill, &RMF_DLM_REQ, RCL_CLIENT) -
sizeof(struct ldlm_request);
max /= sizeof(struct lustre_handle);
max += LDLM_LOCKREQ_HANDLES;
__u64 old_slv, new_slv;
__u32 new_limit;
ENTRY;
-
- if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
+
+ if (unlikely(!req->rq_import || !req->rq_import->imp_obd ||
!imp_connect_lru_resize(req->rq_import)))
{
- /*
- * Do nothing for corner cases.
+ /*
+ * Do nothing for corner cases.
*/
RETURN(0);
}
- /*
- * In some cases RPC may contain slv and limit zeroed out. This is
+ /*
+ * In some cases RPC may contain slv and limit zeroed out. This is
* the case when server does not support lru resize feature. This is
* also possible in some recovery cases when server side reqs have no
- * ref to obd export and thus access to server side namespace is no
- * possible.
+ * ref to obd export and thus access to server side namespace is no
+ * possible.
*/
- if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
+ if (lustre_msg_get_slv(req->rq_repmsg) == 0 ||
lustre_msg_get_limit(req->rq_repmsg) == 0) {
DEBUG_REQ(D_HA, req, "Zero SLV or Limit found "
- "(SLV: "LPU64", Limit: %u)",
- lustre_msg_get_slv(req->rq_repmsg),
+ "(SLV: "LPU64", Limit: %u)",
+ lustre_msg_get_slv(req->rq_repmsg),
lustre_msg_get_limit(req->rq_repmsg));
RETURN(0);
}
new_slv = lustre_msg_get_slv(req->rq_repmsg);
obd = req->rq_import->imp_obd;
- /*
- * Set new SLV and Limit to obd fields to make accessible for pool
+ /*
+ * Set new SLV and Limit to obd fields to make accessible for pool
* thread. We do not access obd_namespace and pool directly here
* as there is no reliable way to make sure that they are still
* alive in cleanup time. Evil races are possible which may cause
- * oops in that time.
+ * oops in that time.
*/
write_lock(&obd->obd_pool_lock);
old_slv = obd->obd_pool_slv;
RETURN(count);
}
-/**
+/**
* Callback function for shrink policy. Makes decision whether to keep
* \a lock in LRU for current \a LRU size \a unused, added in current scan
* \a added and number of locks to be preferably canceled \a count.
*/
static ldlm_policy_res_t ldlm_cancel_shrink_policy(struct ldlm_namespace *ns,
struct ldlm_lock *lock,
- int unused, int added,
+ int unused, int added,
int count)
{
int lock_cost;
__u64 page_nr;
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
+ /*
+ * Stop lru processing when we reached passed @count or checked all
+ * locks in lru.
*/
if (count && added >= count)
return LDLM_POLICY_KEEP_LOCK;
} else {
struct ldlm_extent *l_extent;
- /*
+ /*
* For all extent locks cost is 1 + number of pages in
- * their extent.
+ * their extent.
*/
l_extent = &lock->l_policy_data.l_extent;
page_nr = l_extent->end - l_extent->start;
}
lock_cost = 1 + page_nr;
} else {
- /*
- * For all locks which are not extent ones cost is 1
+ /*
+ * For all locks which are not extent ones cost is 1
*/
lock_cost = 1;
}
- /*
+ /*
* Keep all expensive locks in lru for the memory pressure time
* cancel policy. They anyways may be canceled by lru resize
- * pplicy if they have not small enough CLV.
+ * policy if they have not small enough CLV.
*/
- return lock_cost > ns->ns_shrink_thumb ?
+ return lock_cost > ns->ns_shrink_thumb ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_lrur_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
- int unused, int added,
+ struct ldlm_lock *lock,
+ int unused, int added,
int count)
{
cfs_time_t cur = cfs_time_current();
__u64 slv, lvf, lv;
cfs_time_t la;
- /*
- * Stop lru processing when we reached passed @count or checked all
+ /*
+ * Stop lru processing when we reached passed @count or checked all
* locks in lru.
*/
if (count && added >= count)
slv = ldlm_pool_get_slv(pl);
lvf = ldlm_pool_get_lvf(pl);
- la = cfs_duration_sec(cfs_time_sub(cur,
+ la = cfs_duration_sec(cfs_time_sub(cur,
lock->l_last_used));
- /*
- * Stop when slv is not yet come from server or lv is smaller than
+ /*
+ * Stop when slv has not yet come from server or lv is smaller than
* it is.
*/
lv = lvf * la * unused;
-
- /*
- * Inform pool about current CLV to see it via proc.
+
+ /*
+ * Inform pool about current CLV to see it via proc.
*/
ldlm_pool_set_clv(pl, lv);
- return (slv == 1 || lv < slv) ?
+ return (slv == 1 || lv < slv) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_passed_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
+ /*
+ * Stop lru processing when we reached passed @count or checked all
+ * locks in lru.
*/
- return (added >= count) ?
+ return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_aged_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing if young lock is found and we reached passed
- * @count.
+ /*
+ * Stop lru processing if young lock is found and we reached passed
+ * @count.
*/
- return ((added >= count) &&
+ return ((added >= count) &&
cfs_time_before(cfs_time_current(),
cfs_time_add(lock->l_last_used,
- ns->ns_max_age))) ?
+ ns->ns_max_age))) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
* \retval LDLM_POLICY_CANCEL_LOCK cancel lock from LRU
*/
static ldlm_policy_res_t ldlm_cancel_default_policy(struct ldlm_namespace *ns,
- struct ldlm_lock *lock,
+ struct ldlm_lock *lock,
int unused, int added,
int count)
{
- /*
- * Stop lru processing when we reached passed @count or checked all
- * locks in lru.
+ /*
+ * Stop lru processing when we reached passed @count or checked all
+ * locks in lru.
*/
- return (added >= count) ?
+ return (added >= count) ?
LDLM_POLICY_KEEP_LOCK : LDLM_POLICY_CANCEL_LOCK;
}
-typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
- struct ldlm_lock *, int,
+typedef ldlm_policy_res_t (*ldlm_cancel_lru_policy_t)(struct ldlm_namespace *,
+ struct ldlm_lock *, int,
int, int);
static ldlm_cancel_lru_policy_t
if (flags & LDLM_CANCEL_AGED)
return ldlm_cancel_aged_policy;
}
-
+
return ldlm_cancel_default_policy;
}
-
+
/* - Free space in lru for @count new locks,
* redundant unused locks are canceled locally;
* - also cancel locally unused aged locks;
pf = ldlm_cancel_lru_policy(ns, flags);
LASSERT(pf != NULL);
-
+
while (!list_empty(&ns->ns_unused_list)) {
/* For any flags, stop scanning if @max is reached. */
if (max && added >= max)
* we find a lock that should stay in the cache.
* We should take into account lock age anyway
* as new lock even if it is small of weight is
- * valuable resource.
+ * valuable resource.
*
* That is, for shrinker policy we drop only
* old locks, but additionally chose them by
- * their weight. Big extent locks will stay in
+ * their weight. Big extent locks will stay in
* the cache. */
if (pf(ns, lock, unused, added, count) ==
LDLM_POLICY_KEEP_LOCK) {
/* If we have chosen to cancel this lock voluntarily, we
* better send cancel notification to server, so that it
- * frees appropriate state. This might lead to a race
- * where while we are doing cancel here, server is also
+ * frees appropriate state. This might lead to a race
+ * where while we are doing cancel here, server is also
* silently cancelling this lock. */
lock->l_flags &= ~LDLM_FL_CANCEL_ON_BLOCK;
RETURN(ldlm_cancel_list(cancels, added, cancel_flags));
}
-/* Returns number of locks which could be canceled next time when
+/* Returns number of locks which could be canceled next time when
* ldlm_cancel_lru() is called. Used from locks pool shrinker. */
int ldlm_cancel_lru_estimate(struct ldlm_namespace *ns,
int count, int max, int flags)
break;
/* Somebody is already doing CANCEL or there is a
- * blocking request will send cancel. Let's not count
+ * blocking request will send cancel. Let's not count
* this lock. */
if ((lock->l_flags & LDLM_FL_CANCELING) ||
- (lock->l_flags & LDLM_FL_BL_AST))
+ (lock->l_flags & LDLM_FL_BL_AST))
continue;
LDLM_LOCK_GET(lock);
* in a thread and this function will return after the thread has been
* asked to call the callback. when called with LDLM_SYNC the blocking
* callback will be performed in this function. */
-int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
+int ldlm_cancel_lru(struct ldlm_namespace *ns, int nr, ldlm_sync_t sync,
int flags)
{
CFS_LIST_HEAD(cancels);
/* If somebody is already doing CANCEL, or blocking ast came,
* skip this lock. */
- if (lock->l_flags & LDLM_FL_BL_AST ||
+ if (lock->l_flags & LDLM_FL_BL_AST ||
lock->l_flags & LDLM_FL_CANCELING)
continue;
RETURN(ldlm_cancel_list(cancels, count, cancel_flags));
}
-/* If @req is NULL, send CANCEL request to server with handles of locks
- * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
+/* If @req is NULL, send CANCEL request to server with handles of locks
+ * in the @cancels. If EARLY_CANCEL is not supported, send CANCEL requests
* separately per lock.
- * If @req is not NULL, put handles of locks in @cancels into the request
+ * If @req is not NULL, put handles of locks in @cancels into the request
* buffer at the offset @off.
* Destroy @cancels at the end. */
int ldlm_cli_cancel_list(struct list_head *cancels, int count,
if (list_empty(cancels) || count == 0)
RETURN(0);
-
- /* XXX: requests (both batched and not) could be sent in parallel.
+
+ /* XXX: requests (both batched and not) could be sent in parallel.
* Usually it is enough to have just 1 RPC, but it is possible that
* there are to many locks to be cancelled in LRU or on a resource.
* It would also speed up the case when the server does not support
}
run_test 17f "symlinks: long and very long symlink name ========================"
+test_17g() {
+ mkdir -p $DIR/$tdir
+ for ((i = 0; i < 511; ++i)); do
+ LONGSYMLINK="${LONGSYMLINK}01234567"
+ done
+ ln -s $LONGSYMLINK $DIR/$tdir/$tfile
+ ls -l $DIR/$tdir
+}
+run_test 17g "symlinks: really long symlink name ==============================="
+
test_18() {
touch $DIR/f
ls $DIR || error
test_27n() {
[ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
remote_mds_nodsh && skip "remote MDS with nodsh" && return
- remote_ost_nodsh && skip "remote OST with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
reset_enospc
rm -f $DIR/d27/f27n
test_27o() {
[ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
remote_mds_nodsh && skip "remote MDS with nodsh" && return
- remote_ost_nodsh && skip "remote OST with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
reset_enospc
rm -f $DIR/d27/f27o
test_27p() {
[ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
remote_mds_nodsh && skip "remote MDS with nodsh" && return
- remote_ost_nodsh && skip "remote OST with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
reset_enospc
rm -f $DIR/d27/f27p
test_27q() {
[ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
remote_mds_nodsh && skip "remote MDS with nodsh" && return
- remote_ost_nodsh && skip "remote OST with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
reset_enospc
rm -f $DIR/d27/f27q
test_27r() {
[ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
remote_mds_nodsh && skip "remote MDS with nodsh" && return
- remote_ost_nodsh && skip "remote OST with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
reset_enospc
rm -f $DIR/d27/f27r
test_27v() { # bug 4900
[ "$OSTCOUNT" -lt "2" ] && skip "too few OSTs" && return
remote_mds_nodsh && skip "remote MDS with nodsh" && return
- remote_ost_nodsh && skip "remote OST with nodsh" && return
+ remote_ost_nodsh && skip "remote OST with nodsh" && return
exhaust_all_precreations
run_test 51b "mkdir .../t-0 --- .../t-$NUMTEST ===================="
test_51bb() {
- [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
+ [ $MDSCOUNT -lt 2 ] && skip "needs >= 2 MDTs" && return
local ndirs=${TEST51BB_NDIRS:-10}
local nfiles=${TEST51BB_NFILES:-100}
declare -a dirs
for ((i=0; i < $ndirs; i++)); do
dirs[i]=$dir/$RANDOM
- echo Creating directory ${dirs[i]}
+ echo Creating directory ${dirs[i]}
mkdir -p ${dirs[i]}
ls $dir
echo Creating $nfiles in dir ${dirs[i]} ...
error "local: $LPORT > 1024, remote: $RPORT"
fi
done
- [ "$rc" = 0 ] || error "privileged port not found" )
+ [ "$rc" = 0 ] || error "privileged port not found" )
}
run_test 100 "check local port using privileged port ==========="
$LCTL set_param -n obdfilter.*.writethrough_cache_enable 1
- # pages should be in the case right after write
+ # pages should be in the case right after write
dd if=/dev/urandom of=$DIR/$tfile bs=4k count=$CPAGES || error "dd failed"
BEFORE=`roc_hit`
cancel_lru_locks osc