*/
static int
ldlm_extent_compat_queue(struct list_head *queue, struct ldlm_lock *req,
- __u64 *flags, enum ldlm_error *err,
- struct list_head *work_list, int *contended_locks)
+ __u64 *flags, struct list_head *work_list,
+ int *contended_locks)
{
struct ldlm_resource *res = req->l_resource;
enum ldlm_mode req_mode = req->l_req_mode;
struct ldlm_lock *lock;
int check_contention;
int compat = 1;
- int scan = 0;
ENTRY;
lockmode_verify(req_mode);
goto destroylock;
}
- *flags |= LDLM_FL_NO_TIMEOUT;
if (!work_list)
RETURN(0);
if (req == lock)
break;
- if (unlikely(scan)) {
- /* We only get here if we are queuing GROUP lock
- and met some incompatible one. The main idea of this
- code is to insert GROUP lock past compatible GROUP
- lock in the waiting queue or if there is not any,
- then in front of first non-GROUP lock */
- if (lock->l_req_mode != LCK_GROUP) {
- /* Ok, we hit non-GROUP lock, there should
- * be no more GROUP locks later on, queue in
- * front of first non-GROUP lock */
-
- ldlm_resource_insert_lock_after(lock, req);
- list_del_init(&lock->l_res_link);
- ldlm_resource_insert_lock_after(req, lock);
- compat = 0;
- break;
- }
- if (req->l_policy_data.l_extent.gid ==
- lock->l_policy_data.l_extent.gid) {
- /* found it */
- ldlm_resource_insert_lock_after(lock, req);
- compat = 0;
- break;
- }
- continue;
- }
-
/* locks are compatible, overlap doesn't matter */
if (lockmode_compat(lock->l_req_mode, req_mode)) {
if (req_mode == LCK_PR &&
if (unlikely(req_mode == LCK_GROUP &&
!ldlm_is_granted(lock))) {
- scan = 1;
compat = 0;
if (lock->l_req_mode != LCK_GROUP) {
/* Ok, we hit non-GROUP lock, there should be no
more GROUP locks later on, queue in front of
first non-GROUP lock */
- ldlm_resource_insert_lock_after(lock, req);
- list_del_init(&lock->l_res_link);
- ldlm_resource_insert_lock_after(req, lock);
+ ldlm_resource_insert_lock_before(lock, req);
break;
}
- if (req->l_policy_data.l_extent.gid ==
- lock->l_policy_data.l_extent.gid) {
- /* found it */
- ldlm_resource_insert_lock_after(lock, req);
- break;
- }
- continue;
+ LASSERT(req->l_policy_data.l_extent.gid !=
+ lock->l_policy_data.l_extent.gid);
+ continue;
}
if (unlikely(lock->l_req_mode == LCK_GROUP)) {
| LDLM_FL_SPECULATIVE)) {
compat = -EWOULDBLOCK;
goto destroylock;
- } else {
- *flags |= LDLM_FL_NO_TIMEOUT;
}
} else if (lock->l_policy_data.l_extent.end < req_start ||
lock->l_policy_data.l_extent.start > req_end) {
destroylock:
list_del_init(&req->l_res_link);
ldlm_lock_destroy_nolock(req);
- *err = compat;
RETURN(compat);
}
enum ldlm_error *err, struct list_head *work_list)
{
struct ldlm_resource *res = lock->l_resource;
- int rc, rc2;
+ int rc, rc2 = 0;
int contended_locks = 0;
struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
NULL : work_list;
* ever stops being true, we want to find out. */
LASSERT(*flags == 0);
rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
- err, NULL, &contended_locks);
+ NULL, &contended_locks);
if (rc == 1) {
rc = ldlm_extent_compat_queue(&res->lr_waiting, lock,
- flags, err, NULL,
+ flags, NULL,
&contended_locks);
}
if (rc == 0)
}
contended_locks = 0;
- rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags, err,
+ rc = ldlm_extent_compat_queue(&res->lr_granted, lock, flags,
work_list, &contended_locks);
if (rc < 0)
- GOTO(out_rpc_list, rc);
+ GOTO(out, *err = rc);
- rc2 = 0;
if (rc != 2) {
rc2 = ldlm_extent_compat_queue(&res->lr_waiting, lock,
- flags, err, work_list,
+ flags, work_list,
&contended_locks);
if (rc2 < 0)
- GOTO(out_rpc_list, rc = rc2);
+ GOTO(out, *err = rc = rc2);
}
if (rc + rc2 == 2) {
* the lock is enqueued -bzzz */
*flags |= LDLM_FL_NO_TIMEOUT;
}
- rc = LDLM_ITER_CONTINUE;
-out_rpc_list:
- RETURN(rc);
+ RETURN(LDLM_ITER_CONTINUE);
+out:
+ return rc;
}
#endif /* HAVE_SERVER_SUPPORT */
struct list_head *grant_work = intention == LDLM_PROCESS_ENQUEUE ?
NULL : work_list;
int rc;
-
ENTRY;
+ *err = ELDLM_LOCK_ABORTED;
LASSERT(!ldlm_is_granted(lock));
check_res_locked(res);
if (intention == LDLM_PROCESS_RESCAN) {
- struct list_head *bl_list;
-
- if (*flags & LDLM_FL_BLOCK_NOWAIT) {
- bl_list = NULL;
- *err = ELDLM_LOCK_WOULDBLOCK;
- } else {
- bl_list = work_list;
- *err = ELDLM_LOCK_ABORTED;
- }
+ struct list_head *bl_list =
+ *flags & LDLM_FL_BLOCK_NOWAIT ? NULL : work_list;
LASSERT(lock->l_policy_data.l_inodebits.bits != 0);
if (rc != 2) {
/* if there were only bits to try and all are conflicting */
if ((lock->l_policy_data.l_inodebits.bits |
- lock->l_policy_data.l_inodebits.try_bits) == 0) {
- *err = ELDLM_LOCK_WOULDBLOCK;
- } else {
+ lock->l_policy_data.l_inodebits.try_bits)) {
+			/* There is no point in setting LDLM_FL_NO_TIMEOUT in @flags
+			 * for DOM locks while they are enqueued through intents,
+			 * i.e. @lock here is local and does not time out. */
*err = ELDLM_OK;
}
} else {
void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
struct ldlm_lock *new);
+void ldlm_resource_insert_lock_before(struct ldlm_lock *original,
+ struct ldlm_lock *new);
/* ldlm_lock.c */
}
EXPORT_SYMBOL(ldlm_resource_putref);
-/**
- * Add a lock into a given resource into specified lock list.
- */
-void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
- struct ldlm_lock *lock)
+static void __ldlm_resource_add_lock(struct ldlm_resource *res,
+ struct list_head *head,
+ struct ldlm_lock *lock,
+ bool tail)
{
check_res_locked(res);
- LDLM_DEBUG(lock, "About to add this lock");
-
if (ldlm_is_destroyed(lock)) {
CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
return;
LASSERT(list_empty(&lock->l_res_link));
- list_add_tail(&lock->l_res_link, head);
+ if (tail)
+ list_add_tail(&lock->l_res_link, head);
+ else
+ list_add(&lock->l_res_link, head);
if (res->lr_type == LDLM_IBITS)
ldlm_inodebits_add_lock(res, head, lock);
+
+ ldlm_resource_dump(D_INFO, res);
+}
+
+/**
+ * Add a lock into a given resource into specified lock list.
+ */
+void ldlm_resource_add_lock(struct ldlm_resource *res, struct list_head *head,
+ struct ldlm_lock *lock)
+{
+ LDLM_DEBUG(lock, "About to add this lock");
+
+ __ldlm_resource_add_lock(res, head, lock, true);
}
/**
* Insert a lock into resource after specified lock.
- *
- * Obtain resource description from the lock we are inserting after.
*/
void ldlm_resource_insert_lock_after(struct ldlm_lock *original,
struct ldlm_lock *new)
{
- struct ldlm_resource *res = original->l_resource;
-
- check_res_locked(res);
+ LASSERT(!list_empty(&original->l_res_link));
- ldlm_resource_dump(D_INFO, res);
LDLM_DEBUG(new, "About to insert this lock after %p: ", original);
+ __ldlm_resource_add_lock(original->l_resource,
+ &original->l_res_link,
+ new, false);
+}
- if (ldlm_is_destroyed(new)) {
- CDEBUG(D_OTHER, "Lock destroyed, not adding to resource\n");
- goto out;
- }
-
- LASSERT(list_empty(&new->l_res_link));
+/**
+ * Insert a lock into resource before the specified lock.
+ */
+void ldlm_resource_insert_lock_before(struct ldlm_lock *original,
+ struct ldlm_lock *new)
+{
+ LASSERT(!list_empty(&original->l_res_link));
- list_add(&new->l_res_link, &original->l_res_link);
- out:;
+ LDLM_DEBUG(new, "About to insert this lock before %p: ", original);
+ __ldlm_resource_add_lock(original->l_resource,
+ original->l_res_link.prev, new, false);
}
void ldlm_resource_unlink_lock(struct ldlm_lock *lock)
struct mdt_device *mdt = info->mti_mdt;
union ldlm_policy_data *policy = &info->mti_policy;
struct ldlm_res_id *res_id = &info->mti_res_id;
+ __u64 open_flags = info->mti_spec.sp_cr_flags;
struct lustre_handle lockh;
enum ldlm_mode mode;
struct ldlm_lock *lock;
+ enum ldlm_mode lm;
bool rc;
policy->l_inodebits.bits = MDS_INODELOCK_DOM;
fid_build_reg_res_name(fid, res_id);
+
+ lm = (open_flags & MDS_FMODE_WRITE) ? LCK_PW : LCK_PR | LCK_PW;
mode = ldlm_lock_match(mdt->mdt_namespace, LDLM_FL_BLOCK_GRANTED |
LDLM_FL_TEST_LOCK, res_id, LDLM_IBITS, policy,
- LCK_PW, &lockh, 0);
+ lm, &lockh, 0);
/* There is no other PW lock on this object; finished. */
if (mode == 0)
skip "Does not support layout lock."
}
+# Skip the current test when the given directory tree contains DOM
+# ("pattern: mdt") files, since layout swap does not support them yet.
+# $1 - directory to scan with "lfs getstripe"
+check_swap_layout_no_dom()
+{
+	local FOLDER=$1
+	local SUPP=$(lfs getstripe $FOLDER | grep "pattern: mdt" | wc -l)
+	[ $SUPP -eq 0 ] || skip "layout swap does not support DOM files so far"
+}
+
check_and_setup_lustre
DIR=${DIR:-$MOUNT}
assert_DIR
}
run_test 81b "OST should return -ENOSPC when retry still fails ======="
-test_82() { # LU-1031
- dd if=/dev/zero of=$DIR/$tfile bs=1M count=10
- local gid1=14091995
- local gid2=16022000
-
- multiop_bg_pause $DIR/$tfile OG${gid1}_g${gid1}c || return 1
- local MULTIPID1=$!
- multiop_bg_pause $DIR/$tfile O_G${gid2}r10g${gid2}c || return 2
- local MULTIPID2=$!
- kill -USR1 $MULTIPID2
- sleep 2
- if [[ `ps h -o comm -p $MULTIPID2` == "" ]]; then
- error "First grouplock does not block second one"
- else
- echo "Second grouplock blocks first one"
- fi
- kill -USR1 $MULTIPID1
- wait $MULTIPID1
- wait $MULTIPID2
-}
-run_test 82 "Basic grouplock test"
-
test_99() {
[ -z "$(which cvs 2>/dev/null)" ] && skip_env "could not find cvs"
local cmpn_arg=$(cmp -n 2>&1 | grep "invalid option")
[ -n "$cmpn_arg" ] && skip_env "cmp does not support -n"
check_swap_layouts_support
+ check_swap_layout_no_dom $DIR
local dir0=$DIR/$tdir/$testnum
mkdir -p $dir0 || error "creating dir $dir0"
test_184d() {
check_swap_layouts_support
+ check_swap_layout_no_dom $DIR
[ -z "$(which getfattr 2>/dev/null)" ] &&
skip_env "no getfattr command"
[[ $MDS1_VERSION -ge $(version_code 2.6.94) ]] ||
skip "Need MDS version at least 2.6.94"
check_swap_layouts_support
+ check_swap_layout_no_dom $DIR
[ -z "$(which getfattr 2>/dev/null)" ] &&
skip_env "no getfattr command"
skip "Layout swap lock is not supported"
check_swap_layouts_support
+ check_swap_layout_no_dom $DIR
test_mkdir $DIR/$tdir
swap_lock_test -d $DIR/$tdir ||
}
run_test 106c "Verify statx attributes mask"
+test_107() { # LU-1031
+	# Basic group-lock conflict test: a group lock with gid1 held on a
+	# file must block a group-lock request with a different gid2 on the
+	# same file taken through a second mount point ($DIR2).
+	dd if=/dev/zero of=$DIR1/$tfile bs=1M count=10
+	local gid1=14091995
+	local gid2=16022000
+
+	$LFS getstripe $DIR1/$tfile
+
+	# First multiop takes group lock gid1 and pauses while holding it.
+	multiop_bg_pause $DIR1/$tfile OG${gid1}_g${gid1}c || return 1
+	local MULTIPID1=$!
+	# Second multiop requests group lock gid2 on the other mount point.
+	multiop_bg_pause $DIR2/$tfile O_G${gid2}r10g${gid2}c || return 2
+	local MULTIPID2=$!
+	kill -USR1 $MULTIPID2
+	sleep 2
+	# If the second process already exited, its lock was granted, i.e.
+	# the first group lock failed to block the conflicting request.
+	if [[ `ps h -o comm -p $MULTIPID2` == "" ]]; then
+		error "First grouplock does not block second one"
+	else
+		echo "First grouplock blocks second one"
+	fi
+	kill -USR1 $MULTIPID1
+	wait $MULTIPID1
+	wait $MULTIPID2
+}
+run_test 107 "Basic grouplock conflict"
+
log "cleanup: ======================================================"
# kill and wait in each test only guarentee script finish, but command in script