}
static struct cl_lock *lov_sublock_alloc(const struct lu_env *env,
-                                         const struct cl_io *io,
-                                         struct lov_lock *lck,
-                                         int idx, struct lov_lock_link **out)
+                                         const struct cl_io *io,
+                                         struct lov_lock *lck,
+                                         int idx, struct lov_lock_link **out)
{
-        struct cl_lock *sublock;
-        struct cl_lock *parent;
-        struct lov_lock_link *link;
-
-        LASSERT(idx < lck->lls_nr);
-        ENTRY;
-        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, __GFP_IO);
-        if (link != NULL) {
-                struct lov_sublock_env *subenv;
-                struct lov_lock_sub *lls;
-                struct cl_lock_descr *descr;
-
-                parent = lck->lls_cl.cls_lock;
-                lls = &lck->lls_sub[idx];
-                descr = &lls->sub_got;
-
-                subenv = lov_sublock_env_get(env, parent, lls);
-                if (!IS_ERR(subenv)) {
-                        /* CAVEAT: Don't try to add a field in lov_lock_sub
-                         * to remember the subio. This is because lock is able
-                         * to be cached, but this is not true for IO. This
-                         * further means a sublock might be referenced in
-                         * different io context. -jay */
-
-                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
-                                               descr, "lov-parent", parent);
-                        lov_sublock_env_put(subenv);
-                } else {
-                        /* error occurs. */
-                        sublock = (void*)subenv;
-                }
-                if (!IS_ERR(sublock))
-                        *out = link;
-                else
-                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
-        } else
-                sublock = ERR_PTR(-ENOMEM);
-        RETURN(sublock);
+        struct cl_lock *sublock;
+        struct cl_lock *parent;
+        struct lov_lock_link *link;
+
+        LASSERT(idx < lck->lls_nr);
+        ENTRY;
+        OBD_SLAB_ALLOC_PTR_GFP(link, lov_lock_link_kmem, GFP_NOFS);
+        if (link != NULL) {
+                struct lov_sublock_env *subenv;
+                struct lov_lock_sub *lls;
+                struct cl_lock_descr *descr;
+
+                parent = lck->lls_cl.cls_lock;
+                lls = &lck->lls_sub[idx];
+                descr = &lls->sub_got;
+
+                subenv = lov_sublock_env_get(env, parent, lls);
+                if (!IS_ERR(subenv)) {
+                        /* CAVEAT: Don't try to add a field in lov_lock_sub
+                         * to remember the subio. This is because lock is able
+                         * to be cached, but this is not true for IO. This
+                         * further means a sublock might be referenced in
+                         * different io context. -jay */
+
+                        sublock = cl_lock_hold(subenv->lse_env, subenv->lse_io,
+                                               descr, "lov-parent", parent);
+                        lov_sublock_env_put(subenv);
+                } else {
+                        /* error occurs. */
+                        sublock = (void *)subenv;
+                }
+
+                if (!IS_ERR(sublock))
+                        *out = link;
+                else
+                        OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
+        } else
+                sublock = ERR_PTR(-ENOMEM);
+        RETURN(sublock);
}
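The functional change in the hunk above is the allocation mask: __GFP_IO becomes GFP_NOFS (everything else is re-indentation). For reference, in kernels of this era the two masks relate roughly as follows (a sketch after include/linux/gfp.h, not part of this patch):

        #define GFP_NOFS        (__GFP_WAIT | __GFP_IO)

A bare __GFP_IO lacks __GFP_WAIT, so the slab allocator can neither sleep nor enter direct reclaim and fails easily under memory pressure. GFP_NOFS still allows blocking and block I/O during reclaim, but forbids reclaim from re-entering filesystem code, which could otherwise deadlock against the cl_lock state already held on this path.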
static void lov_sublock_unlock(const struct lu_env *env,
}
}
        LASSERT(nr == lck->lls_nr);
-        /*
-         * Then, create sub-locks. Once at least one sub-lock was created,
-         * top-lock can be reached by other threads.
-         */
-        for (i = 0; i < lck->lls_nr; ++i) {
-                struct cl_lock *sublock;
-                struct lov_lock_link *link;
-
-                if (lck->lls_sub[i].sub_lock == NULL) {
-                        sublock = lov_sublock_alloc(env, io, lck, i, &link);
-                        if (IS_ERR(sublock)) {
-                                result = PTR_ERR(sublock);
-                                break;
-                        }
-                        cl_lock_get_trust(sublock);
-                        cl_lock_mutex_get(env, sublock);
-                        cl_lock_mutex_get(env, parent);
-                        /*
-                         * recheck under mutex that sub-lock wasn't created
-                         * concurrently, and that top-lock is still alive.
-                         */
-                        if (lck->lls_sub[i].sub_lock == NULL &&
-                            parent->cll_state < CLS_FREEING) {
-                                lov_sublock_adopt(env, lck, sublock, i, link);
-                                cl_lock_mutex_put(env, parent);
-                        } else {
-                                OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);
-                                cl_lock_mutex_put(env, parent);
-                                cl_lock_unhold(env, sublock,
-                                               "lov-parent", parent);
-                        }
-                        cl_lock_mutex_put(env, sublock);
-                        cl_lock_put(env, sublock);
-                }
-        }
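The creation loop above is deleted outright rather than re-indented: as the retained comment just below says, a missing sub-lock is not a problem because enqueue creates it on demand. A sketch of that on-demand shape, assuming the per-stripe enqueue loop of this era (not part of this hunk; names follow the lov_sublock_fill() signature further down):

        /* sketch: inside the enqueue loop over stripes */
        if (lck->lls_sub[i].sub_lock == NULL) {
                rc = lov_sublock_fill(env, parent, io, lck, i);
                if (rc != 0)
                        break;
        }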
        /*
         * Some sub-locks can be missing at this point. This is not a problem,
         * because enqueue will create them anyway. Main duty of this function
static int lov_sublock_fill(const struct lu_env *env, struct cl_lock *parent,
                            struct cl_io *io, struct lov_lock *lck, int idx)
{
-        struct lov_lock_link *link;
+        struct lov_lock_link *link = NULL;
        struct cl_lock *sublock;
        int result;
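Initializing link to NULL is a hardening fix rather than churn: lov_sublock_alloc() above stores to *out only on success, so an error path that tests or frees link would otherwise read an indeterminate pointer. A sketch of the hazard being closed (hypothetical caller code, not from this patch):

        struct lov_lock_link *link;     /* indeterminate if alloc fails */

        sublock = lov_sublock_alloc(env, io, lck, idx, &link);
        if (IS_ERR(sublock) && link != NULL)    /* uninitialized read */
                OBD_SLAB_FREE_PTR(link, lov_lock_link_kmem);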
                if (sub == NULL)
                        continue;
-                sublock = sub->lss_cl.cls_lock;
-                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
-                if (rc == 0) {
-                        if (lls->sub_flags & LSF_HELD) {
-                                LASSERT(sublock->cll_state == CLS_HELD ||
-                                        sublock->cll_state == CLS_ENQUEUED);
-                                rc = cl_unuse_try(subenv->lse_env, sublock);
-                                rc = lov_sublock_release(env, lck, i, 0, rc);
-                        }
-                        lov_sublock_unlock(env, sub, closure, subenv);
-                }
-                result = lov_subresult(result, rc);
+                sublock = sub->lss_cl.cls_lock;
+                rc = lov_sublock_lock(env, lck, lls, closure, &subenv);
+                if (rc == 0) {
+                        if (!(lls->sub_flags & LSF_HELD)) {
+                                lov_sublock_unlock(env, sub, closure, subenv);
+                                continue;
+                        }
+
+                        switch (sublock->cll_state) {
+                        case CLS_HELD:
+                                rc = cl_unuse_try(subenv->lse_env, sublock);
+                                lov_sublock_release(env, lck, i, 0, 0);
+                                break;
+                        default:
+                                cl_lock_cancel(subenv->lse_env, sublock);
+                                lov_sublock_release(env, lck, i, 1, 0);
+                                break;
+                        }
+                        lov_sublock_unlock(env, sub, closure, subenv);
+                }
+                result = lov_subresult(result, rc);
        }
        if (result == 0 && lck->lls_cancel_race) {
                                lov_sublock_release(env, lck, i, 0, 0);
                                break;
                        default:
+                                cl_lock_cancel(subenv->lse_env, sublock);
                                lov_sublock_release(env, lck, i, 1, 0);
                                break;
                        }
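The unuse and cancel paths are brought into line with each other here: lov_lock_unuse() used to assert that a held sublock was in CLS_HELD or CLS_ENQUEUED and only ran cl_unuse_try(), while both loops now dispatch on sublock->cll_state, and any state other than CLS_HELD gets an explicit cl_lock_cancel() before release, so a partially enqueued sublock is torn down instead of lingering. Assuming this era's helper signature lov_sublock_release(env, lck, i, deluser, rc), the cancel branch passes deluser = 1 so the usage reference taken while the sublock was held is dropped as well.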
};
int lov_lock_init_raid0(const struct lu_env *env, struct cl_object *obj,
-                        struct cl_lock *lock, const struct cl_io *io)
+                        struct cl_lock *lock, const struct cl_io *io)
{
-        struct lov_lock *lck;
-        int result;
-        ENTRY;
-        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
-        if (lck != NULL) {
-                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
-                result = lov_lock_sub_init(env, lck, io);
-        } else
-                result = -ENOMEM;
-        RETURN(result);
+        struct lov_lock *lck;
+        int result;
+        ENTRY;
+        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
+        if (lck != NULL) {
+                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_lock_ops);
+                result = lov_lock_sub_init(env, lck, io);
+        } else
+                result = -ENOMEM;
+        RETURN(result);
}
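lov_lock_init_raid0() and the empty-lock initializer below share the same clio slice shape, differing only in the operations table and in whether per-stripe sub-lock state is built. Condensed sketch, with ops standing in for lov_lock_ops or lov_empty_lock_ops:

        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
        if (lck == NULL)
                RETURN(-ENOMEM);
        cl_lock_slice_add(lock, &lck->lls_cl, obj, ops);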
static void lov_empty_lock_fini(const struct lu_env *env,
};
int lov_lock_init_empty(const struct lu_env *env, struct cl_object *obj,
-                        struct cl_lock *lock, const struct cl_io *io)
+                        struct cl_lock *lock, const struct cl_io *io)
{
        struct lov_lock *lck;
        int result = -ENOMEM;
        ENTRY;
-        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, __GFP_IO);
+        OBD_SLAB_ALLOC_PTR_GFP(lck, lov_lock_kmem, GFP_NOFS);
        if (lck != NULL) {
                cl_lock_slice_add(lock, &lck->lls_cl, obj, &lov_empty_lock_ops);
                lck->lls_orig = lock->cll_descr;