/* take layout lock mutex to enqueue layout lock exclusively. */
mutex_lock(&lli->lli_layout_mutex);
+ lli->lli_layout_lock_owner = current;
while (1) {
/* mostly layout lock is caching on the local side, so try to
if (rc == 0)
*gen = ll_layout_version_get(lli);
+ lli->lli_layout_lock_owner = NULL;
mutex_unlock(&lli->lli_layout_mutex);
RETURN(rc);
enum coo_inode_opc opc, void *data)
{
struct inode *inode = vvp_object_inode(obj);
+ struct ll_inode_info *lli = ll_i2info(inode);
int rc = 0;
ENTRY;
rc = -ENOLCK;
break;
case COIO_SIZE_LOCK:
- if (ll_i2info(inode)->lli_size_lock_owner != current)
+ if (lli->lli_size_lock_owner != current)
ll_inode_size_lock(inode);
else
rc = -EALREADY;
break;
case COIO_SIZE_UNLOCK:
- if (ll_i2info(inode)->lli_size_lock_owner == current)
+ if (lli->lli_size_lock_owner == current)
ll_inode_size_unlock(inode);
else
rc = -ENOLCK;
break;
+ case COIO_LAYOUT_LOCK:
+ if (lli->lli_layout_lock_owner != current) {
+ mutex_lock(&lli->lli_layout_mutex);
+ lli->lli_layout_lock_owner = current;
+ }
+ break;
+ case COIO_LAYOUT_UNLOCK:
+ if (lli->lli_layout_lock_owner == current) {
+ lli->lli_layout_lock_owner = NULL;
+ mutex_unlock(&lli->lli_layout_mutex);
+ } else {
+ rc = -ENOLCK;
+ }
+ break;
default:
rc = -EINVAL;
break;
struct cl_object *top = cl_object_top(obj);
bool unlock_inode = false;
bool lock_inode_size = false;
+ bool lock_layout = false;
ENTRY;
if (conf->coc_opc == OBJECT_CONF_SET &&
/**
* we need unlocked lov conf and get inode lock.
* It's possible we have already taken inode's size
- * mutex, so we need keep such lock order, lest deadlock
- * happens:
- * inode lock (ll_inode_lock())
- * inode size lock (ll_inode_size_lock())
- * lov conf lock (lov_conf_lock())
+ * mutex and/or layout mutex, so we need to keep this lock
+ * order, lest a deadlock happen:
+ * inode lock (ll_inode_lock())
+ * inode size lock (ll_inode_size_lock())
+ * inode layout lock (ll_layout_refresh())
+ * lov conf lock (lov_conf_lock())
*
* e.g.
* vfs_setxattr inode locked
* ll_file_inode_init
* cl_conf_set
* lov_conf_set lov conf locked
+ *
+ * ll_migrate inode locked
+ * ...
+ * ll_layout_refresh inode layout locked
+ * ll_layout_conf
+ * cl_conf_set
+ * lov_conf_set lov conf locked
*/
lov_conf_unlock(lov);
- if (cl_object_inode_ops(
- env, top, COIO_SIZE_UNLOCK, NULL) == 0)
+ if (cl_object_inode_ops(env, top, COIO_LAYOUT_UNLOCK,
+ NULL) == 0)
+ lock_layout = true;
+ if (cl_object_inode_ops(env, top, COIO_SIZE_UNLOCK,
+ NULL) == 0)
lock_inode_size = true;
/* take lock in order */
env, top, COIO_INODE_LOCK, NULL) == 0)
unlock_inode = true;
if (lock_inode_size)
- cl_object_inode_ops(
- env, top, COIO_SIZE_LOCK, NULL);
+ cl_object_inode_ops(env, top, COIO_SIZE_LOCK,
+ NULL);
+ if (lock_layout)
+ cl_object_inode_ops(env, top, COIO_LAYOUT_LOCK,
+ NULL);
goto retry;
}
set_bit(LO_LAYOUT_INVALID, &lov->lo_obj_flags);