* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2011, 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
ENTRY;
spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
- cfs_list_add(&page->cpg_pending_linkage,
+ if (page != NULL && list_empty(&page->cpg_pending_linkage))
+ list_add(&page->cpg_pending_linkage,
&club->cob_pending_list);
spin_unlock(&lli->lli_lock);
EXIT;
ENTRY;
spin_lock(&lli->lli_lock);
- if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
- cfs_list_del_init(&page->cpg_pending_linkage);
+ if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
+ list_del_init(&page->cpg_pending_linkage);
rc = 1;
}
spin_unlock(&lli->lli_lock);
lli->lli_flags |= flags;
if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- cfs_list_empty(&club->cob_pending_list)) {
+ list_empty(&club->cob_pending_list)) {
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
- /* DONE_WRITING is allowed and inode has no dirty page. */
+ CWARN("%s: file "DFID"(flags %u) Size-on-MDS valid, "
"done writing allowed and no dirty pages\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
+ /* DONE_WRITING is allowed and inode has no dirty page. */
spin_lock(&lcq->lcq_lock);
- LASSERT(cfs_list_empty(&lli->lli_close_list));
- CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
- inode->i_ino, inode->i_generation);
- cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
+ LASSERT(list_empty(&lli->lli_close_list));
+ CDEBUG(D_INODE, "adding inode "DFID" to close list\n",
+ PFID(ll_inode2fid(inode)));
+ list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
- /* Avoid a concurrent insertion into the close thread queue:
- * an inode is already in the close thread, open(), write(),
- * close() happen, epoch is closed as the inode is marked as
- * LLIF_EPOCH_PENDING. When pages are written inode should not
- * be inserted into the queue again, clear this flag to avoid
- * it. */
- lli->lli_flags &= ~LLIF_DONE_WRITING;
+ /* Avoid a concurrent insertion into the close thread queue:
+ * an inode is already in the close thread, open(), write(),
+ * close() happen, epoch is closed as the inode is marked as
+ * LLIF_EPOCH_PENDING. When pages are written inode should not
+ * be inserted into the queue again, clear this flag to avoid
+ * it. */
+ lli->lli_flags &= ~LLIF_DONE_WRITING;
- cfs_waitq_signal(&lcq->lcq_waitq);
+ wake_up(&lcq->lcq_waitq);
spin_unlock(&lcq->lcq_lock);
}
spin_unlock(&lli->lli_lock);
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n", inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CERROR("%s: inode "DFID"(flags %u) MDS holds lock on "
+ "Size-on-MDS attributes\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
if (!cl_local_size(inode)) {
/* Send Size-on-MDS Attributes if valid. */
ENTRY;
spin_lock(&lli->lli_lock);
- if (!(cfs_list_empty(&club->cob_pending_list))) {
+ if (!(list_empty(&club->cob_pending_list))) {
if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
LASSERT(*och != NULL);
LASSERT(lli->lli_pending_och == NULL);
}
}
- LASSERT(cfs_list_empty(&club->cob_pending_list));
+ LASSERT(list_empty(&club->cob_pending_list));
lli->lli_flags &= ~LLIF_SOM_DIRTY;
spin_unlock(&lli->lli_lock);
ll_done_writing_attr(inode, op_data);
LASSERT(op_data != NULL);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %u) som valid it just after "
- "recovery\n", inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CERROR("%s: inode "DFID"(flags %u) MDS holds lock on "
+ "Size-on-MDS attributes\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), lli->lli_flags);
OBDO_ALLOC(oa);
if (!oa) {
if (rc) {
oa->o_valid = 0;
if (rc != -ENOENT)
- CERROR("inode_getattr failed (%d): unable to "
- "send a Size-on-MDS attribute update "
- "for inode %lu/%u\n", rc, inode->i_ino,
- inode->i_generation);
+ CERROR("%s: inode_getattr failed - unable to "
+ "send a Size-on-MDS attribute update "
+ "for inode "DFID": rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
} else {
CDEBUG(D_INODE, "Size-on-MDS update on "DFID"\n",
PFID(&lli->lli_fid));
* OSTs and send setattr to back to MDS. */
rc = ll_som_update(inode, op_data);
} else if (rc) {
- CERROR("inode %lu mdc done_writing failed: rc = %d\n",
- inode->i_ino, rc);
+ CERROR("%s: inode "DFID" mdc done_writing failed: rc = %d\n",
+ ll_get_fsname(inode->i_sb, NULL, 0),
+ PFID(ll_inode2fid(inode)), rc);
}
out:
ll_finish_md_op_data(op_data);
spin_lock(&lcq->lcq_lock);
- if (!cfs_list_empty(&lcq->lcq_head)) {
- lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
- lli_close_list);
- cfs_list_del_init(&lli->lli_close_list);
- } else if (cfs_atomic_read(&lcq->lcq_stop))
- lli = ERR_PTR(-EALREADY);
+ if (!list_empty(&lcq->lcq_head)) {
+ lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
+ lli_close_list);
+ list_del_init(&lli->lli_close_list);
+ } else if (atomic_read(&lcq->lcq_stop))
+ lli = ERR_PTR(-EALREADY);
spin_unlock(&lcq->lcq_lock);
return lli;
struct ll_close_queue *lcq = arg;
ENTRY;
- {
- char name[CFS_CURPROC_COMM_MAX];
- snprintf(name, sizeof(name) - 1, "ll_close");
- cfs_daemonize(name);
- }
-
complete(&lcq->lcq_comp);
while (1) {
break;
inode = ll_info2i(lli);
- CDEBUG(D_INFO, "done_writting for inode %lu/%u\n",
- inode->i_ino, inode->i_generation);
+ CDEBUG(D_INFO, "done_writing for inode "DFID"\n",
+ PFID(ll_inode2fid(inode)));
ll_done_writing(inode);
iput(inode);
}
int ll_close_thread_start(struct ll_close_queue **lcq_ret)
{
- struct ll_close_queue *lcq;
- pid_t pid;
+ struct ll_close_queue *lcq;
+ struct task_struct *task;
- if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
- return -EINTR;
+ if (OBD_FAIL_CHECK(OBD_FAIL_LDLM_CLOSE_THREAD))
+ return -EINTR;
- OBD_ALLOC(lcq, sizeof(*lcq));
- if (lcq == NULL)
- return -ENOMEM;
+ OBD_ALLOC(lcq, sizeof(*lcq));
+ if (lcq == NULL)
+ return -ENOMEM;
spin_lock_init(&lcq->lcq_lock);
- CFS_INIT_LIST_HEAD(&lcq->lcq_head);
- cfs_waitq_init(&lcq->lcq_waitq);
+ INIT_LIST_HEAD(&lcq->lcq_head);
+ init_waitqueue_head(&lcq->lcq_waitq);
init_completion(&lcq->lcq_comp);
- pid = cfs_create_thread(ll_close_thread, lcq, 0);
- if (pid < 0) {
+ task = kthread_run(ll_close_thread, lcq, "ll_close");
+ if (IS_ERR(task)) {
OBD_FREE(lcq, sizeof(*lcq));
- return pid;
+ return PTR_ERR(task);
}
wait_for_completion(&lcq->lcq_comp);
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
init_completion(&lcq->lcq_comp);
- cfs_atomic_inc(&lcq->lcq_stop);
- cfs_waitq_signal(&lcq->lcq_waitq);
+ atomic_inc(&lcq->lcq_stop);
+ wake_up(&lcq->lcq_waitq);
wait_for_completion(&lcq->lcq_comp);
OBD_FREE(lcq, sizeof(*lcq));
}