* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
+ *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define DEBUG_SUBSYSTEM S_LLITE
-//#include <lustre_mdc.h>
#include <lustre_lite.h>
#include "llite_internal.h"
struct ll_inode_info *lli = ll_i2info(club->cob_inode);
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
lli->lli_flags |= LLIF_SOM_DIRTY;
- if (page != NULL && list_empty(&page->cpg_pending_linkage))
- list_add(&page->cpg_pending_linkage, &club->cob_pending_list);
- spin_unlock(&lli->lli_lock);
+ if (page != NULL && cfs_list_empty(&page->cpg_pending_linkage))
+ cfs_list_add(&page->cpg_pending_linkage,
+ &club->cob_pending_list);
+ cfs_spin_unlock(&lli->lli_lock);
EXIT;
}
int rc = 0;
ENTRY;
- spin_lock(&lli->lli_lock);
- if (page != NULL && !list_empty(&page->cpg_pending_linkage)) {
- list_del_init(&page->cpg_pending_linkage);
+ cfs_spin_lock(&lli->lli_lock);
+ if (page != NULL && !cfs_list_empty(&page->cpg_pending_linkage)) {
+ cfs_list_del_init(&page->cpg_pending_linkage);
rc = 1;
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
if (rc)
ll_queue_done_writing(club->cob_inode, 0);
EXIT;
struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
ENTRY;
- spin_lock(&lli->lli_lock);
+ cfs_spin_lock(&lli->lli_lock);
lli->lli_flags |= flags;
if ((lli->lli_flags & LLIF_DONE_WRITING) &&
- list_empty(&club->cob_pending_list)) {
+ cfs_list_empty(&club->cob_pending_list)) {
struct ll_close_queue *lcq = ll_i2sbi(inode)->ll_lcq;
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CWARN("ino %lu/%u(flags %lu) som valid it just after "
- "recovery\n",
- inode->i_ino, inode->i_generation,
- lli->lli_flags);
+ CWARN("ino %lu/%u(flags %u) som valid it just after "
+ "recovery\n",
+ inode->i_ino, inode->i_generation,
+ lli->lli_flags);
/* DONE_WRITING is allowed and inode has no dirty page. */
- spin_lock(&lcq->lcq_lock);
+ cfs_spin_lock(&lcq->lcq_lock);
- LASSERT(list_empty(&lli->lli_close_list));
+ LASSERT(cfs_list_empty(&lli->lli_close_list));
CDEBUG(D_INODE, "adding inode %lu/%u to close list\n",
inode->i_ino, inode->i_generation);
- list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
+ cfs_list_add_tail(&lli->lli_close_list, &lcq->lcq_head);
/* Avoid a concurrent insertion into the close thread queue:
* an inode is already in the close thread, open(), write(),
* it. */
lli->lli_flags &= ~LLIF_DONE_WRITING;
- wake_up(&lcq->lcq_waitq);
- spin_unlock(&lcq->lcq_lock);
+ cfs_waitq_signal(&lcq->lcq_waitq);
+ cfs_spin_unlock(&lcq->lcq_lock);
}
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
EXIT;
}
op_data->op_flags |= MF_SOM_CHANGE;
/* Check if Size-on-MDS attributes are valid. */
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %lu) som valid it just after "
+ CERROR("ino %lu/%u(flags %u) som valid it just after "
"recovery\n", inode->i_ino, inode->i_generation,
lli->lli_flags);
struct ccc_object *club = cl2ccc(ll_i2info(inode)->lli_clob);
ENTRY;
- spin_lock(&lli->lli_lock);
- if (!(list_empty(&club->cob_pending_list))) {
+ cfs_spin_lock(&lli->lli_lock);
+ if (!(cfs_list_empty(&club->cob_pending_list))) {
if (!(lli->lli_flags & LLIF_EPOCH_PENDING)) {
LASSERT(*och != NULL);
LASSERT(lli->lli_pending_och == NULL);
* request yet, DONE_WRITE is to be sent later. */
lli->lli_flags |= LLIF_EPOCH_PENDING;
lli->lli_pending_och = *och;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
inode = igrab(inode);
LASSERT(inode);
* and try DONE_WRITE again later. */
LASSERT(!(lli->lli_flags & LLIF_DONE_WRITING));
lli->lli_flags |= LLIF_DONE_WRITING;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
inode = igrab(inode);
LASSERT(inode);
} else {
/* Pack Size-on-MDS inode attributes only if they have changed */
if (!(lli->lli_flags & LLIF_SOM_DIRTY)) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
GOTO(out, 0);
}
/* There is a pending DONE_WRITE -- close epoch with no
* attribute change. */
if (lli->lli_flags & LLIF_EPOCH_PENDING) {
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
GOTO(out, 0);
}
}
- LASSERT(list_empty(&club->cob_pending_list));
+ LASSERT(cfs_list_empty(&club->cob_pending_list));
lli->lli_flags &= ~LLIF_SOM_DIRTY;
- spin_unlock(&lli->lli_lock);
+ cfs_spin_unlock(&lli->lli_lock);
ll_done_writing_attr(inode, op_data);
EXIT;
{
struct ll_inode_info *lli = ll_i2info(inode);
struct ptlrpc_request *request = NULL;
+ __u32 old_flags;
struct obdo *oa;
int rc;
ENTRY;
+ LASSERT(op_data != NULL);
if (lli->lli_flags & LLIF_MDS_SIZE_LOCK)
- CERROR("ino %lu/%u(flags %lu) som valid it just after "
+ CERROR("ino %lu/%u(flags %u) som valid it just after "
"recovery\n", inode->i_ino, inode->i_generation,
lli->lli_flags);
RETURN(-ENOMEM);
}
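+	/* Preserve the caller's flags (e.g. MF_GETATTR_LOCK) before they are
+	 * overwritten with MF_SOM_CHANGE below; the saved value is passed on
+	 * to ll_inode_getattr(). */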
+ old_flags = op_data->op_flags;
op_data->op_flags = MF_SOM_CHANGE;
/* If inode is already in another epoch, skip getattr from OSTs. */
if (lli->lli_ioepoch == op_data->op_ioepoch) {
- rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch);
+ rc = ll_inode_getattr(inode, oa, op_data->op_ioepoch,
+ old_flags & MF_GETATTR_LOCK);
if (rc) {
oa->o_valid = 0;
if (rc == -ENOENT)
{
struct ll_inode_info *lli = NULL;
- spin_lock(&lcq->lcq_lock);
+ cfs_spin_lock(&lcq->lcq_lock);
- if (!list_empty(&lcq->lcq_head)) {
- lli = list_entry(lcq->lcq_head.next, struct ll_inode_info,
- lli_close_list);
- list_del_init(&lli->lli_close_list);
- } else if (atomic_read(&lcq->lcq_stop))
+ if (!cfs_list_empty(&lcq->lcq_head)) {
+ lli = cfs_list_entry(lcq->lcq_head.next, struct ll_inode_info,
+ lli_close_list);
+ cfs_list_del_init(&lli->lli_close_list);
+ } else if (cfs_atomic_read(&lcq->lcq_stop))
lli = ERR_PTR(-EALREADY);
- spin_unlock(&lcq->lcq_lock);
+ cfs_spin_unlock(&lcq->lcq_lock);
return lli;
}
cfs_daemonize(name);
}
- complete(&lcq->lcq_comp);
+ cfs_complete(&lcq->lcq_comp);
while (1) {
struct l_wait_info lwi = { 0 };
}
CDEBUG(D_INFO, "ll_close exiting\n");
- complete(&lcq->lcq_comp);
+ cfs_complete(&lcq->lcq_comp);
RETURN(0);
}
if (lcq == NULL)
return -ENOMEM;
- spin_lock_init(&lcq->lcq_lock);
- INIT_LIST_HEAD(&lcq->lcq_head);
- init_waitqueue_head(&lcq->lcq_waitq);
- init_completion(&lcq->lcq_comp);
+ cfs_spin_lock_init(&lcq->lcq_lock);
+ CFS_INIT_LIST_HEAD(&lcq->lcq_head);
+ cfs_waitq_init(&lcq->lcq_waitq);
+ cfs_init_completion(&lcq->lcq_comp);
- pid = kernel_thread(ll_close_thread, lcq, 0);
+ pid = cfs_create_thread(ll_close_thread, lcq, 0);
if (pid < 0) {
OBD_FREE(lcq, sizeof(*lcq));
return pid;
}
- wait_for_completion(&lcq->lcq_comp);
+ cfs_wait_for_completion(&lcq->lcq_comp);
*lcq_ret = lcq;
return 0;
}
void ll_close_thread_shutdown(struct ll_close_queue *lcq)
{
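+	/* Reuse lcq_comp for shutdown: reinitialize it, raise lcq_stop,
+	 * wake the close thread, and wait for it to exit before freeing
+	 * the queue. */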
- init_completion(&lcq->lcq_comp);
- atomic_inc(&lcq->lcq_stop);
- wake_up(&lcq->lcq_waitq);
- wait_for_completion(&lcq->lcq_comp);
+ cfs_init_completion(&lcq->lcq_comp);
+ cfs_atomic_inc(&lcq->lcq_stop);
+ cfs_waitq_signal(&lcq->lcq_waitq);
+ cfs_wait_for_completion(&lcq->lcq_comp);
OBD_FREE(lcq, sizeof(*lcq));
}