struct lu_env *t_env;
};
+/*
+ * Accessor helpers for ptlrpc_thread::t_flags.  The thread_is_*()
+ * predicates return 0 or 1 depending on whether the corresponding
+ * SVC_* bit is set.
+ *
+ * NOTE(review): all of these are plain, non-atomic read-modify-write
+ * operations on t_flags; callers are expected to serialize access
+ * externally (e.g. under svc->srv_lock or lli->lli_sa_lock) -- confirm
+ * at each call site.
+ */
+static inline int thread_is_stopped(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_STOPPED);
+}
+
+static inline int thread_is_stopping(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_STOPPING);
+}
+
+static inline int thread_is_starting(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_STARTING);
+}
+
+static inline int thread_is_running(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_RUNNING);
+}
+
+static inline int thread_is_event(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_EVENT);
+}
+
+static inline int thread_is_signal(struct ptlrpc_thread *thread)
+{
+ return !!(thread->t_flags & SVC_SIGNAL);
+}
+
+/* Clear only the given bits, leaving the others untouched. */
+static inline void thread_clear_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+ thread->t_flags &= ~flags;
+}
+
+/*
+ * Overwrite ALL flags with the given value (assignment, not OR) --
+ * despite the name, any previously set bits are discarded.  Use
+ * thread_add_flags() to set bits additively.
+ */
+static inline void thread_set_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+ thread->t_flags = flags;
+}
+
+/* OR the given bits into the current flags. */
+static inline void thread_add_flags(struct ptlrpc_thread *thread, __u32 flags)
+{
+ thread->t_flags |= flags;
+}
+
+/*
+ * If any of the given bits are set, clear them and return 1; otherwise
+ * return 0.  Not atomic: the test and the clear are separate accesses,
+ * so concurrent updaters must hold the same lock as the caller.
+ */
+static inline int thread_test_and_clear_flags(struct ptlrpc_thread *thread,
+ __u32 flags)
+{
+ if (thread->t_flags & flags) {
+ thread->t_flags &= ~flags;
+ return 1;
+ }
+ return 0;
+}
+
/**
* Request buffer descriptor structure.
* This is a structure that contains one posted request buffer for service.
ENTRY;
cfs_daemonize(t_name);
- thread->t_flags = SVC_RUNNING;
+ thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread starting, process %d\n",
*/
lwi = LWI_TIMEOUT(cfs_time_seconds(LDLM_POOLS_THREAD_PERIOD),
NULL, NULL);
- l_wait_event(thread->t_ctl_waitq, (thread->t_flags &
- (SVC_STOPPING|SVC_EVENT)),
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_stopping(thread) ||
+ thread_is_event(thread),
&lwi);
- if (thread->t_flags & SVC_STOPPING) {
- thread->t_flags &= ~SVC_STOPPING;
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING))
break;
- } else if (thread->t_flags & SVC_EVENT) {
- thread->t_flags &= ~SVC_EVENT;
- }
+ else
+ thread_test_and_clear_flags(thread, SVC_EVENT);
}
- thread->t_flags = SVC_STOPPED;
+ thread_set_flags(thread, SVC_STOPPED);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_DLMTRACE, "%s: pool thread exiting, process %d\n",
RETURN(rc);
}
l_wait_event(ldlm_pools_thread->t_ctl_waitq,
- (ldlm_pools_thread->t_flags & SVC_RUNNING), &lwi);
+ thread_is_running(ldlm_pools_thread), &lwi);
RETURN(0);
}
return;
}
- ldlm_pools_thread->t_flags = SVC_STOPPING;
+ thread_set_flags(ldlm_pools_thread, SVC_STOPPING);
cfs_waitq_signal(&ldlm_pools_thread->t_ctl_waitq);
/*
* Use is subject to license terms.
*/
/*
+ * Copyright (c) 2011 Whamcloud, Inc.
+ */
+/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
return expired;
}
-static inline int ll_capa_check_stop(void)
-{
- return (ll_capa_thread.t_flags & SVC_STOPPING) ? 1: 0;
-}
-
static void sort_add_capa(struct obd_capa *ocapa, cfs_list_t *head)
{
struct obd_capa *tmp;
cfs_daemonize("ll_capa");
- ll_capa_thread.t_flags = SVC_RUNNING;
+ thread_set_flags(&ll_capa_thread, SVC_RUNNING);
cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
while (1) {
l_wait_event(ll_capa_thread.t_ctl_waitq,
- (ll_capa_check_stop() || have_expired_capa()),
+ !thread_is_running(&ll_capa_thread) ||
+ have_expired_capa(),
&lwi);
- if (ll_capa_check_stop())
+ if (!thread_is_running(&ll_capa_thread))
break;
next = NULL;
cfs_spin_unlock(&capa_lock);
}
- ll_capa_thread.t_flags = SVC_STOPPED;
+ thread_set_flags(&ll_capa_thread, SVC_STOPPED);
cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
RETURN(0);
}
RETURN(rc);
}
cfs_wait_event(ll_capa_thread.t_ctl_waitq,
- ll_capa_thread.t_flags & SVC_RUNNING);
+ thread_is_running(&ll_capa_thread));
RETURN(0);
}
void ll_capa_thread_stop(void)
{
- ll_capa_thread.t_flags = SVC_STOPPING;
+ thread_set_flags(&ll_capa_thread, SVC_STOPPING);
cfs_waitq_signal(&ll_capa_thread.t_ctl_waitq);
cfs_wait_event(ll_capa_thread.t_ctl_waitq,
- ll_capa_thread.t_flags & SVC_STOPPED);
+ thread_is_stopped(&ll_capa_thread));
}
struct obd_capa *ll_osscapa_get(struct inode *inode, __u64 opc)
return (cfs_atomic_read(&sai->sai_cache_count) < sai->sai_max);
}
-static inline int sa_is_running(struct ll_statahead_info *sai)
-{
- return !!(sai->sai_thread.t_flags & SVC_RUNNING);
-}
-
-static inline int sa_is_stopping(struct ll_statahead_info *sai)
-{
- return !!(sai->sai_thread.t_flags & SVC_STOPPING);
-}
-
-static inline int sa_is_stopped(struct ll_statahead_info *sai)
-{
- return !!(sai->sai_thread.t_flags & SVC_STOPPED);
-}
-
/**
* (1) hit ratio less than 80%
* or
lli->lli_opendir_pid = 0;
cfs_spin_unlock(&lli->lli_sa_lock);
- LASSERT(sa_is_stopped(sai));
+ LASSERT(thread_is_stopped(&sai->sai_thread));
if (sai->sai_sent > sai->sai_replied)
CDEBUG(D_READA,"statahead for dir "DFID" does not "
GOTO(out, rc = -ESTALE);
} else {
sai = ll_sai_get(lli->lli_sai);
- if (unlikely(!sa_is_running(sai))) {
+ if (unlikely(!thread_is_running(&sai->sai_thread))) {
sai->sai_replied++;
cfs_spin_unlock(&lli->lli_sa_lock);
GOTO(out, rc = -EBADFD);
atomic_inc(&sbi->ll_sa_total);
cfs_spin_lock(&lli->lli_sa_lock);
- thread->t_flags = SVC_RUNNING;
+ thread_set_flags(thread, SVC_RUNNING);
cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "start doing statahead for %s\n", parent->d_name.name);
l_wait_event(thread->t_ctl_waitq,
sa_not_full(sai) ||
!sa_received_empty(sai) ||
- !sa_is_running(sai),
+ !thread_is_running(thread),
&lwi);
while (!sa_received_empty(sai))
do_statahead_interpret(sai, NULL);
- if (unlikely(!sa_is_running(sai))) {
+ if (unlikely(!thread_is_running(thread))) {
ll_release_page(page, 0);
GOTO(out, rc);
}
l_wait_event(thread->t_ctl_waitq,
!sa_received_empty(sai) ||
sai->sai_sent == sai->sai_replied||
- !sa_is_running(sai),
+ !thread_is_running(thread),
&lwi);
while (!sa_received_empty(sai))
if ((sai->sai_sent == sai->sai_replied &&
sa_received_empty(sai)) ||
- !sa_is_running(sai))
+ !thread_is_running(thread))
GOTO(out, rc = 0);
}
} else if (1) {
ll_dir_chain_fini(&chain);
cfs_spin_lock(&lli->lli_sa_lock);
if (!sa_received_empty(sai)) {
- thread->t_flags = SVC_STOPPING;
+ thread_set_flags(thread, SVC_STOPPING);
cfs_spin_unlock(&lli->lli_sa_lock);
/* To release the resources held by received entries. */
cfs_spin_lock(&lli->lli_sa_lock);
}
- thread->t_flags = SVC_STOPPED;
+ thread_set_flags(thread, SVC_STOPPED);
cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&sai->sai_waitq);
cfs_waitq_signal(&thread->t_ctl_waitq);
struct l_wait_info lwi = { 0 };
struct ptlrpc_thread *thread = &lli->lli_sai->sai_thread;
- if (!sa_is_stopped(lli->lli_sai)) {
- thread->t_flags = SVC_STOPPING;
+ if (!thread_is_stopped(thread)) {
+ thread_set_flags(thread, SVC_STOPPING);
cfs_spin_unlock(&lli->lli_sa_lock);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_READA, "stopping statahead thread, pid %d\n",
cfs_curproc_pid());
l_wait_event(thread->t_ctl_waitq,
- sa_is_stopped(lli->lli_sai),
+ thread_is_stopped(thread),
&lwi);
} else {
cfs_spin_unlock(&lli->lli_sa_lock);
static void
ll_sai_unplug(struct ll_statahead_info *sai, struct ll_sa_entry *entry)
{
- struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
- int hit;
+ struct ptlrpc_thread *thread = &sai->sai_thread;
+ struct ll_sb_info *sbi = ll_i2sbi(sai->sai_inode);
+ int hit;
ENTRY;
if (entry != NULL && entry->se_stat == SA_ENTRY_SUCC)
sai->sai_miss++;
sai->sai_consecutive_miss++;
- if (sa_low_hit(sai) && sa_is_running(sai)) {
+ if (sa_low_hit(sai) && thread_is_running(thread)) {
atomic_inc(&sbi->ll_sa_wrong);
CDEBUG(D_READA, "Statahead for dir "DFID" hit "
"ratio too low: hit/miss "LPU64"/"LPU64
sai->sai_miss, sai->sai_sent,
sai->sai_replied, cfs_curproc_pid());
cfs_spin_lock(&lli->lli_sa_lock);
- if (!sa_is_stopped(sai))
- sai->sai_thread.t_flags = SVC_STOPPING;
+ if (!thread_is_stopped(thread))
+ thread_set_flags(thread, SVC_STOPPING);
cfs_spin_unlock(&lli->lli_sa_lock);
}
}
- if (!sa_is_stopped(sai))
- cfs_waitq_signal(&sai->sai_thread.t_ctl_waitq);
+ if (!thread_is_stopped(thread))
+ cfs_waitq_signal(&thread->t_ctl_waitq);
EXIT;
}
struct ll_statahead_info *sai = lli->lli_sai;
struct dentry *parent;
struct ll_sa_entry *entry;
+ struct ptlrpc_thread *thread;
struct l_wait_info lwi = { 0 };
int rc = 0;
ENTRY;
LASSERT(lli->lli_opendir_pid == cfs_curproc_pid());
if (sai) {
- if (unlikely(sa_is_stopped(sai) &&
+ thread = &sai->sai_thread;
+ if (unlikely(thread_is_stopped(thread) &&
cfs_list_empty(&sai->sai_entries_stated))) {
/* to release resource */
ll_stop_statahead(dir, lli->lli_opendir_key);
LWI_ON_SIGNAL_NOOP, NULL);
rc = l_wait_event(sai->sai_waitq,
ll_sa_entry_stated(entry) ||
- sa_is_stopped(sai),
+ thread_is_stopped(thread),
&lwi);
if (rc < 0) {
ll_sai_unplug(sai, entry);
lli->lli_sai = sai;
rc = cfs_create_thread(ll_statahead_thread, parent, 0);
+ thread = &sai->sai_thread;
if (rc < 0) {
CERROR("can't start ll_sa thread, rc: %d\n", rc);
dput(parent);
lli->lli_opendir_key = NULL;
- sai->sai_thread.t_flags = SVC_STOPPED;
+ thread_set_flags(thread, SVC_STOPPED);
ll_sai_put(sai);
LASSERT(lli->lli_sai == NULL);
RETURN(-EAGAIN);
}
- l_wait_event(sai->sai_thread.t_ctl_waitq,
- sa_is_running(sai) || sa_is_stopped(sai),
+ l_wait_event(thread->t_ctl_waitq,
+ thread_is_running(thread) || thread_is_stopped(thread),
&lwi);
/*
* Use is subject to license terms.
*/
/*
+ * Copyright (c) 2011 Whamcloud, Inc.
+ */
+/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
ENTRY;
- thread->t_flags |= SVC_EVENT;
+ thread_add_flags(thread, SVC_EVENT);
cfs_waitq_signal(&thread->t_ctl_waitq);
EXIT;
}
cfs_daemonize_ctxt("mdt_ck");
cfs_block_allsigs();
- thread->t_flags = SVC_RUNNING;
+ thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
rc = lu_env_init(&env, LCT_MD_THREAD|LCT_REMEMBER|LCT_NOREF);
mdsnum = mdt_md_site(mdt)->ms_node_id;
while (1) {
l_wait_event(thread->t_ctl_waitq,
- thread->t_flags & (SVC_STOPPING | SVC_EVENT),
+ thread_is_stopping(thread) ||
+ thread_is_event(thread),
&lwi);
- if (thread->t_flags & SVC_STOPPING)
+ if (thread_is_stopping(thread))
break;
- thread->t_flags &= ~SVC_EVENT;
+ thread_clear_flags(thread, SVC_EVENT);
if (cfs_time_before(cfs_time_current(), mdt->mdt_ck_expiry))
break;
}
lu_env_fini(&env);
- thread->t_flags = SVC_STOPPED;
+ thread_set_flags(thread, SVC_STOPPED);
cfs_waitq_signal(&thread->t_ctl_waitq);
RETURN(0);
}
return rc;
}
- l_wait_condition(thread->t_ctl_waitq, thread->t_flags & SVC_RUNNING);
+ l_wait_condition(thread->t_ctl_waitq, thread_is_running(thread));
return 0;
}
{
struct ptlrpc_thread *thread = &mdt->mdt_ck_thread;
- if (!(thread->t_flags & SVC_RUNNING))
+ if (!thread_is_running(thread))
return;
- thread->t_flags = SVC_STOPPING;
+ thread_set_flags(thread, SVC_STOPPING);
cfs_waitq_signal(&thread->t_ctl_waitq);
- l_wait_condition(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);
+ l_wait_condition(thread->t_ctl_waitq, thread_is_stopped(thread));
}
cfs_daemonize(data->name);
/* Record that the thread is running */
- thread->t_flags = SVC_RUNNING;
+ thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
/* And now, loop forever, pinging as needed. */
next ping time to next_ping + .01 sec, which means
we will SKIP the next ping at next_ping, and the
ping will get sent 2 timeouts from now! Beware. */
- CDEBUG(D_INFO, "next wakeup in "CFS_DURATION_T" ("CFS_TIME_T")\n",
- time_to_next_wake,
- cfs_time_add(this_ping, cfs_time_seconds(PING_INTERVAL)));
+ CDEBUG(D_INFO, "next wakeup in "CFS_DURATION_T" ("
+ CFS_TIME_T")\n", time_to_next_wake,
+ cfs_time_add(this_ping,cfs_time_seconds(PING_INTERVAL)));
if (time_to_next_wake > 0) {
- lwi = LWI_TIMEOUT(max_t(cfs_duration_t, time_to_next_wake, cfs_time_seconds(1)),
- NULL, NULL);
+ lwi = LWI_TIMEOUT(max_t(cfs_duration_t,
+ time_to_next_wake,
+ cfs_time_seconds(1)),
+ NULL, NULL);
l_wait_event(thread->t_ctl_waitq,
- thread->t_flags & (SVC_STOPPING|SVC_EVENT),
+ thread_is_stopping(thread) ||
+ thread_is_event(thread),
&lwi);
- if (thread->t_flags & SVC_STOPPING) {
- thread->t_flags &= ~SVC_STOPPING;
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING)) {
EXIT;
break;
- } else if (thread->t_flags & SVC_EVENT) {
+ } else {
/* woken after adding import to reset timer */
- thread->t_flags &= ~SVC_EVENT;
+ thread_test_and_clear_flags(thread, SVC_EVENT);
}
}
}
- thread->t_flags = SVC_STOPPED;
+ thread_set_flags(thread, SVC_STOPPED);
cfs_waitq_signal(&thread->t_ctl_waitq);
CDEBUG(D_NET, "pinger thread exiting, process %d\n", cfs_curproc_pid());
RETURN(rc);
}
l_wait_event(pinger_thread->t_ctl_waitq,
- pinger_thread->t_flags & SVC_RUNNING, &lwi);
+ thread_is_running(pinger_thread), &lwi);
RETURN(0);
}
ptlrpc_pinger_remove_timeouts();
cfs_mutex_down(&pinger_sem);
- pinger_thread->t_flags = SVC_STOPPING;
+ thread_set_flags(pinger_thread, SVC_STOPPING);
cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
cfs_mutex_up(&pinger_sem);
l_wait_event(pinger_thread->t_ctl_waitq,
- (pinger_thread->t_flags & SVC_STOPPED), &lwi);
+ thread_is_stopped(pinger_thread), &lwi);
OBD_FREE_PTR(pinger_thread);
pinger_thread = NULL;
void ptlrpc_pinger_wake_up()
{
#ifdef ENABLE_PINGER
- pinger_thread->t_flags |= SVC_EVENT;
+ thread_add_flags(pinger_thread, SVC_EVENT);
cfs_waitq_signal(&pinger_thread->t_ctl_waitq);
#endif
}
* Use is subject to license terms.
*/
/*
+ * Copyright (c) 2011 Whamcloud, Inc.
+ */
+/*
* This file is part of Lustre, http://www.lustre.org/
* Lustre is a trademark of Sun Microsystems, Inc.
*
cfs_list_add(&ctx->cc_gc_chain, &sec_gc_ctx_list);
cfs_spin_unlock(&sec_gc_ctx_list_lock);
- sec_gc_thread.t_flags |= SVC_SIGNAL;
+ thread_add_flags(&sec_gc_thread, SVC_SIGNAL);
cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
}
EXPORT_SYMBOL(sptlrpc_gc_add_ctx);
cfs_daemonize_ctxt("sptlrpc_gc");
/* Record that the thread is running */
- thread->t_flags = SVC_RUNNING;
+ thread_set_flags(thread, SVC_RUNNING);
cfs_waitq_signal(&thread->t_ctl_waitq);
while (1) {
struct ptlrpc_sec *sec;
- thread->t_flags &= ~SVC_SIGNAL;
+ thread_clear_flags(thread, SVC_SIGNAL);
sec_process_ctx_list();
again:
/* go through sec list do gc.
lwi = LWI_TIMEOUT(SEC_GC_INTERVAL * CFS_HZ, NULL, NULL);
l_wait_event(thread->t_ctl_waitq,
- thread->t_flags & (SVC_STOPPING | SVC_SIGNAL),
+ thread_is_stopping(thread) ||
+ thread_is_signal(thread),
&lwi);
- if (thread->t_flags & SVC_STOPPING) {
- thread->t_flags &= ~SVC_STOPPING;
+ if (thread_test_and_clear_flags(thread, SVC_STOPPING))
break;
- }
}
- thread->t_flags = SVC_STOPPED;
+ thread_set_flags(thread, SVC_STOPPED);
cfs_waitq_signal(&thread->t_ctl_waitq);
return 0;
}
}
l_wait_event(sec_gc_thread.t_ctl_waitq,
- sec_gc_thread.t_flags & SVC_RUNNING, &lwi);
+ thread_is_running(&sec_gc_thread), &lwi);
return 0;
}
{
struct l_wait_info lwi = { 0 };
- sec_gc_thread.t_flags = SVC_STOPPING;
+ thread_set_flags(&sec_gc_thread, SVC_STOPPING);
cfs_waitq_signal(&sec_gc_thread.t_ctl_waitq);
l_wait_event(sec_gc_thread.t_ctl_waitq,
- sec_gc_thread.t_flags & SVC_STOPPED, &lwi);
+ thread_is_stopped(&sec_gc_thread), &lwi);
}
#else /* !__KERNEL__ */
+/* True when this thread, or its whole service, has been asked to stop. */
static inline int
ptlrpc_thread_stopping(struct ptlrpc_thread *thread)
{
- return (thread->t_flags & SVC_STOPPING) != 0 ||
- thread->t_svc->srv_is_stopping;
+ return thread_is_stopping(thread) ||
+ thread->t_svc->srv_is_stopping;
}
static inline int
cfs_spin_lock(&svc->srv_lock);
- LASSERT((thread->t_flags & SVC_STARTING) != 0);
- thread->t_flags &= ~SVC_STARTING;
+ LASSERT(thread_is_starting(thread));
+ thread_clear_flags(thread, SVC_STARTING);
svc->srv_threads_starting--;
/* SVC_STOPPING may already be set here if someone else is trying
* to stop the service while this new thread has been dynamically
* forked. We still set SVC_RUNNING to let our creator know that
* we are now running, however we will exit as soon as possible */
- thread->t_flags |= SVC_RUNNING;
+ thread_add_flags(thread, SVC_RUNNING);
svc->srv_threads_running++;
cfs_spin_unlock(&svc->srv_lock);
thread, thread->t_pid, thread->t_id, rc);
cfs_spin_lock(&svc->srv_lock);
- if ((thread->t_flags & SVC_STARTING) != 0) {
+ if (thread_test_and_clear_flags(thread, SVC_STARTING))
svc->srv_threads_starting--;
- thread->t_flags &= ~SVC_STARTING;
- }
- if ((thread->t_flags & SVC_RUNNING) != 0) {
+ if (thread_test_and_clear_flags(thread, SVC_RUNNING))
/* must know immediately */
svc->srv_threads_running--;
- thread->t_flags &= ~SVC_RUNNING;
- }
thread->t_id = rc;
- thread->t_flags |= SVC_STOPPED;
+ thread_add_flags(thread, SVC_STOPPED);
cfs_waitq_signal(&thread->t_ctl_waitq);
cfs_spin_unlock(&svc->srv_lock);
cfs_spin_lock(&svc->srv_lock);
/* let the thread know that we would like it to stop asap */
- thread->t_flags |= SVC_STOPPING;
+ thread_add_flags(thread, SVC_STOPPING);
cfs_spin_unlock(&svc->srv_lock);
cfs_waitq_broadcast(&svc->srv_waitq);
l_wait_event(thread->t_ctl_waitq,
- (thread->t_flags & SVC_STOPPED), &lwi);
+ thread_is_stopped(thread), &lwi);
cfs_spin_lock(&svc->srv_lock);
cfs_list_del(&thread->t_link);
svc->srv_threads_starting++;
thread->t_id = svc->srv_threads_next_id++;
- thread->t_flags |= SVC_STARTING;
+ thread_add_flags(thread, SVC_STARTING);
thread->t_svc = svc;
cfs_list_add(&thread->t_link, &svc->srv_threads);
RETURN(rc);
}
l_wait_event(thread->t_ctl_waitq,
- thread->t_flags & (SVC_RUNNING | SVC_STOPPED), &lwi);
+ thread_is_running(thread) || thread_is_stopped(thread),
+ &lwi);
- rc = (thread->t_flags & SVC_STOPPED) ? thread->t_id : 0;
+ rc = thread_is_stopped(thread) ? thread->t_id : 0;
RETURN(rc);
}