OBD_NOTIFY_ACTIVE,
/* Device deactivated */
OBD_NOTIFY_INACTIVE,
- /* Device disconnected */
- OBD_NOTIFY_DISCON,
/* Connect data for import were changed */
OBD_NOTIFY_OCD,
/* Sync request */
ldlm_lock_remove_from_lru(lock);
class_handle_unhash(&lock->l_handle);
-#if 0
- /* Wake anyone waiting for this lock */
- /* FIXME: I should probably add yet another flag, instead of using
- * l_export to only call this on clients */
- if (lock->l_export)
- class_export_put(lock->l_export);
- lock->l_export = NULL;
- if (lock->l_export && lock->l_completion_ast)
- lock->l_completion_ast(lock, 0);
-#endif
EXIT;
return 1;
}
GOTO(out, rc = -EPROTO);
}
-#if 0
- /* FIXME this makes it impossible to use LDLM_PLAIN locks -- check
- against server's _CONNECT_SUPPORTED flags? (I don't want to use
- ibits for mgc/mgs) */
-
- /* INODEBITS_INTEROP: Perform conversion from plain lock to
- * inodebits lock if client does not support them. */
- if (!(exp_connect_flags(req->rq_export) & OBD_CONNECT_IBITS) &&
- (dlm_req->lock_desc.l_resource.lr_type == LDLM_PLAIN)) {
- dlm_req->lock_desc.l_resource.lr_type = LDLM_IBITS;
- dlm_req->lock_desc.l_policy_data.l_inodebits.bits =
- MDS_INODELOCK_LOOKUP | MDS_INODELOCK_UPDATE;
- if (dlm_req->lock_desc.l_req_mode == LCK_PR)
- dlm_req->lock_desc.l_req_mode = LCK_CR;
- }
-#endif
-
if (unlikely((flags & LDLM_FL_REPLAY) ||
(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))) {
/* Find an existing lock in the per-export lock hash */
*/
obd->obd_self_export->exp_connect_data = *conn_data;
}
-#if 0
- else if (ev == OBD_NOTIFY_DISCON) {
- /*
- * For disconnect event, flush fld cache for failout MDS case.
- */
- fld_client_flush(&lmv->lmv_fld);
- }
-#endif
+
/*
* Pass the notification up the chain.
*/
}
static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
- enum obd_import_event event)
+ enum obd_import_event event)
{
- int rc = 0;
+ int rc = 0;
- LASSERT(imp->imp_obd == obd);
+ LASSERT(imp->imp_obd == obd);
- switch (event) {
- case IMP_EVENT_DISCON: {
-#if 0
- /* XXX Pass event up to OBDs stack. used only for FLD now */
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_DISCON, NULL);
-#endif
- break;
- }
- case IMP_EVENT_INACTIVE: {
- struct client_obd *cli = &obd->u.cli;
- /*
- * Flush current sequence to make client obtain new one
- * from server in case of disconnect/reconnect.
- */
- if (cli->cl_seq != NULL)
- seq_client_flush(cli->cl_seq);
+ switch (event) {
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
- break;
- }
- case IMP_EVENT_INVALIDATE: {
- struct ldlm_namespace *ns = obd->obd_namespace;
+ case IMP_EVENT_INACTIVE: {
+ struct client_obd *cli = &obd->u.cli;
+ /*
+ * Flush current sequence to make client obtain new one
+ * from server in case of disconnect/reconnect.
+ */
+ if (cli->cl_seq != NULL)
+ seq_client_flush(cli->cl_seq);
- ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE, NULL);
+ break;
+ }
+ case IMP_EVENT_INVALIDATE: {
+ struct ldlm_namespace *ns = obd->obd_namespace;
- break;
- }
+ ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
+
+ break;
+ }
case IMP_EVENT_ACTIVE:
rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE, NULL);
/* redo the kuc registration after reconnecting */
if (rc == 0)
rc = mdc_kuc_reregister(imp);
break;
- case IMP_EVENT_OCD:
- rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
- break;
- case IMP_EVENT_DEACTIVATE:
- case IMP_EVENT_ACTIVATE:
- break;
- default:
- CERROR("Unknown import event %x\n", event);
- LBUG();
- }
- RETURN(rc);
+ case IMP_EVENT_OCD:
+ rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD, NULL);
+ break;
+ case IMP_EVENT_DISCON:
+ case IMP_EVENT_DEACTIVATE:
+ case IMP_EVENT_ACTIVATE:
+ break;
+ default:
+ CERROR("Unknown import event %x\n", event);
+ LBUG();
+ }
+ RETURN(rc);
}
int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
RETURN(-EPERM);
}
-#if 0
- /*
- * Now, flag -- O_NOATIME does not be packed by client.
- */
- if (flag & O_NOATIME) {
- struct lu_ucred *uc = lu_ucred(env);
-
- if (uc && ((uc->uc_valid == UCRED_OLD) ||
- (uc->uc_valid == UCRED_NEW)) &&
- (uc->uc_fsuid != attr->la_uid) &&
- !md_capable(uc, CFS_CAP_FOWNER))
- RETURN(-EPERM);
- }
-#endif
-
RETURN(0);
}
rc = 1;
} else {
/* Index is correctly marked as used */
-
- /* If the logs don't contain the mti_nids then add
- them as failover nids */
- rc = mgs_check_failnid(env, mgs, mti);
+ rc = 0;
}
RETURN(rc);
struct mgs_device *mgs, struct fs_db *fsdb);
int mgs_check_index(const struct lu_env *env, struct mgs_device *mgs,
struct mgs_target_info *mti);
-int mgs_check_failnid(const struct lu_env *env, struct mgs_device *mgs,
- struct mgs_target_info *mti);
int mgs_write_log_target(const struct lu_env *env, struct mgs_device *mgs,
struct mgs_target_info *mti, struct fs_db *fsdb);
int mgs_replace_nids(const struct lu_env *env, struct mgs_device *mgs,
RETURN(rc ?: rc2);
}
-/* Not implementing automatic failover nid addition at this time. */
-int mgs_check_failnid(const struct lu_env *env, struct mgs_device *mgs,
- struct mgs_target_info *mti)
-{
-#if 0
- struct fs_db *fsdb;
- int rc;
- ENTRY;
-
- rc = mgs_find_or_make_fsdb(obd, fsname, &fsdb);
- if (rc)
- RETURN(rc);
-
- if (mgs_log_is_empty(obd, mti->mti_svname))
- /* should never happen */
- RETURN(-ENOENT);
-
- CDEBUG(D_MGS, "Checking for new failnids for %s\n", mti->mti_svname);
-
- /* FIXME We can just check mti->params to see if we're already in
- the failover list. Modify mti->params for rewriting back at
- server_register_target(). */
-
- mutex_lock(&fsdb->fsdb_mutex);
- rc = mgs_write_log_add_failnid(obd, fsdb, mti);
- mutex_unlock(&fsdb->fsdb_mutex);
- char *buf, *params;
- int rc = -EINVAL;
-
- RETURN(rc);
-#endif
- return 0;
-}
-
int mgs_write_log_target(const struct lu_env *env, struct mgs_device *mgs,
struct mgs_target_info *mti, struct fs_db *fsdb)
{
* Page state private for osc layer.
*/
struct osc_page {
- struct cl_page_slice ops_cl;
- /**
- * Page queues used by osc to detect when RPC can be formed.
- */
- struct osc_async_page ops_oap;
- /**
- * An offset within page from which next transfer starts. This is used
- * by cl_page_clip() to submit partial page transfers.
- */
- int ops_from;
- /**
- * An offset within page at which next transfer ends.
- *
- * \see osc_page::ops_from.
- */
- int ops_to;
- /**
- * Boolean, true iff page is under transfer. Used for sanity checking.
- */
- unsigned ops_transfer_pinned:1,
- /**
- * True for a `temporary page' created by read-ahead code, probably
- * outside of any DLM lock.
- */
- ops_temp:1,
- /**
+ struct cl_page_slice ops_cl;
+ /**
+ * Page queues used by osc to detect when RPC can be formed.
+ */
+ struct osc_async_page ops_oap;
+ /**
+ * An offset within page from which next transfer starts. This is used
+ * by cl_page_clip() to submit partial page transfers.
+ */
+ int ops_from;
+ /**
+ * An offset within page at which next transfer ends.
+ *
+ * \see osc_page::ops_from.
+ */
+ int ops_to;
+ /**
+ * Boolean, true iff page is under transfer. Used for sanity checking.
+ */
+ unsigned ops_transfer_pinned:1,
+ /**
* in LRU?
*/
ops_in_lru:1,
*/
/*
- * Comment out osc_page_protected because it may sleep inside the
- * the client_obd_list_lock.
- * client_obd_list_lock -> osc_ap_completion -> osc_completion ->
- * -> osc_page_protected -> osc_page_is_dlocked -> osc_match_base
- * -> ldlm_lock_match -> sptlrpc_import_check_ctx -> sleep.
- */
-#if 0
-static int osc_page_is_dlocked(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int pending, int unref)
-{
- struct cl_page *page;
- struct osc_object *obj;
- struct osc_thread_info *info;
- struct ldlm_res_id *resname;
- struct lustre_handle *lockh;
- union ldlm_policy_data *policy;
- enum ldlm_mode dlmmode;
- __u64 flags;
-
- might_sleep();
-
- info = osc_env_info(env);
- resname = &info->oti_resname;
- policy = &info->oti_policy;
- lockh = &info->oti_handle;
- page = opg->ops_cl.cpl_page;
- obj = cl2osc(opg->ops_cl.cpl_obj);
-
- flags = LDLM_FL_TEST_LOCK | LDLM_FL_BLOCK_GRANTED;
- if (pending)
- flags |= LDLM_FL_CBPENDING;
-
- dlmmode = osc_cl_lock2ldlm(mode) | LCK_PW;
- osc_lock_build_res(env, obj, resname);
- osc_index2policy(policy, page->cp_obj, osc_index(opg), osc_index(opg));
- return osc_match_base(osc_export(obj), resname, LDLM_EXTENT, policy,
- dlmmode, &flags, NULL, lockh, unref);
-}
-
-/**
- * Checks an invariant that a page in the cache is covered by a lock, as
- * needed.
- */
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- struct cl_object_header *hdr;
- struct cl_lock *scan;
- struct cl_page *page;
- struct cl_lock_descr *descr;
- int result;
-
- LINVRNT(!opg->ops_temp);
-
- page = opg->ops_cl.cpl_page;
- if (page->cp_owner != NULL &&
- cl_io_top(page->cp_owner)->ci_lockreq == CILR_NEVER)
- /*
- * If IO is done without locks (liblustre, or lloop), lock is
- * not required.
- */
- result = 1;
- else
- /* otherwise check for a DLM lock */
- result = osc_page_is_dlocked(env, opg, mode, 1, unref);
- if (result == 0) {
- /* maybe this page is a part of a lockless io? */
- hdr = cl_object_header(opg->ops_cl.cpl_obj);
- descr = &osc_env_info(env)->oti_descr;
- descr->cld_mode = mode;
- descr->cld_start = osc_index(opg);
- descr->cld_end = osc_index(opg);
- spin_lock(&hdr->coh_lock_guard);
- list_for_each_entry(scan, &hdr->coh_locks, cll_linkage) {
- /*
- * Lock-less sub-lock has to be either in HELD state
- * (when io is actively going on), or in CACHED state,
- * when top-lock is being unlocked:
- * cl_io_unlock()->cl_unuse()->...->lov_lock_unuse().
- */
- if ((scan->cll_state == CLS_HELD ||
- scan->cll_state == CLS_CACHED) &&
- cl_lock_ext_match(&scan->cll_descr, descr)) {
- struct osc_lock *olck;
-
- olck = osc_lock_at(scan);
- result = osc_lock_is_lockless(olck);
- break;
- }
- }
- spin_unlock(&hdr->coh_lock_guard);
- }
- return result;
-}
-#else
-static int osc_page_protected(const struct lu_env *env,
- const struct osc_page *opg,
- enum cl_lock_mode mode, int unref)
-{
- return 1;
-}
-#endif
-
-/*****************************************************************************
- *
* Page operations.
- *
*/
static void osc_page_transfer_get(struct osc_page *opg, const char *label)
{
int result;
ENTRY;
- LINVRNT(osc_page_protected(env, opg, CLM_WRITE, 0));
-
osc_page_transfer_get(opg, "transfer\0cache");
result = osc_queue_async_io(env, io, opg);
if (result != 0)
}
static void osc_page_delete(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
{
struct osc_page *opg = cl2osc_page(slice);
struct osc_object *obj = cl2osc(opg->ops_cl.cpl_obj);
- int rc;
-
- LINVRNT(opg->ops_temp || osc_page_protected(env, opg, CLM_READ, 1));
+ int rc;
- ENTRY;
- CDEBUG(D_TRACE, "%p\n", opg);
- osc_page_transfer_put(env, opg);
+ ENTRY;
+ CDEBUG(D_TRACE, "%p\n", opg);
+ osc_page_transfer_put(env, opg);
rc = osc_teardown_async_page(env, obj, opg);
if (rc) {
CL_PAGE_DEBUG(D_ERROR, env, slice->cpl_page,
const struct cl_page_slice *slice,
int from, int to)
{
- struct osc_page *opg = cl2osc_page(slice);
- struct osc_async_page *oap = &opg->ops_oap;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+ struct osc_page *opg = cl2osc_page(slice);
+ struct osc_async_page *oap = &opg->ops_oap;
- opg->ops_from = from;
- opg->ops_to = to;
+ opg->ops_from = from;
+ opg->ops_to = to;
spin_lock(&oap->oap_lock);
oap->oap_async_flags |= ASYNC_COUNT_STABLE;
spin_unlock(&oap->oap_lock);
}
static int osc_page_cancel(const struct lu_env *env,
- const struct cl_page_slice *slice)
+ const struct cl_page_slice *slice)
{
struct osc_page *opg = cl2osc_page(slice);
- int rc = 0;
-
- LINVRNT(osc_page_protected(env, opg, CLM_READ, 0));
+ int rc = 0;
- /* Check if the transferring against this page
- * is completed, or not even queued. */
- if (opg->ops_transfer_pinned)
- /* FIXME: may not be interrupted.. */
+ /* Check if the transferring against this page
+ * is completed, or not even queued. */
+ if (opg->ops_transfer_pinned)
+ /* FIXME: may not be interrupted.. */
rc = osc_cancel_async_page(env, opg);
- LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
- return rc;
+ LASSERT(ergo(rc == 0, opg->ops_transfer_pinned == 0));
+ return rc;
}
static int osc_page_flush(const struct lu_env *env,
cl_page_slice_add(page, &opg->ops_cl, obj, index,
&osc_page_ops);
}
- /*
- * Cannot assert osc_page_protected() here as read-ahead
- * creates temporary pages outside of a lock.
- */
-#ifdef CONFIG_LUSTRE_DEBUG_EXPENSIVE_CHECK
- opg->ops_temp = !osc_page_protected(env, opg, CLM_READ, 1);
-#endif
INIT_LIST_HEAD(&opg->ops_lru);
/* reserve an LRU space for this page */
{
struct osc_async_page *oap = &opg->ops_oap;
- LINVRNT(osc_page_protected(env, opg,
- crt == CRT_WRITE ? CLM_WRITE : CLM_READ, 1));
-
LASSERTF(oap->oap_magic == OAP_MAGIC, "Bad oap magic: oap %p, "
"magic 0x%x\n", oap, oap->oap_magic);
LASSERT(oap->oap_async_flags & ASYNC_READY);
if (bits == 0)
return 0;
- if (bits & LA_ATIME)
- inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
- if (bits & LA_CTIME)
- inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
- if (bits & LA_MTIME)
- inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
- if (bits & LA_SIZE) {
- LDISKFS_I(inode)->i_disksize = attr->la_size;
- i_size_write(inode, attr->la_size);
- }
-
-#if 0
- /* OSD should not change "i_blocks" which is used by quota.
- * "i_blocks" should be changed by ldiskfs only. */
- if (bits & LA_BLOCKS)
- inode->i_blocks = attr->la_blocks;
-#endif
+ if (bits & LA_ATIME)
+ inode->i_atime = *osd_inode_time(env, inode, attr->la_atime);
+ if (bits & LA_CTIME)
+ inode->i_ctime = *osd_inode_time(env, inode, attr->la_ctime);
+ if (bits & LA_MTIME)
+ inode->i_mtime = *osd_inode_time(env, inode, attr->la_mtime);
+ if (bits & LA_SIZE) {
+ LDISKFS_I(inode)->i_disksize = attr->la_size;
+ i_size_write(inode, attr->la_size);
+ }
+
+ /* OSD should not change "i_blocks" which is used by quota.
+ * "i_blocks" should be changed by ldiskfs only. */
if (bits & LA_MODE)
inode->i_mode = (inode->i_mode & S_IFMT) |
(attr->la_mode & ~S_IFMT);
if (bits & LA_RDEV)
inode->i_rdev = attr->la_rdev;
- if (bits & LA_FLAGS) {
- /* always keep S_NOCMTIME */
- inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
- S_NOCMTIME;
- }
- return 0;
+ if (bits & LA_FLAGS) {
+ /* always keep S_NOCMTIME */
+ inode->i_flags = ll_ext_to_inode_flags(attr->la_flags) |
+ S_NOCMTIME;
+ }
+ return 0;
}
static int osd_quota_transfer(struct inode *inode, const struct lu_attr *attr)
}
#if LDISKFS_INVARIANT_ON
-static int iam_leaf_check(struct iam_leaf *leaf);
extern int dx_node_check(struct iam_path *p, struct iam_frame *f);
static int iam_path_check(struct iam_path *p)
{
- int i;
- int result;
- struct iam_frame *f;
- struct iam_descr *param;
+ int i;
+ int result;
+ struct iam_frame *f;
+ struct iam_descr *param;
+
+ result = 1;
+ param = iam_path_descr(p);
+ for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
+ f = &p->ip_frames[i];
+ if (f->bh != NULL) {
+ result = dx_node_check(p, f);
+ if (result)
+ result = !param->id_ops->id_node_check(p, f);
+ }
+ }
+ if (result && p->ip_leaf.il_bh != NULL)
+ result = 1;
+ if (result == 0)
+ ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
- result = 1;
- param = iam_path_descr(p);
- for (i = 0; result && i < ARRAY_SIZE(p->ip_frames); ++i) {
- f = &p->ip_frames[i];
- if (f->bh != NULL) {
- result = dx_node_check(p, f);
- if (result)
- result = !param->id_ops->id_node_check(p, f);
- }
- }
- if (result && p->ip_leaf.il_bh != NULL)
- result = iam_leaf_check(&p->ip_leaf);
- if (result == 0) {
- ldiskfs_std_error(iam_path_obj(p)->i_sb, result);
- }
- return result;
+ return result;
}
#endif
leaf->il_bh = bh;
leaf->il_curidx = block;
err = iam_leaf_ops(leaf)->init(leaf);
- assert_inv(ergo(err == 0, iam_leaf_check(leaf)));
}
return err;
}
{
if (leaf->il_path != NULL) {
iam_leaf_unlock(leaf);
- assert_inv(ergo(leaf->il_bh != NULL, iam_leaf_check(leaf)));
iam_leaf_ops(leaf)->fini(leaf);
if (leaf->il_bh) {
brelse(leaf->il_bh);
return iam_leaf_ops(l)->can_add(l, k, r);
}
-#if LDISKFS_INVARIANT_ON
-static int iam_leaf_check(struct iam_leaf *leaf)
-{
- return 1;
-#if 0
- struct iam_lentry *orig;
- struct iam_path *path;
- struct iam_container *bag;
- struct iam_ikey *k0;
- struct iam_ikey *k1;
- int result;
- int first;
-
- orig = leaf->il_at;
- path = iam_leaf_path(leaf);
- bag = iam_leaf_container(leaf);
-
- result = iam_leaf_ops(leaf)->init(leaf);
- if (result != 0)
- return result;
-
- first = 1;
- iam_leaf_start(leaf);
- k0 = iam_path_ikey(path, 0);
- k1 = iam_path_ikey(path, 1);
- while (!iam_leaf_at_end(leaf)) {
- iam_ikeycpy(bag, k0, k1);
- iam_ikeycpy(bag, k1, iam_leaf_ikey(leaf, k1));
- if (!first && iam_ikeycmp(bag, k0, k1) > 0) {
- return 0;
- }
- first = 0;
- iam_leaf_next(leaf);
- }
- leaf->il_at = orig;
- return 1;
-#endif
-}
-#endif
-
static int iam_txn_dirty(handle_t *handle,
struct iam_path *path, struct buffer_head *bh)
{
do_corr(schedule());
if (result == 0) {
result = iam_leaf_load(path);
- assert_inv(ergo(result == 0, iam_leaf_check(leaf)));
if (result == 0) {
do_corr(schedule());
if (index)
struct inode *obj;
struct iam_path *path;
- assert_inv(iam_leaf_check(leaf));
-
c = iam_leaf_container(leaf);
path = leaf->il_path;
err = -ENOMEM;
brelse(new_leaf);
}
- assert_inv(iam_leaf_check(leaf));
- assert_inv(iam_leaf_check(&iam_leaf_path(leaf)->ip_leaf));
assert_inv(iam_path_check(iam_leaf_path(leaf)));
return err;
}
struct iam_leaf *leaf;
leaf = &path->ip_leaf;
- assert_inv(iam_leaf_check(leaf));
assert_inv(iam_path_check(path));
err = iam_txn_add(handle, path, leaf->il_bh);
if (err == 0) {
err = iam_txn_dirty(handle, path, leaf->il_bh);
}
}
- assert_inv(iam_leaf_check(leaf));
- assert_inv(iam_leaf_check(&path->ip_leaf));
assert_inv(iam_path_check(path));
return err;
}
path = &it->ii_path;
leaf = &path->ip_leaf;
- assert_inv(iam_leaf_check(leaf));
assert_inv(iam_path_check(path));
result = iam_txn_add(h, path, leaf->il_bh);
}
}
}
- assert_inv(iam_leaf_check(leaf));
assert_inv(iam_path_check(path));
assert_corr(it_state(it) == IAM_IT_ATTACHED ||
it_state(it) == IAM_IT_DETACHED);
printk(" %p %8.8x \"%*.*s\"\n", ent, e_hash(ent),
e_keysize(ent), e_keysize(ent), e_char(ent));
}
-#if 0
-static int e_check(const struct iam_leaf *leaf,
- const struct lvar_leaf_entry *ent)
-{
- const void *point = ent;
- const void *start = leaf->il_bh->b_data;
- return
- start + sizeof(struct lvar_leaf_header) <= point &&
- point + e_size(leaf, ent) < start + blocksize(leaf);
-}
-#endif
static inline struct lvar_leaf_entry *e_next(const struct iam_leaf *leaf,
const struct lvar_leaf_entry *ent)
rc = SECSVC_OK;
out:
- /* it looks like here we should put rsip also, but this mess up
- * with NFS cache mgmt code... FIXME */
-#if 0
- if (rsip)
- rsi_put(&rsip->h, &rsi_cache);
-#endif
-
- if (rsci) {
- /* if anything went wrong, we don't keep the context too */
- if (rc != SECSVC_OK)
+ /* it looks like here we should put rsip also, but this messes up
+ * with NFS cache mgmt code... FIXME
+ * something like:
+ * if (rsip)
+ * rsi_put(&rsip->h, &rsi_cache); */
+
+ if (rsci) {
+ /* if anything went wrong, we don't keep the context too */
+ if (rc != SECSVC_OK)
set_bit(CACHE_NEGATIVE, &rsci->h.flags);
- else
- CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
- gss_handle_to_u64(&rsci->handle));
+ else
+ CDEBUG(D_SEC, "create rsc with idx "LPX64"\n",
+ gss_handle_to_u64(&rsci->handle));
- COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
- }
- RETURN(rc);
+ COMPAT_RSC_PUT(&rsci->h, &rsc_cache);
+ }
+ RETURN(rc);
}
struct gss_svc_ctx *gss_svc_upcall_get_ctx(struct ptlrpc_request *req,
return ghdr;
}
-#if 0
-static
-void gss_netobj_swabber(netobj_t *obj)
-{
- __swab32s(&obj->len);
-}
-
-netobj_t *gss_swab_netobj(struct lustre_msg *msg, int segment)
-{
- netobj_t *obj;
-
- obj = lustre_swab_buf(msg, segment, sizeof(*obj), gss_netobj_swabber);
- if (obj && sizeof(*obj) + obj->len > msg->lm_buflens[segment]) {
- CERROR("netobj require length %u but only %u received\n",
- (unsigned int) sizeof(*obj) + obj->len,
- msg->lm_buflens[segment]);
- return NULL;
- }
-
- return obj;
-}
-#endif
-
/*
* payload should be obtained from mechanism. but currently since we
* only support kerberos, we could simply use fixed value.
static int enc_pools_should_grow(int page_needed, long now)
{
- /* don't grow if someone else is growing the pools right now,
- * or the pools has reached its full capacity
- */
- if (page_pools.epp_growing ||
- page_pools.epp_total_pages == page_pools.epp_max_pages)
- return 0;
+ /* don't grow if someone else is growing the pools right now,
+ * or the pools has reached its full capacity
+ */
+ if (page_pools.epp_growing ||
+ page_pools.epp_total_pages == page_pools.epp_max_pages)
+ return 0;
- /* if total pages is not enough, we need to grow */
- if (page_pools.epp_total_pages < page_needed)
- return 1;
+ /* if total pages is not enough, we need to grow */
+ if (page_pools.epp_total_pages < page_needed)
+ return 1;
- /*
- * we wanted to return 0 here if there was a shrink just happened
- * moment ago, but this may cause deadlock if both client and ost
- * live on single node.
- */
-#if 0
- if (now - page_pools.epp_last_shrink < 2)
- return 0;
-#endif
+ /*
+ * we wanted to return 0 here if a shrink had just
+ * happened a moment ago, but this may cause deadlock if both
+ * client and ost live on single node.
+ */
- /*
- * here we perhaps need consider other factors like wait queue
- * length, idle index, etc. ?
- */
+ /*
+ * here we perhaps need consider other factors like wait queue
+ * length, idle index, etc. ?
+ */
- /* grow the pools in any other cases */
- return 1;
+ /* grow the pools in any other cases */
+ return 1;
}
/*
exit(1);
}
-#if 0
- /* We cannot do this any longer, we do not store open special nodes
- * on MDS after unlink */
- if (st1.st_mode != st2.st_mode) { // can we do this?
- fprintf(stderr, "fstat different value on %s and %s\n", dname1, dname2);
- exit(1);
- }
-#endif
-
fprintf(stderr, "Ok, everything goes well.\n");
return 0;
}
#include <lustre_ioctl.h>
struct option longopts[] = {
- {"ea", 0, 0, 'e'},
{"lookup", 0, 0, 'l'},
{"random", 0, 0, 'r'},
{"stat", 0, 0, 's'},
{NULL, 0, 0, 0},
};
-char *shortopts = "ehlr:s0123456789";
+char *shortopts = "hlr:s0123456789";
static int usage(char *prog, FILE *out)
{
fprintf(out,
- "Usage: %s [-r rand_seed] {-s|-e|-l} filenamebase total_files iterations\n"
+ "Usage: %s [-r rand_seed] {-s|-l} filenamebase total_files iterations\n"
"-r : random seed\n"
"-s : regular stat() calls\n"
- "-e : open then GET_EA ioctl\n"
"-l : lookup ioctl only\n", prog);
exit(out == stderr);
}
usage(prog, stderr);
}
break;
- case 'e':
case 'l':
case 's':
mode = rc;
tmp = random() % count;
sprintf(filename, "%s%d", base, tmp);
- if (mode == 'e') {
-#if 0
- fd = open(filename, O_RDWR|O_LARGEFILE);
- if (fd < 0) {
- printf("open(%s) error: %s\n", filename,
- strerror(errno));
- break;
- }
- rc = ioctl(fd, LDISKFS_IOC_GETEA, NULL);
- if (rc < 0) {
- printf("ioctl(%s) error: %s\n", filename,
- strerror(errno));
- break;
- }
- close(fd);
- break;
-#endif
- } else if (mode == 's') {
+ if (mode == 's') {
struct stat buf;
rc = stat(filename, &buf);