return (__u32)seq;
}
-void fld_cache_flush(struct fld_cache_info *cache)
+void fld_cache_flush(struct fld_cache *cache)
{
struct fld_cache_entry *flde;
struct hlist_head *bucket;
EXIT;
}
-struct fld_cache_info *fld_cache_init(int hash_size, int cache_size,
- int cache_threshold)
+struct fld_cache *fld_cache_init(const char *name, int hash_size,
+ int cache_size, int cache_threshold)
{
- struct fld_cache_info *cache;
+ struct fld_cache *cache;
int i;
ENTRY;
- /* check if size is power of two */
+ LASSERT(name != NULL);
LASSERT(IS_PO2(hash_size));
-
LASSERT(cache_threshold < cache_size);
OBD_ALLOC_PTR(cache);
cache->fci_cache_count = 0;
spin_lock_init(&cache->fci_lock);
+
+ strncpy(cache->fci_name, name,
+ sizeof(cache->fci_name));
+
cache->fci_hash_size = hash_size;
cache->fci_cache_size = cache_size;
cache->fci_threshold = cache_threshold;
- /* init fld cache info */
+ /* Init fld cache info. */
cache->fci_hash_mask = hash_size - 1;
OBD_ALLOC(cache->fci_hash_table,
hash_size * sizeof(*cache->fci_hash_table));
for (i = 0; i < hash_size; i++)
INIT_HLIST_HEAD(&cache->fci_hash_table[i]);
- CDEBUG(D_INFO|D_WARNING, "FLD cache - Size: %d, Threshold: %d\n",
- cache_size, cache_threshold);
+ CDEBUG(D_INFO|D_WARNING, "%s: FLD cache - Size: %d, Threshold: %d\n",
+ cache->fci_name, cache_size, cache_threshold);
RETURN(cache);
}
EXPORT_SYMBOL(fld_cache_init);
-void fld_cache_fini(struct fld_cache_info *cache)
+void fld_cache_fini(struct fld_cache *cache)
{
ENTRY;
EXPORT_SYMBOL(fld_cache_fini);
static inline struct hlist_head *
-fld_cache_bucket(struct fld_cache_info *cache, seqno_t seq)
+fld_cache_bucket(struct fld_cache *cache, seqno_t seq)
{
return cache->fci_hash_table + (fld_cache_hash(seq) &
cache->fci_hash_mask);
* collision lists well balanced. That is, checks all of them and removes one
* entry in list and so on.
*/
-static int fld_cache_shrink(struct fld_cache_info *cache)
+static int fld_cache_shrink(struct fld_cache *cache)
{
struct fld_cache_entry *flde;
struct list_head *curr;
num++;
}
- CDEBUG(D_INFO|D_WARNING, "FLD cache - Shrinked by "
- "%d entries\n", num);
+ CDEBUG(D_INFO|D_WARNING, "%s: FLD cache - Shrinked by "
+ "%d entries\n", cache->fci_name, num);
RETURN(0);
}
-int fld_cache_insert(struct fld_cache_info *cache,
+int fld_cache_insert(struct fld_cache *cache,
seqno_t seq, mdsno_t mds)
{
struct fld_cache_entry *flde, *fldt;
spin_lock(&cache->fci_lock);
- /* check if need to shrink cache */
+ /* Check if need to shrink cache. */
rc = fld_cache_shrink(cache);
if (rc) {
spin_unlock(&cache->fci_lock);
RETURN(rc);
}
- /* check if cache already has the entry with such a seq */
+ /* Check if cache already has the entry with such a seq. */
bucket = fld_cache_bucket(cache, seq);
hlist_for_each_entry(fldt, scan, bucket, fce_list) {
if (fldt->fce_seq == seq) {
}
spin_unlock(&cache->fci_lock);
- /* allocate new entry */
+ /* Allocate new entry. */
OBD_ALLOC_PTR(flde);
if (!flde)
RETURN(-ENOMEM);
/*
- * check if cache has the entry with such a seq again. It could be added
+ * Check if cache has the entry with such a seq again. It could be added
* while we were allocating new entry.
*/
spin_lock(&cache->fci_lock);
}
}
- /* add new entry to cache and lru list */
+ /* Add new entry to cache and lru list. */
INIT_HLIST_NODE(&flde->fce_list);
flde->fce_mds = mds;
flde->fce_seq = seq;
}
EXPORT_SYMBOL(fld_cache_insert);
-void fld_cache_delete(struct fld_cache_info *cache, seqno_t seq)
+void fld_cache_delete(struct fld_cache *cache, seqno_t seq)
{
struct fld_cache_entry *flde;
struct hlist_node *scan, *n;
}
EXPORT_SYMBOL(fld_cache_delete);
-int fld_cache_lookup(struct fld_cache_info *cache,
+int fld_cache_lookup(struct fld_cache *cache,
seqno_t seq, mdsno_t *mds)
{
struct fld_cache_entry *flde;
hlist_for_each_entry_safe(flde, scan, n, bucket, fce_list) {
if (flde->fce_seq == seq) {
*mds = flde->fce_mds;
-
- /* Move found entry to the head of lru list. */
list_del(&flde->fce_lru);
list_add(&flde->fce_lru, &cache->fci_lru);
-
spin_unlock(&cache->fci_lock);
RETURN(0);
}
}
EXPORT_SYMBOL(fld_cache_lookup);
#else
-int fld_cache_insert(struct fld_cache_info *cache,
+int fld_cache_insert(struct fld_cache *cache,
seqno_t seq, mdsno_t mds)
{
return -ENOTSUPP;
}
EXPORT_SYMBOL(fld_cache_insert);
-void fld_cache_delete(struct fld_cache_info *cache,
+void fld_cache_delete(struct fld_cache *cache,
seqno_t seq)
{
return;
}
EXPORT_SYMBOL(fld_cache_delete);
-int fld_cache_lookup(struct fld_cache_info *cache,
+int fld_cache_lookup(struct fld_cache *cache,
seqno_t seq, mdsno_t *mds)
{
return -ENOTSUPP;
}
}
-/* insert index entry and update cache */
+/* Insert index entry and update cache. */
int fld_server_create(struct lu_server_fld *fld,
const struct lu_env *env,
seqno_t seq, mdsno_t mds)
{
- return fld_index_create(fld, env, seq, mds);
+ int rc;
+ ENTRY;
+
+ rc = fld_index_create(fld, env, seq, mds);
+
+ if (rc == 0) {
+ /*
+ * Do not return result of calling fld_cache_insert()
+ * here. First of all because it may return -EEXIST. Another
+ * reason is that, we do not want to stop proceeding even after
+ * cache errors.
+ */
+ fld_cache_insert(fld->lsf_cache, seq, mds);
+ }
+
+ RETURN(rc);
}
EXPORT_SYMBOL(fld_server_create);
-/* delete index entry */
+/* Delete index entry. */
int fld_server_delete(struct lu_server_fld *fld,
const struct lu_env *env,
seqno_t seq)
{
- return fld_index_delete(fld, env, seq);
+ int rc;
+ ENTRY;
+
+ fld_cache_delete(fld->lsf_cache, seq);
+ rc = fld_index_delete(fld, env, seq);
+
+ RETURN(rc);
}
EXPORT_SYMBOL(fld_server_delete);
-/* issue on-disk index lookup */
+/* Lookup mds by seq. */
int fld_server_lookup(struct lu_server_fld *fld,
const struct lu_env *env,
seqno_t seq, mdsno_t *mds)
{
- return fld_index_lookup(fld, env, seq, mds);
+ int rc;
+ ENTRY;
+
+ fld->lsf_stat.fst_count++;
+
+ /* Lookup it in the cache. */
+ rc = fld_cache_lookup(fld->lsf_cache, seq, mds);
+ if (rc == 0) {
+ fld->lsf_stat.fst_cache++;
+ RETURN(0);
+ }
+
+ rc = fld_index_lookup(fld, env, seq, mds);
+ if (rc == 0) {
+ /*
+ * Do not return error here as well. See previous comment in
+ * same situation in function fld_server_create().
+ */
+ fld_cache_insert(fld->lsf_cache, seq, *mds);
+ }
+ RETURN(rc);
}
EXPORT_SYMBOL(fld_server_lookup);
info->fti_flags = lustre_msg_get_flags(req->rq_reqmsg);
- /* mark rep buffer as req-layout stuff expects */
+ /* Mark rep buffer as req-layout stuff expects. */
for (i = 0; i < ARRAY_SIZE(info->fti_rep_buf_size); i++)
info->fti_rep_buf_size[i] = -1;
- /* init request capsule */
+ /* Init request capsule. */
req_capsule_init(&info->fti_pill, req, RCL_SERVER,
info->fti_rep_buf_size);
int fld_server_init(struct lu_server_fld *fld, struct dt_device *dt,
const char *prefix, const struct lu_env *env)
{
+ int cache_size, cache_threshold;
int rc;
ENTRY;
+ memset(&fld->lsf_stat, 0, sizeof(fld->lsf_stat));
snprintf(fld->lsf_name, sizeof(fld->lsf_name),
"srv-%s", prefix);
sema_init(&fld->lsf_sem, 1);
+ cache_size = FLD_SERVER_CACHE_SIZE /
+ sizeof(struct fld_cache_entry);
+
+ cache_threshold = cache_size *
+ FLD_SERVER_CACHE_THRESHOLD / 100;
+
+ fld->lsf_cache = fld_cache_init(fld->lsf_name,
+ FLD_SERVER_HTABLE_SIZE,
+ cache_size, cache_threshold);
+ if (IS_ERR(fld->lsf_cache)) {
+ rc = PTR_ERR(fld->lsf_cache);
+ fld->lsf_cache = NULL;
+ GOTO(out, rc);
+ }
+
rc = fld_index_init(fld, env, dt);
if (rc)
GOTO(out, rc);
void fld_server_fini(struct lu_server_fld *fld,
const struct lu_env *env)
{
+ __u64 pct;
ENTRY;
+ pct = fld->lsf_stat.fst_cache * 100;
+ do_div(pct, fld->lsf_stat.fst_count);
+
+ printk("FLD cache statistics (%s):\n", fld->lsf_name);
+ printk(" Total reqs: "LPU64"\n", fld->lsf_stat.fst_count);
+ printk(" Cache reqs: "LPU64"\n", fld->lsf_stat.fst_cache);
+ printk(" Cache hits: "LPU64"%%\n", pct);
+
fld_server_proc_fini(fld);
fld_index_fini(fld, env);
+ if (fld->lsf_cache != NULL) {
+ if (!IS_ERR(fld->lsf_cache))
+ fld_cache_fini(fld->lsf_cache);
+ fld->lsf_cache = NULL;
+ }
+
EXIT;
}
EXPORT_SYMBOL(fld_server_fini);
};
enum {
- FLD_HTABLE_SIZE = 256
+ /* 4M of FLD cache will not hurt server a lot. */
+ FLD_SERVER_CACHE_SIZE = (4 * 0x100000),
+
+ /* 1M of FLD cache will not hurt client a lot. */
+ FLD_CLIENT_CACHE_SIZE = (1 * 0x100000)
+};
+
+enum {
+ /* Cache threshold is 10 percent of size. */
+ FLD_SERVER_CACHE_THRESHOLD = 10,
+
+ /* Cache threshold is 10 percent of size. */
+ FLD_CLIENT_CACHE_THRESHOLD = 10
+};
+
+enum {
+ /*
+ * One page is used for hashtable. That is sizeof(struct hlist_head) *
+ * 1024.
+ */
+ FLD_CLIENT_HTABLE_SIZE = (1024 * 1),
+
+ /*
+ * Here 4 pages are used for hashtable of server cache. This is
+ * because the cache itself is 4 times bigger.
+ */
+ FLD_SERVER_HTABLE_SIZE = (1024 * 4)
};
extern struct lu_fld_hash fld_hash[];
return (hash >= 0 && hash < ARRAY_SIZE(fld_hash));
}
-/* 1M of FLD cache will not hurt client a lot */
-#define FLD_CACHE_SIZE 1024000
-
-/* cache threshold is 10 percent of size */
-#define FLD_CACHE_THRESHOLD 10
-
int fld_client_init(struct lu_client_fld *fld,
const char *prefix, int hash)
{
LASSERT(fld != NULL);
+ memset(&fld->lcf_stat, 0, sizeof(fld->lcf_stat));
snprintf(fld->lcf_name, sizeof(fld->lcf_name),
"cli-srv-%s", prefix);
INIT_LIST_HEAD(&fld->lcf_targets);
#ifdef __KERNEL__
- cache_size = FLD_CACHE_SIZE /
+ cache_size = FLD_CLIENT_CACHE_SIZE /
sizeof(struct fld_cache_entry);
cache_threshold = cache_size *
- FLD_CACHE_THRESHOLD / 100;
+ FLD_CLIENT_CACHE_THRESHOLD / 100;
- fld->lcf_cache = fld_cache_init(FLD_HTABLE_SIZE,
- cache_size,
- cache_threshold);
+ fld->lcf_cache = fld_cache_init(fld->lcf_name,
+ FLD_CLIENT_HTABLE_SIZE,
+ cache_size, cache_threshold);
if (IS_ERR(fld->lcf_cache)) {
rc = PTR_ERR(fld->lcf_cache);
fld->lcf_cache = NULL;
void fld_client_fini(struct lu_client_fld *fld)
{
struct lu_fld_target *target, *tmp;
+ __u64 pct;
ENTRY;
+ pct = fld->lcf_stat.fst_cache * 100;
+ do_div(pct, fld->lcf_stat.fst_count);
+
+ printk("FLD cache statistics (%s):\n", fld->lcf_name);
+ printk(" Total reqs: "LPU64"\n", fld->lcf_stat.fst_count);
+ printk(" Cache reqs: "LPU64"\n", fld->lcf_stat.fst_cache);
+ printk(" Cache hits: "LPU64"%%\n", pct);
+
fld_client_proc_fini(fld);
spin_lock(&fld->lcf_lock);
* Do not return result of calling fld_cache_insert()
* here. First of all because it may return -EEXISTS. Another
* reason is that, we do not want to stop proceeding because of
- * cache errors. --umka
+ * cache errors.
*/
fld_cache_insert(fld->lcf_cache, seq, mds);
} else {
ENTRY;
down(&fld->lcf_sem);
-
+
+ fld->lcf_stat.fst_count++;
+
/* Lookup it in the cache */
rc = fld_cache_lookup(fld->lcf_cache, seq, mds);
if (rc == 0) {
+ fld->lcf_stat.fst_cache++;
up(&fld->lcf_sem);
RETURN(0);
}
/*
* Do not return error here as well. See previous comment in
- * same situation in function fld_client_create(). --umka
+ * same situation in function fld_client_create().
*/
fld_cache_insert(fld->lcf_cache, seq, *mds);
}
struct lu_client_fld;
struct lu_server_fld;
+struct fld_stats {
+ __u64 fst_count;
+ __u64 fst_cache;
+};
+
/*
* FLD (Fid Location Database) interface.
*/
fld_scan_func_t fh_scan_func;
};
-struct lu_server_fld {
- /* fld dir proc entry */
- cfs_proc_dir_entry_t *lsf_proc_dir;
-
- /* /fld file object device */
- struct dt_object *lsf_obj;
-
- /* Protect index modifications */
- struct semaphore lsf_sem;
-
- /* fld service name in form "fld-MDTXXX" */
- char lsf_name[80];
-};
-
struct fld_cache_entry {
struct hlist_node fce_list;
struct list_head fce_lru;
seqno_t fce_seq;
};
-struct fld_cache_info {
+struct fld_cache {
/*
- * cache guard, protects fci_hash mostly because others immutable after
+ * Cache guard, protects fci_hash mostly because others immutable after
* init is finished.
*/
spinlock_t fci_lock;
- /* cache shrink threshold */
+ /* Cache shrink threshold */
int fci_threshold;
- /* prefered number of cached entries */
+ /* Preferred number of cached entries */
int fci_cache_size;
- /* current number of cached entries. Protected by @fci_lock */
+ /* Current number of cached entries. Protected by @fci_lock */
int fci_cache_count;
- /* hash table size (number of collision lists) */
+ /* Hash table size (number of collision lists) */
int fci_hash_size;
- /* hash table mask */
+ /* Hash table mask */
int fci_hash_mask;
- /* hash table for all collision lists */
+ /* Hash table for all collision lists */
struct hlist_head *fci_hash_table;
- /* lru list */
+ /* Lru list */
struct list_head fci_lru;
+
+ /* Cache name used for debug and messages. */
+ char fci_name[80];
+};
+
+struct lu_server_fld {
+ /* Fld dir proc entry. */
+ cfs_proc_dir_entry_t *lsf_proc_dir;
+
+ /* /fld file object device */
+ struct dt_object *lsf_obj;
+
+ /* Client FLD cache. */
+ struct fld_cache *lsf_cache;
+
+ /* Protect index modifications */
+ struct semaphore lsf_sem;
+
+ /* Server cache statistics. */
+ struct fld_stats lsf_stat;
+
+ /* Fld service name in form "fld-srv-lustre-MDTXXX" */
+ char lsf_name[80];
};
struct lu_client_fld {
- /* client side proc entry */
+ /* Client side proc entry. */
cfs_proc_dir_entry_t *lcf_proc_dir;
- /* list of exports client FLD knows about */
+ /* List of exports client FLD knows about. */
struct list_head lcf_targets;
- /* current hash to be used to chose an export */
+ /* Current hash to be used to chose an export. */
struct lu_fld_hash *lcf_hash;
- /* exports count */
+ /* Exports count. */
int lcf_count;
- /* lock protecting exports list and fld_hash */
+ /* Lock protecting exports list and fld_hash. */
spinlock_t lcf_lock;
- /* protect fld req + cache modification */
+ /* Client cache statistics. */
+ struct fld_stats lcf_stat;
+
+ /* Protect fld req + cache modification. */
struct semaphore lcf_sem;
- /* client FLD cache */
- struct fld_cache_info *lcf_cache;
+ /* Client FLD cache. */
+ struct fld_cache *lcf_cache;
- /* client fld proc entry name */
+ /* Client fld proc entry name. */
char lcf_name[80];
const struct lu_context *lcf_ctx;
__u64 idx);
/* Cache methods */
-struct fld_cache_info *fld_cache_init(int hash_size,
- int cache_size,
- int cache_threshold);
+struct fld_cache *fld_cache_init(const char *name,
+ int hash_size,
+ int cache_size,
+ int cache_threshold);
-void fld_cache_fini(struct fld_cache_info *cache);
+void fld_cache_fini(struct fld_cache *cache);
-void fld_cache_flush(struct fld_cache_info *cache);
+void fld_cache_flush(struct fld_cache *cache);
-int fld_cache_insert(struct fld_cache_info *cache,
+int fld_cache_insert(struct fld_cache *cache,
seqno_t seq, mdsno_t mds);
-void fld_cache_delete(struct fld_cache_info *cache,
+void fld_cache_delete(struct fld_cache *cache,
seqno_t seq);
int
-fld_cache_lookup(struct fld_cache_info *cache,
+fld_cache_lookup(struct fld_cache *cache,
seqno_t seq, mdsno_t *mds);
#endif
};
struct lmv_tgt_desc {
- struct obd_uuid uuid;
- struct obd_export *ltd_exp;
- int active; /* is this target up for requests */
- int idx;
- struct semaphore fid_sem;
+ struct obd_uuid ltd_uuid;
+ struct obd_export *ltd_exp;
+ int ltd_active; /* is this target up for requests */
+ int ltd_idx;
+ struct semaphore ltd_fid_sem;
};
struct lmv_obd {
struct lmv_tgt_desc *tgt,
int activate)
{
- if (tgt->active == activate)
+ if (tgt->ltd_active == activate)
return;
- tgt->active = activate;
+ tgt->ltd_active = activate;
lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
}
continue;
CDEBUG(D_INFO, "lmv idx %d is %s conn "LPX64"\n",
- i, tgt->uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
+ i, tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
- if (obd_uuid_equals(uuid, &tgt->uuid))
+ if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
break;
}
obd->obd_type->typ_name, i);
LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
- if (tgt->active == activate) {
+ if (tgt->ltd_active == activate) {
CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
activate ? "" : "in");
GOTO(out_lmv_lock, rc);
if (tgt->ltd_exp == NULL)
continue;
- if (obd_uuid_equals(uuid, &tgt->uuid)) {
- lmv->datas[tgt->idx] = *data;
+ if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
+ lmv->datas[tgt->ltd_idx] = *data;
break;
}
}
ENTRY;
/* for MDS: don't connect to yourself */
- if (obd_uuid_equals(&tgt->uuid, cluuid)) {
+ if (obd_uuid_equals(&tgt->ltd_uuid, cluuid)) {
CDEBUG(D_CONFIG, "don't connect back to %s\n", cluuid->uuid);
/* XXX - the old code didn't increment active tgt count.
* should we ? */
RETURN(0);
}
- mdc_obd = class_find_client_obd(&tgt->uuid, LUSTRE_MDC_NAME,
+ mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
&obd->obd_uuid);
if (!mdc_obd) {
- CERROR("target %s not attached\n", tgt->uuid.uuid);
+ CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
RETURN(-EINVAL);
}
CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
- tgt->uuid.uuid, obd->obd_uuid.uuid,
+ tgt->ltd_uuid.uuid, obd->obd_uuid.uuid,
cluuid->uuid);
if (!mdc_obd->obd_set_up) {
- CERROR("target %s is not set up\n", tgt->uuid.uuid);
+ CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
RETURN(-EINVAL);
}
rc = obd_connect(NULL, &conn, mdc_obd, &lmv_mdc_uuid,
&lmv->conn_data);
if (rc) {
- CERROR("target %s connect error %d\n", tgt->uuid.uuid, rc);
+ CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
RETURN(rc);
}
target.ft_srv = NULL;
target.ft_exp = mdc_exp;
- target.ft_idx = tgt->idx;
+ target.ft_idx = tgt->ltd_idx;
fld_client_add_target(&lmv->lmv_fld, &target);
if (rc) {
obd_disconnect(mdc_exp);
CERROR("target %s register_observer error %d\n",
- tgt->uuid.uuid, rc);
+ tgt->ltd_uuid.uuid, rc);
RETURN(rc);
}
}
}
- tgt->active = 1;
+ tgt->ltd_active = 1;
tgt->ltd_exp = mdc_exp;
lmv->desc.ld_active_tgt_count++;
/* copy connect data, it may be used later */
- lmv->datas[tgt->idx] = *mdc_data;
+ lmv->datas[tgt->ltd_idx] = *mdc_data;
md_init_ea_size(tgt->ltd_exp, lmv->max_easize,
lmv->max_def_easize, lmv->max_cookiesize);
}
spin_lock(&lmv->lmv_lock);
tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
- tgt->uuid = *tgt_uuid;
+ tgt->ltd_uuid = *tgt_uuid;
spin_unlock(&lmv->lmv_lock);
if (lmv->connected) {
while (i-- > 0) {
int rc2;
--tgt;
- tgt->active = 0;
+ tgt->ltd_active = 0;
if (tgt->ltd_exp) {
--lmv->desc.ld_active_tgt_count;
rc2 = obd_disconnect(tgt->ltd_exp);
if (rc2) {
CERROR("error: LMV target %s disconnect on "
"MDC idx %d: error %d\n",
- tgt->uuid.uuid, i, rc2);
+ tgt->ltd_uuid.uuid, i, rc2);
}
}
}
obd_register_observer(lmv->tgts[i].ltd_exp->exp_obd, NULL);
rc = obd_disconnect(lmv->tgts[i].ltd_exp);
if (rc) {
- if (lmv->tgts[i].active) {
+ if (lmv->tgts[i].ltd_active) {
CERROR("Target %s disconnect error %d\n",
- lmv->tgts[i].uuid.uuid, rc);
+ lmv->tgts[i].ltd_uuid.uuid, rc);
}
rc = 0;
}
err = obd_iocontrol(cmd, lmv->tgts[i].ltd_exp, len, karg, uarg);
if (err) {
- if (lmv->tgts[i].active) {
+ if (lmv->tgts[i].ltd_active) {
CERROR("error: iocontrol MDC %s on MDT"
"idx %d: err = %d\n",
- lmv->tgts[i].uuid.uuid, i, err);
+ lmv->tgts[i].ltd_uuid.uuid, i, err);
if (!rc)
rc = err;
}
ENTRY;
/* New seq alloc and FLD setup should be atomic. */
- down(&tgt->fid_sem);
+ down(&tgt->ltd_fid_sem);
/* Asking underlaying tgt layer to allocate new fid. */
rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
}
}
- up(&tgt->fid_sem);
+ up(&tgt->ltd_fid_sem);
RETURN(rc);
}
RETURN(-ENOMEM);
for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
- sema_init(&lmv->tgts[i].fid_sem, 1);
- lmv->tgts[i].idx = i;
+ sema_init(&lmv->tgts[i].ltd_fid_sem, 1);
+ lmv->tgts[i].ltd_idx = i;
}
lmv->datas_size = LMV_MAX_TGT_COUNT * sizeof(struct obd_connect_data);
LASSERT(fid != NULL);
for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
- if (!lmv->tgts[i].ltd_exp || !lmv->tgts[i].active)
+ if (!lmv->tgts[i].ltd_exp || !lmv->tgts[i].ltd_active)
continue;
err = md_cancel_unused(lmv->tgts[i].ltd_exp,
struct lmv_obd *lmv = &dev->u.lmv;
int idx = tgt - &(lmv->tgts[0]);
- return seq_printf(p, "%d: %s %sACTIVE\n", idx, tgt->uuid.uuid,
- tgt->active ? "" : "IN");
+ return seq_printf(p, "%d: %s %sACTIVE\n", idx, tgt->ltd_uuid.uuid,
+ tgt->ltd_active ? "" : "IN");
}
struct seq_operations lmv_tgt_sops = {