From: yury Date: Mon, 16 Oct 2006 12:47:49 +0000 (+0000) Subject: - adjust client FLD cache a bit; it should use a hashtable of one page in size; X-Git-Tag: v1_8_0_110~486^2~495 X-Git-Url: https://git.whamcloud.com/gitweb?a=commitdiff_plain;h=89a3ba641bfc54f62d5ab59766a086f9c66e1d25;p=fs%2Flustre-release.git - adjust client FLD cache a bit; it should use a hashtable of one page in size; - add FLD cache to FLD servers; - cleanups in lmv tgt field names. --- diff --git a/lustre/fld/fld_cache.c b/lustre/fld/fld_cache.c index 58b1656..16eaab6 100644 --- a/lustre/fld/fld_cache.c +++ b/lustre/fld/fld_cache.c @@ -58,7 +58,7 @@ static inline __u32 fld_cache_hash(seqno_t seq) return (__u32)seq; } -void fld_cache_flush(struct fld_cache_info *cache) +void fld_cache_flush(struct fld_cache *cache) { struct fld_cache_entry *flde; struct hlist_head *bucket; @@ -82,16 +82,15 @@ void fld_cache_flush(struct fld_cache_info *cache) EXIT; } -struct fld_cache_info *fld_cache_init(int hash_size, int cache_size, - int cache_threshold) +struct fld_cache *fld_cache_init(const char *name, int hash_size, + int cache_size, int cache_threshold) { - struct fld_cache_info *cache; + struct fld_cache *cache; int i; ENTRY; - /* check if size is power of two */ + LASSERT(name != NULL); LASSERT(IS_PO2(hash_size)); - LASSERT(cache_threshold < cache_size); OBD_ALLOC_PTR(cache); @@ -102,11 +101,15 @@ struct fld_cache_info *fld_cache_init(int hash_size, int cache_size, cache->fci_cache_count = 0; spin_lock_init(&cache->fci_lock); + + strncpy(cache->fci_name, name, + sizeof(cache->fci_name)); + cache->fci_hash_size = hash_size; cache->fci_cache_size = cache_size; cache->fci_threshold = cache_threshold; - /* init fld cache info */ + /* Init fld cache info. */ cache->fci_hash_mask = hash_size - 1; OBD_ALLOC(cache->fci_hash_table, hash_size * sizeof(*cache->fci_hash_table)); @@ -118,14 +121,14 @@ struct fld_cache_info *fld_cache_init(int hash_size, int cache_size, for (i = 0; i < hash_size; i++) INIT_HLIST_HEAD(&cache->fci_hash_table[i]); - CDEBUG(D_INFO|D_WARNING, "FLD cache - Size: %d, Threshold: %d\n", - cache_size, cache_threshold); + CDEBUG(D_INFO|D_WARNING, "%s: FLD cache - Size: %d, Threshold: %d\n", + cache->fci_name, cache_size, cache_threshold); RETURN(cache); } EXPORT_SYMBOL(fld_cache_init); -void fld_cache_fini(struct fld_cache_info *cache) +void fld_cache_fini(struct fld_cache *cache) { ENTRY; @@ -141,7 +144,7 @@ void fld_cache_fini(struct fld_cache_info *cache) EXPORT_SYMBOL(fld_cache_fini); static inline struct hlist_head * -fld_cache_bucket(struct fld_cache_info *cache, seqno_t seq) +fld_cache_bucket(struct fld_cache *cache, seqno_t seq) { return cache->fci_hash_table + (fld_cache_hash(seq) & cache->fci_hash_mask); @@ -152,7 +155,7 @@ fld_cache_bucket(struct fld_cache_info *cache, seqno_t seq) * collision lists well balanced. That is, checks all of them and removes one * entry in list and so on. */
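As an aside on the IS_PO2() assertion above: fld_cache_bucket() picks a bucket with a bitwise AND against fci_hash_mask, which is equivalent to a modulo only when the table size is a power of two. A minimal userspace sketch of that bucket selection follows; the types, the demo hash and main() are illustrative stand-ins, not Lustre code.

    #include <stdio.h>
    #include <stdint.h>

    typedef uint64_t seqno_t;              /* stand-in for Lustre's seqno_t */

    /* Same idea as fld_cache_hash(): truncate the sequence number. */
    uint32_t demo_hash(seqno_t seq)
    {
        return (uint32_t)seq;
    }

    int main(void)
    {
        int hash_size = 1024;              /* one page worth of hlist_heads */
        int hash_mask = hash_size - 1;     /* valid only for powers of two */
        seqno_t seq = 0x400000123ULL;

        /* hash & mask == hash % hash_size exactly because hash_size is 2^n. */
        printf("seq %llx -> bucket %u\n",
               (unsigned long long)seq, demo_hash(seq) & hash_mask);
        return 0;
    }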
-static int fld_cache_shrink(struct fld_cache_info *cache) +static int fld_cache_shrink(struct fld_cache *cache) { struct fld_cache_entry *flde; struct list_head *curr; @@ -179,13 +182,13 @@ static int fld_cache_shrink(struct fld_cache_info *cache) num++; } - CDEBUG(D_INFO|D_WARNING, "FLD cache - Shrinked by " - "%d entries\n", num); + CDEBUG(D_INFO|D_WARNING, "%s: FLD cache - Shrunk by " + "%d entries\n", cache->fci_name, num); RETURN(0); } -int fld_cache_insert(struct fld_cache_info *cache, +int fld_cache_insert(struct fld_cache *cache, seqno_t seq, mdsno_t mds) { struct fld_cache_entry *flde, *fldt; @@ -196,14 +199,14 @@ int fld_cache_insert(struct fld_cache_info *cache, spin_lock(&cache->fci_lock); - /* check if need to shrink cache */ + /* Check if we need to shrink the cache. */ rc = fld_cache_shrink(cache); if (rc) { spin_unlock(&cache->fci_lock); RETURN(rc); } - /* check if cache already has the entry with such a seq */ + /* Check if the cache already has an entry with this seq. */ bucket = fld_cache_bucket(cache, seq); hlist_for_each_entry(fldt, scan, bucket, fce_list) { if (fldt->fce_seq == seq) { @@ -213,13 +216,13 @@ int fld_cache_insert(struct fld_cache_info *cache, } spin_unlock(&cache->fci_lock); - /* allocate new entry */ + /* Allocate new entry. */ OBD_ALLOC_PTR(flde); if (!flde) RETURN(-ENOMEM); /* - * check if cache has the entry with such a seq again. It could be added + * Check if the cache already has an entry with this seq. It could have been added * while we were allocating new entry. */ spin_lock(&cache->fci_lock); @@ -231,7 +234,7 @@ int fld_cache_insert(struct fld_cache_info *cache, } } - /* add new entry to cache and lru list */ + /* Add the new entry to the cache and LRU list. */ INIT_HLIST_NODE(&flde->fce_list); flde->fce_mds = mds; flde->fce_seq = seq; @@ -246,7 +249,7 @@ int fld_cache_insert(struct fld_cache_info *cache, } EXPORT_SYMBOL(fld_cache_insert); -void fld_cache_delete(struct fld_cache_info *cache, seqno_t seq) +void fld_cache_delete(struct fld_cache *cache, seqno_t seq) { struct fld_cache_entry *flde; struct hlist_node *scan, *n; @@ -272,7 +275,7 @@ out_unlock: } EXPORT_SYMBOL(fld_cache_delete); -int fld_cache_lookup(struct fld_cache_info *cache, +int fld_cache_lookup(struct fld_cache *cache, seqno_t seq, mdsno_t *mds) { struct fld_cache_entry *flde; @@ -286,11 +289,8 @@ int fld_cache_lookup(struct fld_cache_info *cache, hlist_for_each_entry_safe(flde, scan, n, bucket, fce_list) { if (flde->fce_seq == seq) { *mds = flde->fce_mds; - - /* Move found entry to the head of lru list. */
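The insert and lookup paths above implement a hashed cache with LRU aging: a hit promotes the entry on fci_lru, and fld_cache_shrink() evicts entries once the cache grows past its configured size. The sketch below shows that promote/evict policy in plain userspace C; the list type and function names are invented for illustration, and the real shrinker also tries to keep the collision lists balanced rather than evicting purely from the cold end.

    #include <stdlib.h>

    struct lru_entry {
        struct lru_entry *prev, *next;
        unsigned long long seq;
        unsigned int mds;
    };

    struct lru_list {
        struct lru_entry head;             /* head.next is the hottest entry */
        int count;
    };

    void lru_init(struct lru_list *l)
    {
        l->head.prev = l->head.next = &l->head;
        l->count = 0;
    }

    void lru_unlink(struct lru_entry *e)
    {
        e->prev->next = e->next;
        e->next->prev = e->prev;
    }

    void lru_add_head(struct lru_list *l, struct lru_entry *e)
    {
        e->next = l->head.next;
        e->prev = &l->head;
        e->next->prev = e;
        l->head.next = e;
    }

    /* Cache hit: promote the entry, as fld_cache_lookup() does. */
    void lru_touch(struct lru_list *l, struct lru_entry *e)
    {
        lru_unlink(e);
        lru_add_head(l, e);
    }

    /* Insert pressure: free entries from the cold end until below the limit. */
    void lru_shrink(struct lru_list *l, int limit)
    {
        while (l->count > limit) {
            struct lru_entry *victim = l->head.prev;

            lru_unlink(victim);
            free(victim);
            l->count--;
        }
    }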
list_del(&flde->fce_lru); list_add(&flde->fce_lru, &cache->fci_lru); - spin_unlock(&cache->fci_lock); RETURN(0); } @@ -300,21 +300,21 @@ int fld_cache_lookup(struct fld_cache_info *cache, } EXPORT_SYMBOL(fld_cache_lookup); #else -int fld_cache_insert(struct fld_cache_info *cache, +int fld_cache_insert(struct fld_cache *cache, seqno_t seq, mdsno_t mds) { return -ENOTSUPP; } EXPORT_SYMBOL(fld_cache_insert); -void fld_cache_delete(struct fld_cache_info *cache, +void fld_cache_delete(struct fld_cache *cache, seqno_t seq) { return; } EXPORT_SYMBOL(fld_cache_delete); -int fld_cache_lookup(struct fld_cache_info *cache, +int fld_cache_lookup(struct fld_cache *cache, seqno_t seq, mdsno_t *mds) { return -ENOTSUPP; diff --git a/lustre/fld/fld_handler.c b/lustre/fld/fld_handler.c index 5ed0965..9c3d202 100644 --- a/lustre/fld/fld_handler.c +++ b/lustre/fld/fld_handler.c @@ -105,30 +105,71 @@ static void __exit fld_mod_exit(void) } } -/* insert index entry and update cache */ +/* Insert index entry and update cache. */ int fld_server_create(struct lu_server_fld *fld, const struct lu_env *env, seqno_t seq, mdsno_t mds) { - return fld_index_create(fld, env, seq, mds); + int rc; + ENTRY; + + rc = fld_index_create(fld, env, seq, mds); + + if (rc == 0) { + /* + * Do not return the result of calling fld_cache_insert() + * here. First of all because it may return -EEXIST. Another + * reason is that we do not want to stop proceeding even after + * cache errors. + */ + fld_cache_insert(fld->lsf_cache, seq, mds); + } + + RETURN(rc); } EXPORT_SYMBOL(fld_server_create); -/* delete index entry */ +/* Delete index entry. */ int fld_server_delete(struct lu_server_fld *fld, const struct lu_env *env, seqno_t seq) { - return fld_index_delete(fld, env, seq); + int rc; + ENTRY; + + fld_cache_delete(fld->lsf_cache, seq); + rc = fld_index_delete(fld, env, seq); + + RETURN(rc); } EXPORT_SYMBOL(fld_server_delete); -/* issue on-disk index lookup */ +/* Look up mds by seq. */ int fld_server_lookup(struct lu_server_fld *fld, const struct lu_env *env, seqno_t seq, mdsno_t *mds) { - return fld_index_lookup(fld, env, seq, mds); + int rc; + ENTRY; + + fld->lsf_stat.fst_count++; + + /* Look it up in the cache. */ + rc = fld_cache_lookup(fld->lsf_cache, seq, mds); + if (rc == 0) { + fld->lsf_stat.fst_cache++; + RETURN(0); + } + + rc = fld_index_lookup(fld, env, seq, mds); + if (rc == 0) { + /* + * Do not return an error here either. See the previous comment in + * the same situation in fld_server_create(). + */ + fld_cache_insert(fld->lsf_cache, seq, *mds); + } + RETURN(rc); } EXPORT_SYMBOL(fld_server_lookup); @@ -219,11 +260,11 @@ static void fld_thread_info_init(struct ptlrpc_request *req, info->fti_flags = lustre_msg_get_flags(req->rq_reqmsg); - /* mark rep buffer as req-layout stuff expects */ + /* Mark rep buffer as req-layout stuff expects. */ for (i = 0; i < ARRAY_SIZE(info->fti_rep_buf_size); i++) info->fti_rep_buf_size[i] = -1; - /* init request capsule */ + /* Init request capsule. */
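fld_server_lookup() above now follows the same read-through pattern as the client: count the request, try the cache, fall back to the index on a miss, and repopulate the cache on the way out while deliberately ignoring cache insertion failures. A condensed sketch of that control flow follows; cache_lookup(), index_lookup() and cache_insert() are hypothetical stand-ins for fld_cache_lookup(), fld_index_lookup() and fld_cache_insert().

    struct fld_stats_demo {
        unsigned long long fst_count;      /* total lookups */
        unsigned long long fst_cache;      /* served from cache */
    };

    /* Hypothetical backends; declared only to show the shape of the flow. */
    int cache_lookup(unsigned long long seq, unsigned int *mds);
    int index_lookup(unsigned long long seq, unsigned int *mds);
    int cache_insert(unsigned long long seq, unsigned int mds);

    int read_through_lookup(struct fld_stats_demo *st,
                            unsigned long long seq, unsigned int *mds)
    {
        int rc;

        st->fst_count++;

        rc = cache_lookup(seq, mds);
        if (rc == 0) {
            st->fst_cache++;
            return 0;
        }

        rc = index_lookup(seq, mds);
        if (rc == 0) {
            /*
             * Best effort only: a failed insert (e.g. -EEXIST) must not
             * turn a successful lookup into an error.
             */
            cache_insert(seq, *mds);
        }
        return rc;
    }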
req_capsule_init(&info->fti_pill, req, RCL_SERVER, info->fti_rep_buf_size); @@ -333,14 +374,31 @@ static void fld_server_proc_fini(struct lu_server_fld *fld) int fld_server_init(struct lu_server_fld *fld, struct dt_device *dt, const char *prefix, const struct lu_env *env) { + int cache_size, cache_threshold; int rc; ENTRY; + memset(&fld->lsf_stat, 0, sizeof(fld->lsf_stat)); snprintf(fld->lsf_name, sizeof(fld->lsf_name), "srv-%s", prefix); sema_init(&fld->lsf_sem, 1); + cache_size = FLD_SERVER_CACHE_SIZE / + sizeof(struct fld_cache_entry); + + cache_threshold = cache_size * + FLD_SERVER_CACHE_THRESHOLD / 100; + + fld->lsf_cache = fld_cache_init(fld->lsf_name, + FLD_SERVER_HTABLE_SIZE, + cache_size, cache_threshold); + if (IS_ERR(fld->lsf_cache)) { + rc = PTR_ERR(fld->lsf_cache); + fld->lsf_cache = NULL; + GOTO(out, rc); + } + rc = fld_index_init(fld, env, dt); if (rc) GOTO(out, rc); @@ -360,11 +418,26 @@ EXPORT_SYMBOL(fld_server_init); void fld_server_fini(struct lu_server_fld *fld, const struct lu_env *env) { + __u64 pct; ENTRY; + pct = fld->lsf_stat.fst_cache * 100; + do_div(pct, fld->lsf_stat.fst_count); + + printk("FLD cache statistics (%s):\n", fld->lsf_name); + printk(" Total reqs: "LPU64"\n", fld->lsf_stat.fst_count); + printk(" Cache reqs: "LPU64"\n", fld->lsf_stat.fst_cache); + printk(" Cache hits: "LPU64"%%\n", pct); + fld_server_proc_fini(fld); fld_index_fini(fld, env); + if (fld->lsf_cache != NULL) { + if (!IS_ERR(fld->lsf_cache)) + fld_cache_fini(fld->lsf_cache); + fld->lsf_cache = NULL; + } + EXIT; } EXPORT_SYMBOL(fld_server_fini); diff --git a/lustre/fld/fld_internal.h b/lustre/fld/fld_internal.h index ecf9907..421b5c9 100644 --- a/lustre/fld/fld_internal.h +++ b/lustre/fld/fld_internal.h @@ -45,7 +45,33 @@ enum fld_op { }; enum { - FLD_HTABLE_SIZE = 256 + /* 4M of FLD cache will not hurt the server a lot. */ + FLD_SERVER_CACHE_SIZE = (4 * 0x100000), + + /* 1M of FLD cache will not hurt the client a lot. */ + FLD_CLIENT_CACHE_SIZE = (1 * 0x100000) }; + +enum { + /* Cache threshold is 10 percent of size. */ + FLD_SERVER_CACHE_THRESHOLD = 10, + + /* Cache threshold is 10 percent of size. */ + FLD_CLIENT_CACHE_THRESHOLD = 10 }; + +enum { + /* + * One page is used for the hashtable, i.e. sizeof(struct hlist_head) * + * 1024. + */ + FLD_CLIENT_HTABLE_SIZE = (1024 * 1), + + /* + * Here 4 pages are used for the hashtable of the server cache. This is + * because the cache itself is 4 times bigger.
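To make the sizing constants concrete: the number of cached entries is the byte budget divided by sizeof(struct fld_cache_entry), and the shrink threshold is 10% of that. The struct below is only an approximation of fld_cache_entry (two list linkages plus the seq/mds payload), so the printed numbers are illustrative rather than exact.

    #include <stdio.h>
    #include <stdint.h>

    /* Rough layout of struct fld_cache_entry, for sizing purposes only. */
    struct fld_cache_entry_approx {
        void     *fce_list[2];             /* hlist_node */
        void     *fce_lru[2];              /* list_head */
        uint64_t  fce_mds;
        uint64_t  fce_seq;
    };

    int main(void)
    {
        const int client_budget = 1 * 0x100000;   /* FLD_CLIENT_CACHE_SIZE */
        const int server_budget = 4 * 0x100000;   /* FLD_SERVER_CACHE_SIZE */
        int entry = (int)sizeof(struct fld_cache_entry_approx);
        int cli_size = client_budget / entry;
        int srv_size = server_budget / entry;

        printf("entry ~%d bytes\n", entry);
        printf("client: %d entries, threshold %d\n",
               cli_size, cli_size * 10 / 100);
        printf("server: %d entries, threshold %d\n",
               srv_size, srv_size * 10 / 100);
        return 0;
    }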
+ */ + FLD_SERVER_HTABLE_SIZE = (1024 * 4) }; extern struct lu_fld_hash fld_hash[]; diff --git a/lustre/fld/fld_request.c b/lustre/fld/fld_request.c index 8a207e9..9ee8004 100644 --- a/lustre/fld/fld_request.c +++ b/lustre/fld/fld_request.c @@ -287,12 +287,6 @@ static inline int hash_is_sane(int hash) return (hash >= 0 && hash < ARRAY_SIZE(fld_hash)); } -/* 1M of FLD cache will not hurt client a lot */ -#define FLD_CACHE_SIZE 1024000 - -/* cache threshold is 10 percent of size */ -#define FLD_CACHE_THRESHOLD 10 - int fld_client_init(struct lu_client_fld *fld, const char *prefix, int hash) { @@ -304,6 +298,7 @@ int fld_client_init(struct lu_client_fld *fld, LASSERT(fld != NULL); + memset(&fld->lcf_stat, 0, sizeof(fld->lcf_stat)); snprintf(fld->lcf_name, sizeof(fld->lcf_name), "cli-srv-%s", prefix); @@ -320,15 +315,15 @@ int fld_client_init(struct lu_client_fld *fld, INIT_LIST_HEAD(&fld->lcf_targets); #ifdef __KERNEL__ - cache_size = FLD_CACHE_SIZE / + cache_size = FLD_CLIENT_CACHE_SIZE / sizeof(struct fld_cache_entry); cache_threshold = cache_size * - FLD_CACHE_THRESHOLD / 100; + FLD_CLIENT_CACHE_THRESHOLD / 100; - fld->lcf_cache = fld_cache_init(FLD_HTABLE_SIZE, - cache_size, - cache_threshold); + fld->lcf_cache = fld_cache_init(fld->lcf_name, + FLD_CLIENT_HTABLE_SIZE, + cache_size, cache_threshold); if (IS_ERR(fld->lcf_cache)) { rc = PTR_ERR(fld->lcf_cache); fld->lcf_cache = NULL; @@ -354,8 +349,17 @@ EXPORT_SYMBOL(fld_client_init); void fld_client_fini(struct lu_client_fld *fld) { struct lu_fld_target *target, *tmp; + __u64 pct; ENTRY; + pct = fld->lcf_stat.fst_cache * 100; + do_div(pct, fld->lcf_stat.fst_count); + + printk("FLD cache statistics (%s):\n", fld->lcf_name); + printk(" Total reqs: "LPU64"\n", fld->lcf_stat.fst_count); + printk(" Cache reqs: "LPU64"\n", fld->lcf_stat.fst_cache); + printk(" Cache hits: "LPU64"%%\n", pct); + fld_client_proc_fini(fld); spin_lock(&fld->lcf_lock); @@ -471,7 +475,7 @@ int fld_client_create(struct lu_client_fld *fld, * Do not return result of calling fld_cache_insert() * here. First of all because it may return -EEXISTS. Another * reason is that, we do not want to stop proceeding because of - * cache errors. --umka + * cache errors. */ fld_cache_insert(fld->lcf_cache, seq, mds); } else { @@ -531,10 +535,13 @@ int fld_client_lookup(struct lu_client_fld *fld, ENTRY; down(&fld->lcf_sem); - + + fld->lcf_stat.fst_count++; + /* Lookup it in the cache */ rc = fld_cache_lookup(fld->lcf_cache, seq, mds); if (rc == 0) { + fld->lcf_stat.fst_cache++; up(&fld->lcf_sem); RETURN(0); } @@ -564,7 +571,7 @@ int fld_client_lookup(struct lu_client_fld *fld, /* * Do not return error here as well. See previous comment in - * same situation in function fld_client_create(). --umka + * same situation in function fld_client_create(). */ fld_cache_insert(fld->lcf_cache, seq, *mds); } diff --git a/lustre/include/lustre_fld.h b/lustre/include/lustre_fld.h index b36d090..0ca8205 100644 --- a/lustre/include/lustre_fld.h +++ b/lustre/include/lustre_fld.h @@ -33,6 +33,11 @@ struct lu_client_fld; struct lu_server_fld; +struct fld_stats { + __u64 fst_count; + __u64 fst_cache; +}; + /* * FLD (Fid Location Database) interface. 
*/ @@ -62,20 +67,6 @@ struct lu_fld_hash { fld_scan_func_t fh_scan_func; }; -struct lu_server_fld { - /* fld dir proc entry */ - cfs_proc_dir_entry_t *lsf_proc_dir; - - /* /fld file object device */ - struct dt_object *lsf_obj; - - /* Protect index modifications */ - struct semaphore lsf_sem; - - /* fld service name in form "fld-MDTXXX" */ - char lsf_name[80]; -}; - struct fld_cache_entry { struct hlist_node fce_list; struct list_head fce_lru; @@ -83,58 +74,84 @@ struct fld_cache_entry { seqno_t fce_seq; }; -struct fld_cache_info { +struct fld_cache { /* - * cache guard, protects fci_hash mostly because others immutable after + * Cache guard, protects fci_hash mostly because others are immutable after * init is finished. */ spinlock_t fci_lock; - /* cache shrink threshold */ + /* Cache shrink threshold */ int fci_threshold; - /* prefered number of cached entries */ + /* Preferred number of cached entries */ int fci_cache_size; - /* current number of cached entries. Protected by @fci_lock */ + /* Current number of cached entries. Protected by @fci_lock */ int fci_cache_count; - /* hash table size (number of collision lists) */ + /* Hash table size (number of collision lists) */ int fci_hash_size; - /* hash table mask */ + /* Hash table mask */ int fci_hash_mask; - /* hash table for all collision lists */ + /* Hash table for all collision lists */ struct hlist_head *fci_hash_table; - /* lru list */ + /* LRU list */ struct list_head fci_lru; + + /* Cache name used for debug and messages. */ + char fci_name[80]; +}; + +struct lu_server_fld { + /* Fld dir proc entry. */ + cfs_proc_dir_entry_t *lsf_proc_dir; + + /* /fld file object device */ + struct dt_object *lsf_obj; + + /* Server FLD cache. */ + struct fld_cache *lsf_cache; + + /* Protect index modifications */ + struct semaphore lsf_sem; + + /* Server cache statistics. */ + struct fld_stats lsf_stat; + + /* Fld service name in form "fld-srv-lustre-MDTXXX" */ + char lsf_name[80]; }; struct lu_client_fld { - /* client side proc entry */ + /* Client side proc entry. */ cfs_proc_dir_entry_t *lcf_proc_dir; - /* list of exports client FLD knows about */ + /* List of exports client FLD knows about. */ struct list_head lcf_targets; - /* current hash to be used to chose an export */ + /* Current hash to be used to choose an export. */ struct lu_fld_hash *lcf_hash; - /* exports count */ + /* Exports count. */ int lcf_count; - /* lock protecting exports list and fld_hash */ + /* Lock protecting exports list and fld_hash. */ spinlock_t lcf_lock; - /* protect fld req + cache modification */ + /* Client cache statistics. */ + struct fld_stats lcf_stat; + + /* Protect fld req + cache modification. */ struct semaphore lcf_sem; - /* client FLD cache */ - struct fld_cache_info *lcf_cache; + /* Client FLD cache. */ + struct fld_cache *lcf_cache; - /* client fld proc entry name */ + /* Client fld proc entry name.
*/ char lcf_name[80]; const struct lu_context *lcf_ctx; @@ -188,22 +205,23 @@ int fld_client_del_target(struct lu_client_fld *fld, __u64 idx); /* Cache methods */ -struct fld_cache_info *fld_cache_init(int hash_size, - int cache_size, - int cache_threshold); +struct fld_cache *fld_cache_init(const char *name, + int hash_size, + int cache_size, + int cache_threshold); -void fld_cache_fini(struct fld_cache_info *cache); +void fld_cache_fini(struct fld_cache *cache); -void fld_cache_flush(struct fld_cache_info *cache); +void fld_cache_flush(struct fld_cache *cache); -int fld_cache_insert(struct fld_cache_info *cache, +int fld_cache_insert(struct fld_cache *cache, seqno_t seq, mdsno_t mds); -void fld_cache_delete(struct fld_cache_info *cache, +void fld_cache_delete(struct fld_cache *cache, seqno_t seq); int -fld_cache_lookup(struct fld_cache_info *cache, +fld_cache_lookup(struct fld_cache *cache, seqno_t seq, mdsno_t *mds); #endif diff --git a/lustre/include/obd.h b/lustre/include/obd.h index b1cbee6..07891fb 100644 --- a/lustre/include/obd.h +++ b/lustre/include/obd.h @@ -666,11 +666,11 @@ struct lov_obd { }; struct lmv_tgt_desc { - struct obd_uuid uuid; - struct obd_export *ltd_exp; - int active; /* is this target up for requests */ - int idx; - struct semaphore fid_sem; + struct obd_uuid ltd_uuid; + struct obd_export *ltd_exp; + int ltd_active; /* is this target up for requests */ + int ltd_idx; + struct semaphore ltd_fid_sem; }; struct lmv_obd { diff --git a/lustre/lmv/lmv_obd.c b/lustre/lmv/lmv_obd.c index 1e9d1f5..d50891b 100644 --- a/lustre/lmv/lmv_obd.c +++ b/lustre/lmv/lmv_obd.c @@ -62,10 +62,10 @@ static void lmv_activate_target(struct lmv_obd *lmv, struct lmv_tgt_desc *tgt, int activate) { - if (tgt->active == activate) + if (tgt->ltd_active == activate) return; - tgt->active = activate; + tgt->ltd_active = activate; lmv->desc.ld_active_tgt_count += (activate ? 1 : -1); } @@ -92,9 +92,9 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, continue; CDEBUG(D_INFO, "lmv idx %d is %s conn "LPX64"\n", - i, tgt->uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie); + i, tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie); - if (obd_uuid_equals(uuid, &tgt->uuid)) + if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) break; } @@ -110,7 +110,7 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, obd->obd_type->typ_name, i); LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0); - if (tgt->active == activate) { + if (tgt->ltd_active == activate) { CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd, activate ? "" : "in"); GOTO(out_lmv_lock, rc); @@ -142,8 +142,8 @@ static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid, if (tgt->ltd_exp == NULL) continue; - if (obd_uuid_equals(uuid, &tgt->uuid)) { - lmv->datas[tgt->idx] = *data; + if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) { + lmv->datas[tgt->ltd_idx] = *data; break; } } @@ -359,34 +359,34 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) ENTRY; /* for MDS: don't connect to yourself */ - if (obd_uuid_equals(&tgt->uuid, cluuid)) { + if (obd_uuid_equals(&tgt->ltd_uuid, cluuid)) { CDEBUG(D_CONFIG, "don't connect back to %s\n", cluuid->uuid); /* XXX - the old code didn't increment active tgt count. * should we ? 
*/ RETURN(0); } - mdc_obd = class_find_client_obd(&tgt->uuid, LUSTRE_MDC_NAME, + mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME, &obd->obd_uuid); if (!mdc_obd) { - CERROR("target %s not attached\n", tgt->uuid.uuid); + CERROR("target %s not attached\n", tgt->ltd_uuid.uuid); RETURN(-EINVAL); } CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n", mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - tgt->uuid.uuid, obd->obd_uuid.uuid, + tgt->ltd_uuid.uuid, obd->obd_uuid.uuid, cluuid->uuid); if (!mdc_obd->obd_set_up) { - CERROR("target %s is not set up\n", tgt->uuid.uuid); + CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid); RETURN(-EINVAL); } rc = obd_connect(NULL, &conn, mdc_obd, &lmv_mdc_uuid, &lmv->conn_data); if (rc) { - CERROR("target %s connect error %d\n", tgt->uuid.uuid, rc); + CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc); RETURN(rc); } @@ -394,7 +394,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) target.ft_srv = NULL; target.ft_exp = mdc_exp; - target.ft_idx = tgt->idx; + target.ft_idx = tgt->ltd_idx; fld_client_add_target(&lmv->lmv_fld, &target); @@ -404,7 +404,7 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) if (rc) { obd_disconnect(mdc_exp); CERROR("target %s register_observer error %d\n", - tgt->uuid.uuid, rc); + tgt->ltd_uuid.uuid, rc); RETURN(rc); } @@ -418,12 +418,12 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) } } - tgt->active = 1; + tgt->ltd_active = 1; tgt->ltd_exp = mdc_exp; lmv->desc.ld_active_tgt_count++; /* copy connect data, it may be used later */ - lmv->datas[tgt->idx] = *mdc_data; + lmv->datas[tgt->ltd_idx] = *mdc_data; md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize, lmv->max_cookiesize); @@ -496,7 +496,7 @@ int lmv_add_target(struct obd_device *obd, struct obd_uuid *tgt_uuid) } spin_lock(&lmv->lmv_lock); tgt = lmv->tgts + lmv->desc.ld_tgt_count++; - tgt->uuid = *tgt_uuid; + tgt->ltd_uuid = *tgt_uuid; spin_unlock(&lmv->lmv_lock); if (lmv->connected) { @@ -563,14 +563,14 @@ int lmv_check_connect(struct obd_device *obd) while (i-- > 0) { int rc2; --tgt; - tgt->active = 0; + tgt->ltd_active = 0; if (tgt->ltd_exp) { --lmv->desc.ld_active_tgt_count; rc2 = obd_disconnect(tgt->ltd_exp); if (rc2) { CERROR("error: LMV target %s disconnect on " "MDC idx %d: error %d\n", - tgt->uuid.uuid, i, rc2); + tgt->ltd_uuid.uuid, i, rc2); } } } @@ -634,9 +634,9 @@ static int lmv_disconnect(struct obd_export *exp) obd_register_observer(lmv->tgts[i].ltd_exp->exp_obd, NULL); rc = obd_disconnect(lmv->tgts[i].ltd_exp); if (rc) { - if (lmv->tgts[i].active) { + if (lmv->tgts[i].ltd_active) { CERROR("Target %s disconnect error %d\n", - lmv->tgts[i].uuid.uuid, rc); + lmv->tgts[i].ltd_uuid.uuid, rc); } rc = 0; } @@ -684,10 +684,10 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, err = obd_iocontrol(cmd, lmv->tgts[i].ltd_exp, len, karg, uarg); if (err) { - if (lmv->tgts[i].active) { + if (lmv->tgts[i].ltd_active) { CERROR("error: iocontrol MDC %s on MDT" "idx %d: err = %d\n", - lmv->tgts[i].uuid.uuid, i, err); + lmv->tgts[i].ltd_uuid.uuid, i, err); if (!rc) rc = err; } @@ -838,7 +838,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, ENTRY; /* New seq alloc and FLD setup should be atomic. */ - down(&tgt->fid_sem); + down(&tgt->ltd_fid_sem); /* Asking underlaying tgt layer to allocate new fid. 
*/ rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL); @@ -854,7 +854,7 @@ int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, } } - up(&tgt->fid_sem); + up(&tgt->ltd_fid_sem); RETURN(rc); } @@ -926,8 +926,8 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg) RETURN(-ENOMEM); for (i = 0; i < LMV_MAX_TGT_COUNT; i++) { - sema_init(&lmv->tgts[i].fid_sem, 1); - lmv->tgts[i].idx = i; + sema_init(&lmv->tgts[i].ltd_fid_sem, 1); + lmv->tgts[i].ltd_idx = i; } lmv->datas_size = LMV_MAX_TGT_COUNT * sizeof(struct obd_connect_data); @@ -2339,7 +2339,7 @@ static int lmv_cancel_unused(struct obd_export *exp, LASSERT(fid != NULL); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (!lmv->tgts[i].ltd_exp || !lmv->tgts[i].active) + if (!lmv->tgts[i].ltd_exp || !lmv->tgts[i].ltd_active) continue; err = md_cancel_unused(lmv->tgts[i].ltd_exp, diff --git a/lustre/lmv/lproc_lmv.c b/lustre/lmv/lproc_lmv.c index 39c62fc..9b1615a 100644 --- a/lustre/lmv/lproc_lmv.c +++ b/lustre/lmv/lproc_lmv.c @@ -100,8 +100,8 @@ static int lmv_tgt_seq_show(struct seq_file *p, void *v) struct lmv_obd *lmv = &dev->u.lmv; int idx = tgt - &(lmv->tgts[0]); - return seq_printf(p, "%d: %s %sACTIVE\n", idx, tgt->uuid.uuid, - tgt->active ? "" : "IN"); + return seq_printf(p, "%d: %s %sACTIVE\n", idx, tgt->ltd_uuid.uuid, + tgt->ltd_active ? "" : "IN"); } struct seq_operations lmv_tgt_sops = {
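One more sketch, on the lmv side of the patch: ltd_fid_sem (renamed above from fid_sem) serializes __lmv_fid_alloc()'s "allocate a new sequence, then register its location in FLD" pair, so another allocation on the same target cannot interleave between the two steps. Below is a generic userspace rendering of that shape; the pthread mutex, the tgt_demo type and the two helper functions are illustrative stand-ins, not the Lustre API.

    #include <pthread.h>

    /* Hypothetical helpers standing in for obd_fid_alloc() and the FLD
     * registration done when a brand new sequence is handed out. */
    int alloc_fid(unsigned long long *seq, int *is_new_seq);
    int publish_seq_location(unsigned long long seq, int mds_idx);

    struct tgt_demo {
        pthread_mutex_t fid_lock;          /* plays the role of ltd_fid_sem */
        int             idx;               /* plays the role of ltd_idx */
    };

    int fid_alloc_atomic(struct tgt_demo *tgt, unsigned long long *seq)
    {
        int is_new = 0;
        int rc;

        /*
         * Sequence allocation and FLD publication must not interleave with
         * another allocation on the same target, hence one lock around both.
         */
        pthread_mutex_lock(&tgt->fid_lock);

        rc = alloc_fid(seq, &is_new);
        if (rc == 0 && is_new)
            rc = publish_seq_location(*seq, tgt->idx);

        pthread_mutex_unlock(&tgt->fid_lock);
        return rc;
    }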