X-Git-Url: https://git.whamcloud.com/?p=fs%2Flustre-release.git;a=blobdiff_plain;f=lustre%2Flmv%2Flmv_obd.c;h=8e9d844c58baf364149783230d52900f0428d457;hp=e5b48460f11d1c66d4c2e130b2116171b8690388;hb=47c31bb3174a4f0ba81e088bed0d3dd5cb1223f4;hpb=08aa217ce49aba1ded52e0f7adb8a607035123fd diff --git a/lustre/lmv/lmv_obd.c b/lustre/lmv/lmv_obd.c index e5b4846..8e9d844 100644 --- a/lustre/lmv/lmv_obd.c +++ b/lustre/lmv/lmv_obd.c @@ -27,7 +27,7 @@ * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * - * Copyright (c) 2011, 2012, Intel Corporation. + * Copyright (c) 2011, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -35,33 +35,88 @@ */ #define DEBUG_SUBSYSTEM S_LMV -#ifdef __KERNEL__ #include #include #include #include #include #include -#include +#include #include #include -#else -#include -#endif #include #include #include #include #include +#include #include +#include +#include #include #include +#include #include "lmv_internal.h" -/* object cache. */ -cfs_mem_cache_t *lmv_object_cache; -cfs_atomic_t lmv_object_count = CFS_ATOMIC_INIT(0); +/* This hash is only for testing purpose */ +static inline unsigned int +lmv_hash_all_chars(unsigned int count, const char *name, int namelen) +{ + unsigned int c = 0; + const unsigned char *p = (const unsigned char *)name; + + while (--namelen >= 0) + c += p[namelen]; + + c = c % count; + + return c; +} + +static inline unsigned int +lmv_hash_fnv1a(unsigned int count, const char *name, int namelen) +{ + __u64 hash; + + hash = lustre_hash_fnv_1a_64(name, namelen); + + hash = hash % count; + + return hash; +} + +int lmv_name_to_stripe_index(__u32 lmv_hash_type, unsigned int stripe_count, + const char *name, int namelen) +{ + int idx; + __u32 hash_type = lmv_hash_type & LMV_HASH_TYPE_MASK; + + LASSERT(namelen > 0); + if (stripe_count <= 1) + return 0; + + /* for migrating object, always start from 0 stripe */ + if (lmv_hash_type & LMV_HASH_FLAG_MIGRATION) + return 0; + + switch (hash_type) { + case LMV_HASH_TYPE_ALL_CHARS: + idx = lmv_hash_all_chars(stripe_count, name, namelen); + break; + case LMV_HASH_TYPE_FNV_1A_64: + idx = lmv_hash_fnv1a(stripe_count, name, namelen); + break; + default: + idx = -EBADFD; + break; + } + + CDEBUG(D_INFO, "name %.*s hash_type %d idx %d\n", namelen, name, + hash_type, idx); + + return idx; +} static void lmv_activate_target(struct lmv_obd *lmv, struct lmv_tgt_desc *tgt, @@ -81,29 +136,31 @@ static void lmv_activate_target(struct lmv_obd *lmv, * -ENOTCONN: The UUID is found, but the target connection is bad (!) * -EBADF : The UUID is found, but the OBD of the wrong type (!) 
*/ -static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, - int activate) -{ - struct lmv_tgt_desc *tgt; - struct obd_device *obd; - int i; - int rc = 0; - ENTRY; +static int lmv_set_mdc_active(struct lmv_obd *lmv, + const struct obd_uuid *uuid, + int activate) +{ + struct lmv_tgt_desc *tgt = NULL; + struct obd_device *obd; + __u32 i; + int rc = 0; + ENTRY; - CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n", - lmv, uuid->uuid, activate); + CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n", + lmv, uuid->uuid, activate); spin_lock(&lmv->lmv_lock); - for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) { - if (tgt->ltd_exp == NULL) - continue; + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + tgt = lmv->tgts[i]; + if (tgt == NULL || tgt->ltd_exp == NULL) + continue; - CDEBUG(D_INFO, "Target idx %d is %s conn "LPX64"\n", - i, tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie); + CDEBUG(D_INFO, "Target idx %d is %s conn "LPX64"\n", i, + tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie); - if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) - break; - } + if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) + break; + } if (i == lmv->desc.ld_tgt_count) GOTO(out_lmv_lock, rc = -EINVAL); @@ -133,33 +190,12 @@ static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid, return rc; } -static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid, - struct obd_connect_data *data) +struct obd_uuid *lmv_get_uuid(struct obd_export *exp) { - struct lmv_tgt_desc *tgt; - int i; - ENTRY; + struct lmv_obd *lmv = &exp->exp_obd->u.lmv; + struct lmv_tgt_desc *tgt = lmv->tgts[0]; - LASSERT(data != NULL); - - spin_lock(&lmv->lmv_lock); - for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) { - if (tgt->ltd_exp == NULL) - continue; - - if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) { - lmv->datas[tgt->ltd_idx] = *data; - break; - } - } - spin_unlock(&lmv->lmv_lock); - RETURN(0); -} - -struct obd_uuid *lmv_get_uuid(struct obd_export *exp) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - return obd_get_uuid(lmv->tgts[0].ltd_exp); + return (tgt == NULL) ? NULL : obd_get_uuid(tgt->ltd_exp); } static int lmv_notify(struct obd_device *obd, struct obd_device *watched, @@ -192,27 +228,15 @@ static int lmv_notify(struct obd_device *obd, struct obd_device *watched, uuid->uuid, rc); RETURN(rc); } - } else if (ev == OBD_NOTIFY_OCD) { - conn_data = &watched->u.cli.cl_import->imp_connect_data; - - /* - * Set connect data to desired target, update exp_connect_flags. - */ - rc = lmv_set_mdc_data(lmv, uuid, conn_data); - if (rc) { - CERROR("can't set connect data to target %s, rc %d\n", - uuid->uuid, rc); - RETURN(rc); - } - - /* - * XXX: Make sure that ocd_connect_flags from all targets are - * the same. Otherwise one of MDTs runs wrong version or - * something like this. --umka - */ - obd->obd_self_export->exp_connect_flags = - conn_data->ocd_connect_flags; - } + } else if (ev == OBD_NOTIFY_OCD) { + conn_data = &watched->u.cli.cl_import->imp_connect_data; + /* + * XXX: Make sure that ocd_connect_flags from all targets are + * the same. Otherwise one of MDTs runs wrong version or + * something like this. 
--umka + */ + obd->obd_self_export->exp_connect_data = *conn_data; + } #if 0 else if (ev == OBD_NOTIFY_DISCON) { /* @@ -239,9 +263,6 @@ static int lmv_connect(const struct lu_env *env, struct obd_uuid *cluuid, struct obd_connect_data *data, void *localdata) { -#ifdef __KERNEL__ - struct proc_dir_entry *lmv_proc_dir; -#endif struct lmv_obd *lmv = &obd->u.lmv; struct lustre_handle conn = { 0 }; int rc = 0; @@ -273,15 +294,18 @@ static int lmv_connect(const struct lu_env *env, if (data) lmv->conn_data = *data; -#ifdef __KERNEL__ - lmv_proc_dir = lprocfs_register("target_obds", obd->obd_proc_entry, - NULL, NULL); - if (IS_ERR(lmv_proc_dir)) { - CERROR("could not register /proc/fs/lustre/%s/%s/target_obds.", - obd->obd_type->typ_name, obd->obd_name); - lmv_proc_dir = NULL; - } -#endif + if (lmv->targets_proc_entry == NULL) { + lmv->targets_proc_entry = lprocfs_seq_register("target_obds", + obd->obd_proc_entry, + NULL, NULL); + if (IS_ERR(lmv->targets_proc_entry)) { + CERROR("%s: cannot register " + "/proc/fs/lustre/%s/%s/target_obds\n", + obd->obd_name, obd->obd_type->typ_name, + obd->obd_name); + lmv->targets_proc_entry = NULL; + } + } /* * All real clients should perform actual connection right away, because @@ -289,24 +313,18 @@ static int lmv_connect(const struct lu_env *env, * and MDC stuff will be called directly, for instance while reading * ../mdc/../kbytesfree procfs file, etc. */ - if (data->ocd_connect_flags & OBD_CONNECT_REAL) + if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_REAL)) rc = lmv_check_connect(obd); -#ifdef __KERNEL__ - if (rc) { - if (lmv_proc_dir) - lprocfs_remove(&lmv_proc_dir); - } -#endif - - RETURN(rc); + if (rc && lmv->targets_proc_entry != NULL) + lprocfs_remove(&lmv->targets_proc_entry); + RETURN(rc); } static void lmv_set_timeouts(struct obd_device *obd) { - struct lmv_tgt_desc *tgts; - struct lmv_obd *lmv; - int i; + struct lmv_obd *lmv; + __u32 i; lmv = &obd->u.lmv; if (lmv->server_timeout == 0) @@ -315,24 +333,26 @@ static void lmv_set_timeouts(struct obd_device *obd) if (lmv->connected == 0) return; - for (i = 0, tgts = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgts++) { - if (tgts->ltd_exp == NULL) - continue; + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + struct lmv_tgt_desc *tgt = lmv->tgts[i]; - obd_set_info_async(NULL, tgts->ltd_exp, sizeof(KEY_INTERMDS), - KEY_INTERMDS, 0, NULL, NULL); - } + if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) + continue; + + obd_set_info_async(NULL, tgt->ltd_exp, sizeof(KEY_INTERMDS), + KEY_INTERMDS, 0, NULL, NULL); + } } static int lmv_init_ea_size(struct obd_export *exp, int easize, - int def_easize, int cookiesize) + int def_easize, int cookiesize, int def_cookiesize) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - int i; - int rc = 0; - int change = 0; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + __u32 i; + int rc = 0; + int change = 0; + ENTRY; if (lmv->max_easize < easize) { lmv->max_easize = easize; @@ -346,39 +366,41 @@ static int lmv_init_ea_size(struct obd_export *exp, int easize, lmv->max_cookiesize = cookiesize; change = 1; } - if (change == 0) - RETURN(0); + if (lmv->max_def_cookiesize < def_cookiesize) { + lmv->max_def_cookiesize = def_cookiesize; + change = 1; + } + if (change == 0) + RETURN(0); - if (lmv->connected == 0) - RETURN(0); + if (lmv->connected == 0) + RETURN(0); - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i].ltd_exp == NULL) { - CWARN("%s: NULL export for %d\n", 
obd->obd_name, i); - continue; - } + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + struct lmv_tgt_desc *tgt = lmv->tgts[i]; - rc = md_init_ea_size(lmv->tgts[i].ltd_exp, easize, def_easize, - cookiesize); - if (rc) { - CERROR("obd_init_ea_size() failed on MDT target %d, " - "error %d.\n", i, rc); - break; - } - } - RETURN(rc); + if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) { + CWARN("%s: NULL export for %d\n", obd->obd_name, i); + continue; + } + + rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize, + cookiesize, def_cookiesize); + if (rc) { + CERROR("%s: obd_init_ea_size() failed on MDT target %d:" + " rc = %d\n", obd->obd_name, i, rc); + break; + } + } + RETURN(rc); } #define MAX_STRING_SIZE 128 int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) { -#ifdef __KERNEL__ - struct proc_dir_entry *lmv_proc_dir; -#endif struct lmv_obd *lmv = &obd->u.lmv; struct obd_uuid *cluuid = &lmv->cluuid; - struct obd_connect_data *mdc_data = NULL; struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" }; struct obd_device *mdc_obd; struct obd_export *mdc_exp; @@ -410,12 +432,12 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) RETURN(rc); } - /* - * Init fid sequence client for this mdc and add new fld target. - */ - rc = obd_fid_init(mdc_exp); - if (rc) - RETURN(rc); + /* + * Init fid sequence client for this mdc and add new fld target. + */ + rc = obd_fid_init(mdc_obd, mdc_exp, LUSTRE_SEQ_METADATA); + if (rc) + RETURN(rc); target.ft_srv = NULL; target.ft_exp = mdc_exp; @@ -423,8 +445,6 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) fld_client_add_target(&lmv->lmv_fld, &target); - mdc_data = &class_exp2cliimp(mdc_exp)->imp_connect_data; - rc = obd_register_observer(mdc_obd, obd); if (rc) { obd_disconnect(mdc_exp); @@ -437,116 +457,165 @@ int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) /* * Tell the observer about the new target. */ - rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd, - OBD_NOTIFY_ACTIVE, (void *)(tgt - lmv->tgts)); - if (rc) { - obd_disconnect(mdc_exp); - RETURN(rc); - } + rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd, + OBD_NOTIFY_ACTIVE, + (void *)(tgt - lmv->tgts[0])); + if (rc) { + obd_disconnect(mdc_exp); + RETURN(rc); + } } - tgt->ltd_active = 1; - tgt->ltd_exp = mdc_exp; - lmv->desc.ld_active_tgt_count++; - - /* - * Copy connect data, it may be used later. 
- */ - lmv->datas[tgt->ltd_idx] = *mdc_data; + tgt->ltd_active = 1; + tgt->ltd_exp = mdc_exp; + lmv->desc.ld_active_tgt_count++; + + md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize, + lmv->max_cookiesize, lmv->max_def_cookiesize); + + CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n", + mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, + atomic_read(&obd->obd_refcount)); + + if (lmv->targets_proc_entry != NULL) { + struct proc_dir_entry *mdc_symlink; + + LASSERT(mdc_obd->obd_type != NULL); + LASSERT(mdc_obd->obd_type->typ_name != NULL); + mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name, + lmv->targets_proc_entry, + "../../../%s/%s", + mdc_obd->obd_type->typ_name, + mdc_obd->obd_name); + if (mdc_symlink == NULL) { + CERROR("cannot register LMV target " + "/proc/fs/lustre/%s/%s/target_obds/%s\n", + obd->obd_type->typ_name, obd->obd_name, + mdc_obd->obd_name); + } + } + RETURN(0); +} - md_init_ea_size(tgt->ltd_exp, lmv->max_easize, - lmv->max_def_easize, lmv->max_cookiesize); +static void lmv_del_target(struct lmv_obd *lmv, int index) +{ + if (lmv->tgts[index] == NULL) + return; - CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n", - mdc_obd->obd_name, mdc_obd->obd_uuid.uuid, - cfs_atomic_read(&obd->obd_refcount)); - -#ifdef __KERNEL__ - lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds"); - if (lmv_proc_dir) { - struct proc_dir_entry *mdc_symlink; - - LASSERT(mdc_obd->obd_type != NULL); - LASSERT(mdc_obd->obd_type->typ_name != NULL); - mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name, - lmv_proc_dir, - "../../../%s/%s", - mdc_obd->obd_type->typ_name, - mdc_obd->obd_name); - if (mdc_symlink == NULL) { - CERROR("Could not register LMV target " - "/proc/fs/lustre/%s/%s/target_obds/%s.", - obd->obd_type->typ_name, obd->obd_name, - mdc_obd->obd_name); - lprocfs_remove(&lmv_proc_dir); - lmv_proc_dir = NULL; - } - } -#endif - RETURN(0); + OBD_FREE_PTR(lmv->tgts[index]); + lmv->tgts[index] = NULL; + return; } -int lmv_add_target(struct obd_device *obd, struct obd_uuid *tgt_uuid) +static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp, + __u32 index, int gen) { struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; + int orig_tgt_count = 0; int rc = 0; ENTRY; - CDEBUG(D_CONFIG, "Target uuid: %s.\n", tgt_uuid->uuid); + CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index); lmv_init_lock(lmv); - if (lmv->desc.ld_active_tgt_count >= LMV_MAX_TGT_COUNT) { - lmv_init_unlock(lmv); - CERROR("Can't add %s, LMV module compiled for %d MDCs. 
" - "That many MDCs already configured.\n", - tgt_uuid->uuid, LMV_MAX_TGT_COUNT); - RETURN(-EINVAL); - } - if (lmv->desc.ld_tgt_count == 0) { - struct obd_device *mdc_obd; + if (lmv->desc.ld_tgt_count == 0) { + struct obd_device *mdc_obd; - mdc_obd = class_find_client_obd(tgt_uuid, LUSTRE_MDC_NAME, - &obd->obd_uuid); - if (!mdc_obd) { - lmv_init_unlock(lmv); - CERROR("Target %s not attached\n", tgt_uuid->uuid); - RETURN(-EINVAL); - } - } - spin_lock(&lmv->lmv_lock); - tgt = lmv->tgts + lmv->desc.ld_tgt_count++; - tgt->ltd_uuid = *tgt_uuid; - spin_unlock(&lmv->lmv_lock); + mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME, + &obd->obd_uuid); + if (!mdc_obd) { + lmv_init_unlock(lmv); + CERROR("%s: Target %s not attached: rc = %d\n", + obd->obd_name, uuidp->uuid, -EINVAL); + RETURN(-EINVAL); + } + } + + if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) { + tgt = lmv->tgts[index]; + CERROR("%s: UUID %s already assigned at LOV target index %d:" + " rc = %d\n", obd->obd_name, + obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST); + lmv_init_unlock(lmv); + RETURN(-EEXIST); + } + + if (index >= lmv->tgts_size) { + /* We need to reallocate the lmv target array. */ + struct lmv_tgt_desc **newtgts, **old = NULL; + __u32 newsize = 1; + __u32 oldsize = 0; + + while (newsize < index + 1) + newsize = newsize << 1; + OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize); + if (newtgts == NULL) { + lmv_init_unlock(lmv); + RETURN(-ENOMEM); + } + + if (lmv->tgts_size) { + memcpy(newtgts, lmv->tgts, + sizeof(*newtgts) * lmv->tgts_size); + old = lmv->tgts; + oldsize = lmv->tgts_size; + } + + lmv->tgts = newtgts; + lmv->tgts_size = newsize; + smp_rmb(); + if (old) + OBD_FREE(old, sizeof(*old) * oldsize); + + CDEBUG(D_CONFIG, "tgts: %p size: %d\n", lmv->tgts, + lmv->tgts_size); + } + + OBD_ALLOC_PTR(tgt); + if (!tgt) { + lmv_init_unlock(lmv); + RETURN(-ENOMEM); + } + + mutex_init(&tgt->ltd_fid_mutex); + tgt->ltd_idx = index; + tgt->ltd_uuid = *uuidp; + tgt->ltd_active = 0; + lmv->tgts[index] = tgt; + if (index >= lmv->desc.ld_tgt_count) { + orig_tgt_count = lmv->desc.ld_tgt_count; + lmv->desc.ld_tgt_count = index + 1; + } if (lmv->connected) { rc = lmv_connect_mdc(obd, tgt); - if (rc) { + if (rc != 0) { spin_lock(&lmv->lmv_lock); - lmv->desc.ld_tgt_count--; + if (lmv->desc.ld_tgt_count == index + 1) + lmv->desc.ld_tgt_count = orig_tgt_count; memset(tgt, 0, sizeof(*tgt)); spin_unlock(&lmv->lmv_lock); - } else { - int easize = sizeof(struct lmv_stripe_md) + - lmv->desc.ld_tgt_count * - sizeof(struct lu_fid); - lmv_init_ea_size(obd->obd_self_export, easize, 0, 0); - } - } + } else { + int easize = sizeof(struct lmv_stripe_md) + + lmv->desc.ld_tgt_count * sizeof(struct lu_fid); + lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0); + } + } - lmv_init_unlock(lmv); - RETURN(rc); + lmv_init_unlock(lmv); + RETURN(rc); } int lmv_check_connect(struct obd_device *obd) { - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - int i; - int rc; - int easize; - ENTRY; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt; + __u32 i; + int rc; + int easize; + ENTRY; if (lmv->connected) RETURN(0); @@ -563,29 +632,41 @@ int lmv_check_connect(struct obd_device *obd) RETURN(-EINVAL); } - CDEBUG(D_CONFIG, "Time to connect %s to %s\n", - lmv->cluuid.uuid, obd->obd_name); + LASSERT(lmv->tgts != NULL); - LASSERT(lmv->tgts != NULL); + if (lmv->tgts[0] == NULL) { + lmv_init_unlock(lmv); + CERROR("%s: no target configured for index 0.\n", + obd->obd_name); + RETURN(-EINVAL); + } - for (i = 0, tgt = lmv->tgts; i 
< lmv->desc.ld_tgt_count; i++, tgt++) { - rc = lmv_connect_mdc(obd, tgt); - if (rc) - GOTO(out_disc, rc); - } + CDEBUG(D_CONFIG, "Time to connect %s to %s\n", + lmv->cluuid.uuid, obd->obd_name); - lmv_set_timeouts(obd); - class_export_put(lmv->exp); - lmv->connected = 1; - easize = lmv_get_easize(lmv); - lmv_init_ea_size(obd->obd_self_export, easize, 0, 0); - lmv_init_unlock(lmv); - RETURN(0); + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + tgt = lmv->tgts[i]; + if (tgt == NULL) + continue; + rc = lmv_connect_mdc(obd, tgt); + if (rc) + GOTO(out_disc, rc); + } + + lmv_set_timeouts(obd); + class_export_put(lmv->exp); + lmv->connected = 1; + easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC); + lmv_init_ea_size(obd->obd_self_export, easize, 0, 0, 0); + lmv_init_unlock(lmv); + RETURN(0); out_disc: while (i-- > 0) { int rc2; - --tgt; + tgt = lmv->tgts[i]; + if (tgt == NULL) + continue; tgt->ltd_active = 0; if (tgt->ltd_exp) { --lmv->desc.ld_active_tgt_count; @@ -604,9 +685,6 @@ int lmv_check_connect(struct obd_device *obd) static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) { -#ifdef __KERNEL__ - struct proc_dir_entry *lmv_proc_dir; -#endif struct lmv_obd *lmv = &obd->u.lmv; struct obd_device *mdc_obd; int rc; @@ -623,24 +701,13 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) mdc_obd->obd_no_recov = obd->obd_no_recov; } -#ifdef __KERNEL__ - lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds"); - if (lmv_proc_dir) { - struct proc_dir_entry *mdc_symlink; + if (lmv->targets_proc_entry != NULL) + lprocfs_remove_proc_entry(mdc_obd->obd_name, + lmv->targets_proc_entry); - mdc_symlink = lprocfs_srch(lmv_proc_dir, mdc_obd->obd_name); - if (mdc_symlink) { - lprocfs_remove(&mdc_symlink); - } else { - CERROR("/proc/fs/lustre/%s/%s/target_obds/%s missing\n", - obd->obd_type->typ_name, obd->obd_name, - mdc_obd->obd_name); - } - } -#endif - rc = obd_fid_fini(tgt->ltd_exp); - if (rc) - CERROR("Can't finanize fids factory\n"); + rc = obd_fid_fini(tgt->ltd_exp->exp_obd); + if (rc) + CERROR("Can't finanize fids factory\n"); CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n", tgt->ltd_exp->exp_obd->obd_name, @@ -662,14 +729,11 @@ static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt) static int lmv_disconnect(struct obd_export *exp) { - struct obd_device *obd = class_exp2obd(exp); -#ifdef __KERNEL__ - struct proc_dir_entry *lmv_proc_dir; -#endif - struct lmv_obd *lmv = &obd->u.lmv; - int rc; - int i; - ENTRY; + struct obd_device *obd = class_exp2obd(exp); + struct lmv_obd *lmv = &obd->u.lmv; + int rc; + __u32 i; + ENTRY; if (!lmv->tgts) goto out_local; @@ -682,20 +746,17 @@ static int lmv_disconnect(struct obd_export *exp) goto out_local; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i].ltd_exp == NULL) + if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) continue; - lmv_disconnect_mdc(obd, &lmv->tgts[i]); - } -#ifdef __KERNEL__ - lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds"); - if (lmv_proc_dir) { - lprocfs_remove(&lmv_proc_dir); - } else { - CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n", - obd->obd_type->typ_name, obd->obd_name); + lmv_disconnect_mdc(obd, lmv->tgts[i]); } -#endif + + if (lmv->targets_proc_entry != NULL) + lprocfs_remove(&lmv->targets_proc_entry); + else + CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n", + obd->obd_type->typ_name, obd->obd_name); out_local: /* @@ -710,16 +771,248 @@ out_local: RETURN(rc); } +static int 
lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg) +{ + struct obd_device *obddev = class_exp2obd(exp); + struct lmv_obd *lmv = &obddev->u.lmv; + struct getinfo_fid2path *gf; + struct lmv_tgt_desc *tgt; + struct getinfo_fid2path *remote_gf = NULL; + int remote_gf_size = 0; + int rc; + + gf = (struct getinfo_fid2path *)karg; + tgt = lmv_find_target(lmv, &gf->gf_fid); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); + +repeat_fid2path: + rc = obd_iocontrol(OBD_IOC_FID2PATH, tgt->ltd_exp, len, gf, uarg); + if (rc != 0 && rc != -EREMOTE) + GOTO(out_fid2path, rc); + + /* If remote_gf != NULL, it means just building the + * path on the remote MDT, copy this path segement to gf */ + if (remote_gf != NULL) { + struct getinfo_fid2path *ori_gf; + char *ptr; + + ori_gf = (struct getinfo_fid2path *)karg; + if (strlen(ori_gf->gf_path) + + strlen(gf->gf_path) > ori_gf->gf_pathlen) + GOTO(out_fid2path, rc = -EOVERFLOW); + + ptr = ori_gf->gf_path; + + memmove(ptr + strlen(gf->gf_path) + 1, ptr, + strlen(ori_gf->gf_path)); + + strncpy(ptr, gf->gf_path, strlen(gf->gf_path)); + ptr += strlen(gf->gf_path); + *ptr = '/'; + } + + CDEBUG(D_INFO, "%s: get path %s "DFID" rec: "LPU64" ln: %u\n", + tgt->ltd_exp->exp_obd->obd_name, + gf->gf_path, PFID(&gf->gf_fid), gf->gf_recno, + gf->gf_linkno); + + if (rc == 0) + GOTO(out_fid2path, rc); + + /* sigh, has to go to another MDT to do path building further */ + if (remote_gf == NULL) { + remote_gf_size = sizeof(*remote_gf) + PATH_MAX; + OBD_ALLOC(remote_gf, remote_gf_size); + if (remote_gf == NULL) + GOTO(out_fid2path, rc = -ENOMEM); + remote_gf->gf_pathlen = PATH_MAX; + } + + if (!fid_is_sane(&gf->gf_fid)) { + CERROR("%s: invalid FID "DFID": rc = %d\n", + tgt->ltd_exp->exp_obd->obd_name, + PFID(&gf->gf_fid), -EINVAL); + GOTO(out_fid2path, rc = -EINVAL); + } + + tgt = lmv_find_target(lmv, &gf->gf_fid); + if (IS_ERR(tgt)) + GOTO(out_fid2path, rc = -EINVAL); + + remote_gf->gf_fid = gf->gf_fid; + remote_gf->gf_recno = -1; + remote_gf->gf_linkno = -1; + memset(remote_gf->gf_path, 0, remote_gf->gf_pathlen); + gf = remote_gf; + goto repeat_fid2path; + +out_fid2path: + if (remote_gf != NULL) + OBD_FREE(remote_gf, remote_gf_size); + RETURN(rc); +} + +static int lmv_hsm_req_count(struct lmv_obd *lmv, + const struct hsm_user_request *hur, + const struct lmv_tgt_desc *tgt_mds) +{ + __u32 i; + int nr = 0; + struct lmv_tgt_desc *curr_tgt; + + /* count how many requests must be sent to the given target */ + for (i = 0; i < hur->hur_request.hr_itemcount; i++) { + curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid); + if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) + nr++; + } + return nr; +} + +static void lmv_hsm_req_build(struct lmv_obd *lmv, + struct hsm_user_request *hur_in, + const struct lmv_tgt_desc *tgt_mds, + struct hsm_user_request *hur_out) +{ + __u32 i, nr_out; + struct lmv_tgt_desc *curr_tgt; + + /* build the hsm_user_request for the given target */ + hur_out->hur_request = hur_in->hur_request; + nr_out = 0; + for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) { + curr_tgt = lmv_find_target(lmv, + &hur_in->hur_user_item[i].hui_fid); + if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) { + hur_out->hur_user_item[nr_out] = + hur_in->hur_user_item[i]; + nr_out++; + } + } + hur_out->hur_request.hr_itemcount = nr_out; + memcpy(hur_data(hur_out), hur_data(hur_in), + hur_in->hur_request.hr_data_len); +} + +static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len, + struct lustre_kernelcomm *lk, void 
*uarg) +{ + __u32 i; + int rc; + struct kkuc_ct_data *kcd = NULL; + ENTRY; + + /* unregister request (call from llapi_hsm_copytool_fini) */ + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + struct lmv_tgt_desc *tgt = lmv->tgts[i]; + + if (tgt == NULL || tgt->ltd_exp == NULL) + continue; + /* best effort: try to clean as much as possible + * (continue on error) */ + obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg); + } + + /* Whatever the result, remove copytool from kuc groups. + * Unreached coordinators will get EPIPE on next requests + * and will unregister automatically. + */ + rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group, (void **)&kcd); + if (kcd != NULL) + OBD_FREE_PTR(kcd); + + RETURN(rc); +} + +static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len, + struct lustre_kernelcomm *lk, void *uarg) +{ + struct file *filp; + __u32 i, j; + int err, rc; + bool any_set = false; + struct kkuc_ct_data *kcd; + ENTRY; + + /* All or nothing: try to register to all MDS. + * In case of failure, unregister from previous MDS, + * except if it because of inactive target. */ + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + struct lmv_tgt_desc *tgt = lmv->tgts[i]; + + if (tgt == NULL || tgt->ltd_exp == NULL) + continue; + err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg); + if (err) { + if (tgt->ltd_active) { + /* permanent error */ + CERROR("%s: iocontrol MDC %s on MDT" + " idx %d cmd %x: err = %d\n", + class_exp2obd(lmv->exp)->obd_name, + tgt->ltd_uuid.uuid, i, cmd, err); + rc = err; + lk->lk_flags |= LK_FLG_STOP; + /* unregister from previous MDS */ + for (j = 0; j < i; j++) { + tgt = lmv->tgts[j]; + if (tgt == NULL || tgt->ltd_exp == NULL) + continue; + obd_iocontrol(cmd, tgt->ltd_exp, len, + lk, uarg); + } + RETURN(rc); + } + /* else: transient error. 
+ * kuc will register to the missing MDT + * when it is back */ + } else { + any_set = true; + } + } + + if (!any_set) + /* no registration done: return error */ + RETURN(-ENOTCONN); + + /* at least one registration done, with no failure */ + filp = fget(lk->lk_wfd); + if (filp == NULL) + RETURN(-EBADF); + + OBD_ALLOC_PTR(kcd); + if (kcd == NULL) { + fput(filp); + RETURN(-ENOMEM); + } + kcd->kcd_magic = KKUC_CT_DATA_MAGIC; + kcd->kcd_uuid = lmv->cluuid; + kcd->kcd_archive = lk->lk_data; + + rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group, kcd); + if (rc != 0) { + if (filp != NULL) + fput(filp); + OBD_FREE_PTR(kcd); + } + + RETURN(rc); +} + + + + static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, int len, void *karg, void *uarg) { - struct obd_device *obddev = class_exp2obd(exp); - struct lmv_obd *lmv = &obddev->u.lmv; - int i = 0; - int rc = 0; - int set = 0; - int count = lmv->desc.ld_tgt_count; - ENTRY; + struct obd_device *obddev = class_exp2obd(exp); + struct lmv_obd *lmv = &obddev->u.lmv; + struct lmv_tgt_desc *tgt = NULL; + __u32 i = 0; + int rc = 0; + int set = 0; + __u32 count = lmv->desc.ld_tgt_count; + ENTRY; if (count == 0) RETURN(-ENOTTY); @@ -735,48 +1028,50 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if ((index >= count)) RETURN(-ENODEV); - if (!lmv->tgts[index].ltd_active) - RETURN(-ENODATA); - - mdc_obd = class_exp2obd(lmv->tgts[index].ltd_exp); - if (!mdc_obd) - RETURN(-EINVAL); - - /* copy UUID */ - if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd), - min((int) data->ioc_plen2, - (int) sizeof(struct obd_uuid)))) - RETURN(-EFAULT); - - rc = obd_statfs(NULL, lmv->tgts[index].ltd_exp, &stat_buf, - cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), - 0); - if (rc) - RETURN(rc); - if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf, - min((int) data->ioc_plen1, - (int) sizeof(stat_buf)))) - RETURN(-EFAULT); - break; + tgt = lmv->tgts[index]; + if (tgt == NULL || !tgt->ltd_active) + RETURN(-ENODATA); + + mdc_obd = class_exp2obd(tgt->ltd_exp); + if (!mdc_obd) + RETURN(-EINVAL); + + /* copy UUID */ + if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd), + min((int) data->ioc_plen2, + (int) sizeof(struct obd_uuid)))) + RETURN(-EFAULT); + + rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf, + cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS), + 0); + if (rc) + RETURN(rc); + if (copy_to_user(data->ioc_pbuf1, &stat_buf, + min((int) data->ioc_plen1, + (int) sizeof(stat_buf)))) + RETURN(-EFAULT); + break; } case OBD_IOC_QUOTACTL: { struct if_quotactl *qctl = karg; - struct lmv_tgt_desc *tgt = NULL; struct obd_quotactl *oqctl; - if (qctl->qc_valid == QC_MDTIDX) { - if (qctl->qc_idx < 0 || count <= qctl->qc_idx) - RETURN(-EINVAL); - - tgt = &lmv->tgts[qctl->qc_idx]; - if (!tgt->ltd_exp) - RETURN(-EINVAL); - } else if (qctl->qc_valid == QC_UUID) { - for (i = 0; i < count; i++) { - tgt = &lmv->tgts[i]; - if (!obd_uuid_equals(&tgt->ltd_uuid, - &qctl->obd_uuid)) - continue; + if (qctl->qc_valid == QC_MDTIDX) { + if (count <= qctl->qc_idx) + RETURN(-EINVAL); + + tgt = lmv->tgts[qctl->qc_idx]; + if (tgt == NULL || tgt->ltd_exp == NULL) + RETURN(-EINVAL); + } else if (qctl->qc_valid == QC_UUID) { + for (i = 0; i < count; i++) { + tgt = lmv->tgts[i]; + if (tgt == NULL) + continue; + if (!obd_uuid_equals(&tgt->ltd_uuid, + &qctl->obd_uuid)) + continue; if (tgt->ltd_exp == NULL) RETURN(-EINVAL); @@ -790,7 +1085,7 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (i >= count) RETURN(-EAGAIN); - LASSERT(tgt && tgt->ltd_exp); + 
LASSERT(tgt != NULL && tgt->ltd_exp != NULL); OBD_ALLOC_PTR(oqctl); if (!oqctl) RETURN(-ENOMEM); @@ -812,49 +1107,173 @@ static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp, if (icc->icc_mdtindex >= count) RETURN(-ENODEV); - rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex].ltd_exp, - sizeof(*icc), icc, NULL); - break; - } - case LL_IOC_GET_CONNECT_FLAGS: { - rc = obd_iocontrol(cmd, lmv->tgts[0].ltd_exp, len, karg, uarg); - break; - } + tgt = lmv->tgts[icc->icc_mdtindex]; + if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) + RETURN(-ENODEV); + rc = obd_iocontrol(cmd, tgt->ltd_exp, sizeof(*icc), icc, NULL); + break; + } + case LL_IOC_GET_CONNECT_FLAGS: { + tgt = lmv->tgts[0]; + if (tgt == NULL || tgt->ltd_exp == NULL) + RETURN(-ENODATA); + rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); + break; + } + case LL_IOC_FID2MDTIDX: { + struct lu_fid *fid = karg; + int mdt_index; + + rc = lmv_fld_lookup(lmv, fid, &mdt_index); + if (rc != 0) + RETURN(rc); + + /* Note: this is from llite(see ll_dir_ioctl()), @uarg does not + * point to user space memory for FID2MDTIDX. */ + *(__u32 *)uarg = mdt_index; + break; + } + case OBD_IOC_FID2PATH: { + rc = lmv_fid2path(exp, len, karg, uarg); + break; + } + case LL_IOC_HSM_STATE_GET: + case LL_IOC_HSM_STATE_SET: + case LL_IOC_HSM_ACTION: { + struct md_op_data *op_data = karg; - default : { - for (i = 0; i < count; i++) { - int err; - struct obd_device *mdc_obd; - - if (lmv->tgts[i].ltd_exp == NULL) - continue; - /* ll_umount_begin() sets force flag but for lmv, not - * mdc. Let's pass it through */ - mdc_obd = class_exp2obd(lmv->tgts[i].ltd_exp); - mdc_obd->obd_force = obddev->obd_force; - err = obd_iocontrol(cmd, lmv->tgts[i].ltd_exp, len, - karg, uarg); - if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) { - RETURN(err); - } else if (err) { - if (lmv->tgts[i].ltd_active) { - CERROR("error: iocontrol MDC %s on MDT" - "idx %d cmd %x: err = %d\n", - lmv->tgts[i].ltd_uuid.uuid, - i, cmd, err); - if (!rc) - rc = err; - } - } else - set = 1; - } + tgt = lmv_find_target(lmv, &op_data->op_fid1); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); + + if (tgt->ltd_exp == NULL) + RETURN(-EINVAL); + + rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); + break; + } + case LL_IOC_HSM_PROGRESS: { + const struct hsm_progress_kernel *hpk = karg; + + tgt = lmv_find_target(lmv, &hpk->hpk_fid); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); + rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); + break; + } + case LL_IOC_HSM_REQUEST: { + struct hsm_user_request *hur = karg; + unsigned int reqcount = hur->hur_request.hr_itemcount; + + if (reqcount == 0) + RETURN(0); + + /* if the request is about a single fid + * or if there is a single MDS, no need to split + * the request. 
*/ + if (reqcount == 1 || count == 1) { + tgt = lmv_find_target(lmv, + &hur->hur_user_item[0].hui_fid); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); + rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); + } else { + /* split fid list to their respective MDS */ + for (i = 0; i < count; i++) { + unsigned int nr, reqlen; + int rc1; + struct hsm_user_request *req; + + tgt = lmv->tgts[i]; + if (tgt == NULL || tgt->ltd_exp == NULL) + continue; + + nr = lmv_hsm_req_count(lmv, hur, tgt); + if (nr == 0) /* nothing for this MDS */ + continue; + + /* build a request with fids for this MDS */ + reqlen = offsetof(typeof(*hur), + hur_user_item[nr]) + + hur->hur_request.hr_data_len; + OBD_ALLOC_LARGE(req, reqlen); + if (req == NULL) + RETURN(-ENOMEM); + + lmv_hsm_req_build(lmv, hur, tgt, req); + + rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen, + req, uarg); + if (rc1 != 0 && rc == 0) + rc = rc1; + OBD_FREE_LARGE(req, reqlen); + } + } + break; + } + case LL_IOC_LOV_SWAP_LAYOUTS: { + struct md_op_data *op_data = karg; + struct lmv_tgt_desc *tgt1, *tgt2; + + tgt1 = lmv_find_target(lmv, &op_data->op_fid1); + if (IS_ERR(tgt1)) + RETURN(PTR_ERR(tgt1)); + + tgt2 = lmv_find_target(lmv, &op_data->op_fid2); + if (IS_ERR(tgt2)) + RETURN(PTR_ERR(tgt2)); + + if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL)) + RETURN(-EINVAL); + + /* only files on same MDT can have their layouts swapped */ + if (tgt1->ltd_idx != tgt2->ltd_idx) + RETURN(-EPERM); + + rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg); + break; + } + case LL_IOC_HSM_CT_START: { + struct lustre_kernelcomm *lk = karg; + if (lk->lk_flags & LK_FLG_STOP) + rc = lmv_hsm_ct_unregister(lmv, cmd, len, lk, uarg); + else + rc = lmv_hsm_ct_register(lmv, cmd, len, lk, uarg); + break; + } + default: + for (i = 0; i < count; i++) { + struct obd_device *mdc_obd; + int err; + + tgt = lmv->tgts[i]; + if (tgt == NULL || tgt->ltd_exp == NULL) + continue; + /* ll_umount_begin() sets force flag but for lmv, not + * mdc. Let's pass it through */ + mdc_obd = class_exp2obd(tgt->ltd_exp); + mdc_obd->obd_force = obddev->obd_force; + err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg); + if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) { + RETURN(err); + } else if (err) { + if (tgt->ltd_active) { + CERROR("error: iocontrol MDC %s on MDT" + " idx %d cmd %x: err = %d\n", + tgt->ltd_uuid.uuid, i, cmd, err); + if (!rc) + rc = err; + } + } else + set = 1; + } if (!set && !rc) rc = -EIO; } - } RETURN(rc); } +#if 0 static int lmv_all_chars_policy(int count, const char *name, int len) { @@ -897,6 +1316,7 @@ static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data, CERROR("Unsupported placement policy %x\n", placement); return -EINVAL; } +#endif /** * This is _inode_ placement policy function (not name). @@ -905,95 +1325,70 @@ static int lmv_placement_policy(struct obd_device *obd, struct md_op_data *op_data, mdsno_t *mds) { - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_object *obj; - int rc; - ENTRY; - - LASSERT(mds != NULL); - - if (lmv->desc.ld_tgt_count == 1) { - *mds = 0; - RETURN(0); - } + struct lmv_obd *lmv = &obd->u.lmv; + ENTRY; - /* - * Allocate new fid on target according to operation type and parent - * home mds. - */ - obj = lmv_object_find(obd, &op_data->op_fid1); - if (obj != NULL || op_data->op_name == NULL || - op_data->op_opc != LUSTRE_OPC_MKDIR) { - /* - * Allocate fid for non-dir or for null name or for case parent - * dir is split. 
- */ - if (obj) { - lmv_object_put(obj); - - /* - * If we have this flag turned on, and we see that - * parent dir is split, this means, that caller did not - * notice split yet. This is race and we would like to - * let caller know that. - */ - if (op_data->op_bias & MDS_CHECK_SPLIT) - RETURN(-ERESTART); - } + LASSERT(mds != NULL); - /* - * Allocate new fid on same mds where parent fid is located and - * where operation will be sent. In case of split dir, ->op_fid1 - * and ->op_mds here will contain fid and mds of slave directory - * object (assigned by caller). - */ - *mds = op_data->op_mds; - rc = 0; - } else { - /* - * Parent directory is not split and we want to create a - * directory in it. Let's calculate where to place it according - * to operation data @op_data. - */ - *mds = lmv_choose_mds(lmv, op_data, lmv->lmv_placement); - rc = 0; - } + if (lmv->desc.ld_tgt_count == 1) { + *mds = 0; + RETURN(0); + } - if (rc) { - CERROR("Can't choose MDS, err = %d\n", rc); - } else { - LASSERT(*mds < lmv->desc.ld_tgt_count); - } + /** + * If stripe_offset is provided during setdirstripe + * (setdirstripe -i xx), xx MDS will be choosen. + */ + if (op_data->op_cli_flags & CLI_SET_MEA && op_data->op_data != NULL) { + struct lmv_user_md *lum; + + lum = op_data->op_data; + + if (le32_to_cpu(lum->lum_stripe_offset) != (__u32)-1) { + *mds = le32_to_cpu(lum->lum_stripe_offset); + } else { + /* -1 means default, which will be in the same MDT with + * the stripe */ + *mds = op_data->op_mds; + lum->lum_stripe_offset = cpu_to_le32(op_data->op_mds); + } + } else { + /* Allocate new fid on target according to operation type and + * parent home mds. */ + *mds = op_data->op_mds; + } - RETURN(rc); + RETURN(0); } int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, - mdsno_t mds) + mdsno_t mds) { - struct lmv_tgt_desc *tgt; - int rc; - ENTRY; + struct lmv_tgt_desc *tgt; + int rc; + ENTRY; - tgt = lmv_get_target(lmv, mds); + tgt = lmv_get_target(lmv, mds, NULL); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - /* - * New seq alloc and FLD setup should be atomic. Otherwise we may find - * on server that seq in new allocated fid is not yet known. - */ + /* + * New seq alloc and FLD setup should be atomic. Otherwise we may find + * on server that seq in new allocated fid is not yet known. + */ mutex_lock(&tgt->ltd_fid_mutex); - if (!tgt->ltd_active) - GOTO(out, rc = -ENODEV); + if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL) + GOTO(out, rc = -ENODEV); - /* - * Asking underlaying tgt layer to allocate new fid. - */ - rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL); - if (rc > 0) { - LASSERT(fid_is_sane(fid)); - rc = 0; - } + /* + * Asking underlying tgt layer to allocate new fid. 
+ */ + rc = obd_fid_alloc(NULL, tgt->ltd_exp, fid, NULL); + if (rc > 0) { + LASSERT(fid_is_sane(fid)); + rc = 0; + } EXIT; out: @@ -1001,8 +1396,8 @@ out: return rc; } -int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid, - struct md_op_data *op_data) +int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp, + struct lu_fid *fid, struct md_op_data *op_data) { struct obd_device *obd = class_exp2obd(exp); struct lmv_obd *lmv = &obd->u.lmv; @@ -1031,12 +1426,10 @@ int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid, static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg) { - struct lmv_obd *lmv = &obd->u.lmv; - struct lprocfs_static_vars lvars; - struct lmv_desc *desc; - int rc; - int i = 0; - ENTRY; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_desc *desc; + int rc; + ENTRY; if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) { CERROR("LMV setup requires a descriptor\n"); @@ -1050,115 +1443,105 @@ static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg) RETURN(-EINVAL); } - lmv->tgts_size = LMV_MAX_TGT_COUNT * sizeof(struct lmv_tgt_desc); + OBD_ALLOC(lmv->tgts, sizeof(*lmv->tgts) * 32); + if (lmv->tgts == NULL) + RETURN(-ENOMEM); + lmv->tgts_size = 32; - OBD_ALLOC(lmv->tgts, lmv->tgts_size); - if (lmv->tgts == NULL) - RETURN(-ENOMEM); - - for (i = 0; i < LMV_MAX_TGT_COUNT; i++) { - mutex_init(&lmv->tgts[i].ltd_fid_mutex); - lmv->tgts[i].ltd_idx = i; - } - - lmv->datas_size = LMV_MAX_TGT_COUNT * sizeof(struct obd_connect_data); - - OBD_ALLOC(lmv->datas, lmv->datas_size); - if (lmv->datas == NULL) - GOTO(out_free_tgts, rc = -ENOMEM); - - obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid); - lmv->desc.ld_tgt_count = 0; - lmv->desc.ld_active_tgt_count = 0; - lmv->max_cookiesize = 0; - lmv->max_def_easize = 0; - lmv->max_easize = 0; - lmv->lmv_placement = PLACEMENT_CHAR_POLICY; + obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid); + lmv->desc.ld_tgt_count = 0; + lmv->desc.ld_active_tgt_count = 0; + lmv->max_cookiesize = 0; + lmv->max_def_easize = 0; + lmv->max_easize = 0; + lmv->lmv_placement = PLACEMENT_CHAR_POLICY; spin_lock_init(&lmv->lmv_lock); mutex_init(&lmv->init_mutex); - rc = lmv_object_setup(obd); - if (rc) { - CERROR("Can't setup LMV object manager, error %d.\n", rc); - GOTO(out_free_datas, rc); - } - - lprocfs_lmv_init_vars(&lvars); - lprocfs_obd_setup(obd, lvars.obd_vars); #ifdef LPROCFS - { - rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd", - 0444, &lmv_proc_target_fops, obd); - if (rc) - CWARN("%s: error adding LMV target_obd file: rc = %d\n", - obd->obd_name, rc); - } + obd->obd_vars = lprocfs_lmv_obd_vars; + lprocfs_seq_obd_setup(obd); + lprocfs_alloc_md_stats(obd, 0); + rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd", + 0444, &lmv_proc_target_fops, obd); + if (rc) + CWARN("%s: error adding LMV target_obd file: rc = %d\n", + obd->obd_name, rc); #endif - rc = fld_client_init(&lmv->lmv_fld, obd->obd_name, - LUSTRE_CLI_FLD_HASH_DHT); - if (rc) { - CERROR("Can't init FLD, err %d\n", rc); - GOTO(out_free_datas, rc); - } + rc = fld_client_init(&lmv->lmv_fld, obd->obd_name, + LUSTRE_CLI_FLD_HASH_DHT); + if (rc) { + CERROR("Can't init FLD, err %d\n", rc); + GOTO(out, rc); + } RETURN(0); -out_free_datas: - OBD_FREE(lmv->datas, lmv->datas_size); - lmv->datas = NULL; -out_free_tgts: - OBD_FREE(lmv->tgts, lmv->tgts_size); - lmv->tgts = NULL; +out: return rc; } static int lmv_cleanup(struct obd_device *obd) { - struct lmv_obd *lmv = &obd->u.lmv; - ENTRY; - - fld_client_fini(&lmv->lmv_fld); - lmv_object_cleanup(obd); - 
OBD_FREE(lmv->datas, lmv->datas_size); - OBD_FREE(lmv->tgts, lmv->tgts_size); + struct lmv_obd *lmv = &obd->u.lmv; + ENTRY; - RETURN(0); + fld_client_fini(&lmv->lmv_fld); + if (lmv->tgts != NULL) { + int i; + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + if (lmv->tgts[i] == NULL) + continue; + lmv_del_target(lmv, i); + } + OBD_FREE(lmv->tgts, sizeof(*lmv->tgts) * lmv->tgts_size); + lmv->tgts_size = 0; + } + RETURN(0); } static int lmv_process_config(struct obd_device *obd, obd_count len, void *buf) { - struct lustre_cfg *lcfg = buf; - struct obd_uuid tgt_uuid; - int rc; - ENTRY; + struct lustre_cfg *lcfg = buf; + struct obd_uuid obd_uuid; + int gen; + __u32 index; + int rc; + ENTRY; - switch(lcfg->lcfg_command) { - case LCFG_ADD_MDC: - if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(tgt_uuid.uuid)) - GOTO(out, rc = -EINVAL); - - obd_str2uuid(&tgt_uuid, lustre_cfg_string(lcfg, 1)); - rc = lmv_add_target(obd, &tgt_uuid); - GOTO(out, rc); - default: { - CERROR("Unknown command: %d\n", lcfg->lcfg_command); - GOTO(out, rc = -EINVAL); - } - } + switch (lcfg->lcfg_command) { + case LCFG_ADD_MDC: + /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID + * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */ + if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid)) + GOTO(out, rc = -EINVAL); + + obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1)); + + if (sscanf(lustre_cfg_buf(lcfg, 2), "%u", &index) != 1) + GOTO(out, rc = -EINVAL); + if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", &gen) != 1) + GOTO(out, rc = -EINVAL); + rc = lmv_add_target(obd, &obd_uuid, index, gen); + GOTO(out, rc); + default: + CERROR("Unknown command: %d\n", lcfg->lcfg_command); + GOTO(out, rc = -EINVAL); + } out: - RETURN(rc); + RETURN(rc); } static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, struct obd_statfs *osfs, __u64 max_age, __u32 flags) { - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - struct obd_statfs *temp; - int rc = 0; - int i; - ENTRY; + struct obd_device *obd = class_exp2obd(exp); + struct lmv_obd *lmv = &obd->u.lmv; + struct obd_statfs *temp; + int rc = 0; + __u32 i; + ENTRY; rc = lmv_check_connect(obd); if (rc) @@ -1169,19 +1552,28 @@ static int lmv_statfs(const struct lu_env *env, struct obd_export *exp, RETURN(-ENOMEM); for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (lmv->tgts[i].ltd_exp == NULL) - continue; + if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + continue; - rc = obd_statfs(env, lmv->tgts[i].ltd_exp, temp, - max_age, flags); - if (rc) { - CERROR("can't stat MDS #%d (%s), error %d\n", i, - lmv->tgts[i].ltd_exp->exp_obd->obd_name, - rc); - GOTO(out_free_temp, rc); - } - if (i == 0) { - *osfs = *temp; + rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp, + max_age, flags); + if (rc) { + CERROR("can't stat MDS #%d (%s), error %d\n", i, + lmv->tgts[i]->ltd_exp->exp_obd->obd_name, + rc); + GOTO(out_free_temp, rc); + } + + if (i == 0) { + *osfs = *temp; + /* If the statfs is from mount, it will needs + * retrieve necessary information from MDT0. + * i.e. mount does not need the merged osfs + * from all of MDT. 
+ * And also clients can be mounted as long as + * MDT0 is in service*/ + if (flags & OBD_STATFS_FOR_MDT0) + GOTO(out_free_temp, rc); } else { osfs->os_bavail += temp->os_bavail; osfs->os_blocks += temp->os_blocks; @@ -1209,8 +1601,8 @@ static int lmv_getstatus(struct obd_export *exp, if (rc) RETURN(rc); - rc = md_getstatus(lmv->tgts[0].ltd_exp, fid, pc); - RETURN(rc); + rc = md_getstatus(lmv->tgts[0]->ltd_exp, fid, pc); + RETURN(rc); } static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid, @@ -1271,9 +1663,7 @@ static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data, struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; struct lmv_tgt_desc *tgt; - struct lmv_object *obj; int rc; - int i; ENTRY; rc = lmv_check_connect(obd); @@ -1290,61 +1680,15 @@ static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data, } rc = md_getattr(tgt->ltd_exp, op_data, request); - if (rc) - RETURN(rc); - - obj = lmv_object_find_lock(obd, &op_data->op_fid1); - - CDEBUG(D_INODE, "GETATTR for "DFID" %s\n", PFID(&op_data->op_fid1), - obj ? "(split)" : ""); - - /* - * If object is split, then we loop over all the slaves and gather size - * attribute. In ideal world we would have to gather also mds field from - * all slaves, as object is spread over the cluster and this is - * definitely interesting information and it is not good to loss it, - * but... - */ - if (obj) { - struct mdt_body *body; - - if (*request == NULL) { - lmv_object_put(obj); - RETURN(rc); - } - - body = req_capsule_server_get(&(*request)->rq_pill, - &RMF_MDT_BODY); - LASSERT(body != NULL); - - for (i = 0; i < obj->lo_objcount; i++) { - if (lmv->tgts[i].ltd_exp == NULL) { - CWARN("%s: NULL export for %d\n", - obd->obd_name, i); - continue; - } - - /* - * Skip master object. - */ - if (lu_fid_eq(&obj->lo_fid, &obj->lo_stripes[i].ls_fid)) - continue; - - body->size += obj->lo_stripes[i].ls_size; - } - - lmv_object_put_unlock(obj); - } RETURN(rc); } -static int lmv_change_cbdata(struct obd_export *exp, const struct lu_fid *fid, - ldlm_iterator_t it, void *data) +static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid) { struct obd_device *obd = exp->exp_obd; struct lmv_obd *lmv = &obd->u.lmv; - int i; + __u32 i; int rc; ENTRY; @@ -1354,44 +1698,61 @@ static int lmv_change_cbdata(struct obd_export *exp, const struct lu_fid *fid, CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid)); - /* - * With CMD every object can have two locks in different namespaces: - * lookup lock in space of mds storing direntry and update/open lock in - * space of mds storing inode. - */ - for (i = 0; i < lmv->desc.ld_tgt_count; i++) - md_change_cbdata(lmv->tgts[i].ltd_exp, fid, it, data); + /* + * With DNE every object can have two locks in different namespaces: + * lookup lock in space of MDT storing direntry and update/open lock in + * space of MDT storing inode. 
+ */ + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL) + continue; + md_null_inode(lmv->tgts[i]->ltd_exp, fid); + } - RETURN(0); + RETURN(0); } static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid, ldlm_iterator_t it, void *data) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - int i; - int rc; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + int i; + int tgt; + int rc; + ENTRY; - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); + + CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid)); + + /* + * With DNE every object can have two locks in different namespaces: + * lookup lock in space of MDT storing direntry and update/open lock in + * space of MDT storing inode. Try the MDT that the FID maps to first, + * since this can be easily found, and only try others if that fails. + */ + for (i = 0, tgt = lmv_find_target_index(lmv, fid); + i < lmv->desc.ld_tgt_count; + i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) { + if (tgt < 0) { + CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n", + obd->obd_name, PFID(fid), tgt); + tgt = 0; + } - CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid)); + if (lmv->tgts[tgt] == NULL || + lmv->tgts[tgt]->ltd_exp == NULL) + continue; - /* - * With CMD every object can have two locks in different namespaces: - * lookup lock in space of mds storing direntry and update/open lock in - * space of mds storing inode. - */ - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - rc = md_find_cbdata(lmv->tgts[i].ltd_exp, fid, it, data); - if (rc) - RETURN(rc); - } + rc = md_find_cbdata(lmv->tgts[tgt]->ltd_exp, fid, it, data); + if (rc) + RETURN(rc); + } - RETURN(rc); + RETURN(rc); } @@ -1418,72 +1779,94 @@ static int lmv_close(struct obd_export *exp, struct md_op_data *op_data, } /** - * Called in the case MDS returns -ERESTART on create on open, what means that - * directory is split and its LMV presentation object has to be updated. - */ -int lmv_handle_split(struct obd_export *exp, const struct lu_fid *fid) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct ptlrpc_request *req = NULL; - struct lmv_tgt_desc *tgt; - struct lmv_object *obj; - struct lustre_md md; - struct md_op_data *op_data; - int mealen; - int rc; - __u64 valid; - ENTRY; - - md.mea = NULL; - mealen = lmv_get_easize(lmv); - - valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA | OBD_MD_MEA; - - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); - - /* - * Time to update mea of parent fid. - */ - - OBD_ALLOC_PTR(op_data); - if (op_data == NULL) - RETURN(-ENOMEM); + * Choosing the MDT by name or FID in @op_data. + * For non-striped directory, it will locate MDT by fid. + * For striped-directory, it will locate MDT by name. And also + * it will reset op_fid1 with the FID of the choosen stripe. 
+ **/ +struct lmv_tgt_desc * +lmv_locate_target_for_name(struct lmv_obd *lmv, struct lmv_stripe_md *lsm, + const char *name, int namelen, struct lu_fid *fid, + mdsno_t *mds) +{ + struct lmv_tgt_desc *tgt; + const struct lmv_oinfo *oinfo; + + oinfo = lsm_name_to_stripe_info(lsm, name, namelen); + if (IS_ERR(oinfo)) + RETURN(ERR_CAST(oinfo)); + *fid = oinfo->lmo_fid; + *mds = oinfo->lmo_mds; + tgt = lmv_get_target(lmv, *mds, NULL); + + CDEBUG(D_INFO, "locate on mds %u "DFID"\n", *mds, PFID(fid)); + return tgt; +} - op_data->op_fid1 = *fid; - op_data->op_mode = mealen; - op_data->op_valid = valid; +/** + * Locate mds by fid or name + * + * For striped directory (lsm != NULL), it will locate the stripe + * by name hash (see lsm_name_to_stripe_info()). Note: if the hash_type + * is unknown, it will return -EBADFD, and lmv_intent_lookup might need + * walk through all of stripes to locate the entry. + * + * For normal direcotry, it will locate MDS by FID directly. + * \param[in] lmv LMV device + * \param[in] op_data client MD stack parameters, name, namelen + * mds_num etc. + * \param[in] fid object FID used to locate MDS. + * + * retval pointer to the lmv_tgt_desc if succeed. + * ERR_PTR(errno) if failed. + */ +struct lmv_tgt_desc* +lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data, + struct lu_fid *fid) +{ + struct lmv_stripe_md *lsm = op_data->op_mea1; + struct lmv_tgt_desc *tgt; + + /* During creating VOLATILE file, it should honor the mdt + * index if the file under striped dir is being restored, see + * ct_restore(). */ + if (op_data->op_bias & MDS_CREATE_VOLATILE && + (int)op_data->op_mds != -1 && lsm != NULL) { + int i; + tgt = lmv_get_target(lmv, op_data->op_mds, NULL); + if (IS_ERR(tgt)) + return tgt; + + /* refill the right parent fid */ + for (i = 0; i < lsm->lsm_md_stripe_count; i++) { + struct lmv_oinfo *oinfo; + + oinfo = &lsm->lsm_md_oinfo[i]; + if (oinfo->lmo_mds == op_data->op_mds) { + *fid = oinfo->lmo_fid; + break; + } + } - rc = md_getattr(tgt->ltd_exp, op_data, &req); - OBD_FREE_PTR(op_data); - if (rc) { - CERROR("md_getattr() failed, error %d\n", rc); - GOTO(cleanup, rc); - } + /* Hmm, can not find the stripe by mdt_index(op_mds) */ + if (i == lsm->lsm_md_stripe_count) + tgt = ERR_PTR(-EINVAL); - rc = md_get_lustre_md(tgt->ltd_exp, req, NULL, exp, &md); - if (rc) { - CERROR("md_get_lustre_md() failed, error %d\n", rc); - GOTO(cleanup, rc); - } + return tgt; + } - if (md.mea == NULL) - GOTO(cleanup, rc = -ENODATA); + if (lsm == NULL || op_data->op_namelen == 0) { + tgt = lmv_find_target(lmv, fid); + if (IS_ERR(tgt)) + return tgt; - obj = lmv_object_create(exp, fid, md.mea); - if (IS_ERR(obj)) - rc = PTR_ERR(obj); - else - lmv_object_put(obj); + op_data->op_mds = tgt->ltd_idx; + return tgt; + } - obd_free_memmd(exp, (void *)&md.mea); - EXIT; -cleanup: - if (req) - ptlrpc_req_finished(req); - return rc; + return lmv_locate_target_for_name(lmv, lsm, op_data->op_name, + op_data->op_namelen, fid, + &op_data->op_mds); } int lmv_create(struct obd_export *exp, struct md_op_data *op_data, @@ -1491,81 +1874,51 @@ int lmv_create(struct obd_export *exp, struct md_op_data *op_data, __u32 gid, cfs_cap_t cap_effective, __u64 rdev, struct ptlrpc_request **request) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - struct lmv_object *obj; - int rc; - int loop = 0; - int sidx; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt; + int rc; + ENTRY; - rc = 
lmv_check_connect(obd); - if (rc) - RETURN(rc); + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); - if (!lmv->desc.ld_active_tgt_count) - RETURN(-EIO); -repeat: - ++loop; - LASSERT(loop <= 2); - - obj = lmv_object_find(obd, &op_data->op_fid1); - if (obj) { - sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount, - op_data->op_name, op_data->op_namelen); - op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid; - op_data->op_bias &= ~MDS_CHECK_SPLIT; - op_data->op_mds = obj->lo_stripes[sidx].ls_mds; - tgt = lmv_get_target(lmv, op_data->op_mds); - lmv_object_put(obj); - } else { - tgt = lmv_find_target(lmv, &op_data->op_fid1); - op_data->op_bias |= MDS_CHECK_SPLIT; - op_data->op_mds = tgt->ltd_idx; - } + if (!lmv->desc.ld_active_tgt_count) + RETURN(-EIO); - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); + tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data); - if (rc == -ERESTART) - goto repeat; - else if (rc) - RETURN(rc); + CDEBUG(D_INODE, "CREATE name '%.*s' on "DFID" -> mds #%x\n", + op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), + op_data->op_mds); - CDEBUG(D_INODE, "CREATE '%*s' on "DFID" -> mds #%x\n", - op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), - op_data->op_mds); + rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); + if (rc) + RETURN(rc); - op_data->op_flags |= MF_MDC_CANCEL_FID1; - rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid, - cap_effective, rdev, request); - if (rc == 0) { - if (*request == NULL) - RETURN(rc); - CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2)); - } else if (rc == -ERESTART) { - LASSERT(*request != NULL); - DEBUG_REQ(D_WARNING|D_RPCTRACE, *request, - "Got -ERESTART during create!\n"); - ptlrpc_req_finished(*request); - *request = NULL; + /* Send the create request to the MDT where the object + * will be located */ + tgt = lmv_find_target(lmv, &op_data->op_fid2); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - /* - * Directory got split. Time to update local object and repeat - * the request with proper MDS. 
- */ - rc = lmv_handle_split(exp, &op_data->op_fid1); - if (rc == 0) { - rc = lmv_allocate_slaves(obd, &op_data->op_fid1, - op_data, &op_data->op_fid2); - if (rc) - RETURN(rc); - goto repeat; - } - } - RETURN(rc); + op_data->op_mds = tgt->ltd_idx; + + CDEBUG(D_INODE, "CREATE obj "DFID" -> mds #%x\n", + PFID(&op_data->op_fid2), op_data->op_mds); + + op_data->op_flags |= MF_MDC_CANCEL_FID1; + rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid, + cap_effective, rdev, request); + if (rc == 0) { + if (*request == NULL) + RETURN(rc); + CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2)); + } + RETURN(rc); } static int lmv_done_writing(struct obd_export *exp, @@ -1591,277 +1944,90 @@ static int lmv_done_writing(struct obd_export *exp, } static int -lmv_enqueue_slaves(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - struct lookup_intent *it, struct md_op_data *op_data, - struct lustre_handle *lockh, void *lmm, int lmmsize) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_stripe_md *mea = op_data->op_mea1; - struct md_op_data *op_data2; - struct lmv_tgt_desc *tgt; - int i; - int rc = 0; - ENTRY; - - OBD_ALLOC_PTR(op_data2); - if (op_data2 == NULL) - RETURN(-ENOMEM); - - LASSERT(mea != NULL); - for (i = 0; i < mea->mea_count; i++) { - memset(op_data2, 0, sizeof(*op_data2)); - op_data2->op_fid1 = mea->mea_ids[i]; - op_data2->op_bias = 0; - - tgt = lmv_find_target(lmv, &op_data2->op_fid1); - if (IS_ERR(tgt)) - GOTO(cleanup, rc = PTR_ERR(tgt)); - - if (tgt->ltd_exp == NULL) - continue; - - rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data2, - lockh + i, lmm, lmmsize, NULL, 0); - - CDEBUG(D_INODE, "Take lock on slave "DFID" -> %d/%d\n", - PFID(&mea->mea_ids[i]), rc, it->d.lustre.it_status); - - if (rc) - GOTO(cleanup, rc); - - if (it->d.lustre.it_data) { - struct ptlrpc_request *req; - req = (struct ptlrpc_request *)it->d.lustre.it_data; - ptlrpc_req_finished(req); - } - - if (it->d.lustre.it_status) - GOTO(cleanup, rc = it->d.lustre.it_status); - } - - EXIT; -cleanup: - OBD_FREE_PTR(op_data2); - - if (rc != 0) { - /* - * Drop all taken locks. - */ - while (--i >= 0) { - if (lockh[i].cookie) - ldlm_lock_decref(lockh + i, einfo->ei_mode); - lockh[i].cookie = 0; - } - } - return rc; -} - -static int -lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - struct lookup_intent *it, struct md_op_data *op_data, - struct lustre_handle *lockh, void *lmm, int lmmsize, - int extra_lock_flags) -{ - struct ptlrpc_request *req = it->d.lustre.it_data; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lustre_handle plock; - struct lmv_tgt_desc *tgt; - struct md_op_data *rdata; - struct lu_fid fid1; - struct mdt_body *body; - int rc = 0; - int pmode; - ENTRY; - - body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY); - LASSERT(body != NULL); - - if (!(body->valid & OBD_MD_MDS)) - RETURN(0); - - CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n", - LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1)); +lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, + const union ldlm_policy_data *policy, + struct lookup_intent *it, struct md_op_data *op_data, + struct lustre_handle *lockh, __u64 extra_lock_flags) +{ + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt; + int rc; + ENTRY; - /* - * We got LOOKUP lock, but we really need attrs. 
- */ - pmode = it->d.lustre.it_lock_mode; - LASSERT(pmode != 0); - memcpy(&plock, lockh, sizeof(plock)); - it->d.lustre.it_lock_mode = 0; - it->d.lustre.it_data = NULL; - fid1 = body->fid1; + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); - it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE; - ptlrpc_req_finished(req); + CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n", + LL_IT2STR(it), PFID(&op_data->op_fid1)); - tgt = lmv_find_target(lmv, &fid1); - if (IS_ERR(tgt)) - GOTO(out, rc = PTR_ERR(tgt)); + tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - OBD_ALLOC_PTR(rdata); - if (rdata == NULL) - GOTO(out, rc = -ENOMEM); + CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n", + LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx); - rdata->op_fid1 = fid1; - rdata->op_bias = MDS_CROSS_REF; + rc = md_enqueue(tgt->ltd_exp, einfo, policy, it, op_data, lockh, + extra_lock_flags); - rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh, - lmm, lmmsize, NULL, extra_lock_flags); - OBD_FREE_PTR(rdata); - EXIT; -out: - ldlm_lock_decref(&plock, pmode); - return rc; + RETURN(rc); } static int -lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo, - struct lookup_intent *it, struct md_op_data *op_data, - struct lustre_handle *lockh, void *lmm, int lmmsize, - struct ptlrpc_request **req, __u64 extra_lock_flags) -{ - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - struct lmv_object *obj; - int sidx; - int rc; - ENTRY; - - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); - - CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n", - LL_IT2STR(it), PFID(&op_data->op_fid1)); - - if (op_data->op_mea1 && it && it->it_op == IT_UNLINK) { - rc = lmv_enqueue_slaves(exp, einfo, it, op_data, - lockh, lmm, lmmsize); - RETURN(rc); - } - - obj = lmv_object_find(obd, &op_data->op_fid1); - if (obj && op_data->op_namelen) { - sidx = raw_name2idx(obj->lo_hashtype, - obj->lo_objcount, - (char *)op_data->op_name, - op_data->op_namelen); - op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid; - tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds); - } else { - tgt = lmv_find_target(lmv, &op_data->op_fid1); - } - if (obj) - lmv_object_put(obj); - - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); +lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data, + struct ptlrpc_request **preq) +{ + struct ptlrpc_request *req = NULL; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt; + struct mdt_body *body; + int rc; + ENTRY; - CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n", - LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx); + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); - rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data, lockh, - lmm, lmmsize, req, extra_lock_flags); + tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - if (rc == 0 && it && it->it_op == IT_OPEN) { - rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh, - lmm, lmmsize, extra_lock_flags); - } - RETURN(rc); -} + CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" -> mds #%d\n", + op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), + tgt->ltd_idx); -static int -lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data, - struct ptlrpc_request **request) -{ - struct ptlrpc_request *req = NULL; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lu_fid rid = op_data->op_fid1; - struct 
lmv_tgt_desc *tgt; - struct mdt_body *body; - struct lmv_object *obj; - obd_valid valid = op_data->op_valid; - int rc; - int loop = 0; - int sidx; - ENTRY; + rc = md_getattr_name(tgt->ltd_exp, op_data, preq); + if (rc != 0) + RETURN(rc); - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); + body = req_capsule_server_get(&(*preq)->rq_pill, &RMF_MDT_BODY); + LASSERT(body != NULL); -repeat: - ++loop; - LASSERT(loop <= 2); - obj = lmv_object_find(obd, &rid); - if (obj) { - sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount, - op_data->op_name, op_data->op_namelen); - rid = obj->lo_stripes[sidx].ls_fid; - tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds); - op_data->op_mds = obj->lo_stripes[sidx].ls_mds; - valid &= ~OBD_MD_FLCKSPLIT; - lmv_object_put(obj); - } else { - tgt = lmv_find_target(lmv, &rid); - valid |= OBD_MD_FLCKSPLIT; - op_data->op_mds = tgt->ltd_idx; - } - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); + if (body->mbo_valid & OBD_MD_MDS) { + struct lu_fid rid = body->mbo_fid1; + CDEBUG(D_INODE, "Request attrs for "DFID"\n", + PFID(&rid)); - CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" - "DFID" -> mds #%d\n", - op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1), - PFID(&rid), tgt->ltd_idx); - - op_data->op_valid = valid; - op_data->op_fid1 = rid; - rc = md_getattr_name(tgt->ltd_exp, op_data, request); - if (rc == 0) { - body = req_capsule_server_get(&(*request)->rq_pill, - &RMF_MDT_BODY); - LASSERT(body != NULL); - - if (body->valid & OBD_MD_MDS) { - rid = body->fid1; - CDEBUG(D_INODE, "Request attrs for "DFID"\n", - PFID(&rid)); - - tgt = lmv_find_target(lmv, &rid); - if (IS_ERR(tgt)) { - ptlrpc_req_finished(*request); - RETURN(PTR_ERR(tgt)); - } + tgt = lmv_find_target(lmv, &rid); + if (IS_ERR(tgt)) { + ptlrpc_req_finished(*preq); + preq = NULL; + RETURN(PTR_ERR(tgt)); + } - op_data->op_fid1 = rid; - op_data->op_valid |= OBD_MD_FLCROSSREF; - op_data->op_namelen = 0; - op_data->op_name = NULL; - rc = md_getattr_name(tgt->ltd_exp, op_data, &req); - ptlrpc_req_finished(*request); - *request = req; - } - } else if (rc == -ERESTART) { - LASSERT(*request != NULL); - DEBUG_REQ(D_WARNING|D_RPCTRACE, *request, - "Got -ERESTART during getattr!\n"); - ptlrpc_req_finished(*request); - *request = NULL; + op_data->op_fid1 = rid; + op_data->op_valid |= OBD_MD_FLCROSSREF; + op_data->op_namelen = 0; + op_data->op_name = NULL; + rc = md_getattr_name(tgt->ltd_exp, op_data, &req); + ptlrpc_req_finished(*preq); + *preq = req; + } - /* - * Directory got split. Time to update local object and repeat - * the request with proper MDS. - */ - rc = lmv_handle_split(exp, &rid); - if (rc == 0) - goto repeat; - } - RETURN(rc); + RETURN(rc); } #define md_op_data_fid(op_data, fl) \ @@ -1871,99 +2037,40 @@ repeat: fl == MF_MDC_CANCEL_FID4 ? 
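The cross-ref branch above (mbo_valid & OBD_MD_MDS) fetches attributes in two steps: the first MDT only names the object, and the getattr is repeated on the MDT that actually owns it. A minimal userspace sketch of that referral-following shape, with entirely made-up types and data (no Lustre API is implied):

#include <stdio.h>

/* Hypothetical stand-ins for the FID/MDT machinery, for illustration only. */
struct fid { unsigned long long val; };

struct getattr_reply {
        int        is_remote;   /* plays the role of OBD_MD_MDS in mbo_valid */
        struct fid remote_fid;  /* plays the role of mbo_fid1 */
};

/* Pretend lookup: the entry on MDT 0 is a cross-ref pointing at MDT 1. */
static int mdt_getattr(int mdt_idx, struct fid fid, struct getattr_reply *rep)
{
        if (mdt_idx == 0 && fid.val == 100) {
                rep->is_remote = 1;
                rep->remote_fid.val = 200;
        } else {
                rep->is_remote = 0;
        }
        return 0;
}

int main(void)
{
        struct getattr_reply rep;
        struct fid fid = { 100 };
        int mdt = 0;

        mdt_getattr(mdt, fid, &rep);
        if (rep.is_remote) {
                /* The first reply only names the object; follow the
                 * referral and repeat the getattr on the owning MDT. */
                fid = rep.remote_fid;
                mdt = 1;
                mdt_getattr(mdt, fid, &rep);
        }
        printf("attributes fetched from MDT %d for fid %llu\n", mdt, fid.val);
        return 0;
}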
&op_data->op_fid4 : \ NULL) -static int lmv_early_cancel_slaves(struct obd_export *exp, - struct md_op_data *op_data, int op_tgt, - ldlm_mode_t mode, int bits, int flag) +static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt, + struct md_op_data *op_data, + int op_tgt, ldlm_mode_t mode, int bits, int flag) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - ldlm_policy_data_t policy = {{0}}; - struct lu_fid *op_fid; - struct lu_fid *st_fid; - struct lmv_tgt_desc *tgt; - struct lmv_object *obj; - int rc = 0; - int i; - ENTRY; - - op_fid = md_op_data_fid(op_data, flag); - if (!fid_is_sane(op_fid)) - RETURN(0); - - obj = lmv_object_find(obd, op_fid); - if (obj == NULL) - RETURN(-EALREADY); - - policy.l_inodebits.bits = bits; - for (i = 0; i < obj->lo_objcount; i++) { - tgt = lmv_get_target(lmv, obj->lo_stripes[i].ls_mds); - st_fid = &obj->lo_stripes[i].ls_fid; - if (op_tgt != tgt->ltd_idx) { - CDEBUG(D_INODE, "EARLY_CANCEL slave "DFID" -> mds #%d\n", - PFID(st_fid), tgt->ltd_idx); - rc = md_cancel_unused(tgt->ltd_exp, st_fid, &policy, - mode, LCF_ASYNC, NULL); - if (rc) - GOTO(out_put_obj, rc); - } else { - CDEBUG(D_INODE, - "EARLY_CANCEL skip operation target %d on "DFID"\n", - op_tgt, PFID(st_fid)); - /* - * Do not cancel locks for operation target, they will - * be handled later in underlaying layer when calling - * function we run on behalf of. - */ - *op_fid = *st_fid; - op_data->op_flags |= flag; - } - } - EXIT; -out_put_obj: - lmv_object_put(obj); - return rc; -} + struct lu_fid *fid = md_op_data_fid(op_data, flag); + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + ldlm_policy_data_t policy = {{ 0 }}; + int rc = 0; + ENTRY; -static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data, - int op_tgt, ldlm_mode_t mode, int bits, int flag) -{ - struct lu_fid *fid = md_op_data_fid(op_data, flag); - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - ldlm_policy_data_t policy = {{0}}; - struct lmv_object *obj; - int rc = 0; - ENTRY; + if (!fid_is_sane(fid)) + RETURN(0); - if (!fid_is_sane(fid)) - RETURN(0); + if (tgt == NULL) { + tgt = lmv_find_target(lmv, fid); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); + } - obj = lmv_object_find(obd, fid); - if (obj) { - rc = lmv_early_cancel_slaves(exp, op_data, op_tgt, mode, - bits, flag); - lmv_object_put(obj); - } else { - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); - - if (tgt->ltd_idx != op_tgt) { - CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid)); - policy.l_inodebits.bits = bits; - rc = md_cancel_unused(tgt->ltd_exp, fid, &policy, - mode, LCF_ASYNC, NULL); - } else { - CDEBUG(D_INODE, - "EARLY_CANCEL skip operation target %d on "DFID"\n", - op_tgt, PFID(fid)); - op_data->op_flags |= flag; - rc = 0; - } + if (tgt->ltd_idx != op_tgt) { + CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid)); + policy.l_inodebits.bits = bits; + rc = md_cancel_unused(tgt->ltd_exp, fid, &policy, + mode, LCF_ASYNC, NULL); + } else { + CDEBUG(D_INODE, + "EARLY_CANCEL skip operation target %d on "DFID"\n", + op_tgt, PFID(fid)); + op_data->op_flags |= flag; + rc = 0; + } - } - RETURN(rc); + RETURN(rc); } /* @@ -1973,197 +2080,174 @@ static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data, static int lmv_link(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request **request) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = 
&obd->u.lmv; - struct lmv_tgt_desc *tgt; - struct lmv_object *obj; - int rc; - int loop = 0; - mdsno_t mds; - int sidx; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt; + int rc; + ENTRY; - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); -repeat: - ++loop; - LASSERT(loop <= 2); - LASSERT(op_data->op_namelen != 0); - - CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n", - PFID(&op_data->op_fid2), op_data->op_namelen, - op_data->op_name, PFID(&op_data->op_fid1)); - - obj = lmv_object_find(obd, &op_data->op_fid2); - if (obj) { - sidx = raw_name2idx(obj->lo_hashtype, - obj->lo_objcount, - op_data->op_name, - op_data->op_namelen); - op_data->op_fid2 = obj->lo_stripes[sidx].ls_fid; - mds = obj->lo_stripes[sidx].ls_mds; - lmv_object_put(obj); - } else { - rc = lmv_fld_lookup(lmv, &op_data->op_fid2, &mds); - if (rc) - RETURN(rc); - } + LASSERT(op_data->op_namelen != 0); - CDEBUG(D_INODE, "Forward to mds #%x ("DFID")\n", - mds, PFID(&op_data->op_fid1)); + CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n", + PFID(&op_data->op_fid2), op_data->op_namelen, + op_data->op_name, PFID(&op_data->op_fid1)); - op_data->op_fsuid = cfs_curproc_fsuid(); - op_data->op_fsgid = cfs_curproc_fsgid(); - op_data->op_cap = cfs_curproc_cap_pack(); - tgt = lmv_get_target(lmv, mds); + op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); + op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); + op_data->op_cap = cfs_curproc_cap_pack(); + if (op_data->op_mea2 != NULL) { + struct lmv_stripe_md *lsm = op_data->op_mea2; + const struct lmv_oinfo *oinfo; - /* - * Cancel UPDATE lock on child (fid1). - */ - op_data->op_flags |= MF_MDC_CANCEL_FID2; - rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX, - MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1); - if (rc == 0) - rc = md_link(tgt->ltd_exp, op_data, request); - if (rc == -ERESTART) { - LASSERT(*request != NULL); - DEBUG_REQ(D_WARNING|D_RPCTRACE, *request, - "Got -ERESTART during link!\n"); - ptlrpc_req_finished(*request); - *request = NULL; + oinfo = lsm_name_to_stripe_info(lsm, op_data->op_name, + op_data->op_namelen); + if (IS_ERR(oinfo)) + RETURN(PTR_ERR(oinfo)); - /* - * Directory got split. Time to update local object and repeat - * the request with proper MDS. - */ - rc = lmv_handle_split(exp, &op_data->op_fid2); - if (rc == 0) - goto repeat; - } + op_data->op_fid2 = oinfo->lmo_fid; + } - RETURN(rc); + tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); + + /* + * Cancel UPDATE lock on child (fid1). 
+ */ + op_data->op_flags |= MF_MDC_CANCEL_FID2; + rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX, + MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1); + if (rc != 0) + RETURN(rc); + + rc = md_link(tgt->ltd_exp, op_data, request); + + RETURN(rc); } static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data, const char *old, int oldlen, const char *new, int newlen, struct ptlrpc_request **request) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *src_tgt; - int rc; - int sidx; - int loop = 0; - struct lmv_object *obj; - mdsno_t mds1; - mdsno_t mds2; - ENTRY; - - LASSERT(oldlen != 0); - - CDEBUG(D_INODE, "RENAME %*s in "DFID" to %*s in "DFID"\n", - oldlen, old, PFID(&op_data->op_fid1), - newlen, new, PFID(&op_data->op_fid2)); - - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); - -repeat: - ++loop; - LASSERT(loop <= 2); - obj = lmv_object_find(obd, &op_data->op_fid1); - if (obj) { - sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount, - (char *)old, oldlen); - op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid; - mds1 = obj->lo_stripes[sidx].ls_mds; - CDEBUG(D_INODE, "Parent obj "DFID"\n", PFID(&op_data->op_fid1)); - lmv_object_put(obj); - } else { - rc = lmv_fld_lookup(lmv, &op_data->op_fid1, &mds1); - if (rc) - RETURN(rc); - } + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *src_tgt; + int rc; + ENTRY; - obj = lmv_object_find(obd, &op_data->op_fid2); - if (obj) { - /* - * Directory is already split, so we have to forward request to - * the right MDS. - */ - sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount, - (char *)new, newlen); - - mds2 = obj->lo_stripes[sidx].ls_mds; - op_data->op_fid2 = obj->lo_stripes[sidx].ls_fid; - CDEBUG(D_INODE, "Parent obj "DFID"\n", PFID(&op_data->op_fid2)); - lmv_object_put(obj); - } else { - rc = lmv_fld_lookup(lmv, &op_data->op_fid2, &mds2); - if (rc) - RETURN(rc); - } + LASSERT(oldlen != 0); + + CDEBUG(D_INODE, "RENAME %.*s in "DFID":%d to %.*s in "DFID":%d\n", + oldlen, old, PFID(&op_data->op_fid1), + op_data->op_mea1 ? op_data->op_mea1->lsm_md_stripe_count : 0, + newlen, new, PFID(&op_data->op_fid2), + op_data->op_mea2 ? 
op_data->op_mea2->lsm_md_stripe_count : 0); + + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); + + op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); + op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); + op_data->op_cap = cfs_curproc_cap_pack(); + if (op_data->op_cli_flags & CLI_MIGRATE) { + LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID "DFID"\n", + PFID(&op_data->op_fid3)); + rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data); + if (rc) + RETURN(rc); + src_tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid3); + } else { + if (op_data->op_mea1 != NULL) { + struct lmv_stripe_md *lsm = op_data->op_mea1; + + src_tgt = lmv_locate_target_for_name(lmv, lsm, old, + oldlen, + &op_data->op_fid1, + &op_data->op_mds); + if (IS_ERR(src_tgt)) + RETURN(PTR_ERR(src_tgt)); + } else { + src_tgt = lmv_find_target(lmv, &op_data->op_fid1); + if (IS_ERR(src_tgt)) + RETURN(PTR_ERR(src_tgt)); + + op_data->op_mds = src_tgt->ltd_idx; + } - op_data->op_fsuid = cfs_curproc_fsuid(); - op_data->op_fsgid = cfs_curproc_fsgid(); - op_data->op_cap = cfs_curproc_cap_pack(); + if (op_data->op_mea2) { + struct lmv_stripe_md *lsm = op_data->op_mea2; + const struct lmv_oinfo *oinfo; - src_tgt = lmv_get_target(lmv, mds1); + oinfo = lsm_name_to_stripe_info(lsm, new, newlen); + if (IS_ERR(oinfo)) + RETURN(PTR_ERR(oinfo)); - /* - * LOOKUP lock on src child (fid3) should also be cancelled for - * src_tgt in mdc_rename. - */ - op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3; + op_data->op_fid2 = oinfo->lmo_fid; + } + } + if (IS_ERR(src_tgt)) + RETURN(PTR_ERR(src_tgt)); + + /* + * LOOKUP lock on src child (fid3) should also be cancelled for + * src_tgt in mdc_rename. + */ + op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3; + + /* + * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its + * own target. + */ + rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx, + LCK_EX, MDS_INODELOCK_UPDATE, + MF_MDC_CANCEL_FID2); + + if (rc != 0) + RETURN(rc); + /* + * Cancel LOOKUP locks on source child (fid3) for parent tgt_tgt. + */ + if (fid_is_sane(&op_data->op_fid3)) { + struct lmv_tgt_desc *tgt; + + tgt = lmv_find_target(lmv, &op_data->op_fid1); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); + + /* Cancel LOOKUP lock on its parent */ + rc = lmv_early_cancel(exp, tgt, op_data, src_tgt->ltd_idx, + LCK_EX, MDS_INODELOCK_LOOKUP, + MF_MDC_CANCEL_FID3); + if (rc != 0) + RETURN(rc); + + rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx, + LCK_EX, MDS_INODELOCK_FULL, + MF_MDC_CANCEL_FID3); + if (rc != 0) + RETURN(rc); + } - /* - * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its - * own target. - */ - rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx, - LCK_EX, MDS_INODELOCK_UPDATE, - MF_MDC_CANCEL_FID2); + /* + * Cancel all the locks on tgt child (fid4). + */ + if (fid_is_sane(&op_data->op_fid4)) + rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx, + LCK_EX, MDS_INODELOCK_FULL, + MF_MDC_CANCEL_FID4); - /* - * Cancel LOOKUP locks on tgt child (fid4) for parent tgt_tgt. - */ - if (rc == 0) { - rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx, - LCK_EX, MDS_INODELOCK_LOOKUP, - MF_MDC_CANCEL_FID4); - } + CDEBUG(D_INODE, DFID":m%d to "DFID"\n", PFID(&op_data->op_fid1), + op_data->op_mds, PFID(&op_data->op_fid2)); - /* - * Cancel all the locks on tgt child (fid4). 
- */ - if (rc == 0) - rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx, - LCK_EX, MDS_INODELOCK_FULL, - MF_MDC_CANCEL_FID4); - - if (rc == 0) - rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen, - new, newlen, request); - - if (rc == -ERESTART) { - LASSERT(*request != NULL); - DEBUG_REQ(D_WARNING|D_RPCTRACE, *request, - "Got -ERESTART during rename!\n"); - ptlrpc_req_finished(*request); - *request = NULL; + rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen, new, newlen, + request); - /* - * Directory got split. Time to update local object and repeat - * the request with proper MDS. - */ - rc = lmv_handle_split(exp, &op_data->op_fid1); - if (rc == 0) - goto repeat; - } - RETURN(rc); + RETURN(rc); } static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data, @@ -2171,439 +2255,546 @@ static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request **request, struct md_open_data **mod) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct ptlrpc_request *req; - struct lmv_tgt_desc *tgt; - struct lmv_object *obj; - int rc = 0; - int i; - ENTRY; - - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); - - obj = lmv_object_find(obd, &op_data->op_fid1); + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt; + int rc = 0; + ENTRY; - CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x%s\n", - PFID(&op_data->op_fid1), op_data->op_attr.ia_valid, - obj ? ", split" : ""); + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); - op_data->op_flags |= MF_MDC_CANCEL_FID1; - if (obj) { - for (i = 0; i < obj->lo_objcount; i++) { - op_data->op_fid1 = obj->lo_stripes[i].ls_fid; + CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x\n", + PFID(&op_data->op_fid1), op_data->op_attr.ia_valid); - tgt = lmv_get_target(lmv, obj->lo_stripes[i].ls_mds); - if (IS_ERR(tgt)) { - rc = PTR_ERR(tgt); - break; - } + op_data->op_flags |= MF_MDC_CANCEL_FID1; + tgt = lmv_find_target(lmv, &op_data->op_fid1); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, - ea2, ea2len, &req, mod); - - if (lu_fid_eq(&obj->lo_fid, &obj->lo_stripes[i].ls_fid)) { - /* - * This is master object and this request should - * be returned back to llite. 
- */ - *request = req; - } else { - ptlrpc_req_finished(req); - } + rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2, + ea2len, request, mod); - if (rc) - break; - } - lmv_object_put(obj); - } else { - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); - - rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2, - ea2len, request, mod); - } - RETURN(rc); + RETURN(rc); } -static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid, - struct obd_capa *oc, struct ptlrpc_request **request) +static int lmv_fsync(struct obd_export *exp, const struct lu_fid *fid, + struct obd_capa *oc, struct ptlrpc_request **request) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - int rc; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt; + int rc; + ENTRY; - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); + rc = lmv_check_connect(obd); + if (rc != 0) + RETURN(rc); - tgt = lmv_find_target(lmv, fid); - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); + tgt = lmv_find_target(lmv, fid); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - rc = md_sync(tgt->ltd_exp, fid, oc, request); - RETURN(rc); + rc = md_fsync(tgt->ltd_exp, fid, oc, request); + RETURN(rc); } /** - * Main purpose of LMV blocking ast is to remove split directory LMV - * presentation object (struct lmv_object) attached to the lock being revoked. + * Get current minimum entry from striped directory + * + * This function will search the dir entry, whose hash value is the + * closest(>=) to @hash_offset, from all of sub-stripes, and it is + * only being called for striped directory. + * + * \param[in] exp export of LMV + * \param[in] op_data parameters transferred beween client MD stack + * stripe_information will be included in this + * parameter + * \param[in] cb_op ldlm callback being used in enqueue in + * mdc_read_page + * \param[in] hash_offset the hash value, which is used to locate + * minum(closet) dir entry + * \param[in|out] stripe_offset the caller use this to indicate the stripe + * index of last entry, so to avoid hash conflict + * between stripes. It will also be used to + * return the stripe index of current dir entry. 
+ * \param[in|out] entp the minum entry and it also is being used + * to input the last dir entry to resolve the + * hash conflict + * + * \param[out] ppage the page which holds the minum entry + * + * \retval = 0 get the entry successfully + * negative errno (< 0) does not get the entry */ -int lmv_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc, - void *data, int flag) -{ - struct lustre_handle lockh; - struct lmv_object *obj; - int rc; - ENTRY; +static int lmv_get_min_striped_entry(struct obd_export *exp, + struct md_op_data *op_data, + struct md_callback *cb_op, + __u64 hash_offset, int *stripe_offset, + struct lu_dirent **entp, + struct page **ppage) +{ + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_stripe_md *lsm = op_data->op_mea1; + struct lmv_tgt_desc *tgt; + int stripe_count; + struct lu_dirent *min_ent = NULL; + struct page *min_page = NULL; + int min_idx = 0; + int i; + int rc = 0; + ENTRY; - switch (flag) { - case LDLM_CB_BLOCKING: - ldlm_lock2handle(lock, &lockh); - rc = ldlm_cli_cancel(&lockh); - if (rc < 0) { - CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc); - RETURN(rc); - } - break; - case LDLM_CB_CANCELING: - /* - * Time to drop cached attrs for split directory object - */ - obj = lock->l_ast_data; - if (obj) { - CDEBUG(D_INODE, "Cancel %s on "LPU64"/"LPU64 - ", master "DFID"\n", - lock->l_resource->lr_name.name[3] == 1 ? - "LOOKUP" : "UPDATE", - lock->l_resource->lr_name.name[0], - lock->l_resource->lr_name.name[1], - PFID(&obj->lo_fid)); - lmv_object_put(obj); - } - break; - default: - LBUG(); - } - RETURN(0); -} + stripe_count = lsm->lsm_md_stripe_count; + for (i = 0; i < stripe_count; i++) { + struct lu_dirent *ent = NULL; + struct page *page = NULL; + struct lu_dirpage *dp; + __u64 stripe_hash = hash_offset; + + tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds, NULL); + if (IS_ERR(tgt)) + GOTO(out, rc = PTR_ERR(tgt)); + + /* op_data will be shared by each stripe, so we need + * reset these value for each stripe */ + op_data->op_stripe_offset = i; + op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid; + op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid; + op_data->op_data = lsm->lsm_md_oinfo[i].lmo_root; +next: + rc = md_read_page(tgt->ltd_exp, op_data, cb_op, stripe_hash, + &page); + if (rc != 0) + GOTO(out, rc); + + dp = page_address(page); + for (ent = lu_dirent_start(dp); ent != NULL; + ent = lu_dirent_next(ent)) { + /* Skip dummy entry */ + if (le16_to_cpu(ent->lde_namelen) == 0) + continue; + + if (le64_to_cpu(ent->lde_hash) < hash_offset) + continue; + + if (le64_to_cpu(ent->lde_hash) == hash_offset && + (*entp == ent || i < *stripe_offset)) + continue; + + /* skip . and .. 
for other stripes */ + if (i != 0 && + (strncmp(ent->lde_name, ".", + le16_to_cpu(ent->lde_namelen)) == 0 || + strncmp(ent->lde_name, "..", + le16_to_cpu(ent->lde_namelen)) == 0)) + continue; + break; + } -static void lmv_hash_adjust(__u64 *hash, __u64 hash_adj) -{ - __u64 val; + if (ent == NULL) { + stripe_hash = le64_to_cpu(dp->ldp_hash_end); + + kunmap(page); + page_cache_release(page); + page = NULL; + + /* reach the end of current stripe, go to next stripe */ + if (stripe_hash == MDS_DIR_END_OFF) + continue; + else + goto next; + } + + if (min_ent != NULL) { + if (le64_to_cpu(min_ent->lde_hash) > + le64_to_cpu(ent->lde_hash)) { + min_ent = ent; + kunmap(min_page); + page_cache_release(min_page); + min_idx = i; + min_page = page; + } else { + kunmap(page); + page_cache_release(page); + page = NULL; + } + } else { + min_ent = ent; + min_page = page; + min_idx = i; + } + } - val = le64_to_cpu(*hash); - if (val < hash_adj) - val += MAX_HASH_SIZE; - if (val != MDS_DIR_END_OFF) - *hash = cpu_to_le64(val - hash_adj); +out: + if (*ppage != NULL) { + kunmap(*ppage); + page_cache_release(*ppage); + } + *stripe_offset = min_idx; + *entp = min_ent; + *ppage = min_page; + RETURN(rc); } -static __u32 lmv_node_rank(struct obd_export *exp, const struct lu_fid *fid) -{ - __u64 id; - struct obd_import *imp; +/** + * Build dir entry page from a striped directory + * + * This function gets one entry by @offset from a striped directory. It will + * read entries from all of stripes, and choose one closest to the required + * offset(&offset). A few notes + * 1. skip . and .. for non-zero stripes, because there can only have one . + * and .. in a directory. + * 2. op_data will be shared by all of stripes, instead of allocating new + * one, so need to restore before reusing. + * 3. release the entry page if that is not being chosen. + * + * \param[in] exp obd export refer to LMV + * \param[in] op_data hold those MD parameters of read_entry + * \param[in] cb_op ldlm callback being used in enqueue in mdc_read_entry + * \param[out] ldp the entry being read + * \param[out] ppage the page holding the entry. Note: because the entry + * will be accessed in upper layer, so we need hold the + * page until the usages of entry is finished, see + * ll_dir_entry_next. 
+ * + * retval =0 if get entry successfully + * <0 cannot get entry + */ +static int lmv_read_striped_page(struct obd_export *exp, + struct md_op_data *op_data, + struct md_callback *cb_op, + __u64 offset, struct page **ppage) +{ + struct obd_device *obd = exp->exp_obd; + struct lu_fid master_fid = op_data->op_fid1; + struct inode *master_inode = op_data->op_data; + __u64 hash_offset = offset; + struct lu_dirpage *dp; + struct page *min_ent_page = NULL; + struct page *ent_page = NULL; + struct lu_dirent *ent; + void *area; + int ent_idx = 0; + struct lu_dirent *min_ent = NULL; + struct lu_dirent *last_ent; + int left_bytes; + int rc; + ENTRY; + + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); + + /* Allocate a page and read entries from all of stripes and fill + * the page by hash order */ + ent_page = alloc_page(GFP_KERNEL); + if (ent_page == NULL) + RETURN(-ENOMEM); + + /* Initialize the entry page */ + dp = kmap(ent_page); + memset(dp, 0, sizeof(*dp)); + dp->ldp_hash_start = cpu_to_le64(offset); + dp->ldp_flags |= LDF_COLLIDE; + + area = dp + 1; + left_bytes = PAGE_CACHE_SIZE - sizeof(*dp); + ent = area; + last_ent = ent; + do { + __u16 ent_size; + + /* Find the minum entry from all sub-stripes */ + rc = lmv_get_min_striped_entry(exp, op_data, cb_op, hash_offset, + &ent_idx, &min_ent, + &min_ent_page); + if (rc != 0) + GOTO(out, rc); + + /* If it can not get minum entry, it means it already reaches + * the end of this directory */ + if (min_ent == NULL) { + last_ent->lde_reclen = 0; + hash_offset = MDS_DIR_END_OFF; + GOTO(out, rc); + } + + ent_size = le16_to_cpu(min_ent->lde_reclen); + + /* the last entry lde_reclen is 0, but it might not + * the end of this entry of this temporay entry */ + if (ent_size == 0) + ent_size = lu_dirent_calc_size( + le16_to_cpu(min_ent->lde_namelen), + le32_to_cpu(min_ent->lde_attrs)); + if (ent_size > left_bytes) { + last_ent->lde_reclen = cpu_to_le16(0); + hash_offset = le64_to_cpu(min_ent->lde_hash); + GOTO(out, rc); + } + + memcpy(ent, min_ent, ent_size); + + /* Replace . with master FID and Replace .. with the parent FID + * of master object */ + if (strncmp(ent->lde_name, ".", + le16_to_cpu(ent->lde_namelen)) == 0 && + le16_to_cpu(ent->lde_namelen) == 1) + fid_cpu_to_le(&ent->lde_fid, &master_fid); + else if (strncmp(ent->lde_name, "..", + le16_to_cpu(ent->lde_namelen)) == 0 && + le16_to_cpu(ent->lde_namelen) == 2) + fid_cpu_to_le(&ent->lde_fid, &op_data->op_fid3); + + left_bytes -= ent_size; + ent->lde_reclen = cpu_to_le16(ent_size); + last_ent = ent; + ent = (void *)ent + ent_size; + hash_offset = le64_to_cpu(min_ent->lde_hash); + if (hash_offset == MDS_DIR_END_OFF) { + last_ent->lde_reclen = 0; + break; + } + } while (1); +out: + if (min_ent_page != NULL) { + kunmap(min_ent_page); + page_cache_release(min_ent_page); + } + + if (unlikely(rc != 0)) { + __free_page(ent_page); + ent_page = NULL; + } else { + if (ent == area) + dp->ldp_flags |= LDF_EMPTY; + dp->ldp_flags = cpu_to_le32(dp->ldp_flags); + dp->ldp_hash_end = cpu_to_le64(hash_offset); + } - /* - * XXX: to get nid we assume that underlying obd device is mdc. 
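The two docstrings above describe a hash-ordered merge: read from every stripe, repeatedly take the entry with the smallest hash not yet consumed, and stop when the output page is full, remembering where the next read must resume. The sketch below is illustrative only, uses made-up hash values and no Lustre types, and simply shows that merge shape in plain userspace C.

#include <stdio.h>

#define NUM_STRIPES   3
#define OUT_CAPACITY  8
#define END_OFF       ~0ULL   /* plays the role of MDS_DIR_END_OFF */

/* Per-stripe cursors over already hash-sorted values. */
static const unsigned long long stripe_hash[NUM_STRIPES][4] = {
        { 11, 40, 72, END_OFF },
        {  5, 23, 81, END_OFF },
        { 17, 54, 66, END_OFF },
};

int main(void)
{
        unsigned int pos[NUM_STRIPES] = { 0, 0, 0 };
        unsigned long long out[OUT_CAPACITY];
        unsigned int filled = 0;
        unsigned long long resume = END_OFF;
        int i;

        while (filled < OUT_CAPACITY) {
                int min_idx = -1;
                unsigned long long min_hash = END_OFF;

                /* Scan every stripe and remember the smallest next hash. */
                for (i = 0; i < NUM_STRIPES; i++) {
                        if (stripe_hash[i][pos[i]] < min_hash) {
                                min_hash = stripe_hash[i][pos[i]];
                                min_idx = i;
                        }
                }
                if (min_idx < 0)        /* all stripes exhausted */
                        break;

                out[filled++] = min_hash;
                pos[min_idx]++;
        }

        /* If the page filled up early, the next read resumes from the
         * smallest hash that was not consumed. */
        if (filled == OUT_CAPACITY)
                for (i = 0; i < NUM_STRIPES; i++)
                        if (stripe_hash[i][pos[i]] < resume)
                                resume = stripe_hash[i][pos[i]];

        printf("merged %u entries (%llu..%llu), resume at hash %llu\n",
               filled, out[0], out[filled - 1], resume);
        return 0;
}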
- */ - imp = class_exp2cliimp(exp); - id = imp->imp_connection->c_self + fid_flatten(fid); + /* We do not want to allocate md_op_data during each + * dir entry reading, so op_data will be shared by every stripe, + * then we need to restore it back to original value before + * return to the upper layer */ + op_data->op_fid1 = master_fid; + op_data->op_fid2 = master_fid; + op_data->op_data = master_inode; - CDEBUG(D_INODE, "Readpage node rank: "LPX64" "DFID" "LPX64" "LPX64"\n", - imp->imp_connection->c_self, PFID(fid), id, id ^ (id >> 32)); + *ppage = ent_page; - return id ^ (id >> 32); + RETURN(rc); } -static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data, - struct page **pages, struct ptlrpc_request **request) +int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data, + struct md_callback *cb_op, __u64 offset, + struct page **ppage) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_object *obj; - struct lu_fid rid = op_data->op_fid1; - __u64 offset = op_data->op_offset; - __u64 hash_adj = 0; - __u32 rank = 0; - __u64 seg_size = 0; - __u64 tgt_tmp = 0; - int tgt_idx = 0; - int tgt0_idx = 0; - int rc; - int nr = 0; - int i; - /* number of pages read, in CFS_PAGE_SIZE */ - int nrdpgs; - /* number of pages transferred in LU_PAGE_SIZE */ - int nlupgs; - struct lmv_stripe *los; - struct lmv_tgt_desc *tgt; - struct lu_dirpage *dp; - struct lu_dirent *ent; - ENTRY; - - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); - - CDEBUG(D_INODE, "READPAGE at "LPX64" from "DFID"\n", offset, PFID(&rid)); + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_stripe_md *lsm = op_data->op_mea1; + struct lmv_tgt_desc *tgt; + int rc; + ENTRY; - /* - * This case handle directory lookup in clustered metadata case (i.e. - * split directory is located on multiple md servers.) - * each server keeps directory entries for certain range of hashes. - * E.g. we have N server and suppose hash range is 0 to MAX_HASH. - * first server will keep records with hashes [ 0 ... MAX_HASH / N - 1], - * second one with hashes [MAX_HASH / N ... 2 * MAX_HASH / N] and - * so on.... - * readdir can simply start reading entries from 0 - N server in - * order but that will not scale well as all client will request dir in - * to server in same order. - * Following algorithm does optimization: - * Instead of doing readdir in 1, 2, ...., N order, client with a - * rank R does readdir in R, R + 1, ..., N, 1, ... R - 1 order. - * (every client has rank R) - * But ll_readdir() expect offset range [0 to MAX_HASH/N) but - * since client ask dir from MDS{R} client has pages with offsets - * [R*MAX_HASH/N ... (R + 1)*MAX_HASH/N] there for we do hash_adj - * on hash values that we get. - */ - obj = lmv_object_find_lock(obd, &rid); - if (obj) { - nr = obj->lo_objcount; - LASSERT(nr > 0); - seg_size = MAX_HASH_SIZE; - do_div(seg_size, nr); - los = obj->lo_stripes; - tgt = lmv_get_target(lmv, los[0].ls_mds); - rank = lmv_node_rank(tgt->ltd_exp, &rid) % nr; - tgt_tmp = offset; - do_div(tgt_tmp, seg_size); - tgt0_idx = do_div(tgt_tmp, nr); - tgt_idx = (tgt0_idx + rank) % nr; - - if (tgt_idx < tgt0_idx) - /* - * Wrap around. - * - * Last segment has unusual length due to division - * rounding. 
- */ - hash_adj = MAX_HASH_SIZE - seg_size * nr; - else - hash_adj = 0; - - hash_adj += rank * seg_size; - - CDEBUG(D_INODE, "Readpage hash adjustment: %x "LPX64" " - LPX64"/%x -> "LPX64"/%x\n", rank, hash_adj, - offset, tgt0_idx, offset + hash_adj, tgt_idx); - - offset = (offset + hash_adj) & MAX_HASH_SIZE; - rid = obj->lo_stripes[tgt_idx].ls_fid; - tgt = lmv_get_target(lmv, los[tgt_idx].ls_mds); - - CDEBUG(D_INODE, "Forward to "DFID" with offset %lu i %d\n", - PFID(&rid), (unsigned long)offset, tgt_idx); - } else - tgt = lmv_find_target(lmv, &rid); + rc = lmv_check_connect(obd); + if (rc != 0) + RETURN(rc); - if (IS_ERR(tgt)) - GOTO(cleanup, rc = PTR_ERR(tgt)); + if (unlikely(lsm != NULL)) { + rc = lmv_read_striped_page(exp, op_data, cb_op, offset, ppage); + RETURN(rc); + } - op_data->op_fid1 = rid; - rc = md_readpage(tgt->ltd_exp, op_data, pages, request); - if (rc) - GOTO(cleanup, rc); - - nrdpgs = ((*request)->rq_bulk->bd_nob_transferred + CFS_PAGE_SIZE - 1) - >> CFS_PAGE_SHIFT; - nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT; - LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK)); - LASSERT(nrdpgs > 0 && nrdpgs <= op_data->op_npages); - - CDEBUG(D_INODE, "read %d(%d)/%d pages\n", nrdpgs, nlupgs, - op_data->op_npages); - - for (i = 0; i < nrdpgs; i++) { -#if CFS_PAGE_SIZE > LU_PAGE_SIZE - struct lu_dirpage *first; - __u64 hash_end = 0; - __u32 flags = 0; -#endif - struct lu_dirent *tmp = NULL; - - dp = cfs_kmap(pages[i]); - if (obj) { - lmv_hash_adjust(&dp->ldp_hash_start, hash_adj); - lmv_hash_adjust(&dp->ldp_hash_end, hash_adj); - LASSERT(le64_to_cpu(dp->ldp_hash_start) <= - op_data->op_offset); - - if ((tgt0_idx != nr - 1) && - (le64_to_cpu(dp->ldp_hash_end) == MDS_DIR_END_OFF)) - { - dp->ldp_hash_end = cpu_to_le32(seg_size * - (tgt0_idx + 1)); - CDEBUG(D_INODE, - ""DFID" reset end "LPX64" tgt %d\n", - PFID(&rid), - (__u64)le64_to_cpu(dp->ldp_hash_end), - tgt_idx); - } - } + tgt = lmv_find_target(lmv, &op_data->op_fid1); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - ent = lu_dirent_start(dp); -#if CFS_PAGE_SIZE > LU_PAGE_SIZE - first = dp; - hash_end = dp->ldp_hash_end; -repeat: -#endif - nlupgs--; - for (tmp = ent; ent != NULL; - tmp = ent, ent = lu_dirent_next(ent)) { - if (obj) - lmv_hash_adjust(&ent->lde_hash, hash_adj); - } + rc = md_read_page(tgt->ltd_exp, op_data, cb_op, offset, ppage); -#if CFS_PAGE_SIZE > LU_PAGE_SIZE - dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE); - if (((unsigned long)dp & ~CFS_PAGE_MASK) && nlupgs > 0) { - ent = lu_dirent_start(dp); - - if (obj) { - lmv_hash_adjust(&dp->ldp_hash_end, hash_adj); - if ((tgt0_idx != nr - 1) && - (le64_to_cpu(dp->ldp_hash_end) == - MDS_DIR_END_OFF)) { - hash_end = cpu_to_le32(seg_size * - (tgt0_idx + 1)); - CDEBUG(D_INODE, - ""DFID" reset end "LPX64" tgt %d\n", - PFID(&rid), - (__u64)le64_to_cpu(hash_end), - tgt_idx); - } - } - hash_end = dp->ldp_hash_end; - flags = dp->ldp_flags; - - if (tmp) { - /* enlarge the end entry lde_reclen from 0 to - * first entry of next lu_dirpage, in this way - * several lu_dirpages can be stored into one - * client page on client. 
*/ - tmp = ((void *)tmp) + - le16_to_cpu(tmp->lde_reclen); - tmp->lde_reclen = - cpu_to_le16((char *)(dp->ldp_entries) - - (char *)tmp); - goto repeat; - } - } - first->ldp_hash_end = hash_end; - first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE); - first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE); -#else - SET_BUT_UNUSED(tmp); -#endif - cfs_kunmap(pages[i]); - } - EXIT; -cleanup: - if (obj) - lmv_object_put_unlock(obj); - return rc; + RETURN(rc); } +/** + * Unlink a file/directory + * + * Unlink a file or directory under the parent dir. The unlink request + * usually will be sent to the MDT where the child is located, but if + * the client does not have the child FID then request will be sent to the + * MDT where the parent is located. + * + * If the parent is a striped directory then it also needs to locate which + * stripe the name of the child is located, and replace the parent FID + * (@op->op_fid1) with the stripe FID. Note: if the stripe is unknown, + * it will walk through all of sub-stripes until the child is being + * unlinked finally. + * + * \param[in] exp export refer to LMV + * \param[in] op_data different parameters transferred beween client + * MD stacks, name, namelen, FIDs etc. + * op_fid1 is the parent FID, op_fid2 is the child + * FID. + * \param[out] request point to the request of unlink. + * + * retval 0 if succeed + * negative errno if failed. + */ static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data, struct ptlrpc_request **request) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt = NULL; - struct lmv_object *obj; - int rc; - int sidx; - int loop = 0; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt = NULL; + struct lmv_tgt_desc *parent_tgt = NULL; + struct mdt_body *body; + int rc; + int stripe_index = 0; + struct lmv_stripe_md *lsm = op_data->op_mea1; + ENTRY; - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); +retry_unlink: + /* For striped dir, we need to locate the parent as well */ + if (lsm != NULL) { + struct lmv_tgt_desc *tmp; + + LASSERT(op_data->op_name != NULL && + op_data->op_namelen != 0); + + tmp = lmv_locate_target_for_name(lmv, lsm, + op_data->op_name, + op_data->op_namelen, + &op_data->op_fid1, + &op_data->op_mds); + + /* return -EBADFD means unknown hash type, might + * need try all sub-stripe here */ + if (IS_ERR(tmp) && PTR_ERR(tmp) != -EBADFD) + RETURN(PTR_ERR(tmp)); + + /* Note: both migrating dir and unknown hash dir need to + * try all of sub-stripes, so we need start search the + * name from stripe 0, but migrating dir is already handled + * inside lmv_locate_target_for_name(), so we only check + * unknown hash type directory here */ + if (!lmv_is_known_hash_type(lsm)) { + struct lmv_oinfo *oinfo; + + oinfo = &lsm->lsm_md_oinfo[stripe_index]; + + op_data->op_fid1 = oinfo->lmo_fid; + op_data->op_mds = oinfo->lmo_mds; + } + } -repeat: - ++loop; - LASSERT(loop <= 2); - LASSERT(op_data->op_namelen != 0); - - obj = lmv_object_find(obd, &op_data->op_fid1); - if (obj) { - sidx = raw_name2idx(obj->lo_hashtype, - obj->lo_objcount, - op_data->op_name, - op_data->op_namelen); - op_data->op_bias &= ~MDS_CHECK_SPLIT; - op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid; - tgt = lmv_get_target(lmv, - obj->lo_stripes[sidx].ls_mds); - lmv_object_put(obj); - CDEBUG(D_INODE, "UNLINK '%*s' in "DFID" -> %u\n", - op_data->op_namelen, op_data->op_name, - 
PFID(&op_data->op_fid1), sidx); - } +try_next_stripe: + /* Send unlink requests to the MDT where the child is located */ + if (likely(!fid_is_zero(&op_data->op_fid2))) + tgt = lmv_find_target(lmv, &op_data->op_fid2); + else if (lsm != NULL) + tgt = lmv_get_target(lmv, op_data->op_mds, NULL); + else + tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); + + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); + + op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid()); + op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid()); + op_data->op_cap = cfs_curproc_cap_pack(); + + /* + * If child's fid is given, cancel unused locks for it if it is from + * another export than parent. + * + * LOOKUP lock for child (fid3) should also be cancelled on parent + * tgt_tgt in mdc_unlink(). + */ + op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3; + + /* + * Cancel FULL locks on child (fid3). + */ + parent_tgt = lmv_find_target(lmv, &op_data->op_fid1); + if (IS_ERR(parent_tgt)) + RETURN(PTR_ERR(parent_tgt)); + + if (parent_tgt != tgt) { + rc = lmv_early_cancel(exp, parent_tgt, op_data, tgt->ltd_idx, + LCK_EX, MDS_INODELOCK_LOOKUP, + MF_MDC_CANCEL_FID3); + } - if (tgt == NULL) { - tgt = lmv_find_target(lmv, &op_data->op_fid1); - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); - op_data->op_bias |= MDS_CHECK_SPLIT; - } + rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX, + MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3); + if (rc != 0) + RETURN(rc); - op_data->op_fsuid = cfs_curproc_fsuid(); - op_data->op_fsgid = cfs_curproc_fsgid(); - op_data->op_cap = cfs_curproc_cap_pack(); + CDEBUG(D_INODE, "unlink with fid="DFID"/"DFID" -> mds #%d\n", + PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx); - /* - * If child's fid is given, cancel unused locks for it if it is from - * another export than parent. - * - * LOOKUP lock for child (fid3) should also be cancelled on parent - * tgt_tgt in mdc_unlink(). - */ - op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3; + rc = md_unlink(tgt->ltd_exp, op_data, request); + if (rc != 0 && rc != -EREMOTE && rc != -ENOENT) + RETURN(rc); - /* - * Cancel FULL locks on child (fid3). - */ - rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX, - MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3); + /* Try next stripe if it is needed. */ + if (rc == -ENOENT && lsm != NULL && lmv_need_try_all_stripes(lsm)) { + struct lmv_oinfo *oinfo; - if (rc == 0) - rc = md_unlink(tgt->ltd_exp, op_data, request); + stripe_index++; + if (stripe_index >= lsm->lsm_md_stripe_count) + RETURN(rc); - if (rc == -ERESTART) { - LASSERT(*request != NULL); - DEBUG_REQ(D_WARNING|D_RPCTRACE, *request, - "Got -ERESTART during unlink!\n"); - ptlrpc_req_finished(*request); - *request = NULL; + oinfo = &lsm->lsm_md_oinfo[stripe_index]; - /* - * Directory got split. Time to update local object and repeat - * the request with proper MDS. - */ - rc = lmv_handle_split(exp, &op_data->op_fid1); - if (rc == 0) - goto repeat; - } - RETURN(rc); + op_data->op_fid1 = oinfo->lmo_fid; + op_data->op_mds = oinfo->lmo_mds; + + ptlrpc_req_finished(*request); + *request = NULL; + + goto try_next_stripe; + } + + body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY); + if (body == NULL) + RETURN(-EPROTO); + + /* Not cross-ref case, just get out of here. 
*/ + if (likely(!(body->mbo_valid & OBD_MD_MDS))) + RETURN(rc); + + CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n", + exp->exp_obd->obd_name, PFID(&body->mbo_fid1)); + + /* This is a remote object, try remote MDT, Note: it may + * try more than 1 time here, Considering following case + * /mnt/lustre is root on MDT0, remote1 is on MDT1 + * 1. Initially A does not know where remote1 is, it send + * unlink RPC to MDT0, MDT0 return -EREMOTE, it will + * resend unlink RPC to MDT1 (retry 1st time). + * + * 2. During the unlink RPC in flight, + * client B mv /mnt/lustre/remote1 /mnt/lustre/remote2 + * and create new remote1, but on MDT0 + * + * 3. MDT1 get unlink RPC(from A), then do remote lock on + * /mnt/lustre, then lookup get fid of remote1, and find + * it is remote dir again, and replay -EREMOTE again. + * + * 4. Then A will resend unlink RPC to MDT0. (retry 2nd times). + * + * In theory, it might try unlimited time here, but it should + * be very rare case. */ + op_data->op_fid2 = body->mbo_fid1; + ptlrpc_req_finished(*request); + *request = NULL; + + goto retry_unlink; } static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) @@ -2619,6 +2810,7 @@ static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage) case OBD_CLEANUP_EXPORTS: fld_client_proc_fini(&lmv->lmv_fld); lprocfs_obd_cleanup(obd); + lprocfs_free_md_stats(obd); break; default: break; @@ -2644,7 +2836,6 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, lmv = &obd->u.lmv; if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) { - struct lmv_tgt_desc *tgts; int i; rc = lmv_check_connect(obd); @@ -2652,37 +2843,36 @@ static int lmv_get_info(const struct lu_env *env, struct obd_export *exp, RETURN(rc); LASSERT(*vallen == sizeof(__u32)); - for (i = 0, tgts = lmv->tgts; i < lmv->desc.ld_tgt_count; - i++, tgts++) { - - /* - * All tgts should be connected when this gets called. - */ - if (!tgts || !tgts->ltd_exp) { - CERROR("target not setup?\n"); - continue; - } - - if (!obd_get_info(env, tgts->ltd_exp, keylen, key, - vallen, val, NULL)) - RETURN(0); - } - RETURN(-EINVAL); - } else if (KEY_IS(KEY_MAX_EASIZE) || KEY_IS(KEY_CONN_DATA)) { - rc = lmv_check_connect(obd); - if (rc) - RETURN(rc); - - /* - * Forwarding this request to first MDS, it should know LOV - * desc. - */ - rc = obd_get_info(env, lmv->tgts[0].ltd_exp, keylen, key, - vallen, val, NULL); - if (!rc && KEY_IS(KEY_CONN_DATA)) { - exp->exp_connect_flags = - ((struct obd_connect_data *)val)->ocd_connect_flags; - } + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + struct lmv_tgt_desc *tgt = lmv->tgts[i]; + /* + * All tgts should be connected when this gets called. + */ + if (tgt == NULL || tgt->ltd_exp == NULL) + continue; + + if (!obd_get_info(env, tgt->ltd_exp, keylen, key, + vallen, val, NULL)) + RETURN(0); + } + RETURN(-EINVAL); + } else if (KEY_IS(KEY_MAX_EASIZE) || + KEY_IS(KEY_DEFAULT_EASIZE) || + KEY_IS(KEY_MAX_COOKIESIZE) || + KEY_IS(KEY_DEFAULT_COOKIESIZE) || + KEY_IS(KEY_CONN_DATA)) { + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); + + /* + * Forwarding this request to first MDS, it should know LOV + * desc. 
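When the hash type of a striped directory is unknown, the unlink path above falls back to probing the stripes one after another until the entry is found (the try_next_stripe loop); the cross-MDT -EREMOTE resend follows the same retry shape. Below is a minimal userspace sketch of that stripe-walking fallback, with a made-up stripe_unlink() helper and data, not the Lustre API.

#include <errno.h>
#include <stdio.h>

#define NUM_STRIPES 3

/* Toy lookup: the entry only exists in stripe 2.  Purely illustrative. */
static int stripe_unlink(int stripe, const char *name)
{
        (void)name;
        return (stripe == 2) ? 0 : -ENOENT;
}

int main(void)
{
        const char *name = "victim";
        int stripe;
        int rc = -ENOENT;

        /* With an unknown hash type the entry may live in any stripe, so
         * walk them in order until the unlink succeeds or every stripe has
         * answered -ENOENT (cf. the try_next_stripe loop above). */
        for (stripe = 0; stripe < NUM_STRIPES; stripe++) {
                rc = stripe_unlink(stripe, name);
                if (rc != -ENOENT)
                        break;
        }

        if (rc == 0)
                printf("%s unlinked from stripe %d\n", name, stripe);
        else
                printf("unlink failed with %d\n", rc);
        return 0;
}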
+ */ + rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key, + vallen, val, NULL); + if (!rc && KEY_IS(KEY_CONN_DATA)) + exp->exp_connect_data = *(struct obd_connect_data *)val; RETURN(rc); } else if (KEY_IS(KEY_TGT_COUNT)) { *((int *)val) = lmv->desc.ld_tgt_count; @@ -2697,7 +2887,7 @@ int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, obd_count keylen, void *key, obd_count vallen, void *val, struct ptlrpc_request_set *set) { - struct lmv_tgt_desc *tgt; + struct lmv_tgt_desc *tgt = NULL; struct obd_device *obd; struct lmv_obd *lmv; int rc = 0; @@ -2714,11 +2904,11 @@ int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX)) { int i, err = 0; - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - tgt = &lmv->tgts[i]; + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + tgt = lmv->tgts[i]; - if (!tgt->ltd_exp) - continue; + if (tgt == NULL || tgt->ltd_exp == NULL) + continue; err = obd_set_info_async(env, tgt->ltd_exp, keylen, key, vallen, val, set); @@ -2732,108 +2922,260 @@ int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp, RETURN(-EINVAL); } -int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, - struct lov_stripe_md *lsm) +static int lmv_pack_md_v1(const struct lmv_stripe_md *lsm, + struct lmv_mds_md_v1 *lmm1) { - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_stripe_md *meap; - struct lmv_stripe_md *lsmp; - int mea_size; - int i; - ENTRY; + int cplen; + int i; - mea_size = lmv_get_easize(lmv); - if (!lmmp) - RETURN(mea_size); + lmm1->lmv_magic = cpu_to_le32(lsm->lsm_md_magic); + lmm1->lmv_stripe_count = cpu_to_le32(lsm->lsm_md_stripe_count); + lmm1->lmv_master_mdt_index = cpu_to_le32(lsm->lsm_md_master_mdt_index); + lmm1->lmv_hash_type = cpu_to_le32(lsm->lsm_md_hash_type); + cplen = strlcpy(lmm1->lmv_pool_name, lsm->lsm_md_pool_name, + sizeof(lmm1->lmv_pool_name)); + if (cplen >= sizeof(lmm1->lmv_pool_name)) + return -E2BIG; - if (*lmmp && !lsm) { - OBD_FREE_LARGE(*lmmp, mea_size); - *lmmp = NULL; - RETURN(0); - } + for (i = 0; i < lsm->lsm_md_stripe_count; i++) + fid_cpu_to_le(&lmm1->lmv_stripe_fids[i], + &lsm->lsm_md_oinfo[i].lmo_fid); + return 0; +} - if (*lmmp == NULL) { - OBD_ALLOC_LARGE(*lmmp, mea_size); - if (*lmmp == NULL) - RETURN(-ENOMEM); - } +int lmv_pack_md(union lmv_mds_md **lmmp, const struct lmv_stripe_md *lsm, + int stripe_count) +{ + int lmm_size = 0; + bool allocated = false; + int rc = 0; + ENTRY; - if (!lsm) - RETURN(mea_size); + LASSERT(lmmp != NULL); + /* Free lmm */ + if (*lmmp != NULL && lsm == NULL) { + int stripe_count; + + stripe_count = lmv_mds_md_stripe_count_get(*lmmp); + lmm_size = lmv_mds_md_size(stripe_count, + le32_to_cpu((*lmmp)->lmv_magic)); + if (lmm_size == 0) + RETURN(-EINVAL); + OBD_FREE(*lmmp, lmm_size); + *lmmp = NULL; + RETURN(0); + } - lsmp = (struct lmv_stripe_md *)lsm; - meap = (struct lmv_stripe_md *)*lmmp; + /* Alloc lmm */ + if (*lmmp == NULL && lsm == NULL) { + lmm_size = lmv_mds_md_size(stripe_count, LMV_MAGIC); + LASSERT(lmm_size > 0); + OBD_ALLOC(*lmmp, lmm_size); + if (*lmmp == NULL) + RETURN(-ENOMEM); + lmv_mds_md_stripe_count_set(*lmmp, stripe_count); + (*lmmp)->lmv_magic = cpu_to_le32(LMV_MAGIC); + RETURN(lmm_size); + } - if (lsmp->mea_magic != MEA_MAGIC_LAST_CHAR && - lsmp->mea_magic != MEA_MAGIC_ALL_CHARS) - RETURN(-EINVAL); + /* pack lmm */ + LASSERT(lsm != NULL); + lmm_size = lmv_mds_md_size(lsm->lsm_md_stripe_count, lsm->lsm_md_magic); + if (*lmmp == NULL) 
{ + OBD_ALLOC(*lmmp, lmm_size); + if (*lmmp == NULL) + RETURN(-ENOMEM); + allocated = true; + } - meap->mea_magic = cpu_to_le32(lsmp->mea_magic); - meap->mea_count = cpu_to_le32(lsmp->mea_count); - meap->mea_master = cpu_to_le32(lsmp->mea_master); + switch (lsm->lsm_md_magic) { + case LMV_MAGIC_V1: + rc = lmv_pack_md_v1(lsm, &(*lmmp)->lmv_md_v1); + break; + default: + rc = -EINVAL; + break; + } - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - meap->mea_ids[i] = meap->mea_ids[i]; - fid_cpu_to_le(&meap->mea_ids[i], &meap->mea_ids[i]); - } + if (rc != 0 && allocated) { + OBD_FREE(*lmmp, lmm_size); + *lmmp = NULL; + } - RETURN(mea_size); + RETURN(lmm_size); } +EXPORT_SYMBOL(lmv_pack_md); -int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, - struct lov_mds_md *lmm, int lmm_size) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lmv_stripe_md **tmea = (struct lmv_stripe_md **)lsmp; - struct lmv_stripe_md *mea = (struct lmv_stripe_md *)lmm; - struct lmv_obd *lmv = &obd->u.lmv; - int mea_size; - int i; - __u32 magic; - ENTRY; +static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm, + const struct lmv_mds_md_v1 *lmm1) +{ + struct lmv_obd *lmv = &exp->exp_obd->u.lmv; + int stripe_count; + int cplen; + int i; + int rc = 0; + ENTRY; - mea_size = lmv_get_easize(lmv); - if (lsmp == NULL) - return mea_size; + lsm->lsm_md_magic = le32_to_cpu(lmm1->lmv_magic); + lsm->lsm_md_stripe_count = le32_to_cpu(lmm1->lmv_stripe_count); + lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index); + if (OBD_FAIL_CHECK(OBD_FAIL_UNKNOWN_LMV_STRIPE)) + lsm->lsm_md_hash_type = LMV_HASH_TYPE_UNKNOWN; + else + lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type); + lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version); + cplen = strlcpy(lsm->lsm_md_pool_name, lmm1->lmv_pool_name, + sizeof(lsm->lsm_md_pool_name)); + + if (cplen >= sizeof(lsm->lsm_md_pool_name)) + RETURN(-E2BIG); + + CDEBUG(D_INFO, "unpack lsm count %d, master %d hash_type %d" + "layout_version %d\n", lsm->lsm_md_stripe_count, + lsm->lsm_md_master_mdt_index, lsm->lsm_md_hash_type, + lsm->lsm_md_layout_version); + + stripe_count = le32_to_cpu(lmm1->lmv_stripe_count); + for (i = 0; i < le32_to_cpu(stripe_count); i++) { + fid_le_to_cpu(&lsm->lsm_md_oinfo[i].lmo_fid, + &lmm1->lmv_stripe_fids[i]); + rc = lmv_fld_lookup(lmv, &lsm->lsm_md_oinfo[i].lmo_fid, + &lsm->lsm_md_oinfo[i].lmo_mds); + if (rc != 0) + RETURN(rc); + CDEBUG(D_INFO, "unpack fid #%d "DFID"\n", i, + PFID(&lsm->lsm_md_oinfo[i].lmo_fid)); + } - if (*lsmp != NULL && lmm == NULL) { - OBD_FREE_LARGE(*tmea, mea_size); - *lsmp = NULL; - RETURN(0); - } + RETURN(rc); +} - LASSERT(mea_size == lmm_size); +int lmv_unpack_md(struct obd_export *exp, struct lmv_stripe_md **lsmp, + const union lmv_mds_md *lmm, int stripe_count) +{ + struct lmv_stripe_md *lsm; + int lsm_size; + int rc; + bool allocated = false; + ENTRY; - OBD_ALLOC_LARGE(*tmea, mea_size); - if (*tmea == NULL) - RETURN(-ENOMEM); + LASSERT(lsmp != NULL); + + lsm = *lsmp; + /* Free memmd */ + if (lsm != NULL && lmm == NULL) { + int i; + for (i = 0; i < lsm->lsm_md_stripe_count; i++) { + /* For migrating inode, the master stripe and master + * object will be the same, so do not need iput, see + * ll_update_lsm_md */ + if (!(lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION && + i == 0) && lsm->lsm_md_oinfo[i].lmo_root != NULL) + iput(lsm->lsm_md_oinfo[i].lmo_root); + } + lsm_size = lmv_stripe_md_size(lsm->lsm_md_stripe_count); + OBD_FREE(lsm, lsm_size); + *lsmp = NULL; + 
RETURN(0); + } - if (!lmm) - RETURN(mea_size); + /* Alloc memmd */ + if (lsm == NULL && lmm == NULL) { + lsm_size = lmv_stripe_md_size(stripe_count); + OBD_ALLOC(lsm, lsm_size); + if (lsm == NULL) + RETURN(-ENOMEM); + lsm->lsm_md_stripe_count = stripe_count; + *lsmp = lsm; + RETURN(0); + } - if (mea->mea_magic == MEA_MAGIC_LAST_CHAR || - mea->mea_magic == MEA_MAGIC_ALL_CHARS || - mea->mea_magic == MEA_MAGIC_HASH_SEGMENT) - { - magic = le32_to_cpu(mea->mea_magic); - } else { - /* - * Old mea is not handled here. - */ - CERROR("Old not supportable EA is found\n"); - LBUG(); - } + if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE) + RETURN(-EPERM); - (*tmea)->mea_magic = magic; - (*tmea)->mea_count = le32_to_cpu(mea->mea_count); - (*tmea)->mea_master = le32_to_cpu(mea->mea_master); + /* Unpack memmd */ + if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 && + le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) { + CERROR("%s: invalid lmv magic %x: rc = %d\n", + exp->exp_obd->obd_name, le32_to_cpu(lmm->lmv_magic), + -EIO); + RETURN(-EIO); + } - for (i = 0; i < (*tmea)->mea_count; i++) { - (*tmea)->mea_ids[i] = mea->mea_ids[i]; - fid_le_to_cpu(&(*tmea)->mea_ids[i], &(*tmea)->mea_ids[i]); - } - RETURN(mea_size); + if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1) + lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm)); + else + /** + * Unpack default dirstripe(lmv_user_md) to lmv_stripe_md, + * stripecount should be 0 then. + */ + lsm_size = lmv_stripe_md_size(0); + + lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm)); + if (lsm == NULL) { + OBD_ALLOC(lsm, lsm_size); + if (lsm == NULL) + RETURN(-ENOMEM); + allocated = true; + *lsmp = lsm; + } + + switch (le32_to_cpu(lmm->lmv_magic)) { + case LMV_MAGIC_V1: + rc = lmv_unpack_md_v1(exp, lsm, &lmm->lmv_md_v1); + break; + default: + CERROR("%s: unrecognized magic %x\n", exp->exp_obd->obd_name, + le32_to_cpu(lmm->lmv_magic)); + rc = -EINVAL; + break; + } + + if (rc != 0 && allocated) { + OBD_FREE(lsm, lsm_size); + *lsmp = NULL; + lsm_size = rc; + } + RETURN(lsm_size); +} + +int lmv_alloc_memmd(struct lmv_stripe_md **lsmp, int stripes) +{ + return lmv_unpack_md(NULL, lsmp, NULL, stripes); +} +EXPORT_SYMBOL(lmv_alloc_memmd); + +void lmv_free_memmd(struct lmv_stripe_md *lsm) +{ + lmv_unpack_md(NULL, &lsm, NULL, 0); +} +EXPORT_SYMBOL(lmv_free_memmd); + +int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp, + struct lov_mds_md *lmm, int disk_len) +{ + return lmv_unpack_md(exp, (struct lmv_stripe_md **)lsmp, + (union lmv_mds_md *)lmm, disk_len); +} + +int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp, + struct lov_stripe_md *lsm) +{ + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv_obd = &obd->u.lmv; + const struct lmv_stripe_md *lmv = (struct lmv_stripe_md *)lsm; + int stripe_count; + + if (lmmp == NULL) { + if (lsm != NULL) + stripe_count = lmv->lsm_md_stripe_count; + else + stripe_count = lmv_obd->desc.ld_tgt_count; + + return lmv_mds_md_size(stripe_count, LMV_MAGIC_V1); + } + + return lmv_pack_md((union lmv_mds_md **)lmmp, lmv, 0); } static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, @@ -2844,33 +3186,37 @@ static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid, struct lmv_obd *lmv = &obd->u.lmv; int rc = 0; int err; - int i; + __u32 i; ENTRY; LASSERT(fid != NULL); - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - if (!lmv->tgts[i].ltd_exp || !lmv->tgts[i].ltd_active) - continue; + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + struct 
lmv_tgt_desc *tgt = lmv->tgts[i]; - err = md_cancel_unused(lmv->tgts[i].ltd_exp, fid, - policy, mode, flags, opaque); - if (!rc) - rc = err; - } - RETURN(rc); + if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) + continue; + + err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags, + opaque); + if (!rc) + rc = err; + } + RETURN(rc); } int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data, __u64 *bits) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - int rc; - ENTRY; + struct lmv_obd *lmv = &exp->exp_obd->u.lmv; + struct lmv_tgt_desc *tgt = lmv->tgts[0]; + int rc; + ENTRY; - rc = md_set_lock_data(lmv->tgts[0].ltd_exp, lockh, data, bits); - RETURN(rc); + if (tgt == NULL || tgt->ltd_exp == NULL) + RETURN(-EINVAL); + rc = md_set_lock_data(tgt->ltd_exp, lockh, data, bits); + RETURN(rc); } ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags, @@ -2878,67 +3224,87 @@ ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags, ldlm_policy_data_t *policy, ldlm_mode_t mode, struct lustre_handle *lockh) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - ldlm_mode_t rc; - int i; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + ldlm_mode_t rc; + int tgt; + int i; + ENTRY; - CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid)); + CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid)); /* - * With CMD every object can have two locks in different namespaces: - * lookup lock in space of mds storing direntry and update/open lock in - * space of mds storing inode. Thus we check all targets, not only that - * one fid was created in. - */ - for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - rc = md_lock_match(lmv->tgts[i].ltd_exp, flags, fid, - type, policy, mode, lockh); - if (rc) - RETURN(rc); - } + * With DNE every object can have two locks in different namespaces: + * lookup lock in space of MDT storing direntry and update/open lock in + * space of MDT storing inode. Try the MDT that the FID maps to first, + * since this can be easily found, and only try others if that fails. 
+ */ + for (i = 0, tgt = lmv_find_target_index(lmv, fid); + i < lmv->desc.ld_tgt_count; + i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) { + if (tgt < 0) { + CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n", + obd->obd_name, PFID(fid), tgt); + tgt = 0; + } - RETURN(0); + if (lmv->tgts[tgt] == NULL || + lmv->tgts[tgt]->ltd_exp == NULL || + lmv->tgts[tgt]->ltd_active == 0) + continue; + + rc = md_lock_match(lmv->tgts[tgt]->ltd_exp, flags, fid, + type, policy, mode, lockh); + if (rc) + RETURN(rc); + } + + RETURN(0); } int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req, - struct obd_export *dt_exp, struct obd_export *md_exp, - struct lustre_md *md) + struct obd_export *dt_exp, struct obd_export *md_exp, + struct lustre_md *md) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - int rc; - ENTRY; - rc = md_get_lustre_md(lmv->tgts[0].ltd_exp, req, dt_exp, md_exp, md); - RETURN(rc); + struct lmv_obd *lmv = &exp->exp_obd->u.lmv; + struct lmv_tgt_desc *tgt = lmv->tgts[0]; + + if (tgt == NULL || tgt->ltd_exp == NULL) + RETURN(-EINVAL); + + return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md); } int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt = lmv->tgts[0]; + ENTRY; - if (md->mea) - obd_free_memmd(exp, (void *)&md->mea); - RETURN(md_free_lustre_md(lmv->tgts[0].ltd_exp, md)); + if (md->lmv != NULL) { + lmv_free_memmd(md->lmv); + md->lmv = NULL; + } + if (tgt == NULL || tgt->ltd_exp == NULL) + RETURN(-EINVAL); + RETURN(md_free_lustre_md(lmv->tgts[0]->ltd_exp, md)); } int lmv_set_open_replay_data(struct obd_export *exp, - struct obd_client_handle *och, - struct ptlrpc_request *open_req) + struct obd_client_handle *och, + struct lookup_intent *it) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - ENTRY; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt; + ENTRY; - tgt = lmv_find_target(lmv, &och->och_fid); - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); + tgt = lmv_find_target(lmv, &och->och_fid); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - RETURN(md_set_open_replay_data(tgt->ltd_exp, och, open_req)); + RETURN(md_set_open_replay_data(tgt->ltd_exp, och, it)); } int lmv_clear_open_replay_data(struct obd_export *exp, @@ -3001,54 +3367,37 @@ static int lmv_renew_capa(struct obd_export *exp, struct obd_capa *oc, } int lmv_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req, - const struct req_msg_field *field, struct obd_capa **oc) + const struct req_msg_field *field, struct obd_capa **oc) { - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - int rc; + struct lmv_obd *lmv = &exp->exp_obd->u.lmv; + struct lmv_tgt_desc *tgt = lmv->tgts[0]; - ENTRY; - rc = md_unpack_capa(lmv->tgts[0].ltd_exp, req, field, oc); - RETURN(rc); + if (tgt == NULL || tgt->ltd_exp == NULL) + RETURN(-EINVAL); + return md_unpack_capa(tgt->ltd_exp, req, field, oc); } int lmv_intent_getattr_async(struct obd_export *exp, struct md_enqueue_info *minfo, struct ldlm_enqueue_info *einfo) { - struct md_op_data *op_data = &minfo->mi_data; - struct obd_device *obd = exp->exp_obd; - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_object *obj; - struct lmv_tgt_desc *tgt = NULL; - int rc; - int sidx; - ENTRY; - - rc = 
lmv_check_connect(obd); - if (rc) - RETURN(rc); - - if (op_data->op_namelen) { - obj = lmv_object_find(obd, &op_data->op_fid1); - if (obj) { - sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount, - (char *)op_data->op_name, - op_data->op_namelen); - op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid; - tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds); - lmv_object_put(obj); - } - } + struct md_op_data *op_data = &minfo->mi_data; + struct obd_device *obd = exp->exp_obd; + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt = NULL; + int rc; + ENTRY; - if (tgt == NULL) - tgt = lmv_find_target(lmv, &op_data->op_fid1); + rc = lmv_check_connect(obd); + if (rc) + RETURN(rc); - if (IS_ERR(tgt)) - RETURN(PTR_ERR(tgt)); + tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1); + if (IS_ERR(tgt)) + RETURN(PTR_ERR(tgt)); - rc = md_intent_getattr_async(tgt->ltd_exp, minfo, einfo); - RETURN(rc); + rc = md_intent_getattr_async(tgt->ltd_exp, minfo, einfo); + RETURN(rc); } int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, @@ -3072,25 +3421,45 @@ int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it, RETURN(rc); } +int lmv_get_fid_from_lsm(struct obd_export *exp, + const struct lmv_stripe_md *lsm, + const char *name, int namelen, struct lu_fid *fid) +{ + const struct lmv_oinfo *oinfo; + + LASSERT(lsm != NULL); + oinfo = lsm_name_to_stripe_info(lsm, name, namelen); + if (IS_ERR(oinfo)) + return PTR_ERR(oinfo); + + *fid = oinfo->lmo_fid; + + RETURN(0); +} + /** * For lmv, only need to send request to master MDT, and the master MDT will * process with other slave MDTs. The only exception is Q_GETOQUOTA for which * we directly fetch data from the slave MDTs. */ int lmv_quotactl(struct obd_device *unused, struct obd_export *exp, - struct obd_quotactl *oqctl) -{ - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt = &lmv->tgts[0]; - int rc = 0, i; - __u64 curspace, curinodes; - ENTRY; + struct obd_quotactl *oqctl) +{ + struct obd_device *obd = class_exp2obd(exp); + struct lmv_obd *lmv = &obd->u.lmv; + struct lmv_tgt_desc *tgt = lmv->tgts[0]; + int rc = 0; + __u32 i; + __u64 curspace, curinodes; + ENTRY; - if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) { - CERROR("master lmv inactive\n"); - RETURN(-EIO); - } + if (tgt == NULL || + tgt->ltd_exp == NULL || + !tgt->ltd_active || + lmv->desc.ld_tgt_count == 0) { + CERROR("master lmv inactive\n"); + RETURN(-EIO); + } if (oqctl->qc_cmd != Q_GETOQUOTA) { rc = obd_quotactl(tgt->ltd_exp, oqctl); @@ -3099,15 +3468,11 @@ int lmv_quotactl(struct obd_device *unused, struct obd_export *exp, curspace = curinodes = 0; for (i = 0; i < lmv->desc.ld_tgt_count; i++) { - int err; - tgt = &lmv->tgts[i]; + int err; + tgt = lmv->tgts[i]; - if (tgt->ltd_exp == NULL) - continue; - if (!tgt->ltd_active) { - CDEBUG(D_HA, "mdt %d is inactive.\n", i); - continue; - } + if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) + continue; err = obd_quotactl(tgt->ltd_exp, oqctl); if (err) { @@ -3128,19 +3493,20 @@ int lmv_quotactl(struct obd_device *unused, struct obd_export *exp, int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp, struct obd_quotactl *oqctl) { - struct obd_device *obd = class_exp2obd(exp); - struct lmv_obd *lmv = &obd->u.lmv; - struct lmv_tgt_desc *tgt; - int i, rc = 0; - ENTRY; - - for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) { - int err; + struct obd_device *obd = class_exp2obd(exp); + struct lmv_obd *lmv = 
&obd->u.lmv; + struct lmv_tgt_desc *tgt; + __u32 i; + int rc = 0; + ENTRY; - if (!tgt->ltd_active) { - CERROR("lmv idx %d inactive\n", i); - RETURN(-EIO); - } + for (i = 0; i < lmv->desc.ld_tgt_count; i++) { + int err; + tgt = lmv->tgts[i]; + if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) { + CERROR("lmv idx %d inactive\n", i); + RETURN(-EIO); + } err = obd_quotacheck(tgt->ltd_exp, oqctl); if (err && !rc) @@ -3150,6 +3516,46 @@ int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp, RETURN(rc); } +int lmv_update_lsm_md(struct obd_export *exp, struct lmv_stripe_md *lsm, + struct mdt_body *body, ldlm_blocking_callback cb_blocking) +{ + return lmv_revalidate_slaves(exp, body, lsm, cb_blocking, 0); +} + +int lmv_merge_attr(struct obd_export *exp, const struct lmv_stripe_md *lsm, + struct cl_attr *attr) +{ + int i; + + for (i = 0; i < lsm->lsm_md_stripe_count; i++) { + struct inode *inode = lsm->lsm_md_oinfo[i].lmo_root; + + CDEBUG(D_INFO, ""DFID" size %llu, nlink %u, atime %lu ctime" + "%lu, mtime %lu.\n", PFID(&lsm->lsm_md_oinfo[i].lmo_fid), + i_size_read(inode), inode->i_nlink, + LTIME_S(inode->i_atime), LTIME_S(inode->i_ctime), + LTIME_S(inode->i_mtime)); + + /* for slave stripe, it needs to subtract nlink for . and .. */ + if (i != 0) + attr->cat_nlink += inode->i_nlink - 2; + else + attr->cat_nlink = inode->i_nlink; + + attr->cat_size += i_size_read(inode); + + if (attr->cat_atime < LTIME_S(inode->i_atime)) + attr->cat_atime = LTIME_S(inode->i_atime); + + if (attr->cat_ctime < LTIME_S(inode->i_ctime)) + attr->cat_ctime = LTIME_S(inode->i_ctime); + + if (attr->cat_mtime < LTIME_S(inode->i_mtime)) + attr->cat_mtime = LTIME_S(inode->i_mtime); + } + return 0; +} + struct obd_ops lmv_obd_ops = { .o_owner = THIS_MODULE, .o_setup = lmv_setup, @@ -3172,7 +3578,7 @@ struct obd_ops lmv_obd_ops = { struct md_ops lmv_md_ops = { .m_getstatus = lmv_getstatus, - .m_change_cbdata = lmv_change_cbdata, + .m_null_inode = lmv_null_inode, .m_find_cbdata = lmv_find_cbdata, .m_close = lmv_close, .m_create = lmv_create, @@ -3186,56 +3592,39 @@ struct md_ops lmv_md_ops = { .m_rename = lmv_rename, .m_setattr = lmv_setattr, .m_setxattr = lmv_setxattr, - .m_sync = lmv_sync, - .m_readpage = lmv_readpage, + .m_fsync = lmv_fsync, + .m_read_page = lmv_read_page, .m_unlink = lmv_unlink, .m_init_ea_size = lmv_init_ea_size, .m_cancel_unused = lmv_cancel_unused, .m_set_lock_data = lmv_set_lock_data, .m_lock_match = lmv_lock_match, - .m_get_lustre_md = lmv_get_lustre_md, - .m_free_lustre_md = lmv_free_lustre_md, + .m_get_lustre_md = lmv_get_lustre_md, + .m_free_lustre_md = lmv_free_lustre_md, + .m_update_lsm_md = lmv_update_lsm_md, + .m_merge_attr = lmv_merge_attr, .m_set_open_replay_data = lmv_set_open_replay_data, .m_clear_open_replay_data = lmv_clear_open_replay_data, .m_renew_capa = lmv_renew_capa, .m_unpack_capa = lmv_unpack_capa, .m_get_remote_perm = lmv_get_remote_perm, .m_intent_getattr_async = lmv_intent_getattr_async, - .m_revalidate_lock = lmv_revalidate_lock + .m_revalidate_lock = lmv_revalidate_lock, + .m_get_fid_from_lsm = lmv_get_fid_from_lsm, }; int __init lmv_init(void) { - struct lprocfs_static_vars lvars; - int rc; - - lmv_object_cache = cfs_mem_cache_create("lmv_objects", - sizeof(struct lmv_object), - 0, 0); - if (!lmv_object_cache) { - CERROR("Error allocating lmv objects cache\n"); - return -ENOMEM; - } - - lprocfs_lmv_init_vars(&lvars); - - rc = class_register_type(&lmv_obd_ops, &lmv_md_ops, - lvars.module_vars, LUSTRE_LMV_NAME, NULL); - if (rc) - 
cfs_mem_cache_destroy(lmv_object_cache); - - return rc; + return class_register_type(&lmv_obd_ops, &lmv_md_ops, true, NULL, +#ifndef HAVE_ONLY_PROCFS_SEQ + NULL, +#endif + LUSTRE_LMV_NAME, NULL); } -#ifdef __KERNEL__ static void lmv_exit(void) { class_unregister_type(LUSTRE_LMV_NAME); - - LASSERTF(cfs_atomic_read(&lmv_object_count) == 0, - "Can't free lmv objects cache, %d object(s) busy\n", - cfs_atomic_read(&lmv_object_count)); - cfs_mem_cache_destroy(lmv_object_cache); } MODULE_AUTHOR("Sun Microsystems, Inc. "); @@ -3244,4 +3633,3 @@ MODULE_LICENSE("GPL"); module_init(lmv_init); module_exit(lmv_exit); -#endif
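
The metadata helpers in this patch share one calling convention: lmv_unpack_md() frees the in-memory stripe metadata when given an existing *lsmp and a NULL lmm, only allocates an lsm with the requested stripe count when both are NULL, and otherwise unpacks the wire buffer, allocating the lsm on demand and returning its size (or a negative errno). lmv_alloc_memmd(), lmv_free_memmd() and lmv_unpackmd() are thin shims over those three shapes. The sketch below only illustrates that convention; the function name and its arguments ("exp", "lmm", "stripes") are hypothetical and not part of the patch.

	/* Illustrative only -- not part of the patch. 'exp', 'lmm' and
	 * 'stripes' are assumed to be supplied by the caller. */
	static int lmv_md_roundtrip_example(struct obd_export *exp,
					    const union lmv_mds_md *lmm,
					    int stripes)
	{
		struct lmv_stripe_md *lsm = NULL;
		int rc;

		/* allocate an empty in-memory md with 'stripes' slots */
		rc = lmv_alloc_memmd(&lsm, stripes);	/* lmm == NULL, *lsmp == NULL */
		if (rc < 0)
			return rc;
		lmv_free_memmd(lsm);			/* lmm == NULL, *lsmp != NULL */
		lsm = NULL;

		/* unpack a buffer received from the MDT; the lsm is allocated
		 * on demand and the return value is its size or -errno */
		rc = lmv_unpack_md(exp, &lsm, lmm, 0);
		if (rc < 0)
			return rc;
		lmv_free_memmd(lsm);
		return 0;
	}

In the same spirit, lmv_merge_attr() folds the per-stripe inode attributes into a single client view: stripe 0 supplies the base link count and every other stripe adds i_nlink - 2 (its own "." and ".." are internal), sizes are summed, and the newest value of each timestamp wins. For example, a directory with three stripes whose stripe 0 inode has i_nlink = 7 and whose other stripes each have i_nlink = 5 is reported with cat_nlink = 7 + (5 - 2) + (5 - 2) = 13.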