X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Flod%2Flod_qos.c;h=065ade07f697281079b4097a742fe8d5e7fd679c;hb=979203503af2f77d51bcf27375a1a09f5f28a4a3;hp=b1aa0c1f8b9ec7a2d1ccde53fafd7266d683db92;hpb=cee1ab3997d70ee5eeece41b50fbe4479eda9d14;p=fs%2Flustre-release.git diff --git a/lustre/lod/lod_qos.c b/lustre/lod/lod_qos.c index b1aa0c1..065ade0 100644 --- a/lustre/lod/lod_qos.c +++ b/lustre/lod/lod_qos.c @@ -23,7 +23,7 @@ * Copyright 2009 Sun Microsystems, Inc. All rights reserved * Use is subject to license terms. * - * Copyright (c) 2012, Intel Corporation. + * Copyright (c) 2012, 2013, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ @@ -31,13 +31,15 @@ * * lustre/lod/lod_qos.c * + * Implementation of different allocation algorithm used + * to distribute objects and data among OSTs. */ #define DEBUG_SUBSYSTEM S_LOV +#include #include #include -#include #include #include "lod_internal.h" @@ -59,12 +61,25 @@ #define TGT_BAVAIL(i) (OST_TGT(lod,i)->ltd_statfs.os_bavail * \ OST_TGT(lod,i)->ltd_statfs.os_bsize) +/** + * Add a new target to Quality of Service (QoS) target table. + * + * Add a new OST target to the structure representing an OSS. Resort the list + * of known OSSs by the number of OSTs attached to each OSS. The OSS list is + * protected internally and no external locking is required. + * + * \param[in] lod LOD device + * \param[in] ost_desc OST description + * + * \retval 0 on success + * \retval -ENOMEM on error + */ int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) { - struct lov_qos_oss *oss = NULL, *temposs; + struct lod_qos_oss *oss = NULL, *temposs; struct obd_export *exp = ost_desc->ltd_exp; int rc = 0, found = 0; - cfs_list_t *list; + struct list_head *list; ENTRY; down_write(&lod->lod_qos.lq_rw_sem); @@ -73,7 +88,7 @@ int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) * but there is no official API to access information like this * with OSD API. */ - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { if (obd_uuid_equals(&oss->lqo_uuid, &exp->exp_connection->c_remote_uuid)) { found++; @@ -89,7 +104,7 @@ int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) sizeof(oss->lqo_uuid)); } else { /* Assume we have to move this one */ - cfs_list_del(&oss->lqo_oss_list); + list_del(&oss->lqo_oss_list); } oss->lqo_ost_count++; @@ -102,13 +117,13 @@ int qos_add_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) /* Add sorted by # of OSTs. Find the first entry that we're bigger than... */ list = &lod->lod_qos.lq_oss_list; - cfs_list_for_each_entry(temposs, list, lqo_oss_list) { + list_for_each_entry(temposs, list, lqo_oss_list) { if (oss->lqo_ost_count > temposs->lqo_ost_count) break; } /* ...and add before it. If we're the first or smallest, temposs points to the list head, and we add to the end. */ - cfs_list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list); + list_add_tail(&oss->lqo_oss_list, &temposs->lqo_oss_list); lod->lod_qos.lq_dirty = 1; lod->lod_qos.lq_rr.lqr_dirty = 1; @@ -118,9 +133,21 @@ out: RETURN(rc); } +/** + * Remove OST target from QoS table. + * + * Removes given OST target from QoS table and releases related OSS structure + * if no OSTs remain on the OSS. 
+ * + * \param[in] lod LOD device + * \param[in] ost_desc OST description + * + * \retval 0 on success + * \retval -ENOENT if no OSS was found + */ int qos_del_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) { - struct lov_qos_oss *oss; + struct lod_qos_oss *oss; int rc = 0; ENTRY; @@ -133,7 +160,7 @@ int qos_del_tgt(struct lod_device *lod, struct lod_tgt_desc *ost_desc) if (oss->lqo_ost_count == 0) { CDEBUG(D_QOS, "removing OSS %s\n", obd_uuid2str(&oss->lqo_uuid)); - cfs_list_del(&oss->lqo_oss_list); + list_del(&oss->lqo_oss_list); ost_desc->ltd_qos.ltq_oss = NULL; OBD_FREE_PTR(oss); } @@ -145,6 +172,24 @@ out: RETURN(rc); } +/** + * Check whether the target is available for new OST objects. + * + * Request statfs data from the given target and verify it's active and not + * read-only. If so, then it can be used to place new OST objects. This + * function also maintains the number of active/inactive targets and sets + * dirty flags if those numbers change so others can run re-balance procedures. + * No external locking is required. + * + * \param[in] env execution environment for this thread + * \param[in] d LOD device + * \param[in] index index of OST target to check + * \param[out] sfs buffer for statfs data + * + * \retval 0 if the target is good + * \retval negative negated errno on error + + */ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d, int index, struct obd_statfs *sfs) { @@ -159,6 +204,10 @@ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d, if (rc && rc != -ENOTCONN) CERROR("%s: statfs: rc = %d\n", lod2obd(d)->obd_name, rc); + /* If the OST is readonly then we can't allocate objects there */ + if (sfs->os_state & OS_STATE_READONLY) + rc = -EROFS; + /* check whether device has changed state (active, inactive) */ if (rc != 0 && ost->ltd_active) { /* turned inactive? */ @@ -175,7 +224,9 @@ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d, spin_unlock(&d->lod_desc_lock); } else if (rc == 0 && ost->ltd_active == 0) { /* turned active? */ - LASSERT(d->lod_desc.ld_active_tgt_count < d->lod_ostnr); + LASSERTF(d->lod_desc.ld_active_tgt_count < d->lod_ostnr, + "active tgt count %d, ost nr %d\n", + d->lod_desc.ld_active_tgt_count, d->lod_ostnr); spin_lock(&d->lod_desc_lock); if (ost->ltd_active == 0) { ost->ltd_active = 1; @@ -188,15 +239,26 @@ static int lod_statfs_and_check(const struct lu_env *env, struct lod_device *d, spin_unlock(&d->lod_desc_lock); } - return rc; + RETURN(rc); } +/** + * Maintain per-target statfs data. + * + * The function refreshes statfs data for all the targets every N seconds. + * The actual N is controlled via procfs and set to LOV_DESC_QOS_MAXAGE_DEFAULT + * initially. + * + * \param[in] env execution environment for this thread + * \param[in] lod LOD device + */ static void lod_qos_statfs_update(const struct lu_env *env, struct lod_device *lod) { struct obd_device *obd = lod2obd(lod); struct ost_pool *osts = &(lod->lod_pool_info); - int i, idx, rc = 0; + unsigned int i; + int idx, rc = 0; __u64 max_age, avail; ENTRY; @@ -225,16 +287,32 @@ static void lod_qos_statfs_update(const struct lu_env *env, out: up_write(&lod->lod_qos.lq_rw_sem); + EXIT; } -/* Recalculate per-object penalties for OSSs and OSTs, - depends on size of each ost in an oss */ +/** + * Calculate per-OST and per-OSS penalties + * + * Re-calculate penalties when the configuration changes, active targets + * change and after statfs refresh (all these are reflected by lq_dirty flag). 
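+ *
+ * As a rough sketch, the per-OST penalty per object computed below is
+ * (prio_wide being the free-space priority expressed as an 8-bit
+ * fixed-point factor):
+ *
+ * \code
+ *   ltq_penalty_per_obj = (prio_wide * (bavail / 2 / num_active)) >> 8;
+ * \endcode
+ *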
+ * On every OST and OSS: decay the penalty by half for every 8x the update + * interval that the device has been idle. That gives lots of time for the + * statfs information to be updated (which the penalty is only a proxy for), + * and avoids penalizing OSS/OSTs under light load. + * See lod_qos_calc_weight() for how penalties are factored into the weight. + * + * \param[in] lod LOD device + * + * \retval 0 on success + * \retval -EAGAIN the number of OSTs isn't enough + */ static int lod_qos_calc_ppo(struct lod_device *lod) { - struct lov_qos_oss *oss; + struct lod_qos_oss *oss; __u64 ba_max, ba_min, temp; __u32 num_active; - int rc, i, prio_wide; + unsigned int i; + int rc, prio_wide; time_t now, age; ENTRY; @@ -246,8 +324,8 @@ static int lod_qos_calc_ppo(struct lod_device *lod) GOTO(out, rc = -EAGAIN); /* find bavail on each OSS */ - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) - oss->lqo_bavail = 0; + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) + oss->lqo_bavail = 0; lod->lod_qos.lq_active_oss_count = 0; /* @@ -277,7 +355,7 @@ static int lod_qos_calc_ppo(struct lod_device *lod) /* per-OST penalty is prio * TGT_bavail / (num_ost - 1) / 2 */ temp >>= 1; - lov_do_div64(temp, num_active); + do_div(temp, num_active); OST_TGT(lod,i)->ltd_qos.ltq_penalty_per_obj = (temp * prio_wide) >> 8; @@ -286,11 +364,7 @@ static int lod_qos_calc_ppo(struct lod_device *lod) age > 32 * lod->lod_desc.ld_qos_maxage) OST_TGT(lod,i)->ltd_qos.ltq_penalty = 0; else if (age > lod->lod_desc.ld_qos_maxage) - /* Decay the penalty by half for every 8x the update - * interval that the device has been idle. That gives - * lots of time for the statfs information to be - * updated (which the penalty is only a proxy for), - * and avoids penalizing OSS/OSTs under light load. */ + /* Decay OST penalty. */ OST_TGT(lod,i)->ltd_qos.ltq_penalty >>= (age / lod->lod_desc.ld_qos_maxage); } @@ -305,9 +379,9 @@ static int lod_qos_calc_ppo(struct lod_device *lod) } /* Per-OSS penalty is prio * oss_avail / oss_osts / (num_oss - 1) / 2 */ - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { temp = oss->lqo_bavail >> 1; - lov_do_div64(temp, oss->lqo_ost_count * num_active); + do_div(temp, oss->lqo_ost_count * num_active); oss->lqo_penalty_per_obj = (temp * prio_wide) >> 8; age = (now - oss->lqo_used) >> 3; @@ -315,11 +389,7 @@ static int lod_qos_calc_ppo(struct lod_device *lod) age > 32 * lod->lod_desc.ld_qos_maxage) oss->lqo_penalty = 0; else if (age > lod->lod_desc.ld_qos_maxage) - /* Decay the penalty by half for every 8x the update - * interval that the device has been idle. That gives - * lots of time for the statfs information to be - * updated (which the penalty is only a proxy for), - * and avoids penalizing OSS/OSTs under light load. */ + /* Decay OSS penalty. */ oss->lqo_penalty >>= age / lod->lod_desc.ld_qos_maxage; } @@ -344,11 +414,21 @@ out: RETURN(rc); } +/** + * Calculate weight for a given OST target. + * + * The final OST weight is the number of bytes available minus the OST and + * OSS penalties. See lod_qos_calc_ppo() for how penalties are calculated. 
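+ *
+ * Expressed as a simplified sketch of the code below:
+ *
+ * \code
+ *   penalties = ltq_penalty + ltq_oss->lqo_penalty;
+ *   weight    = bavail > penalties ? bavail - penalties : 0;
+ * \endcode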
+ * + * \param[in] lod LOD device, where OST targets are listed + * \param[in] i OST target index + * + * \retval 0 + */ static int lod_qos_calc_weight(struct lod_device *lod, int i) { __u64 temp, temp2; - /* Final ost weight = TGT_BAVAIL - ost_penalty - oss_penalty */ temp = TGT_BAVAIL(i); temp2 = OST_TGT(lod,i)->ltd_qos.ltq_penalty + OST_TGT(lod,i)->ltd_qos.ltq_oss->lqo_penalty; @@ -359,13 +439,26 @@ static int lod_qos_calc_weight(struct lod_device *lod, int i) return 0; } -/* We just used this index for a stripe; adjust everyone's weights */ +/** + * Re-calculate weights. + * + * The function is called when some OST target was used for a new object. In + * this case we should re-calculate all the weights to keep new allocations + * balanced well. + * + * \param[in] lod LOD device + * \param[in] osts OST pool where a new object was placed + * \param[in] index OST target where a new object was placed + * \param[out] total_wt new total weight for the pool + * + * \retval 0 + */ static int lod_qos_used(struct lod_device *lod, struct ost_pool *osts, __u32 index, __u64 *total_wt) { struct lod_tgt_desc *ost; - struct lov_qos_oss *oss; - int j; + struct lod_qos_oss *oss; + unsigned int j; ENTRY; ost = OST_TGT(lod,index); @@ -391,7 +484,7 @@ static int lod_qos_used(struct lod_device *lod, struct ost_pool *osts, lod->lod_qos.lq_active_oss_count; /* Decrease all OSS penalties */ - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { if (oss->lqo_penalty < oss->lqo_penalty_per_obj) oss->lqo_penalty = 0; else @@ -438,14 +531,31 @@ static int lod_qos_used(struct lod_device *lod, struct ost_pool *osts, } #define LOV_QOS_EMPTY ((__u32)-1) -/* compute optimal round-robin order, based on OSTs per OSS */ + +/** + * Calculate optimal round-robin order with regard to OSSes. + * + * Place all the OSTs from pool \a src_pool in a special array to be used for + * round-robin (RR) stripe allocation. The placement algorithm interleaves + * OSTs from the different OSSs so that RR allocation can balance OSSs evenly. + * Resorts the targets when the number of active targets changes (because of + * a new target or activation/deactivation). + * + * \param[in] lod LOD device + * \param[in] src_pool OST pool + * \param[in] lqr round-robin list + * + * \retval 0 on success + * \retval -ENOMEM fails to allocate the array + */ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool, - struct lov_qos_rr *lqr) + struct lod_qos_rr *lqr) { - struct lov_qos_oss *oss; + struct lod_qos_oss *oss; struct lod_tgt_desc *ost; unsigned placed, real_count; - int i, rc; + unsigned int i; + int rc; ENTRY; if (!lqr->lqr_dirty) { @@ -483,7 +593,7 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool, /* Place all the OSTs from 1 OSS at the same time. 
*/ placed = 0; - cfs_list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { + list_for_each_entry(oss, &lod->lod_qos.lq_oss_list, lqo_oss_list) { int j = 0; for (i = 0; i < lqr->lqr_pool.op_count; i++) { @@ -534,19 +644,28 @@ static int lod_qos_calc_rr(struct lod_device *lod, struct ost_pool *src_pool, } /** - * A helper function to: - * create in-core lu object on the specified OSP - * declare creation of the object - * IMPORTANT: at this stage object is anonymouos - it has no fid assigned - * this is a workaround till we have natural FIDs on OST - * - * at this point we want to declare (reserve) object for us as - * we can't block at execution (when create method is called). - * otherwise we'd block whole transaction batch + * Instantiate and declare creation of a new object. + * + * The function instantiates LU representation for a new object on the + * specified device. Also it declares an intention to create that + * object on the storage target. + * + * Note lu_object_anon() is used which is a trick with regard to LU/OSD + * infrastructure - in the existing precreation framework we can't assign FID + * at this moment, we do this later once a transaction is started. So the + * special method instantiates FID-less object in the cache and later it + * will get a FID and proper placement in LU cache. + * + * \param[in] env execution environment for this thread + * \param[in] d LOD device + * \param[in] ost_idx OST target index where the object is being created + * \param[in] th transaction handle + * + * \retval object ptr on success, ERR_PTR() otherwise */ static struct dt_object *lod_qos_declare_object_on(const struct lu_env *env, struct lod_device *d, - int ost_idx, + __u32 ost_idx, struct thandle *th) { struct lod_tgt_desc *ost; @@ -557,7 +676,6 @@ static struct dt_object *lod_qos_declare_object_on(const struct lu_env *env, ENTRY; LASSERT(d); - LASSERT(ost_idx >= 0); LASSERT(ost_idx < d->lod_osts_size); ost = OST_TGT(d,ost_idx); LASSERT(ost); @@ -595,7 +713,18 @@ out: RETURN(dt); } -static int min_stripe_count(int stripe_cnt, int flags) +/** + * Calculate a minimum acceptable stripe count. + * + * Return an acceptable stripe count depending on flag LOV_USES_DEFAULT_STRIPE: + * all stripes or 3/4 of stripes. + * + * \param[in] stripe_cnt number of stripes requested + * \param[in] flags 0 or LOV_USES_DEFAULT_STRIPE + * + * \retval acceptable stripecount + */ +static int min_stripe_count(__u32 stripe_cnt, int flags) { return (flags & LOV_USES_DEFAULT_STRIPE ? stripe_cnt - (stripe_cnt / 4) : stripe_cnt); @@ -604,6 +733,17 @@ static int min_stripe_count(int stripe_cnt, int flags) #define LOV_CREATE_RESEED_MULT 30 #define LOV_CREATE_RESEED_MIN 2000 +/** + * Check if an OST is full. + * + * Check whether an OST should be considered full based + * on the given statfs data. + * + * \param[in] msfs statfs data + * + * \retval false not full + * \retval true full + */ static int inline lod_qos_dev_is_full(struct obd_statfs *msfs) { __u64 used; @@ -617,9 +757,20 @@ static int inline lod_qos_dev_is_full(struct obd_statfs *msfs) return (msfs->os_bavail < used); } -int lod_ea_store_resize(struct lod_thread_info *info, int size); - -static inline int lod_qos_ost_in_use_clear(const struct lu_env *env, int stripes) +/** + * Initialize temporary OST-in-use array. + * + * Allocate or extend the array used to mark targets already assigned to a new + * striping so they are not used more than once. 
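+ *
+ * The expected calling pattern in the allocators is roughly:
+ *
+ * \code
+ *   lod_qos_ost_in_use_clear(env, stripe_cnt);
+ *   ...
+ *   if (lod_qos_is_ost_used(env, ost_idx, stripe_idx))
+ *           continue;       /* already part of this striping */
+ *   lod_qos_ost_in_use(env, stripe_idx, ost_idx);
+ * \endcode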
+ * + * \param[in] env execution environment for this thread + * \param[in] stripes number of items needed in the array + * + * \retval 0 on success + * \retval -ENOMEM on error + */ +static inline int lod_qos_ost_in_use_clear(const struct lu_env *env, + __u32 stripes) { struct lod_thread_info *info = lod_env_info(env); @@ -629,11 +780,22 @@ static inline int lod_qos_ost_in_use_clear(const struct lu_env *env, int stripes CERROR("can't allocate memory for ost-in-use array\n"); return -ENOMEM; } - memset(info->lti_ea_store, 0, sizeof(int) * stripes); + memset(info->lti_ea_store, -1, sizeof(int) * stripes); return 0; } -static inline void lod_qos_ost_in_use(const struct lu_env *env, int idx, int ost) +/** + * Remember a target in the array of used targets. + * + * Mark the given target as used for a new striping being created. The status + * of an OST in a striping can be checked with lod_qos_is_ost_used(). + * + * \param[in] env execution environment for this thread + * \param[in] idx index in the array + * \param[in] ost OST target index to mark as used + */ +static inline void lod_qos_ost_in_use(const struct lu_env *env, + int idx, int ost) { struct lod_thread_info *info = lod_env_info(env); int *osts = info->lti_ea_store; @@ -642,11 +804,24 @@ static inline void lod_qos_ost_in_use(const struct lu_env *env, int idx, int ost osts[idx] = ost; } -static int lod_qos_is_ost_used(const struct lu_env *env, int ost, int stripes) +/** + * Check is OST used in a striping. + * + * Checks whether OST with the given index is marked as used in the temporary + * array (see lod_qos_ost_in_use()). + * + * \param[in] env execution environment for this thread + * \param[in] ost OST target index to check + * \param[in] stripes the number of items used in the array already + * + * \retval 0 not used + * \retval 1 used + */ +static int lod_qos_is_ost_used(const struct lu_env *env, int ost, __u32 stripes) { struct lod_thread_info *info = lod_env_info(env); int *osts = info->lti_ea_store; - int j; + __u32 j; for (j = 0; j < stripes; j++) { if (osts[j] == ost) @@ -655,23 +830,47 @@ static int lod_qos_is_ost_used(const struct lu_env *env, int ost, int stripes) return 0; } -/* Allocate objects on OSTs with round-robin algorithm */ +/** + * Allocate a striping using round-robin algorigthm. + * + * Allocates a new striping using round-robin algorithm. The function refreshes + * all the internal structures (statfs cache, array of available OSTs sorted + * with regard to OSS, etc). The number of stripes required is taken from the + * object (must be prepared by the caller), but can change if the flag + * LOV_USES_DEFAULT_STRIPE is supplied. The caller should ensure nobody else + * is trying to create a striping on the object in parallel. All the internal + * structures (like pools, etc) are protected and no additional locking is + * required. The function succeeds even if a single stripe is allocated. To save + * time we give priority to targets which already have objects precreated. + * Full OSTs are skipped (see lod_qos_dev_is_full() for the details). 
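+ *
+ * The walk over the pre-computed round-robin array can be sketched as
+ * (names simplified; the skip conditions are spelled out in the code):
+ *
+ * \code
+ *   for (i = 0; i < pool_count && stripe_idx < stripe_cnt; i++) {
+ *           array_idx = (start_idx + i) % pool_count;
+ *           ost_idx   = lqr->lqr_pool.op_array[array_idx];
+ *           /* skip inactive, full, degraded or already used targets */
+ *   }
+ * \endcode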
+ * + * \param[in] env execution environment for this thread + * \param[in] lo LOD object + * \param[out] stripe striping created + * \param[in] flags allocation flags (0 or LOV_USES_DEFAULT_STRIPE) + * \param[in] th transaction handle + * + * \retval 0 on success + * \retval -ENOSPC if not enough OSTs are found + * \retval negative negated errno for other failures + */ static int lod_alloc_rr(const struct lu_env *env, struct lod_object *lo, - int flags, struct thandle *th) + struct dt_object **stripe, int flags, + struct thandle *th) { struct lod_device *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev); struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs; struct pool_desc *pool = NULL; struct ost_pool *osts; - struct lov_qos_rr *lqr; + struct lod_qos_rr *lqr; struct dt_object *o; - unsigned array_idx; - int i, rc; - int ost_start_idx_temp; + unsigned int i, array_idx; + int rc; + __u32 ost_start_idx_temp; int speed = 0; - int stripe_idx = 0; - int stripe_cnt = lo->ldo_stripenr; - int stripe_cnt_min = min_stripe_count(stripe_cnt, flags); + __u32 stripe_idx = 0; + __u32 stripe_cnt = lo->ldo_stripenr; + __u32 stripe_cnt_min = min_stripe_count(stripe_cnt, flags); __u32 ost_idx; ENTRY; @@ -767,7 +966,7 @@ repeat_find: /* * try to use another OSP if this one is degraded */ - if (sfs->os_state == OS_STATE_DEGRADED && speed < 2) { + if (sfs->os_state & OS_STATE_DEGRADED && speed < 2) { QOS_DEBUG("#%d: degraded\n", ost_idx); continue; } @@ -790,7 +989,7 @@ repeat_find: * We've successfuly declared (reserved) an object */ lod_qos_ost_in_use(env, stripe_idx, ost_idx); - lo->ldo_stripe[stripe_idx] = o; + stripe[stripe_idx] = o; stripe_idx++; } @@ -822,15 +1021,140 @@ out: RETURN(rc); } -/* alloc objects on osts with specific stripe offset */ +/** + * Allocate a specific striping layout on a user defined set of OSTs. + * + * Allocates new striping using the OST index range provided by the data from + * the lmm_obejcts contained in the lov_user_md passed to this method. Full + * OSTs are not considered. The exact order of OSTs requested by the user + * is respected as much as possible depending on OST status. The number of + * stripes needed and stripe offset are taken from the object. If that number + * can not be met, then the function returns a failure and then it's the + * caller's responsibility to release the stripes allocated. All the internal + * structures are protected, but no concurrent allocation is allowed on the + * same objects. 
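+ *
+ * For example, a layout requesting OSTs 3, 1 and 7 in that exact order
+ * would arrive here roughly as (OST indices are illustrative):
+ *
+ * \code
+ *   v3->lmm_magic                = LOV_USER_MAGIC_SPECIFIC;
+ *   v3->lmm_stripe_count         = 3;
+ *   v3->lmm_objects[0].l_ost_idx = 3;
+ *   v3->lmm_objects[1].l_ost_idx = 1;
+ *   v3->lmm_objects[2].l_ost_idx = 7;
+ * \endcode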
+ * + * \param[in] env execution environment for this thread + * \param[in] lo LOD object + * \param[out] stripe striping created + * \param[in] lum stripe md to specify list of OSTs + * \param[in] th transaction handle + * + * \retval 0 on success + * \retval -ENODEV OST index does not exist on file system + * \retval -EINVAL requested OST index is invalid + * \retval negative negated errno on error + */ +static int lod_alloc_ost_list(const struct lu_env *env, + struct lod_object *lo, struct dt_object **stripe, + struct lov_user_md *lum, struct thandle *th) +{ + struct lod_device *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev); + struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs; + struct dt_object *o; + struct lov_user_md_v3 *v3; + unsigned int array_idx = 0; + int stripe_count = 0; + int i; + int rc; + ENTRY; + + /* for specific OSTs layout */ + LASSERT(lum != NULL && lum->lmm_magic == LOV_USER_MAGIC_SPECIFIC); + lustre_print_user_md(D_OTHER, lum, __func__); + + rc = lod_qos_ost_in_use_clear(env, lo->ldo_stripenr); + if (rc < 0) + RETURN(rc); + + v3 = (struct lov_user_md_v3 *)lum; + for (i = 0; i < lo->ldo_stripenr; i++) { + if (v3->lmm_objects[i].l_ost_idx == lo->ldo_def_stripe_offset) { + array_idx = i; + break; + } + } + if (i == lo->ldo_stripenr) { + CDEBUG(D_OTHER, + "%s: start index %d not in the specified list of OSTs\n", + lod2obd(m)->obd_name, lo->ldo_def_stripe_offset); + RETURN(-EINVAL); + } + + for (i = 0; i < lo->ldo_stripenr; + i++, array_idx = (array_idx + 1) % lo->ldo_stripenr) { + __u32 ost_idx = v3->lmm_objects[array_idx].l_ost_idx; + + if (!cfs_bitmap_check(m->lod_ost_bitmap, ost_idx)) { + rc = -ENODEV; + break; + } + + /* + * do not put >1 objects on a single OST + */ + if (lod_qos_is_ost_used(env, ost_idx, stripe_count)) { + rc = -EINVAL; + break; + } + + rc = lod_statfs_and_check(env, m, ost_idx, sfs); + if (rc < 0) /* this OSP doesn't feel well */ + break; + + o = lod_qos_declare_object_on(env, m, ost_idx, th); + if (IS_ERR(o)) { + rc = PTR_ERR(o); + CDEBUG(D_OTHER, + "%s: can't declare new object on #%u: %d\n", + lod2obd(m)->obd_name, ost_idx, rc); + break; + } + + /* + * We've successfuly declared (reserved) an object + */ + lod_qos_ost_in_use(env, stripe_count, ost_idx); + stripe[stripe_count] = o; + stripe_count++; + } + + RETURN(rc); +} + +/** + * Allocate a striping on a predefined set of OSTs. + * + * Allocates new striping starting from OST provided lo->ldo_def_stripe_offset. + * Full OSTs are not considered. The exact order of OSTs is not important and + * varies depending on OST status. The allocation procedure prefers the targets + * with precreated objects ready. The number of stripes needed and stripe + * offset are taken from the object. If that number can not be met, then the + * function returns a failure and then it's the caller's responsibility to + * release the stripes allocated. All the internal structures are protected, + * but no concurrent allocation is allowed on the same objects. 
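+ *
+ * Starting from the pool entry matching lo->ldo_def_stripe_offset the
+ * candidates are tried in array order, wrapping around, roughly:
+ *
+ * \code
+ *   ost_idx = osts->op_array[(offset_idx + i) % osts->op_count];
+ * \endcode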
+ * + * \param[in] env execution environment for this thread + * \param[in] lo LOD object + * \param[out] stripe striping created + * \param[in] flags not used + * \param[in] th transaction handle + * + * \retval 0 on success + * \retval -E2BIG if no enough OSTs are found + * \retval -EINVAL requested offset is invalid + * \retval negative negated errno on error + */ static int lod_alloc_specific(const struct lu_env *env, struct lod_object *lo, - int flags, struct thandle *th) + struct dt_object **stripe, int flags, + struct thandle *th) { struct lod_device *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev); struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs; struct dt_object *o; - unsigned ost_idx, array_idx, ost_count; - int i, rc, stripe_num = 0; + __u32 ost_idx; + unsigned int i, array_idx, ost_count; + int rc, stripe_num = 0; int speed = 0; struct pool_desc *pool = NULL; struct ost_pool *osts; @@ -916,7 +1240,8 @@ repeat_find: /* * We've successfuly declared (reserved) an object */ - lo->ldo_stripe[stripe_num] = o; + lod_qos_ost_in_use(env, stripe_num, ost_idx); + stripe[stripe_num] = o; stripe_num++; /* We have enough stripes */ @@ -949,6 +1274,19 @@ out: RETURN(rc); } +/** + * Check whether QoS allocation should be used. + * + * A simple helper to decide when QoS allocation should be used: + * if it's just a single available target or the used space is + * evenly distributed among the targets at the moment, then QoS + * allocation algorithm should not be used. + * + * \param[in] lod LOD device + * + * \retval 0 should not be used + * \retval 1 should be used + */ static inline int lod_qos_is_usable(struct lod_device *lod) { #ifdef FORCE_QOS @@ -966,21 +1304,51 @@ static inline int lod_qos_is_usable(struct lod_device *lod) return 1; } -/* Alloc objects on OSTs with optimization based on: - - free space - - network resources (shared OSS's) +/** + * Allocate a striping using an algorithm with weights. + * + * The function allocates OST objects to create a striping. The algorithm + * used is based on weights (currently only using the free space), and it's + * trying to ensure the space is used evenly by OSTs and OSSs. The striping + * configuration (# of stripes, offset, + * pool) is taken from the object and is prepared by the caller. + * If LOV_USES_DEFAULT_STRIPE is not passed and prepared configuration can't + * be met due to too few OSTs, then allocation fails. If the flag is + * passed and less than 75% of the requested number of stripes can be + * allocated, then allocation fails. + * No concurrent allocation is allowed on the object and this must be + * ensured by the caller. All the internal structures are protected by the + * function. + * The algorithm has two steps: find available OSTs and calucate their weights, + * then select the OSTs the weights used as the probability. An OST with a + * higher weight is proportionately more likely to be selected than one with + * a lower weight. 
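+ *
+ * The selection step can be sketched as follows, where weight_of() is
+ * just a placeholder for the per-target weight described above:
+ *
+ * \code
+ *   rand = random64 % total_weight;
+ *   cur_weight = 0;
+ *   for (i = 0; i < osts->op_count; i++) {
+ *           cur_weight += weight_of(osts->op_array[i]);
+ *           if (cur_weight >= rand)
+ *                   break;          /* allocate on this target */
+ *   }
+ * \endcode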
+ * + * \param[in] env execution environment for this thread + * \param[in] lo LOD object + * \param[out] stripe striping created + * \param[in] flags 0 or LOV_USES_DEFAULT_STRIPE + * \param[in] th transaction handle + * + * \retval 0 on success + * \retval -E2BIG if no enough OSTs are found + * \retval -EINVAL requested offset is invalid + * \retval negative negated errno on error */ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo, - int flags, struct thandle *th) + struct dt_object **stripe, int flags, + struct thandle *th) { struct lod_device *m = lu2lod_dev(lo->ldo_obj.do_lu.lo_dev); struct obd_statfs *sfs = &lod_env_info(env)->lti_osfs; struct lod_tgt_desc *ost; struct dt_object *o; __u64 total_weight = 0; - int nfound, good_osts, i, rc = 0; - int stripe_cnt = lo->ldo_stripenr; - int stripe_cnt_min; + unsigned int i; + int rc = 0; + __u32 nfound, good_osts; + __u32 stripe_cnt = lo->ldo_stripenr; + __u32 stripe_cnt_min; struct pool_desc *pool = NULL; struct ost_pool *osts; ENTRY; @@ -1098,7 +1466,7 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo, /* On average, this will hit larger-weighted osts more often. 0-weight osts will always get used last (only when rand=0) */ for (i = 0; i < osts->op_count; i++) { - int idx = osts->op_array[i]; + __u32 idx = osts->op_array[i]; if (!cfs_bitmap_check(m->lod_ost_bitmap, idx)) continue; @@ -1132,7 +1500,7 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo, idx, (int) PTR_ERR(o)); continue; } - lo->ldo_stripe[nfound++] = o; + stripe[nfound++] = o; lod_qos_used(m, osts, idx, &total_weight); rc = 0; break; @@ -1154,9 +1522,9 @@ static int lod_alloc_qos(const struct lu_env *env, struct lod_object *lo, */ LCONSOLE_INFO("wanted %d, found %d\n", stripe_cnt, nfound); for (i = 0; i < nfound; i++) { - LASSERT(lo->ldo_stripe[i]); - lu_object_put(env, &lo->ldo_stripe[i]->do_lu); - lo->ldo_stripe[i] = NULL; + LASSERT(stripe[i] != NULL); + lu_object_put(env, &stripe[i]->do_lu); + stripe[i] = NULL; } /* makes sense to rebalance next time */ @@ -1179,7 +1547,20 @@ out_nolock: RETURN(rc); } -/* Find the max stripecount we should use */ +/** + * Find largest stripe count the caller can use. + * + * Find the maximal possible stripe count not greater than \a stripe_count. + * Sometimes suggested stripecount can't be reached for a number of reasons: + * lack of enough active OSTs or the backend does not support EAs that large. + * If the passed one is 0, then the filesystem's default one is used. + * + * \param[in] lod LOD device + * \param[in] magic the format if striping + * \param[in] stripe_count count the caller would like to use + * + * \retval the maximum usable stripe count + */ static __u16 lod_get_stripecnt(struct lod_device *lod, __u32 magic, __u16 stripe_count) { @@ -1194,56 +1575,87 @@ static __u16 lod_get_stripecnt(struct lod_device *lod, __u32 magic, /* stripe count is based on whether OSD can handle larger EA sizes */ if (lod->lod_osd_max_easize > 0) - max_stripes = lov_mds_md_stripecnt(lod->lod_osd_max_easize, - magic); + max_stripes = lov_mds_md_max_stripe_count( + lod->lod_osd_max_easize, magic); return (stripe_count < max_stripes) ? stripe_count : max_stripes; } +/** + * Create in-core respresentation for a fully-defined striping + * + * When the caller passes a fully-defined striping (i.e. everything including + * OST object FIDs are defined), then we still need to instantiate LU-cache + * with the objects representing the stripes defined. 
This function completes + * that task. + * + * \param[in] env execution environment for this thread + * \param[in] mo LOD object + * \param[in] buf buffer containing the striping + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int lod_use_defined_striping(const struct lu_env *env, struct lod_object *mo, const struct lu_buf *buf) { - struct lod_device *d = lu2lod_dev(lod2lu_obj(mo)->lo_dev); struct lov_mds_md_v1 *v1 = buf->lb_buf; struct lov_mds_md_v3 *v3 = buf->lb_buf; struct lov_ost_data_v1 *objs; __u32 magic; - int rc; + int rc = 0; ENTRY; - rc = lod_verify_striping(d, buf, 1); - if (rc) - RETURN(rc); - magic = le32_to_cpu(v1->lmm_magic); if (magic == LOV_MAGIC_V1_DEF) { + magic = LOV_MAGIC_V1; objs = &v1->lmm_objects[0]; } else if (magic == LOV_MAGIC_V3_DEF) { + magic = LOV_MAGIC_V3; objs = &v3->lmm_objects[0]; lod_object_set_pool(mo, v3->lmm_pool_name); } else { GOTO(out, rc = -EINVAL); } - /* - * LOD shouldn't be aware of recovery at all, - * but this track recovery status (to some extent) - * to be do additional checks like this one - */ - LASSERT(d->lod_recovery_completed == 0); - + mo->ldo_pattern = le32_to_cpu(v1->lmm_pattern); mo->ldo_stripe_size = le32_to_cpu(v1->lmm_stripe_size); mo->ldo_stripenr = le16_to_cpu(v1->lmm_stripe_count); mo->ldo_layout_gen = le16_to_cpu(v1->lmm_layout_gen); + + /* fixup for released file before object initialization */ + if (mo->ldo_pattern & LOV_PATTERN_F_RELEASED) { + mo->ldo_released_stripenr = mo->ldo_stripenr; + mo->ldo_stripenr = 0; + } + LASSERT(buf->lb_len >= lov_mds_md_size(mo->ldo_stripenr, magic)); - rc = lod_initialize_objects(env, mo, objs); + if (mo->ldo_stripenr > 0) + rc = lod_initialize_objects(env, mo, objs); out: RETURN(rc); } +/** + * Parse suggested striping configuration. + * + * The caller gets a suggested striping configuration from a number of sources + * including per-directory default and applications. Then it needs to verify + * the suggested striping is valid, apply missing bits and store the resulting + * configuration in the object to be used by the allocator later. Must not be + * called concurrently against the same object. It's OK to provide a + * fully-defined striping. 
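+ *
+ * For example, a request for three 1 MiB stripes from a hypothetical
+ * pool "ssd" would be passed in as a lov_user_md_v3 along the lines of:
+ *
+ * \code
+ *   lum.lmm_magic         = LOV_USER_MAGIC_V3;
+ *   lum.lmm_stripe_count  = 3;
+ *   lum.lmm_stripe_size   = 1024 * 1024;
+ *   lum.lmm_stripe_offset = LOV_OFFSET_DEFAULT;  /* allocator picks */
+ *   strlcpy(lum.lmm_pool_name, "ssd", sizeof(lum.lmm_pool_name));
+ * \endcode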
+ * + * \param[in] env execution environment for this thread + * \param[in] lo LOD object + * \param[in] buf buffer containing the striping + * + * \retval 0 on success + * \retval negative negated errno on error + */ static int lod_qos_parse_config(const struct lu_env *env, struct lod_object *lo, const struct lu_buf *buf) @@ -1251,107 +1663,160 @@ static int lod_qos_parse_config(const struct lu_env *env, struct lod_device *d = lu2lod_dev(lod2lu_obj(lo)->lo_dev); struct lov_user_md_v1 *v1 = NULL; struct lov_user_md_v3 *v3 = NULL; - struct pool_desc *pool; + char *pool_name = NULL; __u32 magic; int rc; + unsigned int size; ENTRY; if (buf == NULL || buf->lb_buf == NULL || buf->lb_len == 0) RETURN(0); + v3 = buf->lb_buf; v1 = buf->lb_buf; magic = v1->lmm_magic; - if (magic == __swab32(LOV_USER_MAGIC_V1)) { + if (unlikely(magic == LOV_MAGIC_V1_DEF || magic == LOV_MAGIC_V3_DEF)) { + /* try to use as fully defined striping */ + rc = lod_use_defined_striping(env, lo, buf); + RETURN(rc); + } + + switch (magic) { + case __swab32(LOV_USER_MAGIC_V1): lustre_swab_lov_user_md_v1(v1); magic = v1->lmm_magic; - } else if (magic == __swab32(LOV_USER_MAGIC_V3)) { - v3 = buf->lb_buf; + /* fall through */ + case LOV_USER_MAGIC_V1: + size = sizeof(*v1); + break; + + case __swab32(LOV_USER_MAGIC_V3): lustre_swab_lov_user_md_v3(v3); magic = v3->lmm_magic; - } + /* fall through */ + case LOV_USER_MAGIC_V3: + size = sizeof(*v3); + pool_name = v3->lmm_pool_name; + break; - if (unlikely(magic != LOV_MAGIC_V1 && magic != LOV_MAGIC_V3)) { - /* try to use as fully defined striping */ - rc = lod_use_defined_striping(env, lo, buf); - RETURN(rc); + case __swab32(LOV_USER_MAGIC_SPECIFIC): + lustre_swab_lov_user_md_v3(v3); + lustre_swab_lov_user_md_objects(v3->lmm_objects, + v3->lmm_stripe_count); + magic = v3->lmm_magic; + /* fall through */ + case LOV_USER_MAGIC_SPECIFIC: + if (v3->lmm_stripe_offset == LOV_OFFSET_DEFAULT) + v3->lmm_stripe_offset = v3->lmm_objects[0].l_ost_idx; + if (v3->lmm_pool_name[0] != '\0') + pool_name = v3->lmm_pool_name; + size = lov_user_md_size(v3->lmm_stripe_count, + LOV_USER_MAGIC_SPECIFIC); + break; + + default: + CERROR("%s: unrecognized magic %X\n", + lod2obd(d)->obd_name, magic); + RETURN(-EINVAL); } - if (unlikely(buf->lb_len < sizeof(*v1))) { - CERROR("wrong size: %u\n", (unsigned) buf->lb_len); + if (unlikely(buf->lb_len < size)) { + CERROR("%s: wrong size: %zd, expect: %u\n", + lod2obd(d)->obd_name, buf->lb_len, size); RETURN(-EINVAL); } - if (v1->lmm_pattern != 0 && v1->lmm_pattern != LOV_PATTERN_RAID0) { - CERROR("invalid pattern: %x\n", v1->lmm_pattern); + lustre_print_user_md(D_OTHER, v1, "parse config"); + + v1->lmm_magic = magic; + if (v1->lmm_pattern == 0) + v1->lmm_pattern = LOV_PATTERN_RAID0; + if (lov_pattern(v1->lmm_pattern) != LOV_PATTERN_RAID0) { + CERROR("%s: invalid pattern: %x\n", + lod2obd(d)->obd_name, v1->lmm_pattern); RETURN(-EINVAL); } + lo->ldo_pattern = v1->lmm_pattern; - if (v1->lmm_stripe_size) + if (v1->lmm_stripe_size > 0) lo->ldo_stripe_size = v1->lmm_stripe_size; + if (lo->ldo_stripe_size & (LOV_MIN_STRIPE_SIZE - 1)) lo->ldo_stripe_size = LOV_MIN_STRIPE_SIZE; - if (v1->lmm_stripe_count) + if (v1->lmm_stripe_count > 0) lo->ldo_stripenr = v1->lmm_stripe_count; - if ((v1->lmm_stripe_offset >= d->lod_desc.ld_tgt_count) && - (v1->lmm_stripe_offset != (typeof(v1->lmm_stripe_offset))(-1))) { - CERROR("invalid offset: %x\n", v1->lmm_stripe_offset); - RETURN(-EINVAL); - } lo->ldo_def_stripe_offset = v1->lmm_stripe_offset; - CDEBUG(D_OTHER, "lsm: %u size, %u stripes, 
%u offset\n", - v1->lmm_stripe_size, v1->lmm_stripe_count, - v1->lmm_stripe_offset); - - if (v1->lmm_magic == LOV_MAGIC_V3) { - if (buf->lb_len < sizeof(*v3)) { - CERROR("wrong size: %u\n", (unsigned) buf->lb_len); - RETURN(-EINVAL); - } - - v3 = buf->lb_buf; - lod_object_set_pool(lo, v3->lmm_pool_name); + lod_object_set_pool(lo, NULL); + if (pool_name != NULL) { + struct pool_desc *pool; /* In the function below, .hs_keycmp resolves to * pool_hashkey_keycmp() */ /* coverity[overrun-buffer-val] */ - pool = lod_find_pool(d, v3->lmm_pool_name); + pool = lod_find_pool(d, pool_name); if (pool != NULL) { - if (lo->ldo_def_stripe_offset != - (typeof(v1->lmm_stripe_offset))(-1)) { - rc = lo->ldo_def_stripe_offset; - rc = lod_check_index_in_pool(rc, pool); + if (lo->ldo_def_stripe_offset != LOV_OFFSET_DEFAULT) { + rc = lod_check_index_in_pool( + lo->ldo_def_stripe_offset, pool); if (rc < 0) { lod_pool_putref(pool); - CERROR("invalid offset\n"); + CERROR("%s: invalid offset, %u\n", + lod2obd(d)->obd_name, + lo->ldo_def_stripe_offset); RETURN(-EINVAL); } } if (lo->ldo_stripenr > pool_tgt_count(pool)) - lo->ldo_stripenr= pool_tgt_count(pool); + lo->ldo_stripenr = pool_tgt_count(pool); lod_pool_putref(pool); } - } else - lod_object_set_pool(lo, NULL); + + lod_object_set_pool(lo, pool_name); + } + + /* fixup for released file */ + if (lo->ldo_pattern & LOV_PATTERN_F_RELEASED) { + lo->ldo_released_stripenr = lo->ldo_stripenr; + lo->ldo_stripenr = 0; + } RETURN(0); } -/* - * buf should be NULL or contain striping settings +/** + * Create a striping for an obejct. + * + * The function creates a new striping for the object. A buffer containing + * configuration hints can be provided optionally. The function tries QoS + * algorithm first unless free space is distributed evenly among OSTs, but + * by default RR algorithm is preferred due to internal concurrency (QoS is + * serialized). The caller must ensure no concurrent calls to the function + * are made against the same object. + * + * \param[in] env execution environment for this thread + * \param[in] lo LOD object + * \param[in] attr attributes OST objects will be declared with + * \param[in] buf suggested striping configuration or NULL + * \param[in] th transaction handle + * + * \retval 0 on success + * \retval negative negated errno on error */ int lod_qos_prep_create(const struct lu_env *env, struct lod_object *lo, struct lu_attr *attr, const struct lu_buf *buf, struct thandle *th) { - struct lod_device *d = lu2lod_dev(lod2lu_obj(lo)->lo_dev); - int flag = LOV_USES_ASSIGNED_STRIPE; - int i, rc = 0; + struct lod_device *d = lu2lod_dev(lod2lu_obj(lo)->lo_dev); + struct dt_object **stripe; + int stripe_len; + int flag = LOV_USES_ASSIGNED_STRIPE; + int i, rc; ENTRY; LASSERT(lo); @@ -1374,29 +1839,60 @@ int lod_qos_prep_create(const struct lu_env *env, struct lod_object *lo, if (rc) GOTO(out, rc); + /* A released file is being created */ + if (lo->ldo_stripenr == 0) + GOTO(out, rc = 0); + if (likely(lo->ldo_stripe == NULL)) { + struct lov_user_md *lum = NULL; + /* * no striping has been created so far */ LASSERT(lo->ldo_stripenr > 0); + /* + * statfs and check OST targets now, since ld_active_tgt_count + * could be changed if some OSTs are [de]activated manually. 
+ */ + lod_qos_statfs_update(env, d); lo->ldo_stripenr = lod_get_stripecnt(d, LOV_MAGIC, - lo->ldo_stripenr); - i = sizeof(struct dt_object *) * lo->ldo_stripenr; - OBD_ALLOC(lo->ldo_stripe, i); - if (lo->ldo_stripe == NULL) + lo->ldo_stripenr); + + stripe_len = lo->ldo_stripenr; + OBD_ALLOC(stripe, sizeof(stripe[0]) * stripe_len); + if (stripe == NULL) GOTO(out, rc = -ENOMEM); - lo->ldo_stripes_allocated = lo->ldo_stripenr; lod_getref(&d->lod_ost_descs); /* XXX: support for non-0 files w/o objects */ - if (lo->ldo_def_stripe_offset >= d->lod_desc.ld_tgt_count) { - lod_qos_statfs_update(env, d); - rc = lod_alloc_qos(env, lo, flag, th); + CDEBUG(D_OTHER, "tgt_count %d stripenr %d\n", + d->lod_desc.ld_tgt_count, stripe_len); + + if (buf != NULL && buf->lb_buf != NULL) + lum = buf->lb_buf; + + if (lum != NULL && lum->lmm_magic == LOV_USER_MAGIC_SPECIFIC) { + rc = lod_alloc_ost_list(env, lo, stripe, lum, th); + } else if (lo->ldo_def_stripe_offset == LOV_OFFSET_DEFAULT) { + rc = lod_alloc_qos(env, lo, stripe, flag, th); if (rc == -EAGAIN) - rc = lod_alloc_rr(env, lo, flag, th); - } else - rc = lod_alloc_specific(env, lo, flag, th); + rc = lod_alloc_rr(env, lo, stripe, flag, th); + } else { + rc = lod_alloc_specific(env, lo, stripe, flag, th); + } lod_putref(d, &d->lod_ost_descs); + + if (rc < 0) { + for (i = 0; i < stripe_len; i++) + if (stripe[i] != NULL) + lu_object_put(env, &stripe[i]->do_lu); + + OBD_FREE(stripe, sizeof(stripe[0]) * stripe_len); + lo->ldo_stripenr = 0; + } else { + lo->ldo_stripe = stripe; + lo->ldo_stripes_allocated = stripe_len; + } } else { /* * lod_qos_parse_config() found supplied buf as a predefined