X-Git-Url: https://git.whamcloud.com/?a=blobdiff_plain;f=lustre%2Finclude%2Flustre_mdc.h;h=3c7b6f017a95bbe99e4978f713b987d95dd03bce;hb=976c0abd4efab4f56cf4b21b940eb1b976c37372;hp=1bfa2a8e193ee63ea72b7eb47ac48ab4b707b37c;hpb=6869932b552ac705f411de3362f01bd50c1f6f7d;p=fs%2Flustre-release.git

diff --git a/lustre/include/lustre_mdc.h b/lustre/include/lustre_mdc.h
index 1bfa2a8..3c7b6f0 100644
--- a/lustre/include/lustre_mdc.h
+++ b/lustre/include/lustre_mdc.h
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -26,10 +24,13 @@
  * GPL HEADER END
  */
 /*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  */
 /*
+ * Copyright (c) 2011, 2012, Intel Corporation.
+ */
+/*
  * This file is part of Lustre, http://www.lustre.org/
  * Lustre is a trademark of Sun Microsystems, Inc.
  *
@@ -42,27 +43,25 @@
 #ifndef _LUSTRE_MDC_H
 #define _LUSTRE_MDC_H
 
+/** \defgroup mdc mdc
+ *
+ * @{
+ */
+
 #ifdef __KERNEL__
 # include
 # include
 # ifdef CONFIG_FS_POSIX_ACL
-# ifdef HAVE_XATTR_ACL
-# include
-# endif /*HAVE_XATTR_ACL */
-# ifdef HAVE_LINUX_POSIX_ACL_XATTR_H
-# include
-# endif /* HAVE_LINUX_POSIX_ACL_XATTR_H */
+# include
 # endif /* CONFIG_FS_POSIX_ACL */
-# ifndef HAVE_VFS_INTENT_PATCHES
 # include
-# endif /* HAVE_VFS_INTENT_PATCHES */
 #endif /* __KERNEL__ */
 #include
 #include
+#include
 #include
 #include
 #include
-#include
 #include
 
 struct ptlrpc_client;
@@ -70,42 +69,122 @@
 struct obd_export;
 struct ptlrpc_request;
 struct obd_device;
 
+/**
+ * Serializes in-flight MDT-modifying RPC requests to preserve idempotency.
+ *
+ * This mutex is used to implement execute-once semantics on the MDT.
+ * The MDT stores the last transaction ID and result for every client in
+ * its last_rcvd file. If the client doesn't get a reply, it can safely
+ * resend the request and the MDT will reconstruct the reply being aware
+ * that the request has already been executed. Without this lock,
+ * execution status of concurrent in-flight requests would be
+ * overwritten.
+ *
+ * This design limits the extent to which we can keep a full pipeline of
+ * in-flight requests from a single client. This limitation could be
+ * overcome by allowing multiple slots per client in the last_rcvd file.
+ */
 struct mdc_rpc_lock {
-        struct semaphore rpcl_sem;
-        struct lookup_intent *rpcl_it;
+	/** Lock protecting in-flight RPC concurrency. */
+	struct mutex rpcl_mutex;
+	/** Intent associated with currently executing request. */
+	struct lookup_intent *rpcl_it;
+	/** Used for MDS/RPC load testing purposes. */
+	int rpcl_fakes;
 };
 
+#define MDC_FAKE_RPCL_IT ((void *)0x2c0012bfUL)
+
 static inline void mdc_init_rpc_lock(struct mdc_rpc_lock *lck)
 {
-        sema_init(&lck->rpcl_sem, 1);
+	mutex_init(&lck->rpcl_mutex);
         lck->rpcl_it = NULL;
 }
 
 static inline void mdc_get_rpc_lock(struct mdc_rpc_lock *lck,
-                                    struct lookup_intent *it)
+				    struct lookup_intent *it)
 {
-        ENTRY;
-        if (!it || (it->it_op != IT_GETATTR && it->it_op != IT_LOOKUP)) {
-                down(&lck->rpcl_sem);
-                LASSERT(lck->rpcl_it == NULL);
-                lck->rpcl_it = it;
-        }
+	ENTRY;
+
+	if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+			   it->it_op == IT_LAYOUT))
+		return;
+
+	/* This would normally block until the existing request finishes.
+	 * If fail_loc is set it will block until the regular request is
+	 * done, then set rpcl_it to MDC_FAKE_RPCL_IT. Once that is set
+	 * it will only be cleared when all fake requests are finished.
+	 * Only when all fake requests are finished can normal requests
+	 * be sent, to ensure they are recoverable again. */
+ again:
+	mutex_lock(&lck->rpcl_mutex);
+
+	if (CFS_FAIL_CHECK_QUIET(OBD_FAIL_MDC_RPCS_SEM)) {
+		lck->rpcl_it = MDC_FAKE_RPCL_IT;
+		lck->rpcl_fakes++;
+		mutex_unlock(&lck->rpcl_mutex);
+		return;
+	}
+
+	/* This will only happen when the CFS_FAIL_CHECK() was
+	 * just turned off but there are still requests in progress.
+	 * Wait until they finish. It doesn't need to be efficient
+	 * in this extremely rare case, just have low overhead in
+	 * the common case when it isn't true. */
+	while (unlikely(lck->rpcl_it == MDC_FAKE_RPCL_IT)) {
+		mutex_unlock(&lck->rpcl_mutex);
+		schedule_timeout(cfs_time_seconds(1) / 4);
+		goto again;
+	}
+
+	LASSERT(lck->rpcl_it == NULL);
+	lck->rpcl_it = it;
 }
 
 static inline void mdc_put_rpc_lock(struct mdc_rpc_lock *lck,
-                                    struct lookup_intent *it)
+				    struct lookup_intent *it)
 {
-        if (!it || (it->it_op != IT_GETATTR && it->it_op != IT_LOOKUP)) {
-                LASSERT(it == lck->rpcl_it);
-                lck->rpcl_it = NULL;
-                up(&lck->rpcl_sem);
+	if (it != NULL && (it->it_op == IT_GETATTR || it->it_op == IT_LOOKUP ||
+			   it->it_op == IT_LAYOUT))
+		goto out;
+
+	if (lck->rpcl_it == MDC_FAKE_RPCL_IT) { /* OBD_FAIL_MDC_RPCS_SEM */
+		mutex_lock(&lck->rpcl_mutex);
+
+		LASSERTF(lck->rpcl_fakes > 0, "%d\n", lck->rpcl_fakes);
+		lck->rpcl_fakes--;
+
+		if (lck->rpcl_fakes == 0)
+			lck->rpcl_it = NULL;
+
+	} else {
+		LASSERTF(it == lck->rpcl_it, "%p != %p\n", it, lck->rpcl_it);
+		lck->rpcl_it = NULL;
+	}
+
+	mutex_unlock(&lck->rpcl_mutex);
+ out:
+	EXIT;
+}
+
+static inline void mdc_update_max_ea_from_body(struct obd_export *exp,
+					       struct mdt_body *body)
+{
+	if (body->valid & OBD_MD_FLMODEASIZE) {
+		if (exp->exp_obd->u.cli.cl_max_mds_easize < body->max_mdsize)
+			exp->exp_obd->u.cli.cl_max_mds_easize =
+				body->max_mdsize;
+		if (exp->exp_obd->u.cli.cl_max_mds_cookiesize <
+		    body->max_cookiesize)
+			exp->exp_obd->u.cli.cl_max_mds_cookiesize =
+				body->max_cookiesize;
         }
-        EXIT;
 }
 
-struct mdc_cache_waiter {
-        struct list_head mcw_entry;
-        cfs_waitq_t mcw_waitq;
+
+struct mdc_cache_waiter {
+	cfs_list_t mcw_entry;
+	wait_queue_head_t mcw_waitq;
 };
 
 /* mdc/mdc_locks.c */
@@ -117,4 +196,21 @@ int it_open_error(int phase, struct lookup_intent *it);
 int mdc_sendpage(struct obd_export *exp, const struct lu_fid *fid,
                  const struct page *page, int offset);
 #endif
+
+static inline bool cl_is_lov_delay_create(unsigned int flags)
+{
+	return (flags & O_LOV_DELAY_CREATE_1_8) != 0 ||
+	       (flags & O_LOV_DELAY_CREATE_MASK) == O_LOV_DELAY_CREATE_MASK;
+}
+
+static inline void cl_lov_delay_create_clear(unsigned int *flags)
+{
+	if ((*flags & O_LOV_DELAY_CREATE_1_8) != 0)
+		*flags &= ~O_LOV_DELAY_CREATE_1_8;
+	if ((*flags & O_LOV_DELAY_CREATE_MASK) == O_LOV_DELAY_CREATE_MASK)
+		*flags &= ~O_LOV_DELAY_CREATE_MASK;
+}
+
+/** @} mdc */
+
 #endif
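
The execute-once design described in the mdc_rpc_lock comment above can be seen in isolation with a small model. The sketch below is not Lustre code and not part of the patch: it is a minimal userspace approximation using pthreads, in which rpc_lock_get()/rpc_lock_put(), fail_loc_set and send_modifying_rpc() are invented stand-ins for mdc_get_rpc_lock()/mdc_put_rpc_lock(), OBD_FAIL_MDC_RPCS_SEM and the real RPC path. It only illustrates the two behaviours the patch adds: a normal modifying request holds the mutex for its whole lifetime, so at most one is in flight and the MDT's single last_rcvd slot per client stays valid, while "fake" load-testing requests merely bump a counter and run concurrently, and normal traffic resumes only once that counter drains to zero.

/* rpc_lock_model.c - illustrative only, not part of the patch above. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

struct rpc_lock {
	pthread_mutex_t rl_mutex;  /* plays the role of rpcl_mutex */
	bool            rl_faking; /* models rpcl_it == MDC_FAKE_RPCL_IT */
	int             rl_fakes;  /* models rpcl_fakes */
};

static struct rpc_lock lck = { PTHREAD_MUTEX_INITIALIZER, false, 0 };
static bool fail_loc_set;          /* models OBD_FAIL_MDC_RPCS_SEM */

static void rpc_lock_get(void)
{
again:
	pthread_mutex_lock(&lck.rl_mutex);

	if (fail_loc_set) {
		/* fake request: record it and drop the mutex immediately */
		lck.rl_faking = true;
		lck.rl_fakes++;
		pthread_mutex_unlock(&lck.rl_mutex);
		return;
	}

	/* fail_loc was just cleared: wait for outstanding fakes to drain */
	if (lck.rl_faking) {
		pthread_mutex_unlock(&lck.rl_mutex);
		usleep(250 * 1000);
		goto again;
	}

	/* normal request: keep holding the mutex until rpc_lock_put() */
}

static void rpc_lock_put(void)
{
	if (lck.rl_faking) {		/* a fake request is finishing */
		pthread_mutex_lock(&lck.rl_mutex);
		if (--lck.rl_fakes == 0)
			lck.rl_faking = false;
	}
	/* a normal request already owns the mutex at this point */
	pthread_mutex_unlock(&lck.rl_mutex);
}

static void *send_modifying_rpc(void *arg)
{
	rpc_lock_get();
	/* only one "real" request can reach this point at a time */
	printf("request %ld executed\n", (long)arg);
	rpc_lock_put();
	return NULL;
}

int main(void)
{
	pthread_t tid[4];
	long i;

	for (i = 0; i < 4; i++)
		pthread_create(&tid[i], NULL, send_modifying_rpc, (void *)i);
	for (i = 0; i < 4; i++)
		pthread_join(tid[i], NULL);
	return 0;
}

Compile with "cc -pthread rpc_lock_model.c". With fail_loc_set left false the four requests execute strictly one at a time, mirroring the serialized normal path; setting fail_loc_set to true before spawning the threads lets them all proceed concurrently as fakes, mirroring what OBD_FAIL_MDC_RPCS_SEM is used for in load testing.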