diff --git a/lustre/include/cl_object.h b/lustre/include/cl_object.h
index 3d44b1c..b0c11e3 100644
--- a/lustre/include/cl_object.h
+++ b/lustre/include/cl_object.h
@@ -1,6 +1,4 @@
-/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
- * vim:expandtab:shiftwidth=8:tabstop=8:
- *
+/*
  * GPL HEADER START
  *
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
@@ -29,8 +27,7 @@
  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  * Use is subject to license terms.
  *
- * Copyright (c) 2011 Whamcloud, Inc.
- *
+ * Copyright (c) 2011, 2012, Whamcloud, Inc.
  */
 /*
  * This file is part of Lustre, http://www.lustre.org/
@@ -279,6 +276,16 @@ struct cl_object_conf {
          * VFS inode. This is consumed by vvp.
          */
         struct inode *coc_inode;
+        /**
+         * Validate the object conf only. If the object is using an invalid
+         * conf, invalidate it and set the new layout.
+         */
+        bool coc_validate_only;
+        /**
+         * Invalidate the current stripe configuration because the layout
+         * lock has been lost.
+         */
+        bool coc_invalidate;
 };
 
 /**
@@ -1057,6 +1064,15 @@ struct cl_page_operations {
          */
         int (*cpo_cancel)(const struct lu_env *env,
                           const struct cl_page_slice *slice);
+        /**
+         * Write out a page by the kernel. Currently this is only called
+         * by ll_writepage.
+         *
+         * \see cl_page_flush()
+         */
+        int (*cpo_flush)(const struct lu_env *env,
+                         const struct cl_page_slice *slice,
+                         struct cl_io *io);
         /** @} transfer */
 };
 
@@ -1065,10 +1081,10 @@ struct cl_page_operations {
  */
 #define CL_PAGE_DEBUG(mask, env, page, format, ...) \
 do { \
-        static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
+        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
 \
         if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
-                cl_page_print(env, &__info, lu_cdebug_printer, page); \
+                cl_page_print(env, &msgdata, lu_cdebug_printer, page); \
                 CDEBUG(mask, format , ## __VA_ARGS__); \
         } \
 } while (0)
 
@@ -1076,14 +1092,14 @@ do { \
 /**
  * Helper macro, dumping shorter information about \a page into a log.
  */
-#define CL_PAGE_HEADER(mask, env, page, format, ...) \
-do { \
-        static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
- \
-        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
-                cl_page_header_print(env, &__info, lu_cdebug_printer, page); \
-                CDEBUG(mask, format , ## __VA_ARGS__); \
-        } \
+#define CL_PAGE_HEADER(mask, env, page, format, ...)                  \
+do {                                                                  \
+        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL);              \
+                                                                      \
+        if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) {                 \
+                cl_page_header_print(env, &msgdata, lu_cdebug_printer, page);\
+                CDEBUG(mask, format , ## __VA_ARGS__);                \
+        }                                                             \
 } while (0)
 
 /** @} cl_page */
 
@@ -1443,7 +1459,9 @@ enum cl_lock_flags {
         /** cancellation is pending for this lock. */
         CLF_CANCELPEND = 1 << 1,
         /** destruction is pending for this lock. */
-        CLF_DOOMED = 1 << 2
+        CLF_DOOMED = 1 << 2,
+        /** the lock comes from an enqueue RPC reply upcall. */
+        CLF_FROM_UPCALL = 1 << 3,
 };
 
 /**
@@ -1616,9 +1634,11 @@ struct cl_lock_slice {
  */
 enum cl_lock_transition {
         /** operation cannot be completed immediately. Wait for state change. */
-        CLO_WAIT = 1,
+        CLO_WAIT       = 1,
         /** operation had to release lock mutex, restart. */
-        CLO_REPEAT = 2
+        CLO_REPEAT     = 2,
+        /** lower layer re-enqueued. */
+        CLO_REENQUEUED = 3,
 };
 
 /**
@@ -1789,14 +1809,22 @@ struct cl_lock_operations {
 
 #define CL_LOCK_DEBUG(mask, env, lock, format, ...) \
 do { \
-        static DECLARE_LU_CDEBUG_PRINT_INFO(__info, mask); \
+        LIBCFS_DEBUG_MSG_DATA_DECL(msgdata, mask, NULL); \
 \
         if (cfs_cdebug_show(mask, DEBUG_SUBSYSTEM)) { \
-                cl_lock_print(env, &__info, lu_cdebug_printer, lock); \
+                cl_lock_print(env, &msgdata, lu_cdebug_printer, lock); \
                 CDEBUG(mask, format , ## __VA_ARGS__); \
         } \
 } while (0)
 
+#define CL_LOCK_ASSERT(expr, env, lock) do { \
+        if (likely(expr)) \
+                break; \
+ \
+        CL_LOCK_DEBUG(D_ERROR, env, lock, "failed at %s.\n", #expr); \
+        LBUG(); \
+} while (0)
+
 /** @} cl_lock */
 
 /** \addtogroup cl_page_list cl_page_list
@@ -1900,6 +1928,11 @@ enum cl_io_type {
          */
         CIT_FAULT,
         /**
+         * fsync system call handling:
+         * write out a given range of the file.
+         */
+        CIT_FSYNC,
+        /**
          * Miscellaneous io. This is used for occasional io activity that
          * doesn't fit into other types. Currently this is used for:
          *
@@ -1946,11 +1979,6 @@ enum cl_io_state {
         CIS_FINI
 };
 
-enum cl_req_priority {
-        CRP_NORMAL,
-        CRP_CANCEL
-};
-
 /**
  * IO state private for a layer.
  *
@@ -2068,8 +2096,7 @@ struct cl_io_operations {
                 int  (*cio_submit)(const struct lu_env *env,
                                    const struct cl_io_slice *slice,
                                    enum cl_req_type crt,
-                                   struct cl_2queue *queue,
-                                   enum cl_req_priority priority);
+                                   struct cl_2queue *queue);
         } req_op[CRT_NR];
         /**
          * Read missing page.
@@ -2155,9 +2182,13 @@ enum cl_enq_flags {
          */
         CEF_NEVER = 0x00000010,
         /**
+         * for async glimpse lock.
+         */
+        CEF_AGL = 0x00000020,
+        /**
          * mask of enq_flags.
          */
-        CEF_MASK = 0x0000001f
+        CEF_MASK = 0x0000003f,
 };
 
 /**
@@ -2227,6 +2258,18 @@ enum cl_io_lock_dmd {
         CILR_PEEK
 };
 
+enum cl_fsync_mode {
+        /** start writeback of dirty pages, do not wait for them to finish */
+        CL_FSYNC_NONE = 0,
+        /** start writeback and wait for the writes to finish */
+        CL_FSYNC_LOCAL = 1,
+        /** discard all of the dirty pages in the given file range */
+        CL_FSYNC_DISCARD = 2,
+        /** start writeback and make sure the pages have reached storage
+         * before returning; an OST_SYNC RPC must be issued and completed */
+        CL_FSYNC_ALL = 3
+};
+
 struct cl_io_rw_common {
         loff_t crw_pos;
         size_t crw_count;
@@ -2261,11 +2304,6 @@ struct cl_io {
         struct cl_lockset ci_lockset;
         /** lock requirements, this is just a help info for sublayers. */
         enum cl_io_lock_dmd ci_lockreq;
-        /**
-         * This io has held grouplock, to inform sublayers that
-         * don't do lockless i/o.
-         */
-        int ci_no_srvlock;
         union {
                 struct cl_rd_io {
                         struct cl_io_rw_common rd;
@@ -2273,6 +2311,7 @@ struct cl_io {
                 struct cl_wr_io {
                         struct cl_io_rw_common wr;
                         int wr_append;
+                        int wr_sync;
                 } ci_wr;
                 struct cl_io_rw_common ci_rw;
                 struct cl_setattr_io {
@@ -2294,11 +2333,43 @@ struct cl_io {
                         /** resulting page */
                         struct cl_page *ft_page;
                 } ci_fault;
+                struct cl_fsync_io {
+                        loff_t fi_start;
+                        loff_t fi_end;
+                        struct obd_capa *fi_capa;
+                        /** file system level fid */
+                        struct lu_fid *fi_fid;
+                        enum cl_fsync_mode fi_mode;
+                        /* how many pages were written/discarded */
+                        unsigned int fi_nr_written;
+                } ci_fsync;
         } u;
         struct cl_2queue ci_queue;
         size_t ci_nob;
         int ci_result;
-        int ci_continue;
+        unsigned int ci_continue:1,
+        /**
+         * This io holds a grouplock, to inform sublayers not to do
+         * lockless i/o.
+         */
+                     ci_no_srvlock:1,
+        /**
+         * The whole IO needs to be restarted because the layout has
+         * changed.
+         */
+                     ci_need_restart:1,
+        /**
+         * Ignore layout change.
+         * Most of the CIT_MISC operations can ignore layout change,
+         * because the purpose of creating this kind of cl_io is to give
+         * an environment in which to run clio methods, for example:
+         * 1. request group lock;
+         * 2. flush caching pages by osc;
+         * 3. writepage;
+         * 4. echo client.
+         * So far, only direct IO and glimpse cl_io need to be restarted
+         * if the layout changes during the IO.
+         */
+                     ci_ignore_layout:1;
         /**
          * Number of pages owned by this IO. For invariant checking.
          */
@@ -2373,10 +2444,12 @@ struct cl_io {
  * Per-transfer attributes.
  */
 struct cl_req_attr {
-        /** Generic attributes for the server consumption. */
-        struct obdo *cra_oa;
-        /** Capability. */
-        struct obd_capa *cra_capa;
+        /** Generic attributes for the server consumption. */
+        struct obdo     *cra_oa;
+        /** Capability. */
+        struct obd_capa *cra_capa;
+        /** Jobid */
+        char             cra_jobid[JOBSTATS_JOBID_SIZE];
 };
 
 /**
@@ -2663,15 +2736,19 @@ static inline int cl_object_same(struct cl_object *o0, struct cl_object *o1)
  * @{ */
 enum {
         CLP_GANG_OKAY = 0,
+        CLP_GANG_RESCHED,
         CLP_GANG_AGAIN,
-        CLP_GANG_RESCHED
+        CLP_GANG_ABORT
 };
+
+/* callback of cl_page_gang_lookup() */
+typedef int (*cl_page_gang_cb_t) (const struct lu_env *, struct cl_io *,
+                                  struct cl_page *, void *);
 
 int cl_page_gang_lookup (const struct lu_env *env, struct cl_object *obj,
                          struct cl_io *io, pgoff_t start, pgoff_t end,
-                         struct cl_page_list *plist);
+                         cl_page_gang_cb_t cb, void *cbdata);
 struct cl_page *cl_page_lookup (struct cl_object_header *hdr, pgoff_t index);
 struct cl_page *cl_page_find   (const struct lu_env *env,
@@ -2738,6 +2815,8 @@ int cl_page_cache_add (const struct lu_env *env, struct cl_io *io,
 void cl_page_clip     (const struct lu_env *env, struct cl_page *pg,
                        int from, int to);
 int cl_page_cancel    (const struct lu_env *env, struct cl_page *page);
+int cl_page_flush     (const struct lu_env *env, struct cl_io *io,
+                       struct cl_page *pg);
 
 /** @} transfer */
 
@@ -2784,9 +2863,19 @@ struct cl_lock *cl_lock_peek(const struct lu_env *env, const struct cl_io *io,
 struct cl_lock *cl_lock_request(const struct lu_env *env, struct cl_io *io,
                                 const struct cl_lock_descr *need,
                                 const char *scope, const void *source);
-struct cl_lock *cl_lock_at_page(const struct lu_env *env, struct cl_object *obj,
-                                struct cl_page *page, struct cl_lock *except,
-                                int pending, int canceld);
+struct cl_lock *cl_lock_at_pgoff(const struct lu_env *env,
+                                 struct cl_object *obj, pgoff_t index,
+                                 struct cl_lock *except, int pending,
+                                 int canceld);
+static inline struct cl_lock *cl_lock_at_page(const struct lu_env *env,
+                                              struct cl_object *obj,
+                                              struct cl_page *page,
+                                              struct cl_lock *except,
+                                              int pending, int canceld)
+{
+        return cl_lock_at_pgoff(env, obj, page->cp_index, except,
+                                pending, canceld);
+}
 const struct cl_lock_slice *cl_lock_at(const struct cl_lock *lock,
                                        const struct lu_device_type *dtype);
 
@@ -2796,12 +2885,14 @@ void cl_lock_get_trust (struct cl_lock *lock);
 void cl_lock_put       (const struct lu_env *env, struct cl_lock *lock);
 void cl_lock_hold_add  (const struct lu_env *env, struct cl_lock *lock,
                         const char *scope, const void *source);
+void cl_lock_hold_release(const struct lu_env *env, struct cl_lock *lock,
+                          const char *scope, const void *source);
 void cl_lock_unhold    (const struct lu_env *env, struct cl_lock *lock,
                         const char *scope, const void *source);
 void cl_lock_release   (const struct lu_env *env, struct cl_lock *lock,
                         const char *scope, const void *source);
 void cl_lock_user_add  (const struct lu_env *env, struct cl_lock *lock);
-int  cl_lock_user_del  (const struct lu_env *env, struct cl_lock *lock);
+void cl_lock_user_del  (const struct lu_env *env, struct cl_lock *lock);
 
 enum cl_lock_state cl_lock_intransit(const struct lu_env *env,
                                      struct cl_lock *lock);
 
@@ -2868,8 +2959,7 @@ int cl_lock_mutex_try (const struct lu_env *env, struct cl_lock *lock);
 void cl_lock_mutex_put (const struct lu_env *env, struct cl_lock *lock);
 int  cl_lock_is_mutexed (struct cl_lock *lock);
 int  cl_lock_nr_mutexed (const struct lu_env *env);
-int cl_lock_page_out (const struct lu_env *env, struct cl_lock *lock,
-                      int discard);
+int cl_lock_discard_pages(const struct lu_env *env, struct cl_lock *lock);
 int cl_lock_ext_match (const struct cl_lock_descr *has,
                        const struct cl_lock_descr *need);
 int cl_lock_descr_match(const struct cl_lock_descr *has,
@@ -2927,11 +3017,10 @@ int cl_io_prepare_write(const struct lu_env *env, struct cl_io *io,
 int cl_io_commit_write (const struct lu_env *env, struct cl_io *io,
                         struct cl_page *page, unsigned from, unsigned to);
 int cl_io_submit_rw    (const struct lu_env *env, struct cl_io *io,
-                        enum cl_req_type iot, struct cl_2queue *queue,
-                        enum cl_req_priority priority);
+                        enum cl_req_type iot, struct cl_2queue *queue);
 int cl_io_submit_sync  (const struct lu_env *env, struct cl_io *io,
-                        enum cl_req_type iot, struct cl_2queue *queue,
-                        enum cl_req_priority priority, long timeout);
+                        enum cl_req_type iot, struct cl_2queue *queue,
+                        long timeout);
 void cl_io_rw_advance  (const struct lu_env *env, struct cl_io *io,
                         size_t nob);
 int cl_io_cancel       (const struct lu_env *env, struct cl_io *io,
@@ -2946,6 +3035,16 @@ static inline int cl_io_is_append(const struct cl_io *io)
         return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_append;
 }
 
+static inline int cl_io_is_sync_write(const struct cl_io *io)
+{
+        return io->ci_type == CIT_WRITE && io->u.ci_wr.wr_sync;
+}
+
+static inline int cl_io_is_mkwrite(const struct cl_io *io)
+{
+        return io->ci_type == CIT_FAULT && io->u.ci_fault.ft_mkwrite;
+}
+
 /**
  * True, iff \a io is a truncate(2).
  */
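
The new CIT_FSYNC io type is driven through the standard clio entry points. The sketch below shows roughly how a caller could flush a byte range with it; it is illustrative only: do_fsync_range() is a hypothetical helper, the env/io pair is assumed to be supplied by the caller, and only functions already declared in cl_object.h (cl_io_init(), cl_io_loop(), cl_io_fini()) are used.

/* Hypothetical helper: flush [start, end] of the object "obj"/"fid"
 * using the new CIT_FSYNC io type.  "env" and "io" come from the caller. */
static int do_fsync_range(const struct lu_env *env, struct cl_io *io,
                          struct cl_object *obj, struct lu_fid *fid,
                          loff_t start, loff_t end, enum cl_fsync_mode mode)
{
        int result;

        io->ci_obj = obj;
        io->ci_ignore_layout = 1;        /* flushing does not depend on layout */

        io->u.ci_fsync.fi_start      = start;
        io->u.ci_fsync.fi_end        = end;
        io->u.ci_fsync.fi_fid        = fid;
        io->u.ci_fsync.fi_capa       = NULL;  /* no capability in this sketch */
        io->u.ci_fsync.fi_mode       = mode;
        io->u.ci_fsync.fi_nr_written = 0;

        result = cl_io_init(env, io, CIT_FSYNC, obj);
        if (result == 0)
                result = cl_io_loop(env, io);
        cl_io_fini(env, io);

        /* on success, report how many pages were written or discarded */
        return result < 0 ? result : io->u.ci_fsync.fi_nr_written;
}

Passing CL_FSYNC_LOCAL gives a local flush-and-wait; CL_FSYNC_ALL would additionally require the OST_SYNC round trip described in the enum.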
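
cl_page_gang_lookup() no longer fills a caller-supplied cl_page_list; it invokes the new cl_page_gang_cb_t on each page found and continues or stops according to the CLP_GANG_* value returned. A minimal callback reproducing the old collect-into-a-plist behaviour could look like this (page_collect_cb is a hypothetical name; cl_page_list_add() is declared elsewhere in this header):

/* Hypothetical cl_page_gang_lookup() callback: collect each page into
 * the cl_page_list passed through "cbdata". */
static int page_collect_cb(const struct lu_env *env, struct cl_io *io,
                           struct cl_page *page, void *cbdata)
{
        struct cl_page_list *plist = cbdata;

        cl_page_list_add(plist, page);
        /* CLP_GANG_OKAY: keep scanning.  A callback may instead return
         * CLP_GANG_RESCHED to ask for a reschedule, CLP_GANG_AGAIN to
         * restart the scan, or CLP_GANG_ABORT to stop early. */
        return CLP_GANG_OKAY;
}

A caller would then invoke cl_page_gang_lookup(env, obj, io, start, end, page_collect_cb, &plist); the callback form lets lock cancellation and fsync paths process pages in place instead of building an intermediate list.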
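
The new cpo_flush() method is reached through cl_page_flush(), mirroring the other per-layer transfer operations. A writepage-style caller that already owns the page might do something like the following sketch (flush_one_page() is hypothetical; error handling is reduced to a debug message, where ll_writepage would instead redirty the VM page or propagate the error):

/* Hypothetical sketch of a writepage-style caller.  It assumes "page"
 * is already owned by "io" and asks each layer to write it out;
 * cl_page_flush() fans out to the layers' cpo_flush() methods. */
static int flush_one_page(const struct lu_env *env, struct cl_io *io,
                          struct cl_page *page)
{
        int result;

        result = cl_page_flush(env, io, page);
        if (result != 0)
                CL_PAGE_DEBUG(D_ERROR, env, page,
                              "flush failed: %d\n", result);
        return result;
}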