/* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
* vim:expandtab:shiftwidth=8:tabstop=8:
*
- * lustre/mdt/mdt_internal.h
- * Lustre Metadata Target (mdt) request handler
+ * GPL HEADER START
*
- * Copyright (c) 2006 Cluster File Systems, Inc.
- * Author: Peter Braam <braam@clusterfs.com>
- * Author: Andreas Dilger <adilger@clusterfs.com>
- * Author: Phil Schwan <phil@clusterfs.com>
- * Author: Mike Shaver <shaver@clusterfs.com>
- * Author: Nikita Danilov <nikita@clusterfs.com>
- * Author: Huang Hua <huanghua@clusterfs.com>
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
- * This file is part of the Lustre file system, http://www.lustre.org
- * Lustre is a trademark of Cluster File Systems, Inc.
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 only,
+ * as published by the Free Software Foundation.
*
- * You may have signed or agreed to another license before downloading
- * this software. If so, you are bound by the terms and conditions
- * of that agreement, and the following does not apply to you. See the
- * LICENSE file included with this distribution for more information.
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License version 2 for more details (a copy is included
+ * in the LICENSE file that accompanied this code).
*
- * If you did not agree to a different license, then this copy of Lustre
- * is open source software; you can redistribute it and/or modify it
- * under the terms of version 2 of the GNU General Public License as
- * published by the Free Software Foundation.
+ * You should have received a copy of the GNU General Public License
+ * version 2 along with this program; If not, see
+ * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
*
- * In either case, Lustre is distributed in the hope that it will be
- * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * license text for more details.
+ * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
+ * CA 95054 USA or visit www.sun.com if you need additional information or
+ * have any questions.
+ *
+ * GPL HEADER END
+ */
+/*
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Use is subject to license terms.
+ *
+ * Copyright (c) 2011 Whamcloud, Inc.
+ *
+ */
+/*
+ * This file is part of Lustre, http://www.lustre.org/
+ * Lustre is a trademark of Sun Microsystems, Inc.
+ *
+ * lustre/mdt/mdt_internal.h
+ *
+ * Lustre Metadata Target (mdt) request handler
+ *
+ * Author: Peter Braam <braam@clusterfs.com>
+ * Author: Andreas Dilger <adilger@clusterfs.com>
+ * Author: Phil Schwan <phil@clusterfs.com>
+ * Author: Mike Shaver <shaver@clusterfs.com>
+ * Author: Nikita Danilov <nikita@clusterfs.com>
+ * Author: Huang Hua <huanghua@clusterfs.com>
*/
#ifndef _MDT_INTERNAL_H
* struct lustre_handle
*/
#include <lustre/lustre_idl.h>
+#include <lustre_disk.h>
+#include <lu_target.h>
#include <md_object.h>
-#include <dt_object.h>
#include <lustre_fid.h>
#include <lustre_fld.h>
#include <lustre_req_layout.h>
-/* LR_CLIENT_SIZE, etc. */
-#include <lustre_disk.h>
#include <lustre_sec.h>
#include <lvfs.h>
-
-
-/* Data stored per client in the last_rcvd file. In le32 order. */
-struct mdt_client_data {
- __u8 mcd_uuid[40]; /* client UUID */
- __u64 mcd_last_transno; /* last completed transaction ID */
- __u64 mcd_last_xid; /* xid for the last transaction */
- __u32 mcd_last_result; /* result from last RPC */
- __u32 mcd_last_data; /* per-op data (disposition for open &c.) */
- /* for MDS_CLOSE and MDS_DONE_WRITTING requests */
- __u64 mcd_last_close_transno; /* last completed transaction ID */
- __u64 mcd_last_close_xid; /* xid for the last transaction */
- __u32 mcd_last_close_result; /* result from last RPC */
- __u8 mcd_padding[LR_CLIENT_SIZE - 84];
-};
-
-static inline __u64 mcd_last_transno(struct mdt_client_data *mcd)
-{
- return max(mcd->mcd_last_transno, mcd->mcd_last_close_transno);
-}
-
-static inline __u64 mcd_last_xid(struct mdt_client_data *mcd)
-{
- return max(mcd->mcd_last_xid, mcd->mcd_last_close_xid);
-}
+#include <lustre_idmap.h>
+#include <lustre_eacl.h>
+#include <lustre_fsfilt.h>
/* check if request's xid is equal to last one or not*/
static inline int req_xid_is_last(struct ptlrpc_request *req)
{
- struct mdt_client_data *mcd = req->rq_export->exp_mdt_data.med_mcd;
- return (req->rq_xid == mcd->mcd_last_xid ||
- req->rq_xid == mcd->mcd_last_close_xid);
+ struct lsd_client_data *lcd = req->rq_export->exp_target_data.ted_lcd;
+ return (req->rq_xid == lcd->lcd_last_xid ||
+ req->rq_xid == lcd->lcd_last_close_xid);
}
-/* copied from lr_server_data.
- * mds data stored at the head of last_rcvd file. In le32 order. */
-struct mdt_server_data {
- __u8 msd_uuid[40]; /* server UUID */
- __u64 msd_last_transno; /* last completed transaction ID */
- __u64 msd_mount_count; /* incarnation number */
- __u32 msd_feature_compat; /* compatible feature flags */
- __u32 msd_feature_rocompat;/* read-only compatible feature flags */
- __u32 msd_feature_incompat;/* incompatible feature flags */
- __u32 msd_server_size; /* size of server data area */
- __u32 msd_client_start; /* start of per-client data area */
- __u16 msd_client_size; /* size of per-client data area */
- //__u16 msd_subdir_count; /* number of subdirectories for objects */
- //__u64 msd_catalog_oid; /* recovery catalog object id */
- //__u32 msd_catalog_ogen; /* recovery catalog inode generation */
- //__u8 msd_peeruuid[40]; /* UUID of MDS associated with this OST */
- //__u32 msd_ost_index; /* index number of OST in LOV */
- //__u32 msd_mdt_index; /* index number of MDT in LMV */
- __u8 msd_padding[LR_SERVER_SIZE - 78];
-};
-
struct mdt_object;
/* file data for open files on MDS */
struct mdt_file_data {
struct portals_handle mfd_handle; /* must be first */
- struct list_head mfd_list; /* protected by med_open_lock */
+ cfs_list_t mfd_list; /* protected by med_open_lock */
__u64 mfd_xid; /* xid of the open request */
struct lustre_handle mfd_old_handle; /* old handle in replay case */
int mfd_mode; /* open mode provided by client */
struct mdt_object *mfd_object; /* point to opened object */
};
+/* mdt state flag bits */
+#define MDT_FL_CFGLOG 0
+#define MDT_FL_SYNCED 1
+
struct mdt_device {
/* super-class */
struct md_device mdt_md_dev;
/* underlying device */
struct md_device *mdt_child;
struct dt_device *mdt_bottom;
+ /** target device */
+ struct lu_target mdt_lut;
/*
* Options bit-fields.
*/
mo_acl :1,
mo_compat_resname:1,
mo_mds_capa :1,
- mo_oss_capa :1;
+ mo_oss_capa :1,
+ mo_cos :1;
} mdt_opts;
/* mdt state flags */
- __u32 mdt_fl_cfglog:1,
- mdt_fl_synced:1;
- /* lock to pretect epoch and write count */
- spinlock_t mdt_ioepoch_lock;
+ unsigned long mdt_state;
+ /* lock to protect IOepoch */
+ cfs_spinlock_t mdt_ioepoch_lock;
__u64 mdt_ioepoch;
- /* Transaction related stuff here */
- spinlock_t mdt_transno_lock;
- __u64 mdt_last_transno;
-
/* transaction callbacks */
struct dt_txn_callback mdt_txn_cb;
- /* last_rcvd file */
- struct dt_object *mdt_last_rcvd;
/* these values should be updated from lov if necessary.
* or should be placed somewhere else. */
int mdt_max_mdsize;
int mdt_max_cookiesize;
- __u64 mdt_mount_count;
-
- /* last_rcvd data */
- struct mdt_server_data mdt_msd;
- spinlock_t mdt_client_bitmap_lock;
- unsigned long mdt_client_bitmap[(LR_MAX_CLIENTS >> 3) / sizeof(long)];
struct upcall_cache *mdt_identity_cache;
- struct upcall_cache *mdt_rmtacl_cache;
- /* root squash */
- struct rootsquash_info *mdt_rootsquash_info;
+ /* sptlrpc rules */
+ cfs_rwlock_t mdt_sptlrpc_lock;
+ struct sptlrpc_rule_set mdt_sptlrpc_rset;
/* capability keys */
unsigned long mdt_capa_timeout;
struct dt_object *mdt_ck_obj;
unsigned long mdt_ck_timeout;
unsigned long mdt_ck_expiry;
- struct timer_list mdt_ck_timer;
+ cfs_timer_t mdt_ck_timer;
struct ptlrpc_thread mdt_ck_thread;
struct lustre_capa_key mdt_capa_keys[2];
- unsigned int mdt_capa_conf:1;
+ unsigned int mdt_capa_conf:1,
+ mdt_som_conf:1;
+
+ /* root squash */
+ uid_t mdt_squash_uid;
+ gid_t mdt_squash_gid;
+ cfs_list_t mdt_nosquash_nids;
+ char *mdt_nosquash_str;
+ int mdt_nosquash_strlen;
+ cfs_rw_semaphore_t mdt_squash_sem;
cfs_proc_dir_entry_t *mdt_proc_entry;
struct lprocfs_stats *mdt_stats;
+ int mdt_sec_level;
+ struct rename_stats mdt_rename_stats;
};
-/*XXX copied from mds_internal.h */
-#define MDT_SERVICE_WATCHDOG_TIMEOUT (obd_timeout * 1000)
+#define MDT_SERVICE_WATCHDOG_FACTOR (2)
#define MDT_ROCOMPAT_SUPP (OBD_ROCOMPAT_LOVOBJID)
-#define MDT_INCOMPAT_SUPP (OBD_INCOMPAT_MDT | OBD_INCOMPAT_COMMON_LR)
+#define MDT_INCOMPAT_SUPP (OBD_INCOMPAT_MDT | OBD_INCOMPAT_COMMON_LR | \
+ OBD_INCOMPAT_FID | OBD_INCOMPAT_IAM_DIR | \
+ OBD_INCOMPAT_LMM_VER | OBD_INCOMPAT_MULTI_OI)
+#define MDT_COS_DEFAULT (0)
struct mdt_object {
struct lu_object_header mot_header;
struct md_object mot_obj;
__u64 mot_ioepoch;
__u64 mot_flags;
- int mot_epochcount;
+ int mot_ioepoch_count;
int mot_writecount;
+ /* Lock to protect object's IO epoch. */
+ cfs_semaphore_t mot_ioepoch_sem;
+ /* Lock to protect create_data */
+ cfs_semaphore_t mot_lov_sem;
+};
+
+enum mdt_object_flags {
+ /** SOM attributes are changed. */
+ MOF_SOM_CHANGE = (1 << 0),
+ /**
+ * The SOM recovery state for mdt object.
+ * This state is an in-memory equivalent of an absent SOM EA, used
+ * instead of invalidating SOM EA while IOEpoch is still opened when
+ * a client eviction occurs or a client fails to obtain SOM attributes.
+ * It indicates that the last IOEpoch holder will need to obtain SOM
+ * attributes under a [0;EOF] extent lock to flush all the cached data
+ * of clients evicted from the MDS (but not necessarily evicted from
+ * the OST) before taking the OST attributes.
+ */
+ MOF_SOM_RECOV = (1 << 1),
+ /** File has been just created. */
+ MOF_SOM_CREATED = (1 << 2),
+ /** lov object has been created. */
+ MOF_LOV_CREATED = (1 << 3),
};
struct mdt_lock_handle {
};
enum mdt_reint_flag {
- MRF_SETATTR_LOCKED = 1 << 0,
+ MRF_OPEN_TRUNC = 1 << 0,
};
/*
struct mdt_thread_info {
/*
* XXX: Part One:
- * The following members will be filled expilictly
+ * The following members will be filled explicitly
* with specific data in mdt_thread_info_init().
*/
-
- /*
- * for req-layout interface. This field should be first to be compatible
- * with "struct com_thread_info" in seq and fld.
+ /* TODO: move this into mdt_session_key(with LCT_SESSION), because
+ * request handling may migrate from one server thread to another.
*/
- struct req_capsule mti_pill;
+ struct req_capsule *mti_pill;
+
/* although we have export in req, there are cases when it is not
* available, e.g. closing files upon export destroy */
struct obd_export *mti_exp;
/*
- * number of buffers in reply message.
- */
- int mti_rep_buf_nr;
- /*
- * sizes of reply buffers.
- */
- int mti_rep_buf_size[REQ_MAX_FIELD_NR];
- /*
* A couple of lock handles.
*/
struct mdt_lock_handle mti_lh[MDT_LH_NR];
/*
* XXX: Part Three:
- * The following members will be filled expilictly
+ * The following members will be filled explicitly
* with zero in mdt_reint_unpack(), because they are only used
* by reint requests (including mdt_reint_open()).
*/
*/
struct mdt_reint_record mti_rr;
+ /** md objects included in operation */
+ struct mdt_object *mti_mos;
+ __u64 mti_ver[PTLRPC_NUM_VERSIONS];
/*
* Operation specification (currently create and lookup)
*/
struct obd_uuid uuid[2]; /* for mdt_seq_init_cli() */
char ns_name[48]; /* for mdt_init0() */
struct lustre_cfg_bufs bufs; /* for mdt_stack_fini() */
- struct kstatfs ksfs; /* for mdt_statfs() */
+ cfs_kstatfs_t ksfs; /* for mdt_statfs() */
struct {
/* for mdt_readpage() */
struct lu_rdpg mti_rdpg;
/* for mdt_sendpage() */
struct l_wait_info mti_wait_info;
} rdpg;
+ struct {
+ struct md_attr attr;
+ struct md_som_data data;
+ } som;
} mti_u;
/* IO epoch related stuff. */
- struct mdt_epoch *mti_epoch;
+ struct mdt_ioepoch *mti_ioepoch;
__u64 mti_replayepoch;
/* server and client data buffers */
- struct mdt_server_data mti_msd;
- struct mdt_client_data mti_mcd;
+ struct lr_server_data mti_lsd;
+ struct lsd_client_data mti_lcd;
loff_t mti_off;
- struct txn_param mti_txn_param;
struct lu_buf mti_buf;
struct lustre_capa_key mti_capa_key;
/* Ops object filename */
struct lu_name mti_name;
+ struct md_attr mti_tmp_attr;
};
typedef void (*mdt_cb_t)(const struct mdt_device *mdt, __u64 transno,
void *mdt_cb_data;
};
-/*
- * Info allocated per-transaction.
- */
-#define MDT_MAX_COMMIT_CB 4
-struct mdt_txn_info {
- __u64 txi_transno;
- unsigned int txi_cb_count;
- struct mdt_commit_cb txi_cb[MDT_MAX_COMMIT_CB];
+enum mdt_txn_op {
+ MDT_TXN_CAPA_KEYS_WRITE_OP,
+ MDT_TXN_LAST_RCVD_WRITE_OP,
};
-extern struct lu_context_key mdt_txn_key;
-
-static inline void mdt_trans_add_cb(const struct thandle *th,
- mdt_cb_t cb_func, void *cb_data)
-{
- struct mdt_txn_info *txi;
-
- txi = lu_context_key_get(&th->th_ctx, &mdt_txn_key);
- LASSERT(txi->txi_cb_count < ARRAY_SIZE(txi->txi_cb));
-
- /* add new callback */
- txi->txi_cb[txi->txi_cb_count].mdt_cb_func = cb_func;
- txi->txi_cb[txi->txi_cb_count].mdt_cb_data = cb_data;
- txi->txi_cb_count++;
-}
+enum mdt_obj_exist{
+ MDT_OBJ_MUST_EXIST,
+ MDT_OBJ_MAY_NOT_EXIST,
+};
-static inline struct md_device_operations *mdt_child_ops(struct mdt_device * m)
+static inline const struct md_device_operations *
+mdt_child_ops(struct mdt_device * m)
{
LASSERT(m->mdt_child);
return m->mdt_child->md_ops;
static inline struct md_object *mdt_object_child(struct mdt_object *o)
{
+ LASSERT(o);
return lu2md(lu_object_next(&o->mot_obj.mo_lu));
}
static inline struct ptlrpc_request *mdt_info_req(struct mdt_thread_info *info)
{
- return info->mti_pill.rc_req;
+ return info->mti_pill ? info->mti_pill->rc_req : NULL;
+}
+
+static inline int req_is_replay(struct ptlrpc_request *req)
+{
+ LASSERT(req->rq_reqmsg);
+ return !!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY);
}
static inline __u64 mdt_conn_flags(struct mdt_thread_info *info)
return lu_object_fid(&o->mot_obj.mo_lu);
}
+static inline struct lu_site *mdt_lu_site(const struct mdt_device *mdt)
+{
+ return mdt->mdt_md_dev.md_lu_dev.ld_site;
+}
+
+static inline struct md_site *mdt_md_site(const struct mdt_device *mdt)
+{
+ return lu_site2md(mdt_lu_site(mdt));
+}
+
+static inline void mdt_export_evict(struct obd_export *exp)
+{
+ class_fail_export(exp);
+ class_export_put(exp);
+}
+
+static inline const char *mdt_obj_dev_name(const struct mdt_object *obj)
+{
+ return lu_dev_name(obj->mot_obj.mo_lu.lo_dev);
+}
+
int mdt_get_disposition(struct ldlm_reply *rep, int flag);
void mdt_set_disposition(struct mdt_thread_info *info,
struct ldlm_reply *rep, int flag);
struct mdt_object *mdt_object_find(const struct lu_env *,
struct mdt_device *,
- const struct lu_fid *);
+ const struct lu_fid *,
+ enum mdt_obj_exist check_exist);
struct mdt_object *mdt_object_find_lock(struct mdt_thread_info *,
const struct lu_fid *,
struct mdt_lock_handle *,
- __u64);
+ __u64 ibits,
+ enum mdt_obj_exist check_exist);
void mdt_object_unlock_put(struct mdt_thread_info *,
struct mdt_object *,
struct mdt_lock_handle *,
int decref);
+void mdt_client_compatibility(struct mdt_thread_info *info);
+
int mdt_close_unpack(struct mdt_thread_info *info);
int mdt_reint_unpack(struct mdt_thread_info *info, __u32 op);
int mdt_reint_rec(struct mdt_thread_info *, struct mdt_lock_handle *);
-void mdt_pack_size2body(struct mdt_thread_info *info,
- struct mdt_object *o);
void mdt_pack_attr2body(struct mdt_thread_info *info, struct mdt_body *b,
const struct lu_attr *attr, const struct lu_fid *fid);
int mdt_getxattr(struct mdt_thread_info *info);
-int mdt_setxattr(struct mdt_thread_info *info);
+int mdt_reint_setxattr(struct mdt_thread_info *info,
+ struct mdt_lock_handle *lh);
void mdt_lock_handle_init(struct mdt_lock_handle *lh);
void mdt_lock_handle_fini(struct mdt_lock_handle *lh);
struct mdt_lock_handle *lhc);
extern void target_recovery_fini(struct obd_device *obd);
-extern void target_recovery_init(struct obd_device *obd,
+extern void target_recovery_init(struct lu_target *lut,
svc_handler_t handler);
int mdt_fs_setup(const struct lu_env *, struct mdt_device *,
- struct obd_device *);
+ struct obd_device *, struct lustre_sb_info *lsi);
void mdt_fs_cleanup(const struct lu_env *, struct mdt_device *);
int mdt_client_del(const struct lu_env *env,
int mdt_client_new(const struct lu_env *env,
struct mdt_device *mdt);
+int mdt_export_stats_init(struct obd_device *obd,
+ struct obd_export *exp,
+ void *client_nid);
+
int mdt_pin(struct mdt_thread_info* info);
int mdt_lock_new_child(struct mdt_thread_info *info,
struct mdt_file_data *mdt_handle2mfd(struct mdt_thread_info *,
const struct lustre_handle *);
-int mdt_epoch_open(struct mdt_thread_info *info, struct mdt_object *o);
-void mdt_sizeonmds_enable(struct mdt_thread_info *info, struct mdt_object *mo);
-int mdt_sizeonmds_enabled(struct mdt_object *mo);
-int mdt_write_get(struct mdt_device *mdt, struct mdt_object *o);
-int mdt_write_read(struct mdt_device *mdt, struct mdt_object *o);
+
+enum {
+ MDT_IOEPOCH_CLOSED = 0,
+ MDT_IOEPOCH_OPENED = 1,
+ MDT_IOEPOCH_GETATTR = 2,
+};
+
+enum {
+ MDT_SOM_DISABLE = 0,
+ MDT_SOM_ENABLE = 1,
+};
+
+int mdt_ioepoch_open(struct mdt_thread_info *info, struct mdt_object *o,
+ int created);
+int mdt_object_is_som_enabled(struct mdt_object *mo);
+int mdt_write_get(struct mdt_object *o);
+void mdt_write_put(struct mdt_object *o);
+int mdt_write_read(struct mdt_object *o);
struct mdt_file_data *mdt_mfd_new(void);
int mdt_mfd_close(struct mdt_thread_info *info, struct mdt_file_data *mfd);
void mdt_mfd_free(struct mdt_file_data *mfd);
int mdt_close(struct mdt_thread_info *info);
int mdt_attr_set(struct mdt_thread_info *info, struct mdt_object *mo,
- int flags);
+ struct md_attr *ma, int flags);
int mdt_done_writing(struct mdt_thread_info *info);
void mdt_shrink_reply(struct mdt_thread_info *info);
int mdt_handle_last_unlink(struct mdt_thread_info *, struct mdt_object *,
const struct md_attr *);
void mdt_reconstruct_open(struct mdt_thread_info *, struct mdt_lock_handle *);
-struct thandle* mdt_trans_start(const struct lu_env *env,
- struct mdt_device *mdt, int credits);
+
+struct thandle *mdt_trans_create(const struct lu_env *env,
+ struct mdt_device *mdt);
+int mdt_trans_start(const struct lu_env *env, struct mdt_device *mdt,
+ struct thandle *th);
void mdt_trans_stop(const struct lu_env *env,
struct mdt_device *mdt, struct thandle *th);
int mdt_record_write(const struct lu_env *env,
void mdt_dump_lmm(int level, const struct lov_mds_md *lmm);
int mdt_check_ucred(struct mdt_thread_info *);
-
int mdt_init_ucred(struct mdt_thread_info *, struct mdt_body *);
-
int mdt_init_ucred_reint(struct mdt_thread_info *);
-
void mdt_exit_ucred(struct mdt_thread_info *);
-
-int groups_from_list(struct group_info *, gid_t *);
-
-void groups_sort(struct group_info *);
+int mdt_version_get_check(struct mdt_thread_info *, struct mdt_object *, int);
+void mdt_version_get_save(struct mdt_thread_info *, struct mdt_object *, int);
+int mdt_version_get_check_save(struct mdt_thread_info *, struct mdt_object *,
+ int);
/* mdt_idmap.c */
+int mdt_init_sec_level(struct mdt_thread_info *);
int mdt_init_idmap(struct mdt_thread_info *);
-
void mdt_cleanup_idmap(struct mdt_export_data *);
-
int mdt_handle_idmap(struct mdt_thread_info *);
-
int ptlrpc_user_desc_do_idmap(struct ptlrpc_request *,
struct ptlrpc_user_desc *);
-
void mdt_body_reverse_idmap(struct mdt_thread_info *,
struct mdt_body *);
-
int mdt_remote_perm_reverse_idmap(struct ptlrpc_request *,
struct mdt_remote_perm *);
-
int mdt_fix_attr_ucred(struct mdt_thread_info *, __u32);
static inline struct mdt_device *mdt_dev(struct lu_device *d)
return container_of0(d, struct mdt_device, mdt_md_dev.md_lu_dev);
}
+static inline struct dt_object *mdt_obj2dt(struct mdt_object *mo)
+{
+ struct lu_object *lo;
+ struct mdt_device *mdt = mdt_dev(mo->mot_obj.mo_lu.lo_dev);
+
+ lo = lu_object_locate(mo->mot_obj.mo_lu.lo_header,
+ mdt->mdt_bottom->dd_lu_dev.ld_type);
+ return lu2dt(lo);
+}
+
/* mdt/mdt_identity.c */
#define MDT_IDENTITY_UPCALL_PATH "/usr/sbin/l_getidentity"
extern struct upcall_cache_ops mdt_identity_upcall_cache_ops;
-struct mdt_identity *mdt_identity_get(struct upcall_cache *, __u32);
+struct md_identity *mdt_identity_get(struct upcall_cache *, __u32);
-void mdt_identity_put(struct upcall_cache *, struct mdt_identity *);
+void mdt_identity_put(struct upcall_cache *, struct md_identity *);
void mdt_flush_identity(struct upcall_cache *, int);
-__u32 mdt_identity_get_setxid_perm(struct mdt_identity *, __u32, lnet_nid_t);
+__u32 mdt_identity_get_perm(struct md_identity *, __u32, lnet_nid_t);
int mdt_pack_remote_perm(struct mdt_thread_info *, struct mdt_object *, void *);
-/* mdt/mdt_rmtacl.c */
-#define MDT_RMTACL_UPCALL_PATH "/usr/sbin/l_facl"
-
-extern struct upcall_cache_ops mdt_rmtacl_upcall_cache_ops;
-
-int mdt_rmtacl_upcall(struct mdt_thread_info *, char *, struct lu_buf *);
-
extern struct lu_context_key mdt_thread_key;
/* debug issues helper starts here*/
static inline void mdt_fail_write(const struct lu_env *env,
struct dt_device *dd, int id)
{
- if (OBD_FAIL_CHECK(id)) {
- CERROR(LUSTRE_MDT_NAME": obd_fail_loc=%x, fail write ops\n",
+ if (OBD_FAIL_CHECK_ORSET(id, OBD_FAIL_ONCE)) {
+ CERROR(LUSTRE_MDT_NAME": cfs_fail_loc=%x, fail write ops\n",
id);
dd->dd_ops->dt_ro(env, dd);
/* We set FAIL_ONCE because we never "un-fail" a device */
- obd_fail_loc |= OBD_FAILED | OBD_FAIL_ONCE;
}
}
RETURN(1);
}
DEBUG_REQ(D_HA, req, "no reply for RESENT req (have "LPD64")",
- req->rq_export->exp_mdt_data.med_mcd->mcd_last_xid);
+ req->rq_export->exp_target_data.ted_lcd->lcd_last_xid);
}
RETURN(0);
}
-#define MDT_FAIL_CHECK(id) \
-({ \
- if (unlikely(OBD_FAIL_CHECK(id))) \
- CERROR(LUSTRE_MDT_NAME": " #id " test failed\n"); \
- OBD_FAIL_CHECK(id); \
-})
-
-#define MDT_FAIL_CHECK_ONCE(id) \
-({ int _ret_ = 0; \
- if (unlikely(OBD_FAIL_CHECK(id))) { \
- CERROR(LUSTRE_MDT_NAME": *** obd_fail_loc=%x ***\n", id); \
- obd_fail_loc |= OBD_FAILED; \
- if ((id) & OBD_FAIL_ONCE) \
- obd_fail_loc |= OBD_FAIL_ONCE; \
- _ret_ = 1; \
- } \
- _ret_; \
-})
-
-#define MDT_FAIL_RETURN(id, ret) \
-do { \
- if (unlikely(MDT_FAIL_CHECK_ONCE(id))) { \
- RETURN(ret); \
- } \
-} while(0)
-
struct md_ucred *mdt_ucred(const struct mdt_thread_info *info);
static inline int is_identity_get_disabled(struct upcall_cache *cache)
return cache ? (strcmp(cache->uc_upcall, "NONE") == 0) : 1;
}
+int mdt_blocking_ast(struct ldlm_lock*, struct ldlm_lock_desc*, void*, int);
+
/* Issues dlm lock on passed @ns, @f stores it lock handle into @lh. */
static inline int mdt_fid_lock(struct ldlm_namespace *ns,
struct lustre_handle *lh,
ldlm_mode_t mode,
ldlm_policy_data_t *policy,
const struct ldlm_res_id *res_id,
- int flags)
+ int flags, const __u64 *client_cookie)
{
int rc;
LASSERT(lh != NULL);
rc = ldlm_cli_enqueue_local(ns, res_id, LDLM_IBITS, policy,
- mode, &flags, ldlm_blocking_ast,
- ldlm_completion_ast, NULL, NULL,
- 0, NULL, lh);
+ mode, &flags, mdt_blocking_ast,
+ ldlm_completion_ast, NULL, NULL, 0,
+ client_cookie, lh);
return rc == ELDLM_OK ? 0 : -EIO;
}
return tlname;
}
+void mdt_enable_cos(struct mdt_device *, int);
+int mdt_cos_is_enabled(struct mdt_device *);
+int mdt_hsm_copytool_send(struct obd_export *exp);
+
/* lprocfs stuff */
+enum {
+ LPROC_MDT_OPEN = 0,
+ LPROC_MDT_CLOSE,
+ LPROC_MDT_MKNOD,
+ LPROC_MDT_LINK,
+ LPROC_MDT_UNLINK,
+ LPROC_MDT_MKDIR,
+ LPROC_MDT_RMDIR,
+ LPROC_MDT_RENAME,
+ LPROC_MDT_GETATTR,
+ LPROC_MDT_SETATTR,
+ LPROC_MDT_GETXATTR,
+ LPROC_MDT_SETXATTR,
+ LPROC_MDT_STATFS,
+ LPROC_MDT_SYNC,
+ LPROC_MDT_SAMEDIR_RENAME,
+ LPROC_MDT_CROSSDIR_RENAME,
+ LPROC_MDT_LAST,
+};
+void mdt_counter_incr(struct obd_export *exp, int opcode);
+void mdt_stats_counter_init(struct lprocfs_stats *stats);
+void lprocfs_mdt_init_vars(struct lprocfs_static_vars *lvars);
int mdt_procfs_init(struct mdt_device *mdt, const char *name);
int mdt_procfs_fini(struct mdt_device *mdt);
+void mdt_rename_counter_tally(struct mdt_thread_info *info,
+ struct mdt_device *mdt, struct obd_export *exp,
+ struct mdt_object *src, struct mdt_object *tgt);
void mdt_time_start(const struct mdt_thread_info *info);
void mdt_time_end(const struct mdt_thread_info *info, int idx);
-enum {
- LPROC_MDT_NR
-};
-
/* Capability */
int mdt_ck_thread_start(struct mdt_device *mdt);
void mdt_ck_thread_stop(struct mdt_device *mdt);
const struct lu_fid *fid,
struct lustre_capa *capa)
{
- struct mdt_device *dev = info->mti_mdt;
struct md_capainfo *ci;
LASSERT(offset >= 0 && offset <= MD_CAPAINFO_MAX);
- if (!dev->mdt_opts.mo_mds_capa)
+ if (!info->mti_mdt->mdt_opts.mo_mds_capa ||
+ !(info->mti_exp->exp_connect_flags & OBD_CONNECT_MDS_CAPA))
return;
ci = md_capainfo(info->mti_env);
LASSERT(ci);
- ci->mc_fid[offset] = fid;
+ ci->mc_fid[offset] = *fid;
ci->mc_capa[offset] = capa;
}
if (!ci)
return;
for (i = 0; i < MD_CAPAINFO_MAX; i++) {
- if (!ci->mc_fid[i])
- continue;
if (!ci->mc_capa[i]) {
CERROR("no capa for index %d "DFID"\n",
- i, PFID(ci->mc_fid[i]));
+ i, PFID(&ci->mc_fid[i]));
continue;
}
if (ci->mc_capa[i] == BYPASS_CAPA) {
CERROR("bypass for index %d "DFID"\n",
- i, PFID(ci->mc_fid[i]));
+ i, PFID(&ci->mc_fid[i]));
continue;
}
DEBUG_CAPA(D_ERROR, ci->mc_capa[i], "index %d", i);
}
}
+static inline struct obd_device *mdt2obd_dev(const struct mdt_device *mdt)
+{
+ return mdt->mdt_md_dev.md_lu_dev.ld_obd;
+}
#endif /* __KERNEL__ */
#endif /* _MDT_H */