* Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2012, Whamcloud, Inc.
+ * Copyright (c) 2012, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
#define OFD_ROCOMPAT_SUPP (0)
#define OFD_INCOMPAT_SUPP (OBD_INCOMPAT_GROUPS | OBD_INCOMPAT_OST | \
OBD_INCOMPAT_COMMON_LR)
-#define OFD_MAX_GROUPS 256
+/* default number of objects to precreate in one batch; scaled from the
+ * number of O/<seq>/d* subdirectories objects are spread across */
+#define OFD_PRECREATE_BATCH_DEFAULT (FILTER_SUBDIR_COUNT * 4)
+
+/* On small filesystems we should not precreate too many objects in
+ * a single transaction, otherwise we can overflow the transaction. */
+#define OFD_PRECREATE_SMALL_FS (1024ULL * 1024 * 1024)
+#define OFD_PRECREATE_BATCH_SMALL 8
/* Limit the returned fields marked valid to those that we actually might set */
#define OFD_VALID_FLAGS (LA_TYPE | LA_MODE | LA_SIZE | LA_BLOCKS | \
char *jobid, long amount)
{
if (exp->exp_obd && exp->exp_obd->u.obt.obt_jobstats.ojs_hash &&
- (exp->exp_connect_flags & OBD_CONNECT_JOBSTATS))
+ (exp_connect_flags(exp) & OBD_CONNECT_JOBSTATS))
lprocfs_job_stats_log(exp->exp_obd, jobid, opcode, amount);
+
+ if (exp->exp_nid_stats != NULL &&
+ exp->exp_nid_stats->nid_stats != NULL) {
+ if (opcode == LPROC_OFD_STATS_READ)
+ lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
+ LPROC_OFD_READ_BYTES, amount);
+ else if (opcode == LPROC_OFD_STATS_WRITE)
+ lprocfs_counter_add(exp->exp_nid_stats->nid_stats,
+ LPROC_OFD_WRITE_BYTES, amount);
+ }
}
+/* Per-sequence state kept by the OFD: one instance per object sequence,
+ * linked into ofd_device::ofd_seq_list and refcounted through
+ * ofd_seq_get()/ofd_seq_put().  Replaces the old fixed-size
+ * OFD_MAX_GROUPS arrays in struct ofd_device. */
+struct ofd_seq {
+ cfs_list_t os_list; /* linkage into ofd_device::ofd_seq_list */
+ struct ost_id os_oi; /* sequence number and last object id */
+ spinlock_t os_last_oid_lock; /* protects os_last_oid */
+ struct mutex os_create_lock; /* serializes precreates in this seq */
+ cfs_atomic_t os_refc; /* see ofd_seq_get()/ofd_seq_put() */
+ struct dt_object *os_lastid_obj; /* on-disk LAST_ID object for this seq */
+ unsigned long os_destroys_in_progress:1; /* orphan destroy running */
+};
+
+/* shorthands for the two halves of os_oi */
+#define os_seq os_oi.oi_seq
+#define os_last_oid os_oi.oi_id
+
struct ofd_device {
struct dt_device ofd_dt_dev;
struct dt_device *ofd_osd;
/* last_rcvd file */
struct lu_target ofd_lut;
- struct dt_object *ofd_last_group_file;
struct dt_object *ofd_health_check_file;
int ofd_subdir_count;
+ cfs_list_t ofd_seq_list; /* list of struct ofd_seq */
+ rwlock_t ofd_seq_list_lock; /* protects ofd_seq_list/ofd_seq_count */
+ int ofd_seq_count; /* entries on ofd_seq_list */
+ int ofd_precreate_batch; /* objects precreated per transaction */
+ spinlock_t ofd_batch_lock; /* protects ofd_precreate_batch */
- int ofd_max_group;
- obd_id ofd_last_objids[OFD_MAX_GROUPS];
- cfs_mutex_t ofd_create_locks[OFD_MAX_GROUPS];
- struct dt_object *ofd_lastid_obj[OFD_MAX_GROUPS];
- cfs_spinlock_t ofd_objid_lock;
- unsigned long ofd_destroys_in_progress;
/* protect all statfs-related counters */
- cfs_spinlock_t ofd_osfs_lock;
+ spinlock_t ofd_osfs_lock;
/* statfs optimization: we cache a bit */
struct obd_statfs ofd_osfs;
__u64 ofd_osfs_age;
/* grants: all values in bytes */
/* grant lock to protect all grant counters */
- cfs_spinlock_t ofd_grant_lock;
+ spinlock_t ofd_grant_lock;
/* total amount of dirty data reported by clients in incoming obdo */
obd_size ofd_tot_dirty;
/* sum of filesystem space granted to clients for async writes */
int ofd_fmd_max_num; /* per ofd ofd_mod_data */
cfs_duration_t ofd_fmd_max_age; /* time to fmd expiry */
- cfs_spinlock_t ofd_flags_lock;
+ spinlock_t ofd_flags_lock;
unsigned long ofd_raid_degraded:1,
/* sync journal on writes */
ofd_syncjournal:1,
/* shall we grant space to clients not
* supporting OBD_CONNECT_GRANT_PARAM? */
ofd_grant_compat_disable:1;
+ struct seq_server_site ofd_seq_site; /* FID sequence server state;
+ * set up via ofd_fid_init() */
};
static inline struct ofd_device *ofd_dev(struct lu_device *d)
/* Space used by the I/O, used by grant code */
unsigned long fti_used;
+ struct ost_lvb fti_lvb;
};
extern void target_recovery_fini(struct obd_device *obd);
/* ofd_dev.c */
extern struct lu_context_key ofd_thread_key;
+/* NOTE(review): presumably invoked once recovery completes — confirm caller */
+int ofd_postrecov(const struct lu_env *env, struct ofd_device *ofd);
/* ofd_obd.c */
extern struct obd_ops ofd_obd_ops;
int *from_cache);
/* ofd_fs.c */
-obd_id ofd_last_id(struct ofd_device *ofd, obd_seq seq);
-void ofd_last_id_set(struct ofd_device *ofd, obd_id id, obd_seq seq);
-int ofd_last_id_write(const struct lu_env *env, struct ofd_device *ofd,
- obd_seq seq);
-int ofd_group_load(const struct lu_env *env, struct ofd_device *ofd, int);
+/* last-object-id accessors and sequence lifetime helpers; these operate
+ * on a struct ofd_seq instead of the old per-group arrays */
+obd_id ofd_seq_last_oid(struct ofd_seq *oseq);
+void ofd_seq_last_oid_set(struct ofd_seq *oseq, obd_id id);
+int ofd_seq_last_oid_write(const struct lu_env *env, struct ofd_device *ofd,
+ struct ofd_seq *oseq);
+int ofd_seqs_init(const struct lu_env *env, struct ofd_device *ofd);
+struct ofd_seq *ofd_seq_get(struct ofd_device *ofd, obd_seq seq);
+void ofd_seq_put(const struct lu_env *env, struct ofd_seq *oseq);
+
int ofd_fs_setup(const struct lu_env *env, struct ofd_device *ofd,
struct obd_device *obd);
void ofd_fs_cleanup(const struct lu_env *env, struct ofd_device *ofd);
+int ofd_precreate_batch(struct ofd_device *ofd, int batch);
+struct ofd_seq *ofd_seq_load(const struct lu_env *env, struct ofd_device *ofd,
+ obd_seq seq);
+void ofd_seqs_fini(const struct lu_env *env, struct ofd_device *ofd);
/* ofd_io.c */
int ofd_preprw(const struct lu_env *env,int cmd, struct obd_export *exp,
const struct lu_fid *fid,
struct lu_attr *attr);
int ofd_object_ff_check(const struct lu_env *env, struct ofd_object *fo);
-int ofd_precreate_object(const struct lu_env *env, struct ofd_device *ofd,
- obd_id id, obd_seq seq);
+int ofd_precreate_objects(const struct lu_env *env, struct ofd_device *ofd,
+ obd_id id, struct ofd_seq *oseq, int nr, int sync);
void ofd_object_put(const struct lu_env *env, struct ofd_object *fo);
int ofd_attr_set(const struct lu_env *env, struct ofd_object *fo,
+/* true iff the client export negotiated OBD_CONNECT_GRANT_PARAM */
static inline int ofd_grant_param_supp(struct obd_export *exp)
{
- return !!(exp->exp_connect_flags & OBD_CONNECT_GRANT_PARAM);
+ return !!(exp_connect_flags(exp) & OBD_CONNECT_GRANT_PARAM);
}
/* Blocksize used for client not supporting OBD_CONNECT_GRANT_PARAM.
#define ofd_fmd_drop(exp, fid) do {} while (0)
#endif
+/* ofd_dev.c */
+/* FID/sequence client setup for this target; ofd_fid_set_index records the
+ * OST index, ofd_fid_init/ofd_fid_fini manage ofd_seq_site */
+int ofd_fid_set_index(const struct lu_env *env, struct ofd_device *ofd,
+ int index);
+int ofd_fid_init(const struct lu_env *env, struct ofd_device *ofd);
+int ofd_fid_fini(const struct lu_env *env, struct ofd_device *ofd);
+
/* ofd_lvb.c */
extern struct ldlm_valblock_ops ofd_lvbo;
/* ofd_dlm.c */
+/* LDLM flags widened from int to __u64 to match the 64-bit flag space */
int ofd_intent_policy(struct ldlm_namespace *ns, struct ldlm_lock **lockp,
- void *req_cookie, ldlm_mode_t mode, int flags,
+ void *req_cookie, ldlm_mode_t mode, __u64 flags,
void *data);
static inline struct ofd_thread_info * ofd_info(const struct lu_env *env)
+/* Build the DLM resource name for @fid.  Both IDIF and regular OST fids
+ * are first converted to a struct ost_id so a single helper,
+ * ostid_build_res_name(), lays out the resource name. */
static inline void ofd_build_resid(const struct lu_fid *fid,
struct ldlm_res_id *resname)
{
+ struct ost_id oid;
+
if (fid_is_idif(fid)) {
- /* get id/seq like ostid_idif_pack() does */
- osc_build_res_name(fid_idif_id(fid_seq(fid), fid_oid(fid),
- fid_ver(fid)),
- FID_SEQ_OST_MDT0, resname);
+ /* extract the object id packed inside the IDIF fid */
+ oid.oi_id = fid_idif_id(fid_seq(fid), fid_oid(fid),
+ fid_ver(fid));
+ oid.oi_seq = FID_SEQ_OST_MDT0;
} else {
- /* In the future, where OSTs have FID sequences allocated. */
- fid_build_reg_res_name(fid, resname);
+ oid.oi_id = fid_oid(fid);
+ oid.oi_seq = fid_seq(fid);
}
+ ostid_build_res_name(&oid, resname);
}
+/* Inverse of ofd_build_resid(): reconstruct the fid from a DLM resource
+ * name. */
static inline void ofd_fid_from_resid(struct lu_fid *fid,
const struct ldlm_res_id *name)
{
- /* if seq is FID_SEQ_OST_MDT0 then we have IDIF and resid was built
- * using osc_build_res_name function. */
- if (fid_seq_is_mdt0(name->name[LUSTRE_RES_ID_VER_OID_OFF])) {
- struct ost_id ostid;
-
- ostid.oi_id = name->name[LUSTRE_RES_ID_SEQ_OFF];
- ostid.oi_seq = name->name[LUSTRE_RES_ID_VER_OID_OFF];
- fid_ostid_unpack(fid, &ostid, 0);
- } else {
- fid->f_seq = name->name[LUSTRE_RES_ID_SEQ_OFF];
- fid->f_oid = (__u32)name->name[LUSTRE_RES_ID_VER_OID_OFF];
- fid->f_ver = name->name[LUSTRE_RES_ID_VER_OID_OFF] >> 32;
- }
+ /* To keep compatibility, the resid is packed as res[0] = oi_id,
+ * res[1] = oi_seq. */
+ struct ost_id ostid;
+
+ ostid.oi_id = name->name[LUSTRE_RES_ID_SEQ_OFF];
+ ostid.oi_seq = name->name[LUSTRE_RES_ID_VER_OID_OFF];
+ fid_ostid_unpack(fid, &ostid, 0);
}
static inline void ofd_oti2info(struct ofd_thread_info *info,