*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2012, 2015, Intel Corporation.
+ * Copyright (c) 2012, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
- * Lustre is a trademark of Sun Microsystems, Inc.
*
* lustre/osp/osp_internal.h
*
#include <lustre_fid.h>
#include <lustre_update.h>
#include <lu_target.h>
-#include <lustre_mdc.h>
/*
* Infrastructure to support tracking of last committed llog record
/*
* Precreation pool
*/
- spinlock_t osp_pre_lock;
/* last fid to assign in creation */
struct lu_fid osp_pre_used_fid;
int our_update_nr;
struct list_head our_cb_items;
+ struct list_head our_invalidate_cb_list;
/* points to thandle if this update request belongs to one */
struct osp_thandle *our_th;
__u64 our_version;
+ __u64 our_generation;
/* protect our_list and flag */
spinlock_t our_list_lock;
/* linked to the list(ou_list) in osp_updates */
struct list_head ou_list;
spinlock_t ou_lock;
wait_queue_head_t ou_waitq;
- /* wait for next updates */
+
+	/* The next rpc version which is supposed to be sent in
+ * osp_send_update_thread().*/
__u64 ou_rpc_version;
+
+	/* The rpc version assigned to the osp thandle during (osp_md_write()),
+	 * which will be sent in this order. Note: the osp_thandle has to be sent
+	 * in this order to make sure the remote update log will follow the
+	 * llog format rule. XXX: these probably should be removed once we
+	 * invent a new llog format */
__u64 ou_version;
+
+	/* The generation of the current osp update RPC, which is used to make
+	 * sure stale RPCs (with an older generation) will not be sent, otherwise
+	 * they would cause update llog corruption */
+ __u64 ou_generation;
+
+ /* dedicate update thread */
+ struct task_struct *ou_update_task;
+ struct lu_env ou_env;
};
struct osp_device {
* and required le64_to_cpu() conversion before use.
* Protected by opd_pre_lock */
struct lu_fid opd_last_used_fid;
+	/* on-disk copy of last_used_fid.f_oid or idif */
+ u64 opd_last_id;
struct lu_fid opd_gap_start_fid;
int opd_gap_count;
- /* connection to OST */
struct obd_device *opd_obd;
struct obd_export *opd_exp;
- struct obd_uuid opd_cluuid;
struct obd_connect_data *opd_connect_data;
- int opd_connects;
- struct proc_dir_entry *opd_proc_entry;
- struct lprocfs_stats *opd_stats;
+
/* connection status. */
unsigned int opd_new_connection:1,
opd_got_disconnected:1,
/* precreate structure for OSP */
struct osp_precreate *opd_pre;
/* dedicate precreate thread */
- struct ptlrpc_thread opd_pre_thread;
+ struct task_struct *opd_pre_task;
+ spinlock_t opd_pre_lock;
/* thread waits for signals about pool going empty */
wait_queue_head_t opd_pre_waitq;
/* send update thread */
struct osp_updates *opd_update;
- /* dedicate update thread */
- struct ptlrpc_thread opd_update_thread;
/*
- * OST synchronization
+ * OST synchronization thread
*/
- spinlock_t opd_syn_lock;
+ spinlock_t opd_sync_lock;
/* unique generation, to recognize start of new records in the llog */
- struct llog_gen opd_syn_generation;
+ struct llog_gen opd_sync_generation;
/* number of changes to sync, used to wake up sync thread */
- atomic_t opd_syn_changes;
+ atomic_t opd_sync_changes;
+ /* limit of changes to sync */
+ int opd_sync_max_changes;
/* processing of changes from previous mount is done? */
- int opd_syn_prev_done;
+ int opd_sync_prev_done;
/* found records */
- struct ptlrpc_thread opd_syn_thread;
- wait_queue_head_t opd_syn_waitq;
- /* list of inflight rpc */
- struct list_head opd_syn_inflight_list;
+ struct task_struct *opd_sync_task;
+ wait_queue_head_t opd_sync_waitq;
+ /* list of in flight rpcs */
+ struct list_head opd_sync_in_flight_list;
/* list of remotely committed rpc */
- struct list_head opd_syn_committed_there;
- /* number of changes being under sync */
- int opd_syn_sync_in_progress;
+ struct list_head opd_sync_committed_there;
/* number of RPCs in flight - flow control */
- atomic_t opd_syn_rpc_in_flight;
- int opd_syn_max_rpc_in_flight;
+ atomic_t opd_sync_rpcs_in_flight;
+ int opd_sync_max_rpcs_in_flight;
/* number of RPC in processing (including non-committed by OST) */
- atomic_t opd_syn_rpc_in_progress;
- int opd_syn_max_rpc_in_progress;
+ atomic_t opd_sync_rpcs_in_progress;
+ int opd_sync_max_rpcs_in_progress;
/* osd api's commit cb control structure */
- struct dt_txn_callback opd_syn_txn_cb;
+ struct dt_txn_callback opd_sync_txn_cb;
/* last used change number -- semantically similar to transno */
- __u64 opd_syn_last_used_id;
+ unsigned long opd_sync_last_used_id;
/* last committed change number -- semantically similar to
* last_committed */
- __u64 opd_syn_last_committed_id;
- /* last processed (taken from llog) id */
- volatile __u64 opd_syn_last_processed_id;
- struct osp_id_tracker *opd_syn_tracker;
- struct list_head opd_syn_ontrack;
+ __u64 opd_sync_last_committed_id;
+ /* last processed catalog index */
+ int opd_sync_last_catalog_idx;
+ /* number of processed records */
+ atomic64_t opd_sync_processed_recs;
/* stop processing new requests until barrier=0 */
- atomic_t opd_syn_barrier;
- wait_queue_head_t opd_syn_barrier_waitq;
+ atomic_t opd_sync_barrier;
+ wait_queue_head_t opd_sync_barrier_waitq;
+	/* earliest time to register the next commit cb; NOTE(review): the
+	 * previous "last generated id" comment looks stale for a ktime_t
+	 * field — confirm against osp_sync commit-cb code */
+ ktime_t opd_sync_next_commit_cb;
+ atomic_t opd_commits_registered;
/*
* statfs related fields: OSP maintains it on its own
*/
struct obd_statfs opd_statfs;
- cfs_time_t opd_statfs_fresh_till;
- struct timer_list opd_statfs_timer;
+ ktime_t opd_statfs_fresh_till;
+ struct timer_list opd_statfs_timer;
int opd_statfs_update_in_progress;
/* how often to update statfs data */
- int opd_statfs_maxage;
+ time64_t opd_statfs_maxage;
- struct proc_dir_entry *opd_symlink;
+ struct dentry *opd_debugfs;
/* If the caller wants to do some idempotent async operations on
* remote server, it can append the async remote requests on the
struct list_head opd_async_updates;
struct rw_semaphore opd_async_updates_rwsem;
atomic_t opd_async_updates_count;
+
+ /*
+ * Limit the object allocation using ENOSPC for opd_pre_status
+ */
+ int opd_reserved_mb_high;
+ int opd_reserved_mb_low;
+ bool opd_cleanup_orphans_done;
+ bool opd_force_creation;
};
-#define opd_pre_lock opd_pre->osp_pre_lock
#define opd_pre_used_fid opd_pre->osp_pre_used_fid
#define opd_pre_last_created_fid opd_pre->osp_pre_last_created_fid
#define opd_pre_reserved opd_pre->osp_pre_reserved
* The left part is for value, binary mode. */
struct osp_xattr_entry {
struct list_head oxe_list;
- atomic_t oxe_ref;
void *oxe_value;
- size_t oxe_buflen;
- size_t oxe_namelen;
- size_t oxe_vallen;
- unsigned int oxe_exist:1,
- oxe_ready:1;
- char oxe_buf[0];
-};
-
-struct osp_object_attr {
- struct lu_attr ooa_attr;
- struct list_head ooa_xattr_list;
+ atomic_t oxe_ref;
+ unsigned int oxe_buflen;
+ unsigned int oxe_vallen;
+ unsigned short oxe_namelen;
+ unsigned short oxe_exist:1,
+ oxe_ready:1,
+ oxe_largebuf:1;
+ char oxe_name[0];
};
/* this is a top object */
struct dt_object opo_obj;
unsigned int opo_reserved:1,
opo_non_exist:1,
- opo_stale:1;
+ opo_stale:1,
+ opo_destroyed:1;
/* read/write lock for md osp object */
struct rw_semaphore opo_sem;
const struct lu_env *opo_owner;
- struct osp_object_attr *opo_ooa;
+ struct lu_attr opo_attr;
+ struct list_head opo_xattr_list;
+ struct list_head opo_invalidate_cb_list;
/* Protect opo_ooa. */
spinlock_t opo_lock;
+ /* to implement in-flight invalidation */
+ atomic_t opo_invalidate_seq;
+ struct rw_semaphore opo_invalidate_sem;
};
-extern struct lu_object_operations osp_lu_obj_ops;
-extern const struct dt_device_operations osp_dt_ops;
-extern struct dt_object_operations osp_md_obj_ops;
-extern struct dt_body_operations osp_md_body_ops;
+extern const struct lu_object_operations osp_lu_obj_ops;
+extern const struct dt_object_operations osp_md_obj_ops;
+extern const struct dt_body_operations osp_md_body_ops;
struct osp_thread_info {
struct lu_buf osi_lb;
struct lu_attr osi_attr;
struct ost_id osi_oi;
struct ost_id osi_oi2;
- u64 osi_id;
loff_t osi_off;
union {
struct llog_rec_hdr osi_hdr;
struct llog_unlink64_rec osi_unlink;
- struct llog_setattr64_rec osi_setattr;
+ struct llog_setattr64_rec_v2 osi_setattr;
struct llog_gen_rec osi_gen;
};
struct llog_cookie osi_cookie;
__u32 ooi_pos_page;
__u32 ooi_pos_lu_page;
__u32 ooi_attr;
+ __u32 ooi_rec_size;
int ooi_pos_ent;
int ooi_total_npages;
int ooi_valid_npages;
}
static inline void osp_objid_buf_prep(struct lu_buf *buf, loff_t *off,
- __u32 *id, int index)
+ __u64 *id, int index)
{
	/* Note: though id is only 32 bits, it will also write 64 bits
* for oid to keep compatibility with the previous version. */
static inline struct osp_thread_info *osp_env_info(const struct lu_env *env)
{
- struct osp_thread_info *info;
-
- info = lu_context_key_get(&env->le_ctx, &osp_thread_key);
- if (info == NULL) {
- lu_env_refill((struct lu_env *)env);
- info = lu_context_key_get(&env->le_ctx, &osp_thread_key);
- }
- LASSERT(info);
- return info;
+ return lu_env_info(env, &osp_thread_key);
}
struct osp_txn_info {
static inline struct osp_device *lu2osp_dev(struct lu_device *d)
{
LASSERT(lu_device_is_osp(d));
- return container_of0(d, struct osp_device, opd_dt_dev.dd_lu_dev);
+ return container_of_safe(d, struct osp_device, opd_dt_dev.dd_lu_dev);
}
static inline struct lu_device *osp2lu_dev(struct osp_device *d)
static inline struct osp_device *dt2osp_dev(struct dt_device *d)
{
LASSERT(lu_device_is_osp(&d->dd_lu_dev));
- return container_of0(d, struct osp_device, opd_dt_dev);
+ return container_of_safe(d, struct osp_device, opd_dt_dev);
}
static inline struct osp_object *lu2osp_obj(struct lu_object *o)
{
LASSERT(ergo(o != NULL, lu_device_is_osp(o->lo_dev)));
- return container_of0(o, struct osp_object, opo_obj.do_lu);
+ return container_of_safe(o, struct osp_object, opo_obj.do_lu);
}
static inline struct lu_object *osp2lu_obj(struct osp_object *obj)
static inline struct osp_object *osp_obj(const struct lu_object *o)
{
LASSERT(lu_device_is_osp(o->lo_dev));
- return container_of0(o, struct osp_object, opo_obj.do_lu);
+ return container_of_safe(o, struct osp_object, opo_obj.do_lu);
}
static inline struct osp_object *dt2osp_obj(const struct dt_object *d)
static inline struct dt_object *osp_object_child(struct osp_object *o)
{
- return container_of0(lu_object_next(osp2lu_obj(o)),
- struct dt_object, do_lu);
+ return container_of(lu_object_next(osp2lu_obj(o)),
+ struct dt_object, do_lu);
}
static inline struct seq_server_site *osp_seq_site(struct osp_device *osp)
return osp->opd_dt_dev.dd_lu_dev.ld_site->ld_seq_site;
}
-#define osp_init_rpc_lock(lck) mdc_init_rpc_lock(lck)
-
-static inline void osp_get_rpc_lock(struct osp_device *osp)
-{
- struct mdc_rpc_lock *rpc_lock = osp->opd_obd->u.cli.cl_rpc_lock;
-
- mdc_get_rpc_lock(rpc_lock, NULL);
-}
-
-static inline void osp_put_rpc_lock(struct osp_device *osp)
-{
- struct mdc_rpc_lock *rpc_lock = osp->opd_obd->u.cli.cl_rpc_lock;
-
- mdc_put_rpc_lock(rpc_lock, NULL);
-}
-
static inline int osp_fid_diff(const struct lu_fid *fid1,
const struct lu_fid *fid2)
{
fid_idif_id(fid2->f_seq, fid2->f_oid, 0);
}
- LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID
- ", fid2:"DFID"\n", PFID(fid1), PFID(fid2));
+ LASSERTF(fid_seq(fid1) == fid_seq(fid2), "fid1:"DFID", fid2:"DFID"\n",
+ PFID(fid1), PFID(fid2));
return fid_oid(fid1) - fid_oid(fid2);
}
+static inline void osp_fid_to_obdid(struct lu_fid *last_fid, u64 *osi_id)
+{
+ if (fid_is_idif((last_fid)))
+ *osi_id = fid_idif_id(fid_seq(last_fid), fid_oid(last_fid),
+ fid_ver(last_fid));
+ else
+ *osi_id = fid_oid(last_fid);
+}
static inline void osp_update_last_fid(struct osp_device *d, struct lu_fid *fid)
{
int diff = osp_fid_diff(fid, &d->opd_last_used_fid);
+ struct lu_fid *gap_start = &d->opd_gap_start_fid;
+
/*
* we might have lost precreated objects due to VBR and precreate
* orphans, the gap in objid can be calculated properly only here
if (diff > 0) {
if (diff > 1) {
d->opd_gap_start_fid = d->opd_last_used_fid;
- d->opd_gap_start_fid.f_oid++;
+ if (fid_oid(gap_start) == LUSTRE_DATA_SEQ_MAX_WIDTH) {
+ gap_start->f_seq++;
+ gap_start->f_oid = fid_is_idif(gap_start) ?
+ 0 : 1;
+ } else {
+ gap_start->f_oid++;
+ }
d->opd_gap_count = diff - 1;
CDEBUG(D_HA, "Gap in objids: start="DFID", count =%d\n",
PFID(&d->opd_gap_start_fid), d->opd_gap_count);
}
d->opd_last_used_fid = *fid;
+ osp_fid_to_obdid(fid, &d->opd_last_id);
}
}
int osp_object_update_request_create(struct osp_update_request *our,
size_t size);
-#define osp_update_rpc_pack(env, name, our, op, ...) \
+#define OSP_UPDATE_RPC_PACK(env, out_something_pack, our, ...) \
({ \
- struct object_update *object_update; \
- size_t max_update_length; \
+ struct object_update *object_update; \
+ size_t max_update_length; \
struct osp_update_request_sub *ours; \
int ret; \
\
\
object_update = update_buffer_get_update(ours->ours_req,\
ours->ours_req->ourq_count); \
- ret = out_##name##_pack(env, object_update, \
- &max_update_length, \
- __VA_ARGS__); \
+ ret = out_something_pack(env, object_update, \
+ &max_update_length, \
+ __VA_ARGS__); \
if (ret == -E2BIG) { \
int rc1; \
/* Create new object update request */ \
ret; \
})
-static inline bool osp_send_update_thread_running(struct osp_device *osp)
-{
- return osp->opd_update_thread.t_flags & SVC_RUNNING;
-}
-
-static inline bool osp_send_update_thread_stopped(struct osp_device *osp)
-{
- return osp->opd_update_thread.t_flags & SVC_STOPPED;
-}
-
typedef int (*osp_update_interpreter_t)(const struct lu_env *env,
struct object_update_reply *rep,
struct ptlrpc_request *req,
/* osp_dev.c */
void osp_update_last_id(struct osp_device *d, u64 objid);
-extern struct llog_operations osp_mds_ost_orig_logops;
/* osp_trans.c */
int osp_insert_async_request(const struct lu_env *env, enum update_type op,
osp_update_interpreter_t interpreter);
struct osp_update_request *osp_update_request_create(struct dt_device *dt);
-void osp_update_request_destroy(struct osp_update_request *update);
+void osp_update_request_destroy(const struct lu_env *env,
+ struct osp_update_request *update);
int osp_send_update_thread(void *arg);
int osp_check_and_set_rpc_version(struct osp_thandle *oth,
struct osp_object *obj);
-void osp_thandle_destroy(struct osp_thandle *oth);
+void osp_thandle_destroy(const struct lu_env *env, struct osp_thandle *oth);
static inline void osp_thandle_get(struct osp_thandle *oth)
{
atomic_inc(&oth->ot_refcount);
}
-static inline void osp_thandle_put(struct osp_thandle *oth)
+static inline void osp_thandle_put(const struct lu_env *env,
+ struct osp_thandle *oth)
{
if (atomic_dec_and_test(&oth->ot_refcount))
- osp_thandle_destroy(oth);
+ osp_thandle_destroy(env, oth);
}
int osp_prep_update_req(const struct lu_env *env, struct obd_import *imp,
const char *name, struct thandle *th);
int osp_xattr_del(const struct lu_env *env, struct dt_object *dt,
const char *name, struct thandle *th);
+int osp_invalidate(const struct lu_env *env, struct dt_object *dt);
+bool osp_check_stale(struct dt_object *dt);
+void osp_obj_invalidate_cache(struct osp_object *obj);
int osp_trans_stop(const struct lu_env *env, struct dt_device *dt,
struct thandle *th);
int osp_it_key_rec(const struct lu_env *env, const struct dt_it *di,
void *key_rec);
int osp_it_next_page(const struct lu_env *env, struct dt_it *di);
-int osp_oac_init(struct osp_object *obj);
/* osp_md_object.c */
-int osp_md_declare_object_create(const struct lu_env *env,
- struct dt_object *dt,
- struct lu_attr *attr,
- struct dt_allocation_hint *hint,
- struct dt_object_format *dof,
- struct thandle *th);
-int osp_md_object_create(const struct lu_env *env, struct dt_object *dt,
- struct lu_attr *attr, struct dt_allocation_hint *hint,
- struct dt_object_format *dof, struct thandle *th);
+int osp_md_declare_create(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *attr, struct dt_allocation_hint *hint,
+ struct dt_object_format *dof, struct thandle *th);
+int osp_md_create(const struct lu_env *env, struct dt_object *dt,
+ struct lu_attr *attr, struct dt_allocation_hint *hint,
+ struct dt_object_format *dof, struct thandle *th);
int osp_md_declare_attr_set(const struct lu_env *env, struct dt_object *dt,
const struct lu_attr *attr, struct thandle *th);
int osp_md_attr_set(const struct lu_env *env, struct dt_object *dt,
/* osp_precreate.c */
int osp_init_precreate(struct osp_device *d);
-int osp_precreate_reserve(const struct lu_env *env, struct osp_device *d);
+int osp_precreate_reserve(const struct lu_env *env,
+ struct osp_device *d, bool can_block);
__u64 osp_precreate_get_id(struct osp_device *d);
int osp_precreate_get_fid(const struct lu_env *env, struct osp_device *d,
struct lu_fid *fid);
int osp_write_last_oid_seq_files(struct lu_env *env, struct osp_device *osp,
struct lu_fid *fid, int sync);
int osp_init_pre_fid(struct osp_device *osp);
+int osp_init_statfs(struct osp_device *osp);
+void osp_fini_statfs(struct osp_device *osp);
+void osp_statfs_fini(struct osp_device *d);
/* lproc_osp.c */
-void osp_lprocfs_init(struct osp_device *osp);
+void osp_tunables_init(struct osp_device *osp);
+void osp_tunables_fini(struct osp_device *osp);
/* osp_sync.c */
int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
- llog_op_type type, struct thandle *th);
+ enum llog_op_type type, struct thandle *th);
int osp_sync_add(const struct lu_env *env, struct osp_object *o,
- llog_op_type type, struct thandle *th,
+ enum llog_op_type type, struct thandle *th,
const struct lu_attr *attr);
int osp_sync_init(const struct lu_env *env, struct osp_device *d);
int osp_sync_fini(struct osp_device *d);
-void __osp_sync_check_for_work(struct osp_device *d);
+void osp_sync_check_for_work(struct osp_device *osp);
+void osp_sync_force(const struct lu_env *env, struct osp_device *d);
+int osp_sync_add_commit_cb_1s(const struct lu_env *env, struct osp_device *d,
+ struct thandle *th);
/* lwp_dev.c */
-extern struct obd_ops lwp_obd_device_ops;
+extern const struct obd_ops lwp_obd_device_ops;
extern struct lu_device_type lwp_device_type;
+static inline struct lu_device *osp2top(const struct osp_device *osp)
+{
+ return osp->opd_dt_dev.dd_lu_dev.ld_site->ls_top_dev;
+}
+
+static inline void osp_set_req_replay(const struct osp_device *osp,
+ struct ptlrpc_request *req)
+{
+ struct obd_device *obd = osp2top(osp)->ld_obd;
+
+ /* The RPC must be recovery related for the cases:
+ *
+ * 1. sent during recovery, or
+	 * 2. sent before the recovery thread target_recovery_thread() starts,
+ * such as triggered by lod_sub_recovery_thread(). */
+ if (obd->obd_recovering || (obd->obd_replayable && obd->obd_no_conn))
+ req->rq_allow_replay = 1;
+}
+
#endif