eviction after a failed ast to the elt thread instead of handling
it in the dlm interpret routine.
+Severity : normal
+Bugzilla : 16450
+Description: Convert some comments to new format.
+Details : Update documentation comments to match doxygen conventions.
+
--------------------------------------------------------------------------------
2007-08-10 Cluster File Systems, Inc. <info@clusterfs.com>
#ifndef __LUSTRE_DT_OBJECT_H
#define __LUSTRE_DT_OBJECT_H
-/*
+/** \defgroup dt dt
* Sub-class of lu_object with methods common for "data" objects in OST stack.
*
* Data objects behave like regular files: you can read/write them, get and
* (nlink) based one.
*
* Examples: osd (lustre/osd) is an implementation of dt interface.
+ * @{
*/
unsigned ddp_block_shift;
};
-/*
+/**
* Basic transaction credit op
*/
enum dt_txn_op {
DTO_OBJECT_DELETE,
DTO_ATTR_SET,
DTO_XATTR_SET,
- DTO_LOG_REC, /* XXX temporary: dt layer knows nothing about llog. */
+ DTO_LOG_REC, /**< XXX temporary: dt layer knows nothing about llog. */
DTO_WRITE_BASE,
DTO_WRITE_BLOCK,
DTO_NR
};
-/*
+/**
* Operations on dt device.
*/
struct dt_device_operations {
- /*
+ /**
* Return device-wide statistics.
*/
int (*dt_statfs)(const struct lu_env *env,
struct dt_device *dev, struct kstatfs *sfs);
- /*
- * Start transaction, described by @param.
+ /**
+ * Start transaction, described by \a param.
*/
struct thandle *(*dt_trans_start)(const struct lu_env *env,
struct dt_device *dev,
struct txn_param *param);
- /*
+ /**
* Finish previously started transaction.
*/
void (*dt_trans_stop)(const struct lu_env *env,
struct thandle *th);
- /*
+ /**
* Return fid of root index object.
*/
int (*dt_root_get)(const struct lu_env *env,
struct dt_device *dev, struct lu_fid *f);
- /*
+ /**
* Return device configuration data.
*/
void (*dt_conf_get)(const struct lu_env *env,
const struct dt_device *dev,
struct dt_device_param *param);
- /*
+ /**
* handling device state, mostly for tests
*/
int (*dt_sync)(const struct lu_env *env, struct dt_device *dev);
void (*dt_ro)(const struct lu_env *env, struct dt_device *dev);
- /*
+ /**
* Initialize capability context.
*/
int (*dt_init_capa_ctxt)(const struct lu_env *env,
int mode, unsigned long timeout,
__u32 alg, struct lustre_capa_key *keys);
- /*
- * get transaction credits for given @op.
+ /**
+ * get transaction credits for given \a op.
*/
int (*dt_credit_get)(const struct lu_env *env, struct dt_device *dev,
enum dt_txn_op);
};
struct dt_index_features {
- /* required feature flags from enum dt_index_flags */
+ /** required feature flags from enum dt_index_flags */
__u32 dif_flags;
- /* minimal required key size */
+ /** minimal required key size */
size_t dif_keysize_min;
- /* maximal required key size, 0 if no limit */
+ /** maximal required key size, 0 if no limit */
size_t dif_keysize_max;
- /* minimal required record size */
+ /** minimal required record size */
size_t dif_recsize_min;
- /* maximal required record size, 0 if no limit */
+ /** maximal required record size, 0 if no limit */
size_t dif_recsize_max;
};
enum dt_index_flags {
- /* index supports variable sized keys */
+ /** index supports variable sized keys */
DT_IND_VARKEY = 1 << 0,
- /* index supports variable sized records */
+ /** index supports variable sized records */
DT_IND_VARREC = 1 << 1,
- /* index can be modified */
+ /** index can be modified */
DT_IND_UPDATE = 1 << 2,
- /* index supports records with non-unique (duplicate) keys */
+ /** index supports records with non-unique (duplicate) keys */
DT_IND_NONUNQ = 1 << 3
};
-/*
+/**
* Features, required from index to support file system directories (mapping
* names to fids).
*/
extern const struct dt_index_features dt_directory_features;
-/*
+/**
* This is a general purpose dt allocation hint.
* It now contains the parent object.
* It can contain any allocation hint in the future.
* lu_object_operations, but that would break existing symmetry.
*/
- /*
+ /**
* Return standard attributes.
*
* precondition: lu_object_exists(&dt->do_lu);
int (*do_attr_get)(const struct lu_env *env,
struct dt_object *dt, struct lu_attr *attr,
struct lustre_capa *capa);
- /*
+ /**
* Set standard attributes.
*
* precondition: dt_object_exists(dt);
const struct lu_attr *attr,
struct thandle *handle,
struct lustre_capa *capa);
- /*
+ /**
* Return a value of an extended attribute.
*
* precondition: dt_object_exists(dt);
int (*do_xattr_get)(const struct lu_env *env, struct dt_object *dt,
struct lu_buf *buf, const char *name,
struct lustre_capa *capa);
- /*
+ /**
* Set value of an extended attribute.
*
- * @fl - flags from enum lu_xattr_flags
+ * \a fl - flags from enum lu_xattr_flags
*
* precondition: dt_object_exists(dt);
*/
struct dt_object *dt, const struct lu_buf *buf,
const char *name, int fl, struct thandle *handle,
struct lustre_capa *capa);
- /*
+ /**
* Delete existing extended attribute.
*
* precondition: dt_object_exists(dt);
struct dt_object *dt,
const char *name, struct thandle *handle,
struct lustre_capa *capa);
- /*
- * Place list of existing extended attributes into @buf (which has
+ /**
+ * Place list of existing extended attributes into \a buf (which has
* length len).
*
* precondition: dt_object_exists(dt);
int (*do_xattr_list)(const struct lu_env *env,
struct dt_object *dt, struct lu_buf *buf,
struct lustre_capa *capa);
- /*
+ /**
* Init allocation hint using parent object and child mode.
- * (1) The @parent might be NULL if this is a partial creation for
+ * (1) The \a parent might be NULL if this is a partial creation for
* remote object.
- * (2) The type of child is in @child_mode.
- * (3) The result hint is stored in @ah;
+ * (2) The type of child is in \a child_mode.
+ * (3) The result hint is stored in \a ah.
*/
void (*do_ah_init)(const struct lu_env *env,
struct dt_allocation_hint *ah,
struct dt_object *parent,
umode_t child_mode);
- /*
+ /**
* Create new object on this device.
*
* precondition: !dt_object_exists(dt);
struct dt_allocation_hint *hint,
struct thandle *th);
- /*
+ /**
* Announce that this object is going to be used as an index. This
* operation check that object supports indexing operations and
* installs appropriate dt_index_operations vector on success.
int (*do_index_try)(const struct lu_env *env,
struct dt_object *dt,
const struct dt_index_features *feat);
- /*
+ /**
* Add nlink of the object
* precondition: dt_object_exists(dt);
*/
void (*do_ref_add)(const struct lu_env *env,
struct dt_object *dt, struct thandle *th);
- /*
+ /**
* Del nlink of the object
* precondition: dt_object_exists(dt);
*/
int (*do_object_sync)(const struct lu_env *, struct dt_object *);
};
-/*
+/**
* Per-dt-object operations on "file body".
*/
struct dt_body_operations {
- /*
+ /**
* precondition: dt_object_exists(dt);
*/
ssize_t (*dbo_read)(const struct lu_env *env, struct dt_object *dt,
struct lu_buf *buf, loff_t *pos,
struct lustre_capa *capa);
- /*
+ /**
* precondition: dt_object_exists(dt);
*/
ssize_t (*dbo_write)(const struct lu_env *env, struct dt_object *dt,
struct thandle *handle, struct lustre_capa *capa);
};
-/*
+/**
* Incomplete type of index record.
*/
struct dt_rec;
-/*
+/**
* Incomplete type of index key.
*/
struct dt_key;
-/*
+/**
* Incomplete type of dt iterator.
*/
struct dt_it;
-/*
+/**
* Per-dt-object operations on object as index.
*/
struct dt_index_operations {
- /*
+ /**
* precondition: dt_object_exists(dt);
*/
int (*dio_lookup)(const struct lu_env *env, struct dt_object *dt,
struct dt_rec *rec, const struct dt_key *key,
struct lustre_capa *capa);
- /*
+ /**
* precondition: dt_object_exists(dt);
*/
int (*dio_insert)(const struct lu_env *env, struct dt_object *dt,
const struct dt_rec *rec, const struct dt_key *key,
struct thandle *handle, struct lustre_capa *capa);
- /*
+ /**
* precondition: dt_object_exists(dt);
*/
int (*dio_delete)(const struct lu_env *env, struct dt_object *dt,
const struct dt_key *key, struct thandle *handle,
struct lustre_capa *capa);
- /*
+ /**
* Iterator interface
*/
struct dt_it_ops {
- /*
+ /**
* Allocate and initialize new iterator.
*
* precondition: dt_object_exists(dt);
}
struct txn_param {
- /* number of blocks this transaction will modify */
+ /** number of blocks this transaction will modify */
unsigned int tp_credits;
- /* sync transaction is needed */
+ /** sync transaction is needed */
__u32 tp_sync:1;
};
p->tp_credits = credits;
}
-/*
+/**
* This is the general purpose transaction handle.
* 1. Transaction Life Cycle
* This transaction handle is allocated upon starting a new transaction,
* No RPC request should be issued inside transaction.
*/
struct thandle {
- /* the dt device on which the transactions are executed */
+ /** the dt device on which the transactions are executed */
struct dt_device *th_dev;
- /* context for this transaction, tag is LCT_TX_HANDLE */
+ /** context for this transaction, tag is LCT_TX_HANDLE */
struct lu_context th_ctx;
- /* the last operation result in this transaction.
+ /** the last operation result in this transaction.
* this value is used in recovery */
__s32 th_result;
};
-/*
+/**
* Transaction call-backs.
*
* These are invoked by osd (or underlying transaction engine) when
struct dt_device *dt, const char *name,
struct lu_fid *fid);
+/** @} dt */
+
#endif /* __LUSTRE_DT_OBJECT_H */
#include <libcfs/libcfs.h>
-/*
- * Layered objects support for CMD3/C5.
- */
+#include <lu_ref.h>
struct seq_file;
struct proc_dir_entry;
struct lustre_cfg;
struct lprocfs_stats;
-/*
+/** \defgroup lu lu
* lu_* data-types represent server-side entities shared by data and meta-data
* stacks.
*
* Design goals:
*
- * 0. support for layering.
+ * -# support for layering.
*
* Server side object is split into layers, one per device in the
* corresponding device stack. Individual layer is represented by struct
* it is possible that at some layer object "spawns" multiple sub-objects
* on the lower layer.
*
- * 1. fid-based identification.
+ * -# fid-based identification.
*
* Compound object is uniquely identified by its fid. Objects are indexed
* by their fids (hash table is used for index).
*
- * 2. caching and life-cycle management.
+ * -# caching and life-cycle management.
*
* Object's life-time is controlled by reference counting. When reference
* count drops to 0, object is returned to cache. Cached objects still
* can be used to reclaim given number of unused objects from the tail of
* the LRU.
*
- * 3. avoiding recursion.
+ * -# avoiding recursion.
*
* Generic code tries to replace recursion through layers by iterations
* where possible. Additionally to the end of reducing stack consumption,
* data, when practically possible, are allocated through lu_context_key
* interface rather than on stack.
- *
+ * @{
*/
struct lu_site;
struct lu_context;
struct lu_env;
-/*
+/**
* Operations common for data and meta-data devices.
*/
struct lu_device_operations {
- /*
+ /**
+ * Allocate object for the given device (without lower-layer
+ * parts). This is called by lu_object_operations::loo_object_init()
+ * from the parent layer, and should setup at least lu_object::lo_dev
+ * and lu_object::lo_ops fields of resulting lu_object.
+ *
* Object creation protocol.
*
* Due to design goal of avoiding recursion, object creation (see
* lu_object_alloc()) is somewhat involved:
*
- * - first, ->ldo_object_alloc() method of the top-level device
- * in the stack is called. It should allocate top level object
- * (including lu_object_header), but without any lower-layer
- * sub-object(s).
+ * - first, lu_device_operations::ldo_object_alloc() method of the
+ * top-level device in the stack is called. It should allocate top
+ * level object (including lu_object_header), but without any
+ * lower-layer sub-object(s).
*
* - then lu_object_alloc() sets fid in the header of newly created
* object.
*
- * - then ->loo_object_init() (a method from struct
- * lu_object_operations) is called. It has to allocate lower-layer
- * object(s). To do this, ->loo_object_init() calls
- * ldo_object_alloc() of the lower-layer device(s).
+ * - then lu_object_operations::loo_object_init() is called. It has
+ * to allocate lower-layer object(s). To do this,
+ * lu_object_operations::loo_object_init() calls ldo_object_alloc()
+ * of the lower-layer device(s).
*
- * - for all new objects allocated by ->loo_object_init() (and
- * inserted into object stack), ->loo_object_init() is called again
+ * - for all new objects allocated by
+ * lu_object_operations::loo_object_init() (and inserted into object
+ * stack), lu_object_operations::loo_object_init() is called again
* repeatedly, until no new objects are created.
*
- */
-
- /*
- * Allocate object for the given device (without lower-layer
- * parts). This is called by ->loo_object_init() from the parent
- * layer, and should setup at least ->lo_dev and ->lo_ops fields of
- * resulting lu_object.
- *
- * postcondition: ergo(!IS_ERR(result), result->lo_dev == d &&
+ * \post ergo(!IS_ERR(result), result->lo_dev == d &&
* result->lo_ops != NULL);
*/
struct lu_object *(*ldo_object_alloc)(const struct lu_env *env,
const struct lu_object_header *h,
struct lu_device *d);
- /*
- * process config specific for device
+ /**
+ * process config specific for device.
*/
int (*ldo_process_config)(const struct lu_env *env,
struct lu_device *, struct lustre_cfg *);
};
-/*
- * Type of "printer" function used by ->loo_object_print() method.
+/**
+ * Type of "printer" function used by lu_object_operations::loo_object_print()
+ * method.
*
* Printer function is needed to provide some flexibility in (semi-)debugging
* output: possible implementations: printk, CDEBUG, sysfs/seq_file
*/
struct lu_object_operations {
- /*
+ /**
* Allocate lower-layer parts of the object by calling
- * ->ldo_object_alloc() of the corresponding underlying device.
+ * lu_device_operations::ldo_object_alloc() of the corresponding
+ * underlying device.
*
* This method is called once for each object inserted into object
* stack. It's responsibility of this method to insert lower-layer
* object(s) it create into appropriate places of object stack.
*/
int (*loo_object_init)(const struct lu_env *env,
- struct lu_object *o);
- /*
+ struct lu_object *o,
+ const struct lu_object_conf *conf);
+ /**
* Called (in top-to-bottom order) during object allocation after all
* layers were allocated and initialized. Can be used to perform
* initialization depending on lower layers.
*/
int (*loo_object_start)(const struct lu_env *env,
struct lu_object *o);
- /*
- * Called before ->loo_object_free() to signal that object is being
- * destroyed. Dual to ->loo_object_init().
+ /**
+ * Called before lu_object_operations::loo_object_free() to signal
+ * that object is being destroyed. Dual to
+ * lu_object_operations::loo_object_init().
*/
void (*loo_object_delete)(const struct lu_env *env,
struct lu_object *o);
-
- /*
- * Dual to ->ldo_object_alloc(). Called when object is removed from
- * memory.
+ /**
+ * Dual to lu_device_operations::ldo_object_alloc(). Called when
+ * object is removed from memory.
*/
void (*loo_object_free)(const struct lu_env *env,
struct lu_object *o);
-
- /*
+ /**
* Called when last active reference to the object is released (and
* object returns to the cache). This method is optional.
*/
void (*loo_object_release)(const struct lu_env *env,
struct lu_object *o);
- /*
- * Debugging helper. Print given object.
+ /**
+ * Optional debugging helper. Print given object.
*/
int (*loo_object_print)(const struct lu_env *env, void *cookie,
lu_printer_t p, const struct lu_object *o);
- /*
+ /**
* Optional debugging method. Returns true iff method is internally
* consistent.
*/
int (*loo_object_invariant)(const struct lu_object *o);
};
-/*
+/**
* Type of lu_device.
*/
struct lu_device_type;
-/*
+/**
* Device: a layer in the server side abstraction stacking.
*/
struct lu_device {
- /*
+ /**
* reference count. This is incremented, in particular, on each object
* created at this layer.
*
- * XXX which means that atomic_t is probably too small.
+ * \todo XXX which means that atomic_t is probably too small.
*/
atomic_t ld_ref;
- /*
+ /**
* Pointer to device type. Never modified once set.
*/
struct lu_device_type *ld_type;
- /*
+ /**
* Operation vector for this device.
*/
- struct lu_device_operations *ld_ops;
- /*
+ const struct lu_device_operations *ld_ops;
+ /**
* Stack this device belongs to.
*/
struct lu_site *ld_site;
struct proc_dir_entry *ld_proc_entry;
- /* XXX: temporary back pointer into obd. */
+ /** \todo XXX: temporary back pointer into obd. */
struct obd_device *ld_obd;
+ /**
+ * A list of references to this object, for debugging.
+ */
+ struct lu_ref ld_reference;
};
struct lu_device_type_operations;
-/*
+/**
* Tag bits for device type. They are used to distinguish certain groups of
* device types.
*/
enum lu_device_tag {
- /* this is meta-data device */
+ /** this is meta-data device */
LU_DEVICE_MD = (1 << 0),
- /* this is data device */
- LU_DEVICE_DT = (1 << 1)
+ /** this is data device */
+ LU_DEVICE_DT = (1 << 1),
+ /** data device in the client stack */
+ LU_DEVICE_CL = (1 << 2)
};
-/*
+/**
* Type of device.
*/
struct lu_device_type {
- /*
+ /**
* Tag bits. Taken from enum lu_device_tag. Never modified once set.
*/
__u32 ldt_tags;
- /*
+ /**
* Name of this class. Unique system-wide. Never modified once set.
*/
char *ldt_name;
- /*
+ /**
* Operations for this type.
*/
- struct lu_device_type_operations *ldt_ops;
- /*
- * XXX: temporary pointer to associated obd_type.
+ const struct lu_device_type_operations *ldt_ops;
+ /**
+ * \todo XXX: temporary pointer to associated obd_type.
*/
struct obd_type *ldt_obd_type;
- /*
- * XXX: temporary: context tags used by obd_*() calls.
+ /**
+ * \todo XXX: temporary: context tags used by obd_*() calls.
*/
__u32 ldt_ctx_tags;
+ /**
+ * Number of existing device type instances.
+ */
+ unsigned ldt_device_nr;
+ /**
+ * Linkage into a global list of all device types.
+ *
+ * \see lu_device_types.
+ */
+ struct list_head ldt_linkage;
};
-/*
+/**
* Operations on a device type.
*/
struct lu_device_type_operations {
- /*
+ /**
* Allocate new device.
*/
struct lu_device *(*ldto_device_alloc)(const struct lu_env *env,
struct lu_device_type *t,
struct lustre_cfg *lcfg);
- /*
- * Free device. Dual to ->ldto_device_alloc(). Returns pointer to
+ /**
+ * Free device. Dual to
+ * lu_device_type_operations::ldto_device_alloc(). Returns pointer to
* the next device in the stack.
*/
struct lu_device *(*ldto_device_free)(const struct lu_env *,
struct lu_device *);
- /*
+ /**
* Initialize the devices after allocation
*/
int (*ldto_device_init)(const struct lu_env *env,
struct lu_device *, const char *,
struct lu_device *);
- /*
- * Finalize device. Dual to ->ldto_device_init(). Returns pointer to
+ /**
+ * Finalize device. Dual to
+ * lu_device_type_operations::ldto_device_init(). Returns pointer to
* the next device in the stack.
*/
struct lu_device *(*ldto_device_fini)(const struct lu_env *env,
struct lu_device *);
-
- /*
+ /**
* Initialize device type. This is called on module load.
*/
int (*ldto_init)(struct lu_device_type *t);
- /*
- * Finalize device type. Dual to ->ldto_init(). Called on module
- * unload.
+ /**
+ * Finalize device type. Dual to
+ * lu_device_type_operations::ldto_init(). Called on module unload.
*/
void (*ldto_fini)(struct lu_device_type *t);
+ /**
+ * Called when the first device is created.
+ */
+ void (*ldto_start)(struct lu_device_type *t);
+ /**
+ * Called when number of devices drops to 0.
+ */
+ void (*ldto_stop)(struct lu_device_type *t);
};
-/*
+/**
* Flags for the object layers.
*/
enum lu_object_flags {
- /*
- * this flags is set if ->loo_object_init() has been called for this
- * layer. Used by lu_object_alloc().
+ /**
+ * this flag is set if lu_object_operations::loo_object_init() has
+ * been called for this layer. Used by lu_object_alloc().
*/
LU_OBJECT_ALLOCATED = (1 << 0)
};
-/*
+/**
* Common object attributes.
*/
-/* valid flags */
+struct lu_attr {
+ /** size in bytes */
+ __u64 la_size;
+ /** modification time in seconds since Epoch */
+ __u64 la_mtime;
+ /** access time in seconds since Epoch */
+ __u64 la_atime;
+ /** change time in seconds since Epoch */
+ __u64 la_ctime;
+ /** 512-byte blocks allocated to object */
+ __u64 la_blocks;
+ /** permission bits and file type */
+ __u32 la_mode;
+ /** owner id */
+ __u32 la_uid;
+ /** group id */
+ __u32 la_gid;
+ /** object flags */
+ __u32 la_flags;
+ /** number of persistent references to this object */
+ __u32 la_nlink;
+ /** blk bits of the object */
+ __u32 la_blkbits;
+ /** blk size of the object */
+ __u32 la_blksize;
+ /** real device */
+ __u32 la_rdev;
+ /**
+ * valid bits
+ *
+ * \see enum la_valid
+ */
+ __u64 la_valid;
+};
+
+/** Bit-mask of valid attributes */
enum la_valid {
LA_ATIME = 1 << 0,
LA_MTIME = 1 << 1,
LA_BLKSIZE = 1 << 12,
};
-struct lu_attr {
- __u64 la_size; /* size in bytes */
- __u64 la_mtime; /* modification time in seconds since Epoch */
- __u64 la_atime; /* access time in seconds since Epoch */
- __u64 la_ctime; /* change time in seconds since Epoch */
- __u64 la_blocks; /* 512-byte blocks allocated to object */
- __u32 la_mode; /* permission bits and file type */
- __u32 la_uid; /* owner id */
- __u32 la_gid; /* group id */
- __u32 la_flags; /* object flags */
- __u32 la_nlink; /* number of persistent references to this
- * object */
- __u32 la_blkbits; /* blk bits of the object*/
- __u32 la_blksize; /* blk size of the object*/
-
- __u32 la_rdev; /* real device */
- __u64 la_valid; /* valid bits */
-};
-
/*
* Layer in the layered object.
*/
* Note, that object does *not* necessary correspond to the real object in the
* persistent storage: object is an anchor for locking and method calling, so
* it is created for things like not-yet-existing child created by mkdir or
- * create calls. ->loo_exists() can be used to check whether object is backed
- * by persistent storage entity.
+ * create calls. lu_object_operations::loo_exists() can be used to check
+ * whether object is backed by persistent storage entity.
*/
struct lu_object_header {
- /*
+ /**
* Object flags from enum lu_object_header_flags. Set and checked
* atomically.
*/
unsigned long loh_flags;
- /*
- * Object reference count. Protected by site guard lock.
+ /**
+ * Object reference count. Protected by lu_site::ls_guard.
*/
atomic_t loh_ref;
- /*
+ /**
* Fid, uniquely identifying this object.
*/
struct lu_fid loh_fid;
- /*
+ /**
* Common object attributes, cached for efficiency. From enum
* lu_object_header_attr.
*/
__u32 loh_attr;
- /*
- * Linkage into per-site hash table. Protected by site guard lock.
+ /**
+ * Linkage into per-site hash table. Protected by lu_site::ls_guard.
*/
struct hlist_node loh_hash;
- /*
- * Linkage into per-site LRU list. Protected by site guard lock.
+ /**
+ * Linkage into per-site LRU list. Protected by lu_site::ls_guard.
*/
struct list_head loh_lru;
- /*
+ /**
* Linkage into list of layers. Never modified once set (except lately
* during object destruction). No locking is necessary.
*/
struct list_head loh_layers;
+ /**
+ * A list of references to this object, for debugging.
+ */
+ struct lu_ref loh_reference;
};
struct fld;
-/*
+/**
* lu_site is a "compartment" within which objects are unique, and LRU
* discipline is maintained.
*
* lu_object.
*/
struct lu_site {
- /*
+ /**
+ * Site-wide lock.
+ *
* lock protecting:
*
- * - ->ls_hash hash table (and its linkages in objects);
+ * - lu_site::ls_hash hash table (and its linkages in objects);
*
- * - ->ls_lru list (and its linkages in objects);
+ * - lu_site::ls_lru list (and its linkages in objects);
*
- * - 0/1 transitions of object ->loh_ref reference count;
+ * - 0/1 transitions of object lu_object_header::loh_ref
+ * reference count;
*
* yes, it's heavy.
*/
rwlock_t ls_guard;
- /*
+ /**
* Hash-table where objects are indexed by fid.
*/
struct hlist_head *ls_hash;
- /*
+ /**
* Bit-mask for hash-table size.
*/
int ls_hash_mask;
- /*
+ /**
* Order of hash-table.
*/
int ls_hash_bits;
- /*
+ /**
* Number of buckets in the hash-table.
*/
int ls_hash_size;
- /*
+ /**
* LRU list, updated on each access to object. Protected by
- * ->ls_guard.
+ * lu_site::ls_guard.
*
- * "Cold" end of LRU is ->ls_lru.next. Accessed object are moved to
- * the ->ls_lru.prev (this is due to the non-existence of
- * list_for_each_entry_safe_reverse()).
+ * "Cold" end of LRU is lu_site::ls_lru.next. Accessed object are
+ * moved to the lu_site::ls_lru.prev (this is due to the non-existence
+ * of list_for_each_entry_safe_reverse()).
*/
struct list_head ls_lru;
- /*
- * Total number of objects in this site. Protected by ->ls_guard.
+ /**
+ * Total number of objects in this site. Protected by
+ * lu_site::ls_guard.
*/
unsigned ls_total;
- /*
+ /**
* Total number of objects in this site with reference counter greater
- * than 0. Protected by ->ls_guard.
+ * than 0. Protected by lu_site::ls_guard.
*/
unsigned ls_busy;
- /*
+ /**
* Top-level device for this stack.
*/
struct lu_device *ls_top_dev;
* lookup.
*/
__u32 s_cache_check;
- /* raced cache insertions */
+ /** Races with cache insertions. */
__u32 s_cache_race;
+ /**
+ * Races with object destruction.
+ *
+ * \see lu_site::ls_marche_funebre.
+ */
+ __u32 s_cache_death_race;
__u32 s_lru_purged;
} ls_stats;
- /*
+ /**
* Linkage into global list of sites.
*/
struct list_head ls_linkage;
struct lprocfs_stats *ls_time_stats;
};
-/*
+/** \name ctors
* Constructors/destructors.
+ * @{
*/
-/*
- * Initialize site @s, with @d as the top level device.
- */
-int lu_site_init(struct lu_site *s, struct lu_device *d);
-/*
- * Finalize @s and release its resources.
- */
-void lu_site_fini(struct lu_site *s);
-
-/*
- * Called when initialization of stack for this site is completed.
- */
-int lu_site_init_finish(struct lu_site *s);
-
-/*
- * Acquire additional reference on device @d
- */
-void lu_device_get(struct lu_device *d);
-/*
- * Release reference on device @d.
- */
-void lu_device_put(struct lu_device *d);
-
-/*
- * Initialize device @d of type @t.
- */
-int lu_device_init(struct lu_device *d, struct lu_device_type *t);
-/*
- * Finalize device @d.
- */
-void lu_device_fini(struct lu_device *d);
-
-/*
- * Initialize compound object.
- */
+int lu_site_init (struct lu_site *s, struct lu_device *d);
+void lu_site_fini (struct lu_site *s);
+int lu_site_init_finish (struct lu_site *s);
+void lu_stack_fini (const struct lu_env *env, struct lu_device *top);
+void lu_device_get (struct lu_device *d);
+void lu_device_put (struct lu_device *d);
+int lu_device_init (struct lu_device *d, struct lu_device_type *t);
+void lu_device_fini (struct lu_device *d);
int lu_object_header_init(struct lu_object_header *h);
-/*
- * Finalize compound object.
- */
void lu_object_header_fini(struct lu_object_header *h);
-
-/*
- * Initialize object @o that is part of compound object @h and was created by
- * device @d.
- */
-int lu_object_init(struct lu_object *o,
+int lu_object_init (struct lu_object *o,
struct lu_object_header *h, struct lu_device *d);
-/*
- * Finalize object and release its resources.
- */
-void lu_object_fini(struct lu_object *o);
-/*
- * Add object @o as first layer of compound object @h.
- *
- * This is typically called by the ->ldo_object_alloc() method of top-level
- * device.
- */
-void lu_object_add_top(struct lu_object_header *h, struct lu_object *o);
-/*
- * Add object @o as a layer of compound object, going after @before.1
- *
- * This is typically called by the ->ldo_object_alloc() method of
- * @before->lo_dev.
+void lu_object_fini (struct lu_object *o);
+void lu_object_add_top (struct lu_object_header *h, struct lu_object *o);
+void lu_object_add (struct lu_object *before, struct lu_object *o);
+
+/**
+ * Helpers to initialize and finalize device types.
*/
-void lu_object_add(struct lu_object *before, struct lu_object *o);
-/*
+int lu_device_type_init(struct lu_device_type *ldt);
+void lu_device_type_fini(struct lu_device_type *ldt);
+void lu_types_stop(void);
+
+/** @} ctors */
+
+/** \name caching
* Caching and reference counting.
+ * @{
*/
-/*
+/**
* Acquire additional reference to the given object. This function is used to
* attain additional reference. To acquire initial reference use
* lu_object_find().
atomic_inc(&o->lo_header->loh_ref);
}
-/*
+/**
* Return true of object will not be cached after last reference to it is
* released.
*/
return container_of0(h->loh_layers.next, struct lu_object, lo_linkage);
}
-/*
+/**
* Next sub-object in the layering
*/
static inline struct lu_object *lu_object_next(const struct lu_object *o)
return container_of0(o->lo_linkage.next, struct lu_object, lo_linkage);
}
-/*
+/**
* Pointer to the fid of this object.
*/
static inline const struct lu_fid *lu_object_fid(const struct lu_object *o)
return lu_object_exists(o) <= 0;
}
-/*
+/**
* Attr of this object.
*/
static inline __u32 lu_object_attr(const struct lu_object *o)
LU_XATTR_CREATE = (1 << 1)
};
-/* For lu_context health-checks */
+/** @} helpers */
+
+/** \name lu_context
+ * @{ */
+
+/** For lu_context health-checks */
enum lu_context_state {
LCS_INITIALIZED = 1,
LCS_ENTERED,
LCS_FINALIZED
};
-/*
+/**
* lu_context. Execution context for lu_object methods. Currently associated
* with thread.
*
int ln_namelen;
};
-/*
+/**
* Common buffer structure to be passed around for various xattr_{s,g}et()
* methods.
*/
ssize_t lb_len;
};
-extern struct lu_buf LU_BUF_NULL; /* null buffer */
+/** null buffer */
+extern struct lu_buf LU_BUF_NULL;
#define DLUBUF "(%p %z)"
#define PLUBUF(buf) (buf)->lb_buf, (buf)->lb_len
-/*
+/**
* One-time initializers, called at obdclass module initialization, not
* exported.
*/
-/*
+/**
* Initialization of global lu_* data.
*/
int lu_global_init(void);
-/*
+/**
* Dual to lu_global_init().
*/
void lu_global_fini(void);
* lustre/include/lustre/lustre_idl.h
*
* Lustre wire protocol definitions.
+ */
+
+/** \defgroup lustreidl lustreidl
*
+ * Lustre wire protocol definitions.
*
* We assume all nodes are either little-endian or big-endian, and we
* always send messages in the sender's native format. The receiver
* For variable length types, a second 'lustre_swab_v_xxxtypexxx()' routine
* may be defined that swabs just the variable part, after the caller has
* verified that the message buffer is large enough.
+ *
+ * @{
*/
#ifndef _LUSTRE_IDL_H_
(range)->lr_start, \
(range)->lr_end
+/** \defgroup lu_fid lu_fid
+ * @{ */
+
+/**
+ * File identifier.
+ *
+ * Fid is a cluster-wide unique identifier of a file or an object
+ * (stripe). Fids are never reused. Fids are transmitted across network (in
+ * the sender byte-ordering), and stored on disk in a packed form (struct
+ * lu_fid_pack) in a big-endian order.
+ */
struct lu_fid {
- __u64 f_seq; /* holds fid sequence. Lustre should support 2 ^ 64
- * objects, thus even if one sequence has one object we
- * will never reach this value. */
- __u32 f_oid; /* fid number within its sequence. */
- __u32 f_ver; /* holds fid version. */
+ /**
+ * fid sequence. Sequence is a unit of migration: all files (objects)
+ * with fids from a given sequence are stored on the same
+ * server.
+ *
+ * Lustre should support 2 ^ 64 objects, thus even if one
+ * sequence has one object we will never reach this value.
+ */
+ __u64 f_seq;
+ /** fid number within sequence. */
+ __u32 f_oid;
+ /**
+ * fid version, used to distinguish different versions (in the sense
+ * of snapshots, etc.) of the same file system object. Not currently
+ * used.
+ */
+ __u32 f_ver;
};
-/*
+/**
* fid constants
*/
enum {
LUSTRE_FID_INIT_OID = 1UL
};
-/* get object sequence */
+/** returns fid object sequence */
static inline __u64 fid_seq(const struct lu_fid *fid)
{
return fid->f_seq;
}
-/* get object id */
+/** returns fid object id */
static inline __u32 fid_oid(const struct lu_fid *fid)
{
return fid->f_oid;
}
-/* get object version */
+/** returns fid object version */
static inline __u32 fid_ver(const struct lu_fid *fid)
{
return fid->f_ver;
}
#ifdef __KERNEL__
-/*
+
+/**
* Storage representation for fids.
*
* Variable size, first byte contains the length of the whole record.
return memcmp(f0, f1, sizeof *f0) == 0;
}
-/*
+#define __diff_normalize(val0, val1) \
+({ \
+ typeof(val0) __val0 = (val0); \
+ typeof(val1) __val1 = (val1); \
+ \
+ (__val0 == __val1 ? 0 : __val0 > __val1 ? +1 : -1); \
+})
+
+static inline int lu_fid_cmp(const struct lu_fid *f0,
+ const struct lu_fid *f1)
+{
+ return
+ __diff_normalize(fid_seq(f0), fid_seq(f1)) ?:
+ __diff_normalize(fid_oid(f0), fid_oid(f1)) ?:
+ __diff_normalize(fid_ver(f0), fid_ver(f1));
+}
+
+/** @} lu_fid */
+
+/** \defgroup lu_dir lu_dir
+ * @{ */
+/**
* Layout of readdir pages, as transmitted on wire.
*/
struct lu_dirent {
#define DIR_END_OFF 0xfffffffffffffffeULL
+/** @} lu_dir */
+
struct lustre_handle {
__u64 cookie;
};
#define OBD_FL_NO_USRQUOTA (0x00000100) /* the object's owner is over quota */
#define OBD_FL_NO_GRPQUOTA (0x00000200) /* the object's group is over quota */
-/*
+/**
* Set this to delegate DLM locking during obd_punch() to the OSTs. Only OSTs
* that declared OBD_CONNECT_TRUNCLOCK in their connect flags support this
* functionality.
/* catalog of log objects */
-/* Identifier for a single log object */
+/** Identifier for a single log object */
struct llog_logid {
__u64 lgl_oid;
__u64 lgl_ogr;
__u32 lgl_ogen;
} __attribute__((packed));
-/* Records written to the CATALOGS list */
+/** Records written to the CATALOGS list */
#define CATLIST "CATALOGS"
struct llog_catid {
struct llog_logid lci_logid;
__u32 lci_padding3;
} __attribute__((packed));
-/*join file lov mds md*/
+/** join file lov mds md */
struct lov_mds_md_join {
struct lov_mds_md lmmj_md;
/*join private info*/
__swab32(LLOG_OP_MAGIC) || \
(((r)->lrh_type == 0) && ((r)->lrh_len > LLOG_CHUNK_SIZE)))
-/* Log record header - stored in little endian order.
+/** Log record header - stored in little endian order.
* Each record must start with this struct, end with a llog_rec_tail,
* and be a multiple of 256 bits in size.
*/
struct llog_rec_tail lid_tail;
} __attribute__((packed));
-/* MDS extent description
+/** MDS extent description
* It is for joined file extent info, each extent info for joined file
* just like (start, end, lmm).
*/
__u64 med_len; /* extent length */
struct lov_mds_md med_lmm; /* extent's lmm */
};
-/*Joined file array extent log record*/
+
+/** Joined file array extent log record*/
struct llog_array_rec {
struct llog_rec_hdr lmr_hdr;
struct mds_extent_desc lmr_med;
llh->llh_bitmap_offset - \
sizeof(llh->llh_tail)) * 8)
-/* log cookies are used to reference a specific log file and a record therein */
+/** log cookies are used to reference a specific log file and a record therein */
struct llog_cookie {
struct llog_logid lgc_lgl;
__u32 lgc_subsys;
__u32 lgc_padding;
} __attribute__((packed));
-/* llog protocol */
+/** llog protocol */
enum llogd_rpc_ops {
LLOG_ORIGIN_HANDLE_CREATE = 501,
LLOG_ORIGIN_HANDLE_NEXT_BLOCK = 502,
#define JOIN_FILE_ALIGN 4096
-/* security opcodes */
+/** security opcodes */
typedef enum {
SEC_CTX_INIT = 801,
SEC_CTX_INIT_CONT = 802,
extern void lustre_swab_lustre_capa(struct lustre_capa *c);
-/* lustre_capa.lc_opc */
+/** lustre_capa::lc_opc */
enum {
- CAPA_OPC_BODY_WRITE = 1<<0, /* write object data */
- CAPA_OPC_BODY_READ = 1<<1, /* read object data */
- CAPA_OPC_INDEX_LOOKUP = 1<<2, /* lookup object fid */
- CAPA_OPC_INDEX_INSERT = 1<<3, /* insert object fid */
- CAPA_OPC_INDEX_DELETE = 1<<4, /* delete object fid */
- CAPA_OPC_OSS_WRITE = 1<<5, /* write oss object data */
- CAPA_OPC_OSS_READ = 1<<6, /* read oss object data */
- CAPA_OPC_OSS_TRUNC = 1<<7, /* truncate oss object */
- CAPA_OPC_META_WRITE = 1<<8, /* write object meta data */
- CAPA_OPC_META_READ = 1<<9, /* read object meta data */
+ CAPA_OPC_BODY_WRITE = 1<<0, /**< write object data */
+ CAPA_OPC_BODY_READ = 1<<1, /**< read object data */
+ CAPA_OPC_INDEX_LOOKUP = 1<<2, /**< lookup object fid */
+ CAPA_OPC_INDEX_INSERT = 1<<3, /**< insert object fid */
+ CAPA_OPC_INDEX_DELETE = 1<<4, /**< delete object fid */
+ CAPA_OPC_OSS_WRITE = 1<<5, /**< write oss object data */
+ CAPA_OPC_OSS_READ = 1<<6, /**< read oss object data */
+ CAPA_OPC_OSS_TRUNC = 1<<7, /**< truncate oss object */
+ CAPA_OPC_META_WRITE = 1<<8, /**< write object meta data */
+ CAPA_OPC_META_READ = 1<<9, /**< read object meta data */
};
return (c->lc_opc & CAPA_OPC_INDEX_LOOKUP) == 0;
}
-/* lustre_capa.lc_hmac_alg */
+/** lustre_capa::lc_hmac_alg */
enum {
- CAPA_HMAC_ALG_SHA1 = 1, /* sha1 algorithm */
+ CAPA_HMAC_ALG_SHA1 = 1, /**< sha1 algorithm */
CAPA_HMAC_ALG_MAX,
};
#define CAPA_HMAC_ALG_MASK 0xff000000
struct lustre_capa_key {
- __u64 lk_mdsid; /* mds# */
- __u32 lk_keyid; /* key# */
+ __u64 lk_mdsid; /**< mds# */
+ __u32 lk_keyid; /**< key# */
__u32 lk_padding;
- __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /* key */
+ __u8 lk_key[CAPA_HMAC_KEY_MAX_LEN]; /**< key */
} __attribute__((packed));
extern void lustre_swab_lustre_capa_key(struct lustre_capa_key *k);
/* quota check function */
-#define QUOTA_RET_OK 0 /* return successfully */
-#define QUOTA_RET_NOQUOTA 1 /* not support quota */
-#define QUOTA_RET_NOLIMIT 2 /* quota limit isn't set */
-#define QUOTA_RET_ACQUOTA 3 /* need to acquire extra quota */
+#define QUOTA_RET_OK 0 /**< return successfully */
+#define QUOTA_RET_NOQUOTA 1 /**< not support quota */
+#define QUOTA_RET_NOLIMIT 2 /**< quota limit isn't set */
+#define QUOTA_RET_ACQUOTA 3 /**< need to acquire extra quota */
#endif
+
+/** @} lustreidl */
#define LDLM_FL_BLOCK_WAIT 0x000008
#define LDLM_FL_CBPENDING 0x000010 /* this lock is being destroyed */
-#define LDLM_FL_AST_SENT 0x000020 /* blocking or cancel packet was sent */
+#define LDLM_FL_AST_SENT 0x000020 /* blocking or cancel packet was
+ * queued for sending. */
#define LDLM_FL_WAIT_NOREPROC 0x000040 /* not a real flag, not saved in lock */
#define LDLM_FL_CANCEL 0x000080 /* cancellation callback already run */
* list. */
#define LDLM_FL_KMS_IGNORE 0x200000
-/* Don't drop lock covering mmapped file in LRU */
-#define LDLM_FL_NO_LRU 0x400000
-
/* Immediatelly cancel such locks when they block some other locks. Send
- cancel notification to original lock holder, but expect no reply. */
+ * cancel notification to original lock holder, but expect no reply. This is
+ * for clients (like liblustre) that cannot be expected to reliably respond
+ * to a blocking AST. */
#define LDLM_FL_CANCEL_ON_BLOCK 0x800000
/* Flags flags inherited from parent lock when doing intents. */
__u32 l_flags;
__u32 l_readers;
__u32 l_writers;
+ /*
+ * Set for locks that were removed from class hash table and will be
+ * destroyed when last reference to them is released. Set by
+ * ldlm_lock_destroy_internal().
+ *
+ * Protected by lock and resource locks.
+ */
__u8 l_destroyed;
/**
__FILE__, __FUNCTION__, __LINE__, \
"### " fmt , ##a); \
} while (0)
-#else
-#define LDLM_DEBUG(lock, fmt, a...) ((void)0)
-#define LDLM_ERROR(lock, fmt, a...) ((void)0)
+#else /* !LIBCFS_DEBUG */
+# define LDLM_DEBUG(lock, fmt, a...) ((void)0)
+# define LDLM_ERROR(lock, fmt, a...) ((void)0)
+# define ldlm_lock_debug(cdls, level, lock, file, func, line, fmt, a...) \
+ ((void)0)
#endif
#define LDLM_DEBUG_NOLOCK(format, a...) \
spinlock_t lco_lock;
};
-/*
+/**
* This function is used as an upcall-callback hooked by liblustre and llite
* clients into obd_notify() listeners chain to handle notifications about
* change of import connect_flags. See llu_fsswop_mount() and
struct lu_context;
struct lu_env;
+/**
+ * Represents remote procedure call.
+ */
struct ptlrpc_request {
int rq_type; /* one of PTL_RPC_MSG_* */
struct list_head rq_list;
/* Multi-rpc bits */
struct list_head rq_set_chain;
struct ptlrpc_request_set *rq_set;
- void *rq_interpret_reply; /* Async completion handler */
+ /** Async completion handler */
+ ptlrpc_interpterer_t rq_interpret_reply;
union ptlrpc_async_args rq_async_args; /* Async completion context */
struct ptlrpc_request_pool *rq_pool; /* Pool if request from
preallocated list */
#ifndef _LUSTRE_MD_OBJECT_H
#define _LUSTRE_MD_OBJECT_H
-/*
+/** \defgroup md md
* Sub-class of lu_object with methods common for "meta-data" objects in MDT
* stack.
*
* them, and treat them as directories.
*
* Examples: mdt, cmm, and mdt are implementations of md interface.
+ * @{
*/
#define MD_CAPAINFO_MAX 5
-/* there are at most 5 fids in one operation, see rename, NOTE the last one
+/** there are at most 5 fids in one operation, see rename, NOTE the last one
* is a temporary one used for is_subdir() */
struct md_capainfo {
const struct lu_fid *mc_fid[MD_CAPAINFO_MAX];
struct lustre_capa *mc_capa[MD_CAPAINFO_MAX];
};
-/*
+/**
* Implemented in mdd/mdd_handler.c.
*
* XXX should be moved into separate .h/.c together with all md security
struct md_ucred *md_ucred(const struct lu_env *env);
struct md_capainfo *md_capainfo(const struct lu_env *env);
-/* metadata attributes */
+/** metadata attributes */
enum ma_valid {
MA_INODE = (1 << 0),
MA_LOV = (1 << 1),
struct lustre_capa *ma_capa;
};
-/* Additional parameters for create */
+/** Additional parameters for create */
struct md_op_spec {
union {
- /* symlink target */
+ /** symlink target */
const char *sp_symname;
- /* parent FID for cross-ref mkdir */
+ /** parent FID for cross-ref mkdir */
const struct lu_fid *sp_pfid;
- /* eadata for regular files */
+ /** eadata for regular files */
struct md_spec_reg {
- /* lov objs exist already */
+ /** lov objs exist already */
const struct lu_fid *fid;
int no_lov_create;
const void *eadata;
} sp_ea;
} u;
- /* Create flag from client: such as MDS_OPEN_CREAT, and others. */
+ /** Create flag from client: such as MDS_OPEN_CREAT, and others. */
__u32 sp_cr_flags;
- /* Should mdd do lookup sanity check or not. */
+ /** Should mdd do lookup sanity check or not. */
int sp_cr_lookup;
- /* Current lock mode for parent dir where create is performing. */
+ /** Current lock mode for parent dir where create is performing. */
mdl_mode_t sp_cr_mode;
- /* Check for split */
+ /** Check for split */
int sp_ck_split;
};
-/*
+/**
* Operations implemented for each md object (both directory and leaf).
*/
struct md_object_operations {
int (*moo_readlink)(const struct lu_env *env, struct md_object *obj,
struct lu_buf *buf);
- /* part of cross-ref operation */
+ /** part of cross-ref operation */
int (*moo_object_create)(const struct lu_env *env,
struct md_object *obj,
const struct md_op_spec *spec,
int (*moo_object_sync)(const struct lu_env *, struct md_object *);
};
-/*
+/**
* Operations implemented for each directory object.
*/
struct md_dir_operations {
struct md_op_spec *spec,
struct md_attr *ma);
- /* This method is used for creating data object for this meta object*/
+ /** This method is used for creating data object for this meta object*/
int (*mdo_create_data)(const struct lu_env *env, struct md_object *p,
struct md_object *o,
const struct md_op_spec *spec,
struct md_object *cobj, const struct lu_name *lname,
struct md_attr *ma);
- /* partial ops for cross-ref case */
+ /** partial ops for cross-ref case */
int (*mdo_name_insert)(const struct lu_env *env,
struct md_object *obj,
const struct lu_name *lname,
};
struct md_device_operations {
- /* meta-data device related handlers. */
+ /** meta-data device related handlers. */
int (*mdo_root_get)(const struct lu_env *env, struct md_device *m,
struct lu_fid *f);
};
enum md_upcall_event {
- /*sync the md layer*/
+ /** sync the md layer */
MD_LOV_SYNC = (1 << 0),
- MD_NO_TRANS = (1 << 1), /* Just for split, no need trans, for replay */
+ /** Just for split, no need trans, for replay */
+ MD_NO_TRANS = (1 << 1),
MD_LOV_CONFIG = (1 << 2)
};
struct md_upcall {
- /* this lock protects upcall using against its removal
+ /** this lock protects upcall usage against its removal
* read lock is for usage the upcall, write - for init/fini */
struct rw_semaphore mu_upcall_sem;
- /* device to call, upper layer normally */
+ /** device to call, upper layer normally */
struct md_device *mu_upcall_dev;
- /* upcall function */
+ /** upcall function */
int (*mu_upcall)(const struct lu_env *env, struct md_device *md,
enum md_upcall_event ev);
};
}
}
+/** @} md */
+
#endif /* _LINUX_MD_OBJECT_H */
int mdc_getstatus(struct obd_export *exp, struct lu_fid *rootfid,
struct obd_capa **pc)
{
- return send_getstatus(class_exp2cliimp(exp), rootfid, pc,
+ return send_getstatus(class_exp2cliimp(exp), rootfid, pc,
LUSTRE_IMP_FULL, 0);
}
sizeof(struct mdt_rec_reint));
rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
rec->sx_opcode = REINT_SETXATTR;
- /* TODO:
- * cfs_curproc_fs{u,g}id() should replace
+ /* TODO:
+ * cfs_curproc_fs{u,g}id() should replace
* current->fs{u,g}id for portability.
*/
rec->sx_fsuid = current->fsuid;
const char *input, int input_size, int output_size,
int flags, __u32 suppgid, struct ptlrpc_request **request)
{
- return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
+ return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
fid, oc, MDS_REINT, valid, xattr_name,
input, input_size, output_size, flags,
suppgid, request);
const char *input, int input_size, int output_size,
int flags, struct ptlrpc_request **request)
{
- return mdc_xattr_common(exp, &RQF_MDS_GETXATTR,
+ return mdc_xattr_common(exp, &RQF_MDS_GETXATTR,
fid, oc, MDS_GETXATTR, valid, xattr_name,
input, input_size, output_size, flags,
-1, request);
}
else if (md->body->valid & OBD_MD_FLACL) {
/* for ACL, it's possible that FLACL is set but aclsize is zero.
- * only when aclsize != 0 there's an actual segment for ACL
- * in reply buffer.
+ * only when aclsize != 0 there's an actual segment for ACL
+ * in reply buffer.
*/
if (md->body->aclsize) {
rc = mdc_unpack_acl(req, md);
opc == MDS_CLOSE ? "CLOSE" : "DONE_WRITING");
} else if (opc == MDS_REINT) {
struct mdt_rec_setattr *rec;
-
+
/* Check this is REINT_SETATTR. */
rec = req_capsule_client_get(&cur->rq_pill,
&RMF_REC_REINT);
{
struct md_open_data *mod = req->rq_cb_data;
struct ptlrpc_request *cur, *tmp;
-
+
DEBUG_REQ(D_HA, req, "req committed");
if (mod == NULL)
if (rc > 0)
rc = -rc;
} else if (mod == NULL) {
- if (req->rq_import->imp_replayable)
- CERROR("Unexpected: can't find md_open_data,"
+ if (req->rq_import->imp_replayable)
+ CERROR("Unexpected: can't find md_open_data,"
"but close succeeded with replayable imp"
"Please tell "
"http://bugzilla.lustre.org/\n");
* attribute update is needed. */
if (rc != -EAGAIN)
ptlrpc_close_replay_seq(req);
-
+
if (rc && rc != -EAGAIN && req->rq_commit_cb)
req->rq_commit_cb(req);
}
if (KEY_IS(KEY_MDS_CONN)) {
struct obd_import *imp = class_exp2cliimp(exp);
-
+
/* mds-mds import */
spin_lock(&imp->imp_lock);
imp->imp_server_timeout = 1;
*data = imp->imp_connect_data;
RETURN(0);
}
-
+
RETURN(rc);
}
int rc;
ENTRY;
-
- /*Since the request might also come from lprocfs, so we need
+
+ /* Since the request might also come from lprocfs, we need to
*sync this with client_disconnect_export Bug15684*/
down_read(&obd->u.cli.cl_sem);
if (obd->u.cli.cl_import)
up_read(&obd->u.cli.cl_sem);
if (!imp)
RETURN(-ENODEV);
-
+
req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
LUSTRE_MDS_VERSION, MDS_STATFS);
if (req == NULL)
}
case IMP_EVENT_INACTIVE: {
struct client_obd *cli = &obd->u.cli;
- /*
+ /*
* Flush current sequence to make client obtain new one
* from server in case of disconnect/reconnect.
* If range is already empty then no need to flush it.
*/
- if (cli->cl_seq != NULL &&
+ if (cli->cl_seq != NULL &&
!range_is_exhausted(&cli->cl_seq->lcs_space)) {
seq_client_flush(cli->cl_seq);
}
exp->exp_obd->obd_name);
/* Init client side sequence-manager */
- rc = seq_client_init(cli->cl_seq, exp,
+ rc = seq_client_init(cli->cl_seq, exp,
LUSTRE_SEQ_METADATA,
prefix, NULL);
OBD_FREE(prefix, MAX_OBD_NAME + 5);
OBD_FREE_PTR(cli->cl_seq);
cli->cl_seq = NULL;
}
-
+
RETURN(0);
}
static int mdc_fid_delete(struct obd_export *exp, const struct lu_fid *fid)
{
struct client_obd *cli = &exp->exp_obd->u.cli;
-
+
seq_client_flush(cli->cl_seq);
return 0;
}
int rc;
struct lprocfs_static_vars lvars = { 0 };
lprocfs_mdc_init_vars(&lvars);
-
+
request_module("lquota");
quota_interface = PORTAL_SYMBOL_GET(mdc_quota_interface);
init_obd_quota_ops(quota_interface, &mdc_obd_ops);
if (info->mti_dlm_req)
ldlm_request_cancel(req, info->mti_dlm_req, 0);
-
+
repbody = req_capsule_server_get(info->mti_pill, &RMF_MDT_BODY);
mo = mdt_object_find(info->mti_env, info->mti_mdt, rr->rr_fid1);
if (IS_ERR(mo))
MDS_INODELOCK_UPDATE);
if (IS_ERR(mp)) {
rc = PTR_ERR(mp);
- /* errors are possible here in cross-ref cases, see below */
+ /* errors are possible here in cross-ref cases, see below */
if (info->mti_cross_ref)
rc = 0;
GOTO(out, rc);
int rc;
ENTRY;
- /*
- * Disable global rename BFL lock temporarily because
- * when a mds do rename recoverying, which might enqueue
- * BFL lock to the controller mds. and this req might be
- * replay req for controller mds. but we did not have
+ /*
+ * Disable global rename BFL lock temporarily because
+ * when a mds does rename recovering, which might enqueue
+ * BFL lock to the controller mds. and this req might be
+ * replay req for controller mds. but we did not have
* such handling in controller mds. XXX
*/
RETURN(0);
* These are the only exported functions, they provide some generic
* infrastructure for managing object devices
*
- * Author: Nikita Danilov <nikita@clusterfs.com>
+ * Author: Nikita Danilov <nikita.danilov@sun.com>
*/
#define DEBUG_SUBSYSTEM S_CLASS
# define EXPORT_SYMTAB
#endif
-#include <linux/seq_file.h>
-#include <linux/module.h>
-/* nr_free_pages() */
-#include <linux/swap.h>
+#include <libcfs/libcfs.h>
+
+#ifdef __KERNEL__
+# include <linux/module.h>
+#endif
+
/* hash_long() */
#include <libcfs/libcfs_hash.h>
+#include <obd_class.h>
#include <obd_support.h>
#include <lustre_disk.h>
#include <lustre_fid.h>
static void lu_object_free(const struct lu_env *env, struct lu_object *o);
-/*
+/**
* Decrease reference counter on object. If last reference is freed, return
* object to the cache, unless lu_object_is_dying(o) holds. In the latter
* case, free object immediately.
}
}
- s->ls_stats.s_created ++;
+ dev->ld_site->ls_stats.s_created ++;
RETURN(top);
}
-/*
- * Free object.
+/**
+ * Free an object.
*/
static void lu_object_free(const struct lu_env *env, struct lu_object *o)
{
struct list_head splice;
struct lu_object *scan;
+ struct lu_site *site;
+ struct list_head *layers;
+ site = o->lo_dev->ld_site;
+ layers = &o->lo_header->loh_layers;
/*
* First call ->loo_object_delete() method to release all resources.
*/
- list_for_each_entry_reverse(scan,
- &o->lo_header->loh_layers, lo_linkage) {
+ list_for_each_entry_reverse(scan, layers, lo_linkage) {
if (scan->lo_ops->loo_object_delete != NULL)
scan->lo_ops->loo_object_delete(env, scan);
}
* top-level slice.
*/
CFS_INIT_LIST_HEAD(&splice);
- list_splice_init(&o->lo_header->loh_layers, &splice);
+ list_splice_init(layers, &splice);
while (!list_empty(&splice)) {
- o = container_of0(splice.next, struct lu_object, lo_linkage);
+ /*
+ * Free layers in bottom-to-top order, so that object header
+ * lives as long as possible and ->loo_object_free() methods
+ * can look at its contents.
+ */
+ o = container_of0(splice.prev, struct lu_object, lo_linkage);
list_del_init(&o->lo_linkage);
LASSERT(o->lo_ops->loo_object_free != NULL);
o->lo_ops->loo_object_free(env, o);
}
+ cfs_waitq_broadcast(&site->ls_marche_funebre);
}
-/*
- * Free @nr objects from the cold end of the site LRU list.
+/**
+ * Free \a nr objects from the cold end of the site LRU list.
*/
int lu_site_purge(const struct lu_env *env, struct lu_site *s, int nr)
{
*/
enum {
- /*
+ /**
* Maximal line size.
*
* XXX overflow is not handled correctly.
};
struct lu_cdebug_data {
- /*
+ /**
* Temporary buffer.
*/
char lck_area[LU_CDEBUG_LINE];
- /*
- * fid staging area used by dt_store_open().
- */
- struct lu_fid_pack lck_pack;
};
/* context key constructor/destructor: lu_global_key_init, lu_global_key_fini */
LU_KEY_INIT_FINI(lu_global, struct lu_cdebug_data);
-/*
+/**
* Key, holding temporary buffer. This key is registered very early by
* lu_global_init().
*/
.lct_fini = lu_global_key_fini
};
-/*
+/**
* Printer function emitting messages through libcfs_debug_msg().
*/
int lu_cdebug_printer(const struct lu_env *env,
}
EXPORT_SYMBOL(lu_object_add_top);
-/*
- * Add object @o as a layer of compound object, going after @before.1
+/**
+ * Add object \a o as a layer of compound object, going after \a before.
*
- * This is typically called by the ->ldo_object_alloc() method of
- * @before->lo_dev.
+ * This is typically called by the ->ldo_object_alloc() method of \a
+ * before->lo_dev.
*/
void lu_object_add(struct lu_object *before, struct lu_object *o)
{
}
EXPORT_SYMBOL(lu_object_add);
-/*
+/**
* Initialize compound object.
*/
int lu_object_header_init(struct lu_object_header *h)
struct osd_object {
struct dt_object oo_dt;
- /*
+ /**
* Inode for file system object represented by this osd_object. This
* inode is pinned for the whole duration of lu_object life.
*
struct req_msg_field {
__u32 rmf_flags;
const char *rmf_name;
- /*
+ /**
* Field length. (-1) means "variable length".
*/
int rmf_size;