*
* You should have received a copy of the GNU General Public License
* version 2 along with this program; If not, see
- * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
+ * http://www.gnu.org/licenses/gpl-2.0.html
*
* GPL HEADER END
*/
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*
- * Copyright (c) 2011, 2013, Intel Corporation.
+ * Copyright (c) 2011, 2017, Intel Corporation.
*/
/*
* This file is part of Lustre, http://www.lustre.org/
} mntopt_t;
struct dt_device_param {
- unsigned ddp_max_name_len;
- unsigned ddp_max_nlink;
- unsigned ddp_block_shift;
- mntopt_t ddp_mntopts;
- unsigned ddp_max_ea_size;
- int ddp_mount_type;
- unsigned long long ddp_maxbytes;
- /* percentage of available space to reserve for grant error margin */
- int ddp_grant_reserved;
- /* per-inode space consumption */
- short ddp_inodespace;
- /* per-fragment grant overhead to be used by client for grant
- * calculation */
- int ddp_grant_frag;
+ unsigned ddp_max_name_len;
+ unsigned ddp_max_nlink;
+ unsigned ddp_symlink_max;
+ mntopt_t ddp_mntopts;
+ unsigned ddp_max_ea_size;
+ unsigned ddp_mount_type;
+ unsigned long long ddp_maxbytes;
+ /* per-inode space consumption */
+ short ddp_inodespace;
+ /* maximum number of blocks in an extent */
+ unsigned ddp_max_extent_blks;
+ /* per-extent insertion overhead to be used by client for grant
+ * calculation */
+ unsigned int ddp_extent_tax;
+ unsigned int ddp_brw_size; /* optimal RPC size */
+ /* T10PI checksum type, zero if not supported */
+ enum cksum_types ddp_t10_cksum_type;
};
/**
#define TRANS_COMMIT_CB_MAGIC 0xa0a00a0a
#define MAX_COMMIT_CB_STR_LEN 32
+#define DCB_TRANS_STOP 0x1
struct dt_txn_commit_cb {
struct list_head dcb_linkage;
dt_cb_t dcb_func;
+ void *dcb_data;
__u32 dcb_magic;
+ __u32 dcb_flags;
char dcb_name[MAX_COMMIT_CB_STR_LEN];
};
*/
int (*dt_statfs)(const struct lu_env *env,
struct dt_device *dev,
- struct obd_statfs *osfs);
+ struct obd_statfs *osfs,
+ struct obd_statfs_info *info);
/**
* Create transaction.
struct dt_device_param *param);
/**
+ * Return device's super block.
+ *
+ * \param[in] dev dt device
+ */
+ struct super_block *(*dt_mnt_sb_get)(const struct dt_device *dev);
+
+ /**
* Sync the device.
*
* Sync all the cached state (dirty buffers, pages, etc) to the
*/
int (*dt_commit_async)(const struct lu_env *env,
struct dt_device *dev);
-
- /**
- * Not used, subject to removal.
- */
- int (*dt_init_capa_ctxt)(const struct lu_env *env,
- struct dt_device *dev,
- int mode,
- unsigned long timeout,
- __u32 alg,
- struct lustre_capa_key *keys);
};
struct dt_index_features {
};
enum dt_index_flags {
- /** index supports variable sized keys */
- DT_IND_VARKEY = 1 << 0,
- /** index supports variable sized records */
- DT_IND_VARREC = 1 << 1,
- /** index can be modified */
- DT_IND_UPDATE = 1 << 2,
- /** index supports records with non-unique (duplicate) keys */
- DT_IND_NONUNQ = 1 << 3,
- /**
- * index support fixed-size keys sorted with natural numerical way
- * and is able to return left-side value if no exact value found
- */
- DT_IND_RANGE = 1 << 4,
+ /** index supports variable sized keys */
+ DT_IND_VARKEY = BIT(0),
+ /** index supports variable sized records */
+ DT_IND_VARREC = BIT(1),
+ /** index can be modified */
+ DT_IND_UPDATE = BIT(2),
+ /** index supports records with non-unique (duplicate) keys */
+ DT_IND_NONUNQ = BIT(3),
+ /**
+ * index support fixed-size keys sorted with natural numerical way
+ * and is able to return left-side value if no exact value found
+ */
+ DT_IND_RANGE = BIT(4),
+};
+
+/* for dt_read_lock() and dt_write_lock() object lock rule */
+enum dt_object_role {
+ DT_SRC_PARENT,
+ DT_SRC_CHILD,
+ DT_TGT_PARENT,
+ DT_TGT_CHILD,
+ DT_TGT_ORPHAN,
+ DT_LASTID,
};
/**
*/
extern const struct dt_index_features dt_directory_features;
extern const struct dt_index_features dt_otable_features;
-extern const struct dt_index_features dt_lfsck_orphan_features;
-extern const struct dt_index_features dt_lfsck_features;
+extern const struct dt_index_features dt_lfsck_layout_orphan_features;
+extern const struct dt_index_features dt_lfsck_layout_dangling_features;
+extern const struct dt_index_features dt_lfsck_namespace_features;
/* index features supported by the accounting objects */
extern const struct dt_index_features dt_acct_features;
/* index features supported by the quota slave indexes */
extern const struct dt_index_features dt_quota_slv_features;
+/* index features supported by the nodemap index */
+extern const struct dt_index_features dt_nodemap_features;
+
/**
* This is a general purpose dt allocation hint.
* It now contains the parent object.
const void *dah_eadata;
int dah_eadata_len;
__u32 dah_mode;
+ int dah_append_stripes;
+ char *dah_append_pool;
};
/**
union ldlm_policy_data;
+struct md_layout_change;
+
/**
* A dt_object provides common operations to create and destroy
* objects and to manage regular and extended attributes.
*
* \param[in] env execution environment for this thread
* \param[in] dt object
- * \param[in] capa unused
*
* \retval 0 on success
* \retval negative negated errno on error
*/
int (*do_declare_attr_get)(const struct lu_env *env,
- struct dt_object *dt,
- struct lustre_capa *capa);
+ struct dt_object *dt);
/**
* Return regular attributes.
* \param[in] env execution environment for this thread
* \param[in] dt object
* \param[out] attr attributes to fill
- * \param[in] capa unused
*
* \retval 0 on success
* \retval negative negated errno on error
*/
int (*do_attr_get)(const struct lu_env *env,
struct dt_object *dt,
- struct lu_attr *attr,
- struct lustre_capa *capa);
+ struct lu_attr *attr);
/**
* Declare intention to change regular object's attributes.
* \param[in] dt object
* \param[in] attr new attributes to apply
* \param[in] th transaction handle
- * \param[in] capa unused
*
* \retval 0 on success
* \retval negative negated errno on error
int (*do_attr_set)(const struct lu_env *env,
struct dt_object *dt,
const struct lu_attr *attr,
- struct thandle *th,
- struct lustre_capa *capa);
+ struct thandle *th);
/**
* Declare intention to request extented attribute.
* \param[in] dt object
* \param[in] buf unused, may be removed in the future
* \param[in] name name of the extended attribute
- * \param[in] capa unused, may be removed in the future
*
* \retval 0 on success
* \retval negative negated errno on error
int (*do_declare_xattr_get)(const struct lu_env *env,
struct dt_object *dt,
struct lu_buf *buf,
- const char *name,
- struct lustre_capa *capa);
+ const char *name);
/**
* Return a value of an extended attribute.
* \param[in] dt object
* \param[out] buf buffer in which to store the value
* \param[in] name name of the extended attribute
- * \param[in] capa unused
*
* \retval 0 on success
* \retval -ERANGE if \a buf is too small
int (*do_xattr_get)(const struct lu_env *env,
struct dt_object *dt,
struct lu_buf *buf,
- const char *name,
- struct lustre_capa *capa);
+ const char *name);
/**
* Declare intention to change an extended attribute.
* \param[in] name name of the attribute
* \param[in] fl flags indicating EA creation or replacement
* \param[in] th transaction handle
- * \param[in] capa unused
*
* \retval 0 on success
* \retval negative negated errno on error
const struct lu_buf *buf,
const char *name,
int fl,
- struct thandle *th,
- struct lustre_capa *capa);
+ struct thandle *th);
/**
* Declare intention to delete an extended attribute.
* \param[in] dt object
* \param[in] name name of the attribute
* \param[in] th transaction handle
- * \param[in] capa unused
*
* \retval 0 on success
* \retval negative negated errno on error
int (*do_xattr_del)(const struct lu_env *env,
struct dt_object *dt,
const char *name,
- struct thandle *th,
- struct lustre_capa *capa);
+ struct thandle *th);
/**
* Return a list of the extended attributes.
* \param[in] env execution environment for this thread
* \param[in] dt object
* \param[out] buf buffer to put the list in
- * \param[in] capa unused
*
* \retval positive bytes used/required in the buffer
* \retval negative negated errno on error
*/
int (*do_xattr_list)(const struct lu_env *env,
struct dt_object *dt,
- struct lu_buf *buf,
- struct lustre_capa *capa);
+ const struct lu_buf *buf);
/**
* Prepare allocation hint for a new object.
struct thandle *th);
/**
- * Not used, subject to removal.
- */
- struct obd_capa *(*do_capa_get)(const struct lu_env *env,
- struct dt_object *dt,
- struct lustre_capa *old,
- __u64 opc);
-
- /**
* Sync obect.
*
* The method is called to sync specified range of the object to a
* \retval 0 on success
* \retval negative negated errno on error
*/
- int (*do_object_sync)(const struct lu_env *env,
- struct dt_object *obj,
- __u64 start,
- __u64 end);
-
- /**
- * Not used, subject to removal.
- */
- int (*do_data_get)(const struct lu_env *env,
- struct dt_object *dt,
- void **data);
+ int (*do_object_sync)(const struct lu_env *env, struct dt_object *obj,
+ __u64 start, __u64 end);
/**
* Lock object.
struct dt_object *dt,
struct ldlm_enqueue_info *einfo,
union ldlm_policy_data *policy);
+
+ /**
+ * Invalidate attribute cache.
+ *
+	 * This method invalidates the attribute cache of the object, which is on OSP
+ * only.
+ *
+	 * \param[in] env	execution environment for this thread
+ * \param[in] dt object
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
+ int (*do_invalidate)(const struct lu_env *env, struct dt_object *dt);
+
+ /**
+	 * Declare intention to instantiate extended layout component.
+ *
+ * \param[in] env execution environment
+ * \param[in] dt DT object
+	 * \param[in] mlc	data structure to describe the changes to
+	 *			the DT object's layout
+	 * \param[in] th	transaction handle
+ *
+ * \retval 0 success
+ * \retval -ne error code
+ */
+ int (*do_declare_layout_change)(const struct lu_env *env,
+ struct dt_object *dt,
+ struct md_layout_change *mlc,
+ struct thandle *th);
+
+ /**
+ * Client is trying to write to un-instantiated layout component.
+ *
+ * \param[in] env execution environment
+ * \param[in] dt DT object
+	 * \param[in] mlc	data structure to describe the changes to
+	 *			the DT object's layout
+	 * \param[in] th	transaction handle
+ *
+ * \retval 0 success
+ * \retval -ne error code
+ */
+ int (*do_layout_change)(const struct lu_env *env, struct dt_object *dt,
+ struct md_layout_change *mlc,
+ struct thandle *th);
+};
+
+enum dt_bufs_type {
+ DT_BUFS_TYPE_READ = 0x0000,
+ DT_BUFS_TYPE_WRITE = 0x0001,
+ DT_BUFS_TYPE_READAHEAD = 0x0002,
+ DT_BUFS_TYPE_LOCAL = 0x0004,
};
/**
* \param[out] buf buffer (including size) to copy data in
* \param[in] pos position in the object to start
* \param[out] pos original value of \a pos + bytes returned
- * \param[in] capa unused
*
* \retval positive bytes read on success
* \retval negative negated errno on error
ssize_t (*dbo_read)(const struct lu_env *env,
struct dt_object *dt,
struct lu_buf *buf,
- loff_t *pos,
- struct lustre_capa *capa);
+ loff_t *pos);
/**
* Declare intention to write data to object.
* \param[in] pos position in the object to start
* \param[out] pos \a pos + bytes written
* \param[in] th transaction handle
- * \param[in] capa unused
- * \param[in] ignore unused (was used to request quota ignorance)
*
* \retval positive bytes written on success
* \retval negative negated errno on error
struct dt_object *dt,
const struct lu_buf *buf,
loff_t *pos,
- struct thandle *th,
- struct lustre_capa *capa,
- int ignore);
+ struct thandle *th);
/**
* Return buffers for data.
* at object's offset lnb_file_offset.
*
* The memory referenced by the descriptors can't change its purpose
- * until the complimentary ->dbo_bufs_put() is called. The caller should
+ * until the complementary ->dbo_bufs_put() is called. The caller should
* specify if the buffers are used to read or modify data so that OSD
* can decide how to initialize the buffers: bring all the data for
* reads or just bring partial buffers for write. Note: the method does
* \param[in] pos position in the object to start
* \param[in] len size of region in bytes
* \param[out] lb array of descriptors to fill
+ * \param[in] maxlnb max slots in @lnb array
* \param[in] rw 0 if used to read, 1 if used for write
- * \param[in] capa unused
*
* \retval positive number of descriptors on success
* \retval negative negated errno on error
loff_t pos,
ssize_t len,
struct niobuf_local *lb,
- int rw,
- struct lustre_capa *capa);
+ int maxlnb,
+ enum dt_bufs_type rw);
/**
* Release reference granted by ->dbo_bufs_get().
* then the method should maintain space accounting for the given
* credentials.
*
+ * user_size parameter is the apparent size of the file, ie the size
+ * of the clear text version of the file. It can differ from the actual
+ * amount of valuable data received when a file is encrypted,
+ * because encrypted pages always contain PAGE_SIZE bytes of data,
+ * even if clear text data is only a few bytes.
+ * In case of encrypted file, apparent size will be stored as the inode
+ * size, so that servers return to clients an object size they can use
+ * to determine clear text size.
+ *
* \param[in] env execution environment for this thread
* \param[in] dt object
* \param[in] lb array of descriptors for the buffers
* \param[in] nr size of the array
* \param[in] th transaction handle
+ * \param[in] user_size apparent size
*
* \retval 0 on success
* \retval negative negated errno on error
struct dt_object *dt,
struct niobuf_local *lb,
int nr,
- struct thandle *th);
+ struct thandle *th,
+ __u64 user_size);
/**
* Return logical to physical block mapping for a given extent
*/
int (*dbo_fiemap_get)(const struct lu_env *env,
struct dt_object *dt,
- struct ll_user_fiemap *fm);
+ struct fiemap *fm);
/**
* Declare intention to deallocate space from an object.
* \param[in] start the start of the region to deallocate
* \param[in] end the end of the region to deallocate
* \param[in] th transaction handle
- * \param[in] capa unused
*
* \retval 0 on success
* \retval negative negated errno on error
struct dt_object *dt,
__u64 start,
__u64 end,
- struct thandle *th,
- struct lustre_capa *capa);
+ struct thandle *th);
+ /**
+ * Give advices on specified region in an object.
+ *
+ * This method is used to give advices about access pattern on an
+ * given region of the object. The disk filesystem understands
+ * the advices and tunes cache/read-ahead policies.
+ *
+ * \param[in] env execution environment for this thread
+ * \param[in] dt object
+ * \param[in] start the start of the region affected
+ * \param[in] end the end of the region affected
+ * \param[in] advice advice type
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
+ int (*dbo_ladvise)(const struct lu_env *env,
+ struct dt_object *dt,
+ __u64 start,
+ __u64 end,
+ enum lu_ladvise_type advice);
+
+ /**
+ * Declare intention to preallocate space for an object
+ *
+ * \param[in] env execution environment for this thread
+ * \param[in] dt object
+ * \param[in] th transaction handle
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
+ int (*dbo_declare_fallocate)(const struct lu_env *env,
+ struct dt_object *dt,
+ struct thandle *th);
+ /**
+ * Allocate specified region for an object
+ *
+ * \param[in] env execution environment for this thread
+ * \param[in] dt object
+ * \param[in] start the start of the region to allocate
+ * \param[in] end the end of the region to allocate
+ * \param[in] mode fallocate mode
+ * \param[in] th transaction handle
+ *
+ * \retval 0 on success
+ * \retval negative negated errno on error
+ */
+ int (*dbo_fallocate)(const struct lu_env *env,
+ struct dt_object *dt,
+ __u64 start,
+ __u64 end,
+ int mode,
+ struct thandle *th);
};
/**
* \param[in] dt object
* \param[out] rec buffer where value will be stored
* \param[in] key key
- * \param[in] capa unused
*
* \retval 0 on success
* \retval -ENOENT if key isn't found
int (*dio_lookup)(const struct lu_env *env,
struct dt_object *dt,
struct dt_rec *rec,
- const struct dt_key *key,
- struct lustre_capa *capa);
+ const struct dt_key *key);
/**
* Declare intention to insert a key/value into an index.
* \param[in] rec buffer storing value
* \param[in] key key
* \param[in] th transaction handle
- * \param[in] capa unused
- * \param[in] ignore unused (was used to request quota ignorance)
*
* \retval 0 on success
* \retval negative negated errno on error
struct dt_object *dt,
const struct dt_rec *rec,
const struct dt_key *key,
- struct thandle *th,
- struct lustre_capa *capa,
- int ignore);
+ struct thandle *th);
/**
* Declare intention to delete a key/value from an index.
* \param[in] dt object
* \param[in] key key
* \param[in] th transaction handle
- * \param[in] capa unused
*
* \retval 0 on success
* \retval negative negated errno on error
int (*dio_delete)(const struct lu_env *env,
struct dt_object *dt,
const struct dt_key *key,
- struct thandle *th,
- struct lustre_capa *capa);
+ struct thandle *th);
/**
* Iterator interface.
* \param[in] dt object
* \param[in] attr ask the iterator to return part of
the records, see LUDA_* for details
- * \param[in] capa unused
*
* \retval pointer iterator pointer on success
* \retval ERR_PTR(errno) on error
*/
struct dt_it *(*init)(const struct lu_env *env,
- struct dt_object *dt,
- __u32 attr,
- struct lustre_capa *capa);
+ struct dt_object *dt,
+ __u32 attr);
/**
* Release iterator.
DOIF_DRYRUN = 0x0008,
};
-/* otable based iteration needs to use the common DT interation APIs.
+/* otable based iteration needs to use the common DT iteration APIs.
* To initialize the iteration, it needs call dio_it::init() firstly.
* Here is how the otable based iteration should prepare arguments to
* call dt_it_ops::init().
* single-threaded start-up shut-down procedures.
*/
struct list_head dd_txn_callbacks;
- unsigned int dd_record_fid_accessed:1;
+ unsigned int dd_record_fid_accessed:1,
+ dd_rdonly:1;
+
+ /* sysfs and debugfs handling */
+ struct dentry *dd_debugfs_entry;
+
+ const struct attribute **dd_def_attrs;
+ struct kobject dd_kobj;
+ struct kobj_type dd_ktype;
+ struct completion dd_kobj_unregister;
};
int dt_device_init(struct dt_device *dev, struct lu_device_type *t);
static inline struct dt_device * lu2dt_dev(struct lu_device *l)
{
- LASSERT(lu_device_is_dt(l));
- return container_of0(l, struct dt_device, dd_lu_dev);
+ LASSERT(lu_device_is_dt(l));
+ return container_of_safe(l, struct dt_device, dd_lu_dev);
}
struct dt_object {
static inline struct dt_object *lu2dt(struct lu_object *l)
{
- LASSERT(l == NULL || IS_ERR(l) || lu_device_is_dt(l->lo_dev));
- return container_of0(l, struct dt_object, do_lu);
+ LASSERT(l == NULL || IS_ERR(l) || lu_device_is_dt(l->lo_dev));
+ return container_of_safe(l, struct dt_object, do_lu);
}
int dt_object_init(struct dt_object *obj,
static inline struct dt_object *lu2dt_obj(struct lu_object *o)
{
LASSERT(ergo(o != NULL, lu_device_is_dt(o->lo_dev)));
- return container_of0(o, struct dt_object, do_lu);
+ return container_of_safe(o, struct dt_object, do_lu);
}
-struct thandle_update {
- /* In DNE, one transaction can be disassembled into
- * updates on several different MDTs, and these updates
- * will be attached to tu_remote_update_list per target.
- * Only single thread will access the list, no need lock
- */
- struct list_head tu_remote_update_list;
-
- /* sent after or before local transaction */
- unsigned int tu_sent_after_local_trans:1,
- tu_only_remote_trans:1;
-};
+static inline struct dt_object *dt_object_child(struct dt_object *o)
+{
+ return container_of(lu_object_next(&(o)->do_lu),
+ struct dt_object, do_lu);
+}
/**
* This is the general purpose transaction handle.
/** the dt device on which the transactions are executed */
struct dt_device *th_dev;
- atomic_t th_refc;
- /* the size of transaction */
- int th_alloc_size;
-
- /** context for this transaction, tag is LCT_TX_HANDLE */
- struct lu_context th_ctx;
-
- /** additional tags (layers can add in declare) */
- __u32 th_tags;
+ /* point to the top thandle, XXX this is a bit hacky right now,
+ * but normal device trans callback triggered by the bottom
+ * device (OSP/OSD == sub thandle layer) needs to get the
+ * top_thandle (see dt_txn_hook_start/stop()), so we put the
+ * top thandle here for now, will fix it when we have better
+ * callback mechanism */
+ struct thandle *th_top;
/** the last operation result in this transaction.
* this value is used in recovery */
__s32 th_result;
/** whether we need sync commit */
- unsigned int th_sync:1;
-
+ unsigned int th_sync:1,
/* local transation, no need to inform other layers */
- unsigned int th_local:1;
-
- struct thandle_update *th_update;
+ th_local:1,
+ /* Whether we need wait the transaction to be submitted
+ * (send to remote target) */
+ th_wait_submit:1,
+ /* complex transaction which will track updates on all targets,
+ * including OSTs */
+ th_complex:1,
+ /* whether ignore quota */
+ th_ignore_quota:1;
};
-static inline void thandle_get(struct thandle *thandle)
-{
- atomic_inc(&thandle->th_refc);
-}
-
-static inline void thandle_put(struct thandle *thandle)
-{
- if (atomic_dec_and_test(&thandle->th_refc)) {
- if (thandle->th_update != NULL)
- OBD_FREE_PTR(thandle->th_update);
- OBD_FREE(thandle, thandle->th_alloc_size);
- }
-}
/**
* Transaction call-backs.
*
struct thandle *txn, void *cookie);
int (*dtc_txn_stop)(const struct lu_env *env,
struct thandle *txn, void *cookie);
- void (*dtc_txn_commit)(struct thandle *txn, void *cookie);
void *dtc_cookie;
__u32 dtc_tag;
struct list_head dtc_linkage;
int dt_txn_hook_start(const struct lu_env *env,
struct dt_device *dev, struct thandle *txn);
int dt_txn_hook_stop(const struct lu_env *env, struct thandle *txn);
-void dt_txn_hook_commit(struct thandle *txn);
int dt_try_as_dir(const struct lu_env *env, struct dt_object *obj);
dev->dd_lu_dev.ld_site->ls_top_dev, NULL);
}
+static inline struct dt_object *
+dt_object_locate(struct dt_object *dto, struct dt_device *dt_dev)
+{
+ struct lu_object *lo;
+
+ list_for_each_entry(lo, &dto->do_lu.lo_header->loh_layers, lo_linkage) {
+ if (lo->lo_dev == &dt_dev->dd_lu_dev)
+ return container_of(lo, struct dt_object, do_lu);
+ }
+ return NULL;
+}
+
+static inline void dt_object_put(const struct lu_env *env,
+ struct dt_object *dto)
+{
+ lu_object_put(env, &dto->do_lu);
+}
+
+static inline void dt_object_put_nocache(const struct lu_env *env,
+ struct dt_object *dto)
+{
+ lu_object_put_nocache(env, &dto->do_lu);
+}
+
int local_oid_storage_init(const struct lu_env *env, struct dt_device *dev,
const struct lu_fid *first_fid,
struct local_oid_storage **los);
struct dt_object *o,
struct lu_attr *attr, struct dt_object_format *dof,
struct thandle *th);
+struct dt_object *local_file_find(const struct lu_env *env,
+ struct local_oid_storage *los,
+ struct dt_object *parent,
+ const char *name);
struct dt_object *local_file_find_or_create(const struct lu_env *env,
struct local_oid_storage *los,
struct dt_object *parent,
return o->do_ops->do_object_sync(env, o, start, end);
}
+static inline int dt_fid_alloc(const struct lu_env *env,
+ struct dt_device *d,
+ struct lu_fid *fid,
+ struct lu_object *parent,
+ const struct lu_name *name)
+{
+ struct lu_device *l = dt2lu_dev(d);
+
+ return l->ld_ops->ldo_fid_alloc(env, l, fid, parent, name);
+}
+
int dt_declare_version_set(const struct lu_env *env, struct dt_object *o,
struct thandle *th);
void dt_version_set(const struct lu_env *env, struct dt_object *o,
}
static inline int dt_declare_attr_get(const struct lu_env *env,
- struct dt_object *dt,
- struct lustre_capa *capa)
+ struct dt_object *dt)
{
LASSERT(dt);
LASSERT(dt->do_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_DECLARE_ATTR_GET))
return cfs_fail_err;
- return dt->do_ops->do_declare_attr_get(env, dt, capa);
+ return dt->do_ops->do_declare_attr_get(env, dt);
}
static inline int dt_attr_get(const struct lu_env *env, struct dt_object *dt,
- struct lu_attr *la, void *arg)
+ struct lu_attr *la)
{
LASSERT(dt);
LASSERT(dt->do_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_ATTR_GET))
return cfs_fail_err;
- return dt->do_ops->do_attr_get(env, dt, la, arg);
+ return dt->do_ops->do_attr_get(env, dt, la);
}
static inline int dt_declare_attr_set(const struct lu_env *env,
}
static inline int dt_attr_set(const struct lu_env *env, struct dt_object *dt,
- const struct lu_attr *la, struct thandle *th,
- struct lustre_capa *capa)
+ const struct lu_attr *la, struct thandle *th)
{
LASSERT(dt);
LASSERT(dt->do_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_ATTR_SET))
return cfs_fail_err;
- return dt->do_ops->do_attr_set(env, dt, la, th, capa);
+ return dt->do_ops->do_attr_set(env, dt, la, th);
}
static inline int dt_declare_ref_add(const struct lu_env *env,
return dt->do_ops->do_ref_del(env, dt, th);
}
-static inline struct obd_capa *dt_capa_get(const struct lu_env *env,
- struct dt_object *dt,
- struct lustre_capa *old, __u64 opc)
-{
- LASSERT(dt);
- LASSERT(dt->do_ops);
- LASSERT(dt->do_ops->do_capa_get);
- return dt->do_ops->do_capa_get(env, dt, old, opc);
-}
-
static inline int dt_bufs_get(const struct lu_env *env, struct dt_object *d,
struct niobuf_remote *rnb,
- struct niobuf_local *lnb, int rw,
- struct lustre_capa *capa)
+ struct niobuf_local *lnb, int maxlnb,
+ enum dt_bufs_type rw)
{
LASSERT(d);
LASSERT(d->do_body_ops);
LASSERT(d->do_body_ops->dbo_bufs_get);
return d->do_body_ops->dbo_bufs_get(env, d, rnb->rnb_offset,
- rnb->rnb_len, lnb, rw, capa);
+ rnb->rnb_len, lnb, maxlnb, rw);
}
static inline int dt_bufs_put(const struct lu_env *env, struct dt_object *d,
static inline int dt_write_commit(const struct lu_env *env,
- struct dt_object *d, struct niobuf_local *lnb,
- int n, struct thandle *th)
+ struct dt_object *d, struct niobuf_local *lnb,
+ int n, struct thandle *th, __u64 size)
{
- LASSERT(d);
- LASSERT(d->do_body_ops);
- LASSERT(d->do_body_ops->dbo_write_commit);
- return d->do_body_ops->dbo_write_commit(env, d, lnb, n, th);
+ LASSERT(d);
+ LASSERT(d->do_body_ops);
+ LASSERT(d->do_body_ops->dbo_write_commit);
+ return d->do_body_ops->dbo_write_commit(env, d, lnb, n, th, size);
}
static inline int dt_read_prep(const struct lu_env *env, struct dt_object *d,
return d->do_body_ops->dbo_read_prep(env, d, lnb, n);
}
+static inline int dt_declare_write(const struct lu_env *env,
+ struct dt_object *dt,
+ const struct lu_buf *buf, loff_t pos,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_declare_write);
+ return dt->do_body_ops->dbo_declare_write(env, dt, buf, pos, th);
+}
+
+static inline ssize_t dt_write(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_buf *buf, loff_t *pos,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_write);
+ return dt->do_body_ops->dbo_write(env, dt, buf, pos, th);
+}
+
static inline int dt_declare_punch(const struct lu_env *env,
struct dt_object *dt, __u64 start,
__u64 end, struct thandle *th)
}
static inline int dt_punch(const struct lu_env *env, struct dt_object *dt,
- __u64 start, __u64 end, struct thandle *th,
- struct lustre_capa *capa)
+ __u64 start, __u64 end, struct thandle *th)
{
LASSERT(dt);
LASSERT(dt->do_body_ops);
LASSERT(dt->do_body_ops->dbo_punch);
- return dt->do_body_ops->dbo_punch(env, dt, start, end, th, capa);
+ return dt->do_body_ops->dbo_punch(env, dt, start, end, th);
+}
+
+static inline int dt_ladvise(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, int advice)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_ladvise);
+ return dt->do_body_ops->dbo_ladvise(env, dt, start, end, advice);
+}
+
+static inline int dt_declare_falloc(const struct lu_env *env,
+ struct dt_object *dt, struct thandle *th)
+{
+ LASSERT(dt);
+ if (!dt->do_body_ops)
+ return -EOPNOTSUPP;
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_declare_fallocate);
+ return dt->do_body_ops->dbo_declare_fallocate(env, dt, th);
+}
+
+static inline int dt_falloc(const struct lu_env *env, struct dt_object *dt,
+ __u64 start, __u64 end, int mode,
+ struct thandle *th)
+{
+ LASSERT(dt);
+ if (!dt->do_body_ops)
+ return -EOPNOTSUPP;
+ LASSERT(dt->do_body_ops);
+ LASSERT(dt->do_body_ops->dbo_fallocate);
+ return dt->do_body_ops->dbo_fallocate(env, dt, start, end, mode, th);
}
static inline int dt_fiemap_get(const struct lu_env *env, struct dt_object *d,
- struct ll_user_fiemap *fm)
+ struct fiemap *fm)
{
LASSERT(d);
if (d->do_body_ops == NULL)
return d->do_body_ops->dbo_fiemap_get(env, d, fm);
}
+static inline int dt_statfs_info(const struct lu_env *env,
+ struct dt_device *dev,
+ struct obd_statfs *osfs,
+ struct obd_statfs_info *info)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ LASSERT(dev->dd_ops->dt_statfs);
+ return dev->dd_ops->dt_statfs(env, dev, osfs, info);
+}
+
static inline int dt_statfs(const struct lu_env *env, struct dt_device *dev,
- struct obd_statfs *osfs)
+ struct obd_statfs *osfs)
{
- LASSERT(dev);
- LASSERT(dev->dd_ops);
- LASSERT(dev->dd_ops->dt_statfs);
- return dev->dd_ops->dt_statfs(env, dev, osfs);
+ return dt_statfs_info(env, dev, osfs, NULL);
}
static inline int dt_root_get(const struct lu_env *env, struct dt_device *dev,
return dev->dd_ops->dt_conf_get(env, dev, param);
}
+static inline struct super_block *dt_mnt_sb_get(const struct dt_device *dev)
+{
+ LASSERT(dev);
+ LASSERT(dev->dd_ops);
+ if (dev->dd_ops->dt_mnt_sb_get)
+ return dev->dd_ops->dt_mnt_sb_get(dev);
+
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline int dt_sync(const struct lu_env *env, struct dt_device *dev)
{
LASSERT(dev);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_DECLARE_INSERT))
return cfs_fail_err;
- return dt->do_index_ops->dio_declare_insert(env, dt, rec, key, th);
+ return dt->do_index_ops->dio_declare_insert(env, dt, rec, key, th);
}
static inline int dt_insert(const struct lu_env *env,
- struct dt_object *dt,
- const struct dt_rec *rec,
- const struct dt_key *key,
- struct thandle *th,
- struct lustre_capa *capa,
- int noquota)
+ struct dt_object *dt,
+ const struct dt_rec *rec,
+ const struct dt_key *key,
+ struct thandle *th)
{
LASSERT(dt);
LASSERT(dt->do_index_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_INSERT))
return cfs_fail_err;
- return dt->do_index_ops->dio_insert(env, dt, rec, key, th,
- capa, noquota);
+ return dt->do_index_ops->dio_insert(env, dt, rec, key, th);
}
static inline int dt_declare_xattr_del(const struct lu_env *env,
}
static inline int dt_xattr_del(const struct lu_env *env,
- struct dt_object *dt, const char *name,
- struct thandle *th,
- struct lustre_capa *capa)
+ struct dt_object *dt, const char *name,
+ struct thandle *th)
{
LASSERT(dt);
LASSERT(dt->do_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_XATTR_DEL))
return cfs_fail_err;
- return dt->do_ops->do_xattr_del(env, dt, name, th, capa);
+ return dt->do_ops->do_xattr_del(env, dt, name, th);
}
static inline int dt_declare_xattr_set(const struct lu_env *env,
}
static inline int dt_xattr_set(const struct lu_env *env,
- struct dt_object *dt, const struct lu_buf *buf,
- const char *name, int fl, struct thandle *th,
- struct lustre_capa *capa)
+ struct dt_object *dt, const struct lu_buf *buf,
+ const char *name, int fl, struct thandle *th)
{
LASSERT(dt);
LASSERT(dt->do_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_XATTR_SET))
return cfs_fail_err;
- return dt->do_ops->do_xattr_set(env, dt, buf, name, fl, th, capa);
+ return dt->do_ops->do_xattr_set(env, dt, buf, name, fl, th);
}
static inline int dt_declare_xattr_get(const struct lu_env *env,
struct dt_object *dt,
struct lu_buf *buf,
- const char *name,
- struct lustre_capa *capa)
+ const char *name)
{
LASSERT(dt);
LASSERT(dt->do_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_DECLARE_XATTR_GET))
return cfs_fail_err;
- return dt->do_ops->do_declare_xattr_get(env, dt, buf, name, capa);
+ return dt->do_ops->do_declare_xattr_get(env, dt, buf, name);
}
static inline int dt_xattr_get(const struct lu_env *env,
- struct dt_object *dt, struct lu_buf *buf,
- const char *name, struct lustre_capa *capa)
+ struct dt_object *dt, struct lu_buf *buf,
+ const char *name)
{
LASSERT(dt);
LASSERT(dt->do_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_XATTR_GET))
return cfs_fail_err;
- return dt->do_ops->do_xattr_get(env, dt, buf, name, capa);
+ return dt->do_ops->do_xattr_get(env, dt, buf, name);
}
-static inline int dt_xattr_list(const struct lu_env *env,
- struct dt_object *dt, struct lu_buf *buf,
- struct lustre_capa *capa)
+static inline int dt_xattr_list(const struct lu_env *env, struct dt_object *dt,
+ const struct lu_buf *buf)
{
LASSERT(dt);
LASSERT(dt->do_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_XATTR_LIST))
return cfs_fail_err;
- return dt->do_ops->do_xattr_list(env, dt, buf, capa);
+ return dt->do_ops->do_xattr_list(env, dt, buf);
+}
+
+static inline int dt_invalidate(const struct lu_env *env, struct dt_object *dt)
+{
+ LASSERT(dt);
+ LASSERT(dt->do_ops);
+ LASSERT(dt->do_ops->do_invalidate);
+
+ return dt->do_ops->do_invalidate(env, dt);
}
static inline int dt_declare_delete(const struct lu_env *env,
}
static inline int dt_delete(const struct lu_env *env,
- struct dt_object *dt,
- const struct dt_key *key,
- struct thandle *th,
- struct lustre_capa *capa)
+ struct dt_object *dt,
+ const struct dt_key *key,
+ struct thandle *th)
{
LASSERT(dt);
LASSERT(dt->do_index_ops);
if (CFS_FAULT_CHECK(OBD_FAIL_DT_DELETE))
return cfs_fail_err;
- return dt->do_index_ops->dio_delete(env, dt, key, th, capa);
+ return dt->do_index_ops->dio_delete(env, dt, key, th);
}
static inline int dt_commit_async(const struct lu_env *env,
return dev->dd_ops->dt_commit_async(env, dev);
}
-static inline int dt_init_capa_ctxt(const struct lu_env *env,
- struct dt_device *dev,
- int mode, unsigned long timeout,
- __u32 alg, struct lustre_capa_key *keys)
-{
- LASSERT(dev);
- LASSERT(dev->dd_ops);
- LASSERT(dev->dd_ops->dt_init_capa_ctxt);
- return dev->dd_ops->dt_init_capa_ctxt(env, dev, mode,
- timeout, alg, keys);
-}
-
static inline int dt_lookup(const struct lu_env *env,
- struct dt_object *dt,
- struct dt_rec *rec,
- const struct dt_key *key,
- struct lustre_capa *capa)
+ struct dt_object *dt,
+ struct dt_rec *rec,
+ const struct dt_key *key)
{
int ret;
if (CFS_FAULT_CHECK(OBD_FAIL_DT_LOOKUP))
return cfs_fail_err;
- ret = dt->do_index_ops->dio_lookup(env, dt, rec, key, capa);
+ ret = dt->do_index_ops->dio_lookup(env, dt, rec, key);
if (ret > 0)
ret = 0;
else if (ret == 0)
return ret;
}
-#define LU221_BAD_TIME (0x80000000U + 24 * 3600)
+static inline int dt_declare_layout_change(const struct lu_env *env,
+ struct dt_object *o,
+ struct md_layout_change *mlc,
+ struct thandle *th)
+{
+ LASSERT(o);
+ LASSERT(o->do_ops);
+ LASSERT(o->do_ops->do_declare_layout_change);
+ return o->do_ops->do_declare_layout_change(env, o, mlc, th);
+}
+
+static inline int dt_layout_change(const struct lu_env *env,
+ struct dt_object *o,
+ struct md_layout_change *mlc,
+ struct thandle *th)
+{
+ LASSERT(o);
+ LASSERT(o->do_ops);
+ LASSERT(o->do_ops->do_layout_change);
+ return o->do_ops->do_layout_change(env, o, mlc, th);
+}
struct dt_find_hint {
struct lu_fid *dfh_fid;
int dt_global_init(void);
void dt_global_fini(void);
+int dt_tunables_init(struct dt_device *dt, struct obd_type *type,
+ const char *name, struct lprocfs_vars *list);
+int dt_tunables_fini(struct dt_device *dt);
-# ifdef LPROCFS
+# ifdef CONFIG_PROC_FS
int lprocfs_dt_blksize_seq_show(struct seq_file *m, void *v);
int lprocfs_dt_kbytestotal_seq_show(struct seq_file *m, void *v);
int lprocfs_dt_kbytesfree_seq_show(struct seq_file *m, void *v);
int lprocfs_dt_kbytesavail_seq_show(struct seq_file *m, void *v);
int lprocfs_dt_filestotal_seq_show(struct seq_file *m, void *v);
int lprocfs_dt_filesfree_seq_show(struct seq_file *m, void *v);
-# endif /* LPROCFS */
+# endif /* CONFIG_PROC_FS */
#endif /* __LUSTRE_DT_OBJECT_H */