* EA enabled, ldiskfs will make all EAs share one (4K) EA block.
*
* 3. Too many entries in linkEA will seriously affect linkEA performance
- * because we only support to locate linkEA entry consecutively. */
+ * because linkEA entries can only be located by sequential scan.
+ */
#define MAX_LINKEA_SIZE 4096
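For illustration, code growing a linkEA is expected to bounds-check against this limit first; a minimal sketch, assuming leh/reclen style names from the linkea code rather than anything introduced by this patch:

/* hedged sketch: refuse to grow the linkEA past one shared EA block */
if (unlikely(leh->leh_len + reclen > MAX_LINKEA_SIZE))
	return -EOVERFLOW;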
struct linkea_data {
#ifdef HAVE_SERVER_SUPPORT
void nodemap_test_nid(struct lnet_nid *nid, char *name_buf, size_t name_len);
#else
-#define nodemap_test_nid(nid, name_buf, name_len) do {} while(0)
+#define nodemap_test_nid(nid, name_buf, name_len) do {} while (0)
#endif
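The do {} while (0) form keeps the client-side stub statement-safe, so a braceless if/else compiles the same way with or without server support; an illustrative snippet (use_nodemap, buf, and nid are hypothetical):

if (use_nodemap)
	nodemap_test_nid(&nid, buf, sizeof(buf)); /* no-op stub on clients */
else
	buf[0] = '\0';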
int nodemap_test_id(struct lnet_nid *nid, enum nodemap_id_type idtype,
u32 client_id, u32 *fs_id);
struct nm_config_file *nm_config_file_register_mgs(const struct lu_env *env,
struct dt_object *obj,
- struct local_oid_storage *los);
+ struct local_oid_storage *l);
struct dt_device;
struct nm_config_file *nm_config_file_register_tgt(const struct lu_env *env,
struct dt_device *dev,
- struct local_oid_storage *los);
+ struct local_oid_storage *l);
void nm_config_file_deregister_mgs(const struct lu_env *env,
struct nm_config_file *ncf);
void nm_config_file_deregister_tgt(const struct lu_env *env,
*
* \param[in,out] policy The policy being initialized
*/
- int (*op_policy_init) (struct ptlrpc_nrs_policy *policy);
+ int (*op_policy_init)(struct ptlrpc_nrs_policy *policy);
/**
* Called during policy unregistration; this operation is optional.
*
* \param[in,out] policy The policy being unregistered/finalized
*/
- void (*op_policy_fini) (struct ptlrpc_nrs_policy *policy);
+ void (*op_policy_fini)(struct ptlrpc_nrs_policy *policy);
/**
* Called when activating a policy via lprocfs; policies allocate and
* initialize their resources here; this operation is optional.
*
* \see nrs_policy_start_locked()
*/
- int (*op_policy_start) (struct ptlrpc_nrs_policy *policy,
- char *arg);
+ int (*op_policy_start)(struct ptlrpc_nrs_policy *policy,
+ char *arg);
/**
* Called when deactivating a policy via lprocfs; policies deallocate
* their resources here; this operation is optional.
*
* \see nrs_policy_stop0()
*/
- void (*op_policy_stop) (struct ptlrpc_nrs_policy *policy);
+ void (*op_policy_stop)(struct ptlrpc_nrs_policy *policy);
/**
* Used for policy-specific operations; i.e. not generic ones like
* \e PTLRPC_NRS_CTL_START and \e PTLRPC_NRS_CTL_GET_INFO; analogous
*
* \see ptlrpc_nrs_policy_control()
*/
- int (*op_policy_ctl) (struct ptlrpc_nrs_policy *policy,
- enum ptlrpc_nrs_ctl opc, void *arg);
+ int (*op_policy_ctl)(struct ptlrpc_nrs_policy *policy,
+ enum ptlrpc_nrs_ctl opc, void *arg);
/**
* Called when obtaining references to the resources of the resource
* \see ptlrpc_nrs_hpreq_add_nolock()
* \see ptlrpc_nrs_req_hp_move()
*/
- int (*op_res_get) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq,
- const struct ptlrpc_nrs_resource *parent,
- struct ptlrpc_nrs_resource **resp,
- bool moving_req);
+ int (*op_res_get)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq,
+ const struct ptlrpc_nrs_resource *parent,
+ struct ptlrpc_nrs_resource **resp,
+ bool moving_req);
/**
* Called when releasing references taken for resources in the resource
* hierarchy for the request; this operation is optional.
* \see ptlrpc_nrs_hpreq_add_nolock()
* \see ptlrpc_nrs_req_hp_move()
*/
- void (*op_res_put) (struct ptlrpc_nrs_policy *policy,
- const struct ptlrpc_nrs_resource *res);
+ void (*op_res_put)(struct ptlrpc_nrs_policy *policy,
+ const struct ptlrpc_nrs_resource *res);
/**
* Obtains a request for handling from the policy, and optionally
* \see ptlrpc_nrs_req_get_nolock()
*/
struct ptlrpc_nrs_request *
- (*op_req_get) (struct ptlrpc_nrs_policy *policy, bool peek,
- bool force);
+ (*op_req_get)(struct ptlrpc_nrs_policy *policy, bool peek,
+ bool force);
/**
* Called when attempting to add a request to a policy for later
* handling; this operation is mandatory.
*
* \see ptlrpc_nrs_req_add_nolock()
*/
- int (*op_req_enqueue) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
+ int (*op_req_enqueue)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
/**
* Removes a request from the policy's set of pending requests. Normally
* called after a request has been polled successfully from the policy
*
* \see ptlrpc_nrs_req_del_nolock()
*/
- void (*op_req_dequeue) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
+ void (*op_req_dequeue)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
/**
* Called after the request has been carried out. Could be used for
* job/resource control; this operation is optional.
*
* \see ptlrpc_nrs_req_stop_nolock()
*/
- void (*op_req_stop) (struct ptlrpc_nrs_policy *policy,
- struct ptlrpc_nrs_request *nrq);
+ void (*op_req_stop)(struct ptlrpc_nrs_policy *policy,
+ struct ptlrpc_nrs_request *nrq);
/**
* Registers the policy's lprocfs interface with a PTLRPC service.
*
* \retval 0 success
* \retval != 0 error
*/
- int (*op_lprocfs_init) (struct ptlrpc_service *svc);
+ int (*op_lprocfs_init)(struct ptlrpc_service *svc);
/**
* Unregisters the policy's lprocfs interface from a PTLRPC service.
*
*
* \param[in] svc The service
*/
- void (*op_lprocfs_fini) (struct ptlrpc_service *svc);
+ void (*op_lprocfs_fini)(struct ptlrpc_service *svc);
};
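For orientation, a policy fills this ops table with the mandatory callbacks plus whichever optional ones it needs; a minimal sketch loosely modeled on the in-tree FIFO policy, where the nrs_demo_* handlers are hypothetical and the mandatory/optional labels follow the comments above:

static const struct ptlrpc_nrs_pol_ops nrs_demo_ops = {
	.op_res_get	= nrs_demo_res_get,	/* mandatory */
	.op_req_get	= nrs_demo_req_get,	/* mandatory */
	.op_req_enqueue	= nrs_demo_req_enqueue,	/* mandatory */
	.op_req_dequeue	= nrs_demo_req_dequeue,	/* mandatory */
	.op_req_stop	= nrs_demo_req_stop,	/* optional */
};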
/**
/**
* # policies on this NRS
*/
- unsigned nrs_num_pols;
+ unsigned int nrs_num_pols;
/**
* This NRS head is in the process of starting a policy
*/
/**
* Policy registration flags; a bitmask of \e nrs_policy_flags
*/
- unsigned nc_flags;
+ unsigned int nc_flags;
};
/**
/**
* Bitmask of \e nrs_policy_flags
*/
- unsigned pd_flags;
+ unsigned int pd_flags;
/**
* # of references on this descriptor
*/
/**
* Bitmask of nrs_policy_flags
*/
- unsigned pol_flags;
+ unsigned int pol_flags;
/**
* # RPCs enqueued for later dispatching by the policy
*/
* purpose of this object is to hold references to the request's resources
* for the lifetime of the request, and to hold properties that policies
* use for determining the request's scheduling priority.
- * */
+ */
struct ptlrpc_nrs_request {
/**
* The request's resource hierarchy.
*
* \see nrs_request_enqueue()
*/
- unsigned nr_res_idx;
- unsigned nr_initialized:1;
- unsigned nr_enqueued:1;
- unsigned nr_started:1;
- unsigned nr_finalized:1;
+ unsigned int nr_res_idx;
+ unsigned int nr_initialized:1;
+ unsigned int nr_enqueued:1;
+ unsigned int nr_started:1;
+ unsigned int nr_finalized:1;
struct binheap_node nr_node;
/**
*/
enum oap_async_flags {
- ASYNC_READY = 0x1, /* ap_make_ready will not be called before
- * this page is added to an rpc */
+ /* ap_make_ready will not be called before page is added to an rpc */
+ ASYNC_READY = 0x1,
ASYNC_URGENT = 0x2, /* page must be put into RPC before return */
- ASYNC_COUNT_STABLE = 0x4, /* ap_refresh_count will not be called
- to give the caller a chance to update
- or cancel the size of the io */
+ /* ap_refresh_count will not be called to give the caller a chance to
+ * update or cancel the size of the io
+ */
+ ASYNC_COUNT_STABLE = 0x4,
ASYNC_HP = 0x8,
OAP_ASYNC_MAX,
OAP_ASYNC_BITS = 4
};
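Illustratively, a page that is ready and must go out in the next RPC carries the first two bits together; a hedged snippet assuming the osc_async_page fields oap_async_flags and oap_pending_item, with hp_list hypothetical:

oap->oap_async_flags |= ASYNC_READY | ASYNC_URGENT;
if (oap->oap_async_flags & ASYNC_HP)
	list_move(&oap->oap_pending_item, &hp_list);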
/* add explicit padding to keep fields aligned despite "packed",
- * which is needed to pack with following field in osc_page */
+ * which is needed to pack with following field in osc_page
+ */
#define OAP_PAD_BITS (16 - OBD_BRW_WRITE - OAP_ASYNC_BITS)
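Assuming the upstream values OBD_BRW_WRITE == 0x2 and OAP_ASYNC_BITS == 4 (neither visible in this hunk), this works out to 10 pad bits, so cmd, flags, and padding fill exactly one 16-bit slot; the invariant could be made explicit with a compile-time check placed in any function:

/* sketch: 2 (cmd) + 4 (flags) + 10 (pad) == 16 bits */
BUILD_BUG_ON(OBD_BRW_WRITE + OAP_ASYNC_BITS + OAP_PAD_BITS != 16);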
struct osc_async_page {
unsigned short oap_page_off /* :PAGE_SHIFT */;
unsigned long oi_lru_reserved;
/** active extents, we know how many bytes is going to be written,
- * so having an active extent will prevent it from being fragmented */
+ * so having an active extent will prevent it from being fragmented
+ */
struct osc_extent *oi_active;
/** partially truncated extent, we need to hold this extent to prevent
- * page writeback from happening. */
+ * page writeback from happening.
+ */
struct osc_extent *oi_trunc;
/** write osc_lock for this IO, used by osc_extent_find(). */
struct osc_lock *oi_write_osclock;
atomic_t oo_nr_reads;
atomic_t oo_nr_writes;
- /** Protect extent tree. Will be used to protect
- * oo_{read|write}_pages soon. */
+ /** Protect extent tree; will protect oo_{read|write}_pages soon. */
spinlock_t oo_lock;
/**
/* osc_page.c */
int osc_page_init(const struct lu_env *env, struct cl_object *obj,
struct cl_page *page, pgoff_t ind);
-void osc_index2policy(union ldlm_policy_data *policy, const struct cl_object *obj,
- pgoff_t start, pgoff_t end);
+void osc_index2policy(union ldlm_policy_data *policy,
+ const struct cl_object *obj, pgoff_t start, pgoff_t end);
void osc_lru_add_batch(struct client_obd *cli, struct list_head *list);
void osc_page_submit(const struct lu_env *env, struct osc_page *opg,
enum cl_req_type crt, int brw_flags);
struct osc_object *osc, pgoff_t start, pgoff_t end,
osc_page_gang_cbt cb, void *cbdata);
bool osc_discard_cb(const struct lu_env *env, struct cl_io *io,
- void**, int, void *cbdata);
+ void **pvec, int count, void *cbdata);
/* osc_dev.c */
int osc_device_init(const struct lu_env *env, struct lu_device *d,
int osc_attr_get(const struct lu_env *env, struct cl_object *obj,
struct cl_attr *attr);
int osc_attr_update(const struct lu_env *env, struct cl_object *obj,
- const struct cl_attr *attr, unsigned valid);
+ const struct cl_attr *attr, unsigned int valid);
int osc_object_glimpse(const struct lu_env *env, const struct cl_object *obj,
struct ost_lvb *lvb);
int osc_object_invalidate(const struct lu_env *env, struct osc_object *osc);
int osc_ldlm_glimpse_ast(struct ldlm_lock *dlmlock, void *data);
unsigned long osc_ldlm_weigh_ast(struct ldlm_lock *dlmlock);
-/*****************************************************************************
- *
- * Accessors and type conversions.
- *
- */
+/* Accessors and type conversions. */
static inline struct osc_thread_info *osc_env_info(const struct lu_env *env)
{
struct osc_thread_info *info;
/** sync extent, queued by osc_queue_sync_pages() */
oe_sync:1,
/** set if this extent has partial, sync pages.
- * Extents with partial page(s) can't merge with others in RPC */
+ * Extents with partial page(s) can't be merged with others in an RPC
+ */
oe_no_merge:1,
oe_srvlock:1,
oe_memalloc:1,
/** an ACTIVE extent is going to be truncated, so when this extent
- * is released, it will turn into TRUNC state instead of CACHE. */
+ * is released, it will turn into TRUNC state instead of CACHE.
+ */
oe_trunc_pending:1,
/** this extent should be written asap and someone may wait for the
* write to finish. This bit is usually set along with urgent if
* the extent was in CACHE state.
* fsync_wait extent can't be merged because new extent region may
- * exceed fsync range. */
+ * exceed fsync range.
+ */
oe_fsync_wait:1,
/** covering lock is being canceled */
oe_hp:1,
/** this extent should be written back asap. Set if one of its pages is
- * called by page WB daemon, or sync write or reading requests. */
+ * picked by the page WB daemon, or by sync write or read requests.
+ */
oe_urgent:1,
/** Non-delay RPC should be used for this extent. */
oe_ndelay:1,
/** direct IO pages */
oe_dio:1,
/** this extent consists of pages that are not directly accessible
- * from the CPU */
+ * from the CPU
+ */
oe_is_rdma_only:1;
/** Grant allocated for this extent. There is no grant allocated
- * for reading extents and sync write extents. */
+ * for reading extents and sync write extents.
+ */
unsigned int oe_grants;
/** # of dirty pages in this extent */
unsigned int oe_nr_pages;
struct list_head oe_pages;
/** start and end index of this extent, both inclusive.
* Page offset here is the page index of osc_pages.
- * oe_start is used as keyword for red-black tree. */
+ * oe_start is used as the key for the red-black tree.
+ */
pgoff_t oe_start;
pgoff_t oe_end;
/** maximum ending index of this extent, this is limited by
- * max_pages_per_rpc, lock extent and chunk size. */
+ * max_pages_per_rpc, lock extent and chunk size.
+ */
pgoff_t oe_max_end;
/** waitqueue - for those who want to be notified if this extent's
- * state has changed. */
+ * state has changed.
+ */
wait_queue_head_t oe_waitq;
/** lock covering this extent */
struct ldlm_lock *oe_dlmlock;
/** terminator of this extent. Must be set if this extent is in IO. */
struct task_struct *oe_owner;
/** return value of writeback. If somebody is waiting for this extent,
- * this value can be known by outside world. */
+ * this value lets them observe the result.
+ */
int oe_rc;
/** max pages per rpc when this extent was created */
unsigned int oe_mppr;
/* Gather all quota record types in a union that can be used to read any record
* from disk. All fields of these records must be 64-bit aligned, otherwise the
- * OSD layer may swab them incorrectly. */
+ * OSD layer may swab them incorrectly.
+ */
union lquota_rec {
struct lquota_glb_rec lqr_glb_rec;
struct lquota_slv_rec lqr_slv_rec;
/* Index features supported by the global index objects
* Only used for migration purposes and should be removed once on-disk migration
- * is no longer needed */
+ * is no longer needed
+ */
extern struct dt_index_features dt_quota_iusr_features;
extern struct dt_index_features dt_quota_busr_features;
extern struct dt_index_features dt_quota_igrp_features;
/* Name used in the configuration logs to identify the default metadata pool
* (composed of all the MDTs, with pool ID 0) and the default data pool (all
- * the OSTs, with pool ID 0 too). */
+ * the OSTs, with pool ID 0 too).
+ */
#define QUOTA_METAPOOL_NAME "mdt="
#define QUOTA_DATAPOOL_NAME "ost="
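These prefixes get the pool ID appended when quota entries are written to or parsed from the configuration logs; a hedged sketch of building the default pool names (the poolname buffer is hypothetical):

char poolname[16];

/* "mdt=0" for the default metadata pool, "ost=0" analogously for data */
snprintf(poolname, sizeof(poolname), "%s%d", QUOTA_METAPOOL_NAME, 0);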
/* Request handlers for quota master operations.
* This is used by the MDT to pass quota/lock requests to the quota master
* target. This won't be needed any more once the QMT is a real target and
- * does not rely any more on the MDT service threads and namespace. */
+ * no longer relies on the MDT service threads and namespace.
+ */
struct qmt_handlers {
/* Handle quotactl request from client. */
- int (*qmth_quotactl)(const struct lu_env *, struct lu_device *,
- struct obd_quotactl *);
+ int (*qmth_quotactl)(const struct lu_env *env, struct lu_device *d,
+ struct obd_quotactl *oqctl);
/* Handle dqacq/dqrel request from slave. */
- int (*qmth_dqacq)(const struct lu_env *, struct lu_device *,
- struct ptlrpc_request *);
+ int (*qmth_dqacq)(const struct lu_env *env, struct lu_device *d,
+ struct ptlrpc_request *req);
/* LDLM intent policy associated with quota locks */
- int (*qmth_intent_policy)(const struct lu_env *, struct lu_device *,
- struct ptlrpc_request *, struct ldlm_lock **,
- int);
+ int (*qmth_intent_policy)(const struct lu_env *env,
+ struct lu_device *d,
+ struct ptlrpc_request *req,
+ struct ldlm_lock **lock, int flags);
/* Initialize LVB of ldlm resource associated with quota objects */
- int (*qmth_lvbo_init)(struct lu_device *, struct ldlm_resource *);
+ int (*qmth_lvbo_init)(struct lu_device *d, struct ldlm_resource *res);
/* Update LVB of ldlm resource associated with quota objects */
- int (*qmth_lvbo_update)(struct lu_device *, struct ldlm_resource *,
- struct ptlrpc_request *, int);
+ int (*qmth_lvbo_update)(struct lu_device *d, struct ldlm_resource *res,
+ struct ptlrpc_request *req, int increase);
/* Return size of LVB to be packed in ldlm message */
- int (*qmth_lvbo_size)(struct lu_device *, struct ldlm_lock *);
+ int (*qmth_lvbo_size)(struct lu_device *d, struct ldlm_lock *lock);
/* Fill request buffer with lvb */
- int (*qmth_lvbo_fill)(struct lu_device *, struct ldlm_lock *, void *,
- int);
+ int (*qmth_lvbo_fill)(struct lu_device *d, struct ldlm_lock *lock,
+ void *lvb, int lvblen);
/* Free lvb associated with ldlm resource */
- int (*qmth_lvbo_free)(struct lu_device *, struct ldlm_resource *);
+ int (*qmth_lvbo_free)(struct lu_device *d, struct ldlm_resource *res);
};
/* actual handlers are defined in lustre/quota/qmt_handler.c */
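Until the QMT stands alone, the MDT side dispatches through this table; a sketch of the call shape, where qmt_hdls (the registered handler table), qmt_dev, and obd_name are hypothetical names:

/* forward a client quotactl request to the quota master target */
rc = qmt_hdls->qmth_quotactl(env, qmt_dev, oqctl);
if (rc)
	CERROR("%s: quotactl failed: rc = %d\n", obd_name, rc);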
* - qsd_op_adjust(): triggers pre-acquire/release if necessary.
*
* Below are the function prototypes to be used by OSD layer to manage quota
- * enforcement. Arguments are documented where each function is defined. */
+ * enforcement. Arguments are documented where each function is defined.
+ */
/* flags for quota local enforcement */
enum osd_quota_local_flags {
QUOTA_FL_ROOT_PRJQUOTA = BIT(4),
};
-struct qsd_instance *qsd_init(const struct lu_env *, char *, struct dt_device *,
- struct proc_dir_entry *, bool is_md, bool excl);
-int qsd_prepare(const struct lu_env *, struct qsd_instance *);
-int qsd_start(const struct lu_env *, struct qsd_instance *);
-void qsd_fini(const struct lu_env *, struct qsd_instance *);
-int qsd_op_begin(const struct lu_env *, struct qsd_instance *,
- struct lquota_trans *, struct lquota_id_info *,
- enum osd_quota_local_flags *);
-void qsd_op_end(const struct lu_env *, struct qsd_instance *,
- struct lquota_trans *);
-void qsd_op_adjust(const struct lu_env *, struct qsd_instance *,
- union lquota_id *, int);
+struct qsd_instance *qsd_init(const struct lu_env *env, char *svname,
+ struct dt_device *d, struct proc_dir_entry *proc,
+ bool is_md, bool excl);
+int qsd_prepare(const struct lu_env *env, struct qsd_instance *qsd);
+int qsd_start(const struct lu_env *env, struct qsd_instance *qsd);
+void qsd_fini(const struct lu_env *env, struct qsd_instance *qsd);
+int qsd_op_begin(const struct lu_env *env, struct qsd_instance *qsd,
+ struct lquota_trans *trans, struct lquota_id_info *lqi,
+ enum osd_quota_local_flags *flags);
+void qsd_op_end(const struct lu_env *env, struct qsd_instance *qsd,
+ struct lquota_trans *trans);
+void qsd_op_adjust(const struct lu_env *env, struct qsd_instance *qsd,
+ union lquota_id *qid, int qtype);
int qsd_transfer(const struct lu_env *env, struct qsd_instance *qsd,
struct lquota_trans *trans, unsigned int qtype,
u64 orig_id, u64 new_id, u64 bspace,
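Taken together, these prototypes imply the following slave lifecycle; a hedged sketch with hypothetical caller-side variables and error handling elided:

/* OSD-side wiring: init, prepare, start; fini at cleanup time */
qsd = qsd_init(env, svname, dev, proc, true /* is_md */, false /* excl */);
rc = qsd_prepare(env, qsd);
if (rc == 0)
	rc = qsd_start(env, qsd);
/* per-operation enforcement then runs via qsd_op_begin()/qsd_op_end() */
qsd_fini(env, qsd);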
union lquota_id lqi_id;
/* USRQUOTA or GRPQUOTA for now, could be expanded for
- * directory quota or other types later. */
+ * directory quota or other types later.
+ */
int lqi_type;
/* inodes or kbytes to be consumed or released; it can
- * be negative when releasing space. */
+ * be negative when releasing space.
+ */
long long lqi_space;
/* quota slave entry structure associated with this ID */
* original uid and gid, new uid and gid.
*
* Given a parent dir and a sub dir, with different uid, gid and project id,
- * need <parent,child> x <user,group,project> x <block,inode> = 12 ids */
+ * we need <parent,child> x <user,group,project> x <block,inode> = 12 ids
+ */
#define QUOTA_MAX_TRANSIDS 12
/* all qids involved in a single transaction */
res->lr_name.name[LUSTRE_RES_ID_SEQ_OFF] == FID_SEQ_QUOTA_GLB)
/* helper function used by MDT & OFD to retrieve quota accounting information
- * on slave */
-int lquotactl_slv(const struct lu_env *, struct dt_device *,
- struct obd_quotactl *);
+ * on slave
+ */
+int lquotactl_slv(const struct lu_env *env, struct dt_device *dt,
+ struct obd_quotactl *oqctl);
+
/** @} quota */
#endif /* _LUSTRE_QUOTA_H */
* OI inconsistency, especially when an OI scrub has just completed.
*
* The 'auto_scrub' value defines the time interval (in seconds) to
- * enable auto detect OI inconsistency since last OI scurb done. */
+ * enable auto detection of OI inconsistency since the last OI scrub.
+ */
enum auto_scrub {
/* Disable auto scrub. */
AS_NEVER = 0,
/* 1 second is too short an interval; it effectively means always auto
- * detect inconsistent OI, usually used for test. */
+ * detecting inconsistent OI, usually used for testing.
+ */
AS_ALWAYS = 1,
/* Enable auto detection of OI inconsistency one month (60 * 60 * 24 * 30)
- * after last OI scrub. */
+ * after the last OI scrub.
+ */
AS_DEFAULT = 2592000LL,
};
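In other words, automatic detection re-arms only once the configured number of seconds has elapsed since the last scrub completed; an illustrative check, where the field and trigger names are assumptions rather than part of this patch:

if (ktime_get_real_seconds() - scrub->os_time_last_complete >=
    dev->od_auto_scrub_interval)
	osd_scrub_start(env, dev, SS_AUTO_FULL); /* hypothetical trigger */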
struct list_head os_stale_items;
/* write lock for scrub prep/update/post/checkpoint,
- * read lock for scrub dump. */
+ * read lock for scrub dump.
+ */
struct rw_semaphore os_rwsem;
spinlock_t os_lock;
* all updates must be protected by ->os_lock to avoid
* racing read-modify-write cycles causing corruption.
*/
- unsigned int os_in_prior:1, /* process inconsistent item
- * found by RPC prior */
+ /* process inconsistent item found by RPC prior */
+ unsigned int os_in_prior:1,
os_waiting:1, /* Waiting for scan window. */
os_full_speed:1, /* run w/o speed limit */
os_paused:1, /* The scrub is paused. */
/* {top,sub}_thandle are used to manage distributed transactions which
* include updates on several nodes. A top_thandle represents the
- * whole operation, and sub_thandle represents updates on each node. */
+ * whole operation, and sub_thandle represents updates on each node.
+ */
struct top_thandle {
struct thandle tt_super;
/* The master sub transaction. */
struct list_head stc_list;
};
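The resulting shape is one top_thandle per distributed operation and one sub_thandle per participating node, e.g. (illustrative):

/*
 * top_thandle (whole cross-node operation)
 *   +-- sub_thandle (master/local updates)
 *   +-- sub_thandle (remote updates, one per other node)
 */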
-/* Sub thandle is used to track multiple sub thandles under one parent
- * thandle */
+/* Sub thandle used to track multiple sub thandles under one parent thandle */
struct sub_thandle {
struct thandle *st_sub_th;
struct dt_device *st_dt;