#ifndef _LUSTRE_QUOTA_H
#define _LUSTRE_QUOTA_H
/** \defgroup quota quota
 *
 * @{
 */

#if defined(__linux__)
#include <linux/lustre_quota.h>
#elif defined(__APPLE__)
#include <lustre/lustre_idl.h>
#include <lvfs.h>
#include <obd_support.h>

/* Forward declarations only: a pointer is enough below, so avoid
 * pulling in the full obd headers here. */
struct obd_device;
struct client_obd;
struct lustre_dquot {
/** Hash list in memory, protect by dquot_hash_lock */
- struct list_head dq_hash;
+ cfs_list_t dq_hash;
/** Protect the data in lustre_dquot */
- struct semaphore dq_sem;
+ cfs_semaphore_t dq_sem;
/** Use count */
int dq_refcnt;
/** Pointer of quota info it belongs to */
};
struct dquot_id {
- struct list_head di_link;
+ cfs_list_t di_link;
__u32 di_id;
__u32 di_flag;
};
int lustre_commit_dquot(struct lustre_dquot *dquot);
int lustre_init_quota_info(struct lustre_quota_info *lqi, int type);
int lustre_get_qids(struct file *file, struct inode *inode, int type,
- struct list_head *list);
+ cfs_list_t *list);
int lustre_quota_convert(struct lustre_quota_info *lqi, int type);
typedef int (*dqacq_handler_t) (struct obd_device * obd, struct qunit_data * qd,
* 0:Off, 1:On
*/
lqc_valid:1, /** this qctxt is valid or not */
- lqc_setup:1, /**
+ lqc_setup:1; /**
* tell whether of not quota_type has
* been processed, so that the master
* knows when it can start processing
* incoming acq/rel quota requests
*/
- lqc_immutable:1; /**
- * cannot be turned on/off on-fly;
- * temporary used by SOM.
- */
        /** @} */
/**
* original unit size of file quota and
/** See comment of lqc_itune_sz */
unsigned long lqc_btune_sz;
/** all lustre_qunit_size structures */
- struct lustre_hash *lqc_lqs_hash;
+ cfs_hash_t *lqc_lqs_hash;
/** @{ */
/**
*/
int lqc_sync_blk;
/** guard lqc_imp_valid now */
- spinlock_t lqc_lock;
+ cfs_spinlock_t lqc_lock;
/**
* when mds isn't connected, threads
* on osts who send the quota reqs
/** lquota statistics */
struct lprocfs_stats *lqc_stats;
/** the number of used hashed lqs */
- atomic_t lqc_lqs;
+ cfs_atomic_t lqc_lqs;
/** no lqs are in use */
cfs_waitq_t lqc_lqs_waitq;
};
#define QUOTA_MASTER_UNREADY(qctxt) (qctxt)->lqc_setup = 0
struct lustre_qunit_size {
- struct hlist_node lqs_hash; /** the hash entry */
+ cfs_hlist_node_t lqs_hash; /** the hash entry */
unsigned int lqs_id; /** id of user/group */
unsigned long lqs_flags; /** 31st bit is QB_SET, 30th bit is QI_SET
* other bits are same as LQUOTA_FLAGS_*
long long lqs_ino_rec;
/** when blocks are allocated/released, this value will record it */
long long lqs_blk_rec;
- atomic_t lqs_refcount;
+ cfs_atomic_t lqs_refcount;
cfs_time_t lqs_last_bshrink; /** time of last block shrink */
cfs_time_t lqs_last_ishrink; /** time of last inode shrink */
- spinlock_t lqs_lock;
+ cfs_spinlock_t lqs_lock;
unsigned long long lqs_key; /** hash key */
struct lustre_quota_ctxt *lqs_ctxt; /** quota ctxt */
};
static inline void __lqs_getref(struct lustre_qunit_size *lqs)
{
        int count = cfs_atomic_inc_return(&lqs->lqs_refcount);

        /* Refcount going 1 -> 2 means the entry created in
         * quota_create_lqs (which holds the first reference) is now
         * actually in use: bump the per-ctxt count of used lqs. */
        if (count == 2) /* quota_create_lqs */
                cfs_atomic_inc(&lqs->lqs_ctxt->lqc_lqs);
        CDEBUG(D_INFO, "lqs=%p refcount %d\n", lqs, count);
}
static inline void lqs_getref(struct lustre_qunit_size *lqs)
static inline void __lqs_putref(struct lustre_qunit_size *lqs)
{
        LASSERT(cfs_atomic_read(&lqs->lqs_refcount) > 0);

        /* Dropping back to refcount 1 means this lqs is no longer in
         * use; when the context's count of used lqs reaches zero, wake
         * anyone waiting on lqc_lqs_waitq ("no lqs are in use"). */
        if (cfs_atomic_dec_return(&lqs->lqs_refcount) == 1)
                if (cfs_atomic_dec_and_test(&lqs->lqs_ctxt->lqc_lqs))
                        cfs_waitq_signal(&lqs->lqs_ctxt->lqc_lqs_waitq);
        CDEBUG(D_INFO, "lqs=%p refcount %d\n",
               lqs, cfs_atomic_read(&lqs->lqs_refcount));
}
static inline void lqs_putref(struct lustre_qunit_size *lqs)
static inline void lqs_initref(struct lustre_qunit_size *lqs)
{
        /* A freshly created lqs starts with no references. */
        cfs_atomic_set(&lqs->lqs_refcount, 0);
}
#else
struct obd_device *qta_obd; /** obd device */
struct obd_quotactl qta_oqctl; /** obd_quotactl args */
struct super_block *qta_sb; /** obd super block */
- struct semaphore *qta_sem; /** obt_quotachecking */
+ cfs_semaphore_t *qta_sem; /** obt_quotachecking */
};
struct obd_trans_info;
* can finish a block_write or inode_create rpc. It updates the pending
* record of block and inode, acquires quota if necessary
*/
- int (*quota_chkquota) (struct obd_device *, const unsigned int [],
- int [], int, quota_acquire,
- struct obd_trans_info *, int, struct inode *,
- int);
+ int (*quota_chkquota) (struct obd_device *, struct obd_export *,
+ const unsigned int [], int [],
+ int, quota_acquire, struct obd_trans_info *,
+ int, struct inode *, int);
/**
* For quota client, the actions after the pending write is committed
#ifdef __KERNEL__
static inline int lquota_chkquota(quota_interface_t *interface,
struct obd_device *obd,
+ struct obd_export *exp,
const unsigned int id[], int pending[],
int count, struct obd_trans_info *oti,
int isblk, void *data, int frags)
QUOTA_CHECK_OP(interface, chkquota);
QUOTA_CHECK_OP(interface, acquire);
- rc = QUOTA_OP(interface, chkquota)(obd, id, pending, count,
+ rc = QUOTA_OP(interface, chkquota)(obd, exp, id, pending, count,
QUOTA_OP(interface, acquire), oti,
isblk, (struct inode *)data, frags);
RETURN(rc);
"admin_quotafile_v2.grp" /** group admin quotafile */\
}
/** @} quota */

#endif /* _LUSTRE_QUOTA_H */