* GPL HEADER END
*/
/*
- * Copyright 2008 Sun Microsystems, Inc. All rights reserved
+ * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Use is subject to license terms.
*/
/*
#ifndef _LUSTRE_QUOTA_H
#define _LUSTRE_QUOTA_H
+/** \defgroup quota quota
+ *
+ * @{
+ */
+
#if defined(__linux__)
#include <linux/lustre_quota.h>
#elif defined(__APPLE__)
#error Unsupported operating system.
#endif
-#include <lustre/lustre_idl.h>
#include <lustre_net.h>
+#include <lustre/lustre_idl.h>
#include <lvfs.h>
#include <obd_support.h>
-#include <class_hash.h>
struct obd_device;
struct client_obd;
#define NR_DQHASH 45
#endif
+#ifndef QUOTABLOCK_BITS
+#define QUOTABLOCK_BITS 10
+#endif
+
+#ifndef QUOTABLOCK_SIZE
+#define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS)
+#endif
+
+#ifndef toqb
+#define toqb(x) (((x) + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS)
+#endif
+
#ifdef HAVE_QUOTA_SUPPORT
+#ifndef MAX_IQ_TIME
+#define MAX_IQ_TIME 604800 /* (7*24*60*60) 1 week */
+#endif
+
+#ifndef MAX_DQ_TIME
+#define MAX_DQ_TIME 604800 /* (7*24*60*60) 1 week */
+#endif
+
#ifdef __KERNEL__
#ifdef LPROCFS
lustre_quota_version_t qi_version;
};
-#define DQ_STATUS_AVAIL 0x0 /* Available dquot */
-#define DQ_STATUS_SET 0x01 /* Sombody is setting dquot */
-#define DQ_STATUS_RECOVERY 0x02 /* dquot is in recovery */
-
struct lustre_mem_dqblk {
__u64 dqb_bhardlimit; /**< absolute limit on disk blks alloc */
__u64 dqb_bsoftlimit; /**< preferred limit on disk blks */
struct lustre_dquot {
/** Hash list in memory, protect by dquot_hash_lock */
- struct list_head dq_hash;
+ cfs_list_t dq_hash;
/** Protect the data in lustre_dquot */
- struct semaphore dq_sem;
+ cfs_semaphore_t dq_sem;
/** Use count */
- int dq_refcnt;
+ cfs_atomic_t dq_refcnt;
/** Pointer of quota info it belongs to */
struct lustre_quota_info *dq_info;
/** Offset of dquot on disk */
unsigned int dq_id;
/** Type fo quota (USRQUOTA, GRPQUOUTA) */
int dq_type;
- /** See DQ_STATUS_ */
- unsigned short dq_status;
/** See DQ_ in quota.h */
unsigned long dq_flags;
/** Diskquota usage */
};
struct dquot_id {
- struct list_head di_link;
+ cfs_list_t di_link;
__u32 di_id;
+ __u32 di_flag;
};
+/* set inode quota limitation on a quota uid/gid */
+#define QI_SET (1 << 30)
+/* set block quota limitation on a quota uid/gid */
+#define QB_SET (1 << 31)
#define QFILE_CHK 1
#define QFILE_RD_INFO 2
int lustre_commit_dquot(struct lustre_dquot *dquot);
int lustre_init_quota_info(struct lustre_quota_info *lqi, int type);
int lustre_get_qids(struct file *file, struct inode *inode, int type,
- struct list_head *list);
+ cfs_list_t *list);
int lustre_quota_convert(struct lustre_quota_info *lqi, int type);
-#define LL_DQUOT_OFF(sb) DQUOT_OFF(sb)
-
typedef int (*dqacq_handler_t) (struct obd_device * obd, struct qunit_data * qd,
int opc);
/** See comment of lqc_itune_sz */
unsigned long lqc_btune_sz;
/** all lustre_qunit_size structures */
- struct lustre_hash *lqc_lqs_hash;
+ cfs_hash_t *lqc_lqs_hash;
/** @{ */
/**
*/
int lqc_sync_blk;
/** guard lqc_imp_valid now */
- spinlock_t lqc_lock;
+ cfs_spinlock_t lqc_lock;
/**
* when mds isn't connected, threads
* on osts who send the quota reqs
struct proc_dir_entry *lqc_proc_dir;
/** lquota statistics */
struct lprocfs_stats *lqc_stats;
+ /** the number of used hashed lqs */
+ cfs_atomic_t lqc_lqs;
+ /** no lqs are in use */
+ cfs_waitq_t lqc_lqs_waitq;
};
#define QUOTA_MASTER_READY(qctxt) (qctxt)->lqc_setup = 1
#define QUOTA_MASTER_UNREADY(qctxt) (qctxt)->lqc_setup = 0
struct lustre_qunit_size {
- struct hlist_node lqs_hash; /** the hash entry */
+ cfs_hlist_node_t lqs_hash; /** the hash entry */
unsigned int lqs_id; /** id of user/group */
- unsigned long lqs_flags; /** is user/group; FULLBUF or LESSBUF */
+ unsigned long lqs_flags; /** 31st bit is QB_SET, 30th bit is QI_SET
+ * other bits are same as LQUOTA_FLAGS_*
+ */
unsigned long lqs_iunit_sz; /** Unit size of file quota currently */
/**
* Trigger dqacq when available file quota
long long lqs_ino_rec;
/** when blocks are allocated/released, this value will record it */
long long lqs_blk_rec;
- atomic_t lqs_refcount;
+ cfs_atomic_t lqs_refcount;
cfs_time_t lqs_last_bshrink; /** time of last block shrink */
cfs_time_t lqs_last_ishrink; /** time of last inode shrink */
- spinlock_t lqs_lock;
- struct quota_adjust_qunit lqs_key; /** hash key */
+ cfs_spinlock_t lqs_lock;
+ unsigned long long lqs_key; /** hash key */
struct lustre_quota_ctxt *lqs_ctxt; /** quota ctxt */
};
-#define LQS_IS_GRP(lqs) ((lqs)->lqs_flags & LQUOTA_FLAGS_GRP)
-#define LQS_IS_ADJBLK(lqs) ((lqs)->lqs_flags & LQUOTA_FLAGS_ADJBLK)
-#define LQS_IS_ADJINO(lqs) ((lqs)->lqs_flags & LQUOTA_FLAGS_ADJINO)
+#define LQS_IS_GRP(lqs) ((lqs)->lqs_flags & LQUOTA_FLAGS_GRP)
+#define LQS_IS_ADJBLK(lqs) ((lqs)->lqs_flags & LQUOTA_FLAGS_ADJBLK)
+#define LQS_IS_ADJINO(lqs) ((lqs)->lqs_flags & LQUOTA_FLAGS_ADJINO)
+#define LQS_IS_RECOVERY(lqs) ((lqs)->lqs_flags & LQUOTA_FLAGS_RECOVERY)
+#define LQS_IS_SETQUOTA(lqs) ((lqs)->lqs_flags & LQUOTA_FLAGS_SETQUOTA)
+
+#define LQS_SET_GRP(lqs) ((lqs)->lqs_flags |= LQUOTA_FLAGS_GRP)
+#define LQS_SET_ADJBLK(lqs) ((lqs)->lqs_flags |= LQUOTA_FLAGS_ADJBLK)
+#define LQS_SET_ADJINO(lqs) ((lqs)->lqs_flags |= LQUOTA_FLAGS_ADJINO)
+#define LQS_SET_RECOVERY(lqs) ((lqs)->lqs_flags |= LQUOTA_FLAGS_RECOVERY)
+#define LQS_SET_SETQUOTA(lqs) ((lqs)->lqs_flags |= LQUOTA_FLAGS_SETQUOTA)
-#define LQS_SET_GRP(lqs) ((lqs)->lqs_flags |= LQUOTA_FLAGS_GRP)
-#define LQS_SET_ADJBLK(lqs) ((lqs)->lqs_flags |= LQUOTA_FLAGS_ADJBLK)
-#define LQS_SET_ADJINO(lqs) ((lqs)->lqs_flags |= LQUOTA_FLAGS_ADJINO)
+#define LQS_CLEAR_RECOVERY(lqs) ((lqs)->lqs_flags &= ~LQUOTA_FLAGS_RECOVERY)
+#define LQS_CLEAR_SETQUOTA(lqs) ((lqs)->lqs_flags &= ~LQUOTA_FLAGS_SETQUOTA)
+
+/* In the hash for lustre_qunit_size, the key is decided by
+ * grp_or_usr and uid/gid, in here, I combine these two values,
+ * which will make comparing easier and more efficient */
+#define LQS_KEY(is_grp, id) ((is_grp ? 1ULL << 32: 0) + id)
+#define LQS_KEY_ID(key) (key & 0xffffffff)
+#define LQS_KEY_GRP(key) (key >> 32)
static inline void lqs_getref(struct lustre_qunit_size *lqs)
{
- atomic_inc(&lqs->lqs_refcount);
- CDEBUG(D_QUOTA, "lqs=%p refcount %d\n",
- lqs, atomic_read(&lqs->lqs_refcount));
+ int count = cfs_atomic_inc_return(&lqs->lqs_refcount);
+
+ CDEBUG(D_INFO, "lqs=%p refcount %d\n", lqs, count);
}
static inline void lqs_putref(struct lustre_qunit_size *lqs)
{
- LASSERT(atomic_read(&lqs->lqs_refcount) > 0);
+ int count = cfs_atomic_read(&lqs->lqs_refcount);
- /* killing last ref, let's let hash table kill it */
- if (atomic_read(&lqs->lqs_refcount) == 1) {
- lustre_hash_del(lqs->lqs_ctxt->lqc_lqs_hash,
- &lqs->lqs_key, &lqs->lqs_hash);
- OBD_FREE_PTR(lqs);
- } else {
- atomic_dec(&lqs->lqs_refcount);
- CDEBUG(D_QUOTA, "lqs=%p refcount %d\n",
- lqs, atomic_read(&lqs->lqs_refcount));
+ LASSERT(count > 0);
+ CDEBUG(D_INFO, "lqs=%p refcount %d\n", lqs, count - 1);
+ if (cfs_atomic_dec_and_test(&lqs->lqs_refcount)) {
+ if (cfs_atomic_dec_and_test(&lqs->lqs_ctxt->lqc_lqs))
+ cfs_waitq_signal(&lqs->lqs_ctxt->lqc_lqs_waitq);
+ OBD_FREE_PTR(lqs);
}
}
-static inline void lqs_initref(struct lustre_qunit_size *lqs)
-{
- atomic_set(&lqs->lqs_refcount, 0);
-}
-
#else
struct lustre_quota_info {
struct obd_device *qta_obd; /** obd device */
struct obd_quotactl qta_oqctl; /** obd_quotactl args */
struct super_block *qta_sb; /** obd super block */
- atomic_t *qta_sem; /** obt_quotachecking */
+ cfs_semaphore_t *qta_sem; /** obt_quotachecking */
};
struct obd_trans_info;
-typedef int (*quota_acquire)(struct obd_device *obd, unsigned int uid,
- unsigned int gid, struct obd_trans_info *oti,
- int isblk);
+typedef int (*quota_acquire)(struct obd_device *obd, const unsigned int id[],
+ struct obd_trans_info *oti, int isblk);
typedef struct {
int (*quota_init) (void);
/**
* For quota master/slave, adjust quota limit after fs operation
*/
- int (*quota_adjust) (struct obd_device *, unsigned int[],
- unsigned int[], int, int);
+ int (*quota_adjust) (struct obd_device *, const unsigned int[],
+ const unsigned int[], int, int);
/**
* For quota slave, set import, trigger quota recovery,
/**
* For quota slave, acquire/release quota from master if needed
*/
- int (*quota_acquire) (struct obd_device *, unsigned int, unsigned int,
+ int (*quota_acquire) (struct obd_device *, const unsigned int [],
struct obd_trans_info *, int);
/**
* can finish a block_write or inode_create rpc. It updates the pending
* record of block and inode, acquires quota if necessary
*/
- int (*quota_chkquota) (struct obd_device *, unsigned int, unsigned int,
- int, int *, quota_acquire,
- struct obd_trans_info *, int, struct inode *,
- int);
+ int (*quota_chkquota) (struct obd_device *, struct obd_export *,
+ const unsigned int [], int [],
+ int, quota_acquire, struct obd_trans_info *,
+ int, struct inode *, int);
/**
* For quota client, the actions after the pending write is committed
*/
- int (*quota_pending_commit) (struct obd_device *, unsigned int,
- unsigned int, int, int);
+ int (*quota_pending_commit) (struct obd_device *, const unsigned int [],
+ int [], int);
#endif
/**
/**
* For quota client, check whether specified uid/gid is over quota
*/
- int (*quota_chkdq) (struct client_obd *, unsigned int, unsigned int);
+ int (*quota_chkdq) (struct client_obd *, const unsigned int []);
/**
* For quota client, set over quota flag for specifed uid/gid
*/
- int (*quota_setdq) (struct client_obd *, unsigned int, unsigned int,
+ int (*quota_setdq) (struct client_obd *, const unsigned int [],
obd_flag, obd_flag);
/**
*/
int (*quota_adjust_qunit) (struct obd_export *exp,
struct quota_adjust_qunit *oqaq,
- struct lustre_quota_ctxt *qctxt);
+ struct lustre_quota_ctxt *qctxt,
+ struct ptlrpc_request_set *rqset);
} quota_interface_t;
static inline int lquota_adjust(quota_interface_t *interface,
struct obd_device *obd,
- unsigned int qcids[],
- unsigned int qpids[],
+ const unsigned int qcids[],
+ const unsigned int qpids[],
int rc, int opc)
{
int ret;
}
static inline int lquota_chkdq(quota_interface_t *interface,
- struct client_obd *cli,
- unsigned int uid, unsigned int gid)
+ struct client_obd *cli, const unsigned int qid[])
{
int rc;
ENTRY;
QUOTA_CHECK_OP(interface, chkdq);
- rc = QUOTA_OP(interface, chkdq)(cli, uid, gid);
+ rc = QUOTA_OP(interface, chkdq)(cli, qid);
RETURN(rc);
}
static inline int lquota_setdq(quota_interface_t *interface,
- struct client_obd *cli,
- unsigned int uid, unsigned int gid,
+ struct client_obd *cli, const unsigned int qid[],
obd_flag valid, obd_flag flags)
{
int rc;
ENTRY;
QUOTA_CHECK_OP(interface, setdq);
- rc = QUOTA_OP(interface, setdq)(cli, uid, gid, valid, flags);
+ rc = QUOTA_OP(interface, setdq)(cli, qid, valid, flags);
RETURN(rc);
}
#ifdef __KERNEL__
static inline int lquota_chkquota(quota_interface_t *interface,
struct obd_device *obd,
- unsigned int uid, unsigned int gid, int count,
- int *flag, struct obd_trans_info *oti,
+ struct obd_export *exp,
+ const unsigned int id[], int pending[],
+ int count, struct obd_trans_info *oti,
int isblk, void *data, int frags)
{
int rc;
QUOTA_CHECK_OP(interface, chkquota);
QUOTA_CHECK_OP(interface, acquire);
- rc = QUOTA_OP(interface, chkquota)(obd, uid, gid, count, flag,
+ rc = QUOTA_OP(interface, chkquota)(obd, exp, id, pending, count,
QUOTA_OP(interface, acquire), oti,
isblk, (struct inode *)data, frags);
RETURN(rc);
static inline int lquota_pending_commit(quota_interface_t *interface,
struct obd_device *obd,
- unsigned int uid, unsigned int gid,
- int pending, int isblk)
+ const unsigned int id[],
+ int pending[], int isblk)
{
int rc;
ENTRY;
QUOTA_CHECK_OP(interface, pending_commit);
- rc = QUOTA_OP(interface, pending_commit)(obd, uid, gid, pending, isblk);
+ rc = QUOTA_OP(interface, pending_commit)(obd, id, pending, isblk);
RETURN(rc);
}
#endif
"admin_quotafile_v2.grp" /** group admin quotafile */\
}
+/*
+ * Definitions of structures for vfsv0 quota format
+ * Source linux/fs/quota/quotaio_v2.h
+ *
+ * The following definitions are normally found in private kernel headers.
+ * However, some sites build Lustre against kernel development headers rather
+ * than full kernel source, so we provide them here for compatibility.
+ */
+#ifdef __KERNEL__
+# if !defined(HAVE_QUOTAIO_H) && !defined(HAVE_FS_QUOTA_QUOTAIO_H) && \
+ !defined(HAVE_FS_QUOTAIO_H)
+
+#include <linux/types.h>
+#include <linux/quota.h>
+
+#define V2_INITQMAGICS {\
+ 0xd9c01f11, /* USRQUOTA */\
+ 0xd9c01927 /* GRPQUOTA */\
+}
+
+/* Header with type and version specific information */
+struct v2_disk_dqinfo {
+ __le32 dqi_bgrace; /* Time before block soft limit becomes hard limit */
+ __le32 dqi_igrace; /* Time before inode soft limit becomes hard limit */
+ __le32 dqi_flags; /* Flags for quotafile (DQF_*) */
+ __le32 dqi_blocks; /* Number of blocks in file */
+ __le32 dqi_free_blk; /* Number of first free block in the list */
+ __le32 dqi_free_entry; /* Number of block with at least one free entry */
+};
+
+/* First generic header */
+struct v2_disk_dqheader {
+ __le32 dqh_magic; /* Magic number identifying file */
+ __le32 dqh_version; /* File version */
+};
+#define V2_DQINFOOFF sizeof(struct v2_disk_dqheader) /* Offset of info header in file */
+#define QT_TREEOFF 1 /* Offset of tree in file in blocks */
+#define V2_DQTREEOFF QT_TREEOFF
+
+# endif /* !defined(HAVE_QUOTAIO_H) ... */
+#endif /* __KERNEL__ */
+
+/** @} quota */
+
#endif /* _LUSTRE_QUOTA_H */