Whamcloud - gitweb
LU-7261 ldiskfs: fix large_xattr overwrite
[fs/lustre-release.git] / lustre / quota / lquota_entry.c
index cff6b6d..73c3d05 100644
--- a/lustre/quota/lquota_entry.c
+++ b/lustre/quota/lquota_entry.c
  * Author: Niu    Yawei    <yawei.niu@intel.com>
  */
 
-#ifndef EXPORT_SYMTAB
-# define EXPORT_SYMTAB
-#endif
-
 #define DEBUG_SUBSYSTEM S_LQUOTA
 
 #include <linux/module.h>
@@ -42,52 +38,53 @@ static int hash_lqs_cur_bits = HASH_LQE_CUR_BITS;
 CFS_MODULE_PARM(hash_lqs_cur_bits, "i", int, 0444,
                 "the current bits of lqe hash");
 
-static unsigned lqe64_hash_hash(cfs_hash_t *hs, const void *key, unsigned mask)
+static unsigned
+lqe64_hash_hash(struct cfs_hash *hs, const void *key, unsigned mask)
 {
        return cfs_hash_u64_hash(*((__u64 *)key), mask);
 }
 
-static void *lqe64_hash_key(cfs_hlist_node_t *hnode)
+static void *lqe64_hash_key(struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        return &lqe->lqe_id.qid_uid;
 }
 
-static int lqe64_hash_keycmp(const void *key, cfs_hlist_node_t *hnode)
+static int lqe64_hash_keycmp(const void *key, struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        return (lqe->lqe_id.qid_uid == *((__u64*)key));
 }
 
-static void *lqe_hash_object(cfs_hlist_node_t *hnode)
+static void *lqe_hash_object(struct hlist_node *hnode)
 {
-       return cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       return hlist_entry(hnode, struct lquota_entry, lqe_hash);
 }
 
-static void lqe_hash_get(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_get(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        lqe_getref(lqe);
 }
 
-static void lqe_hash_put_locked(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_put_locked(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        struct lquota_entry *lqe;
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        lqe_putref(lqe);
 }
 
-static void lqe_hash_exit(cfs_hash_t *hs, cfs_hlist_node_t *hnode)
+static void lqe_hash_exit(struct cfs_hash *hs, struct hlist_node *hnode)
 {
        CERROR("Should not have any item left!\n");
 }
 
 /* lqe hash methods for 64-bit uid/gid, new hash functions would have to be
  * defined for per-directory quota relying on a 128-bit FID */
-static cfs_hash_ops_t lqe64_hash_ops = {
+static struct cfs_hash_ops lqe64_hash_ops = {
        .hs_hash       = lqe64_hash_hash,
        .hs_key        = lqe64_hash_key,
        .hs_keycmp     = lqe64_hash_keycmp,
@@ -118,13 +115,13 @@ struct lqe_iter_data {
        bool            lid_free_all;
 };
 
-static int lqe_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
-                      cfs_hlist_node_t *hnode, void *data)
+static int lqe_iter_cb(struct cfs_hash *hs, struct cfs_hash_bd *bd,
+                      struct hlist_node *hnode, void *data)
 {
        struct lqe_iter_data *d = (struct lqe_iter_data *)data;
        struct lquota_entry  *lqe;
 
-       lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
+       lqe = hlist_entry(hnode, struct lquota_entry, lqe_hash);
        LASSERT(atomic_read(&lqe->lqe_ref) > 0);
 
        /* Only one reference held by hash table, and nobody else can
@@ -155,7 +152,7 @@ static int lqe_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
  * \param free_all - free all entries or only free the entries
  *                   without quota enforce ?
  */
-static void lqe_cleanup(cfs_hash_t *hash, bool free_all)
+static void lqe_cleanup(struct cfs_hash *hash, bool free_all)
 {
        struct lqe_iter_data    d;
        int                     repeat = 0;
@@ -176,8 +173,8 @@ retry:
                        "freed:%lu, repeat:%u\n", hash,
                        d.lid_inuse, d.lid_freed, repeat);
                repeat++;
-               cfs_schedule_timeout_and_set_state(CFS_TASK_INTERRUPTIBLE,
-                                               cfs_time_seconds(1));
+               set_current_state(TASK_INTERRUPTIBLE);
+               schedule_timeout(cfs_time_seconds(1));
                goto retry;
        }
        EXIT;
@@ -220,7 +217,7 @@ struct lquota_site *lquota_site_alloc(const struct lu_env *env, void *parent,
 
        /* allocate hash table */
        memset(hashname, 0, sizeof(hashname));
-       sprintf(hashname, "LQUOTA_HASH%u", qtype);
+       snprintf(hashname, sizeof(hashname), "LQUOTA_HASH%hu", qtype);
        site->lqs_hash= cfs_hash_create(hashname, hash_lqs_cur_bits,
                                        HASH_LQE_MAX_BITS,
                                        min(hash_lqs_cur_bits,
@@ -326,7 +323,7 @@ struct lquota_entry *lqe_locate(const struct lu_env *env,
                RETURN(lqe);
        }
 
-       OBD_SLAB_ALLOC_PTR_GFP(new, lqe_kmem, __GFP_IO);
+       OBD_SLAB_ALLOC_PTR_GFP(new, lqe_kmem, GFP_NOFS);
        if (new == NULL) {
                CERROR("Fail to allocate lqe for id:"LPU64", "
                        "hash:%s\n", qid->qid_uid, site->lqs_hash->hs_name);
@@ -336,7 +333,7 @@ struct lquota_entry *lqe_locate(const struct lu_env *env,
        atomic_set(&new->lqe_ref, 1); /* hold 1 for caller */
        new->lqe_id     = *qid;
        new->lqe_site   = site;
-       CFS_INIT_LIST_HEAD(&new->lqe_link);
+       INIT_LIST_HEAD(&new->lqe_link);
 
        /* quota settings need to be updated from disk, that's why
         * lqe->lqe_uptodate isn't set yet */