4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2012, 2017, Intel Corporation.
25 * Use is subject to license terms.
28 #ifndef _QMT_INTERNAL_H
29 #define _QMT_INTERNAL_H
31 #include "lquota_internal.h"
34 * The Quota Master Target Device.
35 * The qmt is responsible for:
36 * - all interactions with MDT0 (provide request handlers, share ldlm namespace,
37 * manage ldlm lvbo, ...)
38 * - all quota lock management (i.e. global quota locks as well as per-ID locks)
39 * - manage the quota pool configuration
41 * That's the structure MDT0 connects to in mdt_quota_init().
44 /* Super-class. dt_device/lu_device for this master target */
45 struct dt_device qmt_dt_dev;
47 /* service name of this qmt */
48 char qmt_svname[MAX_OBD_NAME];
50 /* Reference to the next device in the side stack
51 * The child device is actually the OSD device where we store the quota
53 struct obd_export *qmt_child_exp;
54 struct dt_device *qmt_child;
56 /* pointer to ldlm namespace to be used for quota locks */
57 struct ldlm_namespace *qmt_ns;
59 /* Hash table containing a qmt_pool_info structure for each pool
60 * this quota master is in charge of. We only have 2 pools in this
61 * hash for the time being:
62 * - one for quota management on the default metadata pool
63 * - one for quota managment on the default data pool
65 * Once we support quota on non-default pools, then more pools will
66 * be added to this hash table and pool master setup would have to be
67 * handled via configuration logs */
68 struct cfs_hash *qmt_pool_hash;
70 /* List of pools managed by this master target */
71 struct list_head qmt_pool_list;
72 /* rw spinlock to protect pool list */
73 rwlock_t qmt_pool_lock;
75 /* procfs root directory for this qmt */
76 struct proc_dir_entry *qmt_proc;
78 /* dedicated thread in charge of space rebalancing */
79 struct ptlrpc_thread qmt_reba_thread;
81 /* list of lqe entry which need space rebalancing */
82 struct list_head qmt_reba_list;
84 /* lock protecting rebalancing list */
85 spinlock_t qmt_reba_lock;
87 unsigned long qmt_stopping:1; /* qmt is stopping */
/* Maximum length of a quota pool name: LOV pool name plus NUL terminator */
#define QPI_MAXNAME	(LOV_MAXPOOLNAME + 1)
/* NOTE(review): this struct was damaged during extraction -- the leading
 * numbers on each line are stale source line numbers, the opening comment
 * delimiter, at least two member declarations, several comment terminators
 * and the closing "};" were dropped.  Restore the full definition from the
 * upstream copy of this header before editing; the comments added below
 * only annotate what is still visible. */
94 * Per-pool quota information.
95 * The qmt creates one such structure for each pool
96 * with quota enforced. All the structures are kept in a list.
97 * We currently only support the default data pool and default metadata pool.
99 struct qmt_pool_info {
100 /* chained list of all pools managed by the same qmt */
101 struct list_head qpi_linkage;
103 /* Could be LQUOTA_RES_MD or LQUOTA_RES_DT */
/* NOTE(review): the member described by the comment above (the pool/resource
 * type) is missing here -- line lost in extraction; confirm upstream. */
105 char qpi_name[QPI_MAXNAME];
107 /* track users of this pool instance */
/* NOTE(review): the reference-counting member described by the comment above
 * is missing here -- line lost in extraction; confirm upstream. */
110 /* back pointer to master target
111 * immutable after creation. */
112 struct qmt_device *qpi_qmt;
114 /* pointer to dt object associated with global indexes for both user
116 struct dt_object *qpi_glb_obj[LL_MAXQUOTAS];
118 /* A pool supports two different quota types: user and group quota.
119 * Each quota type has its own global index and lquota_entry hash table.
121 struct lquota_site *qpi_site[LL_MAXQUOTAS];
123 /* number of slaves registered for each quota types */
124 int qpi_slv_nr[LL_MAXQUOTAS];
126 /* reference on lqe (ID 0) storing grace time. */
127 struct lquota_entry *qpi_grace_lqe[LL_MAXQUOTAS];
129 /* procfs root directory for this pool */
130 struct proc_dir_entry *qpi_proc;
132 /* pool directory where all indexes related to this pool instance are
134 struct dt_object *qpi_root;
136 /* Global quota parameters which apply to all quota type */
137 /* the least value of qunit */
138 unsigned long qpi_least_qunit;
140 /* Least value of qunit when soft limit is exceeded.
142 * When soft limit is exceeded, qunit will be shrinked to least_qunit
143 * (1M for block limit), that results in significant write performance
144 * drop since the client will turn to sync write from now on.
146 * To retain the write performance in an acceptable level, we choose
147 * to sacrifice grace time accuracy a bit and use a larger least_qunit
148 * when soft limit is exceeded. It's (qpi_least_qunit * 4) by default,
149 * and user may enlarge it via procfs to get even better performance
150 * (with the cost of losing more grace time accuracy).
152 * See qmt_calc_softlimit().
154 unsigned long qpi_soft_least_qunit;
/*
 * Helper routines and prototypes
 */
161 /* helper routine to find qmt_pool_info associated a lquota_entry */
162 static inline struct qmt_pool_info *lqe2qpi(struct lquota_entry *lqe)
164 LASSERT(lqe_is_master(lqe));
165 return (struct qmt_pool_info *)lqe->lqe_site->lqs_parent;
168 /* return true if someone holds either a read or write lock on the lqe */
169 static inline bool lqe_is_locked(struct lquota_entry *lqe)
171 LASSERT(lqe_is_master(lqe));
172 if (down_write_trylock(&lqe->lqe_sem) == 0)
174 lqe_write_unlock(lqe);
178 /* value to be restored if someone wrong happens during lqe writeback */
179 struct qmt_lqe_restore {
187 /* Common data shared by qmt handlers */
188 struct qmt_thread_info {
189 union lquota_rec qti_rec;
190 union lquota_id qti_id;
191 char qti_buf[MTI_NAME_MAXLEN];
192 struct lu_fid qti_fid;
193 struct ldlm_res_id qti_resid;
194 union ldlm_gl_desc qti_gl_desc;
195 struct quota_body qti_body;
196 struct qmt_lqe_restore qti_restore;
/* context key used to fetch the per-thread qmt_thread_info from a lu_env */
extern struct lu_context_key qmt_thread_key;

/* helper function to extract qmt_thread_info from current environment */
static inline
struct qmt_thread_info *qmt_info(const struct lu_env *env)
{
	return lu_env_info(env, &qmt_thread_key);
}
208 /* helper routine to convert a lu_device into a qmt_device */
209 static inline struct qmt_device *lu2qmt_dev(struct lu_device *ld)
211 return container_of0(lu2dt_dev(ld), struct qmt_device, qmt_dt_dev);
214 /* helper routine to convert a qmt_device into lu_device */
215 static inline struct lu_device *qmt2lu_dev(struct qmt_device *qmt)
217 return &qmt->qmt_dt_dev.dd_lu_dev;
/* shortcuts to the pool objects owning a given lqe: the pool root directory
 * and the global index object for the lqe's quota type */
#define LQE_ROOT(lqe)    (lqe2qpi(lqe)->qpi_root)
#define LQE_GLB_OBJ(lqe) (lqe2qpi(lqe)->qpi_glb_obj[lqe->lqe_site->lqs_qtype])
223 /* helper function returning grace time to use for a given lquota entry */
224 static inline __u64 qmt_lqe_grace(struct lquota_entry *lqe)
226 struct qmt_pool_info *pool = lqe2qpi(lqe);
227 struct lquota_entry *grace_lqe;
229 grace_lqe = pool->qpi_grace_lqe[lqe->lqe_site->lqs_qtype];
230 LASSERT(grace_lqe != NULL);
232 return grace_lqe->lqe_gracetime;
235 static inline void qmt_restore(struct lquota_entry *lqe,
236 struct qmt_lqe_restore *restore)
238 lqe->lqe_hardlimit = restore->qlr_hardlimit;
239 lqe->lqe_softlimit = restore->qlr_softlimit;
240 lqe->lqe_gracetime = restore->qlr_gracetime;
241 lqe->lqe_granted = restore->qlr_granted;
242 lqe->lqe_qunit = restore->qlr_qunit;
/* Account @cnt additional units granted to a slave: bump both the lqe's
 * total granted counter and the per-slave counter @slv.
 * NOTE(review): the do/while wrapper and the slave-counter update line were
 * lost in extraction and restored here -- confirm against upstream. */
#define QMT_GRANT(lqe, slv, cnt)             \
	do {                                 \
		(lqe)->lqe_granted += (cnt); \
		(slv) += (cnt);              \
	} while (0)
/* Account @cnt units released by a slave: decrease both the lqe's total
 * granted counter and the per-slave counter @slv.
 * NOTE(review): the do/while wrapper and the slave-counter update line were
 * lost in extraction and restored here -- confirm against upstream. */
#define QMT_REL(lqe, slv, cnt)               \
	do {                                 \
		(lqe)->lqe_granted -= (cnt); \
		(slv) -= (cnt);              \
	} while (0)
256 /* helper routine returning true when reached hardlimit */
257 static inline bool qmt_hard_exhausted(struct lquota_entry *lqe)
259 if (lqe->lqe_hardlimit != 0 && lqe->lqe_granted >= lqe->lqe_hardlimit)
264 /* helper routine returning true when reached softlimit */
265 static inline bool qmt_soft_exhausted(struct lquota_entry *lqe, __u64 now)
267 if (lqe->lqe_softlimit != 0 && lqe->lqe_granted > lqe->lqe_softlimit &&
268 lqe->lqe_gracetime != 0 && now >= lqe->lqe_gracetime)
273 /* helper routine returning true when the id has run out of quota space:
274 * - reached hardlimit
276 * - reached softlimit and grace time expired already */
277 static inline bool qmt_space_exhausted(struct lquota_entry *lqe, __u64 now)
279 return (qmt_hard_exhausted(lqe) || qmt_soft_exhausted(lqe, now));
282 /* helper routine clearing the default quota setting */
283 static inline void qmt_lqe_clear_default(struct lquota_entry *lqe)
285 lqe->lqe_is_default = false;
286 lqe->lqe_gracetime &= ~((__u64)LQUOTA_FLAG_DEFAULT <<
/* NOTE(review): this prototype section was damaged in extraction -- the
 * leading numbers are stale source line numbers and the continuation lines
 * of several prototypes (qmt_pool_prepare, qmt_pool_new_conn,
 * qmt_pool_lqe_lookup, qmt_trans_start_with_slv, qmt_slv_write,
 * presumably also the tail of the QMT_REBA_TIMEOUT comment) were dropped.
 * Restore the declarations from the upstream header before editing. */
290 /* number of seconds to wait for slaves to release quota space after
292 #define QMT_REBA_TIMEOUT 2
/* pool management routines */
295 void qmt_pool_fini(const struct lu_env *, struct qmt_device *);
296 int qmt_pool_init(const struct lu_env *, struct qmt_device *);
297 int qmt_pool_prepare(const struct lu_env *, struct qmt_device *,
299 int qmt_pool_new_conn(const struct lu_env *, struct qmt_device *,
300 struct lu_fid *, struct lu_fid *, __u64 *,
302 struct lquota_entry *qmt_pool_lqe_lookup(const struct lu_env *,
303 struct qmt_device *, int, int,
/* lquota entry operations and on-disk index update helpers */
306 extern struct lquota_entry_operations qmt_lqe_ops;
307 int qmt_lqe_set_default(const struct lu_env *env, struct qmt_pool_info *pool,
308 struct lquota_entry *lqe, bool create_record);
309 struct thandle *qmt_trans_start_with_slv(const struct lu_env *,
310 struct lquota_entry *,
312 struct qmt_lqe_restore *);
313 struct thandle *qmt_trans_start(const struct lu_env *, struct lquota_entry *,
314 struct qmt_lqe_restore *);
315 int qmt_glb_write(const struct lu_env *, struct thandle *,
316 struct lquota_entry *, __u32, __u64 *);
317 int qmt_slv_write(const struct lu_env *, struct thandle *,
318 struct lquota_entry *, struct dt_object *, __u32, __u64 *,
320 int qmt_slv_read(const struct lu_env *, struct lquota_entry *,
321 struct dt_object *, __u64 *);
322 int qmt_validate_limits(struct lquota_entry *, __u64, __u64);
323 void qmt_adjust_qunit(const struct lu_env *, struct lquota_entry *);
324 void qmt_adjust_edquot(struct lquota_entry *, __u64);
325 void qmt_revalidate(const struct lu_env *, struct lquota_entry *);
326 __u64 qmt_alloc_expand(struct lquota_entry *, __u64, __u64);
/* quota request handlers */
329 int qmt_set_with_lqe(const struct lu_env *env, struct qmt_device *qmt,
330 struct lquota_entry *lqe, __u64 hard, __u64 soft,
331 __u64 time, __u32 valid, bool is_default, bool is_updated);
332 int qmt_dqacq0(const struct lu_env *, struct lquota_entry *,
333 struct qmt_device *, struct obd_uuid *, __u32, __u64, __u64,
334 struct quota_body *);
/* ldlm intent policy, lvbo callbacks and rebalancing thread */
337 int qmt_intent_policy(const struct lu_env *, struct lu_device *,
338 struct ptlrpc_request *, struct ldlm_lock **, int);
339 int qmt_lvbo_init(struct lu_device *, struct ldlm_resource *);
340 int qmt_lvbo_update(struct lu_device *, struct ldlm_resource *,
341 struct ptlrpc_request *, int);
342 int qmt_lvbo_size(struct lu_device *, struct ldlm_lock *);
343 int qmt_lvbo_fill(struct lu_device *, struct ldlm_lock *, void *, int);
344 int qmt_lvbo_free(struct lu_device *, struct ldlm_resource *);
345 int qmt_start_reba_thread(struct qmt_device *);
346 void qmt_stop_reba_thread(struct qmt_device *);
347 void qmt_glb_lock_notify(const struct lu_env *, struct lquota_entry *, __u64);
348 void qmt_id_lock_notify(struct qmt_device *, struct lquota_entry *);
349 #endif /* _QMT_INTERNAL_H */