/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2012, 2017, Intel Corporation.
 * Use is subject to license terms.
 */
#ifndef _QMT_INTERNAL_H
#define _QMT_INTERNAL_H

#include "lquota_internal.h"
/*
 * The Quota Master Target Device.
 * The qmt is responsible for:
 * - all interactions with MDT0 (provide request handlers, share ldlm
 *   namespace, manage ldlm lvbo, ...)
 * - all quota lock management (i.e. global quota locks as well as per-ID
 *   locks)
 * - management of the quota pool configuration
 *
 * This is the structure MDT0 connects to in mdt_quota_init().
 */
struct qmt_device {
	/* Super-class. dt_device/lu_device for this master target */
	struct dt_device	 qmt_dt_dev;

	/* service name of this qmt */
	char			 qmt_svname[MAX_OBD_NAME];
	/* root directory for this qmt */
	struct dt_object	*qmt_root;

	/* Reference to the next device in the side stack.
	 * The child device is actually the OSD device where we store the
	 * quota index files. */
	struct obd_export	*qmt_child_exp;
	struct dt_device	*qmt_child;

	/* pointer to ldlm namespace to be used for quota locks */
	struct ldlm_namespace	*qmt_ns;

	/* list of pools managed by this master target */
	struct list_head	 qmt_pool_list;
	/* rw semaphore protecting the pool list */
	struct rw_semaphore	 qmt_pool_lock;

	/* procfs root directory for this qmt */
	struct proc_dir_entry	*qmt_proc;

	/* dedicated thread in charge of space rebalancing */
	struct task_struct	*qmt_reba_task;

	/* list of lqe entries which need space rebalancing */
	struct list_head	 qmt_reba_list;

	/* lock protecting the rebalancing list */
	spinlock_t		 qmt_reba_lock;

	unsigned long		 qmt_stopping:1; /* qmt is stopping */
};
#define QPI_MAXNAME	(LOV_MAXPOOLNAME + 1)
#define qmt_pool_global(qpi) \
	(!strncmp(qpi->qpi_name, GLB_POOL_NAME, \
		  strlen(GLB_POOL_NAME) + 1) ? true : false)
/* Draft for mdt pools */
union qmt_sarray {
	struct lu_tgt_pool	osts;
};

/* Since DOM support, data resources can exist
 * on both MDT and OST targets. */
enum {
	QMT_STYPE_MDT,
	QMT_STYPE_OST,
	QMT_STYPE_CNT
};

enum {
	/* set while recalc_thread is working */
	QPI_FLAG_RECALC_OFFSET,
	QPI_FLAG_STATE_INITED,
};
/*
 * Per-pool quota information.
 * The qmt creates one such structure for each pool with quota enforced.
 * All the structures are kept in a list.
 * We currently only support the default data pool and default metadata pool.
 */
struct qmt_pool_info {
	/* chained list of all pools managed by the same qmt */
	struct list_head	 qpi_linkage;

	/* can be either LQUOTA_RES_MD or LQUOTA_RES_DT */
	int			 qpi_rtype;
	char			 qpi_name[QPI_MAXNAME];
	union qmt_sarray	 qpi_sarr;
	/* recalculation thread pointer */
	struct task_struct	*qpi_recalc_task;
	/* rw semaphore to avoid acquire/release during
	 * pool recalculation. */
	struct rw_semaphore	 qpi_recalc_sem;
	unsigned long		 qpi_flags;

	/* track users of this pool instance */
	atomic_t		 qpi_ref;

	/* back pointer to the master target;
	 * immutable after creation. */
	struct qmt_device	*qpi_qmt;
	/* pointers to the dt objects associated with the global indexes for
	 * both user and group quota */
	struct dt_object	*qpi_glb_obj[LL_MAXQUOTAS];

	/* A pool supports two different quota types: user and group quota.
	 * Each quota type has its own global index and lquota_entry hash
	 * table. */
	struct lquota_site	*qpi_site[LL_MAXQUOTAS];

	/* number of slaves registered for each quota type */
	int			 qpi_slv_nr[QMT_STYPE_CNT][LL_MAXQUOTAS];

	/* reference on the lqe (ID 0) storing the grace time */
	struct lquota_entry	*qpi_grace_lqe[LL_MAXQUOTAS];

	/* procfs root directory for this pool */
	struct proc_dir_entry	*qpi_proc;

	/* pool directory where all indexes related to this pool instance
	 * are stored */
	struct dt_object	*qpi_root;

	/* Global quota parameters which apply to all quota types */
	/* the least value of qunit */
	unsigned long		 qpi_least_qunit;
	/* Least value of qunit when the soft limit is exceeded.
	 *
	 * When the soft limit is exceeded, qunit will be shrunk to
	 * least_qunit (1M for block limit), which results in a significant
	 * write performance drop since the client will turn to sync writes
	 * from then on.
	 *
	 * To keep write performance at an acceptable level, we choose to
	 * sacrifice grace time accuracy a bit and use a larger least_qunit
	 * when the soft limit is exceeded. It is (qpi_least_qunit * 4) by
	 * default, and users may enlarge it via procfs to get even better
	 * performance (at the cost of losing more grace time accuracy).
	 *
	 * See qmt_calc_softlimit().
	 */
	unsigned long		 qpi_soft_least_qunit;
};
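
/*
 * Worked example (editor's illustration, not from the original source):
 * with the default block least qunit of 1M, qpi_soft_least_qunit defaults
 * to 1M * 4 = 4M. An ID that exceeds its soft limit is therefore still
 * granted 4M qunits instead of dropping straight to the 1M least qunit
 * that would force the client into sync writes.
 */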
static inline int qpi_slv_nr(struct qmt_pool_info *pool, int qtype)
{
	int i, sum = 0;

	for (i = 0; i < QMT_STYPE_CNT; i++)
		sum += pool->qpi_slv_nr[i][qtype];

	return sum;
}

static inline int qpi_slv_nr_by_rtype(struct qmt_pool_info *pool, int qtype)
{
	if (pool->qpi_rtype == LQUOTA_RES_DT)
		/* this should become qpi_slv_nr() if MDTs are ever added
		 * to quota pools */
		return pool->qpi_slv_nr[QMT_STYPE_OST][qtype];
	else
		return pool->qpi_slv_nr[QMT_STYPE_MDT][qtype];
}
/*
 * Helper routines and prototypes
 */

/* helper routine to find the qmt_pool_info associated with a lquota_entry */
static inline struct qmt_pool_info *lqe2qpi(struct lquota_entry *lqe)
{
	LASSERT(lqe_is_master(lqe));
	return (struct qmt_pool_info *)lqe->lqe_site->lqs_parent;
}
/* return true if someone holds either a read or write lock on the lqe */
static inline bool lqe_is_locked(struct lquota_entry *lqe)
{
	LASSERT(lqe_is_master(lqe));
	/* if the write trylock fails, some thread holds the semaphore in
	 * read or write mode */
	if (down_write_trylock(&lqe->lqe_sem) == 0)
		return true;
	/* the trylock succeeded, so nobody held the lock; release it */
	lqe_write_unlock(lqe);
	return false;
}
/* values to be restored if something goes wrong during lqe writeback */
struct qmt_lqe_restore {
	__u64	qlr_hardlimit;
	__u64	qlr_softlimit;
	__u64	qlr_gracetime;
	__u64	qlr_granted;
	__u64	qlr_qunit;
};
#define QMT_MAX_POOL_NUM	16
/* Common data shared by qmt handlers */
struct qmt_thread_info {
	union lquota_rec	 qti_rec;
	union lquota_id		 qti_id;
	char			 qti_buf[MTI_NAME_MAXLEN];
	struct lu_fid		 qti_fid;
	struct ldlm_res_id	 qti_resid;
	union ldlm_gl_desc	 qti_gl_desc;
	struct quota_body	 qti_body;

	struct qmt_lqe_restore	 qti_lqes_rstr_small[QMT_MAX_POOL_NUM];
	struct qmt_lqe_restore	*qti_lqes_rstr;

	struct qmt_pool_info	*qti_pools_small[QMT_MAX_POOL_NUM];
	/* Pointer to an array of qpis in case
	 * qti_pools_cnt > QMT_MAX_POOL_NUM. */
	struct qmt_pool_info	**qti_pools;
	/* the number of pools in qti_pools */
	int			 qti_pools_cnt;
	/* Maximum number of elements in the qti_pools array.
	 * By default it is QMT_MAX_POOL_NUM. */
	int			 qti_pools_num;

	int			 qti_glbl_lqe_idx;
	/* the same scheme is used for lqes ... */
	struct lquota_entry	*qti_lqes_small[QMT_MAX_POOL_NUM];
	/* Pointer to an array of lqes in case
	 * qti_lqes_cnt > QMT_MAX_POOL_NUM. */
	struct lquota_entry	**qti_lqes;
	/* the number of lqes in qti_lqes */
	int			 qti_lqes_cnt;
	/* Maximum number of elements in the qti_lqes array.
	 * By default it is QMT_MAX_POOL_NUM. */
	int			 qti_lqes_num;
};
extern struct lu_context_key qmt_thread_key;

/* helper function to extract qmt_thread_info from the current environment */
static inline
struct qmt_thread_info *qmt_info(const struct lu_env *env)
{
	return lu_env_info(env, &qmt_thread_key);
}
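
/*
 * Usage sketch (editor's illustration, not from the original source):
 * request handlers fetch the pre-allocated per-thread scratch area from
 * the env instead of putting large buffers on the stack. "uid" and "fid"
 * below are hypothetical locals:
 *
 *	struct qmt_thread_info *qti = qmt_info(env);
 *
 *	qti->qti_id.qid_uid = uid;
 *	qti->qti_fid = *fid;
 */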
#define qti_lqes_num(env)	(qmt_info(env)->qti_lqes_num)
#define qti_lqes_inited(env)	(qmt_info(env)->qti_lqes_num)
#define qti_lqes_cnt(env)	(qmt_info(env)->qti_lqes_cnt)
#define qti_glbl_lqe_idx(env)	(qmt_info(env)->qti_glbl_lqe_idx)
#define qti_lqes(env)		(qti_lqes_num(env) > QMT_MAX_POOL_NUM ? \
				 qmt_info(env)->qti_lqes : \
				 qmt_info(env)->qti_lqes_small)
#define qti_lqes_rstr(env)	(qti_lqes_num(env) > QMT_MAX_POOL_NUM ? \
				 qmt_info(env)->qti_lqes_rstr : \
				 qmt_info(env)->qti_lqes_rstr_small)
#define qti_lqes_glbl(env)	(qti_lqes(env)[qti_glbl_lqe_idx(env)])
#define qti_lqe_hard(env, i)	(qti_lqes(env)[i]->lqe_hardlimit)
#define qti_lqe_soft(env, i)	(qti_lqes(env)[i]->lqe_softlimit)
#define qti_lqe_granted(env, i)	(qti_lqes(env)[i]->lqe_granted)
#define qti_lqe_qunit(env, i)	(qti_lqes(env)[i]->lqe_qunit)
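
/*
 * Usage sketch (editor's illustration, not from the original source):
 * the qti_lqes* macros hide whether the small on-stack array or the
 * dynamically allocated one is in use, so callers simply iterate:
 *
 *	int i;
 *
 *	for (i = 0; i < qti_lqes_cnt(env); i++)
 *		lqe_getref(qti_lqes(env)[i]);
 */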
/* helper routine to convert a lu_device into a qmt_device */
static inline struct qmt_device *lu2qmt_dev(struct lu_device *ld)
{
	return container_of_safe(lu2dt_dev(ld), struct qmt_device, qmt_dt_dev);
}

/* helper routine to convert a qmt_device into a lu_device */
static inline struct lu_device *qmt2lu_dev(struct qmt_device *qmt)
{
	return &qmt->qmt_dt_dev.dd_lu_dev;
}
#define LQE_ROOT(lqe)		(lqe2qpi(lqe)->qpi_root)
#define LQE_GLB_OBJ(lqe)	(lqe2qpi(lqe)->qpi_glb_obj[lqe_qtype(lqe)])
/* helper function returning the grace time to use for a given lquota entry */
static inline __u64 qmt_lqe_grace(struct lquota_entry *lqe)
{
	struct qmt_pool_info	*pool = lqe2qpi(lqe);
	struct lquota_entry	*grace_lqe;

	grace_lqe = pool->qpi_grace_lqe[lqe_qtype(lqe)];
	LASSERT(grace_lqe != NULL);

	return grace_lqe->lqe_gracetime;
}
static inline void qmt_restore(struct lquota_entry *lqe,
			       struct qmt_lqe_restore *restore)
{
	lqe->lqe_hardlimit = restore->qlr_hardlimit;
	lqe->lqe_softlimit = restore->qlr_softlimit;
	lqe->lqe_gracetime = restore->qlr_gracetime;
	lqe->lqe_granted = restore->qlr_granted;
	lqe->lqe_qunit = restore->qlr_qunit;
}
static inline void qmt_restore_lqes(const struct lu_env *env)
{
	int i;

	for (i = 0; i < qti_lqes_cnt(env); i++)
		qmt_restore(qti_lqes(env)[i], &qti_lqes_rstr(env)[i]);
}
#define QMT_GRANT(lqe, slv, cnt)             \
	do {                                 \
		(lqe)->lqe_granted += (cnt); \
		(slv) += (cnt);              \
	} while (0)
#define QMT_REL(lqe, slv, cnt)               \
	do {                                 \
		(lqe)->lqe_granted -= (cnt); \
		(slv) -= (cnt);              \
	} while (0)
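
/*
 * Usage sketch (editor's illustration, not from the original source):
 * granting space to a slave updates the per-ID total and the slave's own
 * granted counter in lock step, under the lqe write lock. "slv_granted"
 * and "count" are hypothetical locals:
 *
 *	lqe_write_lock(lqe);
 *	QMT_GRANT(lqe, slv_granted, count);
 *	lqe_write_unlock(lqe);
 */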
/* helper routine returning true when the hard limit is reached */
static inline bool qmt_hard_exhausted(struct lquota_entry *lqe)
{
	if (lqe->lqe_hardlimit != 0 && lqe->lqe_granted >= lqe->lqe_hardlimit)
		return true;
	return false;
}

/* helper routine returning true when the soft limit is reached */
static inline bool qmt_soft_exhausted(struct lquota_entry *lqe, __u64 now)
{
	if (lqe->lqe_softlimit != 0 && lqe->lqe_granted > lqe->lqe_softlimit &&
	    lqe->lqe_gracetime != 0 && now >= lqe->lqe_gracetime)
		return true;
	return false;
}
/* helper routine returning true when the ID has run out of quota space:
 * - the hard limit is reached, or
 * - the soft limit is reached and the grace time has already expired */
static inline bool qmt_space_exhausted(struct lquota_entry *lqe, __u64 now)
{
	return qmt_hard_exhausted(lqe) || qmt_soft_exhausted(lqe, now);
}
static inline bool qmt_space_exhausted_lqes(const struct lu_env *env, __u64 now)
{
	bool exhausted = false;
	int i;

	for (i = 0; i < qti_lqes_cnt(env) && !exhausted; i++)
		exhausted |= qmt_space_exhausted(qti_lqes(env)[i], now);

	return exhausted;
}
/* helper routine clearing the default quota setting */
static inline void qmt_lqe_clear_default(struct lquota_entry *lqe)
{
	lqe->lqe_is_default = false;
	lqe->lqe_gracetime &= ~((__u64)LQUOTA_FLAG_DEFAULT <<
				LQUOTA_GRACE_BITS);
}
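
/*
 * Illustrative note (editor's assumption about the lquota encoding): the
 * "default quota" marker lives in the bits of lqe_gracetime above the
 * LQUOTA_GRACE_BITS that hold the actual grace time, so setting it is
 * the mirror of the helper above:
 *
 *	lqe->lqe_gracetime |= (__u64)LQUOTA_FLAG_DEFAULT << LQUOTA_GRACE_BITS;
 */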
/* number of seconds to wait for slaves to release quota space after
 * rebalancing */
#define QMT_REBA_TIMEOUT 2

/* qmt_pool.c */

void qmt_pool_free(const struct lu_env *, struct qmt_pool_info *);
/*
 * Reference counter management for qmt_pool_info structures
 */
static inline void qpi_getref(struct qmt_pool_info *pool)
{
	atomic_inc(&pool->qpi_ref);
}

static inline void qpi_putref(const struct lu_env *env,
			      struct qmt_pool_info *pool)
{
	LASSERT(atomic_read(&pool->qpi_ref) > 0);
	if (atomic_dec_and_test(&pool->qpi_ref))
		qmt_pool_free(env, pool);
}
void qmt_pool_fini(const struct lu_env *, struct qmt_device *);
int qmt_pool_init(const struct lu_env *, struct qmt_device *);
int qmt_pool_prepare(const struct lu_env *, struct qmt_device *,
		     struct dt_object *, char *);
int qmt_pool_new_conn(const struct lu_env *, struct qmt_device *,
		      struct lu_fid *, struct lu_fid *, __u64 *,
		      struct obd_uuid *);
#define GLB_POOL_NAME	"0x0"
#define qmt_pool_lookup_glb(env, qmt, type) \
		qmt_pool_lookup(env, qmt, type, NULL, -1, false)
#define qmt_pool_lookup_name(env, qmt, type, name) \
		qmt_pool_lookup(env, qmt, type, name, -1, false)
#define qmt_pool_lookup_arr(env, qmt, type, idx) \
		qmt_pool_lookup(env, qmt, type, NULL, idx, true)
struct qmt_pool_info *qmt_pool_lookup(const struct lu_env *env,
				      struct qmt_device *qmt,
				      int rtype,
				      char *pool_name,
				      int idx, bool add);
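
/*
 * Usage sketch (editor's illustration, not from the original source,
 * assuming an ERR_PTR-style return): a lookup returns a referenced pool
 * which the caller must drop with qpi_putref() once done:
 *
 *	struct qmt_pool_info *pool;
 *
 *	pool = qmt_pool_lookup_glb(env, qmt, LQUOTA_RES_DT);
 *	if (!IS_ERR(pool)) {
 *		... use pool ...
 *		qpi_putref(env, pool);
 *	}
 */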
struct lquota_entry *qmt_pool_lqe_lookup(const struct lu_env *,
					 struct qmt_device *, int, int,
					 union lquota_id *, char *);
int qmt_pool_lqes_lookup(const struct lu_env *, struct qmt_device *, int,
			 int, int, union lquota_id *, char *, int);
int qmt_pool_lqes_lookup_spec(const struct lu_env *env, struct qmt_device *qmt,
			      int rtype, int qtype, union lquota_id *qid);
void qmt_lqes_sort(const struct lu_env *env);
int qmt_pool_new(struct obd_device *obd, char *poolname);
int qmt_pool_add(struct obd_device *obd, char *poolname, char *ostname);
int qmt_pool_rem(struct obd_device *obd, char *poolname, char *ostname);
int qmt_pool_del(struct obd_device *obd, char *poolname);

struct rw_semaphore *qmt_sarr_rwsem(struct qmt_pool_info *qpi);
int qmt_sarr_get_idx(struct qmt_pool_info *qpi, int arr_idx);
unsigned int qmt_sarr_count(struct qmt_pool_info *qpi);

/* qmt_entry.c */
extern const struct lquota_entry_operations qmt_lqe_ops;
int qmt_lqe_set_default(const struct lu_env *env, struct qmt_pool_info *pool,
			struct lquota_entry *lqe, bool create_record);
struct thandle *qmt_trans_start_with_slv(const struct lu_env *,
					 struct lquota_entry *,
					 struct dt_object *,
					 bool);
struct thandle *qmt_trans_start(const struct lu_env *, struct lquota_entry *);
int qmt_glb_write_lqes(const struct lu_env *, struct thandle *, __u32, __u64 *);
int qmt_glb_write(const struct lu_env *, struct thandle *,
		  struct lquota_entry *, __u32, __u64 *);
int qmt_slv_write(const struct lu_env *, struct thandle *,
		  struct lquota_entry *, struct dt_object *, __u32, __u64 *,
		  union lquota_id *);
int qmt_slv_read(const struct lu_env *, union lquota_id *,
		 struct dt_object *, __u64 *);
int qmt_validate_limits(struct lquota_entry *, __u64, __u64);
bool qmt_adjust_qunit(const struct lu_env *, struct lquota_entry *);
bool qmt_adjust_edquot(struct lquota_entry *, __u64);

#define qmt_adjust_edquot_notify(env, qmt, now, qb_flags) \
	qmt_adjust_edquot_qunit_notify(env, qmt, now, true, false, qb_flags)
#define qmt_adjust_qunit_notify(env, qmt, qb_flags) \
	qmt_adjust_edquot_qunit_notify(env, qmt, 0, false, true, qb_flags)
#define qmt_adjust_and_notify(env, qmt, now, qb_flags) \
	qmt_adjust_edquot_qunit_notify(env, qmt, now, true, true, qb_flags)
bool qmt_adjust_edquot_qunit_notify(const struct lu_env *, struct qmt_device *,
				    __u64, bool, bool, __u32);
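
/*
 * Usage sketch (editor's illustration, not from the original source):
 * after space has been released, a handler can re-evaluate both the
 * edquot flag and the qunit and broadcast the result to slaves in one
 * call. "qb_flags" is a hypothetical local:
 *
 *	qmt_adjust_and_notify(env, qmt, ktime_get_real_seconds(), qb_flags);
 */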
bool qmt_revalidate(const struct lu_env *, struct lquota_entry *);
void qmt_revalidate_lqes(const struct lu_env *, struct qmt_device *, __u32);
__u64 qmt_alloc_expand(struct lquota_entry *, __u64, __u64);

void qti_lqes_init(const struct lu_env *env);
int qti_lqes_add(const struct lu_env *env, struct lquota_entry *lqe);
void qti_lqes_del(const struct lu_env *env, int index);
void qti_lqes_fini(const struct lu_env *env);
__u64 qti_lqes_min_qunit(const struct lu_env *env);
int qti_lqes_edquot(const struct lu_env *env);
int qti_lqes_restore_init(const struct lu_env *env);
void qti_lqes_restore_fini(const struct lu_env *env);
void qti_lqes_write_lock(const struct lu_env *env);
void qti_lqes_write_unlock(const struct lu_env *env);

struct lqe_glbl_data *qmt_alloc_lqe_gd(struct qmt_pool_info *, int);
void qmt_free_lqe_gd(struct lqe_glbl_data *);
void qmt_setup_lqe_gd(const struct lu_env *, struct qmt_device *,
		      struct lquota_entry *, struct lqe_glbl_data *, int);
#define qmt_seed_glbe_edquot(env, lqeg) \
		qmt_seed_glbe_all(env, lqeg, false, true)
#define qmt_seed_glbe_qunit(env, lqeg) \
		qmt_seed_glbe_all(env, lqeg, true, false)
#define qmt_seed_glbe(env, lqeg) \
		qmt_seed_glbe_all(env, lqeg, true, true)
void qmt_seed_glbe_all(const struct lu_env *, struct lqe_glbl_data *,
		       bool, bool);

/* qmt_handler.c */
int qmt_set_with_lqe(const struct lu_env *env, struct qmt_device *qmt,
		     struct lquota_entry *lqe, __u64 hard, __u64 soft,
		     __u64 time, __u32 valid, bool is_default, bool is_updated);
int qmt_dqacq0(const struct lu_env *, struct qmt_device *, struct obd_uuid *,
	       __u32, __u64, __u64, struct quota_body *);
int qmt_uuid2idx(struct obd_uuid *, int *);

/* qmt_lock.c */
int qmt_intent_policy(const struct lu_env *, struct lu_device *,
		      struct ptlrpc_request *, struct ldlm_lock **, int);
int qmt_lvbo_init(struct lu_device *, struct ldlm_resource *);
int qmt_lvbo_update(struct lu_device *, struct ldlm_resource *,
		    struct ptlrpc_request *, int);
int qmt_lvbo_size(struct lu_device *, struct ldlm_lock *);
int qmt_lvbo_fill(struct lu_device *, struct ldlm_lock *, void *, int);
int qmt_lvbo_free(struct lu_device *, struct ldlm_resource *);
int qmt_start_reba_thread(struct qmt_device *);
void qmt_stop_reba_thread(struct qmt_device *);
void qmt_glb_lock_notify(const struct lu_env *, struct lquota_entry *, __u64);
void qmt_id_lock_notify(struct qmt_device *, struct lquota_entry *);

#endif /* _QMT_INTERNAL_H */