/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2012, 2017, Intel Corporation.
 * Use is subject to license terms.
 */
#ifndef _QMT_INTERNAL_H
#define _QMT_INTERNAL_H

#include "lquota_internal.h"
/*
 * The Quota Master Target Device.
 * The qmt is responsible for:
 * - all interactions with MDT0 (provide request handlers, share ldlm
 *   namespace, manage ldlm lvbo, ...)
 * - all quota lock management (i.e. global quota locks as well as per-ID
 *   locks)
 * - management of the quota pool configuration
 *
 * That's the structure MDT0 connects to in mdt_quota_init().
 */
struct qmt_device {
	/* Super-class: dt_device/lu_device for this master target */
	struct dt_device	qmt_dt_dev;

	/* service name of this qmt */
	char			qmt_svname[MAX_OBD_NAME];

	/* root directory for this qmt */
	struct dt_object	*qmt_root;

	/* Reference to the next device in the side stack.
	 * The child device is actually the OSD device where we store the
	 * quota master index files. */
	struct obd_export	*qmt_child_exp;
	struct dt_device	*qmt_child;
	/* pointer to ldlm namespace to be used for quota locks */
	struct ldlm_namespace	*qmt_ns;

	/* Hash table containing a qmt_pool_info structure for each pool
	 * this quota master is in charge of. We only have 2 pools in this
	 * hash for the time being:
	 * - one for quota management on the default metadata pool
	 * - one for quota management on the default data pool
	 *
	 * Once we support quota on non-default pools, more pools will be
	 * added to this hash table and pool master setup will have to be
	 * handled via configuration logs. */
	struct cfs_hash		*qmt_pool_hash;

	/* list of pools managed by this master target */
	struct list_head	qmt_pool_list;
	/* rw semaphore protecting the pool list */
	struct rw_semaphore	qmt_pool_lock;

	/* procfs root directory for this qmt */
	struct proc_dir_entry	*qmt_proc;

	/* dedicated thread in charge of space rebalancing */
	struct task_struct	*qmt_reba_task;

	/* list of lqe entries which need space rebalancing */
	struct list_head	qmt_reba_list;

	/* lock protecting the rebalancing list */
	spinlock_t		qmt_reba_lock;

	unsigned long		qmt_stopping:1; /* qmt is stopping */
};
#define QPI_MAXNAME	(LOV_MAXPOOLNAME + 1)

#define qmt_pool_global(qpi) \
	(!strncmp(qpi->qpi_name, GLB_POOL_NAME, \
		  strlen(GLB_POOL_NAME) + 1) ? true : false)
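/*
 * Illustrative check (a sketch, not an actual call site): the global
 * pool is the one named GLB_POOL_NAME ("0x0", defined below); any other
 * name denotes a named pool created via configuration.
 *
 *	if (qmt_pool_global(qpi))
 *		CDEBUG(D_QUOTA, "%s: global pool\n", qpi->qpi_name);
 */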
/* Draft for mdt pools */
union qmt_sarray {
	struct lu_tgt_pool	osts;
};
/* Since DOM support, data resources can exist
 * on both MDT and OST targets. */
enum {
	QMT_STYPE_MDT,
	QMT_STYPE_OST,
	QMT_STYPE_CNT
};

enum {
	/* set while recalc_thread is working */
	QPI_FLAG_RECALC_OFFSET,
};
/*
 * Per-pool quota information.
 * The qmt creates one such structure for each pool with quota enforced.
 * All the structures are kept in a list.
 * We currently only support the default data pool and the default
 * metadata pool.
 */
struct qmt_pool_info {
	/* chained list of all pools managed by the same qmt */
	struct list_head	qpi_linkage;

	/* could be LQUOTA_RES_MD or LQUOTA_RES_DT */
	int			qpi_rtype;
	char			qpi_name[QPI_MAXNAME];

	union qmt_sarray	qpi_sarr;
	/* recalculation thread pointer */
	struct task_struct	*qpi_recalc_task;
	/* rw semaphore to avoid acquire/release during
	 * pool recalculation */
	struct rw_semaphore	qpi_recalc_sem;
	unsigned long		qpi_flags;
	/* track users of this pool instance */
	atomic_t		qpi_ref;

	/* back pointer to the master target;
	 * immutable after creation */
	struct qmt_device	*qpi_qmt;
	/* pointers to the dt objects associated with the global indexes for
	 * both user and group quota */
	struct dt_object	*qpi_glb_obj[LL_MAXQUOTAS];

	/* A pool supports two different quota types: user and group quota.
	 * Each quota type has its own global index and lquota_entry hash
	 * table. */
	struct lquota_site	*qpi_site[LL_MAXQUOTAS];

	/* number of slaves registered for each quota type */
	int			qpi_slv_nr[QMT_STYPE_CNT][LL_MAXQUOTAS];

	/* reference on the lqe (ID 0) storing grace time */
	struct lquota_entry	*qpi_grace_lqe[LL_MAXQUOTAS];

	/* procfs root directory for this pool */
	struct proc_dir_entry	*qpi_proc;

	/* pool directory where all indexes related to this pool instance
	 * are stored */
	struct dt_object	*qpi_root;
	/* Global quota parameters which apply to all quota types */
	/* the least value of qunit */
	unsigned long		qpi_least_qunit;

	/* Least value of qunit when the soft limit is exceeded.
	 *
	 * When the soft limit is exceeded, qunit will be shrunk to
	 * least_qunit (1M for block limit), which results in a significant
	 * write performance drop since the client will turn to sync writes
	 * from then on.
	 *
	 * To keep write performance at an acceptable level, we choose to
	 * sacrifice grace time accuracy a bit and use a larger least_qunit
	 * when the soft limit is exceeded. It is (qpi_least_qunit * 4) by
	 * default, and users may enlarge it via procfs to get even better
	 * performance (at the cost of losing more grace time accuracy).
	 *
	 * See qmt_calc_softlimit().
	 */
	unsigned long		qpi_soft_least_qunit;
};
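/*
 * A minimal sketch of the default relation described above (illustrative
 * only; the actual assignment happens during pool setup):
 *
 *	qpi->qpi_soft_least_qunit = qpi->qpi_least_qunit * 4;
 */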
static inline int qpi_slv_nr(struct qmt_pool_info *pool, int qtype)
{
	int i, sum = 0;

	for (i = 0; i < QMT_STYPE_CNT; i++)
		sum += pool->qpi_slv_nr[i][qtype];

	return sum;
}
static inline int qpi_slv_nr_by_rtype(struct qmt_pool_info *pool, int qtype)
{
	if (pool->qpi_rtype == LQUOTA_RES_DT)
		/* this should become qpi_slv_nr() once MDTs can be
		 * added to quota pools */
		return pool->qpi_slv_nr[QMT_STYPE_OST][qtype];

	return pool->qpi_slv_nr[QMT_STYPE_MDT][qtype];
}
/*
 * Helper routines and prototypes
 */
/* helper routine to find the qmt_pool_info associated with a lquota_entry */
static inline struct qmt_pool_info *lqe2qpi(struct lquota_entry *lqe)
{
	LASSERT(lqe_is_master(lqe));
	return (struct qmt_pool_info *)lqe->lqe_site->lqs_parent;
}
/* return true if someone holds either a read or a write lock on the lqe */
static inline bool lqe_is_locked(struct lquota_entry *lqe)
{
	LASSERT(lqe_is_master(lqe));
	if (down_write_trylock(&lqe->lqe_sem) == 0)
		return true;
	lqe_write_unlock(lqe);
	return false;
}
/* values to be restored if something goes wrong during lqe writeback */
struct qmt_lqe_restore {
	__u64	qlr_hardlimit;
	__u64	qlr_softlimit;
	__u64	qlr_gracetime;
	__u64	qlr_granted;
	__u64	qlr_qunit;
};
#define QMT_MAX_POOL_NUM	16
/* Common data shared by qmt handlers */
struct qmt_thread_info {
	union lquota_rec	qti_rec;
	union lquota_id		qti_id;
	char			qti_buf[MTI_NAME_MAXLEN];
	struct lu_fid		qti_fid;
	struct ldlm_res_id	qti_resid;
	union ldlm_gl_desc	qti_gl_desc;
	struct quota_body	qti_body;

	struct qmt_lqe_restore	qti_lqes_rstr_small[QMT_MAX_POOL_NUM];
	struct qmt_lqe_restore	*qti_lqes_rstr;

	struct qmt_pool_info	*qti_pools_small[QMT_MAX_POOL_NUM];
	/* pointer to an array of qpis used when
	 * qti_pools_cnt > QMT_MAX_POOL_NUM */
	struct qmt_pool_info	**qti_pools;
	/* the number of pools in qti_pools */
	int			qti_pools_cnt;
	/* maximum number of elements in the qti_pools array;
	 * QMT_MAX_POOL_NUM by default */
	int			qti_pools_num;

	int			qti_glbl_lqe_idx;
	/* the same scheme is used for lqes ... */
	struct lquota_entry	*qti_lqes_small[QMT_MAX_POOL_NUM];
	/* pointer to an array of lqes used when
	 * qti_lqes_cnt > QMT_MAX_POOL_NUM */
	struct lquota_entry	**qti_lqes;
	/* the number of lqes in qti_lqes */
	int			qti_lqes_cnt;
	/* maximum number of elements in the qti_lqes array;
	 * QMT_MAX_POOL_NUM by default */
	int			qti_lqes_num;
};
extern struct lu_context_key qmt_thread_key;

/* helper function to extract qmt_thread_info from current environment */
static inline
struct qmt_thread_info *qmt_info(const struct lu_env *env)
{
	return lu_env_info(env, &qmt_thread_key);
}
#define qti_lqes_num(env)	(qmt_info(env)->qti_lqes_num)
#define qti_lqes_cnt(env)	(qmt_info(env)->qti_lqes_cnt)
#define qti_glbl_lqe_idx(env)	(qmt_info(env)->qti_glbl_lqe_idx)
#define qti_lqes(env)		(qti_lqes_num(env) > QMT_MAX_POOL_NUM ? \
				 qmt_info(env)->qti_lqes : \
				 qmt_info(env)->qti_lqes_small)
#define qti_lqes_rstr(env)	(qti_lqes_num(env) > QMT_MAX_POOL_NUM ? \
				 qmt_info(env)->qti_lqes_rstr : \
				 qmt_info(env)->qti_lqes_rstr_small)
#define qti_lqes_glbl(env)	(qti_lqes(env)[qti_glbl_lqe_idx(env)])
#define qti_lqe_hard(env, i)	(qti_lqes(env)[i]->lqe_hardlimit)
#define qti_lqe_soft(env, i)	(qti_lqes(env)[i]->lqe_softlimit)
#define qti_lqe_granted(env, i)	(qti_lqes(env)[i]->lqe_granted)
#define qti_lqe_qunit(env, i)	(qti_lqes(env)[i]->lqe_qunit)
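/*
 * Rough usage sketch for the per-environment lqe array (illustrative
 * only; real call sites differ):
 *
 *	__u64 granted = 0;
 *	int i;
 *
 *	qti_lqes_init(env);
 *	... populate the array via qmt_pool_lqes_lookup() ...
 *	for (i = 0; i < qti_lqes_cnt(env); i++)
 *		granted += qti_lqe_granted(env, i);
 *	qti_lqes_fini(env);
 */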
/* helper routine to convert a lu_device into a qmt_device */
static inline struct qmt_device *lu2qmt_dev(struct lu_device *ld)
{
	return container_of_safe(lu2dt_dev(ld), struct qmt_device, qmt_dt_dev);
}

/* helper routine to convert a qmt_device into a lu_device */
static inline struct lu_device *qmt2lu_dev(struct qmt_device *qmt)
{
	return &qmt->qmt_dt_dev.dd_lu_dev;
}
#define LQE_ROOT(lqe)		(lqe2qpi(lqe)->qpi_root)
#define LQE_GLB_OBJ(lqe)	(lqe2qpi(lqe)->qpi_glb_obj[lqe_qtype(lqe)])
/* helper function returning the grace time to use for a given lquota entry */
static inline __u64 qmt_lqe_grace(struct lquota_entry *lqe)
{
	struct qmt_pool_info	*pool = lqe2qpi(lqe);
	struct lquota_entry	*grace_lqe;

	grace_lqe = pool->qpi_grace_lqe[lqe_qtype(lqe)];
	LASSERT(grace_lqe != NULL);

	return grace_lqe->lqe_gracetime;
}
static inline void qmt_restore(struct lquota_entry *lqe,
			       struct qmt_lqe_restore *restore)
{
	lqe->lqe_hardlimit = restore->qlr_hardlimit;
	lqe->lqe_softlimit = restore->qlr_softlimit;
	lqe->lqe_gracetime = restore->qlr_gracetime;
	lqe->lqe_granted = restore->qlr_granted;
	lqe->lqe_qunit = restore->qlr_qunit;
}
static inline void qmt_restore_lqes(const struct lu_env *env)
{
	int i;

	for (i = 0; i < qti_lqes_cnt(env); i++)
		qmt_restore(qti_lqes(env)[i], &qti_lqes_rstr(env)[i]);
}
#define QMT_GRANT(lqe, slv, cnt)             \
	do {                                 \
		(lqe)->lqe_granted += (cnt); \
		(slv) += (cnt);              \
	} while (0)
#define QMT_REL(lqe, slv, cnt)               \
	do {                                 \
		(lqe)->lqe_granted -= (cnt); \
		(slv) -= (cnt);              \
	} while (0)
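/*
 * Illustrative use (a sketch; "slv_granted" is a hypothetical per-slave
 * counter and the caller is assumed to hold the lqe write lock):
 *
 *	lqe_write_lock(lqe);
 *	QMT_GRANT(lqe, slv_granted, count);
 *	lqe_write_unlock(lqe);
 */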
/* helper routine returning true when the hard limit is reached */
static inline bool qmt_hard_exhausted(struct lquota_entry *lqe)
{
	if (lqe->lqe_hardlimit != 0 && lqe->lqe_granted >= lqe->lqe_hardlimit)
		return true;
	return false;
}

/* helper routine returning true when the soft limit is reached */
static inline bool qmt_soft_exhausted(struct lquota_entry *lqe, __u64 now)
{
	if (lqe->lqe_softlimit != 0 && lqe->lqe_granted > lqe->lqe_softlimit &&
	    lqe->lqe_gracetime != 0 && now >= lqe->lqe_gracetime)
		return true;
	return false;
}
/* helper routine returning true when the id has run out of quota space:
 * - reached hardlimit, or
 * - reached softlimit and grace time already expired */
static inline bool qmt_space_exhausted(struct lquota_entry *lqe, __u64 now)
{
	return (qmt_hard_exhausted(lqe) || qmt_soft_exhausted(lqe, now));
}
static inline bool qmt_space_exhausted_lqes(const struct lu_env *env, __u64 now)
{
	bool exhausted = false;
	int i;

	for (i = 0; i < qti_lqes_cnt(env) && !exhausted; i++)
		exhausted |= qmt_space_exhausted(qti_lqes(env)[i], now);

	return exhausted;
}
/* helper routine clearing the default quota setting */
static inline void qmt_lqe_clear_default(struct lquota_entry *lqe)
{
	lqe->lqe_is_default = false;
	lqe->lqe_gracetime &= ~((__u64)LQUOTA_FLAG_DEFAULT <<
				LQUOTA_GRACE_BITS);
}
/* number of seconds to wait for slaves to release quota space after
 * rebalancing */
#define QMT_REBA_TIMEOUT 2

void qmt_pool_free(const struct lu_env *, struct qmt_pool_info *);
/*
 * Reference counter management for qmt_pool_info structures
 */
static inline void qpi_getref(struct qmt_pool_info *pool)
{
	atomic_inc(&pool->qpi_ref);
}

static inline void qpi_putref(const struct lu_env *env,
			      struct qmt_pool_info *pool)
{
	LASSERT(atomic_read(&pool->qpi_ref) > 0);
	if (atomic_dec_and_test(&pool->qpi_ref))
		qmt_pool_free(env, pool);
}
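/*
 * Typical lifetime sketch (illustrative; assumes qmt_pool_lookup() hands
 * back a referenced pool instance, or an error pointer on failure, and
 * that the caller drops the reference with qpi_putref() once done):
 *
 *	struct qmt_pool_info *pool;
 *
 *	pool = qmt_pool_lookup_glb(env, qmt, LQUOTA_RES_DT);
 *	if (!IS_ERR(pool)) {
 *		... use the pool ...
 *		qpi_putref(env, pool);
 *	}
 */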
void qmt_pool_fini(const struct lu_env *, struct qmt_device *);
int qmt_pool_init(const struct lu_env *, struct qmt_device *);
int qmt_pool_prepare(const struct lu_env *, struct qmt_device *,
		     struct dt_object *, char *);
int qmt_pool_new_conn(const struct lu_env *, struct qmt_device *,
		      struct lu_fid *, struct lu_fid *, __u64 *,
		      struct obd_uuid *);

#define GLB_POOL_NAME	"0x0"
#define qmt_pool_lookup_glb(env, qmt, type) \
		qmt_pool_lookup(env, qmt, type, NULL, -1, false)
#define qmt_pool_lookup_name(env, qmt, type, name) \
		qmt_pool_lookup(env, qmt, type, name, -1, false)
#define qmt_pool_lookup_arr(env, qmt, type, idx) \
		qmt_pool_lookup(env, qmt, type, NULL, idx, true)
struct qmt_pool_info *qmt_pool_lookup(const struct lu_env *env,
				      struct qmt_device *qmt,
				      int rtype,
				      char *pool_name,
				      int idx, bool add);
struct lquota_entry *qmt_pool_lqe_lookup(const struct lu_env *,
					 struct qmt_device *, int, int,
					 union lquota_id *, char *);
int qmt_pool_lqes_lookup(const struct lu_env *, struct qmt_device *, int,
			 int, int, union lquota_id *, char *, int);
int qmt_pool_lqes_lookup_spec(const struct lu_env *env, struct qmt_device *qmt,
			      int rtype, int qtype, union lquota_id *qid);
void qmt_lqes_sort(const struct lu_env *env);
int qmt_pool_new(struct obd_device *obd, char *poolname);
int qmt_pool_add(struct obd_device *obd, char *poolname, char *ostname);
int qmt_pool_rem(struct obd_device *obd, char *poolname, char *ostname);
int qmt_pool_del(struct obd_device *obd, char *poolname);
struct rw_semaphore *qmt_sarr_rwsem(struct qmt_pool_info *qpi);
int qmt_sarr_get_idx(struct qmt_pool_info *qpi, int arr_idx);
unsigned int qmt_sarr_count(struct qmt_pool_info *qpi);

extern struct lquota_entry_operations qmt_lqe_ops;
int qmt_lqe_set_default(const struct lu_env *env, struct qmt_pool_info *pool,
			struct lquota_entry *lqe, bool create_record);
struct thandle *qmt_trans_start_with_slv(const struct lu_env *,
					 struct lquota_entry *,
					 struct dt_object *,
					 bool);
struct thandle *qmt_trans_start(const struct lu_env *, struct lquota_entry *);
int qmt_glb_write_lqes(const struct lu_env *, struct thandle *, __u32, __u64 *);
int qmt_glb_write(const struct lu_env *, struct thandle *,
		  struct lquota_entry *, __u32, __u64 *);
int qmt_slv_write(const struct lu_env *, struct thandle *,
		  struct lquota_entry *, struct dt_object *, __u32, __u64 *,
		  __u64 *);
int qmt_slv_read(const struct lu_env *, union lquota_id *,
		 struct dt_object *, __u64 *);
int qmt_validate_limits(struct lquota_entry *, __u64, __u64);
bool qmt_adjust_qunit(const struct lu_env *, struct lquota_entry *);
bool qmt_adjust_edquot(struct lquota_entry *, __u64);

#define qmt_adjust_edquot_notify(env, qmt, now, qb_flags) \
	qmt_adjust_edquot_qunit_notify(env, qmt, now, true, false, qb_flags)
#define qmt_adjust_qunit_notify(env, qmt, qb_flags) \
	qmt_adjust_edquot_qunit_notify(env, qmt, 0, false, true, qb_flags)
#define qmt_adjust_and_notify(env, qmt, now, qb_flags) \
	qmt_adjust_edquot_qunit_notify(env, qmt, now, true, true, qb_flags)
bool qmt_adjust_edquot_qunit_notify(const struct lu_env *, struct qmt_device *,
				    __u64, bool, bool, __u32);
bool qmt_revalidate(const struct lu_env *, struct lquota_entry *);
void qmt_revalidate_lqes(const struct lu_env *, struct qmt_device *, __u32);
__u64 qmt_alloc_expand(struct lquota_entry *, __u64, __u64);
void qti_lqes_init(const struct lu_env *env);
int qti_lqes_add(const struct lu_env *env, struct lquota_entry *lqe);
void qti_lqes_del(const struct lu_env *env, int index);
void qti_lqes_fini(const struct lu_env *env);
int qti_lqes_min_qunit(const struct lu_env *env);
int qti_lqes_edquot(const struct lu_env *env);
int qti_lqes_restore_init(const struct lu_env *env);
void qti_lqes_restore_fini(const struct lu_env *env);
void qti_lqes_write_lock(const struct lu_env *env);
void qti_lqes_write_unlock(const struct lu_env *env);
struct lqe_glbl_data *qmt_alloc_lqe_gd(struct qmt_pool_info *, int);
void qmt_free_lqe_gd(struct lqe_glbl_data *);
void qmt_setup_lqe_gd(const struct lu_env *, struct qmt_device *,
		      struct lquota_entry *, struct lqe_glbl_data *, int);
#define qmt_seed_glbe_edquot(env, lqeg) \
		qmt_seed_glbe_all(env, lqeg, false, true)
#define qmt_seed_glbe_qunit(env, lqeg) \
		qmt_seed_glbe_all(env, lqeg, true, false)
#define qmt_seed_glbe(env, lqeg) \
		qmt_seed_glbe_all(env, lqeg, true, true)
void qmt_seed_glbe_all(const struct lu_env *, struct lqe_glbl_data *,
		       bool, bool);
int qmt_set_with_lqe(const struct lu_env *env, struct qmt_device *qmt,
		     struct lquota_entry *lqe, __u64 hard, __u64 soft,
		     __u64 time, __u32 valid, bool is_default, bool is_updated);
int qmt_dqacq0(const struct lu_env *, struct qmt_device *, struct obd_uuid *,
	       __u32, __u64, __u64, struct quota_body *);
int qmt_uuid2idx(struct obd_uuid *, int *);
int qmt_intent_policy(const struct lu_env *, struct lu_device *,
		      struct ptlrpc_request *, struct ldlm_lock **, int);
int qmt_lvbo_init(struct lu_device *, struct ldlm_resource *);
int qmt_lvbo_update(struct lu_device *, struct ldlm_resource *,
		    struct ptlrpc_request *, int);
int qmt_lvbo_size(struct lu_device *, struct ldlm_lock *);
int qmt_lvbo_fill(struct lu_device *, struct ldlm_lock *, void *, int);
int qmt_lvbo_free(struct lu_device *, struct ldlm_resource *);
int qmt_start_reba_thread(struct qmt_device *);
void qmt_stop_reba_thread(struct qmt_device *);
void qmt_glb_lock_notify(const struct lu_env *, struct lquota_entry *, __u64);
void qmt_id_lock_notify(struct qmt_device *, struct lquota_entry *);
#endif /* _QMT_INTERNAL_H */