/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2012, 2014, Intel Corporation.
 * Use is subject to license terms.
 *
 * Author: Johann Lombardi <johann.lombardi@intel.com>
 * Author: Niu Yawei <yawei.niu@intel.com>
 */

#define DEBUG_SUBSYSTEM S_LQUOTA

#include <linux/kthread.h>
#include <lustre_dlm.h>
#include <obd_class.h>

#include "qmt_internal.h"
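
/*
 * This file gathers the lock-related handlers of the quota master target
 * (QMT): the intent policy serving DQACQ/CONN requests, the lock value block
 * (lvb) operations for global and per-ID quota resources, glimpse callback
 * generation, and the rebalance thread which pushes new qunit values and
 * edquot flags to slaves.
 */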

/* intent policy function called from mdt_intent_opc() when the intent is of
 * quota type */
int qmt_intent_policy(const struct lu_env *env, struct lu_device *ld,
                      struct ptlrpc_request *req, struct ldlm_lock **lockp,
                      int flags)
{
        struct qmt_device *qmt = lu2qmt_dev(ld);
        struct ldlm_intent *it;
        struct quota_body *reqbody;
        struct quota_body *repbody;
        struct obd_uuid *uuid;
        struct lquota_lvb *lvb;
        struct ldlm_resource *res = (*lockp)->l_resource;
        int rc, lvb_len;
        ENTRY;

        req_capsule_extend(&req->rq_pill, &RQF_LDLM_INTENT_QUOTA);
        req_capsule_set_size(&req->rq_pill, &RMF_DLM_LVB, RCL_SERVER,
                             ldlm_lvbo_size(*lockp));

        /* extract quota body and intent opc */
        it = req_capsule_client_get(&req->rq_pill, &RMF_LDLM_INTENT);
        if (it == NULL)
                RETURN(err_serious(-EFAULT));

        reqbody = req_capsule_client_get(&req->rq_pill, &RMF_QUOTA_BODY);
        if (reqbody == NULL)
                RETURN(err_serious(-EFAULT));

        /* prepare reply */
        rc = req_capsule_server_pack(&req->rq_pill);
        if (rc != 0) {
                CERROR("Can't pack response, rc %d\n", rc);
                RETURN(err_serious(rc));
        }

        repbody = req_capsule_server_get(&req->rq_pill, &RMF_QUOTA_BODY);
        if (repbody == NULL)
                RETURN(err_serious(-EFAULT));

        uuid = &(*lockp)->l_export->exp_client_uuid;
        switch (it->opc) {
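
        /* two intent operations can come in on a quota lock: DQACQ to
         * acquire or release space against an already-granted per-ID lock,
         * and CONN for the initial connection of a slave to the master
         * (see the two cases below) */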
        case IT_QUOTA_DQACQ: {
                struct lquota_entry *lqe;
                struct ldlm_lock *lock;

                if (res->lr_name.name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] == 0)
                        /* acquire on global lock? something is wrong ... */
                        GOTO(out, rc = -EPROTO);

                /* verify global lock isn't stale */
                if (!lustre_handle_is_used(&reqbody->qb_glb_lockh))
                        GOTO(out, rc = -ENOLCK);

                lock = ldlm_handle2lock(&reqbody->qb_glb_lockh);
                if (lock == NULL)
                        GOTO(out, rc = -ENOLCK);
                LDLM_LOCK_PUT(lock);

                lqe = res->lr_lvb_data;
                LASSERT(lqe != NULL);
                lqe_getref(lqe);

                /* acquire quota space */
                rc = qmt_dqacq0(env, lqe, qmt, uuid, reqbody->qb_flags,
                                reqbody->qb_count, reqbody->qb_usage,
                                repbody);
                lqe_putref(lqe);
                if (rc)
                        GOTO(out, rc);
                break;
        }

        case IT_QUOTA_CONN:
                /* new connection from slave */

                if (res->lr_name.name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] != 0)
                        /* connection on per-ID lock? something is wrong ... */
                        GOTO(out, rc = -EPROTO);

                rc = qmt_pool_new_conn(env, qmt, &reqbody->qb_fid,
                                       &repbody->qb_slv_fid,
                                       &repbody->qb_slv_ver, uuid);
                if (rc)
                        GOTO(out, rc);
                break;

        default:
                CERROR("%s: invalid intent opcode: "LPU64"\n", qmt->qmt_svname,
                       it->opc);
                GOTO(out, rc = err_serious(-EINVAL));
        }

        /* on success, pack lvb in reply */
        lvb = req_capsule_server_get(&req->rq_pill, &RMF_DLM_LVB);
        lvb_len = ldlm_lvbo_size(*lockp);
        lvb_len = ldlm_lvbo_fill(*lockp, lvb, lvb_len);
        if (lvb_len < 0)
                GOTO(out, rc = lvb_len);

        req_capsule_shrink(&req->rq_pill, &RMF_DLM_LVB, lvb_len, RCL_SERVER);
out:
        RETURN(rc);
}
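
/*
 * LVB operations: on the QMT, the lvb of a quota resource does not live in a
 * separately allocated buffer; lr_lvb_data points either to the lquota entry
 * (per-ID lock) or to the global index object (global lock). These handlers
 * are assumed to be registered with the DLM namespace at QMT setup so that
 * the DLM invokes them when quota resources are created, glimpsed, packed
 * and freed.
 */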

/*
 * Initialize quota LVB associated with quota indexes.
 * Called with res->lr_lvb_sem held
 */
int qmt_lvbo_init(struct lu_device *ld, struct ldlm_resource *res)
{
        struct lu_env *env;
        struct qmt_thread_info *qti;
        struct qmt_device *qmt = lu2qmt_dev(ld);
        int pool_id, pool_type, qtype;
        int rc;
        ENTRY;

        LASSERT(res != NULL);

        if (res->lr_type != LDLM_PLAIN)
                RETURN(-ENOTSUPP);

        if (res->lr_lvb_data ||
            res->lr_name.name[LUSTRE_RES_ID_SEQ_OFF] != FID_SEQ_QUOTA_GLB)
                RETURN(0);

        OBD_ALLOC_PTR(env);
        if (env == NULL)
                RETURN(-ENOMEM);

        /* initialize environment */
        rc = lu_env_init(env, LCT_MD_THREAD);
        if (rc) {
                OBD_FREE_PTR(env);
                RETURN(rc);
        }
        qti = qmt_info(env);

        /* extract global index FID and quota identifier */
        fid_extract_from_quota_res(&qti->qti_fid, &qti->qti_id, &res->lr_name);

        /* sanity check the global index FID */
        rc = lquota_extract_fid(&qti->qti_fid, &pool_id, &pool_type, &qtype);
        if (rc) {
                CERROR("can't extract pool information from FID "DFID"\n",
                       PFID(&qti->qti_fid));
                GOTO(out, rc);
        }

        if (res->lr_name.name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] != 0) {
                /* no ID quota lock associated with UID/GID 0 or with a seq 0,
                 * we are thus dealing with an ID lock. */
                struct lquota_entry *lqe;

                /* Find the quota entry associated with the quota id */
                lqe = qmt_pool_lqe_lookup(env, qmt, pool_id, pool_type, qtype,
                                          &qti->qti_id);
                if (IS_ERR(lqe))
                        GOTO(out, rc = PTR_ERR(lqe));

                /* store reference to lqe in lr_lvb_data */
                res->lr_lvb_data = lqe;
                LQUOTA_DEBUG(lqe, "initialized res lvb");
        } else {
                struct dt_object *obj;

                /* lookup global index */
                obj = dt_locate(env, qmt->qmt_child, &qti->qti_fid);
                if (IS_ERR(obj))
                        GOTO(out, rc = PTR_ERR(obj));
                if (!dt_object_exists(obj)) {
                        lu_object_put(env, &obj->do_lu);
                        GOTO(out, rc = -ENOENT);
                }

                /* store reference to global index object in lr_lvb_data */
                res->lr_lvb_data = obj;
                CDEBUG(D_QUOTA, DFID" initialized lvb\n", PFID(&qti->qti_fid));
        }

        res->lr_lvb_len = sizeof(struct lquota_lvb);
        EXIT;
out:
        lu_env_fini(env);
        OBD_FREE_PTR(env);
        return rc;
}
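
/* note: the reference stored in lr_lvb_data above (lqe or global index
 * object) is dropped in qmt_lvbo_free() below when the resource is
 * cleaned up */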

/*
 * Update LVB associated with the global quota index.
 * This function is called from the DLM itself after a glimpse callback, in
 * which case a valid ptlrpc request is passed.
 */
int qmt_lvbo_update(struct lu_device *ld, struct ldlm_resource *res,
                    struct ptlrpc_request *req, int increase_only)
{
        struct lu_env *env;
        struct qmt_thread_info *qti;
        struct qmt_device *qmt = lu2qmt_dev(ld);
        struct lquota_entry *lqe;
        struct lquota_lvb *lvb;
        struct ldlm_lock *lock;
        struct obd_export *exp;
        int rc = 0;
        ENTRY;

        LASSERT(res != NULL);
        LASSERT(req != NULL);

        if (res->lr_type != LDLM_PLAIN)
                RETURN(-ENOTSUPP);

        if (res->lr_name.name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] == 0)
                /* no need to update lvb for global quota locks */
                RETURN(0);

        lvb = req_capsule_server_swab_get(&req->rq_pill, &RMF_DLM_LVB,
                                          lustre_swab_lquota_lvb);
        if (lvb == NULL) {
                CERROR("%s: failed to extract lvb from request\n",
                       qmt->qmt_svname);
                RETURN(-EFAULT);
        }

        lqe = res->lr_lvb_data;
        LASSERT(lqe != NULL);
        lqe_getref(lqe);

        LQUOTA_DEBUG(lqe, "releasing:"LPU64" may release:"LPU64,
                     lvb->lvb_id_rel, lvb->lvb_id_may_rel);

        if (lvb->lvb_id_rel == 0) {
                /* nothing to release */
                if (lvb->lvb_id_may_rel != 0)
                        /* but might still release later ... */
                        lqe->lqe_may_rel += lvb->lvb_id_may_rel;
                GOTO(out_lqe, rc = 0);
        }

        /* allocate environment */
        OBD_ALLOC_PTR(env);
        if (env == NULL)
                GOTO(out_lqe, rc = -ENOMEM);

        /* initialize environment */
        rc = lu_env_init(env, LCT_MD_THREAD);
        if (rc)
                GOTO(out_env, rc);
        qti = qmt_info(env);

        /* The request is a glimpse callback which was sent via the
         * reverse import to the slave. What we care about here is the
         * export associated with the slave and req->rq_export is
         * definitely not what we are looking for (it is actually set to
         * NULL here).
         * Therefore we extract the lock from the request argument
         * and use lock->l_export. */
        lock = ldlm_request_lock(req);
        if (IS_ERR(lock)) {
                CERROR("%s: failed to get lock from request!\n",
                       qmt->qmt_svname);
                GOTO(out_env_init, rc = PTR_ERR(lock));
        }

        exp = class_export_get(lock->l_export);
        if (exp == NULL) {
                CERROR("%s: failed to get export from lock!\n",
                       qmt->qmt_svname);
                GOTO(out_env_init, rc = -EFAULT);
        }

        /* release quota space */
        rc = qmt_dqacq0(env, lqe, qmt, &exp->exp_client_uuid,
                        QUOTA_DQACQ_FL_REL, lvb->lvb_id_rel, 0, &qti->qti_body);
        if (rc || qti->qti_body.qb_count != lvb->lvb_id_rel)
                LQUOTA_ERROR(lqe, "failed to release quota space on glimpse "
                             LPU64"!="LPU64" rc:%d\n", qti->qti_body.qb_count,
                             lvb->lvb_id_rel, rc);
        class_export_put(exp);
        if (rc)
                GOTO(out_env_init, rc);
        EXIT;
out_env_init:
        lu_env_fini(env);
out_env:
        OBD_FREE_PTR(env);
out_lqe:
        lqe_putref(lqe);
        return rc;
}

/*
 * Report size of lvb to ldlm layer in order to allocate lvb buffer.
 * As far as quota locks are concerned, the size is static and is the same
 * for both global and per-ID locks which share the same lvb format.
 */
int qmt_lvbo_size(struct lu_device *ld, struct ldlm_lock *lock)
{
        return sizeof(struct lquota_lvb);
}

/*
 * Fill request buffer with quota lvb
 */
int qmt_lvbo_fill(struct lu_device *ld, struct ldlm_lock *lock, void *lvb,
                  int lvblen)
{
        struct ldlm_resource *res = lock->l_resource;
        struct lquota_lvb *qlvb = lvb;
        ENTRY;

        LASSERT(res != NULL);

        if (res->lr_type != LDLM_PLAIN || res->lr_lvb_data == NULL ||
            res->lr_name.name[LUSTRE_RES_ID_SEQ_OFF] != FID_SEQ_QUOTA_GLB)
                RETURN(-EINVAL);

        if (res->lr_name.name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] != 0) {
                /* no ID quota lock associated with UID/GID 0 or with a seq 0,
                 * we are thus dealing with an ID lock. */
                struct lquota_entry *lqe = res->lr_lvb_data;

                /* return current qunit value & edquot flags in lvb */
                lqe_getref(lqe);
                qlvb->lvb_id_qunit = lqe->lqe_qunit;
                qlvb->lvb_flags = 0;
                if (lqe->lqe_edquot)
                        qlvb->lvb_flags = LQUOTA_FL_EDQUOT;
                lqe_putref(lqe);
        } else {
                /* global quota lock */
                struct lu_env *env;
                int rc;
                struct dt_object *obj = res->lr_lvb_data;

                OBD_ALLOC_PTR(env);
                if (env == NULL)
                        RETURN(-ENOMEM);

                /* initialize environment */
                rc = lu_env_init(env, LCT_LOCAL);
                if (rc) {
                        OBD_FREE_PTR(env);
                        RETURN(rc);
                }

                /* return current version of global index */
                qlvb->lvb_glb_ver = dt_version_get(env, obj);

                lu_env_fini(env);
                OBD_FREE_PTR(env);
        }

        RETURN(sizeof(struct lquota_lvb));
}

/*
 * Free lvb associated with a given ldlm resource
 * we don't really allocate a lvb, lr_lvb_data just points to
 * the appropriate backend structures.
 */
int qmt_lvbo_free(struct lu_device *ld, struct ldlm_resource *res)
{
        ENTRY;

        if (res->lr_lvb_data == NULL)
                RETURN(0);

        if (res->lr_name.name[LUSTRE_RES_ID_QUOTA_SEQ_OFF] != 0) {
                struct lquota_entry *lqe = res->lr_lvb_data;

                /* release lqe reference */
                lqe_putref(lqe);
        } else {
                struct dt_object *obj = res->lr_lvb_data;
                struct lu_env *env;
                int rc;

                OBD_ALLOC_PTR(env);
                if (env == NULL)
                        RETURN(-ENOMEM);

                /* initialize environment */
                rc = lu_env_init(env, LCT_LOCAL);
                if (rc) {
                        OBD_FREE_PTR(env);
                        RETURN(rc);
                }

                /* release object reference */
                lu_object_put(env, &obj->do_lu);
                lu_env_fini(env);
                OBD_FREE_PTR(env);
        }

        res->lr_lvb_data = NULL;
        res->lr_lvb_len = 0;

        RETURN(0);
}

typedef int (*qmt_glimpse_cb_t)(const struct lu_env *, struct qmt_device *,
                                struct obd_uuid *, union ldlm_gl_desc *,
                                void *);
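
/* A glimpse callback returns a positive value when the slave should be
 * glimpsed, 0 when it should be skipped, and a negative error code on
 * failure; as qmt_glimpse_lock() below shows, a failure is logged but the
 * slave is still notified. */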

/*
 * Send glimpse callback to slaves holding a lock on resource \res.
 * This is used to notify slaves of new quota settings or to claim quota space
 * back.
 *
 * \param env  - is the environment passed by the caller
 * \param qmt  - is the quota master target
 * \param res  - is the dlm resource associated with the quota object
 * \param desc - is the glimpse descriptor to pack in glimpse callback
 * \param cb   - is the callback function called on every lock and determines
 *               whether a glimpse should be issued
 * \param arg  - is an opaque parameter passed to the callback function
 */
static int qmt_glimpse_lock(const struct lu_env *env, struct qmt_device *qmt,
                            struct ldlm_resource *res, union ldlm_gl_desc *desc,
                            qmt_glimpse_cb_t cb, void *arg)
{
        struct list_head *tmp, *pos;
        struct list_head gl_list = LIST_HEAD_INIT(gl_list);
        int rc = 0;
        ENTRY;

        lock_res(res);
        /* scan list of granted locks */
        list_for_each(pos, &res->lr_granted) {
                struct ldlm_glimpse_work *work;
                struct ldlm_lock *lock;
                struct obd_uuid *uuid;

                lock = list_entry(pos, struct ldlm_lock, l_res_link);
                LASSERT(lock->l_export);
                uuid = &lock->l_export->exp_client_uuid;

                if (cb != NULL) {
                        rc = cb(env, qmt, uuid, desc, arg);
                        if (rc == 0)
                                /* slave should not be notified */
                                continue;
                        if (rc < 0)
                                /* something wrong happened, we still notify */
                                CERROR("%s: callback function failed to "
                                       "determine whether slave %s should be "
                                       "notified (%d)\n", qmt->qmt_svname,
                                       obd_uuid2str(uuid), rc);
                }

                OBD_ALLOC_PTR(work);
                if (work == NULL) {
                        CERROR("%s: failed to notify %s\n", qmt->qmt_svname,
                               obd_uuid2str(uuid));
                        continue;
                }

                list_add_tail(&work->gl_list, &gl_list);
                work->gl_lock = LDLM_LOCK_GET(lock);
                work->gl_flags = 0;
                work->gl_desc = desc;
        }
        unlock_res(res);

        if (list_empty(&gl_list)) {
                CDEBUG(D_QUOTA, "%s: nobody to notify\n", qmt->qmt_svname);
                RETURN(0);
        }

        /* issue glimpse callbacks to all connected slaves */
        rc = ldlm_glimpse_locks(res, &gl_list);
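
        /* any work item still sitting on gl_list at this point presumably
         * had its glimpse AST fail (assumption: ldlm_glimpse_locks() consumes
         * the entries it successfully processed); log and clean them up */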
        list_for_each_safe(pos, tmp, &gl_list) {
                struct ldlm_glimpse_work *work;

                work = list_entry(pos, struct ldlm_glimpse_work, gl_list);

                list_del(&work->gl_list);
                CERROR("%s: failed to notify %s of new quota settings\n",
                       qmt->qmt_svname,
                       obd_uuid2str(&work->gl_lock->l_export->exp_client_uuid));
                LDLM_LOCK_RELEASE(work->gl_lock);
                OBD_FREE_PTR(work);
        }

        RETURN(rc);
}

/*
 * Send glimpse request to all global quota locks to push new quota setting to
 * slaves.
 *
 * \param env - is the environment passed by the caller
 * \param lqe - is the lquota entry which has new settings
 * \param ver - is the version associated with the setting change
 */
void qmt_glb_lock_notify(const struct lu_env *env, struct lquota_entry *lqe,
                         __u64 ver)
{
        struct qmt_thread_info *qti = qmt_info(env);
        struct qmt_pool_info *pool = lqe2qpi(lqe);
        struct ldlm_resource *res = NULL;
        int rc;
        ENTRY;

        lquota_generate_fid(&qti->qti_fid, pool->qpi_key & 0x0000ffff,
                            pool->qpi_key >> 16, lqe->lqe_site->lqs_qtype);

        /* send glimpse callback to notify slaves of new quota settings */
        qti->qti_gl_desc.lquota_desc.gl_id = lqe->lqe_id;
        qti->qti_gl_desc.lquota_desc.gl_flags = 0;
        qti->qti_gl_desc.lquota_desc.gl_hardlimit = lqe->lqe_hardlimit;
        qti->qti_gl_desc.lquota_desc.gl_softlimit = lqe->lqe_softlimit;
        qti->qti_gl_desc.lquota_desc.gl_time = lqe->lqe_gracetime;
        qti->qti_gl_desc.lquota_desc.gl_ver = ver;

        /* look up ldlm resource associated with global index */
        fid_build_reg_res_name(&qti->qti_fid, &qti->qti_resid);
        res = ldlm_resource_get(pool->qpi_qmt->qmt_ns, NULL, &qti->qti_resid,
                                LDLM_PLAIN, 0);
        if (res == NULL) {
                /* this might happen if no slaves have enqueued global quota
                 * locks yet */
                LQUOTA_DEBUG(lqe, "failed to lookup ldlm resource associated "
                             "with "DFID, PFID(&qti->qti_fid));
                RETURN_EXIT;
        }

        rc = qmt_glimpse_lock(env, pool->qpi_qmt, res, &qti->qti_gl_desc,
                              NULL, NULL);
        ldlm_resource_putref(res);
        EXIT;
}

/* Callback function used to select locks that should be glimpsed when
 * broadcasting the new qunit value */
static int qmt_id_lock_cb(const struct lu_env *env, struct qmt_device *qmt,
                          struct obd_uuid *uuid, union ldlm_gl_desc *desc,
                          void *arg)
{
        struct obd_uuid *slv_uuid = arg;

        if (slv_uuid != NULL && obd_uuid_equals(uuid, slv_uuid))
                RETURN(0);
        RETURN(+1);
}
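
/* note: qmt_id_lock_cb() returns 0 for the slave whose uuid was passed in,
 * i.e. the one which just acquired space; presumably it already received the
 * new qunit value in its acquire reply and does not need a glimpse */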

/*
 * Send glimpse request on per-ID lock to push new qunit value to slave.
 *
 * \param env  - is the environment passed by the caller
 * \param qmt  - is the quota master target device
 * \param lqe  - is the lquota entry with the new qunit value
 * \param uuid - is the uuid of the slave acquiring space, if any
 */
static void qmt_id_lock_glimpse(const struct lu_env *env,
                                struct qmt_device *qmt,
                                struct lquota_entry *lqe, struct obd_uuid *uuid)
{
        struct qmt_thread_info *qti = qmt_info(env);
        struct qmt_pool_info *pool = lqe2qpi(lqe);
        struct ldlm_resource *res = NULL;
        int rc;
        ENTRY;

        if (!lqe->lqe_enforced)
                RETURN_EXIT;

        lquota_generate_fid(&qti->qti_fid, pool->qpi_key & 0x0000ffff,
                            pool->qpi_key >> 16, lqe->lqe_site->lqs_qtype);
        fid_build_quota_res_name(&qti->qti_fid, &lqe->lqe_id, &qti->qti_resid);
        res = ldlm_resource_get(qmt->qmt_ns, NULL, &qti->qti_resid, LDLM_PLAIN,
                                0);
        if (res == NULL) {
                /* this might legitimately happen if slaves haven't had the
                 * opportunity to enqueue quota lock yet. */
                LQUOTA_DEBUG(lqe, "failed to lookup ldlm resource for per-ID "
                             "lock "DFID, PFID(&qti->qti_fid));
                lqe_write_lock(lqe);
                if (lqe->lqe_revoke_time == 0 &&
                    lqe->lqe_qunit == pool->qpi_least_qunit)
                        lqe->lqe_revoke_time = cfs_time_current_64();
                lqe_write_unlock(lqe);
                RETURN_EXIT;
        }

        lqe_write_lock(lqe);
        /* The purpose of glimpse callback on per-ID lock is twofold:
         * - notify slaves of new qunit value and hope they will release some
         *   spare quota space in return
         * - notify slaves that master ran out of quota space and there is no
         *   need to send acquire request any more until further notice */

        /* fill glimpse descriptor with lqe settings */
        if (lqe->lqe_edquot)
                qti->qti_gl_desc.lquota_desc.gl_flags = LQUOTA_FL_EDQUOT;
        else
                qti->qti_gl_desc.lquota_desc.gl_flags = 0;
        qti->qti_gl_desc.lquota_desc.gl_qunit = lqe->lqe_qunit;

        if (lqe->lqe_revoke_time == 0 &&
            qti->qti_gl_desc.lquota_desc.gl_qunit == pool->qpi_least_qunit)
                /* reset lqe_may_rel, it will be updated on glimpse callback
                 * replies if needed */
                lqe->lqe_may_rel = 0;

        /* The rebalance thread is the only thread which can issue glimpses */
        LASSERT(!lqe->lqe_gl);
        lqe->lqe_gl = true;
        lqe_write_unlock(lqe);

        /* issue glimpse callback to slaves */
        rc = qmt_glimpse_lock(env, qmt, res, &qti->qti_gl_desc,
                              uuid ? qmt_id_lock_cb : NULL, (void *)uuid);

        lqe_write_lock(lqe);
        if (lqe->lqe_revoke_time == 0 &&
            qti->qti_gl_desc.lquota_desc.gl_qunit == pool->qpi_least_qunit &&
            lqe->lqe_qunit == pool->qpi_least_qunit) {
                lqe->lqe_revoke_time = cfs_time_current_64();
                qmt_adjust_edquot(lqe, cfs_time_current_sec());
        }
        LASSERT(lqe->lqe_gl);
        lqe->lqe_gl = false;
        lqe_write_unlock(lqe);

        ldlm_resource_putref(res);
        EXIT;
}

/*
 * Schedule a glimpse request on per-ID locks to push new qunit value or
 * edquot flag to quota slaves.
 *
 * \param qmt - is the quota master target device
 * \param lqe - is the lquota entry with the new qunit value
 */
void qmt_id_lock_notify(struct qmt_device *qmt, struct lquota_entry *lqe)
{
        bool added = false;
        ENTRY;

        lqe_getref(lqe);
        spin_lock(&qmt->qmt_reba_lock);
        if (!qmt->qmt_stopping && list_empty(&lqe->lqe_link)) {
                list_add_tail(&lqe->lqe_link, &qmt->qmt_reba_list);
                added = true;
        }
        spin_unlock(&qmt->qmt_reba_lock);

        if (added)
                wake_up(&qmt->qmt_reba_thread.t_ctl_waitq);
        else
                lqe_putref(lqe);
        EXIT;
}
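
/* note: the lqe reference taken in qmt_id_lock_notify() is handed over to
 * the rebalance thread, which is assumed to drop it once the glimpse has
 * been issued (see lqe_putref() in qmt_reba_thread() below) */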

/*
 * The rebalance thread is in charge of sending glimpse callbacks on per-ID
 * quota locks owned by slaves in order to notify them of:
 * - a qunit shrink in which case slaves might release quota space back in
 *   glimpse reply.
 * - a set/clear of the edquot flag used to cache the "quota exhausted" state
 *   of the master. When the flag is set, slaves know that there is no need to
 *   try to acquire quota from the master since the latter has already
 *   distributed all the space.
 */
static int qmt_reba_thread(void *arg)
{
        struct qmt_device *qmt = (struct qmt_device *)arg;
        struct ptlrpc_thread *thread = &qmt->qmt_reba_thread;
        struct l_wait_info lwi = { 0 };
        struct lu_env *env;
        struct lquota_entry *lqe, *tmp;
        int rc;
        ENTRY;

        OBD_ALLOC_PTR(env);
        if (env == NULL)
                RETURN(-ENOMEM);

        rc = lu_env_init(env, LCT_MD_THREAD);
        if (rc) {
                CERROR("%s: failed to init env.\n", qmt->qmt_svname);
                OBD_FREE_PTR(env);
                RETURN(rc);
        }

        thread_set_flags(thread, SVC_RUNNING);
        wake_up(&thread->t_ctl_waitq);

        while (1) {
                l_wait_event(thread->t_ctl_waitq,
                             !list_empty(&qmt->qmt_reba_list) ||
                             !thread_is_running(thread), &lwi);

                spin_lock(&qmt->qmt_reba_lock);
                list_for_each_entry_safe(lqe, tmp, &qmt->qmt_reba_list,
                                         lqe_link) {
                        list_del_init(&lqe->lqe_link);
                        spin_unlock(&qmt->qmt_reba_lock);

                        if (thread_is_running(thread))
                                qmt_id_lock_glimpse(env, qmt, lqe, NULL);

                        lqe_putref(lqe);
                        spin_lock(&qmt->qmt_reba_lock);
                }
                spin_unlock(&qmt->qmt_reba_lock);

                if (!thread_is_running(thread))
                        break;
        }

        lu_env_fini(env);
        OBD_FREE_PTR(env);
        thread_set_flags(thread, SVC_STOPPED);
        wake_up(&thread->t_ctl_waitq);
        RETURN(rc);
}

/*
 * Start rebalance thread. Called when the QMT is being set up.
 */
int qmt_start_reba_thread(struct qmt_device *qmt)
{
        struct ptlrpc_thread *thread = &qmt->qmt_reba_thread;
        struct l_wait_info lwi = { 0 };
        struct task_struct *task;
        ENTRY;

        task = kthread_run(qmt_reba_thread, (void *)qmt,
                           "qmt_reba_%s", qmt->qmt_svname);
        if (IS_ERR(task)) {
                CERROR("%s: failed to start rebalance thread (%ld)\n",
                       qmt->qmt_svname, PTR_ERR(task));
                thread_set_flags(thread, SVC_STOPPED);
                RETURN(PTR_ERR(task));
        }

        l_wait_event(thread->t_ctl_waitq,
                     thread_is_running(thread) || thread_is_stopped(thread),
                     &lwi);

        RETURN(0);
}

/*
 * Stop rebalance thread. Called when the QMT is about to shut down.
 */
void qmt_stop_reba_thread(struct qmt_device *qmt)
{
        struct ptlrpc_thread *thread = &qmt->qmt_reba_thread;

        if (!thread_is_stopped(thread)) {
                struct l_wait_info lwi = { 0 };

                thread_set_flags(thread, SVC_STOPPING);
                wake_up(&thread->t_ctl_waitq);

                l_wait_event(thread->t_ctl_waitq, thread_is_stopped(thread),
                             &lwi);
        }
        LASSERT(list_empty(&qmt->qmt_reba_list));
}