4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; if not, write to the
18 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
19 * Boston, MA 02111-1307, USA
24 * Copyright (c) 2011, 2012, Intel, Inc.
25 * Use is subject to license terms.
27 * Author: Johann Lombardi <johann.lombardi@intel.com>
28 * Author: Niu Yawei <yawei.niu@intel.com>
32 # define EXPORT_SYMTAB
35 #define DEBUG_SUBSYSTEM S_LQUOTA
37 #include "qsd_internal.h"
40 * Completion function invoked when the global quota lock enqueue has completed
42 static void qsd_reint_completion(const struct lu_env *env,
43 struct qsd_qtype_info *qqi,
44 struct quota_body *req_qbody,
45 struct quota_body *rep_qbody,
46 struct lustre_handle *lockh,
47 union ldlm_wire_lvb *lvb,
50 struct qsd_instance *qsd = qqi->qqi_qsd;
51 __u64 *slv_ver = (__u64 *)arg;
55 CDEBUG_LIMIT(rc != -EAGAIN ? D_ERROR : D_QUOTA,
56 "%s: failed to enqueue global quota lock, glb fid:"
57 DFID", rc:%d\n", qsd->qsd_svname,
58 PFID(&req_qbody->qb_fid), rc);
62 CDEBUG(D_QUOTA, "%s: global quota lock successfully acquired, glb "
63 "fid:"DFID", glb ver:"LPU64", slv fid:"DFID", slv ver:"LPU64"\n",
64 qsd->qsd_svname, PFID(&req_qbody->qb_fid),
65 lvb->l_lquota.lvb_glb_ver, PFID(&rep_qbody->qb_slv_fid),
66 rep_qbody->qb_slv_ver);
68 *slv_ver = rep_qbody->qb_slv_ver;
69 memcpy(&qqi->qqi_slv_fid, &rep_qbody->qb_slv_fid,
70 sizeof(struct lu_fid));
71 lustre_handle_copy(&qqi->qqi_lockh, lockh);
/*
 * Reintegrate one quota ID: look up (or create) the in-memory lquota entry
 * for \a qid, refresh it from the record fetched from the master, then
 * update the local copy of the index.
 *
 * \param env    - lu environment
 * \param qqi    - quota-type info owning the site and index copies
 * \param global - true for a global-index record, false for a slave record
 * \param qid    - quota identifier the record belongs to
 * \param rec    - raw record (lquota_glb_rec or lquota_slv_rec per \a global)
 * \retval value of rc — error-handling lines are elided from this excerpt
 */
75 static int qsd_reint_qid(const struct lu_env *env, struct qsd_qtype_info *qqi,
76                          bool global, union lquota_id *qid, void *rec)
78         struct lquota_entry     *lqe;
        /* find or instantiate the entry for this ID on the quota site */
82         lqe = lqe_locate(env, qqi->qqi_site, qid);
86         LQUOTA_DEBUG(lqe, "reintegrating entry");
        /* refresh the in-memory entry from the fetched record */
88         rc = qsd_update_lqe(env, lqe, global, rec);
        /* then persist the record into the local index copy */
92         rc = qsd_update_index(env, qqi, qid, global, 0, rec);
/*
 * Walk the pages returned by a bulk index read and reintegrate every
 * key/record pair they contain via qsd_reint_qid().
 *
 * \param env       - lu environment
 * \param qqi       - quota-type info
 * \param ii        - index_info describing key/record sizes of the transfer
 * \param global    - true when processing the global index, false for slave
 * \param npages    - number of pages to process
 * \param need_swab - true when entries arrive in foreign endianness and
 *                    must be byte-swapped before use
 * NOTE(review): the elided part of the signature must also carry the page
 * array ("pages", used below) — confirm against the full source.
 */
98 static int qsd_reint_entries(const struct lu_env *env,
99                              struct qsd_qtype_info *qqi,
100                             struct idx_info *ii, bool global,
102                             unsigned int npages, bool need_swab)
104        struct qsd_thread_info  *qti = qsd_info(env);
105        struct qsd_instance     *qsd = qqi->qqi_qsd;
106        union lquota_id         *qid = &qti->qti_id;
111        CDEBUG(D_QUOTA, "%s: processing %d pages for %s index\n",
112               qsd->qsd_svname, npages, global ? "global" : "slave");
114        /* sanity check on the record size: it must match the fixed record
         * layout expected for the index flavor being transferred */
115        if ((global && ii->ii_recsize != sizeof(struct lquota_glb_rec)) ||
116            (!global && ii->ii_recsize != sizeof(struct lquota_slv_rec))) {
117                CERROR("%s: invalid record size (%d) for %s index\n",
118                       qsd->qsd_svname, ii->ii_recsize,
119                       global ? "global" : "slave");
        /* each packed entry is key followed immediately by record */
123        size = ii->ii_recsize + ii->ii_keysize;
125        for (i = 0; i < npages; i++) {
126                union lu_page   *lip = cfs_kmap(pages[i]);
                /* each CFS page may hold several LU_PAGE-sized sub-pages */
128                for (j = 0; j < LU_PAGE_COUNT; j++) {
131                                lustre_swab_lip_header(&lip->lp_idx);
                        /* reject pages without the index-page magic */
133                        if (lip->lp_idx.lip_magic != LIP_MAGIC) {
134                                CERROR("%s: invalid magic (%x != %x) for page "
135                                       "%d/%d while transferring %s index\n",
136                                       qsd->qsd_svname, lip->lp_idx.lip_magic,
137                                       LIP_MAGIC, i + 1, npages,
138                                       global ? "global" : "slave");
139                                GOTO(out, rc = -EINVAL);
142                        CDEBUG(D_QUOTA, "%s: processing page %d/%d with %d "
143                               "entries for %s index\n", qsd->qsd_svname, i + 1,
144                               npages, lip->lp_idx.lip_nr,
145                               global ? "global" : "slave");
                        /* iterate over the packed entries of this sub-page */
147                        for (k = 0; k < lip->lp_idx.lip_nr; k++) {
150                                entry = lip->lp_idx.lip_entries + k * size;
151                                memcpy(qid, entry, ii->ii_keysize); /* key */
152                                entry += ii->ii_keysize; /* value */
                                        /* swab the 64-bit key in place */
158                                        __swab64s(&qid->qid_uid);
159                                        /* quota records only include 64-bit
                                         * fields, so swab the record one
                                         * __u64 at a time (NOTE: continuation
                                         * of this comment is elided here) */
161                                        while (offset < ii->ii_recsize) {
164                                                offset += sizeof(__u64);
                                /* reintegrate this single ID/record pair */
168                                rc = qsd_reint_qid(env, qqi, global, qid,
176                cfs_kunmap(pages[i]);
/*
 * Fetch the global (or slave) quota index from the master over bulk RPC,
 * sanity-check the transfer, feed all entries to qsd_reint_entries() and
 * finally record the new index version on disk.
 * NOTE(review): elided listing — the "global" flag, loop variables, the
 * "fid"/"it"/"rc" declarations and the repeat-until-II_END_OFF loop
 * structure are only partially visible; confirm against the full source.
 */
183 static int qsd_reint_index(const struct lu_env *env, struct qsd_qtype_info *qqi,
186        struct qsd_thread_info  *qti = qsd_info(env);
187        struct qsd_instance     *qsd = qqi->qqi_qsd;
188        struct idx_info         *ii = &qti->qti_ii;
190        cfs_page_t              **pages = NULL;
191        unsigned int             npages, pg_cnt;
192        __u64                    start_hash = 0, ver = 0;
193        bool                     need_swab = false;
        /* pick the index to transfer: global copy or this slave's copy */
197        fid = global ? &qqi->qqi_fid : &qqi->qqi_slv_fid;
199        /* let's do a 1MB bulk */
200        npages = min_t(unsigned int, PTLRPC_MAX_BRW_SIZE, 1 << 20);
201        npages /= CFS_PAGE_SIZE;
203        /* allocate pages for bulk index read */
204        OBD_ALLOC(pages, npages * sizeof(*pages));
206                GOTO(out, rc = -ENOMEM);
207        for (i = 0; i < npages; i++) {
208                pages[i] = cfs_alloc_page(CFS_ALLOC_STD);
209                if (pages[i] == NULL)
210                        GOTO(out, rc = -ENOMEM);
214        /* initialize index_info request with FID of global index */
215        memset(ii, 0, sizeof(*ii));
216        memcpy(&ii->ii_fid, fid, sizeof(*fid));
217        ii->ii_magic = IDX_INFO_MAGIC;
218        ii->ii_flags = II_FL_NOHASH;
219        ii->ii_count = npages * LU_PAGE_COUNT;
220        ii->ii_hash_start = start_hash;
222        /* send bulk request to quota master to read global index */
223        rc = qsd_fetch_index(env, qsd->qsd_exp, ii, npages, pages, &need_swab);
225                CWARN("%s: failed to fetch index for "DFID". %d\n",
226                      qsd->qsd_svname, PFID(fid), rc);
230        /* various sanity checks on the reply before trusting its pages */
231        if (ii->ii_magic != IDX_INFO_MAGIC) {
232                CERROR("%s: invalid magic in index transfer %x != %x\n",
233                       qsd->qsd_svname, ii->ii_magic, IDX_INFO_MAGIC);
234                GOTO(out, rc = -EPROTO);
        /* variable-size or non-unique keys are unexpected: warn but proceed */
236        if ((ii->ii_flags & II_FL_VARKEY) != 0)
237                CWARN("%s: II_FL_VARKEY is set on index transfer for fid "DFID
238                      ", it shouldn't be\n", qsd->qsd_svname, PFID(fid));
239        if ((ii->ii_flags & II_FL_NONUNQ) != 0)
240                CWARN("%s: II_FL_NONUNQ is set on index transfer for fid "DFID
241                      ", it shouldn't be\n", qsd->qsd_svname, PFID(fid));
        /* keys are quota IDs, which must be 64-bit: hard error otherwise */
242        if (ii->ii_keysize != sizeof(__u64)) {
243                CERROR("%s: invalid key size reported on index transfer for "
244                       "fid "DFID", %u != %u\n", qsd->qsd_svname, PFID(fid),
245                       ii->ii_keysize, (int)sizeof(__u64));
246                GOTO(out, rc = -EPROTO);
        /* version 0 with a non-empty index is suspicious: warn only */
248        if (ii->ii_version == 0 && ii->ii_count != 0)
249                CWARN("%s: index version for fid "DFID" is 0, but index isn't "
250                      "empty (%d)\n", qsd->qsd_svname, PFID(fid), ii->ii_count);
252        CDEBUG(D_QUOTA, "%s: reintegration process for fid "DFID" successfully "
253               "fetched %s index, count = %d\n", qsd->qsd_svname,
254               PFID(fid), global ? "global" : "slave", ii->ii_count);
257        /* record version associated with the first bulk transfer */
258        ver = ii->ii_version;
        /* convert returned entry count into a count of CFS pages
         * (round up to LU_PAGE granularity, then scale by the
         * CFS-page / LU-page size ratio) */
260        pg_cnt = (ii->ii_count + (LU_PAGE_COUNT) - 1);
261        pg_cnt >>= CFS_PAGE_SHIFT - LU_PAGE_SHIFT;
263        if (pg_cnt > npages) {
264                CERROR("%s: master returned more pages than expected, %u > %u"
265                       "\n", qsd->qsd_svname, pg_cnt, npages);
        /* reintegrate every entry held in the fetched pages */
269        rc = qsd_reint_entries(env, qqi, ii, global, pages, pg_cnt, need_swab);
        /* more hash space remaining: another bulk round is needed starting
         * at the hash where this transfer stopped */
273        if (ii->ii_hash_end != II_END_OFF) {
274                start_hash = ii->ii_hash_end;
        /* release the bulk pages and the page array */
279        for (i = 0; i < npages; i++)
280                if (pages[i] != NULL)
281                        cfs_free_page(pages[i]);
282        OBD_FREE(pages, npages * sizeof(*pages));
285        /* Update index version */
287        rc = qsd_write_version(env, qqi, ver, global);
289                CERROR("%s: write version "LPU64" to "DFID" failed. "
290                       "%d\n", qsd->qsd_svname, ver, PFID(fid), rc);
/*
 * Iterate over the local copy of the global index and, for every enforced
 * ID (root excluded), refresh disk usage and report/adjust quota with the
 * master.  This is step 4 of the reintegration procedure.
 * NOTE(review): elided listing — the enclosing do/while loop, the "it",
 * "key" and "rc" declarations and the iterator fini path are not visible
 * here; confirm against the full source.
 */
296 static int qsd_reconciliation(const struct lu_env *env,
297                               struct qsd_qtype_info *qqi)
299        struct qsd_thread_info  *qti = qsd_info(env);
300        struct qsd_instance     *qsd = qqi->qqi_qsd;
301        const struct dt_it_ops  *iops;
304        struct lquota_entry     *lqe;
305        union lquota_id         *qid = &qti->qti_id;
309        LASSERT(qqi->qqi_glb_obj != NULL);
        /* use the dt index iterator ops of the local global-index object */
310        iops = &qqi->qqi_glb_obj->do_index_ops->dio_it;
312        it = iops->init(env, qqi->qqi_glb_obj, 0, BYPASS_CAPA);
314                CWARN("%s: Initialize it for "DFID" failed. %ld\n",
315                      qsd->qsd_svname, PFID(&qqi->qqi_fid), PTR_ERR(it));
        /* position the iterator on the first record */
319        rc = iops->load(env, it, 0);
321                CWARN("%s: Load first entry for "DFID" failed. %d\n",
322                      qsd->qsd_svname, PFID(&qqi->qqi_fid), rc);
324        } else if (rc == 0) {
                /* load() found no exact match: step to the first entry, an
                 * exhausted (or failed) iterator ends the walk */
325                rc = iops->next(env, it);
327                        GOTO(out, rc = (rc < 0) ? rc : 0);
331                key = iops->key(env, it);
333                        CWARN("%s: Error key for "DFID". %ld\n",
334                              qsd->qsd_svname, PFID(&qqi->qqi_fid),
336                        GOTO(out, rc = PTR_ERR(key));
339                /* skip the root user/group */
340                if (*((__u64 *)key) == 0)
        /* keys of this index are 64-bit quota IDs */
343                qid->qid_uid = *((__u64 *)key);
345                lqe = lqe_locate(env, qqi->qqi_site, qid);
347                        CWARN("%s: failed to locate lqe. "DFID", %ld\n",
348                              qsd->qsd_svname, PFID(&qqi->qqi_fid),
350                        GOTO(out, rc = PTR_ERR(lqe));
        /* re-read current disk usage for this ID */
353                rc = qsd_refresh_usage(env, lqe);
355                        CWARN("%s: failed to get usage. "DFID", %d\n",
356                              qsd->qsd_svname, PFID(&qqi->qqi_fid), rc);
        /* acquire/release space with the master as needed */
361                rc = qsd_adjust(env, lqe);
364                        CWARN("%s: failed to report quota. "DFID", %d\n",
365                              qsd->qsd_svname, PFID(&qqi->qqi_fid), rc);
        /* advance to the next enforced ID */
369                rc = iops->next(env, it);
371                        CWARN("%s: failed to parse index, ->next error:%d "DFID
372                              "\n", qsd->qsd_svname, rc, PFID(&qqi->qqi_fid));
/*
 * Return 1 when the connection to the quota master is established
 * (qsd_exp_valid set), 0 otherwise.  Reads the flag under qsd_lock.
 */
384 static int qsd_connected(struct qsd_instance *qsd)
388        read_lock(&qsd->qsd_lock);
389        connected = qsd->qsd_exp_valid ? 1 : 0;
390        read_unlock(&qsd->qsd_lock);
/*
 * Return 1 when the qsd instance has been started (qsd_started set),
 * 0 otherwise.  Reads the flag under qsd_lock.
 */
395 static int qsd_started(struct qsd_instance *qsd)
399        read_lock(&qsd->qsd_lock);
400        started = qsd->qsd_started ? 1 : 0;
401        read_unlock(&qsd->qsd_lock);
407 * Routine executed by the reintegration thread.
/*
 * Body of the reintegration thread, one per quota type.  Sequence:
 *  1. wait for the connection to the master, then enqueue (or revalidate)
 *     the global quota lock;
 *  2. refetch the global index if its version changed;
 *  3. refetch the slave index if its version changed;
 *  4. once the qsd instance is started, reconcile usage for every
 *     enforced ID.
 * NOTE(review): elided listing — env allocation, several error branches
 * and the final RETURN are not visible here; confirm against the full
 * source.
 *
 * \param args - the qsd_qtype_info this thread reintegrates
 */
409 static int qsd_reint_main(void *args)
412        struct qsd_thread_info  *qti;
413        struct qsd_qtype_info   *qqi = (struct qsd_qtype_info *)args;
414        struct qsd_instance     *qsd = qqi->qqi_qsd;
415        struct ptlrpc_thread    *thread = &qqi->qqi_reint_thread;
416        struct l_wait_info       lwi = { 0 };
420        cfs_daemonize("qsd_reint");
422        CDEBUG(D_QUOTA, "%s: Starting reintegration thread for "DFID"\n",
423               qsd->qsd_svname, PFID(&qqi->qqi_fid));
        /* pin the qqi while the thread runs */
426        lu_ref_add(&qqi->qqi_reference, "reint_thread", thread);
        /* tell the starter we are up and running */
428        thread_set_flags(thread, SVC_RUNNING);
429        cfs_waitq_signal(&thread->t_ctl_waitq);
433                GOTO(out, rc = -ENOMEM);
435        /* initialize environment */
436        rc = lu_env_init(env, LCT_DT_THREAD);
441        /* wait for the connection to master established */
442        l_wait_event(thread->t_ctl_waitq,
443                     qsd_connected(qsd) || !thread_is_running(thread), &lwi);
445        /* Step 1: enqueue global index lock */
446        if (!thread_is_running(thread))
447                GOTO(out_env_init, rc = 0);
449        LASSERT(qsd->qsd_exp != NULL);
        /* reintegration only runs when at least one index copy is stale */
450        LASSERT(qqi->qqi_glb_uptodate == 0 || qqi->qqi_slv_uptodate == 0);
452        memset(&qti->qti_lvb, 0, sizeof(qti->qti_lvb));
454        read_lock(&qsd->qsd_lock);
455        /* check whether we already own a global quota lock for this type */
456        if (lustre_handle_is_used(&qqi->qqi_lockh) &&
457            ldlm_lock_addref_try(&qqi->qqi_lockh, qsd_glb_einfo.ei_mode) == 0) {
458                read_unlock(&qsd->qsd_lock);
459                /* force refresh of global & slave index copy */
460                qti->qti_lvb.l_lquota.lvb_glb_ver = ~0ULL;
461                qti->qti_slv_ver = ~0ULL;
463                /* no valid lock found, let's enqueue a new one */
464                read_unlock(&qsd->qsd_lock);
466                memset(&qti->qti_body, 0, sizeof(qti->qti_body));
467                memcpy(&qti->qti_body.qb_fid, &qqi->qqi_fid,
468                       sizeof(qqi->qqi_fid));
                /* qsd_reint_completion() fills in qti_lvb/qti_slv_ver and
                 * stores the lock handle on success */
470                rc = qsd_intent_lock(env, qsd->qsd_exp, &qti->qti_body, true,
471                                     IT_QUOTA_CONN, qsd_reint_completion, qqi,
472                                     &qti->qti_lvb, (void *)&qti->qti_slv_ver);
474                        GOTO(out_env_init, rc);
476        CDEBUG(D_QUOTA, "%s: glb_ver:"LPU64"/"LPU64",slv_ver:"LPU64"/"
477               LPU64"\n", qsd->qsd_svname,
478               qti->qti_lvb.l_lquota.lvb_glb_ver, qqi->qqi_glb_ver,
479               qti->qti_slv_ver, qqi->qqi_slv_ver);
482        /* Step 2: reintegrate global index */
483        if (!thread_is_running(thread))
484                GOTO(out_lock, rc = 0);
        /* fault-injection hook used by tests to delay reintegration */
486        OBD_FAIL_TIMEOUT(OBD_FAIL_QUOTA_DELAY_REINT, 10);
        /* only refetch when the master's version differs from ours */
488        if (qqi->qqi_glb_ver != qti->qti_lvb.l_lquota.lvb_glb_ver) {
489                rc = qsd_reint_index(env, qqi, true);
491                        CWARN("%s: reint global for "DFID" failed. %d\n",
492                              qsd->qsd_svname, PFID(&qqi->qqi_fid), rc);
                /* versions already match: just mark the copy up to date */
496                qsd_bump_version(qqi, qqi->qqi_glb_ver, true);
499        /* Step 3: reintegrate slave index */
500        if (!thread_is_running(thread))
501                GOTO(out_lock, rc = 0);
503        if (qqi->qqi_slv_ver != qti->qti_slv_ver) {
504                rc = qsd_reint_index(env, qqi, false);
506                        CWARN("%s: Reint slave for "DFID" failed. %d\n",
507                              qsd->qsd_svname, PFID(&qqi->qqi_slv_fid), rc);
511                qsd_bump_version(qqi, qqi->qqi_slv_ver, false);
514        /* wait for the qsd instance to be started before reconciling
         * (NOTE(review): comment in original says "connection to master"
         * but the condition below waits on qsd_started()) */
515        l_wait_event(thread->t_ctl_waitq,
516                     qsd_started(qsd) || !thread_is_running(thread), &lwi);
518        if (!thread_is_running(thread))
519                GOTO(out_lock, rc = 0);
521        /* Step 4: start reconciliation for each enforced ID */
522        rc = qsd_reconciliation(env, qqi);
524                CWARN("%s: reconciliation failed. "DFID", %d\n",
525                      qsd->qsd_svname, PFID(&qti->qti_fid), rc);
        /* drop the reference taken on the global quota lock */
529        ldlm_lock_decref(&qqi->qqi_lockh, qsd_glb_einfo.ei_mode);
535        write_lock(&qsd->qsd_lock);
537        write_unlock(&qsd->qsd_lock);
540        lu_ref_del(&qqi->qqi_reference, "reint_thread", thread);
        /* signal qsd_stop_reint_thread() that we are done */
542        thread_set_flags(thread, SVC_STOPPED);
543        cfs_waitq_signal(&thread->t_ctl_waitq);
/*
 * Request the reintegration thread of \a qqi to stop and wait (without
 * timeout) until it has actually set SVC_STOPPED.
 */
547 void qsd_stop_reint_thread(struct qsd_qtype_info *qqi)
549        struct ptlrpc_thread    *thread = &qqi->qqi_reint_thread;
550        struct l_wait_info       lwi = { 0 };
552        if (!thread_is_stopped(thread)) {
                /* ask the thread to stop and wake it up */
553                thread_set_flags(thread, SVC_STOPPING);
554                cfs_waitq_signal(&thread->t_ctl_waitq);
                /* block until qsd_reint_main() reports SVC_STOPPED */
556                l_wait_event(thread->t_ctl_waitq,
557                             thread_is_stopped(thread), &lwi);
/*
 * cfs_hash iteration callback: accumulate the number of in-flight quota
 * requests of each lquota entry into the int pointed to by \a data.
 * Used by qsd_pending_updates() to detect pending dqacq requests.
 */
561 static int qsd_entry_iter_cb(cfs_hash_t *hs, cfs_hash_bd_t *bd,
562                              cfs_hlist_node_t *hnode, void *data)
564        struct lquota_entry     *lqe;
565        int                     *pending = (int *)data;
567        lqe = cfs_hlist_entry(hnode, struct lquota_entry, lqe_hash);
        /* entry must be referenced while sitting in the hash */
568        LASSERT(atomic_read(&lqe->lqe_ref) > 0);
        /* read lqe_pending_req under the entry lock (lqe_read_lock call is
         * elided from this excerpt) */
571        *pending += lqe->lqe_pending_req;
572        lqe_read_unlock(lqe);
/*
 * Check whether any updates related to \a qqi are still pending: queued
 * quota adjustments, queued index/global updates, or in-flight dqacq
 * requests.  Returns true when reintegration should be delayed until
 * everything has been flushed.
 */
577 static bool qsd_pending_updates(struct qsd_qtype_info *qqi)
579        struct qsd_instance     *qsd = qqi->qqi_qsd;
580        struct qsd_upd_rec      *upd;
581        struct lquota_entry     *lqe, *n;
583        bool                     updates = false;
586        /* any pending quota adjust? */
587        spin_lock(&qsd->qsd_adjust_lock);
588        cfs_list_for_each_entry_safe(lqe, n, &qsd->qsd_adjust_list, lqe_link) {
589                if (lqe2qqi(lqe) == qqi) {
                        /* drop this type's entries from the adjust list */
590                        cfs_list_del_init(&lqe->lqe_link);
594        spin_unlock(&qsd->qsd_adjust_lock);
596        /* any pending updates? */
597        read_lock(&qsd->qsd_lock);
598        cfs_list_for_each_entry(upd, &qsd->qsd_upd_list, qur_link) {
599                if (upd->qur_qqi == qqi) {
600                        read_unlock(&qsd->qsd_lock);
601                        CDEBUG(D_QUOTA, "%s: pending %s updates for type:%d.\n",
603                               upd->qur_global ? "global" : "slave",
605                        GOTO(out, updates = true);
608        read_unlock(&qsd->qsd_lock);
610        /* any pending quota request?  Sum in-flight requests over all
         * entries of this type's site via qsd_entry_iter_cb() */
611        cfs_hash_for_each_safe(qqi->qqi_site->lqs_hash, qsd_entry_iter_cb,
614                CDEBUG(D_QUOTA, "%s: pending dqacq for type:%d.\n",
615                       qsd->qsd_svname, qqi->qqi_qtype);
        /* something is still outstanding: caller will retry reint later */
621        CERROR("%s: Delaying reintegration for qtype:%d until pending "
622               "updates are flushed.\n",
623               qsd->qsd_svname, qqi->qqi_qtype);
627 int qsd_start_reint_thread(struct qsd_qtype_info *qqi)
629 struct ptlrpc_thread *thread = &qqi->qqi_reint_thread;
630 struct qsd_instance *qsd = qqi->qqi_qsd;
631 struct l_wait_info lwi = { 0 };
635 /* don't bother to do reintegration when quota isn't enabled */
636 if (!qsd_type_enabled(qqi->qqi_qsd, qqi->qqi_qtype))
639 /* check if the reintegration has already started or finished */
640 write_lock(&qsd->qsd_lock);
642 if ((qqi->qqi_glb_uptodate && qqi->qqi_slv_uptodate) ||
643 qqi->qqi_reint || qsd->qsd_stopping) {
644 write_unlock(&qsd->qsd_lock);
649 write_unlock(&qsd->qsd_lock);
651 /* there could be some unfinished global or index entry updates
652 * (very unlikely), to avoid them messing up with the reint
653 * procedure, we just return and try to re-start reint later. */
654 if (qsd_pending_updates(qqi)) {
655 write_lock(&qsd->qsd_lock);
657 write_unlock(&qsd->qsd_lock);
661 rc = cfs_create_thread(qsd_reint_main, (void *)qqi, 0);
663 thread_set_flags(thread, SVC_STOPPED);
664 write_lock(&qsd->qsd_lock);
666 write_unlock(&qsd->qsd_lock);
670 l_wait_event(thread->t_ctl_waitq,
671 thread_is_running(thread) || thread_is_stopped(thread),