4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
31 * lustre/mdd/mdd_trans.c
33 * Lustre Metadata Server (mdd) routines
35 * Author: Wang Di <wangdi@clusterfs.com>
38 #define DEBUG_SUBSYSTEM S_MDS
40 #include <linux/kthread.h>
42 #include <obd_class.h>
43 #include <lprocfs_status.h>
44 #include <lustre_mds.h>
45 #include <lustre_barrier.h>
47 #include "mdd_internal.h"
/* mdd_trans_create(): allocate a transaction handle on the underlying
 * OSD (mdd_child) for a metadata modification.
 * Returns the new thandle, ERR_PTR(-EINPROGRESS) while the write barrier
 * is raised, or the ERR_PTR produced by the child's ->dt_trans_create.
 * NOTE(review): interior lines are elided from this view (e.g. the local
 * 'th' declaration, the final return and braces). */
49 struct thandle *mdd_trans_create(const struct lu_env *env,
50 struct mdd_device *mdd)
53 struct lu_ucred *uc = lu_ucred_check(env);
55 /* If blocked by the write barrier, then return "-EINPROGRESS"
56 * to the caller. Usually, such error will be forwarded to the
57 * client, and the expected behaviour is to re-try such modify
58 * RPC some time later until the barrier is thawed or expired. */
59 if (unlikely(!barrier_entry(mdd->mdd_bottom)))
60 return ERR_PTR(-EINPROGRESS);
62 th = mdd_child_ops(mdd)->dt_trans_create(env, mdd->mdd_child);
/* Mark the handle quota-exempt for CAP_SYS_RESOURCE credentials —
 * presumably so privileged operations are not blocked by exhausted
 * quotas; confirm against the consumers of th_ignore_quota. */
63 if (!IS_ERR(th) && uc)
64 th->th_ignore_quota = !!md_capable(uc, CAP_SYS_RESOURCE);
/* mdd_trans_start(): thin pass-through starting the previously created
 * transaction on the child OSD device; returns its status.
 * NOTE(review): the 'struct thandle *th' parameter line and the braces
 * are elided from this view. */
69 int mdd_trans_start(const struct lu_env *env, struct mdd_device *mdd,
72 return mdd_child_ops(mdd)->dt_trans_start(env, mdd->mdd_child, th);
/* Accumulator passed through mdd_changelog_gc_cb() while scanning
 * registered ChangeLog users to select one candidate for forced
 * deregistration.
 * NOTE(review): the mcgc_id and mcgc_maxtime members referenced by the
 * callback are elided from this view, as is the closing brace. */
75 struct mdd_changelog_gc {
76 struct mdd_device *mcgc_mdd; /* device whose users are being scanned */
/* largest record backlog seen so far among old-format (no timestamp)
 * user records; non-zero means an old-format user has been selected
 * and takes priority over time-based candidates */
79 __u64 mcgc_maxindexes;
80 char mcgc_name[CHANGELOG_USER_NAMELEN_FULL]; /* name of selected user */
83 /* return first registered ChangeLog user idle since too long
84 * use ChangeLog's user plain LLOG mtime for this */
/* llog_cat_process() callback: inspect one ChangeLog-user record and,
 * if this user looks "more idle" than the current candidate in *data
 * (a struct mdd_changelog_gc), remember its id and name so the caller
 * can force-deregister it.
 * Selection rules (per the comments below): old-format records without
 * a timestamp win over time-based candidates; otherwise the largest
 * idle time past mdd_changelog_max_idle_time wins.
 * NOTE(review): interior lines are elided from this view (RETURN paths,
 * 'idle_indexes' declaration, else branch, braces). */
85 static int mdd_changelog_gc_cb(const struct lu_env *env,
86 struct llog_handle *llh,
87 struct llog_rec_hdr *hdr, void *data)
89 struct llog_changelog_user_rec2 *rec;
90 struct mdd_changelog_gc *mcgc = data;
91 struct mdd_device *mdd = mcgc->mcgc_mdd;
/* only plain (leaf) llogs carry user records; skip anything else */
95 if ((llh->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN) == 0)
98 rec = container_of(hdr, typeof(*rec), cur_hdr);
100 /* find oldest idle user, based on last record update/cancel time (new
101 * behavior), or for old user records, last record index vs current
102 * ChangeLog index. Late users with old record format will be treated
103 * first as we assume they could be idle since longer
105 if (rec->cur_time != 0) {
106 u32 time_now = (u32)ktime_get_real_seconds();
107 timeout_t time_out = rec->cur_time +
108 mdd->mdd_changelog_max_idle_time;
/* time_after32() below handles 32-bit wrap-around of these values */
109 timeout_t idle_time = time_now - rec->cur_time;
111 /* treat oldest idle user first, and if no old format user
112 * has been already selected
114 if (time_after32(time_now, time_out) &&
115 idle_time > mcgc->mcgc_maxtime &&
116 mcgc->mcgc_maxindexes == 0) {
117 mcgc->mcgc_maxtime = idle_time;
118 mcgc->mcgc_id = rec->cur_id;
119 mdd_chlg_username(rec, mcgc->mcgc_name,
120 sizeof(mcgc->mcgc_name));
123 /* old user record with no idle time stamp, so use empirical
124 * method based on its current index/position
/* backlog of records this user has not yet consumed */
128 idle_indexes = mdd->mdd_cl.mc_index - rec->cur_endrec;
130 /* treat user with the oldest/smallest current index first */
131 if (idle_indexes >= mdd->mdd_changelog_max_idle_indexes &&
132 idle_indexes > mcgc->mcgc_maxindexes) {
133 mcgc->mcgc_maxindexes = idle_indexes;
134 mcgc->mcgc_id = rec->cur_id;
135 mdd_chlg_username(rec, mcgc->mcgc_name,
136 sizeof(mcgc->mcgc_name));
143 /* recover space from long-term inactive ChangeLog users */
/* Kthread entry point (spawned from mdd_trans_stop): scan the ChangeLog
 * user catalog with mdd_changelog_gc_cb(), force-deregister the selected
 * idle user via mdd_changelog_user_purge(), then clear mc_gc_task so a
 * new GC thread may be started later.
 * NOTE(review): interior lines are elided from this view (env/OBD_ALLOC
 * allocation, the scan loop structure, out_ctxt/out_env/out labels and
 * the final return). */
144 static int mdd_chlg_garbage_collect(void *data)
146 struct mdd_device *mdd = data;
147 struct lu_env *env = NULL;
149 struct llog_ctxt *ctxt;
/* publish ourselves as the running GC task (slot was claimed via
 * cmpxchg in mdd_trans_stop before kthread_run) */
153 mdd->mdd_cl.mc_gc_task = current;
155 CDEBUG(D_HA, "%s: ChangeLog garbage collect thread start with PID %d\n",
156 mdd2obd_dev(mdd)->obd_name, current->pid);
160 GOTO(out, rc = -ENOMEM);
162 rc = lu_env_init(env, LCT_MD_THREAD);
/* fresh candidate state for each scan of the user catalog */
167 struct mdd_changelog_gc mcgc = {
170 .mcgc_maxindexes = 0,
173 ctxt = llog_get_context(mdd2obd_dev(mdd),
174 LLOG_CHANGELOG_USER_ORIG_CTXT);
/* sanity: the users llog must be a catalog before processing it */
176 (ctxt->loc_handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) == 0)
177 GOTO(out_ctxt, rc = -ENXIO);
179 rc = llog_cat_process(env, ctxt->loc_handle,
180 mdd_changelog_gc_cb, &mcgc, 0, 0);
/* stop when the scan failed or no idle candidate was selected */
181 if (rc != 0 || !mcgc.mcgc_name[0])
/* non-zero mcgc_maxindexes means an old-format user was chosen by
 * record backlog; otherwise the choice was made by idle time */
185 if (mcgc.mcgc_maxindexes != 0)
186 CWARN("%s: Force deregister of ChangeLog user %s idle with more than %llu unprocessed records\n",
187 mdd2obd_dev(mdd)->obd_name, mcgc.mcgc_name,
188 mcgc.mcgc_maxindexes);
190 CWARN("%s: Force deregister of ChangeLog user %s idle since more than %us\n",
191 mdd2obd_dev(mdd)->obd_name, mcgc.mcgc_name,
194 mdd_changelog_user_purge(env, mdd, mcgc.mcgc_id);
196 if (kthread_should_stop())
197 GOTO(out_env, rc = 0);
/* release the GC slot under mc_lock so mdd_trans_stop can spawn a
 * new collector thread next time one is needed */
211 spin_lock(&mdd->mdd_cl.mc_lock);
212 mdd->mdd_cl.mc_gc_task = MDD_CHLG_GC_NONE;
213 spin_unlock(&mdd->mdd_cl.mc_lock);
/* mdd_trans_stop(): record \a result on the handle, stop the transaction
 * on the child OSD, drop the write-barrier reference taken in
 * mdd_trans_create(), then — if changelogs are on and GC was flagged as
 * needed — atomically claim the GC slot and spawn the collector kthread.
 * NOTE(review): this definition runs past the end of the visible chunk
 * ('rc' declaration, kthread_run() name argument, closing braces and the
 * final return are elided). */
218 int mdd_trans_stop(const struct lu_env *env, struct mdd_device *mdd,
219 int result, struct thandle *handle)
223 handle->th_result = result;
224 rc = mdd_child_ops(mdd)->dt_trans_stop(env, mdd->mdd_child, handle);
/* paired with barrier_entry() in mdd_trans_create() */
225 barrier_exit(mdd->mdd_bottom);
227 /* bottom half of changelog garbage-collection mechanism, started
228 * from mdd_changelog_store(). This is required, as running a
229 * kthead can't occur during a journal transaction is being filled
230 * because otherwise a deadlock can happen if memory reclaim is
231 * triggered by kthreadd when forking the new thread, and thus
232 * I/Os could be attempted to the same device from shrinkers
233 * requiring a new journal transaction to be started when current
234 * could never complete (LU-10680).
/* cmpxchg NEED->START guarantees at most one thread wins the right
 * to spawn the collector, even under concurrent trans_stop calls */
236 if (unlikely(mdd->mdd_cl.mc_flags & CLM_ON &&
237 cmpxchg(&mdd->mdd_cl.mc_gc_task, MDD_CHLG_GC_NEED,
238 MDD_CHLG_GC_START) == MDD_CHLG_GC_NEED)) {
239 /* XXX we may want to cmpxchg() only if MDD_CHLG_GC_NEED
240 * to save its cost in the frequent case and have an extra
241 * if/test cost in the rare case where we need to spawn?
243 struct task_struct *gc_task;
244 struct obd_device *obd = mdd2obd_dev(mdd);
246 gc_task = kthread_run(mdd_chlg_garbage_collect, mdd,
248 if (IS_ERR(gc_task)) {
249 CERROR("%s: cannot start ChangeLog garbage collection "
250 "thread: rc = %ld\n", obd->obd_name,
/* spawn failed: release the slot so a later trans_stop retries */
252 mdd->mdd_cl.mc_gc_task = MDD_CHLG_GC_NONE;
254 CDEBUG(D_HA, "%s: a ChangeLog garbage collection "
255 "thread has been started\n", obd->obd_name);
259 /* if operation failed, return \a result, otherwise return status of