/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2012, Intel, Inc.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osp/osp_sync.c
 *
 * Lustre OST Proxy Device
 *
 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */
#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_MDS

#include <lustre_log.h>
#include "osp_internal.h"
static int osp_sync_id_traction_init(struct osp_device *d);
static void osp_sync_id_traction_fini(struct osp_device *d);
static __u32 osp_sync_id_get(struct osp_device *d, __u32 id);
static void osp_sync_remove_from_tracker(struct osp_device *d);
/*
 * this is the component of OSP implementing synchronization between MDS
 * and OST: it llogs all interesting changes (currently uid/gid changes and
 * object destroys) atomically, then makes sure the changes hit OST storage
 *
 * we have 4 queues of work:
 *
 * the first queue is the llog itself; once read, a change is stored in the
 * 2nd queue in the form of an RPC (but the RPC isn't fired yet).
 *
 * the second queue (opd_syn_waiting_for_commit) holds changes awaiting local
 * commit. once a change is committed locally, it migrates onto the 3rd queue.
 *
 * the third queue (opd_syn_committed_here) holds changes committed locally,
 * but not sent to OST (as the pipe can be full). once the pipe becomes
 * non-full, we take a change from the queue and fire the corresponding RPC.
 *
 * once an RPC is reported committed by OST (using the regular last_committed
 * mechanism) the change jumps into the 4th queue (opd_syn_committed_there);
 * now we can cancel the corresponding llog record and release the RPC
 *
 * opd_syn_changes is the number of unread llog records (to be processed).
 * notice this number doesn't include llog records from previous boots.
 * with OSP_SYN_THRESHOLD we try to batch processing a bit (TO BE IMPLEMENTED)
 *
 * opd_syn_rpc_in_progress is the number of requests in queues 2-4.
 * we control this with OSP_MAX_IN_PROGRESS so that OSP doesn't consume
 * too much memory -- how to deal with 1000s of OSTs? batching could help?
 *
 * opd_syn_rpc_in_flight is the number of RPCs in flight.
 * we control this with OSP_MAX_IN_FLIGHT
 */
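
/*
 * to illustrate, the life of a single change (a sketch of the flow
 * implemented below, not an exhaustive state machine):
 *
 *   osp_sync_add()                llog record written, id assigned
 *   tracker commit callback       id becomes committed locally
 *   osp_sync_process_queues()     record read back, RPC built and sent
 *   osp_sync_request_commit_cb()  OST reports the RPC committed
 *   osp_sync_process_committed()  llog record cancelled, RPC released
 */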
/* XXX: do math to learn a reasonable threshold;
 * should it be ~ the number of changes fitting in a bulk? */
#define OSP_SYN_THRESHOLD	10
#define OSP_MAX_IN_FLIGHT	8
#define OSP_MAX_IN_PROGRESS	4096

#define OSP_JOB_MAGIC		0x26112005
static inline int osp_sync_running(struct osp_device *d)
{
	return !!(d->opd_syn_thread.t_flags & SVC_RUNNING);
}

static inline int osp_sync_stopped(struct osp_device *d)
{
	return !!(d->opd_syn_thread.t_flags & SVC_STOPPED);
}
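
/*
 * a "new job" exists when there are locally committed ids that have not
 * been processed yet, or when records left from previous boots are still
 * being replayed (opd_syn_prev_done == 0)
 */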
static inline int osp_sync_has_new_job(struct osp_device *d)
{
	return ((d->opd_syn_last_processed_id < d->opd_syn_last_used_id) &&
		(d->opd_syn_last_processed_id < d->opd_syn_last_committed_id))
		|| (d->opd_syn_prev_done == 0);
}
static inline int osp_sync_low_in_progress(struct osp_device *d)
{
	return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress;
}

static inline int osp_sync_low_in_flight(struct osp_device *d)
{
	return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight;
}
static inline int osp_sync_has_work(struct osp_device *d)
{
	/* has new/old changes and low in-progress? */
	if (osp_sync_has_new_job(d) && osp_sync_low_in_progress(d) &&
	    osp_sync_low_in_flight(d) && d->opd_imp_connected)
		return 1;

	/* has remotely committed? */
	if (!cfs_list_empty(&d->opd_syn_committed_there))
		return 1;

	return 0;
}
#define osp_sync_check_for_work(d)			\
{							\
	if (osp_sync_has_work(d)) {			\
		cfs_waitq_signal(&d->opd_syn_waitq);	\
	}						\
}

void __osp_sync_check_for_work(struct osp_device *d)
{
	osp_sync_check_for_work(d);
}
static inline int osp_sync_can_process_new(struct osp_device *d,
					   struct llog_rec_hdr *rec)
{
	LASSERT(d);

	if (!osp_sync_low_in_progress(d))
		return 0;
	if (!osp_sync_low_in_flight(d))
		return 0;
	if (!d->opd_imp_connected)
		return 0;
	if (d->opd_syn_prev_done == 0)
		return 1;
	if (d->opd_syn_changes == 0)
		return 0;
	if (rec == NULL || rec->lrh_id <= d->opd_syn_last_committed_id)
		return 1;
	return 0;
}
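
/*
 * declares that a llog record describing the change will be added within
 * transaction @th; only the record length depends on the change type here
 */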
int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
			 llog_op_type type, struct thandle *th)
{
	struct osp_thread_info	*osi = osp_env_info(env);
	struct osp_device	*d = lu2osp_dev(o->opo_obj.do_lu.lo_dev);
	struct llog_ctxt	*ctxt;
	int			 rc;

	ENTRY;

	/* it's a layering violation, to access internals of th,
	 * but we can do this as a sanity check, for a while */
	LASSERT(th->th_dev == d->opd_storage);

	switch (type) {
	case MDS_UNLINK64_REC:
		osi->osi_hdr.lrh_len = sizeof(struct llog_unlink64_rec);
		break;
	case MDS_SETATTR64_REC:
		osi->osi_hdr.lrh_len = sizeof(struct llog_setattr64_rec);
		break;
	default:
		LBUG();
	}

	/* we want ->dt_trans_start() to allocate per-thandle structure */
	th->th_tags |= LCT_OSP_THREAD;

	ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
	if (ctxt == NULL)
		RETURN(-ENOMEM);

	rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th);
	llog_ctxt_put(ctxt);

	RETURN(rc);
}
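
/*
 * fills and adds the llog record describing the change; the record id
 * comes from the shared id tracker, so the tracker's commit callback can
 * tell once the record has hit local storage
 */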
static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d,
			    const struct lu_fid *fid, llog_op_type type,
			    int count, struct thandle *th,
			    const struct lu_attr *attr)
{
	struct osp_thread_info	*osi = osp_env_info(env);
	struct llog_ctxt	*ctxt;
	struct osp_txn_info	*txn;
	int			 rc;

	ENTRY;

	/* it's a layering violation, to access internals of th,
	 * but we can do this as a sanity check, for a while */
	LASSERT(th->th_dev == d->opd_storage);

	switch (type) {
	case MDS_UNLINK64_REC:
		osi->osi_hdr.lrh_len = sizeof(osi->osi_unlink);
		osi->osi_hdr.lrh_type = MDS_UNLINK64_REC;
		osi->osi_unlink.lur_fid = *fid;
		osi->osi_unlink.lur_count = count;
		break;
	case MDS_SETATTR64_REC:
		rc = fid_ostid_pack(fid, &osi->osi_oi);
		LASSERT(rc == 0);
		osi->osi_hdr.lrh_len = sizeof(osi->osi_setattr);
		osi->osi_hdr.lrh_type = MDS_SETATTR64_REC;
		osi->osi_setattr.lsr_oid = osi->osi_oi.oi_id;
		osi->osi_setattr.lsr_oseq = osi->osi_oi.oi_seq;
		LASSERT(attr);
		osi->osi_setattr.lsr_uid = attr->la_uid;
		osi->osi_setattr.lsr_gid = attr->la_gid;
		break;
	default:
		LBUG();
	}

	txn = osp_txn_info(&th->th_ctx);
	LASSERT(txn);

	txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
	osi->osi_hdr.lrh_id = txn->oti_current_id;

	ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
	if (ctxt == NULL)
		RETURN(-ENOMEM);
	rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie,
		      NULL, th);
	llog_ctxt_put(ctxt);

	CDEBUG(D_OTHER, "%s: new record %lu:%lu:%lu/%lu: %d\n",
	       d->opd_obd->obd_name,
	       (unsigned long) osi->osi_cookie.lgc_lgl.lgl_oid,
	       (unsigned long) osi->osi_cookie.lgc_lgl.lgl_oseq,
	       (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen,
	       (unsigned long) osi->osi_cookie.lgc_index, rc);

	if (likely(rc == 0)) {
		spin_lock(&d->opd_syn_lock);
		d->opd_syn_changes++;
		spin_unlock(&d->opd_syn_lock);
	}

	RETURN(rc);
}
int osp_sync_add(const struct lu_env *env, struct osp_object *o,
		 llog_op_type type, struct thandle *th,
		 const struct lu_attr *attr)
{
	return osp_sync_add_rec(env, lu2osp_dev(o->opo_obj.do_lu.lo_dev),
				lu_object_fid(&o->opo_obj.do_lu), type, 1,
				th, attr);
}

int osp_sync_gap(const struct lu_env *env, struct osp_device *d,
		 struct lu_fid *fid, int lost, struct thandle *th)
{
	return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL);
}
/*
 * it's quite obvious we can't maintain all the structures in memory:
 * while OST is down, MDS can be processing thousands and thousands of unlinks
 * filling persistent llogs and their in-core representation
 *
 * this doesn't scale at all. so we need basically the following:
 * a) destroy/setattr append llog records
 * b) once llog has grown to X records, we process the first Y committed
 *    records
 *
 * once record R is found via llog_process(), it becomes committed after any
 * subsequent commit callback (at the most)
 */
/*
 * called for each atomic on-disk change (not once per transaction batch)
 * and goes over the list
 * XXX: should be optimized?
 */

/*
 * called for each RPC reported committed
 */
static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
{
	struct osp_device *d = req->rq_cb_data;
	struct obd_import *imp = req->rq_import;

	CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);

	if (unlikely(req->rq_transno == 0))
		return;

	if (unlikely(req->rq_transno > imp->imp_peer_committed_transno)) {
		/* this request was aborted by the shutdown procedure,
		 * not committed by the peer. we should preserve the llog
		 * record */
		spin_lock(&d->opd_syn_lock);
		d->opd_syn_rpc_in_progress--;
		spin_unlock(&d->opd_syn_lock);
		cfs_waitq_signal(&d->opd_syn_waitq);
		return;
	}

	/* XXX: what if request isn't committed for very long? */
	LASSERT(d);
	LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
	LASSERT(cfs_list_empty(&req->rq_exp_list));

	ptlrpc_request_addref(req);

	spin_lock(&d->opd_syn_lock);
	cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
	spin_unlock(&d->opd_syn_lock);

	/* XXX: some batching wouldn't hurt */
	cfs_waitq_signal(&d->opd_syn_waitq);
}
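
/*
 * reply callback for sync RPCs: -ENOENT means the object is gone on OST
 * already, so the llog record can be cancelled right away; other errors
 * leave the record in place to be replayed later
 */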
static int osp_sync_interpret(const struct lu_env *env,
			      struct ptlrpc_request *req, void *aa, int rc)
{
	struct osp_device *d = req->rq_cb_data;

	/* XXX: error handling here */
	if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC)
		DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread);
	LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
	LASSERT(d);

	CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req,
	       cfs_atomic_read(&req->rq_refcount),
	       rc, (unsigned) req->rq_transno);
	LASSERT(rc || req->rq_transno);

	if (rc == -ENOENT) {
		/*
		 * we tried to destroy the object or update its attributes,
		 * but the object doesn't exist anymore - cancel llog record
		 */
		LASSERT(req->rq_transno == 0);
		LASSERT(cfs_list_empty(&req->rq_exp_list));

		ptlrpc_request_addref(req);

		spin_lock(&d->opd_syn_lock);
		cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
		spin_unlock(&d->opd_syn_lock);

		cfs_waitq_signal(&d->opd_syn_waitq);
	} else if (rc) {
		struct obd_import *imp = req->rq_import;
		/*
		 * error happened, we'll try to repeat on next boot ?
		 */
		LASSERTF(req->rq_transno == 0 ||
			 req->rq_import_generation < imp->imp_generation,
			 "transno "LPU64", rc %d, gen: req %d, imp %d\n",
			 req->rq_transno, rc, req->rq_import_generation,
			 imp->imp_generation);
		LASSERT(d->opd_syn_rpc_in_progress > 0);
		if (req->rq_transno == 0) {
			/* this is the last time we see the request;
			 * if transno is not zero, then the commit cb
			 * will be called at some point */
			spin_lock(&d->opd_syn_lock);
			d->opd_syn_rpc_in_progress--;
			spin_unlock(&d->opd_syn_lock);
		}

		cfs_waitq_signal(&d->opd_syn_waitq);
	} else if (unlikely(d->opd_pre_status == -ENOSPC)) {
		/*
		 * if current status is -ENOSPC (lack of free space on OST)
		 * then we should poll OST immediately once object destroy
		 * is replied
		 */
		osp_statfs_need_now(d);
	}

	LASSERT(d->opd_syn_rpc_in_flight > 0);
	spin_lock(&d->opd_syn_lock);
	d->opd_syn_rpc_in_flight--;
	spin_unlock(&d->opd_syn_lock);
	CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
	       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
	       d->opd_syn_rpc_in_progress);

	osp_sync_check_for_work(d);

	return 0;
}
/*
 * hands a single prepared RPC over to ptlrpcd; the caller has already
 * accounted for it in the in-flight counters
 */
static void osp_sync_send_new_rpc(struct osp_device *d,
				  struct ptlrpc_request *req)
{
	LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight);
	LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);

	ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
}
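
/*
 * allocates and packs an OST request for the given llog record; the
 * request is tagged with OSP_JOB_MAGIC so the commit and interpret
 * callbacks can recognize it
 */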
static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d,
					       struct llog_handle *llh,
					       struct llog_rec_hdr *h,
					       ost_cmd_t op,
					       const struct req_format *format)
{
	struct ptlrpc_request	*req;
	struct ost_body		*body;
	struct obd_import	*imp;
	int			 rc;

	/* Prepare the request */
	imp = d->opd_obd->u.cli.cl_import;
	LASSERT(imp);
	req = ptlrpc_request_alloc(imp, format);
	if (req == NULL)
		RETURN(ERR_PTR(-ENOMEM));

	rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, op);
	if (rc) {
		ptlrpc_req_finished(req);
		return ERR_PTR(rc);
	}

	/*
	 * this is a trick: to save on memory allocations we put the cookie
	 * into the request, but don't set the corresponding flag in o_valid
	 * so that OST doesn't interpret this cookie. once the request
	 * is committed on OST we take the cookie from the request and cancel
	 * the llog record
	 */
	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
	body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
	body->oa.o_lcookie.lgc_index = h->lrh_index;
	CFS_INIT_LIST_HEAD(&req->rq_exp_list);
	req->rq_svc_thread = (void *) OSP_JOB_MAGIC;

	req->rq_interpret_reply = osp_sync_interpret;
	req->rq_commit_cb = osp_sync_request_commit_cb;
	req->rq_cb_data = d;

	ptlrpc_request_set_replen(req);

	return req;
}
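
/*
 * builds an OST_SETATTR request propagating uid/gid from a setattr
 * llog record
 */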
static int osp_sync_new_setattr_job(struct osp_device *d,
				    struct llog_handle *llh,
				    struct llog_rec_hdr *h)
{
	struct llog_setattr64_rec	*rec = (struct llog_setattr64_rec *)h;
	struct ptlrpc_request		*req;
	struct ost_body			*body;

	ENTRY;
	LASSERT(h->lrh_type == MDS_SETATTR64_REC);

	req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR);
	if (IS_ERR(req))
		RETURN(PTR_ERR(req));

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	body->oa.o_id = rec->lsr_oid;
	body->oa.o_seq = rec->lsr_oseq;
	body->oa.o_uid = rec->lsr_uid;
	body->oa.o_gid = rec->lsr_gid;
	body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
			   OBD_MD_FLUID | OBD_MD_FLGID;

	osp_sync_send_new_rpc(d, req);
	RETURN(0);
}
/* Old records may be in the old format, so we handle that too */
static int osp_sync_new_unlink_job(struct osp_device *d,
				   struct llog_handle *llh,
				   struct llog_rec_hdr *h)
{
	struct llog_unlink_rec	*rec = (struct llog_unlink_rec *)h;
	struct ptlrpc_request	*req;
	struct ost_body		*body;

	ENTRY;
	LASSERT(h->lrh_type == MDS_UNLINK_REC);

	req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
	if (IS_ERR(req))
		RETURN(PTR_ERR(req));

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	LASSERT(body);
	body->oa.o_id = rec->lur_oid;
	body->oa.o_seq = rec->lur_oseq;
	body->oa.o_misc = rec->lur_count;
	body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID;
	if (rec->lur_count)
		body->oa.o_valid |= OBD_MD_FLOBJCOUNT;

	osp_sync_send_new_rpc(d, req);
	RETURN(0);
}
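
/*
 * builds an OST_DESTROY request from a FID-based (new format) unlink
 * record
 */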
static int osp_sync_new_unlink64_job(struct osp_device *d,
				     struct llog_handle *llh,
				     struct llog_rec_hdr *h)
{
	struct llog_unlink64_rec	*rec = (struct llog_unlink64_rec *)h;
	struct ptlrpc_request		*req;
	struct ost_body			*body;
	int				 rc;

	ENTRY;
	LASSERT(h->lrh_type == MDS_UNLINK64_REC);

	req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
	if (IS_ERR(req))
		RETURN(PTR_ERR(req));

	body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
	if (body == NULL)
		RETURN(-EFAULT);
	rc = fid_ostid_pack(&rec->lur_fid, &body->oa.o_oi);
	if (rc < 0)
		RETURN(rc);
	body->oa.o_misc = rec->lur_count;
	body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | OBD_MD_FLOBJCOUNT;

	osp_sync_send_new_rpc(d, req);
	RETURN(0);
}
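
/*
 * turns a single llog record into an RPC: generation records (marking
 * the boundary between previous boots and this one) are cancelled right
 * away, everything else is dispatched to the type-specific job builders
 * above
 */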
static int osp_sync_process_record(const struct lu_env *env,
				   struct osp_device *d,
				   struct llog_handle *llh,
				   struct llog_rec_hdr *rec)
{
	struct llog_cookie	 cookie;
	int			 rc = 0;

	cookie.lgc_lgl = llh->lgh_id;
	cookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
	cookie.lgc_index = rec->lrh_index;

	if (unlikely(rec->lrh_type == LLOG_GEN_REC)) {
		struct llog_gen_rec *gen = (struct llog_gen_rec *)rec;

		/* we're waiting for the record generated by this instance */
		LASSERT(d->opd_syn_prev_done == 0);
		if (!memcmp(&d->opd_syn_generation, &gen->lgr_gen,
			    sizeof(gen->lgr_gen))) {
			CDEBUG(D_HA, "processed all old entries\n");
			d->opd_syn_prev_done = 1;
		}

		/* cancel any generation record */
		rc = llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle,
					     1, &cookie);

		return rc;
	}

	/*
	 * now we prepare and fill requests to OST, put them on the queue
	 * and fire after next commit callback
	 */

	/* notice we increment counters before sending RPC, to be consistent
	 * in RPC interpret callback which may happen very quickly */
	spin_lock(&d->opd_syn_lock);
	d->opd_syn_rpc_in_flight++;
	d->opd_syn_rpc_in_progress++;
	spin_unlock(&d->opd_syn_lock);

	switch (rec->lrh_type) {
	/* case MDS_UNLINK_REC is kept for compatibility */
	case MDS_UNLINK_REC:
		rc = osp_sync_new_unlink_job(d, llh, rec);
		break;
	case MDS_UNLINK64_REC:
		rc = osp_sync_new_unlink64_job(d, llh, rec);
		break;
	case MDS_SETATTR64_REC:
		rc = osp_sync_new_setattr_job(d, llh, rec);
		break;
	default:
		CERROR("unknown record type: %x\n", rec->lrh_type);
		rc = -EINVAL;
		break;
	}

	if (likely(rc == 0)) {
		spin_lock(&d->opd_syn_lock);
		if (d->opd_syn_prev_done) {
			LASSERT(d->opd_syn_changes > 0);
			LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
			/*
			 * NOTE: it's possible to meet the same id if
			 * OST stores a few stripes of the same file
			 */
			if (rec->lrh_id > d->opd_syn_last_processed_id)
				d->opd_syn_last_processed_id = rec->lrh_id;

			d->opd_syn_changes--;
		}
		CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
		       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
		       d->opd_syn_rpc_in_progress);
		spin_unlock(&d->opd_syn_lock);
	} else {
		spin_lock(&d->opd_syn_lock);
		d->opd_syn_rpc_in_flight--;
		d->opd_syn_rpc_in_progress--;
		spin_unlock(&d->opd_syn_lock);
	}

	CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
	       rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id, rc);
	return rc;
}
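
/*
 * walks the list of requests reported committed by OST and cancels the
 * corresponding llog records using the cookies stashed in the requests
 * by osp_sync_new_job()
 */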
static void osp_sync_process_committed(const struct lu_env *env,
				       struct osp_device *d)
{
	struct obd_device	*obd = d->opd_obd;
	struct obd_import	*imp = obd->u.cli.cl_import;
	struct ost_body		*body;
	struct ptlrpc_request	*req, *tmp;
	struct llog_ctxt	*ctxt;
	struct llog_handle	*llh;
	cfs_list_t		 list;
	int			 rc, done = 0;

	ENTRY;

	if (cfs_list_empty(&d->opd_syn_committed_there))
		return;

	/*
	 * if current status is -ENOSPC (lack of free space on OST)
	 * then we should poll OST immediately once object destroy
	 * is replied
	 *
	 * notice: we do this upon commit as well because some backends
	 * (like DMU) do not release space right away.
	 */
	if (unlikely(d->opd_pre_status == -ENOSPC))
		osp_statfs_need_now(d);

	/*
	 * now cancel them all
	 * XXX: can we improve this using some batching?
	 *      with batch RPC that'll happen automatically?
	 * XXX: can we store ctxt in lod_device and save few cycles ?
	 */
	ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
	LASSERT(ctxt);

	llh = ctxt->loc_handle;
	LASSERT(llh);

	CFS_INIT_LIST_HEAD(&list);
	spin_lock(&d->opd_syn_lock);
	cfs_list_splice(&d->opd_syn_committed_there, &list);
	CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
	spin_unlock(&d->opd_syn_lock);

	cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
		LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
		cfs_list_del_init(&req->rq_exp_list);

		body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
		LASSERT(body);

		/* the import may be closing, in which case all commit cb's
		 * have been called already, so we can check committedness
		 * directly */
		if (req->rq_transno <= imp->imp_peer_committed_transno) {
			rc = llog_cat_cancel_records(env, llh, 1,
						     &body->oa.o_lcookie);
			if (rc)
				CERROR("%s: can't cancel record: %d\n",
				       obd->obd_name, rc);
		} else {
			DEBUG_REQ(D_HA, req, "not committed");
		}

		ptlrpc_req_finished(req);
		done++;
	}

	llog_ctxt_put(ctxt);

	LASSERT(d->opd_syn_rpc_in_progress >= done);
	spin_lock(&d->opd_syn_lock);
	d->opd_syn_rpc_in_progress -= done;
	spin_unlock(&d->opd_syn_lock);
	CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
	       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
	       d->opd_syn_rpc_in_progress);

	osp_sync_check_for_work(d);

	EXIT;
}
/*
 * this is where most of the queue processing happens
 */
static int osp_sync_process_queues(const struct lu_env *env,
				   struct llog_handle *llh,
				   struct llog_rec_hdr *rec,
				   void *data)
{
	struct osp_device	*d = data;
	int			 rc;

	do {
		struct l_wait_info lwi = { 0 };

		if (!osp_sync_running(d)) {
			CDEBUG(D_HA, "stop llog processing\n");
			return LLOG_PROC_BREAK;
		}

		/* process requests committed by OST */
		osp_sync_process_committed(env, d);

		/* if there are changes to be processed and we have
		 * resources for this ... do now */
		if (osp_sync_can_process_new(d, rec)) {
			if (llh == NULL) {
				/* ask llog for another record */
				CDEBUG(D_HA, "%lu changes, %u in progress, %u in flight\n",
				       d->opd_syn_changes,
				       d->opd_syn_rpc_in_progress,
				       d->opd_syn_rpc_in_flight);
				return 0;
			}

			/*
			 * try to send; in case of disconnection, suspend
			 * processing till we can send this request
			 */
			do {
				rc = osp_sync_process_record(env, d, llh, rec);
				/*
				 * XXX: probably different handling is needed
				 * for some bugs, like immediate exit or if
				 * OSP gets inactive
				 */
				if (rc) {
					CERROR("can't send: %d\n", rc);
					l_wait_event(d->opd_syn_waitq,
						     !osp_sync_running(d) ||
						     osp_sync_has_work(d),
						     &lwi);
				}
			} while (rc != 0 && osp_sync_running(d));

			llh = NULL;
			rec = NULL;
		}

		if (d->opd_syn_last_processed_id == d->opd_syn_last_used_id)
			osp_sync_remove_from_tracker(d);

		l_wait_event(d->opd_syn_waitq,
			     !osp_sync_running(d) ||
			     osp_sync_can_process_new(d, rec) ||
			     !cfs_list_empty(&d->opd_syn_committed_there),
			     &lwi);
	} while (1);
}
/*
 * this thread runs the llog_cat_process() scanner calling our callback
 * to process llog records. in the callback we implement a tricky
 * state machine as we don't want to start scanning the llog again
 * and again, and we also don't want to process too many records and send
 * too many RPCs at a time. so, depending on the current load (number of
 * changes being synced to OST) the callback can suspend awaiting some
 * new condition, like syncs completed.
 *
 * in order to process llog records left by previous boots and to allow
 * llog_process_thread() to find something (otherwise it'd just exit
 * immediately) we add a special GENERATION record on each boot.
 */
static int osp_sync_thread(void *_arg)
{
	struct osp_device	*d = _arg;
	struct ptlrpc_thread	*thread = &d->opd_syn_thread;
	struct l_wait_info	 lwi = { 0 };
	struct llog_ctxt	*ctxt;
	struct obd_device	*obd = d->opd_obd;
	struct llog_handle	*llh;
	struct lu_env		 env;
	int			 rc;
	char			 pname[16];

	ENTRY;

	rc = lu_env_init(&env, LCT_LOCAL);
	if (rc) {
		CERROR("%s: can't initialize env: rc = %d\n",
		       obd->obd_name, rc);
		RETURN(rc);
	}

	sprintf(pname, "osp-syn-%u", d->opd_index);
	cfs_daemonize(pname);

	spin_lock(&d->opd_syn_lock);
	thread->t_flags = SVC_RUNNING;
	spin_unlock(&d->opd_syn_lock);
	cfs_waitq_signal(&thread->t_ctl_waitq);

	ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
	if (ctxt == NULL) {
		CERROR("can't get appropriate context\n");
		GOTO(out, rc = -EINVAL);
	}

	llh = ctxt->loc_handle;
	if (llh == NULL) {
		CERROR("can't get llh\n");
		llog_ctxt_put(ctxt);
		GOTO(out, rc = -EINVAL);
	}

	rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, 0, 0);
	LASSERTF(rc == 0 || rc == LLOG_PROC_BREAK,
		 "%lu changes, %u in progress, %u in flight: %d\n",
		 d->opd_syn_changes, d->opd_syn_rpc_in_progress,
		 d->opd_syn_rpc_in_flight, rc);

	/* we don't expect llog_process_thread() to exit till umount */
	LASSERTF(thread->t_flags != SVC_RUNNING,
		 "%lu changes, %u in progress, %u in flight\n",
		 d->opd_syn_changes, d->opd_syn_rpc_in_progress,
		 d->opd_syn_rpc_in_flight);

	/* wait till all the requests are completed */
	while (d->opd_syn_rpc_in_progress > 0) {
		osp_sync_process_committed(&env, d);
		l_wait_event(d->opd_syn_waitq,
			     d->opd_syn_rpc_in_progress == 0,
			     &lwi);
	}

	llog_cat_close(&env, llh);
	rc = llog_cleanup(&env, ctxt);
	if (rc)
		CERROR("can't cleanup llog: %d\n", rc);
out:
	thread->t_flags = SVC_STOPPED;

	cfs_waitq_signal(&thread->t_ctl_waitq);
	LASSERTF(d->opd_syn_rpc_in_progress == 0,
		 "%s: %d %d %sempty\n",
		 d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
		 d->opd_syn_rpc_in_flight,
		 cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!");

	lu_env_fini(&env);

	RETURN(0);
}
static struct llog_operations osp_mds_ost_orig_logops;
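
/*
 * opens (or creates) the llog catalog for our OST index and appends a
 * generation record, so records from previous boots can be told apart
 * from the ones created in this session
 */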
static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d)
{
	struct osp_thread_info	*osi = osp_env_info(env);
	struct llog_handle	*lgh = NULL;
	struct obd_device	*obd = d->opd_obd;
	struct llog_ctxt	*ctxt;
	int			 rc;

	ENTRY;

	LASSERT(obd);

	/*
	 * open llog corresponding to our OST
	 */
	OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
	obd->obd_lvfs_ctxt.dt = d->opd_storage;

	rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1,
				   &osi->osi_cid);
	if (rc) {
		CERROR("%s: can't get id from catalogs: rc = %d\n",
		       obd->obd_name, rc);
		RETURN(rc);
	}

	CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
	       obd->obd_name, d->opd_index, osi->osi_cid.lci_logid.lgl_oid,
	       osi->osi_cid.lci_logid.lgl_oseq,
	       osi->osi_cid.lci_logid.lgl_ogen);

	osp_mds_ost_orig_logops = llog_osd_ops;
	rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd,
			&osp_mds_ost_orig_logops);
	if (rc)
		RETURN(rc);

	ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
	LASSERT(ctxt);

	if (likely(osi->osi_cid.lci_logid.lgl_oid != 0)) {
		rc = llog_open(env, ctxt, &lgh, &osi->osi_cid.lci_logid, NULL,
			       LLOG_OPEN_EXISTS);
		/* re-create llog if it is missing */
		if (rc == -ENOENT)
			osi->osi_cid.lci_logid.lgl_oid = 0;
		else if (rc < 0)
			GOTO(out_cleanup, rc);
	}

	if (unlikely(osi->osi_cid.lci_logid.lgl_oid == 0)) {
		rc = llog_open_create(env, ctxt, &lgh, NULL, NULL);
		if (rc < 0)
			GOTO(out_cleanup, rc);
		osi->osi_cid.lci_logid = lgh->lgh_id;
	}

	ctxt->loc_handle = lgh;
	lgh->lgh_logops->lop_add = llog_cat_add_rec;
	lgh->lgh_logops->lop_declare_add = llog_cat_declare_add_rec;

	rc = llog_cat_init_and_process(env, lgh);
	if (rc)
		GOTO(out_close, rc);

	rc = llog_osd_put_cat_list(env, d->opd_storage, d->opd_index, 1,
				   &osi->osi_cid);
	if (rc)
		GOTO(out_close, rc);

	/*
	 * put a mark in the llog till which we'll be processing
	 * old records on this boot
	 */
	d->opd_syn_generation.mnt_cnt = cfs_time_current();
	d->opd_syn_generation.conn_cnt = cfs_time_current();

	osi->osi_hdr.lrh_type = LLOG_GEN_REC;
	osi->osi_hdr.lrh_len = sizeof(osi->osi_gen);

	memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation,
	       sizeof(osi->osi_gen.lgr_gen));

	rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie,
			  NULL);
	if (rc < 0)
		GOTO(out_close, rc);
	llog_ctxt_put(ctxt);
	RETURN(0);
out_close:
	llog_cat_close(env, lgh);
out_cleanup:
	llog_cleanup(env, ctxt);
	RETURN(rc);
}
static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d)
{
	struct llog_ctxt *ctxt;

	ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
	llog_cat_close(env, ctxt->loc_handle);
	llog_cleanup(env, ctxt);
}
/*
 * initializes the sync component of OSP
 */
int osp_sync_init(const struct lu_env *env, struct osp_device *d)
{
	struct l_wait_info	 lwi = { 0 };
	int			 rc;

	ENTRY;

	rc = osp_sync_id_traction_init(d);
	if (rc)
		RETURN(rc);

	/*
	 * initialize llog storing changes
	 */
	rc = osp_sync_llog_init(env, d);
	if (rc) {
		CERROR("%s: can't initialize llog: rc = %d\n",
		       d->opd_obd->obd_name, rc);
		GOTO(err_id, rc);
	}

	/*
	 * Start synchronization thread
	 */
	d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
	d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
	spin_lock_init(&d->opd_syn_lock);
	cfs_waitq_init(&d->opd_syn_waitq);
	cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
	CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);

	rc = cfs_create_thread(osp_sync_thread, d, 0);
	if (rc < 0) {
		CERROR("%s: can't start sync thread: rc = %d\n",
		       d->opd_obd->obd_name, rc);
		GOTO(err_llog, rc);
	}

	l_wait_event(d->opd_syn_thread.t_ctl_waitq,
		     osp_sync_running(d) || osp_sync_stopped(d), &lwi);

	RETURN(0);
err_llog:
	osp_sync_llog_fini(env, d);
err_id:
	osp_sync_id_traction_fini(d);
	return rc;
}
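
/*
 * stops the sync thread, then detaches the device from the id tracker
 */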
int osp_sync_fini(struct osp_device *d)
{
	struct ptlrpc_thread *thread = &d->opd_syn_thread;

	ENTRY;

	thread->t_flags = SVC_STOPPING;
	cfs_waitq_signal(&d->opd_syn_waitq);
	cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);

	/*
	 * unregister transaction callbacks only when the sync thread
	 * has finished operations with the llog
	 */
	osp_sync_id_traction_fini(d);

	RETURN(0);
}
static DEFINE_MUTEX(osp_id_tracker_sem);
static CFS_LIST_HEAD(osp_id_tracker_list);
static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
{
	struct osp_id_tracker	*tr = cookie;
	struct osp_device	*d;
	struct osp_txn_info	*txn;

	LASSERT(tr);

	txn = osp_txn_info(&th->th_ctx);
	if (txn == NULL || txn->oti_current_id < tr->otr_committed_id)
		return;

	spin_lock(&tr->otr_lock);
	if (likely(txn->oti_current_id > tr->otr_committed_id)) {
		CDEBUG(D_OTHER, "committed: %u -> %u\n",
		       tr->otr_committed_id, txn->oti_current_id);
		tr->otr_committed_id = txn->oti_current_id;

		cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
					opd_syn_ontrack) {
			d->opd_syn_last_committed_id = tr->otr_committed_id;
			cfs_waitq_signal(&d->opd_syn_waitq);
		}
	}
	spin_unlock(&tr->otr_lock);
}
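
/*
 * attaches the device to an id tracker; trackers are shared between all
 * OSPs sitting on the same local storage, so a single commit callback
 * serves them all
 */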
static int osp_sync_id_traction_init(struct osp_device *d)
{
	struct osp_id_tracker	*tr, *found = NULL;
	int			 rc = 0;

	LASSERT(d);
	LASSERT(d->opd_storage);
	LASSERT(d->opd_syn_tracker == NULL);
	CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);

	mutex_lock(&osp_id_tracker_sem);
	cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
		if (tr->otr_dev == d->opd_storage) {
			LASSERT(cfs_atomic_read(&tr->otr_refcount));
			cfs_atomic_inc(&tr->otr_refcount);
			d->opd_syn_tracker = tr;
			found = tr;
			break;
		}
	}

	if (found == NULL) {
		rc = -ENOMEM;
		OBD_ALLOC_PTR(tr);
		if (tr) {
			d->opd_syn_tracker = tr;
			spin_lock_init(&tr->otr_lock);
			tr->otr_dev = d->opd_storage;
			tr->otr_next_id = 1;
			tr->otr_committed_id = 0;
			cfs_atomic_set(&tr->otr_refcount, 1);
			CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list);
			cfs_list_add(&tr->otr_list, &osp_id_tracker_list);
			tr->otr_tx_cb.dtc_txn_commit =
						osp_sync_tracker_commit_cb;
			tr->otr_tx_cb.dtc_cookie = tr;
			tr->otr_tx_cb.dtc_tag = LCT_MD_THREAD;
			dt_txn_callback_add(d->opd_storage, &tr->otr_tx_cb);
			rc = 0;
		}
	}
	mutex_unlock(&osp_id_tracker_sem);

	return rc;
}
static void osp_sync_id_traction_fini(struct osp_device *d)
{
	struct osp_id_tracker *tr;

	ENTRY;

	LASSERT(d);
	tr = d->opd_syn_tracker;
	if (tr == NULL) {
		EXIT;
		return;
	}

	osp_sync_remove_from_tracker(d);

	mutex_lock(&osp_id_tracker_sem);
	if (cfs_atomic_dec_and_test(&tr->otr_refcount)) {
		dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
		LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
		cfs_list_del(&tr->otr_list);
		OBD_FREE_PTR(tr);
		d->opd_syn_tracker = NULL;
	}
	mutex_unlock(&osp_id_tracker_sem);

	EXIT;
}
/*
 * generates an id for the tracker
 */
static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
{
	struct osp_id_tracker *tr;

	tr = d->opd_syn_tracker;
	LASSERT(tr);

	/* XXX: we can improve this introducing per-cpu preallocated ids? */
	spin_lock(&tr->otr_lock);
	if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
		spin_unlock(&tr->otr_lock);
		CERROR("%s: next %u, last synced %lu\n",
		       d->opd_obd->obd_name, tr->otr_next_id,
		       d->opd_syn_last_used_id);
		LBUG();
	}

	id = tr->otr_next_id++;
	if (id > d->opd_syn_last_used_id)
		d->opd_syn_last_used_id = id;
	if (cfs_list_empty(&d->opd_syn_ontrack))
		cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
	spin_unlock(&tr->otr_lock);
	CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);

	return id;
}
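
/*
 * drops the device from the tracker's wakeup list once it no longer has
 * ids awaiting commit
 */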
static void osp_sync_remove_from_tracker(struct osp_device *d)
{
	struct osp_id_tracker *tr;

	tr = d->opd_syn_tracker;
	LASSERT(tr);

	if (cfs_list_empty(&d->opd_syn_ontrack))
		return;

	spin_lock(&tr->otr_lock);
	cfs_list_del_init(&d->opd_syn_ontrack);
	spin_unlock(&tr->otr_lock);
}