/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, 2013, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osp/osp_sync.c
 *
 * Lustre OST Proxy Device
 *
 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */

#define DEBUG_SUBSYSTEM S_MDS

#include <lustre_log.h>
#include "osp_internal.h"

static int osp_sync_id_traction_init(struct osp_device *d);
static void osp_sync_id_traction_fini(struct osp_device *d);
static __u32 osp_sync_id_get(struct osp_device *d, __u32 id);
static void osp_sync_remove_from_tracker(struct osp_device *d);

/*
 * this is the component of OSP implementing synchronization between MDS
 * and OST: it llogs all interesting changes (currently uid/gid changes and
 * object destroys) atomically, then makes sure the changes hit OST storage
 *
 * we have 4 queues of work:
 *
 * the first queue is the llog itself; once read, a change is stored in the
 * 2nd queue in the form of an RPC (but the RPC isn't fired yet).
 *
 * the second queue (opd_syn_waiting_for_commit) holds changes awaiting local
 * commit. once a change is committed locally it migrates onto the 3rd queue.
 *
 * the third queue (opd_syn_committed_here) holds changes committed locally,
 * but not sent to OST (as the pipe can be full). once the pipe becomes
 * non-full we take a change from the queue and fire the corresponding RPC.
 *
 * once an RPC is reported committed by OST (using the regular last_committed
 * mechanism) the change jumps into the 4th queue (opd_syn_committed_there);
 * now we can cancel the corresponding llog record and release the RPC
 *
 * opd_syn_changes is the number of unread llog records (to be processed).
 * notice this number doesn't include llog records from previous boots.
 * with OSP_SYN_THRESHOLD we try to batch processing a bit (TO BE IMPLEMENTED)
 *
 * opd_syn_rpc_in_progress is the number of requests in queues 2-4.
 * we control this with OSP_MAX_IN_PROGRESS so that OSP doesn't consume
 * too much memory -- how to deal with 1000s of OSTs? batching could help?
 *
 * opd_syn_rpc_in_flight is the number of RPCs in flight.
 * we control this with OSP_MAX_IN_FLIGHT
 */

/* XXX: do math to learn reasonable threshold
 * should it be ~ number of changes fitting bulk? */

#define OSP_SYN_THRESHOLD       10
#define OSP_MAX_IN_FLIGHT       8
#define OSP_MAX_IN_PROGRESS     4096

#define OSP_JOB_MAGIC           0x26112005

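/* helpers checking the state of the sync thread and its pipeline */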
static inline int osp_sync_running(struct osp_device *d)
{
        return !!(d->opd_syn_thread.t_flags & SVC_RUNNING);
}

static inline int osp_sync_stopped(struct osp_device *d)
{
        return !!(d->opd_syn_thread.t_flags & SVC_STOPPED);
}

static inline int osp_sync_has_new_job(struct osp_device *d)
{
        return ((d->opd_syn_last_processed_id < d->opd_syn_last_used_id) &&
                (d->opd_syn_last_processed_id < d->opd_syn_last_committed_id))
                || (d->opd_syn_prev_done == 0);
}

static inline int osp_sync_low_in_progress(struct osp_device *d)
{
        return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress;
}

static inline int osp_sync_low_in_flight(struct osp_device *d)
{
        return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight;
}

static inline int osp_sync_has_work(struct osp_device *d)
{
        /* has new/old changes and low in-progress? */
        if (osp_sync_has_new_job(d) && osp_sync_low_in_progress(d) &&
            osp_sync_low_in_flight(d) && d->opd_imp_connected)
                return 1;

        /* has remotely committed? */
        if (!list_empty(&d->opd_syn_committed_there))
                return 1;

        return 0;
}

#define osp_sync_check_for_work(d)                      \
{                                                       \
        if (osp_sync_has_work(d)) {                     \
                wake_up(&d->opd_syn_waitq);             \
        }                                               \
}

void __osp_sync_check_for_work(struct osp_device *d)
{
        osp_sync_check_for_work(d);
}

static inline int osp_sync_can_process_new(struct osp_device *d,
                                           struct llog_rec_hdr *rec)
{
        LASSERT(d);

        if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
                return 0;
        if (!osp_sync_low_in_progress(d))
                return 0;
        if (!osp_sync_low_in_flight(d))
                return 0;
        if (!d->opd_imp_connected)
                return 0;
        if (d->opd_syn_prev_done == 0)
                return 1;
        if (d->opd_syn_changes == 0)
                return 0;
        if (rec == NULL || rec->lrh_id <= d->opd_syn_last_committed_id)
                return 1;
        return 0;
}

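/*
 * reserves llog space for a new sync record (unlink or setattr) in the
 * given transaction, so the record can be added atomically with the
 * change itself
 */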
int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
                         llog_op_type type, struct thandle *th)
{
        struct osp_thread_info  *osi = osp_env_info(env);
        struct osp_device       *d = lu2osp_dev(o->opo_obj.do_lu.lo_dev);
        struct llog_ctxt        *ctxt;
        int                      rc;

        ENTRY;

        /* accessing the internals of th is a layering violation,
         * but we keep it as a sanity check, for now */
        LASSERT(th->th_dev == d->opd_storage);

        switch (type) {
        case MDS_UNLINK64_REC:
                osi->osi_hdr.lrh_len = sizeof(struct llog_unlink64_rec);
                break;
        case MDS_SETATTR64_REC:
                osi->osi_hdr.lrh_len = sizeof(struct llog_setattr64_rec);
                break;
        default:
                LBUG();
        }

        /* we want ->dt_trans_start() to allocate a per-thandle structure */
        th->th_tags |= LCT_OSP_THREAD;

        ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
        LASSERT(ctxt);

        rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th);
        llog_ctxt_put(ctxt);

        RETURN(rc);
}

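/*
 * fills a sync record for the given fid and adds it to the llog within
 * the same transaction as the change, bumping opd_syn_changes on success
 */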
static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d,
                            const struct lu_fid *fid, llog_op_type type,
                            int count, struct thandle *th,
                            const struct lu_attr *attr)
{
        struct osp_thread_info  *osi = osp_env_info(env);
        struct llog_ctxt        *ctxt;
        struct osp_txn_info     *txn;
        int                      rc;

        ENTRY;

        /* accessing the internals of th is a layering violation,
         * but we keep it as a sanity check, for now */
        LASSERT(th->th_dev == d->opd_storage);

        switch (type) {
        case MDS_UNLINK64_REC:
                osi->osi_hdr.lrh_len = sizeof(osi->osi_unlink);
                osi->osi_hdr.lrh_type = MDS_UNLINK64_REC;
                osi->osi_unlink.lur_fid  = *fid;
                osi->osi_unlink.lur_count = count;
                break;
        case MDS_SETATTR64_REC:
                rc = fid_to_ostid(fid, &osi->osi_oi);
                LASSERT(rc == 0);
                osi->osi_hdr.lrh_len = sizeof(osi->osi_setattr);
                osi->osi_hdr.lrh_type = MDS_SETATTR64_REC;
                osi->osi_setattr.lsr_oi  = osi->osi_oi;
                LASSERT(attr);
                osi->osi_setattr.lsr_uid = attr->la_uid;
                osi->osi_setattr.lsr_gid = attr->la_gid;
                osi->osi_setattr.lsr_valid =
                        ((attr->la_valid & LA_UID) ? OBD_MD_FLUID : 0) |
                        ((attr->la_valid & LA_GID) ? OBD_MD_FLGID : 0);
                break;
        default:
                LBUG();
        }

        txn = osp_txn_info(&th->th_ctx);
        LASSERT(txn);

        txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
        osi->osi_hdr.lrh_id = txn->oti_current_id;

        ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
        if (ctxt == NULL)
                RETURN(-ENOMEM);
        rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie,
                      th);
        llog_ctxt_put(ctxt);

        CDEBUG(D_OTHER, "%s: new record "DOSTID":%lu/%lu: %d\n",
               d->opd_obd->obd_name, POSTID(&osi->osi_cookie.lgc_lgl.lgl_oi),
               (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen,
               (unsigned long) osi->osi_cookie.lgc_index, rc);

        if (rc > 0)
                rc = 0;

        if (likely(rc == 0)) {
                spin_lock(&d->opd_syn_lock);
                d->opd_syn_changes++;
                spin_unlock(&d->opd_syn_lock);
        }

        RETURN(rc);
}

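/* adds a sync record for a single object modification */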
int osp_sync_add(const struct lu_env *env, struct osp_object *o,
                 llog_op_type type, struct thandle *th,
                 const struct lu_attr *attr)
{
        return osp_sync_add_rec(env, lu2osp_dev(o->opo_obj.do_lu.lo_dev),
                                lu_object_fid(&o->opo_obj.do_lu), type, 1,
                                th, attr);
}

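/* records 'lost' objects as an unlink so they get destroyed on the OST */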
int osp_sync_gap(const struct lu_env *env, struct osp_device *d,
                 struct lu_fid *fid, int lost, struct thandle *th)
{
        return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL);
}

/*
 * it's quite obvious we can't maintain all the structures in memory:
 * while the OST is down, the MDS can be processing thousands and thousands
 * of unlinks, filling persistent llogs and the in-core representation
 *
 * this doesn't scale at all. so we need basically the following:
 * a) destroy/setattr append llog records
 * b) once the llog has grown to X records, we process the first Y committed
 *    records
 *
 * once record R is found via llog_process(), it becomes committed after any
 * subsequent commit callback (at the latest)
 */

/*
 * called for each atomic on-disk change (not once per transaction batch)
 * and goes over the list
 * XXX: should be optimized?
 */

/**
 * called for each RPC reported committed
 */
static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
{
        struct osp_device *d = req->rq_cb_data;

        CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);

        if (unlikely(req->rq_transno == 0))
                return;

        /* do not do any opd_syn_rpc_* accounting here,
         * it's done in osp_sync_interpret sooner or later */

        LASSERT(d);
        LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
        LASSERT(list_empty(&req->rq_exp_list));

        ptlrpc_request_addref(req);

        spin_lock(&d->opd_syn_lock);
        list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
        spin_unlock(&d->opd_syn_lock);

        /* XXX: some batching wouldn't hurt */
        wake_up(&d->opd_syn_waitq);
}

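/*
 * interpret callback for sync RPCs: -ENOENT means the object is gone
 * already and the llog record can be cancelled; other errors are left
 * for a retry on the next boot; accounting is updated in all cases
 */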
static int osp_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *aa, int rc)
{
        struct osp_device *d = req->rq_cb_data;

        if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC)
                DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread);
        LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
        LASSERT(d);

        CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req,
               atomic_read(&req->rq_refcount),
               rc, (unsigned) req->rq_transno);
        LASSERT(rc || req->rq_transno);

        if (rc == -ENOENT) {
                /*
                 * we tried to destroy the object or update its attributes,
                 * but the object doesn't exist anymore - cancel the llog
                 * record
                 */
                LASSERT(req->rq_transno == 0);
                LASSERT(list_empty(&req->rq_exp_list));

                ptlrpc_request_addref(req);

                spin_lock(&d->opd_syn_lock);
                list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
                spin_unlock(&d->opd_syn_lock);

                wake_up(&d->opd_syn_waitq);
        } else if (rc) {
                struct obd_import *imp = req->rq_import;
                /*
                 * an error happened, we'll try to repeat on the next boot
                 */
                LASSERTF(req->rq_transno == 0 ||
                         req->rq_import_generation < imp->imp_generation,
                         "transno "LPU64", rc %d, gen: req %d, imp %d\n",
                         req->rq_transno, rc, req->rq_import_generation,
                         imp->imp_generation);
                if (req->rq_transno == 0) {
                        /* this is the last time we see the request;
                         * if transno is not zero, then the commit cb
                         * will be called at some point */
                        LASSERT(d->opd_syn_rpc_in_progress > 0);
                        spin_lock(&d->opd_syn_lock);
                        d->opd_syn_rpc_in_progress--;
                        spin_unlock(&d->opd_syn_lock);
                }

                wake_up(&d->opd_syn_waitq);
        } else if (d->opd_pre != NULL &&
                   unlikely(d->opd_pre_status == -ENOSPC)) {
                /*
                 * if the current status is -ENOSPC (lack of free space
                 * on OST) then we should poll the OST immediately once
                 * the object destroy is replied
                 */
                osp_statfs_need_now(d);
        }

        LASSERT(d->opd_syn_rpc_in_flight > 0);
        spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_flight--;
        spin_unlock(&d->opd_syn_lock);
        if (unlikely(atomic_read(&d->opd_syn_barrier) > 0))
                wake_up(&d->opd_syn_barrier_waitq);
        CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
               d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
               d->opd_syn_rpc_in_progress);

        osp_sync_check_for_work(d);

        return 0;
}

/*
 * hands a prepared request over to ptlrpcd; the caller walks through
 * the list of locally committed changes and fires RPCs until the pipe
 * is full
 */
static void osp_sync_send_new_rpc(struct osp_device *d,
                                  struct ptlrpc_request *req)
{
        LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight);
        LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);

        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
}

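/*
 * allocates and packs an OST request for the given llog record,
 * stashes the llog cookie in the body and hooks up our callbacks
 */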
static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d,
                                               struct llog_handle *llh,
                                               struct llog_rec_hdr *h,
                                               ost_cmd_t op,
                                               const struct req_format *format)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct obd_import       *imp;
        int                      rc;

        /* Prepare the request */
        imp = d->opd_obd->u.cli.cl_import;
        LASSERT(imp);
        req = ptlrpc_request_alloc(imp, format);
        if (req == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, op);
        if (rc) {
                ptlrpc_req_finished(req);
                return ERR_PTR(rc);
        }

        /*
         * this is a trick: to save on memory allocations we put the cookie
         * into the request, but don't set the corresponding flag in o_valid
         * so that OST doesn't interpret this cookie. once the request is
         * committed on OST we take the cookie from the request and cancel
         */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
        body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
        body->oa.o_lcookie.lgc_index = h->lrh_index;
        INIT_LIST_HEAD(&req->rq_exp_list);
        req->rq_svc_thread = (void *) OSP_JOB_MAGIC;

        req->rq_interpret_reply = osp_sync_interpret;
        req->rq_commit_cb = osp_sync_request_commit_cb;
        req->rq_cb_data = d;

        ptlrpc_request_set_replen(req);

        return req;
}

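/* turns a setattr llog record into an OST_SETATTR RPC */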
static int osp_sync_new_setattr_job(struct osp_device *d,
                                    struct llog_handle *llh,
                                    struct llog_rec_hdr *h)
{
        struct llog_setattr64_rec       *rec = (struct llog_setattr64_rec *)h;
        struct ptlrpc_request           *req;
        struct ost_body                 *body;

        ENTRY;
        LASSERT(h->lrh_type == MDS_SETATTR64_REC);

        /* lsr_valid can only be 0 or have OBD_MD_{FLUID,FLGID} set,
         * so no bits other than these should be set. */
        if ((rec->lsr_valid & ~(OBD_MD_FLUID | OBD_MD_FLGID)) != 0) {
                CERROR("%s: invalid setattr record, lsr_valid:"LPU64"\n",
                       d->opd_obd->obd_name, rec->lsr_valid);
                /* return 0 so that the sync thread can continue processing
                 * other records. */
                RETURN(0);
        }

        req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR);
        if (IS_ERR(req))
                RETURN(PTR_ERR(req));

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        body->oa.o_oi = rec->lsr_oi;
        body->oa.o_uid = rec->lsr_uid;
        body->oa.o_gid = rec->lsr_gid;
        body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID;
        /* an old setattr record (prior to 2.6.0) doesn't have 'valid' stored;
         * we assume that both UID and GID are valid in that case. */
        if (rec->lsr_valid == 0)
                body->oa.o_valid |= (OBD_MD_FLUID | OBD_MD_FLGID);
        else
                body->oa.o_valid |= rec->lsr_valid;

        osp_sync_send_new_rpc(d, req);
        RETURN(1);
}

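/* turns an old-style unlink llog record into an OST_DESTROY RPC */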
static int osp_sync_new_unlink_job(struct osp_device *d,
                                   struct llog_handle *llh,
                                   struct llog_rec_hdr *h)
{
        struct llog_unlink_rec  *rec = (struct llog_unlink_rec *)h;
        struct ptlrpc_request   *req;
        struct ost_body         *body;

        ENTRY;
        LASSERT(h->lrh_type == MDS_UNLINK_REC);

        req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
        if (IS_ERR(req))
                RETURN(PTR_ERR(req));

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        ostid_set_seq(&body->oa.o_oi, rec->lur_oseq);
        ostid_set_id(&body->oa.o_oi, rec->lur_oid);
        body->oa.o_misc = rec->lur_count;
        body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID;
        if (rec->lur_count)
                body->oa.o_valid |= OBD_MD_FLOBJCOUNT;

        osp_sync_send_new_rpc(d, req);
        RETURN(1);
}

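/*
 * prepares an OUT update request destroying the object referenced by
 * an unlink64 record on a remote MDT; the llog cookie travels as a
 * parameter of the destroy update
 */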
static int osp_prep_unlink_update_req(const struct lu_env *env,
                                      struct osp_device *osp,
                                      struct llog_handle *llh,
                                      struct llog_rec_hdr *h,
                                      struct ptlrpc_request **reqp)
{
        struct llog_unlink64_rec        *rec = (struct llog_unlink64_rec *)h;
        struct dt_update_request        *update = NULL;
        struct ptlrpc_request           *req;
        const char                      *buf;
        struct llog_cookie              lcookie;
        int                             size;
        int                             rc;
        ENTRY;

        update = out_create_update_req(&osp->opd_dt_dev);
        if (IS_ERR(update))
                RETURN(PTR_ERR(update));

        /* this can only happen when unlinking a slave directory, so
         * decrease the refs for ".." and "." */
        rc = out_insert_update(env, update, OUT_REF_DEL, &rec->lur_fid, 0,
                               NULL, NULL);
        if (rc != 0)
                GOTO(out, rc);

        rc = out_insert_update(env, update, OUT_REF_DEL, &rec->lur_fid, 0,
                               NULL, NULL);
        if (rc != 0)
                GOTO(out, rc);

        lcookie.lgc_lgl = llh->lgh_id;
        lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
        lcookie.lgc_index = h->lrh_index;
        size = sizeof(lcookie);
        buf = (const char *)&lcookie;

        rc = out_insert_update(env, update, OUT_DESTROY, &rec->lur_fid, 1,
                               &size, &buf);
        if (rc != 0)
                GOTO(out, rc);

        rc = out_prep_update_req(env, osp->opd_obd->u.cli.cl_import,
                                 update->dur_req, &req);
        if (rc != 0)
                GOTO(out, rc);

        INIT_LIST_HEAD(&req->rq_exp_list);
        req->rq_svc_thread = (void *)OSP_JOB_MAGIC;

        req->rq_interpret_reply = osp_sync_interpret;
        req->rq_commit_cb = osp_sync_request_commit_cb;
        req->rq_cb_data = osp;

        ptlrpc_request_set_replen(req);
        *reqp = req;
out:
        if (update != NULL)
                out_destroy_update_req(update);

        RETURN(rc);
}

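/* turns an unlink64 llog record into an OUT update (MDT target)
 * or an OST_DESTROY RPC (OST target) */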
static int osp_sync_new_unlink64_job(const struct lu_env *env,
                                     struct osp_device *d,
                                     struct llog_handle *llh,
                                     struct llog_rec_hdr *h)
{
        struct llog_unlink64_rec        *rec = (struct llog_unlink64_rec *)h;
        struct ptlrpc_request           *req = NULL;
        struct ost_body                 *body;
        int                              rc;

        ENTRY;
        LASSERT(h->lrh_type == MDS_UNLINK64_REC);

        if (d->opd_connect_mdt) {
                rc = osp_prep_unlink_update_req(env, d, llh, h, &req);
                if (rc != 0)
                        RETURN(rc);
        } else {
                req = osp_sync_new_job(d, llh, h, OST_DESTROY,
                                       &RQF_OST_DESTROY);
                if (IS_ERR(req))
                        RETURN(PTR_ERR(req));

                body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
                if (body == NULL) {
                        /* don't leak the request on the error paths */
                        ptlrpc_req_finished(req);
                        RETURN(-EFAULT);
                }
                rc = fid_to_ostid(&rec->lur_fid, &body->oa.o_oi);
                if (rc < 0) {
                        ptlrpc_req_finished(req);
                        RETURN(rc);
                }
                body->oa.o_misc = rec->lur_count;
                body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
                                   OBD_MD_FLOBJCOUNT;
        }
        osp_sync_send_new_rpc(d, req);
        RETURN(1);
}

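/*
 * dispatches a single llog record: generation records are cancelled
 * right away, the others are turned into RPCs by type
 */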
static int osp_sync_process_record(const struct lu_env *env,
                                   struct osp_device *d,
                                   struct llog_handle *llh,
                                   struct llog_rec_hdr *rec)
{
        struct llog_cookie       cookie;
        int                      rc = 0;

        cookie.lgc_lgl = llh->lgh_id;
        cookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
        cookie.lgc_index = rec->lrh_index;

        if (unlikely(rec->lrh_type == LLOG_GEN_REC)) {
                struct llog_gen_rec *gen = (struct llog_gen_rec *)rec;

                /* we're waiting for the record generated by this instance */
                LASSERT(d->opd_syn_prev_done == 0);
                if (!memcmp(&d->opd_syn_generation, &gen->lgr_gen,
                            sizeof(gen->lgr_gen))) {
                        CDEBUG(D_HA, "processed all old entries\n");
                        d->opd_syn_prev_done = 1;
                }

                /* cancel any generation record */
                rc = llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle,
                                             1, &cookie);

                return rc;
        }

        /*
         * now we prepare and fill requests to OST, put them on the queue
         * and fire after the next commit callback
         */

        /* notice we increment the counters before sending the RPC, to be
         * consistent with the RPC interpret callback which may run very
         * quickly */
        spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_flight++;
        d->opd_syn_rpc_in_progress++;
        spin_unlock(&d->opd_syn_lock);

        switch (rec->lrh_type) {
        /* case MDS_UNLINK_REC is kept for compatibility */
        case MDS_UNLINK_REC:
                rc = osp_sync_new_unlink_job(d, llh, rec);
                break;
        case MDS_UNLINK64_REC:
                rc = osp_sync_new_unlink64_job(env, d, llh, rec);
                break;
        case MDS_SETATTR64_REC:
                rc = osp_sync_new_setattr_job(d, llh, rec);
                break;
        default:
                CERROR("%s: unknown record type: %x\n", d->opd_obd->obd_name,
                       rec->lrh_type);
                /* we should continue processing */
        }

        /* rc > 0 means a sync RPC has been added to the queue */
        if (likely(rc > 0)) {
                spin_lock(&d->opd_syn_lock);
                if (d->opd_syn_prev_done) {
                        LASSERT(d->opd_syn_changes > 0);
                        LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
                        /*
                         * NOTE: it's possible to meet the same id if
                         * the OST stores several stripes of the same file
                         */
                        if (rec->lrh_id > d->opd_syn_last_processed_id) {
                                d->opd_syn_last_processed_id = rec->lrh_id;
                                wake_up(&d->opd_syn_barrier_waitq);
                        }

                        d->opd_syn_changes--;
                }
                CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
                       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
                       d->opd_syn_rpc_in_progress);
                spin_unlock(&d->opd_syn_lock);
                rc = 0;
        } else {
                spin_lock(&d->opd_syn_lock);
                d->opd_syn_rpc_in_flight--;
                d->opd_syn_rpc_in_progress--;
                spin_unlock(&d->opd_syn_lock);
        }

        CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
               rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id, rc);
        return rc;
}

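/*
 * goes through the list of requests reported committed by OST
 * and cancels the corresponding llog records
 */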
static void osp_sync_process_committed(const struct lu_env *env,
                                       struct osp_device *d)
{
        struct obd_device       *obd = d->opd_obd;
        struct obd_import       *imp = obd->u.cli.cl_import;
        struct ost_body         *body;
        struct ptlrpc_request   *req, *tmp;
        struct llog_ctxt        *ctxt;
        struct llog_handle      *llh;
        struct list_head         list;
        int                      rc, done = 0;

        ENTRY;

        if (list_empty(&d->opd_syn_committed_there))
                return;

        /*
         * if the current status is -ENOSPC (lack of free space on OST)
         * then we should poll the OST immediately once the object destroy
         * is committed.
         * notice: we do this upon commit as well because some backends
         * (like DMU) do not release space right away.
         */
        if (d->opd_pre != NULL && unlikely(d->opd_pre_status == -ENOSPC))
                osp_statfs_need_now(d);

        /*
         * now cancel them all
         * XXX: can we improve this using some batching?
         *      with batch RPC that'll happen automatically?
         * XXX: can we store ctxt in lod_device and save a few cycles?
         */
        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
        LASSERT(ctxt);

        llh = ctxt->loc_handle;
        LASSERT(llh);

        INIT_LIST_HEAD(&list);
        spin_lock(&d->opd_syn_lock);
        list_splice(&d->opd_syn_committed_there, &list);
        INIT_LIST_HEAD(&d->opd_syn_committed_there);
        spin_unlock(&d->opd_syn_lock);

        list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
                struct llog_cookie *lcookie = NULL;

                LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
                list_del_init(&req->rq_exp_list);

                if (d->opd_connect_mdt) {
                        struct object_update_request *ureq;
                        struct object_update *update;
                        ureq = req_capsule_client_get(&req->rq_pill,
                                                      &RMF_OUT_UPDATE);
                        LASSERT(ureq != NULL &&
                                ureq->ourq_magic == UPDATE_REQUEST_MAGIC);

                        /* the 1st/2nd updates decref "." and "..", the 3rd
                         * one is the destroy, where the log cookie is stored.
                         * See osp_prep_unlink_update_req */
                        update = object_update_request_get(ureq, 2, NULL);
                        LASSERT(update != NULL);
                        lcookie = object_update_param_get(update, 0, NULL);
                        LASSERT(lcookie != NULL);
                } else {
                        body = req_capsule_client_get(&req->rq_pill,
                                                      &RMF_OST_BODY);
                        LASSERT(body);
                        lcookie = &body->oa.o_lcookie;
                }
                /* the import can be closing, in which case all commit cb's
                 * have been called, so we can check committedness directly */
                if (req->rq_transno <= imp->imp_peer_committed_transno) {
                        rc = llog_cat_cancel_records(env, llh, 1, lcookie);
                        if (rc)
                                CERROR("%s: can't cancel record: %d\n",
                                       obd->obd_name, rc);
                } else {
                        DEBUG_REQ(D_HA, req, "not committed");
                }

                ptlrpc_req_finished(req);
                done++;
        }

        llog_ctxt_put(ctxt);

        LASSERT(d->opd_syn_rpc_in_progress >= done);
        spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_progress -= done;
        spin_unlock(&d->opd_syn_lock);
        CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
               d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
               d->opd_syn_rpc_in_progress);

        osp_sync_check_for_work(d);

        /* wake up the thread if requested to stop:
         * it might be waiting for in-progress to complete */
        if (unlikely(osp_sync_running(d) == 0))
                wake_up(&d->opd_syn_waitq);

        EXIT;
}

/*
 * this is where most of the queues' processing happens
 */
static int osp_sync_process_queues(const struct lu_env *env,
                                   struct llog_handle *llh,
                                   struct llog_rec_hdr *rec,
                                   void *data)
{
        struct osp_device       *d = data;
        int                      rc;

        do {
                struct l_wait_info lwi = { 0 };

                if (!osp_sync_running(d)) {
                        CDEBUG(D_HA, "stop llog processing\n");
                        return LLOG_PROC_BREAK;
                }

                /* process requests committed by OST */
                osp_sync_process_committed(env, d);

                /* if there are changes to be processed and we have
                 * the resources for this ... do it now */
                if (osp_sync_can_process_new(d, rec)) {
                        if (llh == NULL) {
                                /* ask llog for another record */
                                CDEBUG(D_HA, "%lu changes, %u in progress, %u in flight\n",
                                       d->opd_syn_changes,
                                       d->opd_syn_rpc_in_progress,
                                       d->opd_syn_rpc_in_flight);
                                return 0;
                        }

                        /*
                         * try to send; in case of disconnection, suspend
                         * processing till we can send this request
                         */
                        do {
                                rc = osp_sync_process_record(env, d, llh, rec);
                                /*
                                 * XXX: probably different handling is needed
                                 * for some bugs, like immediate exit or if
                                 * OSP gets inactive
                                 */
                                if (rc) {
                                        CERROR("can't send: %d\n", rc);
                                        l_wait_event(d->opd_syn_waitq,
                                                     !osp_sync_running(d) ||
                                                     osp_sync_has_work(d),
                                                     &lwi);
                                }
                        } while (rc != 0 && osp_sync_running(d));

                        llh = NULL;
                        rec = NULL;
                }

                if (d->opd_syn_last_processed_id == d->opd_syn_last_used_id)
                        osp_sync_remove_from_tracker(d);

                l_wait_event(d->opd_syn_waitq,
                             !osp_sync_running(d) ||
                             osp_sync_can_process_new(d, rec) ||
                             !list_empty(&d->opd_syn_committed_there),
                             &lwi);
        } while (1);
}

/*
 * this thread runs the llog_cat_process() scanner calling our callback
 * to process llog records. in the callback we implement a tricky
 * state machine as we don't want to start scanning the llog again
 * and again, and we also don't want to process too many records and
 * send too many RPCs at a time. so, depending on the current load
 * (number of changes being synced to OST) the callback can suspend
 * awaiting some new conditions, like syncs completed.
 *
 * in order to process llog records left by previous boots and to allow
 * llog_process_thread() to find something (otherwise it'd just exit
 * immediately) we add a special GENERATION record on each boot.
 */
static int osp_sync_thread(void *_arg)
{
        struct osp_device       *d = _arg;
        struct ptlrpc_thread    *thread = &d->opd_syn_thread;
        struct l_wait_info       lwi = { 0 };
        struct llog_ctxt        *ctxt;
        struct obd_device       *obd = d->opd_obd;
        struct llog_handle      *llh;
        struct lu_env            env;
        int                      rc, count;

        ENTRY;

        rc = lu_env_init(&env, LCT_LOCAL);
        if (rc) {
                CERROR("%s: can't initialize env: rc = %d\n",
                       obd->obd_name, rc);
                RETURN(rc);
        }

        spin_lock(&d->opd_syn_lock);
        thread->t_flags = SVC_RUNNING;
        spin_unlock(&d->opd_syn_lock);
        wake_up(&thread->t_ctl_waitq);

        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
        if (ctxt == NULL) {
                CERROR("can't get appropriate context\n");
                GOTO(out, rc = -EINVAL);
        }

        llh = ctxt->loc_handle;
        if (llh == NULL) {
                CERROR("can't get llh\n");
                llog_ctxt_put(ctxt);
                GOTO(out, rc = -EINVAL);
        }

        rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, 0, 0);
        LASSERTF(rc == 0 || rc == LLOG_PROC_BREAK,
                 "%lu changes, %u in progress, %u in flight: %d\n",
                 d->opd_syn_changes, d->opd_syn_rpc_in_progress,
                 d->opd_syn_rpc_in_flight, rc);

        /* we don't expect llog_process_thread() to exit till umount */
        LASSERTF(thread->t_flags != SVC_RUNNING,
                 "%lu changes, %u in progress, %u in flight\n",
                 d->opd_syn_changes, d->opd_syn_rpc_in_progress,
                 d->opd_syn_rpc_in_flight);

        /* wait till all the requests are completed */
        count = 0;
        while (d->opd_syn_rpc_in_progress > 0) {
                osp_sync_process_committed(&env, d);

                lwi = LWI_TIMEOUT(cfs_time_seconds(5), NULL, NULL);
                rc = l_wait_event(d->opd_syn_waitq,
                                  d->opd_syn_rpc_in_progress == 0,
                                  &lwi);
                if (rc == -ETIMEDOUT)
                        count++;
                LASSERTF(count < 10, "%s: %d %d %sempty\n",
                         d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
                         d->opd_syn_rpc_in_flight,
                         list_empty(&d->opd_syn_committed_there) ? "" : "!");
        }

        llog_cat_close(&env, llh);
        rc = llog_cleanup(&env, ctxt);
        if (rc)
                CERROR("can't cleanup llog: %d\n", rc);
out:
        LASSERTF(d->opd_syn_rpc_in_progress == 0,
                 "%s: %d %d %sempty\n",
                 d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
                 d->opd_syn_rpc_in_flight,
                 list_empty(&d->opd_syn_committed_there) ? "" : "!");

        thread->t_flags = SVC_STOPPED;

        wake_up(&thread->t_ctl_waitq);

        lu_env_fini(&env);

        RETURN(0);
}

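/*
 * finds or creates the llog catalog storing the sync records and
 * adds a generation record marking this boot
 */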
static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d)
{
        struct osp_thread_info  *osi = osp_env_info(env);
        struct lu_fid           *fid = &osi->osi_fid;
        struct llog_handle      *lgh = NULL;
        struct obd_device       *obd = d->opd_obd;
        struct llog_ctxt        *ctxt;
        int                     rc;

        ENTRY;

        LASSERT(obd);

        /*
         * open the llog corresponding to our OST
         */
        OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
        obd->obd_lvfs_ctxt.dt = d->opd_storage;

        if (d->opd_connect_mdt)
                lu_local_obj_fid(fid, SLAVE_LLOG_CATALOGS_OID);
        else
                lu_local_obj_fid(fid, LLOG_CATALOGS_OID);

        rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1,
                                   &osi->osi_cid, fid);
        if (rc) {
                CERROR("%s: can't get id from catalogs: rc = %d\n",
                       obd->obd_name, rc);
                RETURN(rc);
        }

        CDEBUG(D_INFO, "%s: Init llog for %d - catid "DOSTID":%x\n",
               obd->obd_name, d->opd_index,
               POSTID(&osi->osi_cid.lci_logid.lgl_oi),
               osi->osi_cid.lci_logid.lgl_ogen);

        rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd,
                        &osp_mds_ost_orig_logops);
        if (rc)
                RETURN(rc);

        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
        LASSERT(ctxt);

        if (likely(logid_id(&osi->osi_cid.lci_logid) != 0)) {
                rc = llog_open(env, ctxt, &lgh, &osi->osi_cid.lci_logid, NULL,
                               LLOG_OPEN_EXISTS);
                /* re-create the llog if it is missing */
                if (rc == -ENOENT)
                        logid_set_id(&osi->osi_cid.lci_logid, 0);
                else if (rc < 0)
                        GOTO(out_cleanup, rc);
        }

        if (unlikely(logid_id(&osi->osi_cid.lci_logid) == 0)) {
                rc = llog_open_create(env, ctxt, &lgh, NULL, NULL);
                if (rc < 0)
                        GOTO(out_cleanup, rc);
                osi->osi_cid.lci_logid = lgh->lgh_id;
        }

        LASSERT(lgh != NULL);
        ctxt->loc_handle = lgh;

        rc = llog_cat_init_and_process(env, lgh);
        if (rc)
                GOTO(out_close, rc);

        rc = llog_osd_put_cat_list(env, d->opd_storage, d->opd_index, 1,
                                   &osi->osi_cid, fid);
        if (rc)
                GOTO(out_close, rc);

        /*
         * put a mark in the llog: records up to this mark were left by
         * previous boots and are processed unconditionally
         */
        d->opd_syn_generation.mnt_cnt = cfs_time_current();
        d->opd_syn_generation.conn_cnt = cfs_time_current();

        osi->osi_hdr.lrh_type = LLOG_GEN_REC;
        osi->osi_hdr.lrh_len = sizeof(osi->osi_gen);

        memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation,
               sizeof(osi->osi_gen.lgr_gen));

        rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie);
        if (rc < 0)
                GOTO(out_close, rc);
        llog_ctxt_put(ctxt);
        RETURN(0);
out_close:
        llog_cat_close(env, lgh);
out_cleanup:
        llog_cleanup(env, ctxt);
        RETURN(rc);
}

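/* closes the llog catalog and releases the context */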
static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d)
{
        struct llog_ctxt *ctxt;

        ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
        if (ctxt != NULL)
                llog_cat_close(env, ctxt->loc_handle);
        llog_cleanup(env, ctxt);
}

/*
 * initializes the sync component of OSP
 */
int osp_sync_init(const struct lu_env *env, struct osp_device *d)
{
        struct l_wait_info       lwi = { 0 };
        struct task_struct      *task;
        int                      rc;

        ENTRY;

        rc = osp_sync_id_traction_init(d);
        if (rc)
                RETURN(rc);

        /*
         * initialize the llog storing changes
         */
        rc = osp_sync_llog_init(env, d);
        if (rc) {
                CERROR("%s: can't initialize llog: rc = %d\n",
                       d->opd_obd->obd_name, rc);
                GOTO(err_id, rc);
        }

        /*
         * start the synchronization thread
         */
        d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
        d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
        spin_lock_init(&d->opd_syn_lock);
        init_waitqueue_head(&d->opd_syn_waitq);
        init_waitqueue_head(&d->opd_syn_barrier_waitq);
        init_waitqueue_head(&d->opd_syn_thread.t_ctl_waitq);
        INIT_LIST_HEAD(&d->opd_syn_committed_there);

        task = kthread_run(osp_sync_thread, d, "osp-syn-%u-%u",
                           d->opd_index, d->opd_group);
        if (IS_ERR(task)) {
                rc = PTR_ERR(task);
                CERROR("%s: cannot start sync thread: rc = %d\n",
                       d->opd_obd->obd_name, rc);
                GOTO(err_llog, rc);
        }

        l_wait_event(d->opd_syn_thread.t_ctl_waitq,
                     osp_sync_running(d) || osp_sync_stopped(d), &lwi);

        RETURN(0);
err_llog:
        osp_sync_llog_fini(env, d);
err_id:
        osp_sync_id_traction_fini(d);
        return rc;
}

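/*
 * stops the sync component of OSP
 */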
int osp_sync_fini(struct osp_device *d)
{
        struct ptlrpc_thread *thread = &d->opd_syn_thread;

        ENTRY;

        thread->t_flags = SVC_STOPPING;
        wake_up(&d->opd_syn_waitq);
        wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);

        /*
         * unregister transaction callbacks only when the sync thread
         * has finished operations with the llog
         */
        osp_sync_id_traction_fini(d);

        RETURN(0);
}

static DEFINE_MUTEX(osp_id_tracker_sem);
static struct list_head osp_id_tracker_list =
                LIST_HEAD_INIT(osp_id_tracker_list);

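/*
 * commit callback: updates the last committed id known to the tracker
 * and wakes up the devices waiting for it
 */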
static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
{
        struct osp_id_tracker   *tr = cookie;
        struct osp_device       *d;
        struct osp_txn_info     *txn;

        LASSERT(tr);

        txn = osp_txn_info(&th->th_ctx);
        if (txn == NULL || txn->oti_current_id < tr->otr_committed_id)
                return;

        spin_lock(&tr->otr_lock);
        if (likely(txn->oti_current_id > tr->otr_committed_id)) {
                CDEBUG(D_OTHER, "committed: %u -> %u\n",
                       tr->otr_committed_id, txn->oti_current_id);
                tr->otr_committed_id = txn->oti_current_id;

                list_for_each_entry(d, &tr->otr_wakeup_list,
                                    opd_syn_ontrack) {
                        d->opd_syn_last_committed_id = tr->otr_committed_id;
                        wake_up(&d->opd_syn_waitq);
                }
        }
        spin_unlock(&tr->otr_lock);
}

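/*
 * attaches the device to the id tracker shared by all OSPs on the
 * same storage, creating the tracker on first use
 */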
static int osp_sync_id_traction_init(struct osp_device *d)
{
        struct osp_id_tracker   *tr, *found = NULL;
        int                      rc = 0;

        LASSERT(d);
        LASSERT(d->opd_storage);
        LASSERT(d->opd_syn_tracker == NULL);
        INIT_LIST_HEAD(&d->opd_syn_ontrack);

        mutex_lock(&osp_id_tracker_sem);
        list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
                if (tr->otr_dev == d->opd_storage) {
                        LASSERT(atomic_read(&tr->otr_refcount));
                        atomic_inc(&tr->otr_refcount);
                        d->opd_syn_tracker = tr;
                        found = tr;
                        break;
                }
        }

        if (found == NULL) {
                rc = -ENOMEM;
                OBD_ALLOC_PTR(tr);
                if (tr) {
                        d->opd_syn_tracker = tr;
                        spin_lock_init(&tr->otr_lock);
                        tr->otr_dev = d->opd_storage;
                        tr->otr_next_id = 1;
                        tr->otr_committed_id = 0;
                        atomic_set(&tr->otr_refcount, 1);
                        INIT_LIST_HEAD(&tr->otr_wakeup_list);
                        list_add(&tr->otr_list, &osp_id_tracker_list);
                        tr->otr_tx_cb.dtc_txn_commit =
                                                osp_sync_tracker_commit_cb;
                        tr->otr_tx_cb.dtc_cookie = tr;
                        tr->otr_tx_cb.dtc_tag = LCT_MD_THREAD;
                        dt_txn_callback_add(d->opd_storage, &tr->otr_tx_cb);
                        rc = 0;
                }
        }
        mutex_unlock(&osp_id_tracker_sem);

        return rc;
}

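/* detaches the device from the id tracker, freeing it on the last put */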
static void osp_sync_id_traction_fini(struct osp_device *d)
{
        struct osp_id_tracker *tr;

        ENTRY;

        LASSERT(d);
        tr = d->opd_syn_tracker;
        if (tr == NULL) {
                EXIT;
                return;
        }

        osp_sync_remove_from_tracker(d);

        mutex_lock(&osp_id_tracker_sem);
        if (atomic_dec_and_test(&tr->otr_refcount)) {
                dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
                LASSERT(list_empty(&tr->otr_wakeup_list));
                list_del(&tr->otr_list);
                OBD_FREE_PTR(tr);
                d->opd_syn_tracker = NULL;
        }
        mutex_unlock(&osp_id_tracker_sem);

        EXIT;
}

/*
 * generates an id for the tracker
 */
static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
{
        struct osp_id_tracker *tr;

        tr = d->opd_syn_tracker;
        LASSERT(tr);

        /* XXX: we could improve this by introducing per-cpu
         * preallocated ids? */
        spin_lock(&tr->otr_lock);
        if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
                spin_unlock(&tr->otr_lock);
                CERROR("%s: next %u, last synced %lu\n",
                       d->opd_obd->obd_name, tr->otr_next_id,
                       d->opd_syn_last_used_id);
                LBUG();
        }

        if (id == 0)
                id = tr->otr_next_id++;
        if (id > d->opd_syn_last_used_id)
                d->opd_syn_last_used_id = id;
        if (list_empty(&d->opd_syn_ontrack))
                list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
        spin_unlock(&tr->otr_lock);
        CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);

        return id;
}

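/* stops receiving wakeups from the tracker once everything is processed */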
static void osp_sync_remove_from_tracker(struct osp_device *d)
{
        struct osp_id_tracker *tr;

        tr = d->opd_syn_tracker;
        LASSERT(tr);

        if (list_empty(&d->opd_syn_ontrack))
                return;

        spin_lock(&tr->otr_lock);
        list_del_init(&d->opd_syn_ontrack);
        spin_unlock(&tr->otr_lock);
}
1352