LU-2701 osp: wake up sync thread
/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
 *
 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
 * CA 95054 USA or visit www.sun.com if you need additional information or
 * have any questions.
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2012, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 *
 * lustre/osp/osp_sync.c
 *
 * Lustre OST Proxy Device
 *
 * Author: Alex Zhuravlev <alexey.zhuravlev@intel.com>
 * Author: Mikhail Pershin <mike.pershin@intel.com>
 */

#ifndef EXPORT_SYMTAB
# define EXPORT_SYMTAB
#endif
#define DEBUG_SUBSYSTEM S_MDS

#include <lustre_log.h>
#include "osp_internal.h"

static int osp_sync_id_traction_init(struct osp_device *d);
static void osp_sync_id_traction_fini(struct osp_device *d);
static __u32 osp_sync_id_get(struct osp_device *d, __u32 id);
static void osp_sync_remove_from_tracker(struct osp_device *d);

/*
 * this is the component of OSP implementing synchronization between MDS
 * and OST. it llogs all interesting changes (currently uid/gid changes
 * and object destroys) atomically, then makes sure the changes hit OST
 * storage
 *
 * we have 4 queues of work:
 *
 * the first queue is the llog itself; once read, a change is stored in
 * the 2nd queue in the form of an RPC (but the RPC isn't fired yet).
 *
 * the second queue (opd_syn_waiting_for_commit) holds changes awaiting
 * local commit. once a change is committed locally it migrates onto the
 * 3rd queue.
 *
 * the third queue (opd_syn_committed_here) holds changes committed
 * locally, but not yet sent to the OST (as the pipe can be full). once
 * the pipe becomes non-full we take a change from the queue and fire the
 * corresponding RPC.
 *
 * once an RPC is reported committed by the OST (using the regular
 * last_committed mechanism) the change jumps into the 4th queue
 * (opd_syn_committed_there); now we can cancel the corresponding llog
 * record and release the RPC
 *
 * opd_syn_changes is the number of unread llog records (to be processed).
 * notice this number doesn't include llog records from previous boots.
 * with OSP_SYN_THRESHOLD we try to batch processing a bit (TO BE IMPLEMENTED)
 *
 * opd_syn_rpc_in_progress is the number of requests in queues 2-4.
 * we bound this with OSP_MAX_IN_PROGRESS so that OSP doesn't consume
 * too much memory -- how to deal with thousands of OSTs? batching could help?
 *
 * opd_syn_rpc_in_flight is the number of RPCs in flight.
 * we bound this with OSP_MAX_IN_FLIGHT
 */
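
/*
 * Illustrative summary (added; not in the original source) of the
 * pipeline described above -- a single change flows left to right:
 *
 *   llog record
 *     --read-------->  RPC prepared    (opd_syn_waiting_for_commit)
 *     --local commit-> ready to send   (opd_syn_committed_here)
 *     --pipe free----> RPC in flight
 *     --OST commit---> opd_syn_committed_there
 *     --then---------> cancel llog record, release RPC
 */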

/* XXX: do math to learn reasonable threshold
 * should it be ~ number of changes fitting bulk? */

#define OSP_SYN_THRESHOLD       10
#define OSP_MAX_IN_FLIGHT       8
#define OSP_MAX_IN_PROGRESS     4096

#define OSP_JOB_MAGIC           0x26112005

static inline int osp_sync_running(struct osp_device *d)
{
        return !!(d->opd_syn_thread.t_flags & SVC_RUNNING);
}

static inline int osp_sync_stopped(struct osp_device *d)
{
        return !!(d->opd_syn_thread.t_flags & SVC_STOPPED);
}

static inline int osp_sync_has_new_job(struct osp_device *d)
{
        return ((d->opd_syn_last_processed_id < d->opd_syn_last_used_id) &&
                (d->opd_syn_last_processed_id < d->opd_syn_last_committed_id))
                || (d->opd_syn_prev_done == 0);
}

static inline int osp_sync_low_in_progress(struct osp_device *d)
{
        return d->opd_syn_rpc_in_progress < d->opd_syn_max_rpc_in_progress;
}

static inline int osp_sync_low_in_flight(struct osp_device *d)
{
        return d->opd_syn_rpc_in_flight < d->opd_syn_max_rpc_in_flight;
}

static inline int osp_sync_has_work(struct osp_device *d)
{
        /* has new/old changes and low in-progress? */
        if (osp_sync_has_new_job(d) && osp_sync_low_in_progress(d) &&
            osp_sync_low_in_flight(d) && d->opd_imp_connected)
                return 1;

        /* has remotely committed? */
        if (!cfs_list_empty(&d->opd_syn_committed_there))
                return 1;

        return 0;
}

#define osp_sync_check_for_work(d)                      \
do {                                                    \
        if (osp_sync_has_work(d)) {                     \
                cfs_waitq_signal(&d->opd_syn_waitq);    \
        }                                               \
} while (0)

void __osp_sync_check_for_work(struct osp_device *d)
{
        osp_sync_check_for_work(d);
}
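
/*
 * Note (added): __osp_sync_check_for_work() gives other OSP files an
 * out-of-line way to trigger the check above, presumably so callers
 * don't have to depend on the macro internals.
 */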

static inline int osp_sync_can_process_new(struct osp_device *d,
                                           struct llog_rec_hdr *rec)
{
        LASSERT(d);

        if (!osp_sync_low_in_progress(d))
                return 0;
        if (!osp_sync_low_in_flight(d))
                return 0;
        if (!d->opd_imp_connected)
                return 0;
        if (d->opd_syn_prev_done == 0)
                return 1;
        if (d->opd_syn_changes == 0)
                return 0;
        if (rec == NULL || rec->lrh_id <= d->opd_syn_last_committed_id)
                return 1;
        return 0;
}

int osp_sync_declare_add(const struct lu_env *env, struct osp_object *o,
                         llog_op_type type, struct thandle *th)
{
        struct osp_thread_info  *osi = osp_env_info(env);
        struct osp_device       *d = lu2osp_dev(o->opo_obj.do_lu.lo_dev);
        struct llog_ctxt        *ctxt;
        int                      rc;

        ENTRY;

        /* it's a layering violation to access the internals of th,
         * but we can do this as a sanity check, for a while */
        LASSERT(th->th_dev == d->opd_storage);

        switch (type) {
        case MDS_UNLINK64_REC:
                osi->osi_hdr.lrh_len = sizeof(struct llog_unlink64_rec);
                break;
        case MDS_SETATTR64_REC:
                osi->osi_hdr.lrh_len = sizeof(struct llog_setattr64_rec);
                break;
        default:
                LBUG();
        }

        /* we want ->dt_trans_start() to allocate per-thandle structure */
        th->th_tags |= LCT_OSP_THREAD;

        ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
        LASSERT(ctxt);

        rc = llog_declare_add(env, ctxt->loc_handle, &osi->osi_hdr, th);
        llog_ctxt_put(ctxt);

        RETURN(rc);
}

static int osp_sync_add_rec(const struct lu_env *env, struct osp_device *d,
                            const struct lu_fid *fid, llog_op_type type,
                            int count, struct thandle *th,
                            const struct lu_attr *attr)
{
        struct osp_thread_info  *osi = osp_env_info(env);
        struct llog_ctxt        *ctxt;
        struct osp_txn_info     *txn;
        int                      rc;

        ENTRY;

        /* it's a layering violation to access the internals of th,
         * but we can do this as a sanity check, for a while */
        LASSERT(th->th_dev == d->opd_storage);

        switch (type) {
        case MDS_UNLINK64_REC:
                osi->osi_hdr.lrh_len = sizeof(osi->osi_unlink);
                osi->osi_hdr.lrh_type = MDS_UNLINK64_REC;
                osi->osi_unlink.lur_fid  = *fid;
                osi->osi_unlink.lur_count = count;
                break;
        case MDS_SETATTR64_REC:
                rc = fid_ostid_pack(fid, &osi->osi_oi);
                LASSERT(rc == 0);
                osi->osi_hdr.lrh_len = sizeof(osi->osi_setattr);
                osi->osi_hdr.lrh_type = MDS_SETATTR64_REC;
                osi->osi_setattr.lsr_oid  = osi->osi_oi.oi_id;
                osi->osi_setattr.lsr_oseq = osi->osi_oi.oi_seq;
                LASSERT(attr);
                osi->osi_setattr.lsr_uid = attr->la_uid;
                osi->osi_setattr.lsr_gid = attr->la_gid;
                break;
        default:
                LBUG();
        }

        txn = osp_txn_info(&th->th_ctx);
        LASSERT(txn);

        txn->oti_current_id = osp_sync_id_get(d, txn->oti_current_id);
        osi->osi_hdr.lrh_id = txn->oti_current_id;

        ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
        if (ctxt == NULL)
                RETURN(-ENOMEM);
        rc = llog_add(env, ctxt->loc_handle, &osi->osi_hdr, &osi->osi_cookie,
                      NULL, th);
        llog_ctxt_put(ctxt);

        CDEBUG(D_OTHER, "%s: new record %lu:%lu:%lu/%lu: %d\n",
               d->opd_obd->obd_name,
               (unsigned long) osi->osi_cookie.lgc_lgl.lgl_oid,
               (unsigned long) osi->osi_cookie.lgc_lgl.lgl_oseq,
               (unsigned long) osi->osi_cookie.lgc_lgl.lgl_ogen,
               (unsigned long) osi->osi_cookie.lgc_index, rc);

        if (rc > 0)
                rc = 0;

        if (likely(rc == 0)) {
                spin_lock(&d->opd_syn_lock);
                d->opd_syn_changes++;
                spin_unlock(&d->opd_syn_lock);
        }

        RETURN(rc);
}

int osp_sync_add(const struct lu_env *env, struct osp_object *o,
                 llog_op_type type, struct thandle *th,
                 const struct lu_attr *attr)
{
        return osp_sync_add_rec(env, lu2osp_dev(o->opo_obj.do_lu.lo_dev),
                                lu_object_fid(&o->opo_obj.do_lu), type, 1,
                                th, attr);
}
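
/*
 * Illustrative sketch (added; not part of the original file): how a
 * hypothetical caller would pair the declare and add phases above
 * within one transaction.  The function name osp_example_log_destroy()
 * is an assumption; in the real dt API the declare step runs before
 * dt_trans_start() and the add step after it -- they are collapsed
 * into one function here purely for illustration.  Guarded by #if 0
 * so it is never compiled.
 */
#if 0
static int osp_example_log_destroy(const struct lu_env *env,
                                   struct osp_object *o, struct thandle *th)
{
        int rc;

        /* declaration phase: reserve llog space for the unlink record */
        rc = osp_sync_declare_add(env, o, MDS_UNLINK64_REC, th);
        if (rc != 0)
                return rc;

        /* execution phase: write the llog record atomically with the
         * change itself; attr is NULL as unlink records carry no uid/gid */
        return osp_sync_add(env, o, MDS_UNLINK64_REC, th, NULL);
}
#endif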

int osp_sync_gap(const struct lu_env *env, struct osp_device *d,
                 struct lu_fid *fid, int lost, struct thandle *th)
{
        return osp_sync_add_rec(env, d, fid, MDS_UNLINK64_REC, lost, th, NULL);
}

/*
 * it's quite obvious we can't maintain all the structures in memory:
 * while the OST is down, the MDS can be processing thousands and thousands
 * of unlinks, filling persistent llogs and the in-core representation
 *
 * this doesn't scale at all. so basically we need the following:
 * a) destroy/setattr append llog records
 * b) once the llog has grown to X records, we process the first Y committed
 *    records
 *
 * once record R is found via llog_process(), it is guaranteed to be
 * committed by the next commit callback at the latest
 */

/*
 * called for each atomic on-disk change (not once per transaction batch)
 * and goes over the list
 * XXX: should be optimized?
 */

/**
 * called for each RPC reported committed
 */
static void osp_sync_request_commit_cb(struct ptlrpc_request *req)
{
        struct osp_device *d = req->rq_cb_data;
        struct obd_import *imp = req->rq_import;

        CDEBUG(D_HA, "commit req %p, transno "LPU64"\n", req, req->rq_transno);

        if (unlikely(req->rq_transno == 0))
                return;

        if (unlikely(req->rq_transno > imp->imp_peer_committed_transno)) {
                /* this request was aborted by the shutdown procedure,
                 * not committed by the peer.  we should preserve llog
                 * record */
                spin_lock(&d->opd_syn_lock);
                d->opd_syn_rpc_in_progress--;
                spin_unlock(&d->opd_syn_lock);
                cfs_waitq_signal(&d->opd_syn_waitq);
                return;
        }

        /* XXX: what if request isn't committed for very long? */
        LASSERT(d);
        LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
        LASSERT(cfs_list_empty(&req->rq_exp_list));

        ptlrpc_request_addref(req);

        spin_lock(&d->opd_syn_lock);
        cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
        spin_unlock(&d->opd_syn_lock);

        /* XXX: some batching wouldn't hurt */
        cfs_waitq_signal(&d->opd_syn_waitq);
}

static int osp_sync_interpret(const struct lu_env *env,
                              struct ptlrpc_request *req, void *aa, int rc)
{
        struct osp_device *d = req->rq_cb_data;

        /* XXX: error handling here */
        if (req->rq_svc_thread != (void *) OSP_JOB_MAGIC)
                DEBUG_REQ(D_ERROR, req, "bad magic %p\n", req->rq_svc_thread);
        LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
        LASSERT(d);

        CDEBUG(D_HA, "reply req %p/%d, rc %d, transno %u\n", req,
               cfs_atomic_read(&req->rq_refcount),
               rc, (unsigned) req->rq_transno);
        LASSERT(rc || req->rq_transno);

        if (rc == -ENOENT) {
                /*
                 * we tried to destroy the object or update its attributes,
                 * but the object doesn't exist anymore - cancel the llog
                 * record
                 */
                LASSERT(req->rq_transno == 0);
                LASSERT(cfs_list_empty(&req->rq_exp_list));

                ptlrpc_request_addref(req);

                spin_lock(&d->opd_syn_lock);
                cfs_list_add(&req->rq_exp_list, &d->opd_syn_committed_there);
                spin_unlock(&d->opd_syn_lock);

                cfs_waitq_signal(&d->opd_syn_waitq);
        } else if (rc) {
                struct obd_import *imp = req->rq_import;
                /*
                 * an error happened; we'll retry on the next boot
                 */
                LASSERTF(req->rq_transno == 0 ||
                         req->rq_import_generation < imp->imp_generation,
                         "transno "LPU64", rc %d, gen: req %d, imp %d\n",
                         req->rq_transno, rc, req->rq_import_generation,
                         imp->imp_generation);
                LASSERT(d->opd_syn_rpc_in_progress > 0);
                if (req->rq_transno == 0) {
                        /* this is the last time we see the request.
                         * if transno is not zero, then the commit cb
                         * will be called at some point */
                        spin_lock(&d->opd_syn_lock);
                        d->opd_syn_rpc_in_progress--;
                        spin_unlock(&d->opd_syn_lock);
                }

                cfs_waitq_signal(&d->opd_syn_waitq);
        } else if (unlikely(d->opd_pre_status == -ENOSPC)) {
                /*
                 * if the current status is -ENOSPC (lack of free space on
                 * the OST) then we should poll the OST immediately once
                 * the object destroy is replied
                 */
                osp_statfs_need_now(d);
        }

        LASSERT(d->opd_syn_rpc_in_flight > 0);
        spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_flight--;
        spin_unlock(&d->opd_syn_lock);
        CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
               d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
               d->opd_syn_rpc_in_progress);

        osp_sync_check_for_work(d);

        return 0;
}

/*
 * walks through the list of locally committed changes and sends them
 * as RPCs until the pipe is full
 */
static void osp_sync_send_new_rpc(struct osp_device *d,
                                  struct ptlrpc_request *req)
{
        LASSERT(d->opd_syn_rpc_in_flight <= d->opd_syn_max_rpc_in_flight);
        LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);

        ptlrpcd_add_req(req, PDL_POLICY_ROUND, -1);
}

static struct ptlrpc_request *osp_sync_new_job(struct osp_device *d,
                                               struct llog_handle *llh,
                                               struct llog_rec_hdr *h,
                                               ost_cmd_t op,
                                               const struct req_format *format)
{
        struct ptlrpc_request   *req;
        struct ost_body         *body;
        struct obd_import       *imp;
        int                      rc;

        /* Prepare the request */
        imp = d->opd_obd->u.cli.cl_import;
        LASSERT(imp);
        req = ptlrpc_request_alloc(imp, format);
        if (req == NULL)
                RETURN(ERR_PTR(-ENOMEM));

        rc = ptlrpc_request_pack(req, LUSTRE_OST_VERSION, op);
        if (rc) {
                ptlrpc_req_finished(req);
                return ERR_PTR(rc);
        }

        /*
         * this is a trick: to save on memory allocations we put the cookie
         * into the request, but don't set the corresponding flag in o_valid
         * so that the OST doesn't interpret this cookie. once the request
         * is committed on the OST we take the cookie from the request and
         * cancel
         */
        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        body->oa.o_lcookie.lgc_lgl = llh->lgh_id;
        body->oa.o_lcookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
        body->oa.o_lcookie.lgc_index = h->lrh_index;
        CFS_INIT_LIST_HEAD(&req->rq_exp_list);
        req->rq_svc_thread = (void *) OSP_JOB_MAGIC;

        req->rq_interpret_reply = osp_sync_interpret;
        req->rq_commit_cb = osp_sync_request_commit_cb;
        req->rq_cb_data = d;

        ptlrpc_request_set_replen(req);

        return req;
}
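
/*
 * Note (added): the cookie stashed in o_lcookie above is read back in
 * osp_sync_process_committed(), where llog_cat_cancel_records() uses
 * body->oa.o_lcookie to cancel the llog record once the OST reports
 * the request committed.
 */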

static int osp_sync_new_setattr_job(struct osp_device *d,
                                    struct llog_handle *llh,
                                    struct llog_rec_hdr *h)
{
        struct llog_setattr64_rec       *rec = (struct llog_setattr64_rec *)h;
        struct ptlrpc_request           *req;
        struct ost_body                 *body;

        ENTRY;
        LASSERT(h->lrh_type == MDS_SETATTR64_REC);

        req = osp_sync_new_job(d, llh, h, OST_SETATTR, &RQF_OST_SETATTR);
        if (IS_ERR(req))
                RETURN(PTR_ERR(req));

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        body->oa.o_id  = rec->lsr_oid;
        body->oa.o_seq = rec->lsr_oseq;
        body->oa.o_uid = rec->lsr_uid;
        body->oa.o_gid = rec->lsr_gid;
        body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID |
                           OBD_MD_FLUID | OBD_MD_FLGID;

        osp_sync_send_new_rpc(d, req);
        RETURN(0);
}

/* Old records may be in the old format, so we handle that too */
static int osp_sync_new_unlink_job(struct osp_device *d,
                                   struct llog_handle *llh,
                                   struct llog_rec_hdr *h)
{
        struct llog_unlink_rec  *rec = (struct llog_unlink_rec *)h;
        struct ptlrpc_request   *req;
        struct ost_body         *body;

        ENTRY;
        LASSERT(h->lrh_type == MDS_UNLINK_REC);

        req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
        if (IS_ERR(req))
                RETURN(PTR_ERR(req));

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        LASSERT(body);
        body->oa.o_id  = rec->lur_oid;
        body->oa.o_seq = rec->lur_oseq;
        body->oa.o_misc = rec->lur_count;
        body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID;
        if (rec->lur_count)
                body->oa.o_valid |= OBD_MD_FLOBJCOUNT;

        osp_sync_send_new_rpc(d, req);
        RETURN(0);
}

static int osp_sync_new_unlink64_job(struct osp_device *d,
                                     struct llog_handle *llh,
                                     struct llog_rec_hdr *h)
{
        struct llog_unlink64_rec        *rec = (struct llog_unlink64_rec *)h;
        struct ptlrpc_request           *req;
        struct ost_body                 *body;
        int                              rc;

        ENTRY;
        LASSERT(h->lrh_type == MDS_UNLINK64_REC);

        req = osp_sync_new_job(d, llh, h, OST_DESTROY, &RQF_OST_DESTROY);
        if (IS_ERR(req))
                RETURN(PTR_ERR(req));

        body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
        if (body == NULL) {
                /* free the prepared request on error, otherwise it leaks */
                ptlrpc_req_finished(req);
                RETURN(-EFAULT);
        }
        rc = fid_ostid_pack(&rec->lur_fid, &body->oa.o_oi);
        if (rc < 0) {
                ptlrpc_req_finished(req);
                RETURN(rc);
        }
        body->oa.o_misc = rec->lur_count;
        body->oa.o_valid = OBD_MD_FLGROUP | OBD_MD_FLID | OBD_MD_FLOBJCOUNT;

        osp_sync_send_new_rpc(d, req);
        RETURN(0);
}

static int osp_sync_process_record(const struct lu_env *env,
                                   struct osp_device *d,
                                   struct llog_handle *llh,
                                   struct llog_rec_hdr *rec)
{
        struct llog_cookie       cookie;
        int                      rc = 0;

        cookie.lgc_lgl = llh->lgh_id;
        cookie.lgc_subsys = LLOG_MDS_OST_ORIG_CTXT;
        cookie.lgc_index = rec->lrh_index;

        if (unlikely(rec->lrh_type == LLOG_GEN_REC)) {
                struct llog_gen_rec *gen = (struct llog_gen_rec *)rec;

                /* we're waiting for the record generated by this instance */
                LASSERT(d->opd_syn_prev_done == 0);
                if (!memcmp(&d->opd_syn_generation, &gen->lgr_gen,
                            sizeof(gen->lgr_gen))) {
                        CDEBUG(D_HA, "processed all old entries\n");
                        d->opd_syn_prev_done = 1;
                }

                /* cancel any generation record */
                rc = llog_cat_cancel_records(env, llh->u.phd.phd_cat_handle,
                                             1, &cookie);

                return rc;
        }

        /*
         * now we prepare and fill requests to the OST, put them on the
         * queue and fire them after the next commit callback
         */

        /* notice we increment counters before sending the RPC, to be
         * consistent with the RPC interpret callback which may happen
         * very quickly */
        spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_flight++;
        d->opd_syn_rpc_in_progress++;
        spin_unlock(&d->opd_syn_lock);

        switch (rec->lrh_type) {
        /* case MDS_UNLINK_REC is kept for compatibility */
        case MDS_UNLINK_REC:
                rc = osp_sync_new_unlink_job(d, llh, rec);
                break;
        case MDS_UNLINK64_REC:
                rc = osp_sync_new_unlink64_job(d, llh, rec);
                break;
        case MDS_SETATTR64_REC:
                rc = osp_sync_new_setattr_job(d, llh, rec);
                break;
        default:
                CERROR("unknown record type: %x\n", rec->lrh_type);
                rc = -EINVAL;
                break;
        }

        if (likely(rc == 0)) {
                spin_lock(&d->opd_syn_lock);
                if (d->opd_syn_prev_done) {
                        LASSERT(d->opd_syn_changes > 0);
                        LASSERT(rec->lrh_id <= d->opd_syn_last_committed_id);
                        /*
                         * NOTE: it's possible to meet the same id if the
                         * OST stores several stripes of the same file
                         */
                        if (rec->lrh_id > d->opd_syn_last_processed_id)
                                d->opd_syn_last_processed_id = rec->lrh_id;

                        d->opd_syn_changes--;
                }
                CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
                       d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
                       d->opd_syn_rpc_in_progress);
                spin_unlock(&d->opd_syn_lock);
        } else {
                spin_lock(&d->opd_syn_lock);
                d->opd_syn_rpc_in_flight--;
                d->opd_syn_rpc_in_progress--;
                spin_unlock(&d->opd_syn_lock);
        }

        CDEBUG(D_HA, "found record %x, %d, idx %u, id %u: %d\n",
               rec->lrh_type, rec->lrh_len, rec->lrh_index, rec->lrh_id, rc);
        return rc;
}

static void osp_sync_process_committed(const struct lu_env *env,
                                       struct osp_device *d)
{
        struct obd_device       *obd = d->opd_obd;
        struct obd_import       *imp = obd->u.cli.cl_import;
        struct ost_body         *body;
        struct ptlrpc_request   *req, *tmp;
        struct llog_ctxt        *ctxt;
        struct llog_handle      *llh;
        cfs_list_t               list;
        int                      rc, done = 0;

        ENTRY;

        if (cfs_list_empty(&d->opd_syn_committed_there))
                return;

        /*
         * if the current status is -ENOSPC (lack of free space on the OST)
         * then we should poll the OST immediately once the object destroy
         * is committed.
         * notice: we do this upon commit as well because some backends
         * (like DMU) do not release space right away.
         */
        if (unlikely(d->opd_pre_status == -ENOSPC))
                osp_statfs_need_now(d);

        /*
         * now cancel them all
         * XXX: can we improve this using some batching?
         *      with batch RPC that'll happen automatically?
         * XXX: can we store ctxt in lod_device and save a few cycles?
         */
        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
        LASSERT(ctxt);

        llh = ctxt->loc_handle;
        LASSERT(llh);

        CFS_INIT_LIST_HEAD(&list);
        spin_lock(&d->opd_syn_lock);
        cfs_list_splice(&d->opd_syn_committed_there, &list);
        CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);
        spin_unlock(&d->opd_syn_lock);

        cfs_list_for_each_entry_safe(req, tmp, &list, rq_exp_list) {
                LASSERT(req->rq_svc_thread == (void *) OSP_JOB_MAGIC);
                cfs_list_del_init(&req->rq_exp_list);

                body = req_capsule_client_get(&req->rq_pill, &RMF_OST_BODY);
                LASSERT(body);

                /* the import can be closing, in which case all commit cb's
                 * have been called, so we can check commit status directly */
                if (req->rq_transno <= imp->imp_peer_committed_transno) {
                        rc = llog_cat_cancel_records(env, llh, 1,
                                                     &body->oa.o_lcookie);
                        if (rc)
                                CERROR("%s: can't cancel record: %d\n",
                                       obd->obd_name, rc);
                } else {
                        DEBUG_REQ(D_HA, req, "not committed");
                }

                ptlrpc_req_finished(req);
                done++;
        }

        llog_ctxt_put(ctxt);

        LASSERT(d->opd_syn_rpc_in_progress >= done);
        spin_lock(&d->opd_syn_lock);
        d->opd_syn_rpc_in_progress -= done;
        spin_unlock(&d->opd_syn_lock);
        CDEBUG(D_OTHER, "%s: %d in flight, %d in progress\n",
               d->opd_obd->obd_name, d->opd_syn_rpc_in_flight,
               d->opd_syn_rpc_in_progress);

        osp_sync_check_for_work(d);

        /* wake up the thread if requested to stop:
         * it might be waiting for in-progress to complete */
        if (unlikely(osp_sync_running(d) == 0))
                cfs_waitq_signal(&d->opd_syn_waitq);

        EXIT;
}

/*
 * this is where most of the queue processing happens
 */
static int osp_sync_process_queues(const struct lu_env *env,
                                   struct llog_handle *llh,
                                   struct llog_rec_hdr *rec,
                                   void *data)
{
        struct osp_device       *d = data;
        int                      rc;

        do {
                struct l_wait_info lwi = { 0 };

                if (!osp_sync_running(d)) {
                        CDEBUG(D_HA, "stop llog processing\n");
                        return LLOG_PROC_BREAK;
                }

                /* process requests committed by OST */
                osp_sync_process_committed(env, d);

                /* if there are changes to be processed and we have
                 * resources for this ... do it now */
                if (osp_sync_can_process_new(d, rec)) {
                        if (llh == NULL) {
                                /* ask llog for another record */
                                CDEBUG(D_HA, "%lu changes, %u in progress, %u in flight\n",
                                       d->opd_syn_changes,
                                       d->opd_syn_rpc_in_progress,
                                       d->opd_syn_rpc_in_flight);
                                return 0;
                        }

                        /*
                         * try to send; in case of disconnection, suspend
                         * processing till we can send this request
                         */
                        do {
                                rc = osp_sync_process_record(env, d, llh, rec);
                                /*
                                 * XXX: probably different handling is needed
                                 * for some bugs, like immediate exit or if
                                 * OSP gets inactive
                                 */
                                if (rc) {
                                        CERROR("can't send: %d\n", rc);
                                        l_wait_event(d->opd_syn_waitq,
                                                     !osp_sync_running(d) ||
                                                     osp_sync_has_work(d),
                                                     &lwi);
                                }
                        } while (rc != 0 && osp_sync_running(d));

                        llh = NULL;
                        rec = NULL;
                }

                if (d->opd_syn_last_processed_id == d->opd_syn_last_used_id)
                        osp_sync_remove_from_tracker(d);

                l_wait_event(d->opd_syn_waitq,
                             !osp_sync_running(d) ||
                             osp_sync_can_process_new(d, rec) ||
                             !cfs_list_empty(&d->opd_syn_committed_there),
                             &lwi);
        } while (1);
}
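
/*
 * Note (added): osp_sync_process_queues() above is the llog_cat_process()
 * callback used by osp_sync_thread() below; returning 0 asks the scanner
 * for the next record, while LLOG_PROC_BREAK (returned once the thread
 * is asked to stop) aborts the scan.
 */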

/*
 * this thread runs the llog_cat_process() scanner calling our callback
 * to process llog records. in the callback we implement a tricky
 * state machine, as we don't want to start scanning the llog again
 * and again, and we also don't want to process too many records and
 * send too many RPCs at a time. so, depending on the current load
 * (number of changes being synced to the OST) the callback can suspend,
 * awaiting some new conditions, like syncs completed.
 *
 * in order to process llog records left over from previous boots and to
 * allow llog_process_thread() to find something (otherwise it'd just exit
 * immediately) we add a special GENERATION record on each boot.
 */
static int osp_sync_thread(void *_arg)
{
        struct osp_device       *d = _arg;
        struct ptlrpc_thread    *thread = &d->opd_syn_thread;
        struct l_wait_info       lwi = { 0 };
        struct llog_ctxt        *ctxt;
        struct obd_device       *obd = d->opd_obd;
        struct llog_handle      *llh;
        struct lu_env            env;
        int                      rc, count;
        char                     pname[16];

        ENTRY;

        rc = lu_env_init(&env, LCT_LOCAL);
        if (rc) {
                CERROR("%s: can't initialize env: rc = %d\n",
                       obd->obd_name, rc);
                RETURN(rc);
        }

        sprintf(pname, "osp-syn-%u", d->opd_index);
        cfs_daemonize(pname);

        spin_lock(&d->opd_syn_lock);
        thread->t_flags = SVC_RUNNING;
        spin_unlock(&d->opd_syn_lock);
        cfs_waitq_signal(&thread->t_ctl_waitq);

        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
        if (ctxt == NULL) {
                CERROR("can't get appropriate context\n");
                GOTO(out, rc = -EINVAL);
        }

        llh = ctxt->loc_handle;
        if (llh == NULL) {
                CERROR("can't get llh\n");
                llog_ctxt_put(ctxt);
                GOTO(out, rc = -EINVAL);
        }

        rc = llog_cat_process(&env, llh, osp_sync_process_queues, d, 0, 0);
        LASSERTF(rc == 0 || rc == LLOG_PROC_BREAK,
                 "%lu changes, %u in progress, %u in flight: %d\n",
                 d->opd_syn_changes, d->opd_syn_rpc_in_progress,
                 d->opd_syn_rpc_in_flight, rc);

        /* we don't expect llog_process_thread() to exit till umount */
        LASSERTF(thread->t_flags != SVC_RUNNING,
                 "%lu changes, %u in progress, %u in flight\n",
                 d->opd_syn_changes, d->opd_syn_rpc_in_progress,
                 d->opd_syn_rpc_in_flight);

        /* wait till all the requests are completed */
        count = 0;
        while (d->opd_syn_rpc_in_progress > 0) {
                osp_sync_process_committed(&env, d);

                lwi = LWI_TIMEOUT(cfs_time_seconds(5), NULL, NULL);
                rc = l_wait_event(d->opd_syn_waitq,
                                  d->opd_syn_rpc_in_progress == 0,
                                  &lwi);
                if (rc == -ETIMEDOUT)
                        count++;
                LASSERTF(count < 10, "%s: %d %d %sempty\n",
                         d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
                         d->opd_syn_rpc_in_flight,
                         cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!");
        }

        llog_cat_close(&env, llh);
        rc = llog_cleanup(&env, ctxt);
        if (rc)
                CERROR("can't cleanup llog: %d\n", rc);
out:
        thread->t_flags = SVC_STOPPED;

        cfs_waitq_signal(&thread->t_ctl_waitq);
        LASSERTF(d->opd_syn_rpc_in_progress == 0,
                 "%s: %d %d %sempty\n",
                 d->opd_obd->obd_name, d->opd_syn_rpc_in_progress,
                 d->opd_syn_rpc_in_flight,
                 cfs_list_empty(&d->opd_syn_committed_there) ? "" : "!");

        lu_env_fini(&env);

        RETURN(0);
}

static int osp_sync_llog_init(const struct lu_env *env, struct osp_device *d)
{
        struct osp_thread_info *osi = osp_env_info(env);
        struct llog_handle     *lgh;
        struct obd_device      *obd = d->opd_obd;
        struct llog_ctxt       *ctxt;
        int                     rc;

        ENTRY;

        LASSERT(obd);

        /*
         * open the llog corresponding to our OST
         */
        OBD_SET_CTXT_MAGIC(&obd->obd_lvfs_ctxt);
        obd->obd_lvfs_ctxt.dt = d->opd_storage;

        rc = llog_osd_get_cat_list(env, d->opd_storage, d->opd_index, 1,
                                   &osi->osi_cid);
        if (rc) {
                CERROR("%s: can't get id from catalogs: rc = %d\n",
                       obd->obd_name, rc);
                RETURN(rc);
        }

        CDEBUG(D_INFO, "%s: Init llog for %d - catid "LPX64"/"LPX64":%x\n",
               obd->obd_name, d->opd_index, osi->osi_cid.lci_logid.lgl_oid,
               osi->osi_cid.lci_logid.lgl_oseq,
               osi->osi_cid.lci_logid.lgl_ogen);

        rc = llog_setup(env, obd, &obd->obd_olg, LLOG_MDS_OST_ORIG_CTXT, obd,
                        &osp_mds_ost_orig_logops);
        if (rc)
                RETURN(rc);

        ctxt = llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT);
        LASSERT(ctxt);

        if (likely(osi->osi_cid.lci_logid.lgl_oid != 0)) {
                rc = llog_open(env, ctxt, &lgh, &osi->osi_cid.lci_logid, NULL,
                               LLOG_OPEN_EXISTS);
                /* re-create the llog if it is missing */
                if (rc == -ENOENT)
                        osi->osi_cid.lci_logid.lgl_oid = 0;
                else if (rc < 0)
                        GOTO(out_cleanup, rc);
        }

        if (unlikely(osi->osi_cid.lci_logid.lgl_oid == 0)) {
                rc = llog_open_create(env, ctxt, &lgh, NULL, NULL);
                if (rc < 0)
                        GOTO(out_cleanup, rc);
                osi->osi_cid.lci_logid = lgh->lgh_id;
        }

        ctxt->loc_handle = lgh;

        rc = llog_cat_init_and_process(env, lgh);
        if (rc)
                GOTO(out_close, rc);

        rc = llog_osd_put_cat_list(env, d->opd_storage, d->opd_index, 1,
                                   &osi->osi_cid);
        if (rc)
                GOTO(out_close, rc);

        /*
         * put a mark in the llog up to which old records are processed
         * without throttling
         */
        d->opd_syn_generation.mnt_cnt = cfs_time_current();
        d->opd_syn_generation.conn_cnt = cfs_time_current();

        osi->osi_hdr.lrh_type = LLOG_GEN_REC;
        osi->osi_hdr.lrh_len = sizeof(osi->osi_gen);

        memcpy(&osi->osi_gen.lgr_gen, &d->opd_syn_generation,
               sizeof(osi->osi_gen.lgr_gen));

        rc = llog_cat_add(env, lgh, &osi->osi_gen.lgr_hdr, &osi->osi_cookie,
                          NULL);
        if (rc < 0)
                GOTO(out_close, rc);
        llog_ctxt_put(ctxt);
        RETURN(0);
out_close:
        llog_cat_close(env, lgh);
out_cleanup:
        llog_cleanup(env, ctxt);
        RETURN(rc);
}

static void osp_sync_llog_fini(const struct lu_env *env, struct osp_device *d)
{
        struct llog_ctxt *ctxt;

        ctxt = llog_get_context(d->opd_obd, LLOG_MDS_OST_ORIG_CTXT);
        llog_cat_close(env, ctxt->loc_handle);
        llog_cleanup(env, ctxt);
}

/*
 * initializes the sync component of OSP
 */
int osp_sync_init(const struct lu_env *env, struct osp_device *d)
{
        struct l_wait_info       lwi = { 0 };
        int                      rc;

        ENTRY;

        rc = osp_sync_id_traction_init(d);
        if (rc)
                RETURN(rc);

        /*
         * initialize the llog storing changes
         */
        rc = osp_sync_llog_init(env, d);
        if (rc) {
                CERROR("%s: can't initialize llog: rc = %d\n",
                       d->opd_obd->obd_name, rc);
                GOTO(err_id, rc);
        }

        /*
         * Start the synchronization thread
         */
        d->opd_syn_max_rpc_in_flight = OSP_MAX_IN_FLIGHT;
        d->opd_syn_max_rpc_in_progress = OSP_MAX_IN_PROGRESS;
        spin_lock_init(&d->opd_syn_lock);
        cfs_waitq_init(&d->opd_syn_waitq);
        cfs_waitq_init(&d->opd_syn_thread.t_ctl_waitq);
        CFS_INIT_LIST_HEAD(&d->opd_syn_committed_there);

        rc = cfs_create_thread(osp_sync_thread, d, 0);
        if (rc < 0) {
                CERROR("%s: can't start sync thread: rc = %d\n",
                       d->opd_obd->obd_name, rc);
                GOTO(err_llog, rc);
        }

        l_wait_event(d->opd_syn_thread.t_ctl_waitq,
                     osp_sync_running(d) || osp_sync_stopped(d), &lwi);

        RETURN(0);
err_llog:
        osp_sync_llog_fini(env, d);
err_id:
        osp_sync_id_traction_fini(d);
        return rc;
}

int osp_sync_fini(struct osp_device *d)
{
        struct ptlrpc_thread *thread = &d->opd_syn_thread;

        ENTRY;

        thread->t_flags = SVC_STOPPING;
        cfs_waitq_signal(&d->opd_syn_waitq);
        cfs_wait_event(thread->t_ctl_waitq, thread->t_flags & SVC_STOPPED);

        /*
         * unregister transaction callbacks only when the sync thread
         * has finished operations with the llog
         */
        osp_sync_id_traction_fini(d);

        RETURN(0);
}

static DEFINE_MUTEX(osp_id_tracker_sem);
static CFS_LIST_HEAD(osp_id_tracker_list);

static void osp_sync_tracker_commit_cb(struct thandle *th, void *cookie)
{
        struct osp_id_tracker   *tr = cookie;
        struct osp_device       *d;
        struct osp_txn_info     *txn;

        LASSERT(tr);

        txn = osp_txn_info(&th->th_ctx);
        if (txn == NULL || txn->oti_current_id < tr->otr_committed_id)
                return;

        spin_lock(&tr->otr_lock);
        if (likely(txn->oti_current_id > tr->otr_committed_id)) {
                CDEBUG(D_OTHER, "committed: %u -> %u\n",
                       tr->otr_committed_id, txn->oti_current_id);
                tr->otr_committed_id = txn->oti_current_id;

                cfs_list_for_each_entry(d, &tr->otr_wakeup_list,
                                        opd_syn_ontrack) {
                        d->opd_syn_last_committed_id = tr->otr_committed_id;
                        cfs_waitq_signal(&d->opd_syn_waitq);
                }
        }
        spin_unlock(&tr->otr_lock);
}
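
/*
 * Note (added): osp_sync_tracker_commit_cb() above is the dt_txn_callback
 * registered by osp_sync_id_traction_init() below via
 * tr->otr_tx_cb.dtc_txn_commit; the local storage invokes it on every
 * transaction commit.
 */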

static int osp_sync_id_traction_init(struct osp_device *d)
{
        struct osp_id_tracker   *tr, *found = NULL;
        int                      rc = 0;

        LASSERT(d);
        LASSERT(d->opd_storage);
        LASSERT(d->opd_syn_tracker == NULL);
        CFS_INIT_LIST_HEAD(&d->opd_syn_ontrack);

        mutex_lock(&osp_id_tracker_sem);
        cfs_list_for_each_entry(tr, &osp_id_tracker_list, otr_list) {
                if (tr->otr_dev == d->opd_storage) {
                        LASSERT(cfs_atomic_read(&tr->otr_refcount));
                        cfs_atomic_inc(&tr->otr_refcount);
                        d->opd_syn_tracker = tr;
                        found = tr;
                        break;
                }
        }

        if (found == NULL) {
                rc = -ENOMEM;
                OBD_ALLOC_PTR(tr);
                if (tr) {
                        d->opd_syn_tracker = tr;
                        spin_lock_init(&tr->otr_lock);
                        tr->otr_dev = d->opd_storage;
                        tr->otr_next_id = 1;
                        tr->otr_committed_id = 0;
                        cfs_atomic_set(&tr->otr_refcount, 1);
                        CFS_INIT_LIST_HEAD(&tr->otr_wakeup_list);
                        cfs_list_add(&tr->otr_list, &osp_id_tracker_list);
                        tr->otr_tx_cb.dtc_txn_commit =
                                                osp_sync_tracker_commit_cb;
                        tr->otr_tx_cb.dtc_cookie = tr;
                        tr->otr_tx_cb.dtc_tag = LCT_MD_THREAD;
                        dt_txn_callback_add(d->opd_storage, &tr->otr_tx_cb);
                        rc = 0;
                }
        }
        mutex_unlock(&osp_id_tracker_sem);

        return rc;
}

static void osp_sync_id_traction_fini(struct osp_device *d)
{
        struct osp_id_tracker *tr;

        ENTRY;

        LASSERT(d);
        tr = d->opd_syn_tracker;
        if (tr == NULL) {
                EXIT;
                return;
        }

        osp_sync_remove_from_tracker(d);

        mutex_lock(&osp_id_tracker_sem);
        if (cfs_atomic_dec_and_test(&tr->otr_refcount)) {
                dt_txn_callback_del(d->opd_storage, &tr->otr_tx_cb);
                LASSERT(cfs_list_empty(&tr->otr_wakeup_list));
                cfs_list_del(&tr->otr_list);
                OBD_FREE_PTR(tr);
                d->opd_syn_tracker = NULL;
        }
        mutex_unlock(&osp_id_tracker_sem);

        EXIT;
}

/*
 * generates id for the tracker
 */
static __u32 osp_sync_id_get(struct osp_device *d, __u32 id)
{
        struct osp_id_tracker *tr;

        tr = d->opd_syn_tracker;
        LASSERT(tr);

        /* XXX: we can improve this introducing per-cpu preallocated ids? */
        spin_lock(&tr->otr_lock);
        if (unlikely(tr->otr_next_id <= d->opd_syn_last_used_id)) {
                spin_unlock(&tr->otr_lock);
                CERROR("%s: next %u, last synced %lu\n",
                       d->opd_obd->obd_name, tr->otr_next_id,
                       d->opd_syn_last_used_id);
                LBUG();
        }

        if (id == 0)
                id = tr->otr_next_id++;
        if (id > d->opd_syn_last_used_id)
                d->opd_syn_last_used_id = id;
        if (cfs_list_empty(&d->opd_syn_ontrack))
                cfs_list_add(&d->opd_syn_ontrack, &tr->otr_wakeup_list);
        spin_unlock(&tr->otr_lock);
        CDEBUG(D_OTHER, "new id %u\n", (unsigned) id);

        return id;
}

static void osp_sync_remove_from_tracker(struct osp_device *d)
{
        struct osp_id_tracker *tr;

        tr = d->opd_syn_tracker;
        LASSERT(tr);

        if (cfs_list_empty(&d->opd_syn_ontrack))
                return;

        spin_lock(&tr->otr_lock);
        cfs_list_del_init(&d->opd_syn_ontrack);
        spin_unlock(&tr->otr_lock);
}
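
/*
 * Illustrative sketch (added; not part of the original file): the id
 * tracker lifecycle as used by this file, collapsed into one
 * hypothetical function purely for illustration and guarded by #if 0
 * so it is never compiled.
 */
#if 0
static void osp_example_tracker_flow(struct osp_device *d)
{
        __u32 id;

        /* at setup: attach to (or allocate) the per-storage tracker */
        if (osp_sync_id_traction_init(d) != 0)
                return;

        /* per llog record: get a monotonically growing id; when the
         * transaction commits, osp_sync_tracker_commit_cb() raises
         * opd_syn_last_committed_id and wakes the sync thread */
        id = osp_sync_id_get(d, 0);

        /* once all ids are processed: leave the wakeup list */
        osp_sync_remove_from_tracker(d);

        /* at teardown: drop the reference, freeing on the last one */
        osp_sync_id_traction_fini(d);
}
#endif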