4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
32 * Lustre Unified Target
33 * These are common function to work with last_received file
35 * Author: Mikhail Pershin <mike.pershin@intel.com>
38 #include <obd_class.h>
39 #include <lustre_fid.h>
41 #include "tgt_internal.h"
43 /** version recovery epoch */
44 #define LR_EPOCH_BITS 32
46 /* Allocate a bitmap for a chunk of reply data slots */
/* Install lut->lut_reply_bitmap[chunk] under lut_client_bitmap_lock.
 * If another thread installed this chunk first, the freshly allocated
 * bitmap is freed and the existing one is kept (benign lost race). */
47 static int tgt_bitmap_chunk_alloc(struct lu_target *lut, int chunk)
51 OBD_ALLOC_LARGE(bm, BITS_TO_LONGS(LUT_REPLY_SLOTS_PER_CHUNK) *
56 spin_lock(&lut->lut_client_bitmap_lock);
58 if (lut->lut_reply_bitmap[chunk] != NULL) {
59 /* someone else already allocated the bitmap for this chunk */
60 spin_unlock(&lut->lut_client_bitmap_lock);
61 OBD_FREE_LARGE(bm, BITS_TO_LONGS(LUT_REPLY_SLOTS_PER_CHUNK) *
66 lut->lut_reply_bitmap[chunk] = bm;
68 spin_unlock(&lut->lut_client_bitmap_lock);
73 /* Look for an available reply data slot in the bitmap
75 * Allocate bitmap chunk when first used
76 * XXX algo could be improved if this routine limits performance
/* Returns the global slot index (chunk * LUT_REPLY_SLOTS_PER_CHUNK + bit)
 * on success; error/full handling is in elided lines. */
78 static int tgt_find_free_reply_slot(struct lu_target *lut)
85 for (chunk = 0; chunk < LUT_REPLY_SLOTS_MAX_CHUNKS; chunk++) {
86 /* allocate the bitmap chunk if necessary */
87 if (unlikely(lut->lut_reply_bitmap[chunk] == NULL)) {
88 rc = tgt_bitmap_chunk_alloc(lut, chunk);
92 bmp = lut->lut_reply_bitmap[chunk];
94 /* look for an available slot in this chunk */
96 b = find_first_zero_bit(bmp, LUT_REPLY_SLOTS_PER_CHUNK);
97 if (b >= LUT_REPLY_SLOTS_PER_CHUNK)
/* atomically claim the bit; a lost race presumably retries the
 * search — the retry path is not visible here, TODO confirm */
101 if (test_and_set_bit(b, bmp) == 0)
102 return chunk * LUT_REPLY_SLOTS_PER_CHUNK + b;
109 /* Mark the reply data slot @idx 'used' in the corresponding bitmap chunk
111 * Allocate the bitmap chunk if necessary
/* Used when re-populating the in-memory bitmap from on-disk state;
 * finding the bit already set is treated as an error (CERROR below). */
113 static int tgt_set_reply_slot(struct lu_target *lut, int idx)
/* map the flat slot index onto (chunk, bit-in-chunk) */
119 chunk = idx / LUT_REPLY_SLOTS_PER_CHUNK;
120 b = idx % LUT_REPLY_SLOTS_PER_CHUNK;
122 LASSERT(chunk < LUT_REPLY_SLOTS_MAX_CHUNKS);
123 LASSERT(b < LUT_REPLY_SLOTS_PER_CHUNK);
125 /* allocate the bitmap chunk if necessary */
126 if (unlikely(lut->lut_reply_bitmap[chunk] == NULL)) {
127 rc = tgt_bitmap_chunk_alloc(lut, chunk);
132 /* mark the slot 'used' in this chunk */
133 if (test_and_set_bit(b, lut->lut_reply_bitmap[chunk]) != 0) {
134 CERROR("%s: slot %d already set in bitmap\n",
143 /* Mark the reply data slot @idx 'unused' in the corresponding bitmap chunk
/* During shutdown/failover the bit is intentionally left set (see the
 * comment below) so a restarted target does not reuse the slot. */
146 static int tgt_clear_reply_slot(struct lu_target *lut, int idx)
151 if (lut->lut_obd->obd_stopping)
153 * in case of failover keep the bit set in order to
154 * avoid overwriting slots in reply_data which might
155 * be required by resent rpcs
158 chunk = idx / LUT_REPLY_SLOTS_PER_CHUNK;
159 b = idx % LUT_REPLY_SLOTS_PER_CHUNK;
161 LASSERT(chunk < LUT_REPLY_SLOTS_MAX_CHUNKS);
162 LASSERT(b < LUT_REPLY_SLOTS_PER_CHUNK);
/* clearing a slot whose chunk was never allocated, or whose bit is
 * already clear, indicates bookkeeping corruption */
164 if (lut->lut_reply_bitmap[chunk] == NULL) {
165 CERROR("%s: slot %d not allocated\n",
170 if (test_and_clear_bit(b, lut->lut_reply_bitmap[chunk]) == 0) {
171 CERROR("%s: slot %d already clear in bitmap\n",
180 /* Read header of reply_data file of target @tgt into structure @lrh */
/* The on-disk header is little-endian; fields are converted to CPU
 * byte order into @lrh after the raw read into the stack buffer. */
181 static int tgt_reply_header_read(const struct lu_env *env,
182 struct lu_target *tgt,
183 struct lsd_reply_header *lrh)
186 struct lsd_reply_header buf;
187 struct tgt_thread_info *tti = tgt_th_info(env);
190 tti->tti_buf.lb_buf = &buf;
191 tti->tti_buf.lb_len = sizeof(buf);
/* read from lut_reply_data; the offset argument is in elided lines
 * (presumably offset 0 for a header) — TODO confirm */
193 rc = dt_record_read(env, tgt->lut_reply_data, &tti->tti_buf,
198 lrh->lrh_magic = le32_to_cpu(buf.lrh_magic);
199 lrh->lrh_header_size = le32_to_cpu(buf.lrh_header_size);
200 lrh->lrh_reply_size = le32_to_cpu(buf.lrh_reply_size);
202 CDEBUG(D_HA, "%s: read %s header. magic=0x%08x "
203 "header_size=%d reply_size=%d\n",
204 tgt->lut_obd->obd_name, REPLY_DATA,
205 lrh->lrh_magic, lrh->lrh_header_size, lrh->lrh_reply_size);
210 /* Write header into reply_data file of target @tgt from structure @lrh */
/* Converts @lrh to little-endian on the stack, then writes it inside a
 * local transaction on lut_bottom. Skipped on read-only devices. */
211 static int tgt_reply_header_write(const struct lu_env *env,
212 struct lu_target *tgt,
213 struct lsd_reply_header *lrh)
216 struct lsd_reply_header buf;
217 struct tgt_thread_info *tti = tgt_th_info(env);
219 struct dt_object *dto;
221 CDEBUG(D_HA, "%s: write %s header. magic=0x%08x "
222 "header_size=%d reply_size=%d\n",
223 tgt->lut_obd->obd_name, REPLY_DATA,
224 lrh->lrh_magic, lrh->lrh_header_size, lrh->lrh_reply_size);
/* nothing to do on a read-only backend */
226 if (tgt->lut_bottom->dd_rdonly)
229 buf.lrh_magic = cpu_to_le32(lrh->lrh_magic);
230 buf.lrh_header_size = cpu_to_le32(lrh->lrh_header_size);
231 buf.lrh_reply_size = cpu_to_le32(lrh->lrh_reply_size);
233 th = dt_trans_create(env, tgt->lut_bottom);
239 tti->tti_buf.lb_buf = &buf;
240 tti->tti_buf.lb_len = sizeof(buf);
242 rc = dt_declare_record_write(env, tgt->lut_reply_data,
243 &tti->tti_buf, tti->tti_off, th);
247 rc = dt_trans_start(env, tgt->lut_bottom, th);
/* locate the bottom-device view of the object before writing */
251 dto = dt_object_locate(tgt->lut_reply_data, th->th_dev);
252 rc = dt_record_write(env, dto, &tti->tti_buf, &tti->tti_off, th);
254 dt_trans_stop(env, tgt->lut_bottom, th);
258 /* Write the reply data @lrd into reply_data file of target @tgt
/* @lrd is temporarily converted to wire (network) status and
 * little-endian into tti_lrd, then restored, so the caller's copy is
 * unchanged after the call. Runs inside the caller's transaction @th. */
261 static int tgt_reply_data_write(const struct lu_env *env, struct lu_target *tgt,
262 struct lsd_reply_data *lrd, loff_t off,
265 struct tgt_thread_info *tti = tgt_th_info(env);
266 struct dt_object *dto;
267 struct lsd_reply_data *buf = &tti->tti_lrd;
/* convert the RPC result to network representation for disk */
269 lrd->lrd_result = ptlrpc_status_hton(lrd->lrd_result);
271 buf->lrd_transno = cpu_to_le64(lrd->lrd_transno);
272 buf->lrd_xid = cpu_to_le64(lrd->lrd_xid);
273 buf->lrd_data = cpu_to_le64(lrd->lrd_data);
274 buf->lrd_result = cpu_to_le32(lrd->lrd_result);
275 buf->lrd_client_gen = cpu_to_le32(lrd->lrd_client_gen);
/* restore host representation for the in-memory copy */
277 lrd->lrd_result = ptlrpc_status_ntoh(lrd->lrd_result);
280 tti->tti_buf.lb_buf = buf;
281 tti->tti_buf.lb_len = sizeof(*buf);
283 dto = dt_object_locate(tgt->lut_reply_data, th->th_dev);
284 return dt_record_write(env, dto, &tti->tti_buf, &tti->tti_off, th);
287 /* Read the reply data from reply_data file of target @tgt at offset @off
288 * into structure @lrd
/* Counterpart of tgt_reply_data_write(): raw little-endian record is
 * read into tti_lrd, then converted to CPU byte order into @lrd. */
290 static int tgt_reply_data_read(const struct lu_env *env, struct lu_target *tgt,
291 struct lsd_reply_data *lrd, loff_t off)
294 struct tgt_thread_info *tti = tgt_th_info(env);
295 struct lsd_reply_data *buf = &tti->tti_lrd;
298 tti->tti_buf.lb_buf = buf;
299 tti->tti_buf.lb_len = sizeof(*buf);
301 rc = dt_record_read(env, tgt->lut_reply_data, &tti->tti_buf,
306 lrd->lrd_transno = le64_to_cpu(buf->lrd_transno);
307 lrd->lrd_xid = le64_to_cpu(buf->lrd_xid);
308 lrd->lrd_data = le64_to_cpu(buf->lrd_data);
309 lrd->lrd_result = le32_to_cpu(buf->lrd_result);
310 lrd->lrd_client_gen = le32_to_cpu(buf->lrd_client_gen);
316 /* Free the in-memory reply data structure @trd and release
317 * the corresponding slot in the reply_data file of target @lut
318 * Called with ted_lcd_lock held
/* @lut may be NULL (only used for the log name and slot release);
 * TRD_INDEX_MEMORY marks reply data that never got an on-disk slot. */
320 static void tgt_free_reply_data(struct lu_target *lut,
321 struct tg_export_data *ted,
322 struct tg_reply_data *trd)
324 CDEBUG(D_TRACE, "%s: free reply data %p: xid %llu, transno %llu, "
325 "client gen %u, slot idx %d\n",
326 lut == NULL ? "" : tgt_name(lut), trd, trd->trd_reply.lrd_xid,
327 trd->trd_reply.lrd_transno, trd->trd_reply.lrd_client_gen,
330 LASSERT(mutex_is_locked(&ted->ted_lcd_lock));
332 list_del(&trd->trd_list);
333 ted->ted_reply_cnt--;
334 if (lut != NULL && trd->trd_index != TRD_INDEX_MEMORY)
335 tgt_clear_reply_slot(lut, trd->trd_index);
339 /* Release the reply data @trd from target @lut
340 * The reply data with the highest transno for this export
341 * is retained to ensure correctness of target recovery
342 * Called with ted_lcd_lock held
/* If @trd matches lcd_last_transno it replaces ted_reply_last (the
 * previously retained entry is freed); otherwise @trd is freed. */
344 static void tgt_release_reply_data(struct lu_target *lut,
345 struct tg_export_data *ted,
346 struct tg_reply_data *trd)
348 CDEBUG(D_TRACE, "%s: release reply data %p: xid %llu, transno %llu, "
349 "client gen %u, slot idx %d\n",
350 lut == NULL ? "" : tgt_name(lut), trd, trd->trd_reply.lrd_xid,
351 trd->trd_reply.lrd_transno, trd->trd_reply.lrd_client_gen,
354 LASSERT(mutex_is_locked(&ted->ted_lcd_lock));
356 /* Do not free the reply data corresponding to the
357 * highest transno of this export.
358 * This ensures on-disk reply data is kept and
359 * last committed transno can be restored from disk in case
362 if (trd->trd_reply.lrd_transno == ted->ted_lcd->lcd_last_transno) {
363 /* free previous retained reply */
364 if (ted->ted_reply_last != NULL)
365 tgt_free_reply_data(lut, ted, ted->ted_reply_last);
366 /* retain the reply */
367 list_del_init(&trd->trd_list);
368 ted->ted_reply_last = trd;
370 tgt_free_reply_data(lut, ted, trd);
/* Point the per-thread lu_buf at the thread-local server data buffer. */
374 static inline struct lu_buf *tti_buf_lsd(struct tgt_thread_info *tti)
376 tti->tti_buf.lb_buf = &tti->tti_lsd;
377 tti->tti_buf.lb_len = sizeof(tti->tti_lsd);
378 return &tti->tti_buf;
/* Point the per-thread lu_buf at the thread-local client data buffer. */
381 static inline struct lu_buf *tti_buf_lcd(struct tgt_thread_info *tti)
383 tti->tti_buf.lb_buf = &tti->tti_lcd;
384 tti->tti_buf.lb_len = sizeof(tti->tti_lcd);
385 return &tti->tti_buf;
389 * Allocate in-memory data for client slot related to export.
/* Initializes the locks/lists in exp_target_data and allocates the
 * lsd_client_data; ted_lr_idx is set to -1 because slot index 0 is a
 * valid slot and cannot serve as a "not assigned" marker. */
391 int tgt_client_alloc(struct obd_export *exp)
394 LASSERT(exp != exp->exp_obd->obd_self_export);
396 spin_lock_init(&exp->exp_target_data.ted_nodemap_lock);
397 INIT_LIST_HEAD(&exp->exp_target_data.ted_nodemap_member);
398 spin_lock_init(&exp->exp_target_data.ted_fmd_lock);
399 INIT_LIST_HEAD(&exp->exp_target_data.ted_fmd_list);
401 OBD_ALLOC_PTR(exp->exp_target_data.ted_lcd);
402 if (exp->exp_target_data.ted_lcd == NULL)
404 /* Mark that slot is not yet valid, 0 doesn't work here */
405 exp->exp_target_data.ted_lr_idx = -1;
406 INIT_LIST_HEAD(&exp->exp_target_data.ted_reply_list);
407 mutex_init(&exp->exp_target_data.ted_lcd_lock);
410 EXPORT_SYMBOL(tgt_client_alloc);
413 * Free in-memory data for client slot related to export.
/* Releases all reply data (including the retained ted_reply_last),
 * removes the export from the generation hash, frees the lcd and
 * clears the client's bit in lut_client_bitmap. The bitmap is skipped
 * when the target was already freed (LU-7430) or no slot was assigned. */
415 void tgt_client_free(struct obd_export *exp)
417 struct tg_export_data *ted = &exp->exp_target_data;
418 struct lu_target *lut = class_exp2tgt(exp);
419 struct tg_reply_data *trd, *tmp;
421 LASSERT(exp != exp->exp_obd->obd_self_export);
423 tgt_fmd_cleanup(exp);
425 /* free reply data */
426 mutex_lock(&ted->ted_lcd_lock);
427 list_for_each_entry_safe(trd, tmp, &ted->ted_reply_list, trd_list) {
428 tgt_release_reply_data(lut, ted, trd);
430 if (ted->ted_reply_last != NULL) {
431 tgt_free_reply_data(lut, ted, ted->ted_reply_last);
432 ted->ted_reply_last = NULL;
434 mutex_unlock(&ted->ted_lcd_lock);
436 if (!hlist_unhashed(&exp->exp_gen_hash))
437 cfs_hash_del(exp->exp_obd->obd_gen_hash,
438 &ted->ted_lcd->lcd_generation,
441 OBD_FREE_PTR(ted->ted_lcd);
444 /* Target may have been freed (see LU-7430)
445 * Slot may be not yet assigned */
446 if (exp->exp_obd->u.obt.obt_magic != OBT_MAGIC ||
450 /* Clear bit when lcd is freed */
451 LASSERT(lut && lut->lut_client_bitmap);
452 if (!test_and_clear_bit(ted->ted_lr_idx, lut->lut_client_bitmap)) {
453 CERROR("%s: client %u bit already clear in bitmap\n",
454 exp->exp_obd->obd_name, ted->ted_lr_idx);
/* keep lut_num_clients accurate for multi-modify-RPC clients; during
 * obd_stopping the counter is intentionally left alone */
458 if (tgt_is_multimodrpcs_client(exp) && !exp->exp_obd->obd_stopping)
459 atomic_dec(&lut->lut_num_clients);
461 EXPORT_SYMBOL(tgt_client_free);
/* Sanity-check a client record read from last_rcvd: if lcd_uuid is not
 * NUL-terminated within its buffer, force-terminate it and complain. */
463 static inline void tgt_check_lcd(const char *obd_name, int index,
464 struct lsd_client_data *lcd)
466 size_t uuid_size = sizeof(lcd->lcd_uuid);
468 if (strnlen((char*)lcd->lcd_uuid, uuid_size) == uuid_size) {
469 lcd->lcd_uuid[uuid_size - 1] = '\0';
471 LCONSOLE_ERROR("the client UUID (%s) on %s for exports stored in last_rcvd(index = %d) is bad!\n",
472 lcd->lcd_uuid, obd_name, index);
/* Read one client record at @off/@index from last_rcvd into @lcd,
 * validating the UUID, converting from little-endian and translating
 * the stored RPC results from network to host representation. */
476 static int tgt_client_data_read(const struct lu_env *env, struct lu_target *tgt,
477 struct lsd_client_data *lcd,
478 loff_t *off, int index)
480 struct tgt_thread_info *tti = tgt_th_info(env);
484 rc = dt_record_read(env, tgt->lut_last_rcvd, &tti->tti_buf, off);
486 tgt_check_lcd(tgt->lut_obd->obd_name, index, &tti->tti_lcd);
487 lcd_le_to_cpu(&tti->tti_lcd, lcd);
488 lcd->lcd_last_result = ptlrpc_status_ntoh(lcd->lcd_last_result);
489 lcd->lcd_last_close_result =
490 ptlrpc_status_ntoh(lcd->lcd_last_close_result);
493 CDEBUG(D_INFO, "%s: read lcd @%lld uuid = %s, last_transno = %llu"
494 ", last_xid = %llu, last_result = %u, last_data = %u, "
495 "last_close_transno = %llu, last_close_xid = %llu, "
496 "last_close_result = %u, rc = %d\n", tgt->lut_obd->obd_name,
497 *off, lcd->lcd_uuid, lcd->lcd_last_transno, lcd->lcd_last_xid,
498 lcd->lcd_last_result, lcd->lcd_last_data,
499 lcd->lcd_last_close_transno, lcd->lcd_last_close_xid,
500 lcd->lcd_last_close_result, rc);
/* Write one client record @lcd to last_rcvd at @off inside transaction
 * @th. NOTE(review): lcd_last_result/lcd_last_close_result appear to be
 * converted to network order in place in the caller's @lcd without a
 * visible restore — any restore would be in elided lines; confirm. */
504 static int tgt_client_data_write(const struct lu_env *env,
505 struct lu_target *tgt,
506 struct lsd_client_data *lcd,
507 loff_t *off, struct thandle *th)
509 struct tgt_thread_info *tti = tgt_th_info(env);
510 struct dt_object *dto;
512 lcd->lcd_last_result = ptlrpc_status_hton(lcd->lcd_last_result);
513 lcd->lcd_last_close_result =
514 ptlrpc_status_hton(lcd->lcd_last_close_result);
515 lcd_cpu_to_le(lcd, &tti->tti_lcd);
518 dto = dt_object_locate(tgt->lut_last_rcvd, th->th_dev);
519 return dt_record_write(env, dto, &tti->tti_buf, off, th);
/* Commit-callback cookie pairing the txn callback with its export. */
522 struct tgt_new_client_callback {
523 struct dt_txn_commit_cb lncc_cb;
524 struct obd_export *lncc_exp;
/* Transaction commit callback for a new client record: once the
 * initial-connect write is on disk, the export no longer needs
 * synchronous operations (exp_need_sync is cleared under exp_lock). */
527 static void tgt_cb_new_client(struct lu_env *env, struct thandle *th,
528 struct dt_txn_commit_cb *cb, int err)
530 struct tgt_new_client_callback *ccb;
532 ccb = container_of0(cb, struct tgt_new_client_callback, lncc_cb);
534 LASSERT(ccb->lncc_exp->exp_obd);
536 CDEBUG(D_RPCTRACE, "%s: committing for initial connect of %s\n",
537 ccb->lncc_exp->exp_obd->obd_name,
538 ccb->lncc_exp->exp_client_uuid.uuid);
540 spin_lock(&ccb->lncc_exp->exp_lock);
542 ccb->lncc_exp->exp_need_sync = 0;
544 spin_unlock(&ccb->lncc_exp->exp_lock);
/* drop the reference taken when the callback was registered */
545 class_export_cb_put(ccb->lncc_exp);
/* Register tgt_cb_new_client() on transaction @th for export @exp.
 * Takes an export reference for the callback; the reference is dropped
 * here if dt_trans_cb_add() fails, otherwise by the callback itself. */
550 int tgt_new_client_cb_add(struct thandle *th, struct obd_export *exp)
552 struct tgt_new_client_callback *ccb;
553 struct dt_txn_commit_cb *dcb;
560 ccb->lncc_exp = class_export_cb_get(exp);
563 dcb->dcb_func = tgt_cb_new_client;
564 INIT_LIST_HEAD(&dcb->dcb_linkage);
565 strlcpy(dcb->dcb_name, "tgt_cb_new_client", sizeof(dcb->dcb_name));
567 rc = dt_trans_cb_add(th, dcb);
569 class_export_cb_put(exp);
576 * Update client data in last_rcvd
/* Writes ted_lcd at ted_lr_off within a local transaction. If the
 * new-client commit callback cannot be registered, the export falls
 * back to synchronous operation (exp_need_sync = 1). */
578 static int tgt_client_data_update(const struct lu_env *env,
579 struct obd_export *exp)
581 struct tg_export_data *ted = &exp->exp_target_data;
582 struct lu_target *tgt = class_exp2tgt(exp);
583 struct tgt_thread_info *tti = tgt_th_info(env);
589 if (unlikely(tgt == NULL)) {
590 CDEBUG(D_ERROR, "%s: No target for connected export\n",
591 class_exp2obd(exp)->obd_name);
595 if (tgt->lut_bottom->dd_rdonly)
598 th = dt_trans_create(env, tgt->lut_bottom);
603 rc = dt_declare_record_write(env, tgt->lut_last_rcvd,
605 ted->ted_lr_off, th);
609 rc = dt_trans_start_local(env, tgt->lut_bottom, th);
613 mutex_lock(&ted->ted_lcd_lock);
616 * Until this operations will be committed the sync is needed
617 * for this export. This should be done _after_ starting the
618 * transaction so that many connecting clients will not bring
619 * server down with lots of sync writes.
621 rc = tgt_new_client_cb_add(th, exp);
623 /* can't add callback, do sync now */
626 spin_lock(&exp->exp_lock);
627 exp->exp_need_sync = 1;
628 spin_unlock(&exp->exp_lock);
631 tti->tti_off = ted->ted_lr_off;
632 rc = tgt_client_data_write(env, tgt, ted->ted_lcd, &tti->tti_off, th);
634 mutex_unlock(&ted->ted_lcd_lock);
638 dt_trans_stop(env, tgt->lut_bottom, th);
639 CDEBUG(D_INFO, "%s: update last_rcvd client data for UUID = %s, "
640 "last_transno = %llu: rc = %d\n", tgt->lut_obd->obd_name,
641 tgt->lut_lsd.lsd_uuid, tgt->lut_lsd.lsd_last_transno, rc);
/* Read the server data record from last_rcvd into tgt->lut_lsd,
 * converting from the on-disk little-endian layout. */
646 static int tgt_server_data_read(const struct lu_env *env, struct lu_target *tgt)
648 struct tgt_thread_info *tti = tgt_th_info(env);
653 rc = dt_record_read(env, tgt->lut_last_rcvd, &tti->tti_buf,
656 lsd_le_to_cpu(&tti->tti_lsd, &tgt->lut_lsd);
658 CDEBUG(D_INFO, "%s: read last_rcvd server data for UUID = %s, "
659 "last_transno = %llu: rc = %d\n", tgt->lut_obd->obd_name,
660 tgt->lut_lsd.lsd_uuid, tgt->lut_lsd.lsd_last_transno, rc);
/* Write tgt->lut_lsd (server data) to last_rcvd inside transaction
 * @th, converting to the on-disk little-endian layout first. */
664 static int tgt_server_data_write(const struct lu_env *env,
665 struct lu_target *tgt, struct thandle *th)
667 struct tgt_thread_info *tti = tgt_th_info(env);
668 struct dt_object *dto;
675 lsd_cpu_to_le(&tgt->lut_lsd, &tti->tti_lsd);
677 dto = dt_object_locate(tgt->lut_last_rcvd, th->th_dev);
678 rc = dt_record_write(env, dto, &tti->tti_buf, &tti->tti_off, th);
680 CDEBUG(D_INFO, "%s: write last_rcvd server data for UUID = %s, "
681 "last_transno = %llu: rc = %d\n", tgt->lut_obd->obd_name,
682 tgt->lut_lsd.lsd_uuid, tgt->lut_lsd.lsd_last_transno, rc);
688 * Update server data in last_rcvd
/* Refreshes lsd_last_transno from lut_last_transno under lut_translock,
 * then writes the server record in its own transaction. A no-op on
 * read-only backends. */
690 int tgt_server_data_update(const struct lu_env *env, struct lu_target *tgt,
693 struct tgt_thread_info *tti = tgt_th_info(env);
700 "%s: mount_count is %llu, last_transno is %llu\n",
701 tgt->lut_lsd.lsd_uuid, tgt->lut_obd->u.obt.obt_mount_count,
702 tgt->lut_last_transno);
704 /* Always save latest transno to keep it fresh */
705 spin_lock(&tgt->lut_translock);
706 tgt->lut_lsd.lsd_last_transno = tgt->lut_last_transno;
707 spin_unlock(&tgt->lut_translock);
709 if (tgt->lut_bottom->dd_rdonly)
712 th = dt_trans_create(env, tgt->lut_bottom);
719 rc = dt_declare_record_write(env, tgt->lut_last_rcvd,
720 &tti->tti_buf, tti->tti_off, th);
724 rc = dt_trans_start(env, tgt->lut_bottom, th);
728 rc = tgt_server_data_write(env, tgt, th);
730 dt_trans_stop(env, tgt->lut_bottom, th);
732 CDEBUG(D_INFO, "%s: update last_rcvd server data for UUID = %s, "
733 "last_transno = %llu: rc = %d\n", tgt->lut_obd->obd_name,
734 tgt->lut_lsd.lsd_uuid, tgt->lut_lsd.lsd_last_transno, rc);
737 EXPORT_SYMBOL(tgt_server_data_update);
/* Truncate last_rcvd to @size: punch the tail and set LA_SIZE in one
 * local transaction. Skipped on read-only backends. */
739 static int tgt_truncate_last_rcvd(const struct lu_env *env,
740 struct lu_target *tgt, loff_t size)
742 struct dt_object *dt = tgt->lut_last_rcvd;
749 if (tgt->lut_bottom->dd_rdonly)
753 attr.la_valid = LA_SIZE;
755 th = dt_trans_create(env, tgt->lut_bottom);
758 rc = dt_declare_punch(env, dt, size, OBD_OBJECT_EOF, th);
761 rc = dt_declare_attr_set(env, dt, &attr, th);
764 rc = dt_trans_start_local(env, tgt->lut_bottom, th);
768 rc = dt_punch(env, dt, size, OBD_OBJECT_EOF, th);
770 rc = dt_attr_set(env, dt, &attr, th);
773 dt_trans_stop(env, tgt->lut_bottom, th);
/* VBR: bring a client's lcd_last_epoch up to the server start epoch
 * and persist it; clients already at/above the epoch are left alone. */
778 static void tgt_client_epoch_update(const struct lu_env *env,
779 struct obd_export *exp)
781 struct lsd_client_data *lcd = exp->exp_target_data.ted_lcd;
782 struct lu_target *tgt = class_exp2tgt(exp);
784 LASSERT(tgt && tgt->lut_bottom);
785 /** VBR: set client last_epoch to current epoch */
786 if (lcd->lcd_last_epoch >= tgt->lut_lsd.lsd_start_epoch)
788 lcd->lcd_last_epoch = tgt->lut_lsd.lsd_start_epoch;
789 tgt_client_data_update(env, exp);
793 * Update boot epoch when recovery ends
/* Bumps the version-recovery epoch (top LR_EPOCH_BITS of the transno),
 * propagates the new epoch to every export that took part in recovery
 * (via the final request queue), optionally clears the MULTI RPCS
 * incompat flag on an MDT, and persists the server data synchronously. */
795 void tgt_boot_epoch_update(struct lu_target *tgt)
798 struct ptlrpc_request *req;
800 LIST_HEAD(client_list);
803 if (tgt->lut_obd->obd_stopping)
806 rc = lu_env_init(&env, LCT_DT_THREAD);
808 CERROR("%s: can't initialize environment: rc = %d\n",
809 tgt->lut_obd->obd_name, rc);
/* new epoch = old epoch + 1; transno counter restarts at the new
 * epoch boundary */
813 spin_lock(&tgt->lut_translock);
814 start_epoch = (tgt->lut_last_transno >> LR_EPOCH_BITS) + 1;
815 tgt->lut_last_transno = (__u64)start_epoch << LR_EPOCH_BITS;
816 tgt->lut_lsd.lsd_start_epoch = start_epoch;
817 spin_unlock(&tgt->lut_translock);
820 * The recovery is not yet finished and final queue can still be updated
821 * with resend requests. Move final list to separate one for processing
823 spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
824 list_splice_init(&tgt->lut_obd->obd_final_req_queue, &client_list);
825 spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
828 * go through list of exports participated in recovery and
829 * set new epoch for them
831 list_for_each_entry(req, &client_list, rq_list) {
832 LASSERT(!req->rq_export->exp_delayed);
833 if (!req->rq_export->exp_vbr_failed)
834 tgt_client_epoch_update(&env, req->rq_export);
836 /** return list back at once */
837 spin_lock(&tgt->lut_obd->obd_recovery_task_lock);
838 list_splice_init(&client_list, &tgt->lut_obd->obd_final_req_queue);
839 spin_unlock(&tgt->lut_obd->obd_recovery_task_lock);
841 /** Clear MULTI RPCS incompatibility flag if
842 * - target is MDT and
843 * - there is no client to recover or the recovery was aborted
845 if (!strncmp(tgt->lut_obd->obd_type->typ_name, LUSTRE_MDT_NAME, 3) &&
846 (atomic_read(&tgt->lut_obd->obd_max_recoverable_clients) == 0 ||
847 tgt->lut_obd->obd_abort_recovery))
848 tgt->lut_lsd.lsd_feature_incompat &= ~OBD_INCOMPAT_MULTI_RPCS;
850 /** update server epoch */
851 tgt_server_data_update(&env, tgt, 1);
856 * commit callback, need to update last_committed value
/* Commit-callback cookie: target, export and (in elided lines) the
 * transno being committed (llcc_transno, used below). */
858 struct tgt_last_committed_callback {
859 struct dt_txn_commit_cb llcc_cb;
860 struct lu_target *llcc_tgt;
861 struct obd_export *llcc_exp;
/* Transaction commit callback: advance obd_last_committed and the
 * export's exp_last_committed to llcc_transno, then wake committed
 * replies and cancel sync-on-lock-cancel locks covered by the commit.
 * Skipped on error so replay data is preserved. */
865 static void tgt_cb_last_committed(struct lu_env *env, struct thandle *th,
866 struct dt_txn_commit_cb *cb, int err)
868 struct tgt_last_committed_callback *ccb;
870 ccb = container_of0(cb, struct tgt_last_committed_callback, llcc_cb);
872 LASSERT(ccb->llcc_exp);
873 LASSERT(ccb->llcc_tgt != NULL);
874 LASSERT(ccb->llcc_exp->exp_obd == ccb->llcc_tgt->lut_obd);
876 /* error hit, don't update last committed to provide chance to
877 * replay data after fail */
881 /* Fast path w/o spinlock, if exp_last_committed was updated
882 * with higher transno, no need to take spinlock and check,
883 * also no need to update obd_last_committed. */
884 if (ccb->llcc_transno <= ccb->llcc_exp->exp_last_committed)
886 spin_lock(&ccb->llcc_tgt->lut_translock);
887 if (ccb->llcc_transno > ccb->llcc_tgt->lut_obd->obd_last_committed)
888 ccb->llcc_tgt->lut_obd->obd_last_committed = ccb->llcc_transno;
890 if (ccb->llcc_transno > ccb->llcc_exp->exp_last_committed) {
891 ccb->llcc_exp->exp_last_committed = ccb->llcc_transno;
892 spin_unlock(&ccb->llcc_tgt->lut_translock);
894 ptlrpc_commit_replies(ccb->llcc_exp);
895 tgt_cancel_slc_locks(ccb->llcc_tgt, ccb->llcc_transno);
897 spin_unlock(&ccb->llcc_tgt->lut_translock);
900 CDEBUG(D_HA, "%s: transno %lld is committed\n",
901 ccb->llcc_tgt->lut_obd->obd_name, ccb->llcc_transno);
/* drop the reference taken when the callback was registered */
904 class_export_cb_put(ccb->llcc_exp);
909 * Add commit callback function, it returns a non-zero value to inform
910 * caller to use sync transaction if necessary.
/* Registers tgt_cb_last_committed() for @transno on @th. Non-zero
 * return (error, lightweight connection, or exp_need_sync) tells the
 * caller the transaction must be synchronous. */
912 static int tgt_last_commit_cb_add(struct thandle *th, struct lu_target *tgt,
913 struct obd_export *exp, __u64 transno)
915 struct tgt_last_committed_callback *ccb;
916 struct dt_txn_commit_cb *dcb;
924 ccb->llcc_exp = class_export_cb_get(exp);
925 ccb->llcc_transno = transno;
928 dcb->dcb_func = tgt_cb_last_committed;
929 INIT_LIST_HEAD(&dcb->dcb_linkage);
930 strlcpy(dcb->dcb_name, "tgt_cb_last_committed", sizeof(dcb->dcb_name));
932 rc = dt_trans_cb_add(th, dcb);
934 class_export_cb_put(exp);
938 if (exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT)
939 /* report failure to force synchronous operation */
942 /* if exp_need_sync is set, return non-zero value to force
943 * a sync transaction. */
944 return rc ? rc : exp->exp_need_sync;
/* Decide whether the connecting client is node-local (its NID is a
 * local LNet peer) and not an MDS; local-recovery configuration and
 * connect flags can override. Return values are in elided lines. */
947 static int tgt_is_local_client(const struct lu_env *env,
948 struct obd_export *exp)
950 struct lu_target *tgt = class_exp2tgt(exp);
951 struct tgt_session_info *tsi = tgt_ses_info(env);
952 struct ptlrpc_request *req = tgt_ses_req(tsi);
954 if (tgt->lut_local_recovery)
958 if (!LNetIsPeerLocal(req->rq_peer.nid))
960 if (exp_connect_flags(exp) & OBD_CONNECT_MDS)
967 * Add new client to the last_rcvd upon new connection.
969 * We use a bitmap to locate a free space in the last_rcvd file and initialize
/* Finds a free client slot in lut_client_bitmap, computes the record
 * offset from lsd_client_start/lsd_client_size, assigns a generation
 * for multi-modify-RPC clients (setting the MULTI RPCS incompat flag
 * on first such client), and persists the new record. Self-connect,
 * lightweight and node-local clients skip the slot machinery. */
972 int tgt_client_new(const struct lu_env *env, struct obd_export *exp)
974 struct tg_export_data *ted = &exp->exp_target_data;
975 struct lu_target *tgt = class_exp2tgt(exp);
980 LASSERT(tgt && tgt->lut_client_bitmap != NULL);
981 if (!strcmp(ted->ted_lcd->lcd_uuid, tgt->lut_obd->obd_uuid.uuid))
984 if (exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT)
987 if (tgt_is_local_client(env, exp)) {
988 LCONSOLE_WARN("%s: local client %s w/o recovery\n",
989 exp->exp_obd->obd_name, ted->ted_lcd->lcd_uuid)\u003b
990 exp->exp_no_recovery = 1;
994 /* the bitmap operations can handle cl_idx > sizeof(long) * 8, so
995 * there's no need for extra complication here
997 idx = find_first_zero_bit(tgt->lut_client_bitmap, LR_MAX_CLIENTS);
999 if (idx >= LR_MAX_CLIENTS ||
1000 OBD_FAIL_CHECK(OBD_FAIL_MDS_CLIENT_ADD)) {
1001 CERROR("%s: no room for %u clients - fix LR_MAX_CLIENTS\n",
1002 tgt->lut_obd->obd_name, idx);
/* lost the race for this bit: look for the next free one (retry
 * loop structure is in elided lines) */
1005 if (test_and_set_bit(idx, tgt->lut_client_bitmap)) {
1006 idx = find_next_zero_bit(tgt->lut_client_bitmap,
1007 LR_MAX_CLIENTS, idx);
1011 ted->ted_lr_idx = idx;
1012 ted->ted_lr_off = tgt->lut_lsd.lsd_client_start +
1013 idx * tgt->lut_lsd.lsd_client_size;
1015 LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
1017 if (tgt_is_multimodrpcs_client(exp)) {
1018 /* Set MULTI RPCS incompatibility flag to prevent previous
1019 * Lustre versions to mount a target with reply_data file */
1020 atomic_inc(&tgt->lut_num_clients);
1021 if (!(tgt->lut_lsd.lsd_feature_incompat &
1022 OBD_INCOMPAT_MULTI_RPCS)) {
1023 tgt->lut_lsd.lsd_feature_incompat |=
1024 OBD_INCOMPAT_MULTI_RPCS;
1025 rc = tgt_server_data_update(env, tgt, 1);
1027 CERROR("%s: unable to set MULTI RPCS "
1028 "incompatibility flag\n",
1029 exp->exp_obd->obd_name);
1034 /* assign client slot generation */
1035 ted->ted_lcd->lcd_generation =
1036 atomic_inc_return(&tgt->lut_client_generation);
1038 ted->ted_lcd->lcd_generation = 0;
1041 CDEBUG(D_INFO, "%s: new client at index %d (%llu) with UUID '%s' "
1043 tgt->lut_obd->obd_name, ted->ted_lr_idx, ted->ted_lr_off,
1044 ted->ted_lcd->lcd_uuid, ted->ted_lcd->lcd_generation);
1046 if (OBD_FAIL_CHECK(OBD_FAIL_TGT_CLIENT_ADD))
1049 rc = tgt_client_data_update(env, exp);
1051 CERROR("%s: Failed to write client lcd at idx %d, rc %d\n",
1052 tgt->lut_obd->obd_name, idx, rc);
1056 EXPORT_SYMBOL(tgt_client_new);
1058 /* Add an existing client to the MDS in-memory state based on
1059 * a client that was previously found in the last_rcvd file and
1060 * already has an assigned slot (idx >= 0).
1062 * It should not be possible to fail adding an existing client - otherwise
1063 * mdt_init_server_data() callsite needs to be fixed.
/* Unlike tgt_client_new(), the slot index is known from disk; the bit
 * must not already be set (that would mean a duplicate slot). */
1065 int tgt_client_add(const struct lu_env *env, struct obd_export *exp, int idx)
1067 struct tg_export_data *ted = &exp->exp_target_data;
1068 struct lu_target *tgt = class_exp2tgt(exp);
1072 LASSERT(tgt && tgt->lut_client_bitmap != NULL);
1073 LASSERTF(idx >= 0, "%d\n", idx);
1075 if (!strcmp(ted->ted_lcd->lcd_uuid, tgt->lut_obd->obd_uuid.uuid) ||
1076 exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT)
1079 if (test_and_set_bit(idx, tgt->lut_client_bitmap)) {
1080 CERROR("%s: client %d: bit already set in bitmap!!\n",
1081 tgt->lut_obd->obd_name, idx);
1084 atomic_inc(&tgt->lut_num_clients);
1086 CDEBUG(D_INFO, "%s: client at idx %d with UUID '%s' added, "
1088 tgt->lut_obd->obd_name, idx, ted->ted_lcd->lcd_uuid,
1089 ted->ted_lcd->lcd_generation);
1091 ted->ted_lr_idx = idx;
1092 ted->ted_lr_off = tgt->lut_lsd.lsd_client_start +
1093 idx * tgt->lut_lsd.lsd_client_size;
1095 mutex_init(&ted->ted_lcd_lock);
1097 LASSERTF(ted->ted_lr_off > 0, "ted_lr_off = %llu\n", ted->ted_lr_off);
/* Remove a client slot from last_rcvd on disconnect: update the server
 * record first (so last_transno survives somewhere), then zero the
 * client's UUID in its record. Failover disconnects keep the record so
 * the client can recover; self/lightweight/no-recovery exports skip. */
1102 int tgt_client_del(const struct lu_env *env, struct obd_export *exp)
1104 struct tg_export_data *ted = &exp->exp_target_data;
1105 struct lu_target *tgt = class_exp2tgt(exp);
1110 LASSERT(ted->ted_lcd);
1112 if (unlikely(tgt == NULL)) {
1113 CDEBUG(D_ERROR, "%s: No target for connected export\n",
1114 class_exp2obd(exp)->obd_name);
1118 /* XXX if lcd_uuid were a real obd_uuid, I could use obd_uuid_equals */
1119 if (!strcmp((char *)ted->ted_lcd->lcd_uuid,
1120 (char *)tgt->lut_obd->obd_uuid.uuid) ||
1121 exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT ||
1122 exp->exp_no_recovery)
1125 /* Slot may be not yet assigned, use case is race between Client
1126 * reconnect and forced eviction */
1127 if (ted->ted_lr_idx < 0) {
1128 CWARN("%s: client with UUID '%s' not in bitmap\n",
1129 tgt->lut_obd->obd_name, ted->ted_lcd->lcd_uuid);
1133 CDEBUG(D_INFO, "%s: del client at idx %u, off %lld, UUID '%s'\n",
1134 tgt->lut_obd->obd_name, ted->ted_lr_idx, ted->ted_lr_off,
1135 ted->ted_lcd->lcd_uuid);
1137 /* Clear the bit _after_ zeroing out the client so we don't
1138 race with filter_client_add and zero out new clients.*/
1139 if (!test_bit(ted->ted_lr_idx, tgt->lut_client_bitmap)) {
1140 CERROR("%s: client %u: bit already clear in bitmap!!\n",
1141 tgt->lut_obd->obd_name, ted->ted_lr_idx);
1145 /* Do not erase record for recoverable client. */
1146 if (exp->exp_flags & OBD_OPT_FAILOVER)
1149 if (OBD_FAIL_CHECK(OBD_FAIL_TGT_CLIENT_DEL))
1152 /* Make sure the server's last_transno is up to date.
1153 * This should be done before zeroing client slot so last_transno will
1154 * be in server data or in client data in case of failure */
1155 rc = tgt_server_data_update(env, tgt, 0);
1157 CERROR("%s: failed to update server data, skip client %s "
1158 "zeroing, rc %d\n", tgt->lut_obd->obd_name,
1159 ted->ted_lcd->lcd_uuid, rc);
/* zero the UUID: an all-zero uuid marks the slot free on disk */
1163 memset(ted->ted_lcd->lcd_uuid, 0, sizeof ted->ted_lcd->lcd_uuid);
1164 rc = tgt_client_data_update(env, exp);
1166 CDEBUG(rc == 0 ? D_INFO : D_ERROR,
1167 "%s: zeroing out client %s at idx %u (%llu), rc %d\n",
1168 tgt->lut_obd->obd_name, ted->ted_lcd->lcd_uuid,
1169 ted->ted_lr_idx, ted->ted_lr_off, rc);
1172 EXPORT_SYMBOL(tgt_client_del);
/* Release all reply data on @exp carrying multi-slot tag @tag (a new
 * request on a tag obsoletes earlier replies on the same tag). For
 * increasing-xid clients the released xids must not exceed @xid.
 * Locking: presumably called with ted_lcd_lock held — TODO confirm
 * (the lock acquisition is not visible here). */
1174 static void tgt_clean_by_tag(struct obd_export *exp, __u64 xid, __u16 tag)
1176 struct tg_export_data *ted = &exp->exp_target_data;
1177 struct lu_target *lut = class_exp2tgt(exp);
1178 struct tg_reply_data *trd, *tmp;
1183 list_for_each_entry_safe(trd, tmp, &ted->ted_reply_list, trd_list) {
1184 if (trd->trd_tag != tag)
1187 LASSERT(ergo(tgt_is_increasing_xid_client(exp),
1188 trd->trd_reply.lrd_xid <= xid));
1190 ted->ted_release_tag++;
1191 tgt_release_reply_data(lut, ted, trd);
/* Attach reply data @trd to export @ted: bump the export's
 * lcd_last_transno, optionally claim a reply_data slot and write the
 * record to disk (at header + slot * record size), clean older replies
 * sharing the same tag, and link @trd onto ted_reply_list. */
1195 static int tgt_add_reply_data(const struct lu_env *env, struct lu_target *tgt,
1196 struct tg_export_data *ted, struct tg_reply_data *trd,
1197 struct ptlrpc_request *req,
1198 struct thandle *th, bool update_lrd_file)
1200 struct lsd_reply_data *lrd;
1203 lrd = &trd->trd_reply;
1204 /* update export last transno */
1205 mutex_lock(&ted->ted_lcd_lock);
1206 if (lrd->lrd_transno > ted->ted_lcd->lcd_last_transno)
1207 ted->ted_lcd->lcd_last_transno = lrd->lrd_transno;
1208 mutex_unlock(&ted->ted_lcd_lock);
1211 /* find a empty slot */
1212 i = tgt_find_free_reply_slot(tgt);
1213 if (unlikely(i < 0)) {
1214 CERROR("%s: couldn't find a slot for reply data: "
1215 "rc = %d\n", tgt_name(tgt), i);
1220 if (update_lrd_file) {
1224 /* write reply data to disk */
1225 off = sizeof(struct lsd_reply_header) + sizeof(*lrd) * i;
1226 rc = tgt_reply_data_write(env, tgt, lrd, off, th);
1227 if (unlikely(rc != 0)) {
1228 CERROR("%s: can't update %s file: rc = %d\n",
1229 tgt_name(tgt), REPLY_DATA, rc);
/* memory-only reply data: no on-disk slot assigned */
1234 trd->trd_index = TRD_INDEX_MEMORY;
1237 /* add reply data to target export's reply list */
1238 mutex_lock(&ted->ted_lcd_lock);
1240 int exclude = tgt_is_increasing_xid_client(req->rq_export) ?
1241 MSG_REPLAY : MSG_REPLAY|MSG_RESENT;
1243 if (req->rq_obsolete) {
1244 mutex_unlock(&ted->ted_lcd_lock);
1248 if (!(lustre_msg_get_flags(req->rq_reqmsg) & exclude))
1249 tgt_clean_by_tag(req->rq_export, req->rq_xid,
1252 list_add(&trd->trd_list, &ted->ted_reply_list);
1253 ted->ted_reply_cnt++;
1254 if (ted->ted_reply_cnt > ted->ted_reply_max)
1255 ted->ted_reply_max = ted->ted_reply_cnt;
1256 mutex_unlock(&ted->ted_lcd_lock);
1258 CDEBUG(D_TRACE, "add reply %p: xid %llu, transno %llu, "
1259 "tag %hu, client gen %u, slot idx %d\n",
1260 trd, lrd->lrd_xid, lrd->lrd_transno,
1261 trd->trd_tag, lrd->lrd_client_gen, trd->trd_index);
/*
 * Allocate and populate a tg_reply_data for the current operation, then
 * hand it to tgt_add_reply_data() for recording.
 *
 * Reply fields (xid, result, client generation) come from @req when a
 * request is available; otherwise they are taken from the session info
 * in @env (the branch with tgt_ses_info() below).  Pre-versions for VBR
 * are copied from the reply message when present.
 *
 * NOTE(review): the allocation of @trd, parts of the parameter list and
 * the exit path are elided from this listing -- confirm against the
 * full source.
 */
1266 int tgt_mk_reply_data(const struct lu_env *env,
1267 struct lu_target *tgt,
1268 struct tg_export_data *ted,
1269 struct ptlrpc_request *req,
1275 struct tg_reply_data *trd;
1276 struct lsd_reply_data *lrd;
1277 __u64 *pre_versions = NULL;
1281 if (unlikely(trd == NULL))
1284 /* fill reply data information */
1285 lrd = &trd->trd_reply;
1286 lrd->lrd_transno = transno;
/* request available: take identifiers straight from the RPC */
1288 lrd->lrd_xid = req->rq_xid;
1289 trd->trd_tag = lustre_msg_get_tag(req->rq_reqmsg);
1290 lrd->lrd_client_gen = ted->ted_lcd->lcd_generation;
1292 pre_versions = lustre_msg_get_versions(req->rq_repmsg);
1293 lrd->lrd_result = th->th_result;
1296 struct tgt_session_info *tsi;
1298 LASSERT(env != NULL);
1299 tsi = tgt_ses_info(env);
1300 LASSERT(tsi->tsi_xid != 0);
/* no request (e.g. update replay): fall back to session-stored values */
1302 lrd->lrd_xid = tsi->tsi_xid;
1303 lrd->lrd_result = tsi->tsi_result;
1304 lrd->lrd_client_gen = tsi->tsi_client_gen;
1307 lrd->lrd_data = opdata;
/* VBR: preserve the four pre-operation object versions for reconstruct */
1309 trd->trd_pre_versions[0] = pre_versions[0];
1310 trd->trd_pre_versions[1] = pre_versions[1];
1311 trd->trd_pre_versions[2] = pre_versions[2];
1312 trd->trd_pre_versions[3] = pre_versions[3];
1315 rc = tgt_add_reply_data(env, tgt, ted, trd, req,
1322 EXPORT_SYMBOL(tgt_mk_reply_data);
1325 * last_rcvd & last_committed update callbacks
/*
 * Per-transaction update of the last_rcvd file for a client request.
 *
 * Assigns (or validates, for replay) the transaction number, sets VBR
 * versions on the changed object, fills the reply's transno, registers
 * a last-committed callback, and finally persists either the server
 * header (lightweight clients), a per-client slot (regular clients),
 * or reply data (multi-modify-RPC clients).
 *
 * Returns 0 on success; -EOVERFLOW when a replay would overwrite a
 * bigger on-disk transno (VBR failure, see LU-617); other negative
 * errno from write paths (partially elided here).
 *
 * NOTE(review): numerous original lines (declarations, RETURNs, some
 * branch closers) are elided from this listing; the section comments
 * below describe only what is visible.
 */
1327 static int tgt_last_rcvd_update(const struct lu_env *env, struct lu_target *tgt,
1328 struct dt_object *obj, __u64 opdata,
1329 struct thandle *th, struct ptlrpc_request *req)
1331 struct tgt_thread_info *tti = tgt_th_info(env);
1332 struct tgt_session_info *tsi = tgt_ses_info(env);
1333 struct obd_export *exp = tsi->tsi_exp;
1334 struct tg_export_data *ted;
1342 LASSERT(exp != NULL);
1343 ted = &exp->exp_target_data;
/* lightweight (LW) clients have no slot in last_rcvd */
1345 lw_client = exp_connect_flags(exp) & OBD_CONNECT_LIGHTWEIGHT;
1346 if (ted->ted_lr_idx < 0 && !lw_client)
1347 /* ofd connect may cause transaction before export has
1352 tti->tti_transno = lustre_msg_get_transno(req->rq_reqmsg);
1354 /* From update replay, tti_transno should be set already */
1355 LASSERT(tti->tti_transno != 0);
/* transno assignment: new transno for success, keep replay transno as-is */
1357 spin_lock(&tgt->lut_translock);
1358 if (th->th_result != 0) {
1359 if (tti->tti_transno != 0) {
1360 CERROR("%s: replay transno %llu failed: rc = %d\n",
1361 tgt_name(tgt), tti->tti_transno, th->th_result);
1363 } else if (tti->tti_transno == 0) {
1364 tti->tti_transno = ++tgt->lut_last_transno;
1366 /* should be replay */
1367 if (tti->tti_transno > tgt->lut_last_transno)
1368 tgt->lut_last_transno = tti->tti_transno;
1370 spin_unlock(&tgt->lut_translock);
1372 /** VBR: set new versions */
1373 if (th->th_result == 0 && obj != NULL) {
1374 struct dt_object *dto = dt_object_locate(obj, th->th_dev);
1375 dt_version_set(env, dto, tti->tti_transno, th);
1378 /* filling reply data */
1379 CDEBUG(D_INODE, "transno = %llu, last_committed = %llu\n",
1380 tti->tti_transno, tgt->lut_obd->obd_last_committed);
1383 req->rq_transno = tti->tti_transno;
1384 lustre_msg_set_transno(req->rq_repmsg, tti->tti_transno);
1387 /* if can't add callback, do sync write */
1388 th->th_sync |= !!tgt_last_commit_cb_add(th, tgt, exp, tti->tti_transno);
1391 /* All operations performed by LW clients are synchronous and
1392 * we store the committed transno in the last_rcvd header */
1393 spin_lock(&tgt->lut_translock);
1394 if (tti->tti_transno > tgt->lut_lsd.lsd_last_transno) {
1395 tgt->lut_lsd.lsd_last_transno = tti->tti_transno;
1396 spin_unlock(&tgt->lut_translock);
1397 /* Although lightweight (LW) connections have no slot
1398 * in the last_rcvd, we still want to maintain
1399 * the in-memory lsd_client_data structure in order to
1400 * properly handle reply reconstruction. */
1401 rc = tgt_server_data_write(env, tgt, th);
1403 spin_unlock(&tgt->lut_translock);
1405 } else if (ted->ted_lr_off == 0) {
/* a regular client must have a valid last_rcvd slot offset */
1406 CERROR("%s: client idx %d has offset %lld\n",
1407 tgt_name(tgt), ted->ted_lr_idx, ted->ted_lr_off);
1411 /* Target that supports multiple reply data */
1412 if (tgt_is_multimodrpcs_client(exp)) {
1413 return tgt_mk_reply_data(env, tgt, ted, req, opdata, th,
1414 !!(req != NULL), tti->tti_transno);
1417 /* Enough for update replay, let's return */
/* ---- legacy single-slot path: update the client's last_rcvd record ---- */
1421 mutex_lock(&ted->ted_lcd_lock);
1422 LASSERT(ergo(tti->tti_transno == 0, th->th_result != 0));
1423 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_CLOSE) {
/* MDS_CLOSE is tracked separately so close replies can be reconstructed */
1424 transno_p = &ted->ted_lcd->lcd_last_close_transno;
1425 ted->ted_lcd->lcd_last_close_xid = req->rq_xid;
1426 ted->ted_lcd->lcd_last_close_result = th->th_result;
1428 /* VBR: save versions in last_rcvd for reconstruct. */
1429 __u64 *pre_versions = lustre_msg_get_versions(req->rq_repmsg);
1432 ted->ted_lcd->lcd_pre_versions[0] = pre_versions[0];
1433 ted->ted_lcd->lcd_pre_versions[1] = pre_versions[1];
1434 ted->ted_lcd->lcd_pre_versions[2] = pre_versions[2];
1435 ted->ted_lcd->lcd_pre_versions[3] = pre_versions[3];
1437 transno_p = &ted->ted_lcd->lcd_last_transno;
1438 ted->ted_lcd->lcd_last_xid = req->rq_xid;
1439 ted->ted_lcd->lcd_last_result = th->th_result;
1440 /* XXX: lcd_last_data is __u32 but intent_dispostion is __u64,
1441 * see struct ldlm_reply->lock_policy_res1; */
1442 ted->ted_lcd->lcd_last_data = opdata;
1445 /* Update transno in slot only if non-zero number, i.e. no errors */
1446 if (likely(tti->tti_transno != 0)) {
1447 /* Don't overwrite bigger transaction number with lower one.
1448 * That is not sign of problem in all cases, but in any case
1449 * this value should be monotonically increased only. */
1450 if (*transno_p > tti->tti_transno) {
1451 if (!tgt->lut_no_reconstruct) {
1452 CERROR("%s: trying to overwrite bigger transno:"
1453 "on-disk: %llu, new: %llu replay: "
1454 "%d. See LU-617.\n", tgt_name(tgt),
1455 *transno_p, tti->tti_transno,
1456 req_is_replay(req));
1457 if (req_is_replay(req)) {
/* VBR replay failure: mark the export so recovery can evict it */
1458 spin_lock(&req->rq_export->exp_lock);
1459 req->rq_export->exp_vbr_failed = 1;
1460 spin_unlock(&req->rq_export->exp_lock);
1462 mutex_unlock(&ted->ted_lcd_lock);
1463 RETURN(req_is_replay(req) ? -EOVERFLOW : 0);
1466 *transno_p = tti->tti_transno;
/* persist the updated client slot at its last_rcvd offset */
1471 tti->tti_off = ted->ted_lr_off;
1472 if (CFS_FAIL_CHECK(OBD_FAIL_TGT_RCVD_EIO))
1475 rc = tgt_client_data_write(env, tgt, ted->ted_lcd,
1478 mutex_unlock(&ted->ted_lcd_lock);
1482 mutex_unlock(&ted->ted_lcd_lock);
1487 * last_rcvd update for echo client simulation.
1488 * It updates last_rcvd client slot and version of object in
1489 * simple way but with all locks to simulate all drawbacks
/*
 * Simplified last_rcvd update used for echo-client simulation.
 *
 * Mirrors tgt_last_rcvd_update(): allocates a new transno on success,
 * sets the VBR version on @obj, registers the last-committed callback,
 * and writes the client slot -- but without request/reply handling.
 * Takes the same locks as the real path so the simulation exercises
 * identical contention.
 */
1491 static int tgt_last_rcvd_update_echo(const struct lu_env *env,
1492 struct lu_target *tgt,
1493 struct dt_object *obj,
1495 struct obd_export *exp)
1497 struct tgt_thread_info *tti = tgt_th_info(env);
1498 struct tg_export_data *ted = &exp->exp_target_data;
/* transno stays 0 when the transaction failed (checked below) */
1503 tti->tti_transno = 0;
1505 spin_lock(&tgt->lut_translock);
1506 if (th->th_result == 0)
1507 tti->tti_transno = ++tgt->lut_last_transno;
1508 spin_unlock(&tgt->lut_translock);
1510 /** VBR: set new versions */
1511 if (th->th_result == 0 && obj != NULL)
1512 dt_version_set(env, obj, tti->tti_transno, th);
1514 /* if can't add callback, do sync write */
1515 th->th_sync |= !!tgt_last_commit_cb_add(th, tgt, exp,
1518 LASSERT(ted->ted_lr_off > 0);
1520 mutex_lock(&ted->ted_lcd_lock);
1521 LASSERT(ergo(tti->tti_transno == 0, th->th_result != 0));
1522 ted->ted_lcd->lcd_last_transno = tti->tti_transno;
1523 ted->ted_lcd->lcd_last_result = th->th_result;
/* write the client slot at its fixed last_rcvd offset */
1525 tti->tti_off = ted->ted_lr_off;
1526 rc = tgt_client_data_write(env, tgt, ted->ted_lcd, &tti->tti_off, th);
1527 mutex_unlock(&ted->ted_lcd_lock);
/*
 * Rebuild client state from the last_rcvd file at target start-up.
 *
 * Iterates the per-client records between lsd_client_start and
 * @last_size, and for each non-empty slot: creates an export, copies
 * the on-disk lsd_client_data into it, registers the slot via
 * tgt_client_add(), marks the client recoverable, and (for multi-RPC
 * targets) inserts it into the generation hash.  The server-wide
 * lut_last_transno is raised to the max of all client transnos, and
 * the highest client generation is recorded.
 *
 * Returns 0 on success or negative errno; read errors on individual
 * slots are logged but do not abort start-up.
 * NOTE(review): allocation of @lcd, some error-path lines and loop
 * closers are elided from this listing.
 */
1531 static int tgt_clients_data_init(const struct lu_env *env,
1532 struct lu_target *tgt,
1533 unsigned long last_size)
1535 struct obd_device *obd = tgt->lut_obd;
1536 struct lr_server_data *lsd = &tgt->lut_lsd;
1537 struct lsd_client_data *lcd = NULL;
1538 struct tg_export_data *ted;
1541 loff_t off = lsd->lsd_client_start;
1542 __u32 generation = 0;
1543 struct cfs_hash *hash = NULL;
/* read-only device: nothing to replay/initialize from disk */
1547 if (tgt->lut_bottom->dd_rdonly)
1550 BUILD_BUG_ON(offsetof(struct lsd_client_data, lcd_padding) +
1551 sizeof(lcd->lcd_padding) != LR_CLIENT_SIZE);
1557 hash = cfs_hash_getref(tgt->lut_obd->obd_gen_hash);
1559 GOTO(err_out, rc = -ENODEV);
1561 for (cl_idx = 0; off < last_size; cl_idx++) {
1562 struct obd_export *exp;
1565 /* Don't assume off is incremented properly by
1566 * read_record(), in case sizeof(*lcd)
1567 * isn't the same as fsd->lsd_client_size. */
1568 off = lsd->lsd_client_start + cl_idx * lsd->lsd_client_size;
1569 rc = tgt_client_data_read(env, tgt, lcd, &off, cl_idx);
1571 CERROR("%s: error reading last_rcvd %s idx %d off "
1572 "%llu: rc = %d\n", tgt_name(tgt), LAST_RCVD,
1575 break; /* read error shouldn't cause startup to fail */
/* empty uuid means the slot was zeroed at disconnect; skip it */
1578 if (lcd->lcd_uuid[0] == '\0') {
1579 CDEBUG(D_INFO, "skipping zeroed client at offset %d\n",
1584 last_transno = lcd_last_transno(lcd);
1586 /* These exports are cleaned up by disconnect, so they
1587 * need to be set up like real exports as connect does.
1589 CDEBUG(D_HA, "RCVRNG CLIENT uuid: %s idx: %d lr: %llu"
1590 " srv lr: %llu lx: %llu gen %u\n", lcd->lcd_uuid,
1591 cl_idx, last_transno, lsd->lsd_last_transno,
1592 lcd_last_xid(lcd), lcd->lcd_generation);
1594 exp = class_new_export(obd, (struct obd_uuid *)lcd->lcd_uuid);
1596 if (PTR_ERR(exp) == -EALREADY) {
1597 /* export already exists, zero out this one */
1598 CERROR("%s: Duplicate export %s!\n",
1599 tgt_name(tgt), lcd->lcd_uuid);
1602 GOTO(err_out, rc = PTR_ERR(exp));
1605 ted = &exp->exp_target_data;
/* seed the export's in-memory client data from the on-disk record */
1606 *ted->ted_lcd = *lcd;
1608 rc = tgt_client_add(env, exp, cl_idx);
1609 LASSERTF(rc == 0, "rc = %d\n", rc); /* can't fail existing */
1610 /* VBR: set export last committed version */
1611 exp->exp_last_committed = last_transno;
1612 spin_lock(&exp->exp_lock);
1613 exp->exp_connecting = 0;
1614 exp->exp_in_recovery = 0;
1615 spin_unlock(&exp->exp_lock);
/* this client counts toward the recovery window */
1616 atomic_inc(&obd->obd_max_recoverable_clients);
1618 if (tgt->lut_lsd.lsd_feature_incompat &
1619 OBD_INCOMPAT_MULTI_RPCS &&
1620 lcd->lcd_generation != 0) {
1621 /* compute the highest valid client generation */
1622 generation = max(generation, lcd->lcd_generation);
1623 /* fill client_generation <-> export hash table */
1624 rc = cfs_hash_add_unique(hash, &lcd->lcd_generation,
1625 &exp->exp_gen_hash);
1627 CERROR("%s: duplicate export for client "
1629 tgt_name(tgt), lcd->lcd_generation);
1630 class_export_put(exp);
1635 class_export_put(exp);
/* set up the reverse import for server->client RPCs */
1637 rc = rev_import_init(exp);
1639 class_unlink_export(exp);
1643 /* Need to check last_rcvd even for duplicated exports. */
1644 CDEBUG(D_OTHER, "client at idx %d has last_transno = %llu\n",
1645 cl_idx, last_transno);
1647 spin_lock(&tgt->lut_translock);
1648 tgt->lut_last_transno = max(last_transno,
1649 tgt->lut_last_transno);
1650 spin_unlock(&tgt->lut_translock);
1653 /* record highest valid client generation */
1654 atomic_set(&tgt->lut_client_generation, generation);
1658 cfs_hash_putref(hash);
/*
 * Per-target-type feature-flag table, indexed by LDD_F_SV_TYPE_*.
 * rocompat/incompat are the full sets of read-only-compatible and
 * incompatible features the server understands; rocinit/incinit are
 * the subsets written to a freshly initialized last_rcvd.
 * NOTE(review): the struct member declarations (lines 1664-1669) are
 * elided from this listing.
 */
1663 struct server_compat_data {
1670 static struct server_compat_data tgt_scd[] = {
1671 [LDD_F_SV_TYPE_MDT] = {
1672 .rocompat = OBD_ROCOMPAT_LOVOBJID,
1673 .incompat = OBD_INCOMPAT_MDT | OBD_INCOMPAT_COMMON_LR |
1674 OBD_INCOMPAT_FID | OBD_INCOMPAT_IAM_DIR |
1675 OBD_INCOMPAT_LMM_VER | OBD_INCOMPAT_MULTI_OI |
1676 OBD_INCOMPAT_MULTI_RPCS,
1677 .rocinit = OBD_ROCOMPAT_LOVOBJID,
1678 .incinit = OBD_INCOMPAT_MDT | OBD_INCOMPAT_COMMON_LR |
1679 OBD_INCOMPAT_MULTI_OI,
1681 [LDD_F_SV_TYPE_OST] = {
1682 .rocompat = OBD_ROCOMPAT_IDX_IN_IDIF,
1683 .incompat = OBD_INCOMPAT_OST | OBD_INCOMPAT_COMMON_LR |
1685 .rocinit = OBD_ROCOMPAT_IDX_IN_IDIF,
1686 .incinit = OBD_INCOMPAT_OST | OBD_INCOMPAT_COMMON_LR,
/*
 * Initialize server state from the last_rcvd file at mount time.
 *
 * On a new disk (file size 0) it populates lr_server_data with default
 * layout constants and the initial feature flags for this target type;
 * otherwise it reads and validates the existing header (uuid match,
 * OSD index, feature compatibility), handles the 1.8->2.0 interop
 * client eviction, bumps the mount count, loads client records via
 * tgt_clients_data_init(), and writes the updated header back.
 *
 * Returns 0 on success or negative errno from validation/IO paths.
 * NOTE(review): several error-path lines and RETURNs are elided from
 * this listing.
 */
1690 int tgt_server_data_init(const struct lu_env *env, struct lu_target *tgt)
1692 struct tgt_thread_info *tti = tgt_th_info(env);
1693 struct lr_server_data *lsd = &tgt->lut_lsd;
1694 unsigned long last_rcvd_size;
1698 rc = dt_attr_get(env, tgt->lut_last_rcvd, &tti->tti_attr);
1702 last_rcvd_size = (unsigned long)tti->tti_attr.la_size;
1704 /* ensure padding in the struct is the correct size */
1705 BUILD_BUG_ON(offsetof(struct lr_server_data, lsd_padding) +
1706 sizeof(lsd->lsd_padding) != LR_SERVER_SIZE);
1708 rc = server_name2index(tgt_name(tgt), &index, NULL);
1710 CERROR("%s: Can not get index from name: rc = %d\n",
1714 /* server_name2index() returns type */
1716 if (type != LDD_F_SV_TYPE_MDT && type != LDD_F_SV_TYPE_OST) {
1717 CERROR("%s: unknown target type %x\n", tgt_name(tgt), type);
1721 /* last_rcvd on OST doesn't provide reconstruct support because there
1722 * may be up to 8 in-flight write requests per single slot in
1723 * last_rcvd client data
1725 tgt->lut_no_reconstruct = (type == LDD_F_SV_TYPE_OST);
1727 if (last_rcvd_size == 0) {
/* brand-new target: build the header from layout defaults */
1728 LCONSOLE_WARN("%s: new disk, initializing\n", tgt_name(tgt));
1730 memcpy(lsd->lsd_uuid, tgt->lut_obd->obd_uuid.uuid,
1731 sizeof(lsd->lsd_uuid));
1732 lsd->lsd_last_transno = 0;
1733 lsd->lsd_mount_count = 0;
1734 lsd->lsd_server_size = LR_SERVER_SIZE;
1735 lsd->lsd_client_start = LR_CLIENT_START;
1736 lsd->lsd_client_size = LR_CLIENT_SIZE;
1737 lsd->lsd_subdir_count = OBJ_SUBDIR_COUNT;
1738 lsd->lsd_osd_index = index;
1739 lsd->lsd_feature_rocompat = tgt_scd[type].rocinit;
1740 lsd->lsd_feature_incompat = tgt_scd[type].incinit;
1742 rc = tgt_server_data_read(env, tgt);
1744 CERROR("%s: error reading LAST_RCVD: rc= %d\n",
/* uuid mismatch means the disk belongs to a different target */
1748 if (strcmp(lsd->lsd_uuid, tgt->lut_obd->obd_uuid.uuid)) {
1749 if (tgt->lut_bottom->dd_rdonly) {
1750 /* Such difference may be caused by mounting
1751 * up snapshot with new fsname under rd_only
1752 * mode. But even if it was NOT, it will not
1753 * damage the system because of "rd_only". */
1754 memcpy(lsd->lsd_uuid,
1755 tgt->lut_obd->obd_uuid.uuid,
1756 sizeof(lsd->lsd_uuid));
1758 LCONSOLE_ERROR_MSG(0x157, "Trying to start "
1759 "OBD %s using the wrong "
1760 "disk %s. Were the /dev/ "
1761 "assignments rearranged?\n",
1762 tgt->lut_obd->obd_uuid.uuid,
1768 if (lsd->lsd_osd_index != index) {
1769 LCONSOLE_ERROR_MSG(0x157,
1770 "%s: index %d in last rcvd is different with the index %d in config log, It might be disk corruption!\n",
1772 lsd->lsd_osd_index, index);
/* refuse to start with features this code does not understand */
1777 if (lsd->lsd_feature_incompat & ~tgt_scd[type].incompat) {
1778 CERROR("%s: unsupported incompat filesystem feature(s) %x\n",
1780 lsd->lsd_feature_incompat & ~tgt_scd[type].incompat);
1784 if (type == LDD_F_SV_TYPE_MDT)
1785 lsd->lsd_feature_incompat |= OBD_INCOMPAT_FID;
1787 if (lsd->lsd_feature_rocompat & ~tgt_scd[type].rocompat) {
1788 CERROR("%s: unsupported read-only filesystem feature(s) %x\n",
1790 lsd->lsd_feature_rocompat & ~tgt_scd[type].rocompat);
1793 /** Interop: evict all clients at first boot with 1.8 last_rcvd */
1794 if (type == LDD_F_SV_TYPE_MDT &&
1795 !(lsd->lsd_feature_compat & OBD_COMPAT_20)) {
1796 if (last_rcvd_size > lsd->lsd_client_start) {
1797 LCONSOLE_WARN("%s: mounting at first time on 1.8 FS, "
1798 "remove all clients for interop needs\n",
1800 rc = tgt_truncate_last_rcvd(env, tgt,
1801 lsd->lsd_client_start);
1804 last_rcvd_size = lsd->lsd_client_start;
1806 /** set 2.0 flag to upgrade/downgrade between 1.8 and 2.0 */
1807 lsd->lsd_feature_compat |= OBD_COMPAT_20;
1810 spin_lock(&tgt->lut_translock);
1811 tgt->lut_last_transno = lsd->lsd_last_transno;
1812 spin_unlock(&tgt->lut_translock);
1814 lsd->lsd_mount_count++;
1816 CDEBUG(D_INODE, "=======,=BEGIN DUMPING LAST_RCVD========\n");
1817 CDEBUG(D_INODE, "%s: server last_transno: %llu\n",
1818 tgt_name(tgt), tgt->lut_last_transno);
1819 CDEBUG(D_INODE, "%s: server mount_count: %llu\n",
1820 tgt_name(tgt), lsd->lsd_mount_count);
1821 CDEBUG(D_INODE, "%s: server data size: %u\n",
1822 tgt_name(tgt), lsd->lsd_server_size);
1823 CDEBUG(D_INODE, "%s: per-client data start: %u\n",
1824 tgt_name(tgt), lsd->lsd_client_start);
1825 CDEBUG(D_INODE, "%s: per-client data size: %u\n",
1826 tgt_name(tgt), lsd->lsd_client_size);
1827 CDEBUG(D_INODE, "%s: last_rcvd size: %lu\n",
1828 tgt_name(tgt), last_rcvd_size);
1829 CDEBUG(D_INODE, "%s: server subdir_count: %u\n",
1830 tgt_name(tgt), lsd->lsd_subdir_count);
1831 CDEBUG(D_INODE, "%s: last_rcvd clients: %lu\n", tgt_name(tgt),
1832 last_rcvd_size <= lsd->lsd_client_start ? 0 :
1833 (last_rcvd_size - lsd->lsd_client_start) /
1834 lsd->lsd_client_size);
1835 CDEBUG(D_INODE, "========END DUMPING LAST_RCVD========\n");
1837 if (lsd->lsd_server_size == 0 || lsd->lsd_client_start == 0 ||
1838 lsd->lsd_client_size == 0) {
1839 CERROR("%s: bad last_rcvd contents!\n", tgt_name(tgt));
1843 if (!tgt->lut_obd->obd_replayable)
1844 CWARN("%s: recovery support OFF\n", tgt_name(tgt));
1846 rc = tgt_clients_data_init(env, tgt, last_rcvd_size);
1848 GOTO(err_client, rc);
1850 spin_lock(&tgt->lut_translock);
1851 /* obd_last_committed is used for compatibility
1852 * with other lustre recovery code */
1853 tgt->lut_obd->obd_last_committed = tgt->lut_last_transno;
1854 spin_unlock(&tgt->lut_translock);
1856 tgt->lut_obd->u.obt.obt_mount_count = lsd->lsd_mount_count;
1857 tgt->lut_obd->u.obt.obt_instance = (__u32)lsd->lsd_mount_count;
1859 /* save it, so mount count and last_transno is current */
1860 rc = tgt_server_data_update(env, tgt, 0);
1862 GOTO(err_client, rc);
1867 class_disconnect_exports(tgt->lut_obd);
1871 /* add credits for last_rcvd update */
/*
 * Transaction-start callback: declare the writes that the transaction
 * stop callback (tgt_txn_stop_cb) will perform, so the backend
 * reserves enough credits.  For multi-modify-RPC clients it declares
 * a reply_data record write (at an unknown offset, hence -1); for
 * legacy clients a last_rcvd client-slot write; and, when a VBR object
 * is attached to the session, a version set on that object.
 * Skipped entirely for read-only devices and for transactions with no
 * session or export (local operations).
 */
1872 int tgt_txn_start_cb(const struct lu_env *env, struct thandle *th,
1875 struct lu_target *tgt = cookie;
1876 struct tgt_session_info *tsi;
1877 struct tgt_thread_info *tti = tgt_th_info(env);
1878 struct dt_object *dto;
1881 /* For readonly case, the caller should have got failure
1882 * when start the transaction. If the logic comes here,
1883 * there must be something wrong. */
1884 if (unlikely(tgt->lut_bottom->dd_rdonly)) {
1889 /* if there is no session, then this transaction is not result of
1890 * request processing but some local operation */
1891 if (env->le_ses == NULL)
1894 LASSERT(tgt->lut_last_rcvd);
1895 tsi = tgt_ses_info(env);
1896 /* OFD may start transaction without export assigned */
1897 if (tsi->tsi_exp == NULL)
1900 if (tgt_is_multimodrpcs_client(tsi->tsi_exp)) {
1902 * Use maximum possible file offset for declaration to ensure
1903 * ZFS will reserve enough credits for a write anywhere in this
1904 * file, since we don't know where in the file the write will be
1905 * because a replay slot has not been assigned. This should be
1906 * replaced by dmu_tx_hold_append() when available.
1908 tti->tti_buf.lb_buf = NULL;
1909 tti->tti_buf.lb_len = sizeof(struct lsd_reply_data);
1910 dto = dt_object_locate(tgt->lut_reply_data, th->th_dev);
1911 rc = dt_declare_record_write(env, dto, &tti->tti_buf, -1, th);
/* legacy client: declare the last_rcvd slot write at its fixed offset */
1915 dto = dt_object_locate(tgt->lut_last_rcvd, th->th_dev);
1917 tti->tti_off = tsi->tsi_exp->exp_target_data.ted_lr_off;
1918 rc = dt_declare_record_write(env, dto, &tti->tti_buf,
/* VBR: declare the version update only for a locally-stored object */
1924 if (tsi->tsi_vbr_obj != NULL &&
1925 !lu_object_remote(&tsi->tsi_vbr_obj->do_lu)) {
1926 dto = dt_object_locate(tsi->tsi_vbr_obj, th->th_dev);
1927 rc = dt_declare_version_set(env, dto, th);
1933 /* Update last_rcvd records with latests transaction data */
/*
 * Transaction-stop callback: perform the last_rcvd/reply-data update
 * declared in tgt_txn_start_cb().  Dispatches to the echo-client
 * simulation path when there is no request and no session xid, and to
 * tgt_last_rcvd_update() otherwise.  Detects the "more than one
 * transaction per request" case and forces a new transno assignment
 * for the extra transaction.
 */
1934 int tgt_txn_stop_cb(const struct lu_env *env, struct thandle *th,
1937 struct lu_target *tgt = cookie;
1938 struct tgt_session_info *tsi;
1939 struct tgt_thread_info *tti = tgt_th_info(env);
1940 struct dt_object *obj = NULL;
1944 if (env->le_ses == NULL)
1947 tsi = tgt_ses_info(env);
1948 /* OFD may start transaction without export assigned */
1949 if (tsi->tsi_exp == NULL)
/* echo client is recognized by having neither a request nor an xid */
1952 echo_client = (tgt_ses_req(tsi) == NULL && tsi->tsi_xid == 0);
1954 if (tti->tti_has_trans && !echo_client) {
1955 if (tti->tti_mult_trans == 0) {
1956 CDEBUG(D_HA, "More than one transaction %llu\n",
1960 /* we need another transno to be assigned */
1961 tti->tti_transno = 0;
1962 } else if (th->th_result == 0) {
1963 tti->tti_has_trans = 1;
/* pick up the VBR object saved on the session, if stored locally */
1966 if (tsi->tsi_vbr_obj != NULL &&
1967 !lu_object_remote(&tsi->tsi_vbr_obj->do_lu)) {
1968 obj = tsi->tsi_vbr_obj;
1971 if (unlikely(echo_client)) /* echo client special case */
1972 rc = tgt_last_rcvd_update_echo(env, tgt, obj, th,
1975 rc = tgt_last_rcvd_update(env, tgt, obj, tsi->tsi_opdata, th,
/*
 * Initialize the REPLY_DATA file and recover in-memory reply data.
 *
 * For a new (empty) file, writes a fresh lsd_reply_header.  Otherwise
 * validates the header (magic and record sizes), then reads every
 * on-disk lsd_reply_data record and, when the record's client
 * generation maps to a live export via obd_gen_hash, reconstructs the
 * in-memory tg_reply_data, marks the slot busy, links it onto the
 * export's reply list, and raises both the export's and the target's
 * last committed transno.  Records from disconnected clients are
 * skipped.
 *
 * Returns 0 on success or negative errno (-EINVAL on a bad header,
 * -ENOMEM/-ENODEV on setup failures).
 * NOTE(review): allocations of @lrh/@trd and some cleanup lines are
 * elided from this listing.
 */
1980 int tgt_reply_data_init(const struct lu_env *env, struct lu_target *tgt)
1982 struct tgt_thread_info *tti = tgt_th_info(env);
1983 struct lsd_reply_data *lrd = &tti->tti_lrd;
1984 unsigned long reply_data_size;
1986 struct lsd_reply_header *lrh = NULL;
1987 struct tg_reply_data *trd = NULL;
1990 struct cfs_hash *hash = NULL;
1991 struct obd_export *exp;
1992 struct tg_export_data *ted;
1993 int reply_data_recovered = 0;
1995 rc = dt_attr_get(env, tgt->lut_reply_data, &tti->tti_attr);
1998 reply_data_size = (unsigned long)tti->tti_attr.la_size;
2002 GOTO(out, rc = -ENOMEM);
2004 if (reply_data_size == 0) {
/* fresh file: write a header describing the record layout */
2005 CDEBUG(D_INFO, "%s: new reply_data file, initializing\n",
2007 lrh->lrh_magic = LRH_MAGIC;
2008 lrh->lrh_header_size = sizeof(struct lsd_reply_header);
2009 lrh->lrh_reply_size = sizeof(struct lsd_reply_data);
2010 rc = tgt_reply_header_write(env, tgt, lrh);
2012 CERROR("%s: error writing %s: rc = %d\n",
2013 tgt_name(tgt), REPLY_DATA, rc);
2017 rc = tgt_reply_header_read(env, tgt, lrh);
2019 CERROR("%s: error reading %s: rc = %d\n",
2020 tgt_name(tgt), REPLY_DATA, rc);
/* reject a header written by an incompatible layout/version */
2023 if (lrh->lrh_magic != LRH_MAGIC ||
2024 lrh->lrh_header_size != sizeof(struct lsd_reply_header) ||
2025 lrh->lrh_reply_size != sizeof(struct lsd_reply_data)) {
2026 CERROR("%s: invalid header in %s\n",
2027 tgt_name(tgt), REPLY_DATA);
2028 GOTO(out, rc = -EINVAL);
2031 hash = cfs_hash_getref(tgt->lut_obd->obd_gen_hash);
2033 GOTO(out, rc = -ENODEV);
2037 GOTO(out, rc = -ENOMEM);
2039 /* Load reply_data from disk */
2040 for (idx = 0, off = sizeof(struct lsd_reply_header);
2041 off < reply_data_size;
2042 idx++, off += sizeof(struct lsd_reply_data)) {
2043 rc = tgt_reply_data_read(env, tgt, lrd, off);
2045 CERROR("%s: error reading %s: rc = %d\n",
2046 tgt_name(tgt), REPLY_DATA, rc);
/* map the record to a live export by client generation */
2050 exp = cfs_hash_lookup(hash, &lrd->lrd_client_gen);
2052 /* old reply data from a disconnected client */
2055 ted = &exp->exp_target_data;
2056 mutex_lock(&ted->ted_lcd_lock);
2058 /* create in-memory reply_data and link it to
2059 * target export's reply list */
2060 rc = tgt_set_reply_slot(tgt, idx);
2062 mutex_unlock(&ted->ted_lcd_lock);
2065 trd->trd_reply = *lrd;
/* pre-versions are not stored in REPLY_DATA; start from zero */
2066 trd->trd_pre_versions[0] = 0;
2067 trd->trd_pre_versions[1] = 0;
2068 trd->trd_pre_versions[2] = 0;
2069 trd->trd_pre_versions[3] = 0;
2070 trd->trd_index = idx;
2072 list_add(&trd->trd_list, &ted->ted_reply_list);
2073 ted->ted_reply_cnt++;
2074 if (ted->ted_reply_cnt > ted->ted_reply_max)
2075 ted->ted_reply_max = ted->ted_reply_cnt;
2077 CDEBUG(D_HA, "%s: restore reply %p: xid %llu, "
2078 "transno %llu, client gen %u, slot idx %d\n",
2079 tgt_name(tgt), trd, lrd->lrd_xid,
2080 lrd->lrd_transno, lrd->lrd_client_gen,
2083 /* update export last committed transation */
2084 exp->exp_last_committed = max(exp->exp_last_committed,
2086 /* Update lcd_last_transno as well for check in
2087 * tgt_release_reply_data() or the latest client
2088 * transno can be lost.
2090 ted->ted_lcd->lcd_last_transno =
2091 max(ted->ted_lcd->lcd_last_transno,
2092 exp->exp_last_committed)
2094 mutex_unlock(&ted->ted_lcd_lock);
2095 class_export_put(exp);
2097 /* update target last committed transaction */
2098 spin_lock(&tgt->lut_translock);
2099 tgt->lut_last_transno = max(tgt->lut_last_transno,
2101 spin_unlock(&tgt->lut_translock);
2103 reply_data_recovered++;
2107 GOTO(out, rc = -ENOMEM);
2109 CDEBUG(D_INFO, "%s: %d reply data have been recovered\n",
2110 tgt_name(tgt), reply_data_recovered);
2113 spin_lock(&tgt->lut_translock);
2114 /* obd_last_committed is used for compatibility
2115 * with other lustre recovery code */
2116 tgt->lut_obd->obd_last_committed = tgt->lut_last_transno;
2117 spin_unlock(&tgt->lut_translock);
2123 cfs_hash_putref(hash);
/*
 * Scan the export's reply list for request @req.
 *
 * Two modes, selectable independently:
 *  - @lookup != 0: search for an entry with the same xid as @req
 *    (copied into @trd if found and @trd is non-NULL -- the copy
 *    happens on elided lines);
 *  - for increasing-xid clients on non-replay requests, also detect a
 *    "busy tag": an entry with the same tag but a NEWER xid than @req,
 *    which is reported via CERROR below.
 *
 * Caller is expected to hold ted_lcd_lock (see tgt_lookup_reply()).
 * NOTE(review): the return-value assembly and some branch lines are
 * elided from this listing.
 */
2131 static int tgt_check_lookup_req(struct ptlrpc_request *req, int lookup,
2132 struct tg_reply_data *trd)
2134 struct tg_export_data *ted = &req->rq_export->exp_target_data;
2135 struct lu_target *lut = class_exp2tgt(req->rq_export);
2136 __u16 tag = lustre_msg_get_tag(req->rq_reqmsg);
2138 struct tg_reply_data *reply;
2139 bool check_increasing;
2144 check_increasing = tgt_is_increasing_xid_client(req->rq_export) &&
2145 !(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY);
/* nothing to do when neither mode applies */
2146 if (!lookup && !check_increasing)
2149 list_for_each_entry(reply, &ted->ted_reply_list, trd_list) {
2150 if (lookup && reply->trd_reply.lrd_xid == req->rq_xid) {
2155 } else if (check_increasing && reply->trd_tag == tag &&
2156 reply->trd_reply.lrd_xid > req->rq_xid) {
2158 CERROR("%s: busy tag=%u req_xid=%llu, trd=%p: xid=%llu transno=%llu client_gen=%u slot_idx=%d: rc = %d\n",
2159 tgt_name(lut), tag, req->rq_xid, trd,
2160 reply->trd_reply.lrd_xid,
2161 reply->trd_reply.lrd_transno,
2162 reply->trd_reply.lrd_client_gen,
2163 reply->trd_index, rc)
2171 /* Look for a reply data matching specified request @req
2172 * A copy is returned in @trd if the pointer is not NULL
/*
 * Under ted_lcd_lock, consult the export's reply list for a saved
 * reply matching @req's xid (via tgt_check_lookup_req with lookup=1).
 * For non-replay requests whose xid is at or below exp_last_xid the
 * reply data may already have been released, which the last_xid check
 * below accounts for.  Returns the "found" status (exact return-value
 * assembly is on elided lines).
 */
2174 int tgt_lookup_reply(struct ptlrpc_request *req, struct tg_reply_data *trd)
2176 struct tg_export_data *ted = &req->rq_export->exp_target_data;
2178 bool not_replay = !(lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY);
2180 mutex_lock(&ted->ted_lcd_lock);
2181 if (not_replay && req->rq_xid <= req->rq_export->exp_last_xid) {
2182 /* A check for the last_xid is needed here in case there is
2183 * no reply data is left in the list. It may happen if another
2184 * RPC on another slot increased the last_xid between our
2185 * process_req_last_xid & tgt_lookup_reply calls */
2188 found = tgt_check_lookup_req(req, 1, trd);
2190 mutex_unlock(&ted->ted_lcd_lock);
2192 CDEBUG(D_TRACE, "%s: lookup reply xid %llu, found %d last_xid %llu\n",
2193 tgt_name(class_exp2tgt(req->rq_export)), req->rq_xid, found,
2194 req->rq_export->exp_last_xid);
2198 EXPORT_SYMBOL(tgt_lookup_reply);
/*
 * Release all reply data entries acknowledged by the client.
 *
 * The client reports in @rcvd_xid the highest xid whose reply it has
 * received; every saved entry with an xid at or below that value is no
 * longer needed for resend/reconstruct and is released.  Entries with
 * a newer xid are kept (the skip is on an elided line after the
 * comparison below).
 * NOTE(review): locking around the walk is elided from this listing.
 */
2200 int tgt_handle_received_xid(struct obd_export *exp, __u64 rcvd_xid)
2202 struct tg_export_data *ted = &exp->exp_target_data;
2203 struct lu_target *lut = class_exp2tgt(exp);
2204 struct tg_reply_data *trd, *tmp;
2207 list_for_each_entry_safe(trd, tmp, &ted->ted_reply_list, trd_list) {
2208 if (trd->trd_reply.lrd_xid > rcvd_xid)
/* statistics: count of entries reclaimed by xid acknowledgement */
2210 ted->ted_release_xid++;
2211 tgt_release_reply_data(lut, ted, trd);
2217 int tgt_handle_tag(struct ptlrpc_request *req)
2219 return tgt_check_lookup_req(req, 0, NULL);