4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #define DEBUG_SUBSYSTEM S_LMV
39 #include <linux/slab.h>
40 #include <linux/module.h>
41 #include <linux/init.h>
43 #include <linux/pagemap.h>
45 #include <asm/div64.h>
46 #include <linux/seq_file.h>
47 #include <linux/namei.h>
49 #include <liblustre.h>
52 #include <lustre/lustre_idl.h>
53 #include <obd_support.h>
54 #include <lustre_lib.h>
55 #include <lustre_net.h>
56 #include <obd_class.h>
57 #include <lprocfs_status.h>
58 #include <lustre_lite.h>
59 #include <lustre_fid.h>
60 #include "lmv_internal.h"
63 cfs_mem_cache_t *lmv_object_cache;
64 cfs_atomic_t lmv_object_count = CFS_ATOMIC_INIT(0);
66 static void lmv_activate_target(struct lmv_obd *lmv,
67 struct lmv_tgt_desc *tgt,
70 if (tgt->ltd_active == activate)
73 tgt->ltd_active = activate;
74 lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
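/*
 * Bookkeeping note: ld_active_tgt_count is the LMV's running count of
 * usable MDC targets; other paths (e.g. lmv_create() below) consult it
 * to fail early when no metadata target is currently reachable.
 */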
80 * -EINVAL  : the UUID cannot be found in the LMV's target list
81 * -ENOTCONN: the UUID is found, but the target connection is bad (!)
82 * -EBADF   : the UUID is found, but the OBD is of the wrong type (!)
84 static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
87 struct lmv_tgt_desc *tgt;
88 struct obd_device *obd;
93 CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
94 lmv, uuid->uuid, activate);
96 spin_lock(&lmv->lmv_lock);
97 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
98 if (tgt->ltd_exp == NULL)
101 CDEBUG(D_INFO, "Target idx %d is %s conn "LPX64"\n",
102 i, tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
104 if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
108 if (i == lmv->desc.ld_tgt_count)
109 GOTO(out_lmv_lock, rc = -EINVAL);
111 obd = class_exp2obd(tgt->ltd_exp);
113 GOTO(out_lmv_lock, rc = -ENOTCONN);
115 CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n",
116 obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd,
117 obd->obd_type->typ_name, i);
118 LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
120 if (tgt->ltd_active == activate) {
121 CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
122 activate ? "" : "in");
123 GOTO(out_lmv_lock, rc);
126 CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
127 activate ? "" : "in");
128 lmv_activate_target(lmv, tgt, activate);
132 spin_unlock(&lmv->lmv_lock);
136 static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid,
137 struct obd_connect_data *data)
139 struct lmv_tgt_desc *tgt;
143 LASSERT(data != NULL);
145 spin_lock(&lmv->lmv_lock);
146 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
147 if (tgt->ltd_exp == NULL)
150 if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
151 lmv->datas[tgt->ltd_idx] = *data;
155 spin_unlock(&lmv->lmv_lock);
159 struct obd_uuid *lmv_get_uuid(struct obd_export *exp) {
160 struct obd_device *obd = exp->exp_obd;
161 struct lmv_obd *lmv = &obd->u.lmv;
162 return obd_get_uuid(lmv->tgts[0].ltd_exp);
165 static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
166 enum obd_notify_event ev, void *data)
168 struct obd_connect_data *conn_data;
169 struct lmv_obd *lmv = &obd->u.lmv;
170 struct obd_uuid *uuid;
174 if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
175 CERROR("unexpected notification of %s %s!\n",
176 watched->obd_type->typ_name,
181 uuid = &watched->u.cli.cl_target_uuid;
182 if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
184 * Set MDC as active before notifying the observer, so the
185 * observer can use the MDC normally.
187 rc = lmv_set_mdc_active(lmv, uuid,
188 ev == OBD_NOTIFY_ACTIVE);
190 CERROR("%sactivation of %s failed: %d\n",
191 ev == OBD_NOTIFY_ACTIVE ? "" : "de",
195 } else if (ev == OBD_NOTIFY_OCD) {
196 conn_data = &watched->u.cli.cl_import->imp_connect_data;
199 * Set connect data to desired target, update exp_connect_flags.
201 rc = lmv_set_mdc_data(lmv, uuid, conn_data);
203 CERROR("can't set connect data to target %s, rc %d\n",
209 * XXX: Make sure that ocd_connect_flags from all targets are
210 * the same. Otherwise one of the MDTs is running the wrong version or
211 * something like that. --umka
213 obd->obd_self_export->exp_connect_flags =
214 conn_data->ocd_connect_flags;
217 else if (ev == OBD_NOTIFY_DISCON) {
219 * For disconnect event, flush fld cache for failout MDS case.
221 fld_client_flush(&lmv->lmv_fld);
225 * Pass the notification up the chain.
227 if (obd->obd_observer)
228 rc = obd_notify(obd->obd_observer, watched, ev, data);
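/*
 * In short: MDC-level events (active/inactive, connect data updates and
 * disconnects) are first applied to the LMV's own target state above and
 * then forwarded unchanged to the observer stacked on top of the LMV,
 * which is typically the llite client.
 */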
234 * This is a fake connect function. Its purpose is to initialize the LMV and
235 * tell the caller that everything is okay. The real connection is performed later.
237 static int lmv_connect(const struct lu_env *env,
238 struct obd_export **exp, struct obd_device *obd,
239 struct obd_uuid *cluuid, struct obd_connect_data *data,
243 struct proc_dir_entry *lmv_proc_dir;
245 struct lmv_obd *lmv = &obd->u.lmv;
246 struct lustre_handle conn = { 0 };
251 * We don't want to actually do the underlying connections more than
252 * once, so keep track.
255 if (lmv->refcount > 1) {
260 rc = class_connect(&conn, obd, cluuid);
262 CERROR("class_connection() returned %d\n", rc);
266 *exp = class_conn2export(&conn);
267 class_export_get(*exp);
271 lmv->cluuid = *cluuid;
274 lmv->conn_data = *data;
277 lmv_proc_dir = lprocfs_register("target_obds", obd->obd_proc_entry,
279 if (IS_ERR(lmv_proc_dir)) {
280 CERROR("could not register /proc/fs/lustre/%s/%s/target_obds.",
281 obd->obd_type->typ_name, obd->obd_name);
287 * All real clients should perform the actual connection right away, because
288 * it is possible that LMV will not have an opportunity to connect the targets,
289 * and the MDC stuff will be called directly, for instance while reading
290 * the ../mdc/../kbytesfree procfs file, etc.
292 if (data->ocd_connect_flags & OBD_CONNECT_REAL)
293 rc = lmv_check_connect(obd);
298 lprocfs_remove(&lmv_proc_dir);
305 static void lmv_set_timeouts(struct obd_device *obd)
307 struct lmv_tgt_desc *tgts;
312 if (lmv->server_timeout == 0)
315 if (lmv->connected == 0)
318 for (i = 0, tgts = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgts++) {
319 if (tgts->ltd_exp == NULL)
322 obd_set_info_async(NULL, tgts->ltd_exp, sizeof(KEY_INTERMDS),
323 KEY_INTERMDS, 0, NULL, NULL);
327 static int lmv_init_ea_size(struct obd_export *exp, int easize,
328 int def_easize, int cookiesize)
330 struct obd_device *obd = exp->exp_obd;
331 struct lmv_obd *lmv = &obd->u.lmv;
337 if (lmv->max_easize < easize) {
338 lmv->max_easize = easize;
341 if (lmv->max_def_easize < def_easize) {
342 lmv->max_def_easize = def_easize;
345 if (lmv->max_cookiesize < cookiesize) {
346 lmv->max_cookiesize = cookiesize;
352 if (lmv->connected == 0)
355 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
356 if (lmv->tgts[i].ltd_exp == NULL) {
357 CWARN("%s: NULL export for %d\n", obd->obd_name, i);
361 rc = md_init_ea_size(lmv->tgts[i].ltd_exp, easize, def_easize,
364 CERROR("obd_init_ea_size() failed on MDT target %d, "
365 "error %d.\n", i, rc);
372 #define MAX_STRING_SIZE 128
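/*
 * Rough sequence of lmv_connect_mdc() below: look up the already-attached
 * MDC device for the target UUID, connect to it, initialize the FID client
 * and register the export as an FLD target, register the LMV as the MDC's
 * observer, notify our own observer about the new active target, and
 * finally record the export/connect data and add a procfs symlink.
 */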
374 int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
377 struct proc_dir_entry *lmv_proc_dir;
379 struct lmv_obd *lmv = &obd->u.lmv;
380 struct obd_uuid *cluuid = &lmv->cluuid;
381 struct obd_connect_data *mdc_data = NULL;
382 struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" };
383 struct obd_device *mdc_obd;
384 struct obd_export *mdc_exp;
385 struct lu_fld_target target;
389 mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
392 CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
396 CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
397 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
398 tgt->ltd_uuid.uuid, obd->obd_uuid.uuid,
401 if (!mdc_obd->obd_set_up) {
402 CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
406 rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid,
407 &lmv->conn_data, NULL);
409 CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
414 * Init fid sequence client for this mdc and add new fld target.
416 rc = obd_fid_init(mdc_exp);
420 target.ft_srv = NULL;
421 target.ft_exp = mdc_exp;
422 target.ft_idx = tgt->ltd_idx;
424 fld_client_add_target(&lmv->lmv_fld, &target);
426 mdc_data = &class_exp2cliimp(mdc_exp)->imp_connect_data;
428 rc = obd_register_observer(mdc_obd, obd);
430 obd_disconnect(mdc_exp);
431 CERROR("target %s register_observer error %d\n",
432 tgt->ltd_uuid.uuid, rc);
436 if (obd->obd_observer) {
438 * Tell the observer about the new target.
440 rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd,
441 OBD_NOTIFY_ACTIVE, (void *)(tgt - lmv->tgts));
443 obd_disconnect(mdc_exp);
449 tgt->ltd_exp = mdc_exp;
450 lmv->desc.ld_active_tgt_count++;
453 * Copy connect data, it may be used later.
455 lmv->datas[tgt->ltd_idx] = *mdc_data;
457 md_init_ea_size(tgt->ltd_exp, lmv->max_easize,
458 lmv->max_def_easize, lmv->max_cookiesize);
460 CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
461 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
462 cfs_atomic_read(&obd->obd_refcount));
465 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
467 struct proc_dir_entry *mdc_symlink;
469 LASSERT(mdc_obd->obd_type != NULL);
470 LASSERT(mdc_obd->obd_type->typ_name != NULL);
471 mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name,
474 mdc_obd->obd_type->typ_name,
476 if (mdc_symlink == NULL) {
477 CERROR("Could not register LMV target "
478 "/proc/fs/lustre/%s/%s/target_obds/%s.",
479 obd->obd_type->typ_name, obd->obd_name,
481 lprocfs_remove(&lmv_proc_dir);
489 int lmv_add_target(struct obd_device *obd, struct obd_uuid *tgt_uuid)
491 struct lmv_obd *lmv = &obd->u.lmv;
492 struct lmv_tgt_desc *tgt;
496 CDEBUG(D_CONFIG, "Target uuid: %s.\n", tgt_uuid->uuid);
500 if (lmv->desc.ld_active_tgt_count >= LMV_MAX_TGT_COUNT) {
501 lmv_init_unlock(lmv);
502 CERROR("Can't add %s, LMV module compiled for %d MDCs. "
503 "That many MDCs already configured.\n",
504 tgt_uuid->uuid, LMV_MAX_TGT_COUNT);
507 if (lmv->desc.ld_tgt_count == 0) {
508 struct obd_device *mdc_obd;
510 mdc_obd = class_find_client_obd(tgt_uuid, LUSTRE_MDC_NAME,
513 lmv_init_unlock(lmv);
514 CERROR("Target %s not attached\n", tgt_uuid->uuid);
518 spin_lock(&lmv->lmv_lock);
519 tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
520 tgt->ltd_uuid = *tgt_uuid;
521 spin_unlock(&lmv->lmv_lock);
523 if (lmv->connected) {
524 rc = lmv_connect_mdc(obd, tgt);
526 spin_lock(&lmv->lmv_lock);
527 lmv->desc.ld_tgt_count--;
528 memset(tgt, 0, sizeof(*tgt));
529 spin_unlock(&lmv->lmv_lock);
531 int easize = sizeof(struct lmv_stripe_md) +
532 lmv->desc.ld_tgt_count *
533 sizeof(struct lu_fid);
534 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
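/*
 * The EA size grows with the target count because a striped directory's
 * lmv_stripe_md carries one lu_fid per configured target; recomputing it
 * here keeps the client-side EA buffers large enough for the new layout.
 */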
538 lmv_init_unlock(lmv);
542 int lmv_check_connect(struct obd_device *obd)
544 struct lmv_obd *lmv = &obd->u.lmv;
545 struct lmv_tgt_desc *tgt;
555 if (lmv->connected) {
556 lmv_init_unlock(lmv);
560 if (lmv->desc.ld_tgt_count == 0) {
561 lmv_init_unlock(lmv);
562 CERROR("%s: no targets configured.\n", obd->obd_name);
566 CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
567 lmv->cluuid.uuid, obd->obd_name);
569 LASSERT(lmv->tgts != NULL);
571 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
572 rc = lmv_connect_mdc(obd, tgt);
577 lmv_set_timeouts(obd);
578 class_export_put(lmv->exp);
580 easize = lmv_get_easize(lmv);
581 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
582 lmv_init_unlock(lmv);
591 --lmv->desc.ld_active_tgt_count;
592 rc2 = obd_disconnect(tgt->ltd_exp);
594 CERROR("LMV target %s disconnect on "
595 "MDC idx %d: error %d\n",
596 tgt->ltd_uuid.uuid, i, rc2);
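/*
 * The error path above appears to walk back over the targets that were
 * already connected, dropping each from the active count and disconnecting
 * it, before finally tearing down the LMV's own export below.
 */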
600 class_disconnect(lmv->exp);
601 lmv_init_unlock(lmv);
605 static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
608 struct proc_dir_entry *lmv_proc_dir;
610 struct lmv_obd *lmv = &obd->u.lmv;
611 struct obd_device *mdc_obd;
615 LASSERT(tgt != NULL);
616 LASSERT(obd != NULL);
618 mdc_obd = class_exp2obd(tgt->ltd_exp);
621 mdc_obd->obd_force = obd->obd_force;
622 mdc_obd->obd_fail = obd->obd_fail;
623 mdc_obd->obd_no_recov = obd->obd_no_recov;
627 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
629 struct proc_dir_entry *mdc_symlink;
631 mdc_symlink = lprocfs_srch(lmv_proc_dir, mdc_obd->obd_name);
633 lprocfs_remove(&mdc_symlink);
635 CERROR("/proc/fs/lustre/%s/%s/target_obds/%s missing\n",
636 obd->obd_type->typ_name, obd->obd_name,
641 rc = obd_fid_fini(tgt->ltd_exp);
643 CERROR("Can't finanize fids factory\n");
645 CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
646 tgt->ltd_exp->exp_obd->obd_name,
647 tgt->ltd_exp->exp_obd->obd_uuid.uuid);
649 obd_register_observer(tgt->ltd_exp->exp_obd, NULL);
650 rc = obd_disconnect(tgt->ltd_exp);
652 if (tgt->ltd_active) {
653 CERROR("Target %s disconnect error %d\n",
654 tgt->ltd_uuid.uuid, rc);
658 lmv_activate_target(lmv, tgt, 0);
663 static int lmv_disconnect(struct obd_export *exp)
665 struct obd_device *obd = class_exp2obd(exp);
667 struct proc_dir_entry *lmv_proc_dir;
669 struct lmv_obd *lmv = &obd->u.lmv;
678 * Only disconnect the underlying layers on the final disconnect.
681 if (lmv->refcount != 0)
684 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
685 if (lmv->tgts[i].ltd_exp == NULL)
687 lmv_disconnect_mdc(obd, &lmv->tgts[i]);
691 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
693 lprocfs_remove(&lmv_proc_dir);
695 CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n",
696 obd->obd_type->typ_name, obd->obd_name);
702 * This is the case when no real connection is established by
703 * lmv_check_connect().
706 class_export_put(exp);
707 rc = class_disconnect(exp);
708 if (lmv->refcount == 0)
713 static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
714 int len, void *karg, void *uarg)
716 struct obd_device *obddev = class_exp2obd(exp);
717 struct lmv_obd *lmv = &obddev->u.lmv;
721 int count = lmv->desc.ld_tgt_count;
728 case IOC_OBD_STATFS: {
729 struct obd_ioctl_data *data = karg;
730 struct obd_device *mdc_obd;
731 struct obd_statfs stat_buf = {0};
734 memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
735 if (index >= count)
738 if (!lmv->tgts[index].ltd_active)
741 mdc_obd = class_exp2obd(lmv->tgts[index].ltd_exp);
746 if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
747 min((int) data->ioc_plen2,
748 (int) sizeof(struct obd_uuid))))
751 rc = obd_statfs(NULL, lmv->tgts[index].ltd_exp, &stat_buf,
752 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
756 if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
757 min((int) data->ioc_plen1,
758 (int) sizeof(stat_buf))))
762 case OBD_IOC_QUOTACTL: {
763 struct if_quotactl *qctl = karg;
764 struct lmv_tgt_desc *tgt = NULL;
765 struct obd_quotactl *oqctl;
767 if (qctl->qc_valid == QC_MDTIDX) {
768 if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
771 tgt = &lmv->tgts[qctl->qc_idx];
774 } else if (qctl->qc_valid == QC_UUID) {
775 for (i = 0; i < count; i++) {
777 if (!obd_uuid_equals(&tgt->ltd_uuid,
781 if (tgt->ltd_exp == NULL)
793 LASSERT(tgt && tgt->ltd_exp);
794 OBD_ALLOC_PTR(oqctl);
798 QCTL_COPY(oqctl, qctl);
799 rc = obd_quotactl(tgt->ltd_exp, oqctl);
801 QCTL_COPY(qctl, oqctl);
802 qctl->qc_valid = QC_MDTIDX;
803 qctl->obd_uuid = tgt->ltd_uuid;
808 case OBD_IOC_CHANGELOG_SEND:
809 case OBD_IOC_CHANGELOG_CLEAR: {
810 struct ioc_changelog *icc = karg;
812 if (icc->icc_mdtindex >= count)
815 rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex].ltd_exp,
816 sizeof(*icc), icc, NULL);
819 case LL_IOC_GET_CONNECT_FLAGS: {
820 rc = obd_iocontrol(cmd, lmv->tgts[0].ltd_exp, len, karg, uarg);
825 for (i = 0; i < count; i++) {
827 struct obd_device *mdc_obd;
829 if (lmv->tgts[i].ltd_exp == NULL)
831 /* ll_umount_begin() sets the force flag on the LMV, but not on the
832 * MDCs. Let's pass it through. */
833 mdc_obd = class_exp2obd(lmv->tgts[i].ltd_exp);
834 mdc_obd->obd_force = obddev->obd_force;
835 err = obd_iocontrol(cmd, lmv->tgts[i].ltd_exp, len,
837 if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
840 if (lmv->tgts[i].ltd_active) {
841 CERROR("error: iocontrol MDC %s on MDT"
842 "idx %d cmd %x: err = %d\n",
843 lmv->tgts[i].ltd_uuid.uuid,
858 static int lmv_all_chars_policy(int count, const char *name,
869 static int lmv_nid_policy(struct lmv_obd *lmv)
871 struct obd_import *imp;
875 * XXX: To get nid we assume that underlying obd device is mdc.
877 imp = class_exp2cliimp(lmv->tgts[0].ltd_exp);
878 id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32);
879 return id % lmv->desc.ld_tgt_count;
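/*
 * lmv_nid_policy() folds the upper and lower 32 bits of the client's own
 * NID together and takes the result modulo the target count, so every
 * client deterministically prefers one MDT of its own for new directories.
 */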
882 static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
883 placement_policy_t placement)
886 case PLACEMENT_CHAR_POLICY:
887 return lmv_all_chars_policy(lmv->desc.ld_tgt_count,
889 op_data->op_namelen);
890 case PLACEMENT_NID_POLICY:
891 return lmv_nid_policy(lmv);
897 CERROR("Unsupported placement policy %x\n", placement);
902 * This is the _inode_ placement policy function (not the name-based one).
904 static int lmv_placement_policy(struct obd_device *obd,
905 struct md_op_data *op_data,
908 struct lmv_obd *lmv = &obd->u.lmv;
909 struct lmv_object *obj;
913 LASSERT(mds != NULL);
915 if (lmv->desc.ld_tgt_count == 1) {
921 * Allocate new fid on target according to operation type and parent
924 obj = lmv_object_find(obd, &op_data->op_fid1);
925 if (obj != NULL || op_data->op_name == NULL ||
926 op_data->op_opc != LUSTRE_OPC_MKDIR) {
928 * Allocate fid for non-dir or for null name or for case parent
935 * If we have this flag turned on and we see that the
936 * parent dir is split, it means that the caller has not
937 * noticed the split yet. This is a race and we would like
938 * to let the caller know about it.
940 if (op_data->op_bias & MDS_CHECK_SPLIT)
945 * Allocate new fid on same mds where parent fid is located and
946 * where operation will be sent. In case of split dir, ->op_fid1
947 * and ->op_mds here will contain fid and mds of slave directory
948 * object (assigned by caller).
950 *mds = op_data->op_mds;
954 * Parent directory is not split and we want to create a
955 * directory in it. Let's calculate where to place it according
956 * to operation data @op_data.
958 *mds = lmv_choose_mds(lmv, op_data, lmv->lmv_placement);
963 CERROR("Can't choose MDS, err = %d\n", rc);
965 LASSERT(*mds < lmv->desc.ld_tgt_count);
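/*
 * Summary of the policy above: with a single target there is nothing to
 * choose; otherwise only new directories (LUSTRE_OPC_MKDIR with a name)
 * are spread across MDTs by the configured placement policy, while other
 * objects are placed on the same MDS as their parent (or on the slave
 * object's MDS chosen by the caller for an already-split parent).
 */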
971 int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
974 struct lmv_tgt_desc *tgt;
978 tgt = lmv_get_target(lmv, mds);
981 * New seq allocation and FLD setup should be atomic; otherwise we may find
982 * on the server that the seq in the newly allocated fid is not yet known.
984 mutex_lock(&tgt->ltd_fid_mutex);
986 if (!tgt->ltd_active)
987 GOTO(out, rc = -ENODEV);
990 * Ask the underlying tgt layer to allocate a new fid.
992 rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
994 LASSERT(fid_is_sane(fid));
1000 mutex_unlock(&tgt->ltd_fid_mutex);
1004 int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
1005 struct md_op_data *op_data)
1007 struct obd_device *obd = class_exp2obd(exp);
1008 struct lmv_obd *lmv = &obd->u.lmv;
1013 LASSERT(op_data != NULL);
1014 LASSERT(fid != NULL);
1016 rc = lmv_placement_policy(obd, op_data, &mds);
1018 CERROR("Can't get target for allocating fid, "
1023 rc = __lmv_fid_alloc(lmv, fid, mds);
1025 CERROR("Can't alloc new fid, rc %d\n", rc);
1032 static int lmv_fid_delete(struct obd_export *exp, const struct lu_fid *fid)
1035 LASSERT(exp != NULL && fid != NULL);
1036 if (lmv_object_delete(exp, fid)) {
1037 CDEBUG(D_INODE, "Object "DFID" is destroyed.\n",
1043 static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
1045 struct lmv_obd *lmv = &obd->u.lmv;
1046 struct lprocfs_static_vars lvars;
1047 struct lmv_desc *desc;
1052 if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
1053 CERROR("LMV setup requires a descriptor\n");
1057 desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
1058 if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
1059 CERROR("Lmv descriptor size wrong: %d > %d\n",
1060 (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
1064 lmv->tgts_size = LMV_MAX_TGT_COUNT * sizeof(struct lmv_tgt_desc);
1066 OBD_ALLOC(lmv->tgts, lmv->tgts_size);
1067 if (lmv->tgts == NULL)
1070 for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
1071 mutex_init(&lmv->tgts[i].ltd_fid_mutex);
1072 lmv->tgts[i].ltd_idx = i;
1075 lmv->datas_size = LMV_MAX_TGT_COUNT * sizeof(struct obd_connect_data);
1077 OBD_ALLOC(lmv->datas, lmv->datas_size);
1078 if (lmv->datas == NULL)
1079 GOTO(out_free_tgts, rc = -ENOMEM);
1081 obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
1082 lmv->desc.ld_tgt_count = 0;
1083 lmv->desc.ld_active_tgt_count = 0;
1084 lmv->max_cookiesize = 0;
1085 lmv->max_def_easize = 0;
1086 lmv->max_easize = 0;
1087 lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
1089 spin_lock_init(&lmv->lmv_lock);
1090 mutex_init(&lmv->init_mutex);
1092 rc = lmv_object_setup(obd);
1094 CERROR("Can't setup LMV object manager, error %d.\n", rc);
1095 GOTO(out_free_datas, rc);
1098 lprocfs_lmv_init_vars(&lvars);
1099 lprocfs_obd_setup(obd, lvars.obd_vars);
1102 rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
1103 0444, &lmv_proc_target_fops, obd);
1105 CWARN("%s: error adding LMV target_obd file: rc = %d\n",
1109 rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
1110 LUSTRE_CLI_FLD_HASH_DHT);
1112 CERROR("Can't init FLD, err %d\n", rc);
1113 GOTO(out_free_datas, rc);
1119 OBD_FREE(lmv->datas, lmv->datas_size);
1122 OBD_FREE(lmv->tgts, lmv->tgts_size);
1127 static int lmv_cleanup(struct obd_device *obd)
1129 struct lmv_obd *lmv = &obd->u.lmv;
1132 fld_client_fini(&lmv->lmv_fld);
1133 lmv_object_cleanup(obd);
1134 OBD_FREE(lmv->datas, lmv->datas_size);
1135 OBD_FREE(lmv->tgts, lmv->tgts_size);
1140 static int lmv_process_config(struct obd_device *obd, obd_count len, void *buf)
1142 struct lustre_cfg *lcfg = buf;
1143 struct obd_uuid tgt_uuid;
1147 switch(lcfg->lcfg_command) {
1149 if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(tgt_uuid.uuid))
1150 GOTO(out, rc = -EINVAL);
1152 obd_str2uuid(&tgt_uuid, lustre_cfg_string(lcfg, 1));
1153 rc = lmv_add_target(obd, &tgt_uuid);
1156 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
1157 GOTO(out, rc = -EINVAL);
1164 static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
1165 struct obd_statfs *osfs, __u64 max_age, __u32 flags)
1167 struct obd_device *obd = class_exp2obd(exp);
1168 struct lmv_obd *lmv = &obd->u.lmv;
1169 struct obd_statfs *temp;
1174 rc = lmv_check_connect(obd);
1178 OBD_ALLOC(temp, sizeof(*temp));
1182 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1183 if (lmv->tgts[i].ltd_exp == NULL)
1186 rc = obd_statfs(env, lmv->tgts[i].ltd_exp, temp,
1189 CERROR("can't stat MDS #%d (%s), error %d\n", i,
1190 lmv->tgts[i].ltd_exp->exp_obd->obd_name,
1192 GOTO(out_free_temp, rc);
1197 osfs->os_bavail += temp->os_bavail;
1198 osfs->os_blocks += temp->os_blocks;
1199 osfs->os_ffree += temp->os_ffree;
1200 osfs->os_files += temp->os_files;
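/*
 * Aggregation: block and inode counters from every MDT are summed into the
 * caller's obd_statfs, so the LMV reports cluster-wide metadata capacity.
 */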
1206 OBD_FREE(temp, sizeof(*temp));
1210 static int lmv_getstatus(struct obd_export *exp,
1212 struct obd_capa **pc)
1214 struct obd_device *obd = exp->exp_obd;
1215 struct lmv_obd *lmv = &obd->u.lmv;
1219 rc = lmv_check_connect(obd);
1223 rc = md_getstatus(lmv->tgts[0].ltd_exp, fid, pc);
1227 static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
1228 struct obd_capa *oc, obd_valid valid, const char *name,
1229 const char *input, int input_size, int output_size,
1230 int flags, struct ptlrpc_request **request)
1232 struct obd_device *obd = exp->exp_obd;
1233 struct lmv_obd *lmv = &obd->u.lmv;
1234 struct lmv_tgt_desc *tgt;
1238 rc = lmv_check_connect(obd);
1242 tgt = lmv_find_target(lmv, fid);
1244 RETURN(PTR_ERR(tgt));
1246 rc = md_getxattr(tgt->ltd_exp, fid, oc, valid, name, input,
1247 input_size, output_size, flags, request);
1252 static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
1253 struct obd_capa *oc, obd_valid valid, const char *name,
1254 const char *input, int input_size, int output_size,
1255 int flags, __u32 suppgid,
1256 struct ptlrpc_request **request)
1258 struct obd_device *obd = exp->exp_obd;
1259 struct lmv_obd *lmv = &obd->u.lmv;
1260 struct lmv_tgt_desc *tgt;
1264 rc = lmv_check_connect(obd);
1268 tgt = lmv_find_target(lmv, fid);
1270 RETURN(PTR_ERR(tgt));
1272 rc = md_setxattr(tgt->ltd_exp, fid, oc, valid, name, input,
1273 input_size, output_size, flags, suppgid,
1279 static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
1280 struct ptlrpc_request **request)
1282 struct obd_device *obd = exp->exp_obd;
1283 struct lmv_obd *lmv = &obd->u.lmv;
1284 struct lmv_tgt_desc *tgt;
1285 struct lmv_object *obj;
1290 rc = lmv_check_connect(obd);
1294 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1296 RETURN(PTR_ERR(tgt));
1298 if (op_data->op_flags & MF_GET_MDT_IDX) {
1299 op_data->op_mds = tgt->ltd_idx;
1303 rc = md_getattr(tgt->ltd_exp, op_data, request);
1307 obj = lmv_object_find_lock(obd, &op_data->op_fid1);
1309 CDEBUG(D_INODE, "GETATTR for "DFID" %s\n", PFID(&op_data->op_fid1),
1310 obj ? "(split)" : "");
1313 * If the object is split, then we loop over all the slaves and gather the size
1314 * attribute. In an ideal world we would also gather the mds field from
1315 * all slaves, as the object is spread over the cluster and this is
1316 * definitely interesting information that it would not be good to lose,
1320 struct mdt_body *body;
1322 if (*request == NULL) {
1323 lmv_object_put(obj);
1327 body = req_capsule_server_get(&(*request)->rq_pill,
1329 LASSERT(body != NULL);
1331 for (i = 0; i < obj->lo_objcount; i++) {
1332 if (lmv->tgts[i].ltd_exp == NULL) {
1333 CWARN("%s: NULL export for %d\n",
1339 * Skip master object.
1341 if (lu_fid_eq(&obj->lo_fid, &obj->lo_stripes[i].ls_fid))
1344 body->size += obj->lo_stripes[i].ls_size;
1347 lmv_object_put_unlock(obj);
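/*
 * Net effect: for a split directory the size returned to the caller is the
 * sum of all slave objects' sizes, while the master object itself is
 * skipped in the loop above.
 */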
1353 static int lmv_change_cbdata(struct obd_export *exp, const struct lu_fid *fid,
1354 ldlm_iterator_t it, void *data)
1356 struct obd_device *obd = exp->exp_obd;
1357 struct lmv_obd *lmv = &obd->u.lmv;
1362 rc = lmv_check_connect(obd);
1366 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1369 * With CMD every object can have two locks in different namespaces: a
1370 * lookup lock in the namespace of the MDS storing the direntry, and an
1371 * update/open lock in the namespace of the MDS storing the inode.
1373 for (i = 0; i < lmv->desc.ld_tgt_count; i++)
1374 md_change_cbdata(lmv->tgts[i].ltd_exp, fid, it, data);
1379 static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
1380 ldlm_iterator_t it, void *data)
1382 struct obd_device *obd = exp->exp_obd;
1383 struct lmv_obd *lmv = &obd->u.lmv;
1388 rc = lmv_check_connect(obd);
1392 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1395 * With CMD every object can have two locks in different namespaces: a
1396 * lookup lock in the namespace of the MDS storing the direntry, and an
1397 * update/open lock in the namespace of the MDS storing the inode.
1399 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1400 rc = md_find_cbdata(lmv->tgts[i].ltd_exp, fid, it, data);
1409 static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
1410 struct md_open_data *mod, struct ptlrpc_request **request)
1412 struct obd_device *obd = exp->exp_obd;
1413 struct lmv_obd *lmv = &obd->u.lmv;
1414 struct lmv_tgt_desc *tgt;
1418 rc = lmv_check_connect(obd);
1422 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1424 RETURN(PTR_ERR(tgt));
1426 CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
1427 rc = md_close(tgt->ltd_exp, op_data, mod, request);
1432 * Called when the MDS returns -ERESTART on create or open, which means that
1433 * the directory is split and its LMV presentation object has to be updated.
1435 int lmv_handle_split(struct obd_export *exp, const struct lu_fid *fid)
1437 struct obd_device *obd = exp->exp_obd;
1438 struct lmv_obd *lmv = &obd->u.lmv;
1439 struct ptlrpc_request *req = NULL;
1440 struct lmv_tgt_desc *tgt;
1441 struct lmv_object *obj;
1442 struct lustre_md md;
1443 struct md_op_data *op_data;
1450 mealen = lmv_get_easize(lmv);
1452 valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA | OBD_MD_MEA;
1454 tgt = lmv_find_target(lmv, fid);
1456 RETURN(PTR_ERR(tgt));
1459 * Time to update mea of parent fid.
1462 OBD_ALLOC_PTR(op_data);
1463 if (op_data == NULL)
1466 op_data->op_fid1 = *fid;
1467 op_data->op_mode = mealen;
1468 op_data->op_valid = valid;
1470 rc = md_getattr(tgt->ltd_exp, op_data, &req);
1471 OBD_FREE_PTR(op_data);
1473 CERROR("md_getattr() failed, error %d\n", rc);
1477 rc = md_get_lustre_md(tgt->ltd_exp, req, NULL, exp, &md);
1479 CERROR("md_get_lustre_md() failed, error %d\n", rc);
1484 GOTO(cleanup, rc = -ENODATA);
1486 obj = lmv_object_create(exp, fid, md.mea);
1490 lmv_object_put(obj);
1492 obd_free_memmd(exp, (void *)&md.mea);
1496 ptlrpc_req_finished(req);
1500 int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
1501 const void *data, int datalen, int mode, __u32 uid,
1502 __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
1503 struct ptlrpc_request **request)
1505 struct obd_device *obd = exp->exp_obd;
1506 struct lmv_obd *lmv = &obd->u.lmv;
1507 struct lmv_tgt_desc *tgt;
1508 struct lmv_object *obj;
1514 rc = lmv_check_connect(obd);
1518 if (!lmv->desc.ld_active_tgt_count)
1524 obj = lmv_object_find(obd, &op_data->op_fid1);
1526 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
1527 op_data->op_name, op_data->op_namelen);
1528 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
1529 op_data->op_bias &= ~MDS_CHECK_SPLIT;
1530 op_data->op_mds = obj->lo_stripes[sidx].ls_mds;
1531 tgt = lmv_get_target(lmv, op_data->op_mds);
1532 lmv_object_put(obj);
1534 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1535 op_data->op_bias |= MDS_CHECK_SPLIT;
1536 op_data->op_mds = tgt->ltd_idx;
1540 RETURN(PTR_ERR(tgt));
1542 rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
1543 if (rc == -ERESTART)
1548 CDEBUG(D_INODE, "CREATE '%*s' on "DFID" -> mds #%x\n",
1549 op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
1552 op_data->op_flags |= MF_MDC_CANCEL_FID1;
1553 rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
1554 cap_effective, rdev, request);
1556 if (*request == NULL)
1558 CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
1559 } else if (rc == -ERESTART) {
1560 LASSERT(*request != NULL);
1561 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
1562 "Got -ERESTART during create!\n");
1563 ptlrpc_req_finished(*request);
1567 * Directory got split. Time to update local object and repeat
1568 * the request with proper MDS.
1570 rc = lmv_handle_split(exp, &op_data->op_fid1);
1572 rc = lmv_allocate_slaves(obd, &op_data->op_fid1,
1573 op_data, &op_data->op_fid2);
1582 static int lmv_done_writing(struct obd_export *exp,
1583 struct md_op_data *op_data,
1584 struct md_open_data *mod)
1586 struct obd_device *obd = exp->exp_obd;
1587 struct lmv_obd *lmv = &obd->u.lmv;
1588 struct lmv_tgt_desc *tgt;
1592 rc = lmv_check_connect(obd);
1596 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1598 RETURN(PTR_ERR(tgt));
1600 rc = md_done_writing(tgt->ltd_exp, op_data, mod);
1605 lmv_enqueue_slaves(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1606 struct lookup_intent *it, struct md_op_data *op_data,
1607 struct lustre_handle *lockh, void *lmm, int lmmsize)
1609 struct obd_device *obd = exp->exp_obd;
1610 struct lmv_obd *lmv = &obd->u.lmv;
1611 struct lmv_stripe_md *mea = op_data->op_mea1;
1612 struct md_op_data *op_data2;
1613 struct lmv_tgt_desc *tgt;
1618 OBD_ALLOC_PTR(op_data2);
1619 if (op_data2 == NULL)
1622 LASSERT(mea != NULL);
1623 for (i = 0; i < mea->mea_count; i++) {
1624 memset(op_data2, 0, sizeof(*op_data2));
1625 op_data2->op_fid1 = mea->mea_ids[i];
1626 op_data2->op_bias = 0;
1628 tgt = lmv_find_target(lmv, &op_data2->op_fid1);
1630 GOTO(cleanup, rc = PTR_ERR(tgt));
1632 if (tgt->ltd_exp == NULL)
1635 rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data2,
1636 lockh + i, lmm, lmmsize, NULL, 0);
1638 CDEBUG(D_INODE, "Take lock on slave "DFID" -> %d/%d\n",
1639 PFID(&mea->mea_ids[i]), rc, it->d.lustre.it_status);
1644 if (it->d.lustre.it_data) {
1645 struct ptlrpc_request *req;
1646 req = (struct ptlrpc_request *)it->d.lustre.it_data;
1647 ptlrpc_req_finished(req);
1650 if (it->d.lustre.it_status)
1651 GOTO(cleanup, rc = it->d.lustre.it_status);
1656 OBD_FREE_PTR(op_data2);
1660 * Drop all taken locks.
1663 if (lockh[i].cookie)
1664 ldlm_lock_decref(lockh + i, einfo->ei_mode);
1665 lockh[i].cookie = 0;
1672 lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1673 struct lookup_intent *it, struct md_op_data *op_data,
1674 struct lustre_handle *lockh, void *lmm, int lmmsize,
1675 int extra_lock_flags)
1677 struct ptlrpc_request *req = it->d.lustre.it_data;
1678 struct obd_device *obd = exp->exp_obd;
1679 struct lmv_obd *lmv = &obd->u.lmv;
1680 struct lustre_handle plock;
1681 struct lmv_tgt_desc *tgt;
1682 struct md_op_data *rdata;
1684 struct mdt_body *body;
1689 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1690 LASSERT(body != NULL);
1692 if (!(body->valid & OBD_MD_MDS))
1695 CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n",
1696 LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1));
1699 * We got LOOKUP lock, but we really need attrs.
1701 pmode = it->d.lustre.it_lock_mode;
1702 LASSERT(pmode != 0);
1703 memcpy(&plock, lockh, sizeof(plock));
1704 it->d.lustre.it_lock_mode = 0;
1705 it->d.lustre.it_data = NULL;
1708 it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
1709 ptlrpc_req_finished(req);
1711 tgt = lmv_find_target(lmv, &fid1);
1713 GOTO(out, rc = PTR_ERR(tgt));
1715 OBD_ALLOC_PTR(rdata);
1717 GOTO(out, rc = -ENOMEM);
1719 rdata->op_fid1 = fid1;
1720 rdata->op_bias = MDS_CROSS_REF;
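/*
 * Re-enqueue on the MDS that actually holds the inode: the first reply
 * only granted a LOOKUP lock and flagged the object as remote
 * (OBD_MD_MDS), so we retry against that target with MDS_CROSS_REF set
 * and, once done, drop the parent LOOKUP lock taken on the first MDS.
 */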
1722 rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh,
1723 lmm, lmmsize, NULL, extra_lock_flags);
1724 OBD_FREE_PTR(rdata);
1727 ldlm_lock_decref(&plock, pmode);
1732 lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1733 struct lookup_intent *it, struct md_op_data *op_data,
1734 struct lustre_handle *lockh, void *lmm, int lmmsize,
1735 struct ptlrpc_request **req, __u64 extra_lock_flags)
1737 struct obd_device *obd = exp->exp_obd;
1738 struct lmv_obd *lmv = &obd->u.lmv;
1739 struct lmv_tgt_desc *tgt;
1740 struct lmv_object *obj;
1745 rc = lmv_check_connect(obd);
1749 CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n",
1750 LL_IT2STR(it), PFID(&op_data->op_fid1));
1752 if (op_data->op_mea1 && it && it->it_op == IT_UNLINK) {
1753 rc = lmv_enqueue_slaves(exp, einfo, it, op_data,
1754 lockh, lmm, lmmsize);
1758 obj = lmv_object_find(obd, &op_data->op_fid1);
1759 if (obj && op_data->op_namelen) {
1760 sidx = raw_name2idx(obj->lo_hashtype,
1762 (char *)op_data->op_name,
1763 op_data->op_namelen);
1764 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
1765 tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
1767 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1770 lmv_object_put(obj);
1773 RETURN(PTR_ERR(tgt));
1775 CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n",
1776 LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
1778 rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data, lockh,
1779 lmm, lmmsize, req, extra_lock_flags);
1781 if (rc == 0 && it && it->it_op == IT_OPEN) {
1782 rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh,
1783 lmm, lmmsize, extra_lock_flags);
1789 lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
1790 struct ptlrpc_request **request)
1792 struct ptlrpc_request *req = NULL;
1793 struct obd_device *obd = exp->exp_obd;
1794 struct lmv_obd *lmv = &obd->u.lmv;
1795 struct lu_fid rid = op_data->op_fid1;
1796 struct lmv_tgt_desc *tgt;
1797 struct mdt_body *body;
1798 struct lmv_object *obj;
1799 obd_valid valid = op_data->op_valid;
1805 rc = lmv_check_connect(obd);
1812 obj = lmv_object_find(obd, &rid);
1814 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
1815 op_data->op_name, op_data->op_namelen);
1816 rid = obj->lo_stripes[sidx].ls_fid;
1817 tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
1818 op_data->op_mds = obj->lo_stripes[sidx].ls_mds;
1819 valid &= ~OBD_MD_FLCKSPLIT;
1820 lmv_object_put(obj);
1822 tgt = lmv_find_target(lmv, &rid);
1823 valid |= OBD_MD_FLCKSPLIT;
1824 op_data->op_mds = tgt->ltd_idx;
1827 RETURN(PTR_ERR(tgt));
1829 CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" - "DFID" -> mds #%d\n",
1830 op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
1831 PFID(&rid), tgt->ltd_idx);
1833 op_data->op_valid = valid;
1834 op_data->op_fid1 = rid;
1835 rc = md_getattr_name(tgt->ltd_exp, op_data, request);
1837 body = req_capsule_server_get(&(*request)->rq_pill,
1839 LASSERT(body != NULL);
1841 if (body->valid & OBD_MD_MDS) {
1843 CDEBUG(D_INODE, "Request attrs for "DFID"\n",
1846 tgt = lmv_find_target(lmv, &rid);
1848 ptlrpc_req_finished(*request);
1849 RETURN(PTR_ERR(tgt));
1852 op_data->op_fid1 = rid;
1853 op_data->op_valid |= OBD_MD_FLCROSSREF;
1854 op_data->op_namelen = 0;
1855 op_data->op_name = NULL;
1856 rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
1857 ptlrpc_req_finished(*request);
1860 } else if (rc == -ERESTART) {
1861 LASSERT(*request != NULL);
1862 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
1863 "Got -ERESTART during getattr!\n");
1864 ptlrpc_req_finished(*request);
1868 * Directory got split. Time to update local object and repeat
1869 * the request with proper MDS.
1871 rc = lmv_handle_split(exp, &rid);
1878 #define md_op_data_fid(op_data, fl) \
1879 (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
1880 fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
1881 fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
1882 fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
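/*
 * Illustrative use of the helper above: md_op_data_fid(op_data,
 * MF_MDC_CANCEL_FID3) evaluates to &op_data->op_fid3, so the early-cancel
 * helpers below can map a MF_MDC_CANCEL_FID* flag to the fid whose locks
 * should be cancelled.
 */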
1885 static int lmv_early_cancel_slaves(struct obd_export *exp,
1886 struct md_op_data *op_data, int op_tgt,
1887 ldlm_mode_t mode, int bits, int flag)
1889 struct obd_device *obd = exp->exp_obd;
1890 struct lmv_obd *lmv = &obd->u.lmv;
1891 ldlm_policy_data_t policy = {{0}};
1892 struct lu_fid *op_fid;
1893 struct lu_fid *st_fid;
1894 struct lmv_tgt_desc *tgt;
1895 struct lmv_object *obj;
1900 op_fid = md_op_data_fid(op_data, flag);
1901 if (!fid_is_sane(op_fid))
1904 obj = lmv_object_find(obd, op_fid);
1908 policy.l_inodebits.bits = bits;
1909 for (i = 0; i < obj->lo_objcount; i++) {
1910 tgt = lmv_get_target(lmv, obj->lo_stripes[i].ls_mds);
1911 st_fid = &obj->lo_stripes[i].ls_fid;
1912 if (op_tgt != tgt->ltd_idx) {
1913 CDEBUG(D_INODE, "EARLY_CANCEL slave "DFID" -> mds #%d\n",
1914 PFID(st_fid), tgt->ltd_idx);
1915 rc = md_cancel_unused(tgt->ltd_exp, st_fid, &policy,
1916 mode, LCF_ASYNC, NULL);
1918 GOTO(out_put_obj, rc);
1921 "EARLY_CANCEL skip operation target %d on "DFID"\n",
1922 op_tgt, PFID(st_fid));
1924 * Do not cancel locks for the operation target; they will
1925 * be handled later in the underlying layer when calling
1926 * the function we run on behalf of.
1929 op_data->op_flags |= flag;
1934 lmv_object_put(obj);
1938 static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
1939 int op_tgt, ldlm_mode_t mode, int bits, int flag)
1941 struct lu_fid *fid = md_op_data_fid(op_data, flag);
1942 struct obd_device *obd = exp->exp_obd;
1943 struct lmv_obd *lmv = &obd->u.lmv;
1944 struct lmv_tgt_desc *tgt;
1945 ldlm_policy_data_t policy = {{0}};
1946 struct lmv_object *obj;
1950 if (!fid_is_sane(fid))
1953 obj = lmv_object_find(obd, fid);
1955 rc = lmv_early_cancel_slaves(exp, op_data, op_tgt, mode,
1957 lmv_object_put(obj);
1959 tgt = lmv_find_target(lmv, fid);
1961 RETURN(PTR_ERR(tgt));
1963 if (tgt->ltd_idx != op_tgt) {
1964 CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
1965 policy.l_inodebits.bits = bits;
1966 rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
1967 mode, LCF_ASYNC, NULL);
1970 "EARLY_CANCEL skip operation target %d on "DFID"\n",
1972 op_data->op_flags |= flag;
1981 * llite passes the fid of the target inode in op_data->op_fid1 and the fid of the directory in
1984 static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
1985 struct ptlrpc_request **request)
1987 struct obd_device *obd = exp->exp_obd;
1988 struct lmv_obd *lmv = &obd->u.lmv;
1989 struct lmv_tgt_desc *tgt;
1990 struct lmv_object *obj;
1997 rc = lmv_check_connect(obd);
2004 LASSERT(op_data->op_namelen != 0);
2006 CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n",
2007 PFID(&op_data->op_fid2), op_data->op_namelen,
2008 op_data->op_name, PFID(&op_data->op_fid1));
2010 obj = lmv_object_find(obd, &op_data->op_fid2);
2012 sidx = raw_name2idx(obj->lo_hashtype,
2015 op_data->op_namelen);
2016 op_data->op_fid2 = obj->lo_stripes[sidx].ls_fid;
2017 mds = obj->lo_stripes[sidx].ls_mds;
2018 lmv_object_put(obj);
2020 rc = lmv_fld_lookup(lmv, &op_data->op_fid2, &mds);
2025 CDEBUG(D_INODE, "Forward to mds #%x ("DFID")\n",
2026 mds, PFID(&op_data->op_fid1));
2028 op_data->op_fsuid = cfs_curproc_fsuid();
2029 op_data->op_fsgid = cfs_curproc_fsgid();
2030 op_data->op_cap = cfs_curproc_cap_pack();
2031 tgt = lmv_get_target(lmv, mds);
2034 * Cancel UPDATE lock on child (fid1).
2036 op_data->op_flags |= MF_MDC_CANCEL_FID2;
2037 rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
2038 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
2040 rc = md_link(tgt->ltd_exp, op_data, request);
2041 if (rc == -ERESTART) {
2042 LASSERT(*request != NULL);
2043 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2044 "Got -ERESTART during link!\n");
2045 ptlrpc_req_finished(*request);
2049 * Directory got split. Time to update local object and repeat
2050 * the request with proper MDS.
2052 rc = lmv_handle_split(exp, &op_data->op_fid2);
2060 static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
2061 const char *old, int oldlen, const char *new, int newlen,
2062 struct ptlrpc_request **request)
2064 struct obd_device *obd = exp->exp_obd;
2065 struct lmv_obd *lmv = &obd->u.lmv;
2066 struct lmv_tgt_desc *src_tgt;
2070 struct lmv_object *obj;
2075 LASSERT(oldlen != 0);
2077 CDEBUG(D_INODE, "RENAME %*s in "DFID" to %*s in "DFID"\n",
2078 oldlen, old, PFID(&op_data->op_fid1),
2079 newlen, new, PFID(&op_data->op_fid2));
2081 rc = lmv_check_connect(obd);
2088 obj = lmv_object_find(obd, &op_data->op_fid1);
2090 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
2091 (char *)old, oldlen);
2092 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
2093 mds1 = obj->lo_stripes[sidx].ls_mds;
2094 CDEBUG(D_INODE, "Parent obj "DFID"\n", PFID(&op_data->op_fid1));
2095 lmv_object_put(obj);
2097 rc = lmv_fld_lookup(lmv, &op_data->op_fid1, &mds1);
2102 obj = lmv_object_find(obd, &op_data->op_fid2);
2105 * Directory is already split, so we have to forward request to
2108 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
2109 (char *)new, newlen);
2111 mds2 = obj->lo_stripes[sidx].ls_mds;
2112 op_data->op_fid2 = obj->lo_stripes[sidx].ls_fid;
2113 CDEBUG(D_INODE, "Parent obj "DFID"\n", PFID(&op_data->op_fid2));
2114 lmv_object_put(obj);
2116 rc = lmv_fld_lookup(lmv, &op_data->op_fid2, &mds2);
2121 op_data->op_fsuid = cfs_curproc_fsuid();
2122 op_data->op_fsgid = cfs_curproc_fsgid();
2123 op_data->op_cap = cfs_curproc_cap_pack();
2125 src_tgt = lmv_get_target(lmv, mds1);
2128 * LOOKUP lock on src child (fid3) should also be cancelled for
2129 * src_tgt in mdc_rename.
2131 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
2134 * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its
2137 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2138 LCK_EX, MDS_INODELOCK_UPDATE,
2139 MF_MDC_CANCEL_FID2);
2142 * Cancel LOOKUP locks on tgt child (fid4) for parent tgt_tgt.
2145 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2146 LCK_EX, MDS_INODELOCK_LOOKUP,
2147 MF_MDC_CANCEL_FID4);
2151 * Cancel all the locks on tgt child (fid4).
2154 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2155 LCK_EX, MDS_INODELOCK_FULL,
2156 MF_MDC_CANCEL_FID4);
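/*
 * To recap the cancellation set-up for rename: the source target cancels
 * the fid1/fid3 locks itself in mdc_rename (flags set above), while the
 * UPDATE lock on the target parent (fid2), the LOOKUP lock on the target
 * child (fid4) and finally all remaining locks on fid4 are cancelled early
 * here whenever they live on a different MDT than the operation target.
 */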
2159 rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
2160 new, newlen, request);
2162 if (rc == -ERESTART) {
2163 LASSERT(*request != NULL);
2164 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2165 "Got -ERESTART during rename!\n");
2166 ptlrpc_req_finished(*request);
2170 * Directory got split. Time to update local object and repeat
2171 * the request with proper MDS.
2173 rc = lmv_handle_split(exp, &op_data->op_fid1);
2180 static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
2181 void *ea, int ealen, void *ea2, int ea2len,
2182 struct ptlrpc_request **request,
2183 struct md_open_data **mod)
2185 struct obd_device *obd = exp->exp_obd;
2186 struct lmv_obd *lmv = &obd->u.lmv;
2187 struct ptlrpc_request *req;
2188 struct lmv_tgt_desc *tgt;
2189 struct lmv_object *obj;
2194 rc = lmv_check_connect(obd);
2198 obj = lmv_object_find(obd, &op_data->op_fid1);
2200 CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x%s\n",
2201 PFID(&op_data->op_fid1), op_data->op_attr.ia_valid,
2202 obj ? ", split" : "");
2204 op_data->op_flags |= MF_MDC_CANCEL_FID1;
2206 for (i = 0; i < obj->lo_objcount; i++) {
2207 op_data->op_fid1 = obj->lo_stripes[i].ls_fid;
2209 tgt = lmv_get_target(lmv, obj->lo_stripes[i].ls_mds);
2215 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen,
2216 ea2, ea2len, &req, mod);
2218 if (lu_fid_eq(&obj->lo_fid, &obj->lo_stripes[i].ls_fid)) {
2220 * This is master object and this request should
2221 * be returned back to llite.
2225 ptlrpc_req_finished(req);
2231 lmv_object_put(obj);
2233 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2235 RETURN(PTR_ERR(tgt));
2237 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
2238 ea2len, request, mod);
2243 static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
2244 struct obd_capa *oc, struct ptlrpc_request **request)
2246 struct obd_device *obd = exp->exp_obd;
2247 struct lmv_obd *lmv = &obd->u.lmv;
2248 struct lmv_tgt_desc *tgt;
2252 rc = lmv_check_connect(obd);
2256 tgt = lmv_find_target(lmv, fid);
2258 RETURN(PTR_ERR(tgt));
2260 rc = md_sync(tgt->ltd_exp, fid, oc, request);
2265 * The main purpose of the LMV blocking AST is to remove the split-directory LMV
2266 * presentation object (struct lmv_object) attached to the lock being revoked.
2268 int lmv_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2269 void *data, int flag)
2271 struct lustre_handle lockh;
2272 struct lmv_object *obj;
2277 case LDLM_CB_BLOCKING:
2278 ldlm_lock2handle(lock, &lockh);
2279 rc = ldlm_cli_cancel(&lockh);
2281 CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
2285 case LDLM_CB_CANCELING:
2287 * Time to drop cached attrs for split directory object
2289 obj = lock->l_ast_data;
2291 CDEBUG(D_INODE, "Cancel %s on "LPU64"/"LPU64
2292 ", master "DFID"\n",
2293 lock->l_resource->lr_name.name[3] == 1 ?
2294 "LOOKUP" : "UPDATE",
2295 lock->l_resource->lr_name.name[0],
2296 lock->l_resource->lr_name.name[1],
2297 PFID(&obj->lo_fid));
2298 lmv_object_put(obj);
2307 static void lmv_hash_adjust(__u64 *hash, __u64 hash_adj)
2311 val = le64_to_cpu(*hash);
2312 if (val < hash_adj)
2313 val += MAX_HASH_SIZE;
2314 if (val != MDS_DIR_END_OFF)
2315 *hash = cpu_to_le64(val - hash_adj);
2318 static __u32 lmv_node_rank(struct obd_export *exp, const struct lu_fid *fid)
2321 struct obd_import *imp;
2324 * XXX: to get nid we assume that underlying obd device is mdc.
2326 imp = class_exp2cliimp(exp);
2327 id = imp->imp_connection->c_self + fid_flatten(fid);
2329 CDEBUG(D_INODE, "Readpage node rank: "LPX64" "DFID" "LPX64" "LPX64"\n",
2330 imp->imp_connection->c_self, PFID(fid), id, id ^ (id >> 32));
2332 return id ^ (id >> 32);
2335 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2336 struct page **pages, struct ptlrpc_request **request)
2338 struct obd_device *obd = exp->exp_obd;
2339 struct lmv_obd *lmv = &obd->u.lmv;
2340 struct lmv_object *obj;
2341 struct lu_fid rid = op_data->op_fid1;
2342 __u64 offset = op_data->op_offset;
2352 /* number of pages read, in CFS_PAGE_SIZE */
2354 /* number of pages transferred in LU_PAGE_SIZE */
2356 struct lmv_stripe *los;
2357 struct lmv_tgt_desc *tgt;
2358 struct lu_dirpage *dp;
2359 struct lu_dirent *ent;
2362 rc = lmv_check_connect(obd);
2366 CDEBUG(D_INODE, "READPAGE at "LPX64" from "DFID"\n", offset, PFID(&rid));
2369 * This handles directory lookup in the clustered metadata case (i.e. the
2370 * split directory is located on multiple MD servers).
2371 * Each server keeps directory entries for a certain range of hashes.
2372 * E.g. with N servers and a hash range of 0 to MAX_HASH, the
2373 * first server keeps records with hashes [0 ... MAX_HASH / N - 1],
2374 * the second one with hashes [MAX_HASH / N ... 2 * MAX_HASH / N], and so on.
2376 * readdir could simply read entries from servers 0 to N in
2377 * order, but that would not scale well, as all clients would request the
2378 * directory from the servers in the same order.
2379 * The following algorithm is an optimization:
2380 * instead of doing readdir in 1, 2, ..., N order, a client with
2381 * rank R does readdir in R, R + 1, ..., N, 1, ..., R - 1 order
2382 * (every client has a rank R).
2383 * However, ll_readdir() expects the offset range [0 to MAX_HASH/N), while,
2384 * since the client asks for the directory from MDS{R}, it gets pages with offsets
2385 * [R*MAX_HASH/N ... (R + 1)*MAX_HASH/N]; therefore we do hash_adj
2386 * on the hash values that we get.
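/*
 * Worked example with illustrative numbers (not the real constants):
 * suppose MAX_HASH_SIZE = 0x100, nr = 4 targets and this client's rank is
 * 1, so seg_size = 0x40. A readdir at client offset 0x50 lies in client
 * segment tgt0_idx = 1; the request is sent to tgt_idx = (1 + 1) % 4 = 2
 * with hash_adj = 1 * 0x40, i.e. server offset 0x90, which falls inside
 * that server's [0x80, 0xC0) range. Hashes coming back then have 0x40
 * subtracted again by lmv_hash_adjust(), landing in the [0x40, 0x80)
 * window that ll_readdir() expects for segment 1.
 */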
2388 obj = lmv_object_find_lock(obd, &rid);
2390 nr = obj->lo_objcount;
2392 seg_size = MAX_HASH_SIZE;
2393 do_div(seg_size, nr);
2394 los = obj->lo_stripes;
2395 tgt = lmv_get_target(lmv, los[0].ls_mds);
2396 rank = lmv_node_rank(tgt->ltd_exp, &rid) % nr;
2398 do_div(tgt_tmp, seg_size);
2399 tgt0_idx = do_div(tgt_tmp, nr);
2400 tgt_idx = (tgt0_idx + rank) % nr;
2402 if (tgt_idx < tgt0_idx)
2406 * Last segment has unusual length due to division rounding.
2409 hash_adj = MAX_HASH_SIZE - seg_size * nr;
2413 hash_adj += rank * seg_size;
2415 CDEBUG(D_INODE, "Readpage hash adjustment: %x "LPX64" "
2416 LPX64"/%x -> "LPX64"/%x\n", rank, hash_adj,
2417 offset, tgt0_idx, offset + hash_adj, tgt_idx);
2419 offset = (offset + hash_adj) & MAX_HASH_SIZE;
2420 rid = obj->lo_stripes[tgt_idx].ls_fid;
2421 tgt = lmv_get_target(lmv, los[tgt_idx].ls_mds);
2423 CDEBUG(D_INODE, "Forward to "DFID" with offset %lu i %d\n",
2424 PFID(&rid), (unsigned long)offset, tgt_idx);
2426 tgt = lmv_find_target(lmv, &rid);
2429 GOTO(cleanup, rc = PTR_ERR(tgt));
2431 op_data->op_fid1 = rid;
2432 rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
2436 nrdpgs = ((*request)->rq_bulk->bd_nob_transferred + CFS_PAGE_SIZE - 1)
2438 nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
2439 LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
2440 LASSERT(nrdpgs > 0 && nrdpgs <= op_data->op_npages);
2442 CDEBUG(D_INODE, "read %d(%d)/%d pages\n", nrdpgs, nlupgs,
2443 op_data->op_npages);
2445 for (i = 0; i < nrdpgs; i++) {
2446 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
2447 struct lu_dirpage *first;
2451 struct lu_dirent *tmp = NULL;
2453 dp = cfs_kmap(pages[i]);
2455 lmv_hash_adjust(&dp->ldp_hash_start, hash_adj);
2456 lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
2457 LASSERT(le64_to_cpu(dp->ldp_hash_start) <=
2458 op_data->op_offset);
2460 if ((tgt0_idx != nr - 1) &&
2461 (le64_to_cpu(dp->ldp_hash_end) == MDS_DIR_END_OFF))
2463 dp->ldp_hash_end = cpu_to_le32(seg_size *
2466 ""DFID" reset end "LPX64" tgt %d\n",
2468 (__u64)le64_to_cpu(dp->ldp_hash_end),
2473 ent = lu_dirent_start(dp);
2474 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
2476 hash_end = dp->ldp_hash_end;
2480 for (tmp = ent; ent != NULL;
2481 tmp = ent, ent = lu_dirent_next(ent)) {
2483 lmv_hash_adjust(&ent->lde_hash, hash_adj);
2486 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
2487 dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
2488 if (((unsigned long)dp & ~CFS_PAGE_MASK) && nlupgs > 0) {
2489 ent = lu_dirent_start(dp);
2492 lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
2493 if ((tgt0_idx != nr - 1) &&
2494 (le64_to_cpu(dp->ldp_hash_end) ==
2496 hash_end = cpu_to_le32(seg_size *
2499 ""DFID" reset end "LPX64" tgt %d\n",
2501 (__u64)le64_to_cpu(hash_end),
2505 hash_end = dp->ldp_hash_end;
2506 flags = dp->ldp_flags;
2509 /* enlarge the end entry's lde_reclen from 0 to point to the
2510 * first entry of the next lu_dirpage; in this way
2511 * several lu_dirpages can be stored in one
2512 * client page on the client. */
2513 tmp = ((void *)tmp) +
2514 le16_to_cpu(tmp->lde_reclen);
2516 cpu_to_le16((char *)(dp->ldp_entries) -
2521 first->ldp_hash_end = hash_end;
2522 first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
2523 first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
2525 SET_BUT_UNUSED(tmp);
2527 cfs_kunmap(pages[i]);
2532 lmv_object_put_unlock(obj);
2536 static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
2537 struct ptlrpc_request **request)
2539 struct obd_device *obd = exp->exp_obd;
2540 struct lmv_obd *lmv = &obd->u.lmv;
2541 struct lmv_tgt_desc *tgt = NULL;
2542 struct lmv_object *obj;
2548 rc = lmv_check_connect(obd);
2555 LASSERT(op_data->op_namelen != 0);
2557 obj = lmv_object_find(obd, &op_data->op_fid1);
2559 sidx = raw_name2idx(obj->lo_hashtype,
2562 op_data->op_namelen);
2563 op_data->op_bias &= ~MDS_CHECK_SPLIT;
2564 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
2565 tgt = lmv_get_target(lmv,
2566 obj->lo_stripes[sidx].ls_mds);
2567 lmv_object_put(obj);
2568 CDEBUG(D_INODE, "UNLINK '%*s' in "DFID" -> %u\n",
2569 op_data->op_namelen, op_data->op_name,
2570 PFID(&op_data->op_fid1), sidx);
2574 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2576 RETURN(PTR_ERR(tgt));
2577 op_data->op_bias |= MDS_CHECK_SPLIT;
2580 op_data->op_fsuid = cfs_curproc_fsuid();
2581 op_data->op_fsgid = cfs_curproc_fsgid();
2582 op_data->op_cap = cfs_curproc_cap_pack();
2585 * If child's fid is given, cancel unused locks for it if it is from
2586 * another export than parent.
2588 * LOOKUP lock for child (fid3) should also be cancelled on parent
2589 * tgt_tgt in mdc_unlink().
2591 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
2594 * Cancel FULL locks on child (fid3).
2596 rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
2597 MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
2600 rc = md_unlink(tgt->ltd_exp, op_data, request);
2602 if (rc == -ERESTART) {
2603 LASSERT(*request != NULL);
2604 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2605 "Got -ERESTART during unlink!\n");
2606 ptlrpc_req_finished(*request);
2610 * Directory got split. Time to update local object and repeat
2611 * the request with proper MDS.
2613 rc = lmv_handle_split(exp, &op_data->op_fid1);
2620 static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2622 struct lmv_obd *lmv = &obd->u.lmv;
2626 case OBD_CLEANUP_EARLY:
2627 /* XXX: here should be calling obd_precleanup() down to
2630 case OBD_CLEANUP_EXPORTS:
2631 fld_client_proc_fini(&lmv->lmv_fld);
2632 lprocfs_obd_cleanup(obd);
2640 static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
2641 __u32 keylen, void *key, __u32 *vallen, void *val,
2642 struct lov_stripe_md *lsm)
2644 struct obd_device *obd;
2645 struct lmv_obd *lmv;
2649 obd = class_exp2obd(exp);
2651 CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
2652 exp->exp_handle.h_cookie);
2657 if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
2658 struct lmv_tgt_desc *tgts;
2661 rc = lmv_check_connect(obd);
2665 LASSERT(*vallen == sizeof(__u32));
2666 for (i = 0, tgts = lmv->tgts; i < lmv->desc.ld_tgt_count;
2670 * All tgts should be connected when this gets called.
2672 if (!tgts || !tgts->ltd_exp) {
2673 CERROR("target not setup?\n");
2677 if (!obd_get_info(env, tgts->ltd_exp, keylen, key,
2682 } else if (KEY_IS(KEY_MAX_EASIZE) || KEY_IS(KEY_CONN_DATA)) {
2683 rc = lmv_check_connect(obd);
2688 * Forwarding this request to first MDS, it should know LOV
2691 rc = obd_get_info(env, lmv->tgts[0].ltd_exp, keylen, key,
2693 if (!rc && KEY_IS(KEY_CONN_DATA)) {
2694 exp->exp_connect_flags =
2695 ((struct obd_connect_data *)val)->ocd_connect_flags;
2698 } else if (KEY_IS(KEY_TGT_COUNT)) {
2699 *((int *)val) = lmv->desc.ld_tgt_count;
2703 CDEBUG(D_IOCTL, "Invalid key\n");
int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
                       obd_count keylen, void *key, obd_count vallen,
                       void *val, struct ptlrpc_request_set *set)
{
        struct lmv_tgt_desc *tgt;
        struct obd_device   *obd;
        struct lmv_obd      *lmv;
        int                  rc = 0;

        obd = class_exp2obd(exp);
        if (obd == NULL) {
                CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
                       exp->exp_handle.h_cookie);
                RETURN(-EINVAL);
        }
        lmv = &obd->u.lmv;

        if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX)) {
                int i, err;

                for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
                        tgt = &lmv->tgts[i];
                        if (tgt->ltd_exp == NULL)
                                continue;

                        err = obd_set_info_async(env, tgt->ltd_exp,
                                                 keylen, key, vallen, val, set);
                        if (err && rc == 0)
                                rc = err;
                }
                RETURN(rc);
        }

        RETURN(-EINVAL);
}
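/*
 * Pack the in-memory LMV stripe descriptor into its little-endian wire form.
 * Calling with lsm == NULL frees a previously packed buffer; with
 * *lmmp == NULL the output buffer is allocated here.
 */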
int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
               struct lov_stripe_md *lsm)
{
        struct obd_device    *obd = class_exp2obd(exp);
        struct lmv_obd       *lmv = &obd->u.lmv;
        struct lmv_stripe_md *meap;
        struct lmv_stripe_md *lsmp;
        int                   mea_size;
        int                   i;

        mea_size = lmv_get_easize(lmv);

        if (*lmmp && !lsm) {
                OBD_FREE_LARGE(*lmmp, mea_size);
                *lmmp = NULL;
                RETURN(0);
        }

        if (*lmmp == NULL) {
                OBD_ALLOC_LARGE(*lmmp, mea_size);
                if (*lmmp == NULL)
                        RETURN(-ENOMEM);
        }

        lsmp = (struct lmv_stripe_md *)lsm;
        meap = (struct lmv_stripe_md *)*lmmp;

        if (lsmp->mea_magic != MEA_MAGIC_LAST_CHAR &&
            lsmp->mea_magic != MEA_MAGIC_ALL_CHARS)
                RETURN(-EINVAL);

        meap->mea_magic = cpu_to_le32(lsmp->mea_magic);
        meap->mea_count = cpu_to_le32(lsmp->mea_count);
        meap->mea_master = cpu_to_le32(lsmp->mea_master);

        for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
                /* copy the stripe FID from the source, then swab it in place */
                meap->mea_ids[i] = lsmp->mea_ids[i];
                fid_cpu_to_le(&meap->mea_ids[i], &meap->mea_ids[i]);
        }

        RETURN(mea_size);
}
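/*
 * Unpack a wire-format LMV EA into an in-memory lmv_stripe_md, converting the
 * magic, stripe count, master index and stripe FIDs to host byte order.
 * Calling with lmm == NULL frees a previously unpacked copy.
 */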
int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
                 struct lov_mds_md *lmm, int lmm_size)
{
        struct obd_device     *obd = class_exp2obd(exp);
        struct lmv_stripe_md **tmea = (struct lmv_stripe_md **)lsmp;
        struct lmv_stripe_md  *mea = (struct lmv_stripe_md *)lmm;
        struct lmv_obd        *lmv = &obd->u.lmv;
        __u32                  magic;
        int                    mea_size;
        int                    i;

        mea_size = lmv_get_easize(lmv);

        if (*lsmp != NULL && lmm == NULL) {
                OBD_FREE_LARGE(*tmea, mea_size);
                *lsmp = NULL;
                RETURN(0);
        }

        LASSERT(mea_size == lmm_size);
        OBD_ALLOC_LARGE(*tmea, mea_size);
        if (*tmea == NULL)
                RETURN(-ENOMEM);

        if (mea->mea_magic == MEA_MAGIC_LAST_CHAR ||
            mea->mea_magic == MEA_MAGIC_ALL_CHARS ||
            mea->mea_magic == MEA_MAGIC_HASH_SEGMENT) {
                magic = le32_to_cpu(mea->mea_magic);
        } else {
                /* Old mea format is not handled here. */
                CERROR("Unsupported old EA format found\n");
                OBD_FREE_LARGE(*tmea, mea_size);
                *lsmp = NULL;
                RETURN(-EINVAL);
        }

        (*tmea)->mea_magic = magic;
        (*tmea)->mea_count = le32_to_cpu(mea->mea_count);
        (*tmea)->mea_master = le32_to_cpu(mea->mea_master);

        for (i = 0; i < (*tmea)->mea_count; i++) {
                (*tmea)->mea_ids[i] = mea->mea_ids[i];
                fid_le_to_cpu(&(*tmea)->mea_ids[i], &(*tmea)->mea_ids[i]);
        }

        RETURN(mea_size);
}
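/*
 * Cancel unused locks on @fid.  With CMD a single FID may hold locks in the
 * namespaces of several MDTs, so the cancel is sent to every active target.
 */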
static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
                             ldlm_policy_data_t *policy, ldlm_mode_t mode,
                             ldlm_cancel_flags_t flags, void *opaque)
{
        struct obd_device *obd = exp->exp_obd;
        struct lmv_obd    *lmv = &obd->u.lmv;
        int                rc = 0;
        int                err;
        int                i;

        LASSERT(fid != NULL);

        for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
                if (!lmv->tgts[i].ltd_exp || !lmv->tgts[i].ltd_active)
                        continue;

                err = md_cancel_unused(lmv->tgts[i].ltd_exp, fid,
                                       policy, mode, flags, opaque);
                if (!rc)
                        rc = err;
        }
        RETURN(rc);
}
int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
                      __u64 *bits)
{
        struct obd_device *obd = exp->exp_obd;
        struct lmv_obd    *lmv = &obd->u.lmv;
        int                rc;

        rc = md_set_lock_data(lmv->tgts[0].ltd_exp, lockh, data, bits);
        RETURN(rc);
}
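/*
 * Look for a cached lock matching @fid.  Every target is probed because the
 * lookup and update/open locks of one object may live on different MDTs; the
 * first matching lock mode found is returned.
 */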
ldlm_mode_t lmv_lock_match(struct obd_export *exp, __u64 flags,
                           const struct lu_fid *fid, ldlm_type_t type,
                           ldlm_policy_data_t *policy, ldlm_mode_t mode,
                           struct lustre_handle *lockh)
{
        struct obd_device *obd = exp->exp_obd;
        struct lmv_obd    *lmv = &obd->u.lmv;
        ldlm_mode_t        rc;
        int                i;

        CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));

        /*
         * With CMD every object can have two locks in different namespaces:
         * a lookup lock in the namespace of the MDS storing the direntry and
         * an update/open lock in the namespace of the MDS storing the inode.
         * Thus all targets are checked, not only the one the fid was created
         * on.
         */
        for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
                rc = md_lock_match(lmv->tgts[i].ltd_exp, flags, fid,
                                   type, policy, mode, lockh);
                if (rc)
                        RETURN(rc);
        }

        RETURN(0);
}
int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
                      struct obd_export *dt_exp, struct obd_export *md_exp,
                      struct lustre_md *md)
{
        struct obd_device *obd = exp->exp_obd;
        struct lmv_obd    *lmv = &obd->u.lmv;
        int                rc;

        rc = md_get_lustre_md(lmv->tgts[0].ltd_exp, req, dt_exp, md_exp, md);
        RETURN(rc);
}

int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
        struct obd_device *obd = exp->exp_obd;
        struct lmv_obd    *lmv = &obd->u.lmv;

        if (md->mea != NULL)
                obd_free_memmd(exp, (void *)&md->mea);
        RETURN(md_free_lustre_md(lmv->tgts[0].ltd_exp, md));
}
int lmv_set_open_replay_data(struct obd_export *exp,
                             struct obd_client_handle *och,
                             struct ptlrpc_request *open_req)
{
        struct obd_device   *obd = exp->exp_obd;
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_tgt_desc *tgt;

        tgt = lmv_find_target(lmv, &och->och_fid);
        if (IS_ERR(tgt))
                RETURN(PTR_ERR(tgt));

        RETURN(md_set_open_replay_data(tgt->ltd_exp, och, open_req));
}

int lmv_clear_open_replay_data(struct obd_export *exp,
                               struct obd_client_handle *och)
{
        struct obd_device   *obd = exp->exp_obd;
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_tgt_desc *tgt;

        tgt = lmv_find_target(lmv, &och->och_fid);
        if (IS_ERR(tgt))
                RETURN(PTR_ERR(tgt));

        RETURN(md_clear_open_replay_data(tgt->ltd_exp, och));
}
static int lmv_get_remote_perm(struct obd_export *exp,
                               const struct lu_fid *fid,
                               struct obd_capa *oc, __u32 suppgid,
                               struct ptlrpc_request **request)
{
        struct obd_device   *obd = exp->exp_obd;
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_tgt_desc *tgt;
        int                  rc;

        rc = lmv_check_connect(obd);
        if (rc)
                RETURN(rc);

        tgt = lmv_find_target(lmv, fid);
        if (IS_ERR(tgt))
                RETURN(PTR_ERR(tgt));

        rc = md_get_remote_perm(tgt->ltd_exp, fid, oc, suppgid, request);
        RETURN(rc);
}
static int lmv_renew_capa(struct obd_export *exp, struct obd_capa *oc,
                          renew_capa_cb_t cb)
{
        struct obd_device   *obd = exp->exp_obd;
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_tgt_desc *tgt;
        int                  rc;

        rc = lmv_check_connect(obd);
        if (rc)
                RETURN(rc);

        tgt = lmv_find_target(lmv, &oc->c_capa.lc_fid);
        if (IS_ERR(tgt))
                RETURN(PTR_ERR(tgt));

        rc = md_renew_capa(tgt->ltd_exp, oc, cb);
        RETURN(rc);
}
int lmv_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
                    const struct req_msg_field *field, struct obd_capa **oc)
{
        struct obd_device *obd = exp->exp_obd;
        struct lmv_obd    *lmv = &obd->u.lmv;
        int                rc;

        rc = md_unpack_capa(lmv->tgts[0].ltd_exp, req, field, oc);
        RETURN(rc);
}
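/*
 * Queue an asynchronous getattr-by-name intent.  For a split parent the
 * stripe holding the name is resolved first, so the intent is enqueued on the
 * MDT that actually stores the entry.
 */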
int lmv_intent_getattr_async(struct obd_export *exp,
                             struct md_enqueue_info *minfo,
                             struct ldlm_enqueue_info *einfo)
{
        struct md_op_data   *op_data = &minfo->mi_data;
        struct obd_device   *obd = exp->exp_obd;
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_object   *obj;
        struct lmv_tgt_desc *tgt = NULL;
        int                  sidx;
        int                  rc;

        rc = lmv_check_connect(obd);
        if (rc)
                RETURN(rc);

        if (op_data->op_namelen) {
                obj = lmv_object_find(obd, &op_data->op_fid1);
                if (obj) {
                        sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
                                            (char *)op_data->op_name,
                                            op_data->op_namelen);
                        op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
                        tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
                        lmv_object_put(obj);
                }
        }

        if (tgt == NULL)
                tgt = lmv_find_target(lmv, &op_data->op_fid1);
        if (IS_ERR(tgt))
                RETURN(PTR_ERR(tgt));

        rc = md_intent_getattr_async(tgt->ltd_exp, minfo, einfo);
        RETURN(rc);
}
int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
                        struct lu_fid *fid, __u64 *bits)
{
        struct obd_device   *obd = exp->exp_obd;
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_tgt_desc *tgt;
        int                  rc;

        rc = lmv_check_connect(obd);
        if (rc)
                RETURN(rc);

        tgt = lmv_find_target(lmv, fid);
        if (IS_ERR(tgt))
                RETURN(PTR_ERR(tgt));

        rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
        RETURN(rc);
}
/*
 * For LMV the request normally only needs to go to the master MDT, which
 * will handle the slave MDTs itself.  The exception is Q_GETOQUOTA, for
 * which usage is fetched directly from every slave MDT and summed up here.
 */
int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
                 struct obd_quotactl *oqctl)
{
        struct obd_device   *obd = class_exp2obd(exp);
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_tgt_desc *tgt = &lmv->tgts[0];
        int                  i, rc = 0;
        __u64                curspace, curinodes;

        if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
                CERROR("master lmv inactive\n");
                RETURN(-EIO);
        }

        if (oqctl->qc_cmd != Q_GETOQUOTA) {
                rc = obd_quotactl(tgt->ltd_exp, oqctl);
                RETURN(rc);
        }

        curspace = curinodes = 0;
        for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
                int err;

                tgt = &lmv->tgts[i];
                if (tgt->ltd_exp == NULL)
                        continue;
                if (!tgt->ltd_active) {
                        CDEBUG(D_HA, "mdt %d is inactive.\n", i);
                        continue;
                }

                err = obd_quotactl(tgt->ltd_exp, oqctl);
                if (err) {
                        CERROR("getquota on mdt %d failed. %d\n", i, err);
                        if (!rc)
                                rc = err;
                        continue;
                }

                curspace += oqctl->qc_dqblk.dqb_curspace;
                curinodes += oqctl->qc_dqblk.dqb_curinodes;
        }
        oqctl->qc_dqblk.dqb_curspace = curspace;
        oqctl->qc_dqblk.dqb_curinodes = curinodes;

        RETURN(rc);
}
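/*
 * Fire a quotacheck on every LMV target.  An inactive target aborts the whole
 * operation; otherwise the first per-target error is returned.
 */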
int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
                   struct obd_quotactl *oqctl)
{
        struct obd_device   *obd = class_exp2obd(exp);
        struct lmv_obd      *lmv = &obd->u.lmv;
        struct lmv_tgt_desc *tgt;
        int                  i, rc = 0, err;

        for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
                if (!tgt->ltd_active) {
                        CERROR("lmv idx %d inactive\n", i);
                        RETURN(-EIO);
                }

                err = obd_quotacheck(tgt->ltd_exp, oqctl);
                if (err && !rc)
                        rc = err;
        }

        RETURN(rc);
}
struct obd_ops lmv_obd_ops = {
        .o_owner                = THIS_MODULE,
        .o_setup                = lmv_setup,
        .o_cleanup              = lmv_cleanup,
        .o_precleanup           = lmv_precleanup,
        .o_process_config       = lmv_process_config,
        .o_connect              = lmv_connect,
        .o_disconnect           = lmv_disconnect,
        .o_statfs               = lmv_statfs,
        .o_get_info             = lmv_get_info,
        .o_set_info_async       = lmv_set_info_async,
        .o_packmd               = lmv_packmd,
        .o_unpackmd             = lmv_unpackmd,
        .o_notify               = lmv_notify,
        .o_get_uuid             = lmv_get_uuid,
        .o_iocontrol            = lmv_iocontrol,
        .o_fid_delete           = lmv_fid_delete,
        .o_quotacheck           = lmv_quotacheck,
        .o_quotactl             = lmv_quotactl
};
struct md_ops lmv_md_ops = {
        .m_getstatus            = lmv_getstatus,
        .m_change_cbdata        = lmv_change_cbdata,
        .m_find_cbdata          = lmv_find_cbdata,
        .m_close                = lmv_close,
        .m_create               = lmv_create,
        .m_done_writing         = lmv_done_writing,
        .m_enqueue              = lmv_enqueue,
        .m_getattr              = lmv_getattr,
        .m_getxattr             = lmv_getxattr,
        .m_getattr_name         = lmv_getattr_name,
        .m_intent_lock          = lmv_intent_lock,
        .m_rename               = lmv_rename,
        .m_setattr              = lmv_setattr,
        .m_setxattr             = lmv_setxattr,
        .m_readpage             = lmv_readpage,
        .m_unlink               = lmv_unlink,
        .m_init_ea_size         = lmv_init_ea_size,
        .m_cancel_unused        = lmv_cancel_unused,
        .m_set_lock_data        = lmv_set_lock_data,
        .m_lock_match           = lmv_lock_match,
        .m_get_lustre_md        = lmv_get_lustre_md,
        .m_free_lustre_md       = lmv_free_lustre_md,
        .m_set_open_replay_data = lmv_set_open_replay_data,
        .m_clear_open_replay_data = lmv_clear_open_replay_data,
        .m_renew_capa           = lmv_renew_capa,
        .m_unpack_capa          = lmv_unpack_capa,
        .m_get_remote_perm      = lmv_get_remote_perm,
        .m_intent_getattr_async = lmv_intent_getattr_async,
        .m_revalidate_lock      = lmv_revalidate_lock
};
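/*
 * Module init/exit: create the kmem cache used for split-directory objects
 * and register the LMV OBD type, undoing both on unload.
 */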
int __init lmv_init(void)
{
        struct lprocfs_static_vars lvars;
        int rc;

        lmv_object_cache = cfs_mem_cache_create("lmv_objects",
                                                sizeof(struct lmv_object),
                                                0, 0);
        if (!lmv_object_cache) {
                CERROR("Error allocating lmv objects cache\n");
                return -ENOMEM;
        }

        lprocfs_lmv_init_vars(&lvars);

        rc = class_register_type(&lmv_obd_ops, &lmv_md_ops,
                                 lvars.module_vars, LUSTRE_LMV_NAME, NULL);
        if (rc)
                cfs_mem_cache_destroy(lmv_object_cache);

        return rc;
}

static void lmv_exit(void)
{
        class_unregister_type(LUSTRE_LMV_NAME);

        LASSERTF(cfs_atomic_read(&lmv_object_count) == 0,
                 "Can't free lmv objects cache, %d object(s) busy\n",
                 cfs_atomic_read(&lmv_object_count));
        cfs_mem_cache_destroy(lmv_object_cache);
}

MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver");
MODULE_LICENSE("GPL");

module_init(lmv_init);
module_exit(lmv_exit);