1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
30 * Use is subject to license terms.
32 * Copyright (c) 2011, 2012, Whamcloud, Inc.
35 * This file is part of Lustre, http://www.lustre.org/
36 * Lustre is a trademark of Sun Microsystems, Inc.
40 # define EXPORT_SYMTAB
42 #define DEBUG_SUBSYSTEM S_LMV
44 #include <linux/slab.h>
45 #include <linux/module.h>
46 #include <linux/init.h>
47 #include <linux/slab.h>
48 #include <linux/pagemap.h>
50 #include <asm/div64.h>
51 #include <linux/seq_file.h>
52 #include <linux/namei.h>
54 #include <liblustre.h>
57 #include <lustre_log.h>
58 #include <obd_support.h>
59 #include <lustre_lib.h>
60 #include <lustre_net.h>
61 #include <obd_class.h>
62 #include <lprocfs_status.h>
63 #include <lustre_lite.h>
64 #include <lustre_fid.h>
65 #include "lmv_internal.h"
/*
 * NOTE(review): this listing is a line-numbered extract with gaps — the
 * integer at the start of each line is the original source line number,
 * and indentation plus some statements/braces are missing from view.
 */
/* Slab cache used for lmv_object allocations (legacy libcfs wrapper). */
68 cfs_mem_cache_t *lmv_object_cache;
/* Number of live lmv_object instances currently allocated. */
69 cfs_atomic_t lmv_object_count = CFS_ATOMIC_INIT(0);
/*
 * Flip the active flag of one LMV target and keep the active-target
 * counter in step.  No-op when the target is already in the requested
 * state.  (The third parameter — the "activate" flag used below — is
 * not visible in this extract.)
 */
71 static void lmv_activate_target(struct lmv_obd *lmv,
72 struct lmv_tgt_desc *tgt,
75 if (tgt->ltd_active == activate)
78 tgt->ltd_active = activate;
79 lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
/*
 * lmv_set_mdc_active(): find the target whose UUID matches @uuid and
 * mark its MDC active or inactive under lmv_lock.  The three lines
 * below beginning "* -E..." are the tail of the original function
 * comment (its opening is missing from this extract) and document the
 * error returns.
 */
85 * -EINVAL : UUID can't be found in the LMV's target list
86 * -ENOTCONN: The UUID is found, but the target connection is bad (!)
87 * -EBADF : The UUID is found, but the OBD of the wrong type (!)
89 static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
92 struct lmv_tgt_desc *tgt;
93 struct obd_device *obd;
98 CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
99 lmv, uuid->uuid, activate);
/* Linear scan of the target table; slots with no export are skipped. */
101 cfs_spin_lock(&lmv->lmv_lock);
102 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
103 if (tgt->ltd_exp == NULL)
106 CDEBUG(D_INFO, "Target idx %d is %s conn "LPX64"\n",
107 i, tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
109 if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
/* Fell off the end of the table: UUID unknown -> -EINVAL. */
113 if (i == lmv->desc.ld_tgt_count)
114 GOTO(out_lmv_lock, rc = -EINVAL);
116 obd = class_exp2obd(tgt->ltd_exp);
118 GOTO(out_lmv_lock, rc = -ENOTCONN);
120 CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n",
121 obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd,
122 obd->obd_type->typ_name, i);
/* LMV targets must be MDC devices. */
123 LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
/* Already in the requested state: nothing to do, rc stays unchanged. */
125 if (tgt->ltd_active == activate) {
126 CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
127 activate ? "" : "in");
128 GOTO(out_lmv_lock, rc);
131 CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
132 activate ? "" : "in");
133 lmv_activate_target(lmv, tgt, activate);
137 cfs_spin_unlock(&lmv->lmv_lock);
/*
 * Store connect data @data for the target matching @uuid into the
 * per-target lmv->datas[] array, under lmv_lock.
 */
141 static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid,
142 struct obd_connect_data *data)
144 struct lmv_tgt_desc *tgt;
148 LASSERT(data != NULL);
150 cfs_spin_lock(&lmv->lmv_lock);
151 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
152 if (tgt->ltd_exp == NULL)
/* Match found: copy the connect data by value into the slot. */
155 if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
156 lmv->datas[tgt->ltd_idx] = *data;
160 cfs_spin_unlock(&lmv->lmv_lock);
/*
 * Return the UUID of the first LMV target's export.  Assumes target 0
 * exists and is connected — TODO(review) confirm callers guarantee this.
 */
164 struct obd_uuid *lmv_get_uuid(struct obd_export *exp) {
165 struct obd_device *obd = exp->exp_obd;
166 struct lmv_obd *lmv = &obd->u.lmv;
167 return obd_get_uuid(lmv->tgts[0].ltd_exp);
/*
 * OBD notification callback: react to events on an underlying MDC
 * (@watched) — activate/deactivate the target, absorb its connect data,
 * or flush the FLD cache on disconnect — then forward the event to the
 * observer above this device.
 */
170 static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
171 enum obd_notify_event ev, void *data)
173 struct obd_connect_data *conn_data;
174 struct lmv_obd *lmv = &obd->u.lmv;
175 struct obd_uuid *uuid;
/* Only MDC devices are expected to notify us. */
179 if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
180 CERROR("unexpected notification of %s %s!\n",
181 watched->obd_type->typ_name,
186 uuid = &watched->u.cli.cl_target_uuid;
187 if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
189 * Set MDC as active before notifying the observer, so the
190 * observer can use the MDC normally.
192 rc = lmv_set_mdc_active(lmv, uuid,
193 ev == OBD_NOTIFY_ACTIVE);
195 CERROR("%sactivation of %s failed: %d\n",
196 ev == OBD_NOTIFY_ACTIVE ? "" : "de",
200 } else if (ev == OBD_NOTIFY_OCD) {
201 conn_data = &watched->u.cli.cl_import->imp_connect_data;
204 * Set connect data to desired target, update exp_connect_flags.
206 rc = lmv_set_mdc_data(lmv, uuid, conn_data);
208 CERROR("can't set connect data to target %s, rc %d\n",
214 * XXX: Make sure that ocd_connect_flags from all targets are
215 * the same. Otherwise one of MDTs runs wrong version or
216 * something like this. --umka
/* Mirror the MDC's connect flags onto our own self-export. */
218 obd->obd_self_export->exp_connect_flags =
219 conn_data->ocd_connect_flags;
222 else if (ev == OBD_NOTIFY_DISCON) {
224 * For disconnect event, flush fld cache for failout MDS case.
226 fld_client_flush(&lmv->lmv_fld);
230 * Pass the notification up the chain.
232 if (obd->obd_observer)
233 rc = obd_notify(obd->obd_observer, watched, ev, data);
/*
 * (Tail of original comment follows on the next two numbered lines.)
 */
239 * This is fake connect function. Its purpose is to initialize lmv and say
240 * caller that everything is okay. Real connection will be performed later.
242 static int lmv_connect(const struct lu_env *env,
243 struct obd_export **exp, struct obd_device *obd,
244 struct obd_uuid *cluuid, struct obd_connect_data *data,
248 struct proc_dir_entry *lmv_proc_dir;
250 struct lmv_obd *lmv = &obd->u.lmv;
251 struct lustre_handle conn = { 0 };
256 * We don't want to actually do the underlying connections more than
257 * once, so keep track.
260 if (lmv->refcount > 1) {
265 rc = class_connect(&conn, obd, cluuid);
267 CERROR("class_connection() returned %d\n", rc);
/* Pin the export: the extra reference is dropped in lmv_disconnect(). */
271 *exp = class_conn2export(&conn);
272 class_export_get(*exp);
276 lmv->cluuid = *cluuid;
/* Remember caller's connect data; reused when connecting each MDC. */
279 lmv->conn_data = *data;
282 lmv_proc_dir = lprocfs_register("target_obds", obd->obd_proc_entry,
/* NOTE(review): message below lacks a trailing \n (cannot change a
 * runtime string in a comment-only update). */
284 if (IS_ERR(lmv_proc_dir)) {
285 CERROR("could not register /proc/fs/lustre/%s/%s/target_obds.",
286 obd->obd_type->typ_name, obd->obd_name);
292 * All real clients should perform actual connection right away, because
293 * it is possible, that LMV will not have opportunity to connect targets
294 * and MDC stuff will be called directly, for instance while reading
295 * ../mdc/../kbytesfree procfs file, etc.
297 if (data->ocd_connect_flags & OBD_CONNECT_REAL)
298 rc = lmv_check_connect(obd);
303 lprocfs_remove(&lmv_proc_dir);
/*
 * Push the inter-MDS timeout key to every connected target.  Bails out
 * early when no server timeout is configured or LMV is not yet
 * connected.
 */
310 static void lmv_set_timeouts(struct obd_device *obd)
312 struct lmv_tgt_desc *tgts;
317 if (lmv->server_timeout == 0)
320 if (lmv->connected == 0)
323 for (i = 0, tgts = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgts++) {
324 if (tgts->ltd_exp == NULL)
/* Fire-and-forget: async set_info, no reply handling. */
327 obd_set_info_async(tgts->ltd_exp, sizeof(KEY_INTERMDS),
328 KEY_INTERMDS, 0, NULL, NULL);
/*
 * Record new maxima for EA/default-EA/cookie sizes and propagate them
 * to every connected target via md_init_ea_size().  Sizes only ever
 * grow (each is updated when the new value is larger).
 */
332 static int lmv_init_ea_size(struct obd_export *exp, int easize,
333 int def_easize, int cookiesize)
335 struct obd_device *obd = exp->exp_obd;
336 struct lmv_obd *lmv = &obd->u.lmv;
342 if (lmv->max_easize < easize) {
343 lmv->max_easize = easize;
346 if (lmv->max_def_easize < def_easize) {
347 lmv->max_def_easize = def_easize;
350 if (lmv->max_cookiesize < cookiesize) {
351 lmv->max_cookiesize = cookiesize;
/* Nothing to propagate until targets are connected. */
357 if (lmv->connected == 0)
360 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
361 if (lmv->tgts[i].ltd_exp == NULL) {
362 CWARN("%s: NULL export for %d\n", obd->obd_name, i);
366 rc = md_init_ea_size(lmv->tgts[i].ltd_exp, easize, def_easize,
369 CERROR("obd_init_ea_size() failed on MDT target %d, "
370 "error %d.\n", i, rc);
377 #define MAX_STRING_SIZE 128
/*
 * Connect one MDC target: find the client OBD, obd_connect() to it,
 * initialize its FID client and add it as an FLD target, register LMV
 * as its observer, notify our own observer, save the connect data, and
 * add a procfs symlink under target_obds.
 */
379 int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
382 struct proc_dir_entry *lmv_proc_dir;
384 struct lmv_obd *lmv = &obd->u.lmv;
385 struct obd_uuid *cluuid = &lmv->cluuid;
386 struct obd_connect_data *mdc_data = NULL;
387 struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" };
388 struct obd_device *mdc_obd;
389 struct obd_export *mdc_exp;
390 struct lu_fld_target target;
394 mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
397 CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
401 CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
402 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
403 tgt->ltd_uuid.uuid, obd->obd_uuid.uuid,
406 if (!mdc_obd->obd_set_up) {
407 CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
/* Real connection, using the connect data saved in lmv_connect(). */
411 rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid,
412 &lmv->conn_data, NULL);
414 CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
419 * Init fid sequence client for this mdc and add new fld target.
421 rc = obd_fid_init(mdc_exp);
425 target.ft_srv = NULL;
426 target.ft_exp = mdc_exp;
427 target.ft_idx = tgt->ltd_idx;
429 fld_client_add_target(&lmv->lmv_fld, &target);
431 mdc_data = &class_exp2cliimp(mdc_exp)->imp_connect_data;
/* Observe the MDC so lmv_notify() sees its state changes. */
433 rc = obd_register_observer(mdc_obd, obd);
435 obd_disconnect(mdc_exp);
436 CERROR("target %s register_observer error %d\n",
437 tgt->ltd_uuid.uuid, rc);
441 if (obd->obd_observer) {
443 * Tell the observer about the new target.
/* Target index is passed as the opaque data pointer. */
445 rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd,
446 OBD_NOTIFY_ACTIVE, (void *)(tgt - lmv->tgts));
448 obd_disconnect(mdc_exp);
454 tgt->ltd_exp = mdc_exp;
455 lmv->desc.ld_active_tgt_count++;
458 * Copy connect data, it may be used later.
460 lmv->datas[tgt->ltd_idx] = *mdc_data;
/* Sync current EA-size maxima down to the freshly connected target. */
462 md_init_ea_size(tgt->ltd_exp, lmv->max_easize,
463 lmv->max_def_easize, lmv->max_cookiesize);
465 CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
466 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
467 cfs_atomic_read(&obd->obd_refcount));
/* Best-effort procfs symlink target_obds/<mdc> -> ../../<type>/<name>. */
470 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
472 struct proc_dir_entry *mdc_symlink;
474 LASSERT(mdc_obd->obd_type != NULL);
475 LASSERT(mdc_obd->obd_type->typ_name != NULL);
476 mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name,
479 mdc_obd->obd_type->typ_name,
481 if (mdc_symlink == NULL) {
482 CERROR("Could not register LMV target "
483 "/proc/fs/lustre/%s/%s/target_obds/%s.",
484 obd->obd_type->typ_name, obd->obd_name,
486 lprocfs_remove(&lmv_proc_dir);
/*
 * Add a new MDC target (by UUID) to the LMV.  The first target also
 * triggers llog setup.  If LMV is already connected, the new target is
 * connected immediately and the EA size is recomputed; on connect
 * failure the slot is rolled back.  Runs under the init lock (the
 * lmv_init_lock() call is not visible in this extract).
 */
494 int lmv_add_target(struct obd_device *obd, struct obd_uuid *tgt_uuid)
496 struct lmv_obd *lmv = &obd->u.lmv;
497 struct lmv_tgt_desc *tgt;
501 CDEBUG(D_CONFIG, "Target uuid: %s.\n", tgt_uuid->uuid);
/* Target table is statically sized; refuse to overflow it. */
505 if (lmv->desc.ld_active_tgt_count >= LMV_MAX_TGT_COUNT) {
506 lmv_init_unlock(lmv);
507 CERROR("Can't add %s, LMV module compiled for %d MDCs. "
508 "That many MDCs already configured.\n",
509 tgt_uuid->uuid, LMV_MAX_TGT_COUNT);
/* First target: set up the llog subsystem against its MDC. */
512 if (lmv->desc.ld_tgt_count == 0) {
513 struct obd_device *mdc_obd;
515 mdc_obd = class_find_client_obd(tgt_uuid, LUSTRE_MDC_NAME,
518 lmv_init_unlock(lmv);
519 CERROR("Target %s not attached\n", tgt_uuid->uuid);
523 rc = obd_llog_init(obd, &obd->obd_olg, mdc_obd, NULL);
525 lmv_init_unlock(lmv);
526 CERROR("lmv failed to setup llogging subsystems\n");
/* Claim the next slot and record the UUID under lmv_lock. */
529 cfs_spin_lock(&lmv->lmv_lock);
530 tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
531 tgt->ltd_uuid = *tgt_uuid;
532 cfs_spin_unlock(&lmv->lmv_lock);
534 if (lmv->connected) {
535 rc = lmv_connect_mdc(obd, tgt);
/* Connect failed: undo the slot claim. */
537 cfs_spin_lock(&lmv->lmv_lock);
538 lmv->desc.ld_tgt_count--;
539 memset(tgt, 0, sizeof(*tgt));
540 cfs_spin_unlock(&lmv->lmv_lock);
/* Stripe MD grows with target count — refresh EA size. */
542 int easize = sizeof(struct lmv_stripe_md) +
543 lmv->desc.ld_tgt_count *
544 sizeof(struct lu_fid);
545 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
549 lmv_init_unlock(lmv);
/*
 * Perform the real connection to all configured targets (the "fake"
 * lmv_connect() defers to this).  Idempotent: returns early if already
 * connected.  On failure, disconnects every target connected so far.
 */
553 int lmv_check_connect(struct obd_device *obd)
555 struct lmv_obd *lmv = &obd->u.lmv;
556 struct lmv_tgt_desc *tgt;
566 if (lmv->connected) {
567 lmv_init_unlock(lmv);
571 if (lmv->desc.ld_tgt_count == 0) {
572 lmv_init_unlock(lmv);
573 CERROR("%s: no targets configured.\n", obd->obd_name);
577 CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
578 lmv->cluuid.uuid, obd->obd_name);
580 LASSERT(lmv->tgts != NULL);
582 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
583 rc = lmv_connect_mdc(obd, tgt);
588 lmv_set_timeouts(obd);
/* Drop the extra export reference taken in lmv_connect(). */
589 class_export_put(lmv->exp);
591 easize = lmv_get_easize(lmv);
592 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
593 lmv_init_unlock(lmv);
/* Error path (label missing from extract): unwind targets 0..i-1. */
602 --lmv->desc.ld_active_tgt_count;
603 rc2 = obd_disconnect(tgt->ltd_exp);
605 CERROR("LMV target %s disconnect on "
606 "MDC idx %d: error %d\n",
607 tgt->ltd_uuid.uuid, i, rc2);
611 class_disconnect(lmv->exp);
612 lmv_init_unlock(lmv);
/*
 * Disconnect a single MDC target: propagate force/fail/no_recov flags,
 * remove the procfs symlink, finalize the FID client, drop observer
 * registration, obd_disconnect() the export, and mark the target
 * inactive.
 */
616 static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
619 struct proc_dir_entry *lmv_proc_dir;
621 struct lmv_obd *lmv = &obd->u.lmv;
622 struct obd_device *mdc_obd;
626 LASSERT(tgt != NULL);
627 LASSERT(obd != NULL);
629 mdc_obd = class_exp2obd(tgt->ltd_exp);
/* Hand our shutdown disposition flags down to the MDC. */
632 mdc_obd->obd_force = obd->obd_force;
633 mdc_obd->obd_fail = obd->obd_fail;
634 mdc_obd->obd_no_recov = obd->obd_no_recov;
638 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
640 struct proc_dir_entry *mdc_symlink;
642 mdc_symlink = lprocfs_srch(lmv_proc_dir, mdc_obd->obd_name);
644 lprocfs_remove(&mdc_symlink);
646 CERROR("/proc/fs/lustre/%s/%s/target_obds/%s missing\n",
647 obd->obd_type->typ_name, obd->obd_name,
/* NOTE(review): "finanize" below is a typo for "finalize" in a runtime
 * string; left untouched in this comment-only update. */
652 rc = obd_fid_fini(tgt->ltd_exp);
654 CERROR("Can't finanize fids factory\n");
656 CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
657 tgt->ltd_exp->exp_obd->obd_name,
658 tgt->ltd_exp->exp_obd->obd_uuid.uuid);
660 obd_register_observer(tgt->ltd_exp->exp_obd, NULL);
661 rc = obd_disconnect(tgt->ltd_exp);
/* Only complain if the target was supposed to be up. */
663 if (tgt->ltd_active) {
664 CERROR("Target %s disconnect error %d\n",
665 tgt->ltd_uuid.uuid, rc);
669 lmv_activate_target(lmv, tgt, 0);
/*
 * Disconnect the LMV export.  Underlying MDC targets are torn down only
 * on the final reference; the target_obds procfs directory is removed
 * afterwards.
 */
674 static int lmv_disconnect(struct obd_export *exp)
676 struct obd_device *obd = class_exp2obd(exp);
678 struct proc_dir_entry *lmv_proc_dir;
680 struct lmv_obd *lmv = &obd->u.lmv;
689 * Only disconnect the underlying layers on the final disconnect.
692 if (lmv->refcount != 0)
695 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
696 if (lmv->tgts[i].ltd_exp == NULL)
698 lmv_disconnect_mdc(obd, &lmv->tgts[i]);
702 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
704 lprocfs_remove(&lmv_proc_dir);
706 CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n",
707 obd->obd_type->typ_name, obd->obd_name);
713 * This is the case when no real connection is established by
714 * lmv_check_connect().
/* Balance the class_export_get() done in lmv_connect(). */
717 class_export_put(exp);
718 rc = class_disconnect(exp);
719 if (lmv->refcount == 0)
/*
 * ioctl dispatcher for LMV.  Commands that address one target
 * (IOC_OBD_STATFS, quotactl by index/UUID, changelog by MDT index) are
 * routed to that target; unknown commands fall through to the loop at
 * the bottom that broadcasts to every connected target.
 */
724 static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
725 int len, void *karg, void *uarg)
727 struct obd_device *obddev = class_exp2obd(exp);
728 struct lmv_obd *lmv = &obddev->u.lmv;
732 int count = lmv->desc.ld_tgt_count;
739 case IOC_OBD_STATFS: {
740 struct obd_ioctl_data *data = karg;
741 struct obd_device *mdc_obd;
742 struct obd_statfs stat_buf = {0};
/* Target index arrives in inlbuf2; bounds-check against count. */
745 memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
746 if ((index >= count))
749 if (!lmv->tgts[index].ltd_active)
752 mdc_obd = class_exp2obd(lmv->tgts[index].ltd_exp);
/* Copy target name then statfs result out to userspace. */
757 if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
758 min((int) data->ioc_plen2,
759 (int) sizeof(struct obd_uuid))))
762 rc = obd_statfs(mdc_obd, &stat_buf,
763 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
767 if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
768 min((int) data->ioc_plen1,
769 (int) sizeof(stat_buf))))
773 case OBD_IOC_QUOTACTL: {
774 struct if_quotactl *qctl = karg;
775 struct lmv_tgt_desc *tgt = NULL;
776 struct obd_quotactl *oqctl;
/* Pick a target either by MDT index or by UUID scan. */
778 if (qctl->qc_valid == QC_MDTIDX) {
779 if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
782 tgt = &lmv->tgts[qctl->qc_idx];
785 } else if (qctl->qc_valid == QC_UUID) {
786 for (i = 0; i < count; i++) {
788 if (!obd_uuid_equals(&tgt->ltd_uuid,
792 if (tgt->ltd_exp == NULL)
804 LASSERT(tgt && tgt->ltd_exp);
805 OBD_ALLOC_PTR(oqctl);
809 QCTL_COPY(oqctl, qctl);
810 rc = obd_quotactl(tgt->ltd_exp, oqctl);
812 QCTL_COPY(qctl, oqctl);
813 qctl->qc_valid = QC_MDTIDX;
814 qctl->obd_uuid = tgt->ltd_uuid;
819 case OBD_IOC_CHANGELOG_SEND:
820 case OBD_IOC_CHANGELOG_CLEAR: {
821 struct ioc_changelog *icc = karg;
823 if (icc->icc_mdtindex >= count)
826 rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex].ltd_exp,
827 sizeof(*icc), icc, NULL);
/* Connect flags are identical across targets; ask target 0. */
830 case LL_IOC_GET_CONNECT_FLAGS: {
831 rc = obd_iocontrol(cmd, lmv->tgts[0].ltd_exp, len, karg, uarg);
/* Default: broadcast the ioctl to every connected target. */
836 for (i = 0; i < count; i++) {
838 struct obd_device *mdc_obd;
840 if (lmv->tgts[i].ltd_exp == NULL)
842 /* ll_umount_begin() sets force flag but for lmv, not
843 * mdc. Let's pass it through */
844 mdc_obd = class_exp2obd(lmv->tgts[i].ltd_exp);
845 mdc_obd->obd_force = obddev->obd_force;
846 err = obd_iocontrol(cmd, lmv->tgts[i].ltd_exp, len,
848 if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
851 if (lmv->tgts[i].ltd_active) {
852 CERROR("error: iocontrol MDC %s on MDT"
853 "idx %d cmd %x: err = %d\n",
854 lmv->tgts[i].ltd_uuid.uuid,
/*
 * lmv_all_chars_policy(): hash-by-name placement — body missing from
 * this extract; only the signature line survives.
 */
869 static int lmv_all_chars_policy(int count, const char *name,
/*
 * lmv_nid_policy(): derive a target index from our own NID so that
 * objects created by one client cluster on one MDS by default.
 */
880 static int lmv_nid_policy(struct lmv_obd *lmv)
882 struct obd_import *imp;
886 * XXX: To get nid we assume that underlying obd device is mdc.
/* Fold the 64-bit self NID into itself, then mod by target count. */
888 imp = class_exp2cliimp(lmv->tgts[0].ltd_exp);
889 id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32);
890 return id % lmv->desc.ld_tgt_count;
/*
 * Dispatch on the configured placement policy to pick an MDS index for
 * a new object.  Unknown policies are reported as errors.
 */
893 static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
894 placement_policy_t placement)
897 case PLACEMENT_CHAR_POLICY:
898 return lmv_all_chars_policy(lmv->desc.ld_tgt_count,
900 op_data->op_namelen);
901 case PLACEMENT_NID_POLICY:
902 return lmv_nid_policy(lmv);
908 CERROR("Unsupported placement policy %x\n", placement);
/*
 * (Tail of original comment follows on the next numbered line.)
 */
913 * This is _inode_ placement policy function (not name).
915 static int lmv_placement_policy(struct obd_device *obd,
916 struct md_op_data *op_data,
919 struct lmv_obd *lmv = &obd->u.lmv;
920 struct lmv_object *obj;
924 LASSERT(mds != NULL);
/* Single-target setup: trivially place on MDS 0 (early-out branch;
 * body not visible in this extract). */
926 if (lmv->desc.ld_tgt_count == 1) {
932 * Allocate new fid on target according to operation type and parent
935 obj = lmv_object_find(obd, &op_data->op_fid1);
936 if (obj != NULL || op_data->op_name == NULL ||
937 op_data->op_opc != LUSTRE_OPC_MKDIR) {
939 * Allocate fid for non-dir or for null name or for case parent
946 * If we have this flag turned on, and we see that
947 * parent dir is split, this means, that caller did not
948 * notice split yet. This is race and we would like to
949 * let caller know that.
951 if (op_data->op_bias & MDS_CHECK_SPLIT)
956 * Allocate new fid on same mds where parent fid is located and
957 * where operation will be sent. In case of split dir, ->op_fid1
958 * and ->op_mds here will contain fid and mds of slave directory
959 * object (assigned by caller).
961 *mds = op_data->op_mds;
965 * Parent directory is not split and we want to create a
966 * directory in it. Let's calculate where to place it according
967 * to operation data @op_data.
969 *mds = lmv_choose_mds(lmv, op_data, lmv->lmv_placement);
974 CERROR("Can't choose MDS, err = %d\n", rc);
976 LASSERT(*mds < lmv->desc.ld_tgt_count);
/*
 * Allocate a new FID from target @mds.  Serialized per-target by
 * ltd_fid_mutex so sequence allocation and FLD update stay atomic.
 */
982 int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
985 struct lmv_tgt_desc *tgt;
989 tgt = lmv_get_target(lmv, mds);
992 * New seq alloc and FLD setup should be atomic. Otherwise we may find
993 * on server that seq in new allocated fid is not yet known.
995 cfs_mutex_lock(&tgt->ltd_fid_mutex);
997 if (!tgt->ltd_active)
998 GOTO(out, rc = -ENODEV);
1001 * Asking underlaying tgt layer to allocate new fid.
1003 rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
1005 LASSERT(fid_is_sane(fid));
1011 cfs_mutex_unlock(&tgt->ltd_fid_mutex);
/*
 * Public FID allocation entry point: choose a target via the placement
 * policy, then allocate the FID from it via __lmv_fid_alloc().
 */
1015 int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
1016 struct md_op_data *op_data)
1018 struct obd_device *obd = class_exp2obd(exp);
1019 struct lmv_obd *lmv = &obd->u.lmv;
1024 LASSERT(op_data != NULL);
1025 LASSERT(fid != NULL);
1027 rc = lmv_placement_policy(obd, op_data, &mds);
1029 CERROR("Can't get target for allocating fid, "
1034 rc = __lmv_fid_alloc(lmv, fid, mds);
1036 CERROR("Can't alloc new fid, rc %d\n", rc);
/*
 * Drop the local LMV object associated with @fid, if any, when the FID
 * is deleted.
 */
1043 static int lmv_fid_delete(struct obd_export *exp, const struct lu_fid *fid)
1046 LASSERT(exp != NULL && fid != NULL);
1047 if (lmv_object_delete(exp, fid)) {
1048 CDEBUG(D_INODE, "Object "DFID" is destroyed.\n",
/*
 * Device setup from a config record: validate the lmv_desc buffer,
 * allocate the (statically sized) target and connect-data arrays,
 * initialize locks/counters, set up the object manager, procfs entries
 * and the FLD client.  Error paths free in reverse order.
 */
1054 static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
1056 struct lmv_obd *lmv = &obd->u.lmv;
1057 struct lprocfs_static_vars lvars;
1058 struct lmv_desc *desc;
1063 if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
1064 CERROR("LMV setup requires a descriptor\n");
1068 desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
1069 if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
1070 CERROR("Lmv descriptor size wrong: %d > %d\n",
1071 (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
/* Arrays are sized for the compile-time maximum target count. */
1075 lmv->tgts_size = LMV_MAX_TGT_COUNT * sizeof(struct lmv_tgt_desc);
1077 OBD_ALLOC(lmv->tgts, lmv->tgts_size);
1078 if (lmv->tgts == NULL)
1081 for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
1082 cfs_mutex_init(&lmv->tgts[i].ltd_fid_mutex);
1083 lmv->tgts[i].ltd_idx = i;
1086 lmv->datas_size = LMV_MAX_TGT_COUNT * sizeof(struct obd_connect_data);
1088 OBD_ALLOC(lmv->datas, lmv->datas_size);
1089 if (lmv->datas == NULL)
1090 GOTO(out_free_tgts, rc = -ENOMEM);
1092 obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
1093 lmv->desc.ld_tgt_count = 0;
1094 lmv->desc.ld_active_tgt_count = 0;
1095 lmv->max_cookiesize = 0;
1096 lmv->max_def_easize = 0;
1097 lmv->max_easize = 0;
/* Default inode placement policy: hash of the name characters. */
1098 lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
1100 cfs_spin_lock_init(&lmv->lmv_lock);
1101 cfs_mutex_init(&lmv->init_mutex);
1103 rc = lmv_object_setup(obd);
1105 CERROR("Can't setup LMV object manager, error %d.\n", rc);
1106 GOTO(out_free_datas, rc);
1109 lprocfs_lmv_init_vars(&lvars);
1110 lprocfs_obd_setup(obd, lvars.obd_vars);
/* procfs failures are non-fatal: warn and continue. */
1113 rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
1114 0444, &lmv_proc_target_fops, obd);
1116 CWARN("%s: error adding LMV target_obd file: rc = %d\n",
1120 rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
1121 LUSTRE_CLI_FLD_HASH_DHT);
1123 CERROR("Can't init FLD, err %d\n", rc);
1124 GOTO(out_free_datas, rc);
1130 OBD_FREE(lmv->datas, lmv->datas_size);
1133 OBD_FREE(lmv->tgts, lmv->tgts_size);
/*
 * Device teardown: release the FLD client, the object manager, and the
 * arrays allocated in lmv_setup().
 */
1138 static int lmv_cleanup(struct obd_device *obd)
1140 struct lmv_obd *lmv = &obd->u.lmv;
1143 fld_client_fini(&lmv->lmv_fld);
1144 lmv_object_cleanup(obd);
1145 OBD_FREE(lmv->datas, lmv->datas_size);
1146 OBD_FREE(lmv->tgts, lmv->tgts_size);
/*
 * Handle runtime config records.  The visible case (its LCFG_ADD_MDC
 * label is missing from this extract) parses a target UUID from buffer
 * 1 and adds it as a new MDC target; anything else is rejected.
 */
1151 static int lmv_process_config(struct obd_device *obd, obd_count len, void *buf)
1153 struct lustre_cfg *lcfg = buf;
1154 struct obd_uuid tgt_uuid;
1158 switch(lcfg->lcfg_command) {
/* UUID string must fit the fixed obd_uuid buffer. */
1160 if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(tgt_uuid.uuid))
1161 GOTO(out, rc = -EINVAL);
1163 obd_str2uuid(&tgt_uuid, lustre_cfg_string(lcfg, 1));
1164 rc = lmv_add_target(obd, &tgt_uuid);
1167 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
1168 GOTO(out, rc = -EINVAL);
/*
 * Aggregate statfs over all targets: query each connected MDC and sum
 * block/file counts into @osfs.  First iteration presumably copies the
 * whole struct (that branch is missing from this extract).
 */
1175 static int lmv_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1176 __u64 max_age, __u32 flags)
1178 struct lmv_obd *lmv = &obd->u.lmv;
1179 struct obd_statfs *temp;
1184 rc = lmv_check_connect(obd);
1188 OBD_ALLOC(temp, sizeof(*temp));
1192 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1193 if (lmv->tgts[i].ltd_exp == NULL)
1196 rc = obd_statfs(lmv->tgts[i].ltd_exp->exp_obd, temp,
1199 CERROR("can't stat MDS #%d (%s), error %d\n", i,
1200 lmv->tgts[i].ltd_exp->exp_obd->obd_name,
1202 GOTO(out_free_temp, rc);
/* Accumulate capacity and inode counters across targets. */
1207 osfs->os_bavail += temp->os_bavail;
1208 osfs->os_blocks += temp->os_blocks;
1209 osfs->os_ffree += temp->os_ffree;
1210 osfs->os_files += temp->os_files;
1216 OBD_FREE(temp, sizeof(*temp));
/*
 * md_getstatus passthrough: the filesystem root always lives on target
 * 0, so the call is forwarded there after ensuring we are connected.
 */
1220 static int lmv_getstatus(struct obd_export *exp,
1222 struct obd_capa **pc)
1224 struct obd_device *obd = exp->exp_obd;
1225 struct lmv_obd *lmv = &obd->u.lmv;
1229 rc = lmv_check_connect(obd);
1233 rc = md_getstatus(lmv->tgts[0].ltd_exp, fid, pc);
/*
 * md_getxattr passthrough: locate the target owning @fid and forward
 * the xattr read to it unchanged.
 */
1237 static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
1238 struct obd_capa *oc, obd_valid valid, const char *name,
1239 const char *input, int input_size, int output_size,
1240 int flags, struct ptlrpc_request **request)
1242 struct obd_device *obd = exp->exp_obd;
1243 struct lmv_obd *lmv = &obd->u.lmv;
1244 struct lmv_tgt_desc *tgt;
1248 rc = lmv_check_connect(obd);
1252 tgt = lmv_find_target(lmv, fid);
1254 RETURN(PTR_ERR(tgt));
1256 rc = md_getxattr(tgt->ltd_exp, fid, oc, valid, name, input,
1257 input_size, output_size, flags, request);
/*
 * md_setxattr passthrough: locate the target owning @fid and forward
 * the xattr write to it unchanged.
 */
1262 static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
1263 struct obd_capa *oc, obd_valid valid, const char *name,
1264 const char *input, int input_size, int output_size,
1265 int flags, __u32 suppgid,
1266 struct ptlrpc_request **request)
1268 struct obd_device *obd = exp->exp_obd;
1269 struct lmv_obd *lmv = &obd->u.lmv;
1270 struct lmv_tgt_desc *tgt;
1274 rc = lmv_check_connect(obd);
1278 tgt = lmv_find_target(lmv, fid);
1280 RETURN(PTR_ERR(tgt));
1282 rc = md_setxattr(tgt->ltd_exp, fid, oc, valid, name, input,
1283 input_size, output_size, flags, suppgid,
/*
 * getattr with split-directory support: forward to the owning target,
 * then — if the object is a split directory — add up the sizes of all
 * slave stripes into the reply body.
 */
1289 static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
1290 struct ptlrpc_request **request)
1292 struct obd_device *obd = exp->exp_obd;
1293 struct lmv_obd *lmv = &obd->u.lmv;
1294 struct lmv_tgt_desc *tgt;
1295 struct lmv_object *obj;
1300 rc = lmv_check_connect(obd);
1304 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1306 RETURN(PTR_ERR(tgt));
/* Caller only wants the MDT index: answer locally, no RPC. */
1308 if (op_data->op_valid & OBD_MD_MDTIDX) {
1309 op_data->op_mds = tgt->ltd_idx;
1313 rc = md_getattr(tgt->ltd_exp, op_data, request);
1317 obj = lmv_object_find_lock(obd, &op_data->op_fid1);
1319 CDEBUG(D_INODE, "GETATTR for "DFID" %s\n", PFID(&op_data->op_fid1),
1320 obj ? "(split)" : "");
1323 * If object is split, then we loop over all the slaves and gather size
1324 * attribute. In ideal world we would have to gather also mds field from
1325 * all slaves, as object is spread over the cluster and this is
1326 * definitely interesting information and it is not good to loss it,
1330 struct mdt_body *body;
1332 if (*request == NULL) {
1333 lmv_object_put(obj);
1337 body = req_capsule_server_get(&(*request)->rq_pill,
1339 LASSERT(body != NULL);
1341 for (i = 0; i < obj->lo_objcount; i++) {
1342 if (lmv->tgts[i].ltd_exp == NULL) {
1343 CWARN("%s: NULL export for %d\n",
1349 * Skip master object.
1351 if (lu_fid_eq(&obj->lo_fid, &obj->lo_stripes[i].ls_fid))
/* Accumulate slave stripe sizes into the master's reply. */
1354 body->size += obj->lo_stripes[i].ls_size;
1357 lmv_object_put_unlock(obj);
/*
 * Apply the lock iterator @it to @fid's locks on every target, since
 * with CMD an object's lookup and update locks may live on different
 * MDSes (see the original comment below).
 */
1363 static int lmv_change_cbdata(struct obd_export *exp, const struct lu_fid *fid,
1364 ldlm_iterator_t it, void *data)
1366 struct obd_device *obd = exp->exp_obd;
1367 struct lmv_obd *lmv = &obd->u.lmv;
1372 rc = lmv_check_connect(obd);
1376 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1379 * With CMD every object can have two locks in different namespaces:
1380 * lookup lock in space of mds storing direntry and update/open lock in
1381 * space of mds storing inode.
1383 for (i = 0; i < lmv->desc.ld_tgt_count; i++)
1384 md_change_cbdata(lmv->tgts[i].ltd_exp, fid, it, data);
/*
 * Like lmv_change_cbdata() but searching: query each target for a lock
 * on @fid, presumably stopping at the first hit (the break is not
 * visible in this extract).
 */
1389 static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
1390 ldlm_iterator_t it, void *data)
1392 struct obd_device *obd = exp->exp_obd;
1393 struct lmv_obd *lmv = &obd->u.lmv;
1398 rc = lmv_check_connect(obd);
1402 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1405 * With CMD every object can have two locks in different namespaces:
1406 * lookup lock in space of mds storing direntry and update/open lock in
1407 * space of mds storing inode.
1409 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1410 rc = md_find_cbdata(lmv->tgts[i].ltd_exp, fid, it, data);
/*
 * md_close passthrough: forward the close of op_fid1 to the target that
 * owns it.
 */
1419 static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
1420 struct md_open_data *mod, struct ptlrpc_request **request)
1422 struct obd_device *obd = exp->exp_obd;
1423 struct lmv_obd *lmv = &obd->u.lmv;
1424 struct lmv_tgt_desc *tgt;
1428 rc = lmv_check_connect(obd);
1432 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1434 RETURN(PTR_ERR(tgt));
1436 CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
1437 rc = md_close(tgt->ltd_exp, op_data, mod, request);
/*
 * (Tail of original comment follows on the next two numbered lines.)
 */
1442 * Called in the case MDS returns -ERESTART on create on open, what means that
1443 * directory is split and its LMV presentation object has to be updated.
1445 int lmv_handle_split(struct obd_export *exp, const struct lu_fid *fid)
1447 struct obd_device *obd = exp->exp_obd;
1448 struct lmv_obd *lmv = &obd->u.lmv;
1449 struct ptlrpc_request *req = NULL;
1450 struct lmv_tgt_desc *tgt;
1451 struct lmv_object *obj;
1452 struct lustre_md md;
1453 struct md_op_data *op_data;
/* Ask the server for the directory's EA/MEA so the local split state
 * can be rebuilt. */
1460 mealen = lmv_get_easize(lmv);
1462 valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA | OBD_MD_MEA;
1464 tgt = lmv_find_target(lmv, fid);
1466 RETURN(PTR_ERR(tgt));
1469 * Time to update mea of parent fid.
1472 OBD_ALLOC_PTR(op_data);
1473 if (op_data == NULL)
1476 op_data->op_fid1 = *fid;
1477 op_data->op_mode = mealen;
1478 op_data->op_valid = valid;
1480 rc = md_getattr(tgt->ltd_exp, op_data, &req);
1481 OBD_FREE_PTR(op_data);
1483 CERROR("md_getattr() failed, error %d\n", rc);
1487 rc = md_get_lustre_md(tgt->ltd_exp, req, NULL, exp, &md);
1489 CERROR("md_get_lustre_md() failed, error %d\n", rc);
/* No MEA in the reply: directory is not actually split. */
1494 GOTO(cleanup, rc = -ENODATA);
1496 obj = lmv_object_create(exp, fid, md.mea);
1500 lmv_object_put(obj);
1502 obd_free_memmd(exp, (void *)&md.mea);
1506 ptlrpc_req_finished(req);
/*
 * Create a regular file/directory.  For a split parent, the slave
 * stripe owning the name (by hash) becomes the effective parent; a new
 * FID is allocated per placement policy and md_create() is forwarded.
 * -ERESTART from the server means the parent split underneath us: the
 * local object is refreshed via lmv_handle_split() and (per the retry
 * comment) the operation repeated.
 */
1510 int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
1511 const void *data, int datalen, int mode, __u32 uid,
1512 __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
1513 struct ptlrpc_request **request)
1515 struct obd_device *obd = exp->exp_obd;
1516 struct lmv_obd *lmv = &obd->u.lmv;
1517 struct lmv_tgt_desc *tgt;
1518 struct lmv_object *obj;
1524 rc = lmv_check_connect(obd);
/* No active target: cannot create anything. */
1528 if (!lmv->desc.ld_active_tgt_count)
1534 obj = lmv_object_find(obd, &op_data->op_fid1);
/* Split parent: redirect to the slave stripe chosen by name hash. */
1536 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
1537 op_data->op_name, op_data->op_namelen);
1538 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
1539 op_data->op_bias &= ~MDS_CHECK_SPLIT;
1540 op_data->op_mds = obj->lo_stripes[sidx].ls_mds;
1541 tgt = lmv_get_target(lmv, op_data->op_mds);
1542 lmv_object_put(obj);
/* Unsplit parent: normal FID->target lookup; ask server to flag
 * a split we have not noticed yet. */
1544 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1545 op_data->op_bias |= MDS_CHECK_SPLIT;
1546 op_data->op_mds = tgt->ltd_idx;
1550 RETURN(PTR_ERR(tgt));
1552 rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
1553 if (rc == -ERESTART)
1558 CDEBUG(D_INODE, "CREATE '%*s' on "DFID" -> mds #%x\n",
1559 op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
1562 op_data->op_flags |= MF_MDC_CANCEL_FID1;
1563 rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
1564 cap_effective, rdev, request);
1566 if (*request == NULL)
1568 CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
1569 } else if (rc == -ERESTART) {
1570 LASSERT(*request != NULL);
1571 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
1572 "Got -ERESTART during create!\n");
1573 ptlrpc_req_finished(*request);
1577 * Directory got split. Time to update local object and repeat
1578 * the request with proper MDS.
1580 rc = lmv_handle_split(exp, &op_data->op_fid1);
1582 rc = lmv_allocate_slaves(obd, &op_data->op_fid1,
1583 op_data, &op_data->op_fid2);
/*
 * md_done_writing passthrough: forward to the target owning op_fid1.
 */
1592 static int lmv_done_writing(struct obd_export *exp,
1593 struct md_op_data *op_data,
1594 struct md_open_data *mod)
1596 struct obd_device *obd = exp->exp_obd;
1597 struct lmv_obd *lmv = &obd->u.lmv;
1598 struct lmv_tgt_desc *tgt;
1602 rc = lmv_check_connect(obd);
1606 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1608 RETURN(PTR_ERR(tgt));
1610 rc = md_done_writing(tgt->ltd_exp, op_data, mod);
/*
 * Enqueue a lock on every slave stripe of a split directory described by
 * op_data->op_mea1: one md_enqueue() per mea entry, storing the handle in
 * lockh[i].  On any failure all already-taken slave locks are dropped in
 * the cleanup path. (Excerpt: some lines elided.)
 */
1615 lmv_enqueue_slaves(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1616 struct lookup_intent *it, struct md_op_data *op_data,
1617 struct lustre_handle *lockh, void *lmm, int lmmsize)
1619 struct obd_device *obd = exp->exp_obd;
1620 struct lmv_obd *lmv = &obd->u.lmv;
1621 struct lmv_stripe_md *mea = op_data->op_mea1;
1622 struct md_op_data *op_data2;
1623 struct lmv_tgt_desc *tgt;
1628 OBD_ALLOC_PTR(op_data2);
1629 if (op_data2 == NULL)
1632 LASSERT(mea != NULL);
/* One enqueue per slave stripe; scratch op_data2 is reused each pass. */
1633 for (i = 0; i < mea->mea_count; i++) {
1634 memset(op_data2, 0, sizeof(*op_data2));
1635 op_data2->op_fid1 = mea->mea_ids[i];
1636 op_data2->op_bias = 0;
1638 tgt = lmv_find_target(lmv, &op_data2->op_fid1);
1640 GOTO(cleanup, rc = PTR_ERR(tgt));
1642 if (tgt->ltd_exp == NULL)
1645 rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data2,
1646 lockh + i, lmm, lmmsize, NULL, 0);
1648 CDEBUG(D_INODE, "Take lock on slave "DFID" -> %d/%d\n",
1649 PFID(&mea->mea_ids[i]), rc, it->d.lustre.it_status);
/* Release the intent's request before checking the intent status. */
1654 if (it->d.lustre.it_data) {
1655 struct ptlrpc_request *req;
1656 req = (struct ptlrpc_request *)it->d.lustre.it_data;
1657 ptlrpc_req_finished(req);
1660 if (it->d.lustre.it_status)
1661 GOTO(cleanup, rc = it->d.lustre.it_status);
1666 OBD_FREE_PTR(op_data2);
1670 * Drop all taken locks.
/* Error path: undo every successful slave enqueue so no lock leaks. */
1673 if (lockh[i].cookie)
1674 ldlm_lock_decref(lockh + i, einfo->ei_mode);
1675 lockh[i].cookie = 0;
/*
 * Follow up an enqueue whose reply carries OBD_MD_MDS: the name is a
 * cross-ref entry living on another MDT.  Keep the parent LOOKUP lock in
 * @plock, redo the enqueue against the MDT owning the real inode with
 * MDS_CROSS_REF set, then drop the parent lock. (Excerpt: some lines
 * elided; fid1 is taken from the reply body in the elided part.)
 */
1682 lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1683 struct lookup_intent *it, struct md_op_data *op_data,
1684 struct lustre_handle *lockh, void *lmm, int lmmsize,
1685 int extra_lock_flags)
1687 struct ptlrpc_request *req = it->d.lustre.it_data;
1688 struct obd_device *obd = exp->exp_obd;
1689 struct lmv_obd *lmv = &obd->u.lmv;
1690 struct lustre_handle plock;
1691 struct lmv_tgt_desc *tgt;
1692 struct md_op_data *rdata;
1694 struct mdt_body *body;
1699 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1700 LASSERT(body != NULL);
/* Not a cross-ref reply: nothing to chase, leave the result as-is. */
1702 if (!(body->valid & OBD_MD_MDS))
1705 CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n",
1706 LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1));
1709 * We got LOOKUP lock, but we really need attrs.
1711 pmode = it->d.lustre.it_lock_mode;
1712 LASSERT(pmode != 0);
/* Stash the parent lock handle; the intent is reset for the redo. */
1713 memcpy(&plock, lockh, sizeof(plock));
1714 it->d.lustre.it_lock_mode = 0;
1715 it->d.lustre.it_data = NULL;
1718 it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
1719 ptlrpc_req_finished(req);
1721 tgt = lmv_find_target(lmv, &fid1);
1723 GOTO(out, rc = PTR_ERR(tgt));
1725 OBD_ALLOC_PTR(rdata);
1727 GOTO(out, rc = -ENOMEM);
1729 rdata->op_fid1 = fid1;
1730 rdata->op_bias = MDS_CROSS_REF;
1732 rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh,
1733 lmm, lmmsize, NULL, extra_lock_flags);
1734 OBD_FREE_PTR(rdata);
/* Always release the parent LOOKUP lock taken by the first enqueue. */
1737 ldlm_lock_decref(&plock, pmode);
/*
 * Top-level md_enqueue entry for LMV.  IT_UNLINK on a split directory is
 * fanned out to all slaves via lmv_enqueue_slaves(); otherwise the name is
 * hashed to a stripe (if the parent is split) or the fid's own target is
 * used, and for IT_OPEN a cross-ref reply is chased with
 * lmv_enqueue_remote(). (Excerpt: some lines elided.)
 */
1742 lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1743 struct lookup_intent *it, struct md_op_data *op_data,
1744 struct lustre_handle *lockh, void *lmm, int lmmsize,
1745 struct ptlrpc_request **req, int extra_lock_flags)
1747 struct obd_device *obd = exp->exp_obd;
1748 struct lmv_obd *lmv = &obd->u.lmv;
1749 struct lmv_tgt_desc *tgt;
1750 struct lmv_object *obj;
1755 rc = lmv_check_connect(obd);
1759 CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n",
1760 LL_IT2STR(it), PFID(&op_data->op_fid1));
/* Split dir + unlink intent: lock every slave stripe instead. */
1762 if (op_data->op_mea1 && it && it->it_op == IT_UNLINK) {
1763 rc = lmv_enqueue_slaves(exp, einfo, it, op_data,
1764 lockh, lmm, lmmsize);
1768 obj = lmv_object_find(obd, &op_data->op_fid1);
/* Split parent: hash the name to pick the owning stripe/MDT. */
1769 if (obj && op_data->op_namelen) {
1770 sidx = raw_name2idx(obj->lo_hashtype,
1772 (char *)op_data->op_name,
1773 op_data->op_namelen);
1774 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
1775 tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
1777 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1780 lmv_object_put(obj);
1783 RETURN(PTR_ERR(tgt));
1785 CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n",
1786 LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
1788 rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data, lockh,
1789 lmm, lmmsize, req, extra_lock_flags);
/* Open may have landed on a cross-ref entry; chase the real MDT. */
1791 if (rc == 0 && it && it->it_op == IT_OPEN) {
1792 rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh,
1793 lmm, lmmsize, extra_lock_flags);
/*
 * getattr-by-name through LMV.  For a split parent the name picks the
 * stripe (and OBD_MD_FLCKSPLIT is cleared, the server need not re-check);
 * otherwise the fid's target is used with OBD_MD_FLCKSPLIT set.  A reply
 * flagged OBD_MD_MDS means the entry is a cross-ref: re-issue the getattr
 * with OBD_MD_FLCROSSREF and no name against the owning MDT.  -ERESTART
 * means the directory split underneath us: refresh the local object via
 * lmv_handle_split() and (in elided code) retry. (Excerpt: some lines
 * elided.)
 */
1799 lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
1800 struct ptlrpc_request **request)
1802 struct ptlrpc_request *req = NULL;
1803 struct obd_device *obd = exp->exp_obd;
1804 struct lmv_obd *lmv = &obd->u.lmv;
1805 struct lu_fid rid = op_data->op_fid1;
1806 struct lmv_tgt_desc *tgt;
1807 struct mdt_body *body;
1808 struct lmv_object *obj;
1809 obd_valid valid = op_data->op_valid;
1815 rc = lmv_check_connect(obd);
1822 obj = lmv_object_find(obd, &rid);
1824 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
1825 op_data->op_name, op_data->op_namelen);
1826 rid = obj->lo_stripes[sidx].ls_fid;
1827 tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
1828 op_data->op_mds = obj->lo_stripes[sidx].ls_mds;
1829 valid &= ~OBD_MD_FLCKSPLIT;
1830 lmv_object_put(obj);
1832 tgt = lmv_find_target(lmv, &rid);
1833 valid |= OBD_MD_FLCKSPLIT;
1834 op_data->op_mds = tgt->ltd_idx;
1837 RETURN(PTR_ERR(tgt));
1839 CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" - "DFID" -> mds #%d\n",
1840 op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
1841 PFID(&rid), tgt->ltd_idx);
1843 op_data->op_valid = valid;
1844 op_data->op_fid1 = rid;
1845 rc = md_getattr_name(tgt->ltd_exp, op_data, request);
1847 body = req_capsule_server_get(&(*request)->rq_pill,
1849 LASSERT(body != NULL);
/* Cross-ref reply: repeat the getattr on the MDT that owns the inode. */
1851 if (body->valid & OBD_MD_MDS) {
1853 CDEBUG(D_INODE, "Request attrs for "DFID"\n",
1856 tgt = lmv_find_target(lmv, &rid);
1858 ptlrpc_req_finished(*request);
1859 RETURN(PTR_ERR(tgt));
1862 op_data->op_fid1 = rid;
1863 op_data->op_valid |= OBD_MD_FLCROSSREF;
1864 op_data->op_namelen = 0;
1865 op_data->op_name = NULL;
1866 rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
1867 ptlrpc_req_finished(*request);
1870 } else if (rc == -ERESTART) {
1871 LASSERT(*request != NULL);
1872 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
1873 "Got -ERESTART during getattr!\n");
1874 ptlrpc_req_finished(*request);
1878 * Directory got split. Time to update local object and repeat
1879 * the request with proper MDS.
1881 rc = lmv_handle_split(exp, &rid);
/*
 * Map a single MF_MDC_CANCEL_FIDx flag to the matching fid field of
 * @op_data (op_fid1..op_fid4).  (Excerpt: the final "else" arm of the
 * conditional chain is elided here.)
 */
1888 #define md_op_data_fid(op_data, fl) \
1889 (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
1890 fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
1891 fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
1892 fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
/*
 * Early-cancel unused locks on every slave stripe of the split object
 * selected by @flag, except the stripe living on the operation target
 * @op_tgt (that one is cancelled later by the underlying MDC as part of
 * the operation itself). (Excerpt: some lines elided.)
 */
1895 static int lmv_early_cancel_slaves(struct obd_export *exp,
1896 struct md_op_data *op_data, int op_tgt,
1897 ldlm_mode_t mode, int bits, int flag)
1899 struct obd_device *obd = exp->exp_obd;
1900 struct lmv_obd *lmv = &obd->u.lmv;
1901 ldlm_policy_data_t policy = {{0}};
1902 struct lu_fid *op_fid;
1903 struct lu_fid *st_fid;
1904 struct lmv_tgt_desc *tgt;
1905 struct lmv_object *obj;
1910 op_fid = md_op_data_fid(op_data, flag);
1911 if (!fid_is_sane(op_fid))
1914 obj = lmv_object_find(obd, op_fid);
1918 policy.l_inodebits.bits = bits;
1919 for (i = 0; i < obj->lo_objcount; i++) {
1920 tgt = lmv_get_target(lmv, obj->lo_stripes[i].ls_mds);
1921 st_fid = &obj->lo_stripes[i].ls_fid;
/* Skip the operation target: its locks are cancelled by the op itself. */
1922 if (op_tgt != tgt->ltd_idx) {
1923 CDEBUG(D_INODE, "EARLY_CANCEL slave "DFID" -> mds #%d\n",
1924 PFID(st_fid), tgt->ltd_idx);
1925 rc = md_cancel_unused(tgt->ltd_exp, st_fid, &policy,
1926 mode, LCF_ASYNC, NULL);
1928 GOTO(out_put_obj, rc);
1931 "EARLY_CANCEL skip operation target %d on "DFID"\n",
1932 op_tgt, PFID(st_fid));
1934 * Do not cancel locks for operation target, they will
1935 * be handled later in underlaying layer when calling
1936 * function we run on behalf of.
1939 op_data->op_flags |= flag;
1944 lmv_object_put(obj);
/*
 * Early-cancel unused locks for the fid selected by @flag.  Split objects
 * are delegated to lmv_early_cancel_slaves(); plain objects get a single
 * async md_cancel_unused() on their target — unless that target is the
 * operation target @op_tgt, in which case cancellation is deferred to the
 * operation and only the flag is recorded. (Excerpt: some lines elided.)
 */
1948 static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
1949 int op_tgt, ldlm_mode_t mode, int bits, int flag)
1951 struct lu_fid *fid = md_op_data_fid(op_data, flag);
1952 struct obd_device *obd = exp->exp_obd;
1953 struct lmv_obd *lmv = &obd->u.lmv;
1954 struct lmv_tgt_desc *tgt;
1955 ldlm_policy_data_t policy = {{0}};
1956 struct lmv_object *obj;
1960 if (!fid_is_sane(fid))
1963 obj = lmv_object_find(obd, fid);
1965 rc = lmv_early_cancel_slaves(exp, op_data, op_tgt, mode,
1967 lmv_object_put(obj);
1969 tgt = lmv_find_target(lmv, fid);
1971 RETURN(PTR_ERR(tgt));
1973 if (tgt->ltd_idx != op_tgt) {
1974 CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
1975 policy.l_inodebits.bits = bits;
1976 rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
1977 mode, LCF_ASYNC, NULL);
1980 "EARLY_CANCEL skip operation target %d on "DFID"\n",
1982 op_data->op_flags |= flag;
1991 * llite passes fid of an target inode in op_data->op_fid1 and id of directory in
/*
 * Hard link through LMV: pick the MDT owning the (possibly split) parent
 * dir fid2, early-cancel UPDATE locks on the source inode (fid1), then
 * forward md_link().  -ERESTART means the parent split mid-flight; refresh
 * via lmv_handle_split() and (in elided code) retry. (Excerpt: some lines
 * elided.)
 */
1994 static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
1995 struct ptlrpc_request **request)
1997 struct obd_device *obd = exp->exp_obd;
1998 struct lmv_obd *lmv = &obd->u.lmv;
1999 struct lmv_tgt_desc *tgt;
2000 struct lmv_object *obj;
2007 rc = lmv_check_connect(obd);
2014 LASSERT(op_data->op_namelen != 0);
2016 CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n",
2017 PFID(&op_data->op_fid2), op_data->op_namelen,
2018 op_data->op_name, PFID(&op_data->op_fid1));
/* Split parent: hash the new name to the owning stripe of fid2. */
2020 obj = lmv_object_find(obd, &op_data->op_fid2);
2022 sidx = raw_name2idx(obj->lo_hashtype,
2025 op_data->op_namelen);
2026 op_data->op_fid2 = obj->lo_stripes[sidx].ls_fid;
2027 mds = obj->lo_stripes[sidx].ls_mds;
2028 lmv_object_put(obj);
2030 rc = lmv_fld_lookup(lmv, &op_data->op_fid2, &mds);
2035 CDEBUG(D_INODE, "Forward to mds #%x ("DFID")\n",
2036 mds, PFID(&op_data->op_fid1));
2038 op_data->op_fsuid = cfs_curproc_fsuid();
2039 op_data->op_fsgid = cfs_curproc_fsgid();
2040 op_data->op_cap = cfs_curproc_cap_pack();
2041 tgt = lmv_get_target(lmv, mds);
2044 * Cancel UPDATE lock on child (fid1).
2046 op_data->op_flags |= MF_MDC_CANCEL_FID2;
2047 rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
2048 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
2050 rc = md_link(tgt->ltd_exp, op_data, request);
2051 if (rc == -ERESTART) {
2052 LASSERT(*request != NULL);
2053 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2054 "Got -ERESTART during link!\n");
2055 ptlrpc_req_finished(*request);
2059 * Directory got split. Time to update local object and repeat
2060 * the request with proper MDS.
2062 rc = lmv_handle_split(exp, &op_data->op_fid2);
/*
 * Rename through LMV.  Resolve source parent (fid1) and target parent
 * (fid2) to their MDTs — hashing the old/new names if either parent is a
 * split directory — then early-cancel the relevant UPDATE/LOOKUP/FULL
 * locks on fid2/fid4 before forwarding md_rename() to the source target.
 * -ERESTART means a directory split mid-flight; refresh via
 * lmv_handle_split() and (in elided code) retry. (Excerpt: some lines
 * elided.)
 */
2070 static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
2071 const char *old, int oldlen, const char *new, int newlen,
2072 struct ptlrpc_request **request)
2074 struct obd_device *obd = exp->exp_obd;
2075 struct lmv_obd *lmv = &obd->u.lmv;
2076 struct lmv_tgt_desc *src_tgt;
2080 struct lmv_object *obj;
2085 LASSERT(oldlen != 0);
2087 CDEBUG(D_INODE, "RENAME %*s in "DFID" to %*s in "DFID"\n",
2088 oldlen, old, PFID(&op_data->op_fid1),
2089 newlen, new, PFID(&op_data->op_fid2));
2091 rc = lmv_check_connect(obd);
/* Resolve source parent: hash old name if fid1 is a split directory. */
2098 obj = lmv_object_find(obd, &op_data->op_fid1);
2100 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
2101 (char *)old, oldlen);
2102 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
2103 mds1 = obj->lo_stripes[sidx].ls_mds;
2104 CDEBUG(D_INODE, "Parent obj "DFID"\n", PFID(&op_data->op_fid1));
2105 lmv_object_put(obj);
2107 rc = lmv_fld_lookup(lmv, &op_data->op_fid1, &mds1);
/* Resolve target parent the same way for the new name. */
2112 obj = lmv_object_find(obd, &op_data->op_fid2);
2115 * Directory is already split, so we have to forward request to
2118 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
2119 (char *)new, newlen);
2121 mds2 = obj->lo_stripes[sidx].ls_mds;
2122 op_data->op_fid2 = obj->lo_stripes[sidx].ls_fid;
2123 CDEBUG(D_INODE, "Parent obj "DFID"\n", PFID(&op_data->op_fid2));
2124 lmv_object_put(obj);
2126 rc = lmv_fld_lookup(lmv, &op_data->op_fid2, &mds2);
2131 op_data->op_fsuid = cfs_curproc_fsuid();
2132 op_data->op_fsgid = cfs_curproc_fsgid();
2133 op_data->op_cap = cfs_curproc_cap_pack();
2135 src_tgt = lmv_get_target(lmv, mds1);
2138 * LOOKUP lock on src child (fid3) should also be cancelled for
2139 * src_tgt in mdc_rename.
2141 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
2144 * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its
2147 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2148 LCK_EX, MDS_INODELOCK_UPDATE,
2149 MF_MDC_CANCEL_FID2);
2152 * Cancel LOOKUP locks on tgt child (fid4) for parent tgt_tgt.
2155 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2156 LCK_EX, MDS_INODELOCK_LOOKUP,
2157 MF_MDC_CANCEL_FID4);
2161 * Cancel all the locks on tgt child (fid4).
2164 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2165 LCK_EX, MDS_INODELOCK_FULL,
2166 MF_MDC_CANCEL_FID4);
2169 rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
2170 new, newlen, request);
2172 if (rc == -ERESTART) {
2173 LASSERT(*request != NULL);
2174 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2175 "Got -ERESTART during rename!\n");
2176 ptlrpc_req_finished(*request);
2180 * Directory got split. Time to update local object and repeat
2181 * the request with proper MDS.
2183 rc = lmv_handle_split(exp, &op_data->op_fid1);
/*
 * setattr through LMV.  For a split directory the attributes are applied
 * to every stripe; only the reply for the master stripe (lo_fid) is
 * propagated back to llite, replies for slave stripes are released.  A
 * plain object is forwarded to its single target. (Excerpt: some lines
 * elided.)
 */
2190 static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
2191 void *ea, int ealen, void *ea2, int ea2len,
2192 struct ptlrpc_request **request,
2193 struct md_open_data **mod)
2195 struct obd_device *obd = exp->exp_obd;
2196 struct lmv_obd *lmv = &obd->u.lmv;
2197 struct ptlrpc_request *req;
2198 struct lmv_tgt_desc *tgt;
2199 struct lmv_object *obj;
2204 rc = lmv_check_connect(obd);
2208 obj = lmv_object_find(obd, &op_data->op_fid1);
2210 CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x%s\n",
2211 PFID(&op_data->op_fid1), op_data->op_attr.ia_valid,
2212 obj ? ", split" : "");
2214 op_data->op_flags |= MF_MDC_CANCEL_FID1;
/* Split directory: apply setattr to each stripe in turn. */
2216 for (i = 0; i < obj->lo_objcount; i++) {
2217 op_data->op_fid1 = obj->lo_stripes[i].ls_fid;
2219 tgt = lmv_get_target(lmv, obj->lo_stripes[i].ls_mds);
2225 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen,
2226 ea2, ea2len, &req, mod);
2228 if (lu_fid_eq(&obj->lo_fid, &obj->lo_stripes[i].ls_fid)) {
2230 * This is master object and this request should
2231 * be returned back to llite.
2235 ptlrpc_req_finished(req);
2241 lmv_object_put(obj);
2243 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2245 RETURN(PTR_ERR(tgt));
2247 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
2248 ea2len, request, mod);
/*
 * Forward md_sync() for @fid to the MDT that owns it.
 * (Excerpt: some lines elided.)
 */
2253 static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
2254 struct obd_capa *oc, struct ptlrpc_request **request)
2256 struct obd_device *obd = exp->exp_obd;
2257 struct lmv_obd *lmv = &obd->u.lmv;
2258 struct lmv_tgt_desc *tgt;
2262 rc = lmv_check_connect(obd);
2266 tgt = lmv_find_target(lmv, fid);
2268 RETURN(PTR_ERR(tgt));
2270 rc = md_sync(tgt->ltd_exp, fid, oc, request);
2275 * Main purpose of LMV blocking ast is to remove split directory LMV
2276 * presentation object (struct lmv_object) attached to the lock being revoked.
/*
 * LDLM blocking/cancel callback.  BLOCKING: cancel the lock client-side.
 * CANCELING: drop the lmv_object reference stashed in l_ast_data, so the
 * split-directory presentation goes away with the lock.
 * (Excerpt: some lines elided.)
 */
2278 int lmv_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2279 void *data, int flag)
2281 struct lustre_handle lockh;
2282 struct lmv_object *obj;
2287 case LDLM_CB_BLOCKING:
2288 ldlm_lock2handle(lock, &lockh);
2289 rc = ldlm_cli_cancel(&lockh);
2291 CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
2295 case LDLM_CB_CANCELING:
2297 * Time to drop cached attrs for split directory object
2299 obj = lock->l_ast_data;
2301 CDEBUG(D_INODE, "Cancel %s on "LPU64"/"LPU64
2302 ", master "DFID"\n",
2303 lock->l_resource->lr_name.name[3] == 1 ?
2304 "LOOKUP" : "UPDATE",
2305 lock->l_resource->lr_name.name[0],
2306 lock->l_resource->lr_name.name[1],
2307 PFID(&obj->lo_fid));
2308 lmv_object_put(obj);
/*
 * Shift a little-endian readdir hash down by @hash_adj (modulo
 * MAX_HASH_SIZE), leaving the MDS_DIR_END_OFF end-marker untouched.
 * Used to map per-stripe hash ranges back into the client's view.
 * (Excerpt: some lines elided.)
 */
2317 static void lmv_hash_adjust(__u64 *hash, __u64 hash_adj)
2321 val = le64_to_cpu(*hash);
2323 val += MAX_HASH_SIZE;
2324 if (val != MDS_DIR_END_OFF)
2325 *hash = cpu_to_le64(val - hash_adj);
/*
 * Derive a per-client pseudo-random rank from the connection's self NID
 * plus the flattened fid, folded to 32 bits.  Used to spread readdir
 * traffic across the stripes of a split directory.
 * (Excerpt: some lines elided.)
 */
2328 static __u32 lmv_node_rank(struct obd_export *exp, const struct lu_fid *fid)
2331 struct obd_import *imp;
2334 * XXX: to get nid we assume that underlying obd device is mdc.
2336 imp = class_exp2cliimp(exp);
2337 id = imp->imp_connection->c_self + fid_flatten(fid);
2339 CDEBUG(D_INODE, "Readpage node rank: "LPX64" "DFID" "LPX64" "LPX64"\n",
2340 imp->imp_connection->c_self, PFID(fid), id, id ^ (id >> 32));
2342 return id ^ (id >> 32);
/*
 * readdir page fetch through LMV.  For a split directory the requested
 * hash offset is mapped onto the stripe chosen by this client's rank
 * (see the long comment below), the page is read from that stripe's MDT,
 * and every hash in the returned lu_dirpages is adjusted back into the
 * client's hash space with lmv_hash_adjust().  When CFS_PAGE_SIZE >
 * LU_PAGE_SIZE, multiple lu_dirpages per client page are stitched
 * together by extending the last dirent of each lu_dirpage.
 * (Excerpt: some lines elided.)
 */
2345 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2346 struct page **pages, struct ptlrpc_request **request)
2348 struct obd_device *obd = exp->exp_obd;
2349 struct lmv_obd *lmv = &obd->u.lmv;
2350 struct lmv_object *obj;
2351 struct lu_fid rid = op_data->op_fid1;
2352 __u64 offset = op_data->op_offset;
2362 /* number of pages read, in CFS_PAGE_SIZE */
2364 /* number of pages transferred in LU_PAGE_SIZE */
2366 struct lmv_stripe *los;
2367 struct lmv_tgt_desc *tgt;
2368 struct lu_dirpage *dp;
2369 struct lu_dirent *ent;
2372 rc = lmv_check_connect(obd);
2376 CDEBUG(D_INODE, "READPAGE at "LPX64" from "DFID"\n", offset, PFID(&rid));
2379 * This case handle directory lookup in clustered metadata case (i.e.
2380 * split directory is located on multiple md servers.)
2381 * each server keeps directory entries for certain range of hashes.
2382 * E.g. we have N server and suppose hash range is 0 to MAX_HASH.
2383 * first server will keep records with hashes [ 0 ... MAX_HASH / N - 1],
2384 * second one with hashes [MAX_HASH / N ... 2 * MAX_HASH / N] and
2386 * readdir can simply start reading entries from 0 - N server in
2387 * order but that will not scale well as all client will request dir in
2388 * to server in same order.
2389 * Following algorithm does optimization:
2390 * Instead of doing readdir in 1, 2, ...., N order, client with a
2391 * rank R does readdir in R, R + 1, ..., N, 1, ... R - 1 order.
2392 * (every client has rank R)
2393 * But ll_readdir() expect offset range [0 to MAX_HASH/N) but
2394 * since client ask dir from MDS{R} client has pages with offsets
2395 * [R*MAX_HASH/N ... (R + 1)*MAX_HASH/N] there for we do hash_adj
2396 * on hash values that we get.
2398 obj = lmv_object_find_lock(obd, &rid);
2400 nr = obj->lo_objcount;
2402 seg_size = MAX_HASH_SIZE;
2403 do_div(seg_size, nr);
2404 los = obj->lo_stripes;
2405 tgt = lmv_get_target(lmv, los[0].ls_mds);
2406 rank = lmv_node_rank(tgt->ltd_exp, &rid) % nr;
2408 do_div(tgt_tmp, seg_size);
2409 tgt0_idx = do_div(tgt_tmp, nr);
2410 tgt_idx = (tgt0_idx + rank) % nr;
2412 if (tgt_idx < tgt0_idx)
2416 * Last segment has unusual length due to division
2419 hash_adj = MAX_HASH_SIZE - seg_size * nr;
2423 hash_adj += rank * seg_size;
2425 CDEBUG(D_INODE, "Readpage hash adjustment: %x "LPX64" "
2426 LPX64"/%x -> "LPX64"/%x\n", rank, hash_adj,
2427 offset, tgt0_idx, offset + hash_adj, tgt_idx);
2429 offset = (offset + hash_adj) & MAX_HASH_SIZE;
2430 rid = obj->lo_stripes[tgt_idx].ls_fid;
2431 tgt = lmv_get_target(lmv, los[tgt_idx].ls_mds);
2433 CDEBUG(D_INODE, "Forward to "DFID" with offset %lu i %d\n",
2434 PFID(&rid), (unsigned long)offset, tgt_idx);
2436 tgt = lmv_find_target(lmv, &rid);
2439 GOTO(cleanup, rc = PTR_ERR(tgt));
2441 op_data->op_fid1 = rid;
2442 rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
2446 nrdpgs = ((*request)->rq_bulk->bd_nob_transferred + CFS_PAGE_SIZE - 1)
2448 nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
2449 LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
2450 LASSERT(nrdpgs > 0 && nrdpgs <= op_data->op_npages);
2452 CDEBUG(D_INODE, "read %d(%d)/%d pages\n", nrdpgs, nlupgs,
2453 op_data->op_npages);
/* Rewrite hashes in each returned page back into the client view. */
2455 for (i = 0; i < nrdpgs; i++) {
2456 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
2457 struct lu_dirpage *first;
2461 struct lu_dirent *tmp = NULL;
2463 dp = cfs_kmap(pages[i]);
2465 lmv_hash_adjust(&dp->ldp_hash_start, hash_adj);
2466 lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
2467 LASSERT(le64_to_cpu(dp->ldp_hash_start) <=
2468 op_data->op_offset);
2470 if ((tgt0_idx != nr - 1) &&
2471 (le64_to_cpu(dp->ldp_hash_end) == MDS_DIR_END_OFF))
/* NOTE(review): ldp_hash_end holds a 64-bit hash elsewhere in this
 * function (le64_to_cpu above); cpu_to_le32 here looks like a width
 * mismatch — confirm against the lu_dirpage definition. */
2473 dp->ldp_hash_end = cpu_to_le32(seg_size *
2476 ""DFID" reset end "LPX64" tgt %d\n",
2478 (__u64)le64_to_cpu(dp->ldp_hash_end),
2483 ent = lu_dirent_start(dp);
2484 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
2486 hash_end = dp->ldp_hash_end;
2490 for (tmp = ent; ent != NULL;
2491 tmp = ent, ent = lu_dirent_next(ent)) {
2493 lmv_hash_adjust(&ent->lde_hash, hash_adj);
2496 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
/* Walk the remaining lu_dirpages embedded in this client page. */
2497 dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
2498 if (((unsigned long)dp & ~CFS_PAGE_MASK) && nlupgs > 0) {
2499 ent = lu_dirent_start(dp);
2502 lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
2503 if ((tgt0_idx != nr - 1) &&
2504 (le64_to_cpu(dp->ldp_hash_end) ==
/* NOTE(review): same cpu_to_le32-on-64-bit-hash concern as above. */
2506 hash_end = cpu_to_le32(seg_size *
2509 ""DFID" reset end "LPX64" tgt %d\n",
2511 (__u64)le64_to_cpu(hash_end),
2515 hash_end = dp->ldp_hash_end;
2516 flags = dp->ldp_flags;
2519 /* enlarge the end entry lde_reclen from 0 to
2520 * first entry of next lu_dirpage, in this way
2521 * several lu_dirpages can be stored into one
2522 * client page on client. */
2523 tmp = ((void *)tmp) +
2524 le16_to_cpu(tmp->lde_reclen);
2526 cpu_to_le16((char *)(dp->ldp_entries) -
2531 first->ldp_hash_end = hash_end;
2532 first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
2533 first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
2535 SET_BUT_UNUSED(tmp);
2537 cfs_kunmap(pages[i]);
2542 lmv_object_put_unlock(obj);
/*
 * Unlink through LMV.  A split parent has the name hashed to its stripe
 * (MDS_CHECK_SPLIT cleared); otherwise the fid's target is used with
 * MDS_CHECK_SPLIT set.  FULL locks on the child (fid3) are early-
 * cancelled before forwarding md_unlink().  -ERESTART means the parent
 * split mid-flight; refresh via lmv_handle_split() and (in elided code)
 * retry. (Excerpt: some lines elided.)
 */
2546 static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
2547 struct ptlrpc_request **request)
2549 struct obd_device *obd = exp->exp_obd;
2550 struct lmv_obd *lmv = &obd->u.lmv;
2551 struct lmv_tgt_desc *tgt = NULL;
2552 struct lmv_object *obj;
2558 rc = lmv_check_connect(obd);
2565 LASSERT(op_data->op_namelen != 0);
2567 obj = lmv_object_find(obd, &op_data->op_fid1);
2569 sidx = raw_name2idx(obj->lo_hashtype,
2572 op_data->op_namelen);
2573 op_data->op_bias &= ~MDS_CHECK_SPLIT;
2574 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
2575 tgt = lmv_get_target(lmv,
2576 obj->lo_stripes[sidx].ls_mds);
2577 lmv_object_put(obj);
2578 CDEBUG(D_INODE, "UNLINK '%*s' in "DFID" -> %u\n",
2579 op_data->op_namelen, op_data->op_name,
2580 PFID(&op_data->op_fid1), sidx);
2584 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2586 RETURN(PTR_ERR(tgt));
2587 op_data->op_bias |= MDS_CHECK_SPLIT;
2590 op_data->op_fsuid = cfs_curproc_fsuid();
2591 op_data->op_fsgid = cfs_curproc_fsgid();
2592 op_data->op_cap = cfs_curproc_cap_pack();
2595 * If child's fid is given, cancel unused locks for it if it is from
2596 * another export than parent.
2598 * LOOKUP lock for child (fid3) should also be cancelled on parent
2599 * tgt_tgt in mdc_unlink().
2601 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
2604 * Cancel FULL locks on child (fid3).
2606 rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
2607 MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
2610 rc = md_unlink(tgt->ltd_exp, op_data, request);
2612 if (rc == -ERESTART) {
2613 LASSERT(*request != NULL);
2614 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2615 "Got -ERESTART during unlink!\n");
2616 ptlrpc_req_finished(*request);
2620 * Directory got split. Time to update local object and repeat
2621 * the request with proper MDS.
2623 rc = lmv_handle_split(exp, &op_data->op_fid1);
/*
 * obd precleanup hook: at OBD_CLEANUP_EXPORTS tear down the FLD proc
 * entries, /proc tree and llog subsystem for this LMV device.
 * (Excerpt: some lines elided.)
 */
2630 static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2632 struct lmv_obd *lmv = &obd->u.lmv;
2636 case OBD_CLEANUP_EARLY:
2637 /* XXX: here should be calling obd_precleanup() down to
2640 case OBD_CLEANUP_EXPORTS:
2641 fld_client_proc_fini(&lmv->lmv_fld);
2642 lprocfs_obd_cleanup(obd);
2643 rc = obd_llog_finish(obd, 0);
2645 CERROR("failed to cleanup llogging subsystems\n");
/*
 * obd_get_info dispatcher.  "remote_flag" polls every target until one
 * answers; KEY_MAX_EASIZE/KEY_CONN_DATA go to target 0 (and CONN_DATA
 * replies refresh exp_connect_flags); KEY_TGT_COUNT is answered locally.
 * Unknown keys are rejected. (Excerpt: some lines elided.)
 */
2653 static int lmv_get_info(struct obd_export *exp, __u32 keylen,
2654 void *key, __u32 *vallen, void *val,
2655 struct lov_stripe_md *lsm)
2657 struct obd_device *obd;
2658 struct lmv_obd *lmv;
2662 obd = class_exp2obd(exp);
2664 CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
2665 exp->exp_handle.h_cookie);
2670 if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
2671 struct lmv_tgt_desc *tgts;
2674 rc = lmv_check_connect(obd);
2678 LASSERT(*vallen == sizeof(__u32));
2679 for (i = 0, tgts = lmv->tgts; i < lmv->desc.ld_tgt_count;
2683 * All tgts should be connected when this gets called.
2685 if (!tgts || !tgts->ltd_exp) {
2686 CERROR("target not setup?\n");
2690 if (!obd_get_info(tgts->ltd_exp, keylen, key,
2695 } else if (KEY_IS(KEY_MAX_EASIZE) || KEY_IS(KEY_CONN_DATA)) {
2696 rc = lmv_check_connect(obd);
2701 * Forwarding this request to first MDS, it should know LOV
2704 rc = obd_get_info(lmv->tgts[0].ltd_exp, keylen, key,
2706 if (!rc && KEY_IS(KEY_CONN_DATA)) {
2707 exp->exp_connect_flags =
2708 ((struct obd_connect_data *)val)->ocd_connect_flags;
2711 } else if (KEY_IS(KEY_TGT_COUNT)) {
2712 *((int *)val) = lmv->desc.ld_tgt_count;
2716 CDEBUG(D_IOCTL, "Invalid key\n");
/*
 * obd_set_info_async dispatcher: KEY_READ_ONLY and KEY_FLUSH_CTX are
 * broadcast to every target export. (Excerpt: some lines elided.)
 */
2720 int lmv_set_info_async(struct obd_export *exp, obd_count keylen,
2721 void *key, obd_count vallen, void *val,
2722 struct ptlrpc_request_set *set)
2724 struct lmv_tgt_desc *tgt;
2725 struct obd_device *obd;
2726 struct lmv_obd *lmv;
2730 obd = class_exp2obd(exp);
2732 CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
2733 exp->exp_handle.h_cookie);
2738 if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX)) {
2741 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2742 tgt = &lmv->tgts[i];
2747 err = obd_set_info_async(tgt->ltd_exp,
2748 keylen, key, vallen, val, set);
2759 int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
2760 struct lov_stripe_md *lsm)
2762 struct obd_device *obd = class_exp2obd(exp);
2763 struct lmv_obd *lmv = &obd->u.lmv;
2764 struct lmv_stripe_md *meap;
2765 struct lmv_stripe_md *lsmp;
2770 mea_size = lmv_get_easize(lmv);
2774 if (*lmmp && !lsm) {
2775 OBD_FREE_LARGE(*lmmp, mea_size);
2780 if (*lmmp == NULL) {
2781 OBD_ALLOC_LARGE(*lmmp, mea_size);
2789 lsmp = (struct lmv_stripe_md *)lsm;
2790 meap = (struct lmv_stripe_md *)*lmmp;
2792 if (lsmp->mea_magic != MEA_MAGIC_LAST_CHAR &&
2793 lsmp->mea_magic != MEA_MAGIC_ALL_CHARS)
2796 meap->mea_magic = cpu_to_le32(lsmp->mea_magic);
2797 meap->mea_count = cpu_to_le32(lsmp->mea_count);
2798 meap->mea_master = cpu_to_le32(lsmp->mea_master);
2800 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2801 meap->mea_ids[i] = meap->mea_ids[i];
2802 fid_cpu_to_le(&meap->mea_ids[i], &meap->mea_ids[i]);
/*
 * Unpack an on-wire mea (little-endian) into an in-memory lmv_stripe_md
 * in *lsmp, converting magic/count/master and each stripe fid to CPU
 * byte order.  lmm == NULL with *lsmp set frees the buffer; unknown/old
 * magics are rejected. (Excerpt: some lines elided.)
 */
2808 int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
2809 struct lov_mds_md *lmm, int lmm_size)
2811 struct obd_device *obd = class_exp2obd(exp);
2812 struct lmv_stripe_md **tmea = (struct lmv_stripe_md **)lsmp;
2813 struct lmv_stripe_md *mea = (struct lmv_stripe_md *)lmm;
2814 struct lmv_obd *lmv = &obd->u.lmv;
2820 mea_size = lmv_get_easize(lmv);
/* Free path: caller passes the existing lsm with no new lmm. */
2824 if (*lsmp != NULL && lmm == NULL) {
2825 OBD_FREE_LARGE(*tmea, mea_size);
2830 LASSERT(mea_size == lmm_size);
2832 OBD_ALLOC_LARGE(*tmea, mea_size);
/* Magic may arrive in either byte order; normalize before use. */
2839 if (mea->mea_magic == MEA_MAGIC_LAST_CHAR ||
2840 mea->mea_magic == MEA_MAGIC_ALL_CHARS ||
2841 mea->mea_magic == MEA_MAGIC_HASH_SEGMENT)
2843 magic = le32_to_cpu(mea->mea_magic);
2846 * Old mea is not handled here.
2848 CERROR("Old not supportable EA is found\n");
2852 (*tmea)->mea_magic = magic;
2853 (*tmea)->mea_count = le32_to_cpu(mea->mea_count);
2854 (*tmea)->mea_master = le32_to_cpu(mea->mea_master);
2856 for (i = 0; i < (*tmea)->mea_count; i++) {
2857 (*tmea)->mea_ids[i] = mea->mea_ids[i];
2858 fid_le_to_cpu(&(*tmea)->mea_ids[i], &(*tmea)->mea_ids[i]);
/*
 * Broadcast md_cancel_unused() for @fid to every active connected target
 * (with CMD the fid may hold locks in several MDT namespaces).
 * (Excerpt: some lines elided.)
 */
2863 static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
2864 ldlm_policy_data_t *policy, ldlm_mode_t mode,
2865 ldlm_cancel_flags_t flags, void *opaque)
2867 struct obd_device *obd = exp->exp_obd;
2868 struct lmv_obd *lmv = &obd->u.lmv;
2874 LASSERT(fid != NULL);
2876 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2877 if (!lmv->tgts[i].ltd_exp || !lmv->tgts[i].ltd_active)
2880 err = md_cancel_unused(lmv->tgts[i].ltd_exp, fid,
2881 policy, mode, flags, opaque);
/*
 * Delegate md_set_lock_data() to target 0 (all targets share the client
 * LDLM namespace for this purpose). (Excerpt: some lines elided.)
 */
2888 int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
2891 struct obd_device *obd = exp->exp_obd;
2892 struct lmv_obd *lmv = &obd->u.lmv;
2896 rc = md_set_lock_data(lmv->tgts[0].ltd_exp, lockh, data, bits);
/*
 * Try md_lock_match() against every target: with CMD the LOOKUP lock may
 * live on the MDT holding the dentry while UPDATE/OPEN lives on the MDT
 * holding the inode, so the owning target cannot be derived from the fid
 * alone. (Excerpt: some lines elided.)
 */
2900 ldlm_mode_t lmv_lock_match(struct obd_export *exp, int flags,
2901 const struct lu_fid *fid, ldlm_type_t type,
2902 ldlm_policy_data_t *policy, ldlm_mode_t mode,
2903 struct lustre_handle *lockh)
2905 struct obd_device *obd = exp->exp_obd;
2906 struct lmv_obd *lmv = &obd->u.lmv;
2911 CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
2914 * With CMD every object can have two locks in different namespaces:
2915 * lookup lock in space of mds storing direntry and update/open lock in
2916 * space of mds storing inode. Thus we check all targets, not only that
2917 * one fid was created in.
2919 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2920 rc = md_lock_match(lmv->tgts[i].ltd_exp, flags, fid,
2921 type, policy, mode, lockh);
/*
 * Delegate md_get_lustre_md() (reply unpacking into struct lustre_md)
 * to target 0. (Excerpt: some lines elided.)
 */
2929 int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
2930 struct obd_export *dt_exp, struct obd_export *md_exp,
2931 struct lustre_md *md)
2933 struct obd_device *obd = exp->exp_obd;
2934 struct lmv_obd *lmv = &obd->u.lmv;
2937 rc = md_get_lustre_md(lmv->tgts[0].ltd_exp, req, dt_exp, md_exp, md);
/*
 * Release a lustre_md unpacked by lmv_get_lustre_md(): free the mea via
 * obd_free_memmd(), then delegate the rest to target 0.
 * (Excerpt: some lines elided.)
 */
2941 int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
2943 struct obd_device *obd = exp->exp_obd;
2944 struct lmv_obd *lmv = &obd->u.lmv;
2948 obd_free_memmd(exp, (void *)&md->mea);
2949 RETURN(md_free_lustre_md(lmv->tgts[0].ltd_exp, md));
/*
 * Forward md_set_open_replay_data() to the MDT owning the open handle's
 * fid. (Excerpt: some lines elided.)
 */
2952 int lmv_set_open_replay_data(struct obd_export *exp,
2953 struct obd_client_handle *och,
2954 struct ptlrpc_request *open_req)
2956 struct obd_device *obd = exp->exp_obd;
2957 struct lmv_obd *lmv = &obd->u.lmv;
2958 struct lmv_tgt_desc *tgt;
2961 tgt = lmv_find_target(lmv, &och->och_fid);
2963 RETURN(PTR_ERR(tgt));
2965 RETURN(md_set_open_replay_data(tgt->ltd_exp, och, open_req));
/*
 * Forward md_clear_open_replay_data() to the MDT owning the open
 * handle's fid. (Excerpt: some lines elided.)
 */
2968 int lmv_clear_open_replay_data(struct obd_export *exp,
2969 struct obd_client_handle *och)
2971 struct obd_device *obd = exp->exp_obd;
2972 struct lmv_obd *lmv = &obd->u.lmv;
2973 struct lmv_tgt_desc *tgt;
2976 tgt = lmv_find_target(lmv, &och->och_fid);
2978 RETURN(PTR_ERR(tgt));
2980 RETURN(md_clear_open_replay_data(tgt->ltd_exp, och));
/*
 * Forward md_get_remote_perm() for @fid to its owning MDT.
 * (Excerpt: some lines elided.)
 */
2983 static int lmv_get_remote_perm(struct obd_export *exp,
2984 const struct lu_fid *fid,
2985 struct obd_capa *oc, __u32 suppgid,
2986 struct ptlrpc_request **request)
2988 struct obd_device *obd = exp->exp_obd;
2989 struct lmv_obd *lmv = &obd->u.lmv;
2990 struct lmv_tgt_desc *tgt;
2994 rc = lmv_check_connect(obd);
2998 tgt = lmv_find_target(lmv, fid);
3000 RETURN(PTR_ERR(tgt));
3002 rc = md_get_remote_perm(tgt->ltd_exp, fid, oc, suppgid, request);
/*
 * Forward md_renew_capa() to the MDT owning the capability's fid.
 * (Excerpt: some lines elided.)
 */
3006 static int lmv_renew_capa(struct obd_export *exp, struct obd_capa *oc,
3009 struct obd_device *obd = exp->exp_obd;
3010 struct lmv_obd *lmv = &obd->u.lmv;
3011 struct lmv_tgt_desc *tgt;
3015 rc = lmv_check_connect(obd);
3019 tgt = lmv_find_target(lmv, &oc->c_capa.lc_fid);
3021 RETURN(PTR_ERR(tgt));
3023 rc = md_renew_capa(tgt->ltd_exp, oc, cb);
/*
 * Delegate md_unpack_capa() to target 0. (Excerpt: some lines elided.)
 */
3027 int lmv_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
3028 const struct req_msg_field *field, struct obd_capa **oc)
3030 struct obd_device *obd = exp->exp_obd;
3031 struct lmv_obd *lmv = &obd->u.lmv;
3035 rc = md_unpack_capa(lmv->tgts[0].ltd_exp, req, field, oc);
/*
 * Async intent-getattr: if the parent (op_fid1) is split, hash the name
 * to select the stripe and its MDT; otherwise use the fid's own target.
 * Then forward md_intent_getattr_async(). (Excerpt: some lines elided.)
 */
3039 int lmv_intent_getattr_async(struct obd_export *exp,
3040 struct md_enqueue_info *minfo,
3041 struct ldlm_enqueue_info *einfo)
3043 struct md_op_data *op_data = &minfo->mi_data;
3044 struct obd_device *obd = exp->exp_obd;
3045 struct lmv_obd *lmv = &obd->u.lmv;
3046 struct lmv_object *obj;
3047 struct lmv_tgt_desc *tgt = NULL;
3052 rc = lmv_check_connect(obd);
3056 if (op_data->op_namelen) {
3057 obj = lmv_object_find(obd, &op_data->op_fid1);
3059 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
3060 (char *)op_data->op_name,
3061 op_data->op_namelen);
3062 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
3063 tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
3064 lmv_object_put(obj);
3069 tgt = lmv_find_target(lmv, &op_data->op_fid1);
3072 RETURN(PTR_ERR(tgt));
3074 rc = md_intent_getattr_async(tgt->ltd_exp, minfo, einfo);
3078 int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
3079 struct lu_fid *fid, __u64 *bits)
3081 struct obd_device *obd = exp->exp_obd;
3082 struct lmv_obd *lmv = &obd->u.lmv;
3083 struct lmv_tgt_desc *tgt;
3087 rc = lmv_check_connect(obd);
3091 tgt = lmv_find_target(lmv, fid);
3093 RETURN(PTR_ERR(tgt));
3095 rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
3100 * For lmv, only need to send request to master MDT, and the master MDT will
3101 * process with other slave MDTs. The only exception is Q_GETOQUOTA for which
3102 * we directly fetch data from the slave MDTs.
3104 int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
3105 struct obd_quotactl *oqctl)
3107 struct obd_device *obd = class_exp2obd(exp);
3108 struct lmv_obd *lmv = &obd->u.lmv;
3109 struct lmv_tgt_desc *tgt = &lmv->tgts[0];
3111 __u64 curspace, curinodes;
3114 if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
3115 CERROR("master lmv inactive\n");
3119 if (oqctl->qc_cmd != Q_GETOQUOTA) {
3120 rc = obd_quotactl(tgt->ltd_exp, oqctl);
3124 curspace = curinodes = 0;
3125 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
3127 tgt = &lmv->tgts[i];
3129 if (tgt->ltd_exp == NULL)
3131 if (!tgt->ltd_active) {
3132 CDEBUG(D_HA, "mdt %d is inactive.\n", i);
3136 err = obd_quotactl(tgt->ltd_exp, oqctl);
3138 CERROR("getquota on mdt %d failed. %d\n", i, err);
3142 curspace += oqctl->qc_dqblk.dqb_curspace;
3143 curinodes += oqctl->qc_dqblk.dqb_curinodes;
3146 oqctl->qc_dqblk.dqb_curspace = curspace;
3147 oqctl->qc_dqblk.dqb_curinodes = curinodes;
3152 int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
3153 struct obd_quotactl *oqctl)
3155 struct obd_device *obd = class_exp2obd(exp);
3156 struct lmv_obd *lmv = &obd->u.lmv;
3157 struct lmv_tgt_desc *tgt;
3161 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
3164 if (!tgt->ltd_active) {
3165 CERROR("lmv idx %d inactive\n", i);
3169 err = obd_quotacheck(tgt->ltd_exp, oqctl);
3177 struct obd_ops lmv_obd_ops = {
3178 .o_owner = THIS_MODULE,
3179 .o_setup = lmv_setup,
3180 .o_cleanup = lmv_cleanup,
3181 .o_precleanup = lmv_precleanup,
3182 .o_process_config = lmv_process_config,
3183 .o_connect = lmv_connect,
3184 .o_disconnect = lmv_disconnect,
3185 .o_statfs = lmv_statfs,
3186 .o_get_info = lmv_get_info,
3187 .o_set_info_async = lmv_set_info_async,
3188 .o_packmd = lmv_packmd,
3189 .o_unpackmd = lmv_unpackmd,
3190 .o_notify = lmv_notify,
3191 .o_get_uuid = lmv_get_uuid,
3192 .o_iocontrol = lmv_iocontrol,
3193 .o_fid_delete = lmv_fid_delete,
3194 .o_quotacheck = lmv_quotacheck,
3195 .o_quotactl = lmv_quotactl
3198 struct md_ops lmv_md_ops = {
3199 .m_getstatus = lmv_getstatus,
3200 .m_change_cbdata = lmv_change_cbdata,
3201 .m_find_cbdata = lmv_find_cbdata,
3202 .m_close = lmv_close,
3203 .m_create = lmv_create,
3204 .m_done_writing = lmv_done_writing,
3205 .m_enqueue = lmv_enqueue,
3206 .m_getattr = lmv_getattr,
3207 .m_getxattr = lmv_getxattr,
3208 .m_getattr_name = lmv_getattr_name,
3209 .m_intent_lock = lmv_intent_lock,
3211 .m_rename = lmv_rename,
3212 .m_setattr = lmv_setattr,
3213 .m_setxattr = lmv_setxattr,
3215 .m_readpage = lmv_readpage,
3216 .m_unlink = lmv_unlink,
3217 .m_init_ea_size = lmv_init_ea_size,
3218 .m_cancel_unused = lmv_cancel_unused,
3219 .m_set_lock_data = lmv_set_lock_data,
3220 .m_lock_match = lmv_lock_match,
3221 .m_get_lustre_md = lmv_get_lustre_md,
3222 .m_free_lustre_md = lmv_free_lustre_md,
3223 .m_set_open_replay_data = lmv_set_open_replay_data,
3224 .m_clear_open_replay_data = lmv_clear_open_replay_data,
3225 .m_renew_capa = lmv_renew_capa,
3226 .m_unpack_capa = lmv_unpack_capa,
3227 .m_get_remote_perm = lmv_get_remote_perm,
3228 .m_intent_getattr_async = lmv_intent_getattr_async,
3229 .m_revalidate_lock = lmv_revalidate_lock
3232 int __init lmv_init(void)
3234 struct lprocfs_static_vars lvars;
3237 lmv_object_cache = cfs_mem_cache_create("lmv_objects",
3238 sizeof(struct lmv_object),
3240 if (!lmv_object_cache) {
3241 CERROR("Error allocating lmv objects cache\n");
3245 lprocfs_lmv_init_vars(&lvars);
3247 rc = class_register_type(&lmv_obd_ops, &lmv_md_ops,
3248 lvars.module_vars, LUSTRE_LMV_NAME, NULL);
3250 cfs_mem_cache_destroy(lmv_object_cache);
3256 static void lmv_exit(void)
3258 class_unregister_type(LUSTRE_LMV_NAME);
3260 LASSERTF(cfs_atomic_read(&lmv_object_count) == 0,
3261 "Can't free lmv objects cache, %d object(s) busy\n",
3262 cfs_atomic_read(&lmv_object_count));
3263 cfs_mem_cache_destroy(lmv_object_cache);
3266 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
3267 MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver");
3268 MODULE_LICENSE("GPL");
3270 module_init(lmv_init);
3271 module_exit(lmv_exit);