4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2012, Whamcloud, Inc.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
38 # define EXPORT_SYMTAB
40 #define DEBUG_SUBSYSTEM S_LMV
42 #include <linux/slab.h>
43 #include <linux/module.h>
44 #include <linux/init.h>
45 #include <linux/slab.h>
46 #include <linux/pagemap.h>
48 #include <asm/div64.h>
49 #include <linux/seq_file.h>
50 #include <linux/namei.h>
52 #include <liblustre.h>
55 #include <lustre_log.h>
56 #include <obd_support.h>
57 #include <lustre_lib.h>
58 #include <lustre_net.h>
59 #include <obd_class.h>
60 #include <lprocfs_status.h>
61 #include <lustre_lite.h>
62 #include <lustre_fid.h>
63 #include "lmv_internal.h"
/* Slab cache used for allocating struct lmv_object instances (managed in
 * lmv_object_setup()/lmv_object_cleanup(), see lmv_internal.h). */
66 cfs_mem_cache_t *lmv_object_cache;
/* Global count of live lmv_object instances, starts at zero; used to
 * detect leaks when the cache is torn down. */
67 cfs_atomic_t lmv_object_count = CFS_ATOMIC_INIT(0);
/*
 * Flip the active state of one LMV target and keep the LMV-wide active
 * target counter in sync.  No-op when the target is already in the
 * requested state.
 *
 * NOTE(review): callers appear to serialize this with lmv->lmv_lock
 * (see lmv_set_mdc_active()) -- confirm every call site holds it.
 */
69 static void lmv_activate_target(struct lmv_obd *lmv,
70 struct lmv_tgt_desc *tgt,
73 if (tgt->ltd_active == activate)
76 tgt->ltd_active = activate;
/* +1 on activation, -1 on deactivation. */
77 lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
/*
 * Look up the MDC target with the given @uuid in @lmv's target list and
 * mark it active/inactive.  Error returns (from the original header):
 */
83 * -EINVAL : UUID can't be found in the LMV's target list
84 * -ENOTCONN: The UUID is found, but the target connection is bad (!)
85 * -EBADF : The UUID is found, but the OBD of the wrong type (!)
87 static int lmv_set_mdc_active(struct lmv_obd *lmv, struct obd_uuid *uuid,
90 struct lmv_tgt_desc *tgt;
91 struct obd_device *obd;
96 CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
97 lmv, uuid->uuid, activate);
/* Entire search + state flip is done under the LMV spinlock. */
99 cfs_spin_lock(&lmv->lmv_lock);
100 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
/* Skip targets that were never connected. */
101 if (tgt->ltd_exp == NULL)
104 CDEBUG(D_INFO, "Target idx %d is %s conn "LPX64"\n",
105 i, tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
107 if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
/* Loop ran to the end without a match -> unknown UUID. */
111 if (i == lmv->desc.ld_tgt_count)
112 GOTO(out_lmv_lock, rc = -EINVAL);
114 obd = class_exp2obd(tgt->ltd_exp)
116 GOTO(out_lmv_lock, rc = -ENOTCONN);
118 CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n",
119 obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd,
120 obd->obd_type->typ_name, i);
/* LMV targets must always be MDC devices. */
121 LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
/* Already in the requested state: nothing to do. */
123 if (tgt->ltd_active == activate) {
124 CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
125 activate ? "" : "in");
126 GOTO(out_lmv_lock, rc);
129 CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
130 activate ? "" : "in");
131 lmv_activate_target(lmv, tgt, activate);
135 cfs_spin_unlock(&lmv->lmv_lock);
/*
 * Store a copy of the connect data negotiated with the MDC identified by
 * @uuid into lmv->datas[] at that target's index.  Runs under the LMV
 * spinlock while scanning the target table.
 */
139 static int lmv_set_mdc_data(struct lmv_obd *lmv, struct obd_uuid *uuid,
140 struct obd_connect_data *data)
142 struct lmv_tgt_desc *tgt;
146 LASSERT(data != NULL);
148 cfs_spin_lock(&lmv->lmv_lock);
149 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
/* Only connected targets have valid exports / indices here. */
150 if (tgt->ltd_exp == NULL)
153 if (obd_uuid_equals(uuid, &tgt->ltd_uuid)) {
/* Struct copy: caller's buffer is not retained. */
154 lmv->datas[tgt->ltd_idx] = *data;
158 cfs_spin_unlock(&lmv->lmv_lock);
/*
 * Return the UUID of the first LMV target (index 0) by delegating to the
 * underlying MDC export.  NOTE(review): assumes tgts[0] is connected --
 * confirm callers only use this after lmv_check_connect().
 */
162 struct obd_uuid *lmv_get_uuid(struct obd_export *exp) {
163 struct obd_device *obd = exp->exp_obd;
164 struct lmv_obd *lmv = &obd->u.lmv;
165 return obd_get_uuid(lmv->tgts[0].ltd_exp);
/*
 * OBD notification callback: reacts to state changes of the watched MDC
 * devices (activate/deactivate, connect-data arrival, disconnect) and
 * then forwards the event to this device's own observer.
 */
168 static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
169 enum obd_notify_event ev, void *data)
171 struct obd_connect_data *conn_data;
172 struct lmv_obd *lmv = &obd->u.lmv;
173 struct obd_uuid *uuid;
/* LMV only watches MDC devices; anything else is a logic error. */
177 if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
178 CERROR("unexpected notification of %s %s!\n",
179 watched->obd_type->typ_name,
184 uuid = &watched->u.cli.cl_target_uuid;
185 if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
187 * Set MDC as active before notifying the observer, so the
188 * observer can use the MDC normally.
190 rc = lmv_set_mdc_active(lmv, uuid,
191 ev == OBD_NOTIFY_ACTIVE);
193 CERROR("%sactivation of %s failed: %d\n",
194 ev == OBD_NOTIFY_ACTIVE ? "" : "de",
198 } else if (ev == OBD_NOTIFY_OCD) {
199 conn_data = &watched->u.cli.cl_import->imp_connect_data;
202 * Set connect data to desired target, update exp_connect_flags.
204 rc = lmv_set_mdc_data(lmv, uuid, conn_data);
206 CERROR("can't set connect data to target %s, rc %d\n",
212 * XXX: Make sure that ocd_connect_flags from all targets are
213 * the same. Otherwise one of MDTs runs wrong version or
214 * something like this. --umka
216 obd->obd_self_export->exp_connect_flags =
217 conn_data->ocd_connect_flags;
220 else if (ev == OBD_NOTIFY_DISCON) {
222 * For disconnect event, flush fld cache for failout MDS case.
224 fld_client_flush(&lmv->lmv_fld);
228 * Pass the notification up the chain.
230 if (obd->obd_observer)
231 rc = obd_notify(obd->obd_observer, watched, ev, data);
237 * This is fake connect function. Its purpose is to initialize lmv and say
238 * caller that everything is okay. Real connection will be performed later.
240 static int lmv_connect(const struct lu_env *env,
241 struct obd_export **exp, struct obd_device *obd,
242 struct obd_uuid *cluuid, struct obd_connect_data *data,
246 struct proc_dir_entry *lmv_proc_dir;
248 struct lmv_obd *lmv = &obd->u.lmv;
249 struct lustre_handle conn = { 0 };
254 * We don't want to actually do the underlying connections more than
255 * once, so keep track.
/* Already connected once: bail out early (refcount-based guard). */
258 if (lmv->refcount > 1) {
263 rc = class_connect(&conn, obd, cluuid);
265 CERROR("class_connection() returned %d\n", rc);
269 *exp = class_conn2export(&conn);
/* Extra reference held for the lifetime of this fake connection. */
270 class_export_get(*exp);
274 lmv->cluuid = *cluuid;
/* Cache connect data; reused later when connecting real MDC targets. */
277 lmv->conn_data = *data;
/* Create /proc/fs/lustre/<type>/<name>/target_obds for MDC symlinks. */
280 lmv_proc_dir = lprocfs_register("target_obds", obd->obd_proc_entry,
282 if (IS_ERR(lmv_proc_dir)) {
283 CERROR("could not register /proc/fs/lustre/%s/%s/target_obds.",
284 obd->obd_type->typ_name, obd->obd_name);
290 * All real clients should perform actual connection right away, because
291 * it is possible, that LMV will not have opportunity to connect targets
292 * and MDC stuff will be called directly, for instance while reading
293 * ../mdc/../kbytesfree procfs file, etc.
295 if (data->ocd_connect_flags & OBD_CONNECT_REAL)
296 rc = lmv_check_connect(obd);
/* Error path: tear down the proc directory registered above. */
301 lprocfs_remove(&lmv_proc_dir);
/*
 * Push the inter-MDS timeout key (KEY_INTERMDS) to every connected
 * target.  Quietly does nothing when no server timeout is configured or
 * the LMV is not yet connected.
 */
308 static void lmv_set_timeouts(struct obd_device *obd)
310 struct lmv_tgt_desc *tgts;
315 if (lmv->server_timeout == 0)
318 if (lmv->connected == 0)
321 for (i = 0, tgts = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgts++) {
/* Unconnected slots have no export to talk to. */
322 if (tgts->ltd_exp == NULL)
/* Fire-and-forget: async set_info, return value not checked. */
325 obd_set_info_async(tgts->ltd_exp, sizeof(KEY_INTERMDS),
326 KEY_INTERMDS, 0, NULL, NULL);
/*
 * Grow the cached EA-size maxima (easize, default easize, cookiesize) if
 * the supplied values exceed the current ones, then propagate the new
 * sizes to every connected MDC target via md_init_ea_size().
 */
330 static int lmv_init_ea_size(struct obd_export *exp, int easize,
331 int def_easize, int cookiesize)
333 struct obd_device *obd = exp->exp_obd;
334 struct lmv_obd *lmv = &obd->u.lmv;
/* Maxima only ever grow; smaller requests leave them untouched. */
340 if (lmv->max_easize < easize) {
341 lmv->max_easize = easize;
344 if (lmv->max_def_easize < def_easize) {
345 lmv->max_def_easize = def_easize;
348 if (lmv->max_cookiesize < cookiesize) {
349 lmv->max_cookiesize = cookiesize;
/* Nothing to propagate before the targets are connected. */
355 if (lmv->connected == 0)
358 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
359 if (lmv->tgts[i].ltd_exp == NULL) {
360 CWARN("%s: NULL export for %d\n", obd->obd_name, i);
364 rc = md_init_ea_size(lmv->tgts[i].ltd_exp, easize, def_easize,
367 CERROR("obd_init_ea_size() failed on MDT target %d, "
368 "error %d.\n", i, rc);
375 #define MAX_STRING_SIZE 128
/*
 * Establish the real connection to a single MDC target: find the client
 * OBD, connect, initialize its FID client, register it as an FLD target,
 * register LMV as its observer, notify our own observer, record connect
 * data, push EA sizes, and finally add a procfs symlink under
 * target_obds.  Errors unwind the MDC connection via obd_disconnect().
 */
377 int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
380 struct proc_dir_entry *lmv_proc_dir;
382 struct lmv_obd *lmv = &obd->u.lmv;
383 struct obd_uuid *cluuid = &lmv->cluuid;
384 struct obd_connect_data *mdc_data = NULL;
385 struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" };
386 struct obd_device *mdc_obd;
387 struct obd_export *mdc_exp;
388 struct lu_fld_target target;
392 mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
395 CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
399 CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
400 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
401 tgt->ltd_uuid.uuid, obd->obd_uuid.uuid,
404 if (!mdc_obd->obd_set_up) {
405 CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
/* Connect using the connect data cached by lmv_connect(). */
409 rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid,
410 &lmv->conn_data, NULL);
412 CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
417 * Init fid sequence client for this mdc and add new fld target.
419 rc = obd_fid_init(mdc_exp);
423 target.ft_srv = NULL;
424 target.ft_exp = mdc_exp;
425 target.ft_idx = tgt->ltd_idx;
427 fld_client_add_target(&lmv->lmv_fld, &target);
429 mdc_data = &class_exp2cliimp(mdc_exp)->imp_connect_data;
/* LMV observes the MDC so lmv_notify() gets its state changes. */
431 rc = obd_register_observer(mdc_obd, obd);
433 obd_disconnect(mdc_exp);
434 CERROR("target %s register_observer error %d\n",
435 tgt->ltd_uuid.uuid, rc);
439 if (obd->obd_observer) {
441 * Tell the observer about the new target.
/* Data is the target index encoded as a pointer (tgt - lmv->tgts). */
443 rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd,
444 OBD_NOTIFY_ACTIVE, (void *)(tgt - lmv->tgts));
446 obd_disconnect(mdc_exp);
452 tgt->ltd_exp = mdc_exp;
453 lmv->desc.ld_active_tgt_count++;
456 * Copy connect data, it may be used later.
458 lmv->datas[tgt->ltd_idx] = *mdc_data;
460 md_init_ea_size(tgt->ltd_exp, lmv->max_easize,
461 lmv->max_def_easize, lmv->max_cookiesize);
463 CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
464 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
465 cfs_atomic_read(&obd->obd_refcount));
/* Add /proc/.../target_obds/<mdc-name> -> MDC device symlink. */
468 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
470 struct proc_dir_entry *mdc_symlink;
472 LASSERT(mdc_obd->obd_type != NULL);
473 LASSERT(mdc_obd->obd_type->typ_name != NULL);
474 mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name,
477 mdc_obd->obd_type->typ_name,
479 if (mdc_symlink == NULL) {
480 CERROR("Could not register LMV target "
481 "/proc/fs/lustre/%s/%s/target_obds/%s.",
482 obd->obd_type->typ_name, obd->obd_name,
484 lprocfs_remove(&lmv_proc_dir);
/*
 * Add a new MDC target (identified by @tgt_uuid) to the LMV target
 * table.  The first target also bootstraps the llog subsystem.  If the
 * LMV is already connected, the target is connected immediately and the
 * default EA size is recomputed; on connect failure the slot is rolled
 * back.  Serialized by the LMV init mutex (lmv_init_unlock() on exits).
 */
492 int lmv_add_target(struct obd_device *obd, struct obd_uuid *tgt_uuid)
494 struct lmv_obd *lmv = &obd->u.lmv;
495 struct lmv_tgt_desc *tgt;
499 CDEBUG(D_CONFIG, "Target uuid: %s.\n", tgt_uuid->uuid);
/* Target table is statically sized at LMV_MAX_TGT_COUNT. */
503 if (lmv->desc.ld_active_tgt_count >= LMV_MAX_TGT_COUNT) {
504 lmv_init_unlock(lmv);
505 CERROR("Can't add %s, LMV module compiled for %d MDCs. "
506 "That many MDCs already configured.\n",
507 tgt_uuid->uuid, LMV_MAX_TGT_COUNT);
/* First target: set up llog using its MDC device as the context. */
510 if (lmv->desc.ld_tgt_count == 0) {
511 struct obd_device *mdc_obd;
513 mdc_obd = class_find_client_obd(tgt_uuid, LUSTRE_MDC_NAME,
516 lmv_init_unlock(lmv);
517 CERROR("Target %s not attached\n", tgt_uuid->uuid);
521 rc = obd_llog_init(obd, &obd->obd_olg, mdc_obd, NULL);
523 lmv_init_unlock(lmv);
524 CERROR("lmv failed to setup llogging subsystems\n");
/* Claim the next slot under the spinlock. */
527 cfs_spin_lock(&lmv->lmv_lock);
528 tgt = lmv->tgts + lmv->desc.ld_tgt_count++;
529 tgt->ltd_uuid = *tgt_uuid;
530 cfs_spin_unlock(&lmv->lmv_lock);
532 if (lmv->connected) {
533 rc = lmv_connect_mdc(obd, tgt);
/* Connect failed: release the slot we just claimed. */
535 cfs_spin_lock(&lmv->lmv_lock);
536 lmv->desc.ld_tgt_count--;
537 memset(tgt, 0, sizeof(*tgt));
538 cfs_spin_unlock(&lmv->lmv_lock);
/* Success: EA size grows with the stripe (FID-per-target) count. */
540 int easize = sizeof(struct lmv_stripe_md) +
541 lmv->desc.ld_tgt_count *
542 sizeof(struct lu_fid);
543 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
547 lmv_init_unlock(lmv);
/*
 * Perform the deferred "real" connect: connect every configured target
 * via lmv_connect_mdc(), set timeouts and EA sizes.  Idempotent -- an
 * already-connected LMV returns immediately.  On partial failure all
 * targets connected so far are disconnected again.
 */
551 int lmv_check_connect(struct obd_device *obd)
553 struct lmv_obd *lmv = &obd->u.lmv;
554 struct lmv_tgt_desc *tgt;
/* Fast path: someone else already completed the connect. */
564 if (lmv->connected) {
565 lmv_init_unlock(lmv);
569 if (lmv->desc.ld_tgt_count == 0) {
570 lmv_init_unlock(lmv);
571 CERROR("%s: no targets configured.\n", obd->obd_name);
575 CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
576 lmv->cluuid.uuid, obd->obd_name);
578 LASSERT(lmv->tgts != NULL);
580 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
581 rc = lmv_connect_mdc(obd, tgt);
586 lmv_set_timeouts(obd);
/* Drop the extra export ref taken in lmv_connect(). */
587 class_export_put(lmv->exp);
589 easize = lmv_get_easize(lmv);
590 lmv_init_ea_size(obd->obd_self_export, easize, 0, 0);
591 lmv_init_unlock(lmv);
/* Error unwind: disconnect everything connected so far. */
600 --lmv->desc.ld_active_tgt_count;
601 rc2 = obd_disconnect(tgt->ltd_exp);
603 CERROR("LMV target %s disconnect on "
604 "MDC idx %d: error %d\n",
605 tgt->ltd_uuid.uuid, i, rc2);
609 class_disconnect(lmv->exp);
610 lmv_init_unlock(lmv);
/*
 * Disconnect a single MDC target: propagate force/fail/no_recov flags,
 * remove its procfs symlink, finalize its FID client, drop the observer
 * registration, disconnect the export and mark the target inactive.
 */
614 static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
617 struct proc_dir_entry *lmv_proc_dir;
619 struct lmv_obd *lmv = &obd->u.lmv;
620 struct obd_device *mdc_obd;
624 LASSERT(tgt != NULL);
625 LASSERT(obd != NULL);
627 mdc_obd = class_exp2obd(tgt->ltd_exp);
/* MDC inherits the LMV's shutdown disposition flags. */
630 mdc_obd->obd_force = obd->obd_force;
631 mdc_obd->obd_fail = obd->obd_fail;
632 mdc_obd->obd_no_recov = obd->obd_no_recov;
/* Remove the target_obds/<mdc-name> symlink created on connect. */
636 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
638 struct proc_dir_entry *mdc_symlink;
640 mdc_symlink = lprocfs_srch(lmv_proc_dir, mdc_obd->obd_name);
642 lprocfs_remove(&mdc_symlink);
644 CERROR("/proc/fs/lustre/%s/%s/target_obds/%s missing\n",
645 obd->obd_type->typ_name, obd->obd_name,
650 rc = obd_fid_fini(tgt->ltd_exp);
652 CERROR("Can't finanize fids factory\n");
654 CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
655 tgt->ltd_exp->exp_obd->obd_name,
656 tgt->ltd_exp->exp_obd->obd_uuid.uuid);
/* Unregister ourselves as observer before tearing the export down. */
658 obd_register_observer(tgt->ltd_exp->exp_obd, NULL);
659 rc = obd_disconnect(tgt->ltd_exp);
/* Only complain about disconnect errors on targets thought active. */
661 if (tgt->ltd_active) {
662 CERROR("Target %s disconnect error %d\n",
663 tgt->ltd_uuid.uuid, rc);
667 lmv_activate_target(lmv, tgt, 0);
/*
 * LMV disconnect entry point.  Underlying MDC targets are only torn down
 * on the final disconnect (refcount reaches zero); the procfs directory
 * is removed and the export dropped.
 */
672 static int lmv_disconnect(struct obd_export *exp)
674 struct obd_device *obd = class_exp2obd(exp);
676 struct proc_dir_entry *lmv_proc_dir;
678 struct lmv_obd *lmv = &obd->u.lmv;
687 * Only disconnect the underlying layers on the final disconnect.
690 if (lmv->refcount != 0)
693 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
694 if (lmv->tgts[i].ltd_exp == NULL)
696 lmv_disconnect_mdc(obd, &lmv->tgts[i]);
/* Remove the target_obds proc directory created in lmv_connect(). */
700 lmv_proc_dir = lprocfs_srch(obd->obd_proc_entry, "target_obds");
702 lprocfs_remove(&lmv_proc_dir);
704 CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n",
705 obd->obd_type->typ_name, obd->obd_name);
711 * This is the case when no real connection is established by
712 * lmv_check_connect().
/* Drop the extra ref taken with class_export_get() on connect. */
715 class_export_put(exp);
716 rc = class_disconnect(exp);
717 if (lmv->refcount == 0)
/*
 * LMV ioctl dispatcher.  Handles a few commands itself (per-target
 * statfs, quotactl, changelog send/clear, connect flags) and falls
 * through to broadcasting the ioctl to every connected target in the
 * default case.
 */
722 static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
723 int len, void *karg, void *uarg)
725 struct obd_device *obddev = class_exp2obd(exp);
726 struct lmv_obd *lmv = &obddev->u.lmv;
730 int count = lmv->desc.ld_tgt_count;
737 case IOC_OBD_STATFS: {
738 struct obd_ioctl_data *data = karg;
739 struct obd_device *mdc_obd;
740 struct obd_statfs stat_buf = {0};
/* Target index is passed in inlbuf2 as a raw __u32. */
743 memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
744 if ((index >= count))
747 if (!lmv->tgts[index].ltd_active)
750 mdc_obd = class_exp2obd(lmv->tgts[index].ltd_exp);
/* Return the target name to userspace in pbuf2... */
755 if (cfs_copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
756 min((int) data->ioc_plen2,
757 (int) sizeof(struct obd_uuid))))
760 rc = obd_statfs(mdc_obd, &stat_buf,
761 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
/* ...and the statfs result in pbuf1. */
765 if (cfs_copy_to_user(data->ioc_pbuf1, &stat_buf,
766 min((int) data->ioc_plen1,
767 (int) sizeof(stat_buf))))
771 case OBD_IOC_QUOTACTL: {
772 struct if_quotactl *qctl = karg;
773 struct lmv_tgt_desc *tgt = NULL;
774 struct obd_quotactl *oqctl;
/* Target may be selected by MDT index or by UUID. */
776 if (qctl->qc_valid == QC_MDTIDX) {
777 if (qctl->qc_idx < 0 || count <= qctl->qc_idx)
780 tgt = &lmv->tgts[qctl->qc_idx];
783 } else if (qctl->qc_valid == QC_UUID) {
784 for (i = 0; i < count; i++) {
786 if (!obd_uuid_equals(&tgt->ltd_uuid,
790 if (tgt->ltd_exp == NULL)
802 LASSERT(tgt && tgt->ltd_exp);
803 OBD_ALLOC_PTR(oqctl);
807 QCTL_COPY(oqctl, qctl);
808 rc = obd_quotactl(tgt->ltd_exp, oqctl);
810 QCTL_COPY(qctl, oqctl);
/* Report back which MDT answered the quota request. */
811 qctl->qc_valid = QC_MDTIDX;
812 qctl->obd_uuid = tgt->ltd_uuid;
817 case OBD_IOC_CHANGELOG_SEND:
818 case OBD_IOC_CHANGELOG_CLEAR: {
819 struct ioc_changelog *icc = karg;
821 if (icc->icc_mdtindex >= count)
/* Forward changelog command to the addressed MDT only. */
824 rc = obd_iocontrol(cmd, lmv->tgts[icc->icc_mdtindex].ltd_exp,
825 sizeof(*icc), icc, NULL);
828 case LL_IOC_GET_CONNECT_FLAGS: {
829 rc = obd_iocontrol(cmd, lmv->tgts[0].ltd_exp, len, karg, uarg);
/* Default: broadcast the ioctl to every connected target. */
834 for (i = 0; i < count; i++) {
836 struct obd_device *mdc_obd;
838 if (lmv->tgts[i].ltd_exp == NULL)
840 /* ll_umount_begin() sets force flag but for lmv, not
841 * mdc. Let's pass it through */
842 mdc_obd = class_exp2obd(lmv->tgts[i].ltd_exp);
843 mdc_obd->obd_force = obddev->obd_force;
844 err = obd_iocontrol(cmd, lmv->tgts[i].ltd_exp, len,
846 if (err == -ENODATA && cmd == OBD_IOC_POLL_QUOTACHECK) {
849 if (lmv->tgts[i].ltd_active) {
850 CERROR("error: iocontrol MDC %s on MDT"
851 "idx %d cmd %x: err = %d\n",
852 lmv->tgts[i].ltd_uuid.uuid,
/* Placement policy: pick a target index from the characters of @name
 * (body elided in this extract). */
867 static int lmv_all_chars_policy(int count, const char *name,
/*
 * Placement policy: derive a target index from the client's own NID so
 * that a given client consistently maps to the same MDS.
 */
878 static int lmv_nid_policy(struct lmv_obd *lmv)
880 struct obd_import *imp;
884 * XXX: To get nid we assume that underlying obd device is mdc.
886 imp = class_exp2cliimp(lmv->tgts[0].ltd_exp);
/* Fold the 64-bit self NID into a value, then take it mod tgt count. */
887 id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32);
888 return id % lmv->desc.ld_tgt_count;
/*
 * Dispatch to the configured placement policy to choose which MDS a new
 * object should be created on.  Unknown policies are reported as errors.
 */
891 static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
892 placement_policy_t placement)
895 case PLACEMENT_CHAR_POLICY:
/* Hash the name being created across all targets. */
896 return lmv_all_chars_policy(lmv->desc.ld_tgt_count,
898 op_data->op_namelen);
899 case PLACEMENT_NID_POLICY:
900 return lmv_nid_policy(lmv);
906 CERROR("Unsupported placement policy %x\n", placement);
911 * This is _inode_ placement policy function (not name).
913 static int lmv_placement_policy(struct obd_device *obd,
914 struct md_op_data *op_data,
917 struct lmv_obd *lmv = &obd->u.lmv;
918 struct lmv_object *obj;
922 LASSERT(mds != NULL);
/* Single-target LMV: the choice is trivial. */
924 if (lmv->desc.ld_tgt_count == 1) {
930 * Allocate new fid on target according to operation type and parent
933 obj = lmv_object_find(obd, &op_data->op_fid1);
/* Non-mkdir, nameless, or split-parent cases keep parent's MDS. */
934 if (obj != NULL || op_data->op_name == NULL ||
935 op_data->op_opc != LUSTRE_OPC_MKDIR) {
937 * Allocate fid for non-dir or for null name or for case parent
944 * If we have this flag turned on, and we see that
945 * parent dir is split, this means, that caller did not
946 * notice split yet. This is race and we would like to
947 * let caller know that.
949 if (op_data->op_bias & MDS_CHECK_SPLIT)
954 * Allocate new fid on same mds where parent fid is located and
955 * where operation will be sent. In case of split dir, ->op_fid1
956 * and ->op_mds here will contain fid and mds of slave directory
957 * object (assigned by caller).
959 *mds = op_data->op_mds;
963 * Parent directory is not split and we want to create a
964 * directory in it. Let's calculate where to place it according
965 * to operation data @op_data.
967 *mds = lmv_choose_mds(lmv, op_data, lmv->lmv_placement);
972 CERROR("Can't choose MDS, err = %d\n", rc);
974 LASSERT(*mds < lmv->desc.ld_tgt_count);
/*
 * Allocate a new FID on the target with index @mds.  The per-target FID
 * mutex keeps sequence allocation and FLD setup atomic with respect to
 * each other.  Fails with -ENODEV when the target is inactive.
 */
980 int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid,
983 struct lmv_tgt_desc *tgt;
987 tgt = lmv_get_target(lmv, mds);
990 * New seq alloc and FLD setup should be atomic. Otherwise we may find
991 * on server that seq in new allocated fid is not yet known.
993 cfs_mutex_lock(&tgt->ltd_fid_mutex);
995 if (!tgt->ltd_active)
996 GOTO(out, rc = -ENODEV);
999 * Asking underlaying tgt layer to allocate new fid.
1001 rc = obd_fid_alloc(tgt->ltd_exp, fid, NULL);
/* A successful allocation must produce a well-formed FID. */
1003 LASSERT(fid_is_sane(fid));
1009 cfs_mutex_unlock(&tgt->ltd_fid_mutex);
/*
 * Public FID allocation entry point: choose a target via the placement
 * policy, then delegate to __lmv_fid_alloc() for the actual allocation.
 */
1013 int lmv_fid_alloc(struct obd_export *exp, struct lu_fid *fid,
1014 struct md_op_data *op_data)
1016 struct obd_device *obd = class_exp2obd(exp);
1017 struct lmv_obd *lmv = &obd->u.lmv;
1022 LASSERT(op_data != NULL);
1023 LASSERT(fid != NULL);
/* Decide which MDS the new inode should live on. */
1025 rc = lmv_placement_policy(obd, op_data, &mds);
1027 CERROR("Can't get target for allocating fid, "
1032 rc = __lmv_fid_alloc(lmv, fid, mds);
1034 CERROR("Can't alloc new fid, rc %d\n", rc);
/*
 * Drop the local LMV object cached for @fid, if any, when the FID is
 * being deleted.  Logs when a cached split-dir object was destroyed.
 */
1041 static int lmv_fid_delete(struct obd_export *exp, const struct lu_fid *fid)
1044 LASSERT(exp != NULL && fid != NULL);
1045 if (lmv_object_delete(exp, fid)) {
1046 CDEBUG(D_INODE, "Object "DFID" is destroyed.\n",
/*
 * LMV device setup: validate the config descriptor, allocate the target
 * and connect-data tables (both sized for LMV_MAX_TGT_COUNT), initialize
 * locks/counters, set up the object manager, procfs entries and the FLD
 * client.  Error paths free datas then tgts in reverse order.
 */
1052 static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
1054 struct lmv_obd *lmv = &obd->u.lmv;
1055 struct lprocfs_static_vars lvars;
1056 struct lmv_desc *desc;
1061 if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
1062 CERROR("LMV setup requires a descriptor\n");
1066 desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
/* The config buffer must be large enough to hold a full descriptor. */
1067 if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
1068 CERROR("Lmv descriptor size wrong: %d > %d\n",
1069 (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
1073 lmv->tgts_size = LMV_MAX_TGT_COUNT * sizeof(struct lmv_tgt_desc);
1075 OBD_ALLOC(lmv->tgts, lmv->tgts_size);
1076 if (lmv->tgts == NULL)
/* Pre-initialize every slot's FID mutex and index. */
1079 for (i = 0; i < LMV_MAX_TGT_COUNT; i++) {
1080 cfs_mutex_init(&lmv->tgts[i].ltd_fid_mutex);
1081 lmv->tgts[i].ltd_idx = i;
1084 lmv->datas_size = LMV_MAX_TGT_COUNT * sizeof(struct obd_connect_data);
1086 OBD_ALLOC(lmv->datas, lmv->datas_size);
1087 if (lmv->datas == NULL)
1088 GOTO(out_free_tgts, rc = -ENOMEM);
1090 obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
1091 lmv->desc.ld_tgt_count = 0;
1092 lmv->desc.ld_active_tgt_count = 0;
1093 lmv->max_cookiesize = 0;
1094 lmv->max_def_easize = 0;
1095 lmv->max_easize = 0;
/* Default inode placement policy: hash of the name characters. */
1096 lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
1098 cfs_spin_lock_init(&lmv->lmv_lock);
1099 cfs_mutex_init(&lmv->init_mutex);
1101 rc = lmv_object_setup(obd);
1103 CERROR("Can't setup LMV object manager, error %d.\n", rc);
1104 GOTO(out_free_datas, rc);
1107 lprocfs_lmv_init_vars(&lvars);
1108 lprocfs_obd_setup(obd, lvars.obd_vars);
/* target_obd proc file failure is non-fatal (warn only). */
1111 rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
1112 0444, &lmv_proc_target_fops, obd);
1114 CWARN("%s: error adding LMV target_obd file: rc = %d\n",
1118 rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
1119 LUSTRE_CLI_FLD_HASH_DHT);
1121 CERROR("Can't init FLD, err %d\n", rc);
1122 GOTO(out_free_datas, rc);
1128 OBD_FREE(lmv->datas, lmv->datas_size);
1131 OBD_FREE(lmv->tgts, lmv->tgts_size);
/*
 * LMV device teardown: release the FLD client, the object manager and
 * the tables allocated in lmv_setup() (mirror of its error unwind).
 */
1136 static int lmv_cleanup(struct obd_device *obd)
1138 struct lmv_obd *lmv = &obd->u.lmv;
1141 fld_client_fini(&lmv->lmv_fld);
1142 lmv_object_cleanup(obd);
1143 OBD_FREE(lmv->datas, lmv->datas_size);
1144 OBD_FREE(lmv->tgts, lmv->tgts_size);
/*
 * Process a config log record.  The only visible command here adds an
 * MDC target by UUID (buffer 1 of the lustre_cfg); anything unrecognized
 * is rejected with -EINVAL.
 */
1149 static int lmv_process_config(struct obd_device *obd, obd_count len, void *buf)
1151 struct lustre_cfg *lcfg = buf;
1152 struct obd_uuid tgt_uuid;
1156 switch(lcfg->lcfg_command) {
/* UUID string must fit in the fixed-size uuid buffer. */
1158 if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(tgt_uuid.uuid))
1159 GOTO(out, rc = -EINVAL);
1161 obd_str2uuid(&tgt_uuid, lustre_cfg_string(lcfg, 1));
1162 rc = lmv_add_target(obd, &tgt_uuid);
1165 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
1166 GOTO(out, rc = -EINVAL);
/*
 * Aggregate statfs: query each connected target and sum the block/file
 * counters into @osfs.  A failure from any target aborts the whole call.
 */
1173 static int lmv_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1174 __u64 max_age, __u32 flags)
1176 struct lmv_obd *lmv = &obd->u.lmv;
1177 struct obd_statfs *temp;
1182 rc = lmv_check_connect(obd);
/* Scratch buffer for per-target results. */
1186 OBD_ALLOC(temp, sizeof(*temp));
1190 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1191 if (lmv->tgts[i].ltd_exp == NULL)
1194 rc = obd_statfs(lmv->tgts[i].ltd_exp->exp_obd, temp,
1197 CERROR("can't stat MDS #%d (%s), error %d\n", i,
1198 lmv->tgts[i].ltd_exp->exp_obd->obd_name,
1200 GOTO(out_free_temp, rc);
/* Accumulate capacity and inode counts across targets. */
1205 osfs->os_bavail += temp->os_bavail;
1206 osfs->os_blocks += temp->os_blocks;
1207 osfs->os_ffree += temp->os_ffree;
1208 osfs->os_files += temp->os_files;
1214 OBD_FREE(temp, sizeof(*temp));
/*
 * md_getstatus passthrough: always served by the first target (index 0),
 * which holds the filesystem root.
 */
1218 static int lmv_getstatus(struct obd_export *exp,
1220 struct obd_capa **pc)
1222 struct obd_device *obd = exp->exp_obd;
1223 struct lmv_obd *lmv = &obd->u.lmv;
1227 rc = lmv_check_connect(obd);
1231 rc = md_getstatus(lmv->tgts[0].ltd_exp, fid, pc);
/*
 * md_getxattr passthrough: route the request to the target that owns
 * @fid (via the FLD-backed lmv_find_target()).
 */
1235 static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
1236 struct obd_capa *oc, obd_valid valid, const char *name,
1237 const char *input, int input_size, int output_size,
1238 int flags, struct ptlrpc_request **request)
1240 struct obd_device *obd = exp->exp_obd;
1241 struct lmv_obd *lmv = &obd->u.lmv;
1242 struct lmv_tgt_desc *tgt;
1246 rc = lmv_check_connect(obd);
1250 tgt = lmv_find_target(lmv, fid);
1252 RETURN(PTR_ERR(tgt));
1254 rc = md_getxattr(tgt->ltd_exp, fid, oc, valid, name, input,
1255 input_size, output_size, flags, request);
/*
 * md_setxattr passthrough: route the request to the target that owns
 * @fid, mirroring lmv_getxattr().
 */
1260 static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
1261 struct obd_capa *oc, obd_valid valid, const char *name,
1262 const char *input, int input_size, int output_size,
1263 int flags, __u32 suppgid,
1264 struct ptlrpc_request **request)
1266 struct obd_device *obd = exp->exp_obd;
1267 struct lmv_obd *lmv = &obd->u.lmv;
1268 struct lmv_tgt_desc *tgt;
1272 rc = lmv_check_connect(obd);
1276 tgt = lmv_find_target(lmv, fid);
1278 RETURN(PTR_ERR(tgt));
1280 rc = md_setxattr(tgt->ltd_exp, fid, oc, valid, name, input,
1281 input_size, output_size, flags, suppgid,
/*
 * getattr with split-directory awareness: forward the getattr to the
 * owning target, then, if the object is a split directory, add up the
 * sizes of all slave stripes into the returned mdt_body.
 */
1287 static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
1288 struct ptlrpc_request **request)
1290 struct obd_device *obd = exp->exp_obd;
1291 struct lmv_obd *lmv = &obd->u.lmv;
1292 struct lmv_tgt_desc *tgt;
1293 struct lmv_object *obj;
1298 rc = lmv_check_connect(obd);
1302 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1304 RETURN(PTR_ERR(tgt));
/* Caller only wants the MDT index, not a full getattr RPC. */
1306 if (op_data->op_valid & OBD_MD_MDTIDX) {
1307 op_data->op_mds = tgt->ltd_idx;
1311 rc = md_getattr(tgt->ltd_exp, op_data, request);
/* Returns the object locked if it is a known split directory. */
1315 obj = lmv_object_find_lock(obd, &op_data->op_fid1);
1317 CDEBUG(D_INODE, "GETATTR for "DFID" %s\n", PFID(&op_data->op_fid1),
1318 obj ? "(split)" : "");
1321 * If object is split, then we loop over all the slaves and gather size
1322 * attribute. In ideal world we would have to gather also mds field from
1323 * all slaves, as object is spread over the cluster and this is
1324 * definitely interesting information and it is not good to loss it,
1328 struct mdt_body *body;
1330 if (*request == NULL) {
1331 lmv_object_put(obj);
1335 body = req_capsule_server_get(&(*request)->rq_pill,
1337 LASSERT(body != NULL);
1339 for (i = 0; i < obj->lo_objcount; i++) {
1340 if (lmv->tgts[i].ltd_exp == NULL) {
1341 CWARN("%s: NULL export for %d\n",
1347 * Skip master object.
1349 if (lu_fid_eq(&obj->lo_fid, &obj->lo_stripes[i].ls_fid))
/* Accumulate slave stripe sizes into the master body. */
1352 body->size += obj->lo_stripes[i].ls_size;
1355 lmv_object_put_unlock(obj);
/*
 * Iterate the DLM lock callback-data change over every target: with CMD
 * an object may hold locks on more than one MDS (lookup vs. update/open
 * namespaces), so all targets must be visited.
 */
1361 static int lmv_change_cbdata(struct obd_export *exp, const struct lu_fid *fid,
1362 ldlm_iterator_t it, void *data)
1364 struct obd_device *obd = exp->exp_obd;
1365 struct lmv_obd *lmv = &obd->u.lmv;
1370 rc = lmv_check_connect(obd);
1374 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1377 * With CMD every object can have two locks in different namespaces:
1378 * lookup lock in space of mds storing direntry and update/open lock in
1379 * space of mds storing inode.
/* Per-target return values are intentionally ignored here. */
1381 for (i = 0; i < lmv->desc.ld_tgt_count; i++)
1382 md_change_cbdata(lmv->tgts[i].ltd_exp, fid, it, data);
/*
 * Search all targets for DLM lock callback data matching @fid.  Like
 * lmv_change_cbdata() this must scan every target because CMD objects
 * can hold locks on multiple MDSes; here per-target results are checked.
 */
1387 static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
1388 ldlm_iterator_t it, void *data)
1390 struct obd_device *obd = exp->exp_obd;
1391 struct lmv_obd *lmv = &obd->u.lmv;
1396 rc = lmv_check_connect(obd);
1400 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1403 * With CMD every object can have two locks in different namespaces:
1404 * lookup lock in space of mds storing direntry and update/open lock in
1405 * space of mds storing inode.
1407 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1408 rc = md_find_cbdata(lmv->tgts[i].ltd_exp, fid, it, data);
/*
 * md_close passthrough: route the close RPC to the target that owns
 * op_fid1.
 */
1417 static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
1418 struct md_open_data *mod, struct ptlrpc_request **request)
1420 struct obd_device *obd = exp->exp_obd;
1421 struct lmv_obd *lmv = &obd->u.lmv;
1422 struct lmv_tgt_desc *tgt;
1426 rc = lmv_check_connect(obd);
1430 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1432 RETURN(PTR_ERR(tgt));
1434 CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
1435 rc = md_close(tgt->ltd_exp, op_data, mod, request);
1440 * Called in the case MDS returns -ERESTART on create on open, what means that
1441 * directory is split and its LMV presentation object has to be updated.
1443 int lmv_handle_split(struct obd_export *exp, const struct lu_fid *fid)
1445 struct obd_device *obd = exp->exp_obd;
1446 struct lmv_obd *lmv = &obd->u.lmv;
1447 struct ptlrpc_request *req = NULL;
1448 struct lmv_tgt_desc *tgt;
1449 struct lmv_object *obj;
1450 struct lustre_md md;
1451 struct md_op_data *op_data;
/* Fetch the directory's MEA (striping descriptor) via getattr. */
1458 mealen = lmv_get_easize(lmv);
1460 valid = OBD_MD_FLEASIZE | OBD_MD_FLDIREA | OBD_MD_MEA;
1462 tgt = lmv_find_target(lmv, fid);
1464 RETURN(PTR_ERR(tgt));
1467 * Time to update mea of parent fid.
1470 OBD_ALLOC_PTR(op_data);
1471 if (op_data == NULL)
1474 op_data->op_fid1 = *fid;
/* op_mode carries the EA buffer length for this getattr. */
1475 op_data->op_mode = mealen;
1476 op_data->op_valid = valid;
1478 rc = md_getattr(tgt->ltd_exp, op_data, &req);
1479 OBD_FREE_PTR(op_data);
1481 CERROR("md_getattr() failed, error %d\n", rc);
1485 rc = md_get_lustre_md(tgt->ltd_exp, req, NULL, exp, &md);
1487 CERROR("md_get_lustre_md() failed, error %d\n", rc);
/* No MEA in the reply: directory is not actually split. */
1492 GOTO(cleanup, rc = -ENODATA);
/* (Re)create the local split-directory object from the MEA. */
1494 obj = lmv_object_create(exp, fid, md.mea);
1498 lmv_object_put(obj);
1500 obd_free_memmd(exp, (void *)&md.mea);
1504 ptlrpc_req_finished(req);
/*
 * Create a regular file/directory.  For a split parent the target stripe
 * (and MDS) is chosen by hashing the name; otherwise the parent's owning
 * target is used with MDS_CHECK_SPLIT set so the server can report a
 * race with splitting.  On -ERESTART the local split object is refreshed
 * (lmv_handle_split) and slaves reallocated before the caller retries.
 */
1508 int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
1509 const void *data, int datalen, int mode, __u32 uid,
1510 __u32 gid, cfs_cap_t cap_effective, __u64 rdev,
1511 struct ptlrpc_request **request)
1513 struct obd_device *obd = exp->exp_obd;
1514 struct lmv_obd *lmv = &obd->u.lmv;
1515 struct lmv_tgt_desc *tgt;
1516 struct lmv_object *obj;
1522 rc = lmv_check_connect(obd);
/* No active targets: nothing can serve the create. */
1526 if (!lmv->desc.ld_active_tgt_count)
1532 obj = lmv_object_find(obd, &op_data->op_fid1);
/* Split parent: hash the new name to pick the slave stripe. */
1534 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
1535 op_data->op_name, op_data->op_namelen);
1536 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
1537 op_data->op_bias &= ~MDS_CHECK_SPLIT;
1538 op_data->op_mds = obj->lo_stripes[sidx].ls_mds;
1539 tgt = lmv_get_target(lmv, op_data->op_mds);
1540 lmv_object_put(obj);
/* Unsplit parent: use its owner and ask server to check splits. */
1542 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1543 op_data->op_bias |= MDS_CHECK_SPLIT;
1544 op_data->op_mds = tgt->ltd_idx;
1548 RETURN(PTR_ERR(tgt));
/* Allocate the new object's FID up front. */
1550 rc = lmv_fid_alloc(exp, &op_data->op_fid2, op_data);
1551 if (rc == -ERESTART)
1556 CDEBUG(D_INODE, "CREATE '%*s' on "DFID" -> mds #%x\n",
1557 op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
1560 op_data->op_flags |= MF_MDC_CANCEL_FID1;
1561 rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
1562 cap_effective, rdev, request);
1564 if (*request == NULL)
1566 CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
1567 } else if (rc == -ERESTART) {
1568 LASSERT(*request != NULL);
1569 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
1570 "Got -ERESTART during create!\n");
1571 ptlrpc_req_finished(*request);
1575 * Directory got split. Time to update local object and repeat
1576 * the request with proper MDS.
1578 rc = lmv_handle_split(exp, &op_data->op_fid1);
1580 rc = lmv_allocate_slaves(obd, &op_data->op_fid1,
1581 op_data, &op_data->op_fid2);
/*
 * md_done_writing passthrough: route to the target that owns op_fid1.
 */
1590 static int lmv_done_writing(struct obd_export *exp,
1591 struct md_op_data *op_data,
1592 struct md_open_data *mod)
1594 struct obd_device *obd = exp->exp_obd;
1595 struct lmv_obd *lmv = &obd->u.lmv;
1596 struct lmv_tgt_desc *tgt;
1600 rc = lmv_check_connect(obd);
1604 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1606 RETURN(PTR_ERR(tgt));
1608 rc = md_done_writing(tgt->ltd_exp, op_data, mod);
/*
 * Take a lock on every slave stripe of a split directory (op_data->op_mea1),
 * storing one lock handle per stripe in lockh[i].  On any failure all locks
 * already taken are dropped in the cleanup path at the bottom.
 */
1613 lmv_enqueue_slaves(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1614 struct lookup_intent *it, struct md_op_data *op_data,
1615 struct lustre_handle *lockh, void *lmm, int lmmsize)
1617 struct obd_device *obd = exp->exp_obd;
1618 struct lmv_obd *lmv = &obd->u.lmv;
1619 struct lmv_stripe_md *mea = op_data->op_mea1;
1620 struct md_op_data *op_data2;
1621 struct lmv_tgt_desc *tgt;
/* Scratch op_data reused (re-zeroed) for each per-stripe enqueue. */
1626 OBD_ALLOC_PTR(op_data2);
1627 if (op_data2 == NULL)
1630 LASSERT(mea != NULL);
1631 for (i = 0; i < mea->mea_count; i++) {
1632 memset(op_data2, 0, sizeof(*op_data2));
1633 op_data2->op_fid1 = mea->mea_ids[i];
1634 op_data2->op_bias = 0;
1636 tgt = lmv_find_target(lmv, &op_data2->op_fid1);
1638 GOTO(cleanup, rc = PTR_ERR(tgt));
/* Skip targets that are not connected. */
1640 if (tgt->ltd_exp == NULL)
1643 rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data2,
1644 lockh + i, lmm, lmmsize, NULL, 0);
1646 CDEBUG(D_INODE, "Take lock on slave "DFID" -> %d/%d\n",
1647 PFID(&mea->mea_ids[i]), rc, it->d.lustre.it_status);
/* The intent reply request is not needed; release it right away. */
1652 if (it->d.lustre.it_data) {
1653 struct ptlrpc_request *req;
1654 req = (struct ptlrpc_request *)it->d.lustre.it_data;
1655 ptlrpc_req_finished(req);
1658 if (it->d.lustre.it_status)
1659 GOTO(cleanup, rc = it->d.lustre.it_status);
1664 OBD_FREE_PTR(op_data2);
/*
 * Error path:
1668 * Drop all taken locks.
 */
1671 if (lockh[i].cookie)
1672 ldlm_lock_decref(lockh + i, einfo->ei_mode);
1673 lockh[i].cookie = 0;
/*
 * Handle a cross-MDS (remote) open: the first enqueue returned a reply with
 * OBD_MD_MDS set, meaning only a LOOKUP lock was granted and the inode lives
 * on another MDS.  Re-issue the enqueue (MDS_CROSS_REF) against the MDS that
 * actually stores the inode, then drop the parent LOOKUP lock.
 * NOTE(review): `fid1` at line 1719 is assigned from the reply body in a
 * line elided by this sampled listing — confirm against full source.
 */
1680 lmv_enqueue_remote(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1681 struct lookup_intent *it, struct md_op_data *op_data,
1682 struct lustre_handle *lockh, void *lmm, int lmmsize,
1683 int extra_lock_flags)
1685 struct ptlrpc_request *req = it->d.lustre.it_data;
1686 struct obd_device *obd = exp->exp_obd;
1687 struct lmv_obd *lmv = &obd->u.lmv;
1688 struct lustre_handle plock;
1689 struct lmv_tgt_desc *tgt;
1690 struct md_op_data *rdata;
1692 struct mdt_body *body;
1697 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
1698 LASSERT(body != NULL);
/* Not a remote object: nothing to do. */
1700 if (!(body->valid & OBD_MD_MDS))
1703 CDEBUG(D_INODE, "REMOTE_ENQUEUE '%s' on "DFID" -> "DFID"\n",
1704 LL_IT2STR(it), PFID(&op_data->op_fid1), PFID(&body->fid1))
/*
1707 * We got LOOKUP lock, but we really need attrs.
 * Save the parent lock handle/mode before clearing the intent state.
 */
1709 pmode = it->d.lustre.it_lock_mode;
1710 LASSERT(pmode != 0);
1711 memcpy(&plock, lockh, sizeof(plock));
1712 it->d.lustre.it_lock_mode = 0;
1713 it->d.lustre.it_data = NULL;
/* First reply is consumed; allow a fresh enqueue to complete the intent. */
1716 it->d.lustre.it_disposition &= ~DISP_ENQ_COMPLETE;
1717 ptlrpc_req_finished(req);
1719 tgt = lmv_find_target(lmv, &fid1);
1721 GOTO(out, rc = PTR_ERR(tgt));
1723 OBD_ALLOC_PTR(rdata);
1725 GOTO(out, rc = -ENOMEM);
/* Minimal op_data for the cross-ref enqueue on the remote MDS. */
1727 rdata->op_fid1 = fid1;
1728 rdata->op_bias = MDS_CROSS_REF;
1730 rc = md_enqueue(tgt->ltd_exp, einfo, it, rdata, lockh,
1731 lmm, lmmsize, NULL, extra_lock_flags);
1732 OBD_FREE_PTR(rdata);
/* Drop the parent's LOOKUP lock taken by the original enqueue. */
1735 ldlm_lock_decref(&plock, pmode);
/*
 * Top-level LMV lock enqueue.  Dispatches to:
 *  - lmv_enqueue_slaves() for IT_UNLINK on a split directory (op_mea1 set);
 *  - otherwise picks the stripe by name hash (split dir) or the fid's own
 *    target, then calls md_enqueue() there;
 *  - lmv_enqueue_remote() afterwards for IT_OPEN, to chase cross-MDS inodes.
 */
1740 lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1741 struct lookup_intent *it, struct md_op_data *op_data,
1742 struct lustre_handle *lockh, void *lmm, int lmmsize,
1743 struct ptlrpc_request **req, int extra_lock_flags)
1745 struct obd_device *obd = exp->exp_obd;
1746 struct lmv_obd *lmv = &obd->u.lmv;
1747 struct lmv_tgt_desc *tgt;
1748 struct lmv_object *obj;
1753 rc = lmv_check_connect(obd);
1757 CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n",
1758 LL_IT2STR(it), PFID(&op_data->op_fid1));
1760 if (op_data->op_mea1 && it && it->it_op == IT_UNLINK) {
1761 rc = lmv_enqueue_slaves(exp, einfo, it, op_data,
1762 lockh, lmm, lmmsize);
/* Split directory: hash the name to pick the stripe and its MDS. */
1766 obj = lmv_object_find(obd, &op_data->op_fid1);
1767 if (obj && op_data->op_namelen) {
1768 sidx = raw_name2idx(obj->lo_hashtype,
1770 (char *)op_data->op_name,
1771 op_data->op_namelen);
1772 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
1773 tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
1775 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1778 lmv_object_put(obj);
1781 RETURN(PTR_ERR(tgt));
1783 CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%d\n",
1784 LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
1786 rc = md_enqueue(tgt->ltd_exp, einfo, it, op_data, lockh,
1787 lmm, lmmsize, req, extra_lock_flags);
/* Open may have landed on a cross-referenced inode on another MDS. */
1789 if (rc == 0 && it && it->it_op == IT_OPEN) {
1790 rc = lmv_enqueue_remote(exp, einfo, it, op_data, lockh,
1791 lmm, lmmsize, extra_lock_flags);
/*
 * getattr-by-name through LMV.  For a split directory the name hash selects
 * the stripe (and clears OBD_MD_FLCKSPLIT, since the split is already
 * known); otherwise the fid's own target is used with OBD_MD_FLCKSPLIT set
 * so the MDS reports -ERESTART if the dir splits underneath us.  Handles
 * two follow-ups: a cross-MDS reply (OBD_MD_MDS) triggers a second
 * MDS_CROSS_REF getattr, and -ERESTART triggers lmv_handle_split() + retry.
 */
1797 lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
1798 struct ptlrpc_request **request)
1800 struct ptlrpc_request *req = NULL;
1801 struct obd_device *obd = exp->exp_obd;
1802 struct lmv_obd *lmv = &obd->u.lmv;
1803 struct lu_fid rid = op_data->op_fid1;
1804 struct lmv_tgt_desc *tgt;
1805 struct mdt_body *body;
1806 struct lmv_object *obj;
1807 obd_valid valid = op_data->op_valid;
1813 rc = lmv_check_connect(obd);
1820 obj = lmv_object_find(obd, &rid);
1822 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
1823 op_data->op_name, op_data->op_namelen);
1824 rid = obj->lo_stripes[sidx].ls_fid;
1825 tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
1826 op_data->op_mds = obj->lo_stripes[sidx].ls_mds;
1827 valid &= ~OBD_MD_FLCKSPLIT;
1828 lmv_object_put(obj);
1830 tgt = lmv_find_target(lmv, &rid);
1831 valid |= OBD_MD_FLCKSPLIT;
1832 op_data->op_mds = tgt->ltd_idx;
1835 RETURN(PTR_ERR(tgt));
1837 CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" - "DFID" -> mds #%d\n",
1838 op_data->op_namelen, op_data->op_name, PFID(&op_data->op_fid1),
1839 PFID(&rid), tgt->ltd_idx);
1841 op_data->op_valid = valid;
1842 op_data->op_fid1 = rid;
1843 rc = md_getattr_name(tgt->ltd_exp, op_data, request);
1845 body = req_capsule_server_get(&(*request)->rq_pill,
1847 LASSERT(body != NULL);
/* Inode lives on another MDS: repeat as a cross-ref getattr there. */
1849 if (body->valid & OBD_MD_MDS) {
1851 CDEBUG(D_INODE, "Request attrs for "DFID"\n",
1854 tgt = lmv_find_target(lmv, &rid);
1856 ptlrpc_req_finished(*request);
1857 RETURN(PTR_ERR(tgt));
1860 op_data->op_fid1 = rid;
1861 op_data->op_valid |= OBD_MD_FLCROSSREF;
1862 op_data->op_namelen = 0;
1863 op_data->op_name = NULL;
1864 rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
1865 ptlrpc_req_finished(*request);
1868 } else if (rc == -ERESTART) {
1869 LASSERT(*request != NULL);
1870 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
1871 "Got -ERESTART during getattr!\n");
1872 ptlrpc_req_finished(*request);
/*
1876 * Directory got split. Time to update local object and repeat
1877 * the request with proper MDS.
 */
1879 rc = lmv_handle_split(exp, &rid);
/*
 * Map an MF_MDC_CANCEL_FIDn flag to the address of the corresponding
 * op_fidN field inside @op_data (chained conditional over the four flags).
 */
1886 #define md_op_data_fid(op_data, fl) \
1887 (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
1888 fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
1889 fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
1890 fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
/*
 * Pre-cancel unused locks with the given @mode/@bits on every slave stripe
 * of the split directory identified by the op_data fid selected via @flag.
 * Stripes living on the operation target (@op_tgt) are skipped — the MDS
 * handling the operation cancels those itself.
 */
1893 static int lmv_early_cancel_slaves(struct obd_export *exp,
1894 struct md_op_data *op_data, int op_tgt,
1895 ldlm_mode_t mode, int bits, int flag)
1897 struct obd_device *obd = exp->exp_obd;
1898 struct lmv_obd *lmv = &obd->u.lmv;
1899 ldlm_policy_data_t policy = {{0}};
1900 struct lu_fid *op_fid;
1901 struct lu_fid *st_fid;
1902 struct lmv_tgt_desc *tgt;
1903 struct lmv_object *obj;
1908 op_fid = md_op_data_fid(op_data, flag);
1909 if (!fid_is_sane(op_fid))
1912 obj = lmv_object_find(obd, op_fid);
1916 policy.l_inodebits.bits = bits;
1917 for (i = 0; i < obj->lo_objcount; i++) {
1918 tgt = lmv_get_target(lmv, obj->lo_stripes[i].ls_mds);
1919 st_fid = &obj->lo_stripes[i].ls_fid;
1920 if (op_tgt != tgt->ltd_idx) {
1921 CDEBUG(D_INODE, "EARLY_CANCEL slave "DFID" -> mds #%d\n",
1922 PFID(st_fid), tgt->ltd_idx);
/* LCF_ASYNC: fire-and-forget cancel, no reply wait. */
1923 rc = md_cancel_unused(tgt->ltd_exp, st_fid, &policy,
1924 mode, LCF_ASYNC, NULL);
1926 GOTO(out_put_obj, rc);
1929 "EARLY_CANCEL skip operation target %d on "DFID"\n",
1930 op_tgt, PFID(st_fid));
/*
1932 * Do not cancel locks for operation target, they will
1933 * be handled later in underlaying layer when calling
1934 * function we run on behalf of.
 */
1937 op_data->op_flags |= flag;
1942 lmv_object_put(obj);
/*
 * Pre-cancel unused locks for the fid selected by @flag.  A split directory
 * (lmv_object_find() hit) is delegated to lmv_early_cancel_slaves(); for a
 * plain object, cancel on its owning target unless that target is the one
 * performing the operation (which cancels its own locks later).
 */
1946 static int lmv_early_cancel(struct obd_export *exp, struct md_op_data *op_data,
1947 int op_tgt, ldlm_mode_t mode, int bits, int flag)
1949 struct lu_fid *fid = md_op_data_fid(op_data, flag);
1950 struct obd_device *obd = exp->exp_obd;
1951 struct lmv_obd *lmv = &obd->u.lmv;
1952 struct lmv_tgt_desc *tgt;
1953 ldlm_policy_data_t policy = {{0}};
1954 struct lmv_object *obj;
1958 if (!fid_is_sane(fid))
1961 obj = lmv_object_find(obd, fid);
1963 rc = lmv_early_cancel_slaves(exp, op_data, op_tgt, mode,
1965 lmv_object_put(obj);
1967 tgt = lmv_find_target(lmv, fid);
1969 RETURN(PTR_ERR(tgt));
1971 if (tgt->ltd_idx != op_tgt) {
1972 CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
1973 policy.l_inodebits.bits = bits;
1974 rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
1975 mode, LCF_ASYNC, NULL);
1978 "EARLY_CANCEL skip operation target %d on "DFID"\n",
/* Mark the flag so the lower layer cancels these locks itself. */
1980 op_data->op_flags |= flag;
1989 * llite passes fid of a target inode in op_data->op_fid1 and id of directory in
/*
 * Hard link: op_fid1 is the existing (child) inode, op_fid2 the target
 * directory.  For a split target directory the new name's hash picks the
 * stripe; the request is then sent to the MDS owning the directory (stripe).
 * -ERESTART from the MDS means the directory split mid-flight:
 * refresh the local split object and retry.
 */
1992 static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
1993 struct ptlrpc_request **request)
1995 struct obd_device *obd = exp->exp_obd;
1996 struct lmv_obd *lmv = &obd->u.lmv;
1997 struct lmv_tgt_desc *tgt;
1998 struct lmv_object *obj;
2005 rc = lmv_check_connect(obd);
2012 LASSERT(op_data->op_namelen != 0);
2014 CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n",
2015 PFID(&op_data->op_fid2), op_data->op_namelen,
2016 op_data->op_name, PFID(&op_data->op_fid1));
2018 obj = lmv_object_find(obd, &op_data->op_fid2);
2020 sidx = raw_name2idx(obj->lo_hashtype,
2023 op_data->op_namelen);
2024 op_data->op_fid2 = obj->lo_stripes[sidx].ls_fid;
2025 mds = obj->lo_stripes[sidx].ls_mds;
2026 lmv_object_put(obj);
/* Non-split directory: resolve its MDS through the FLD. */
2028 rc = lmv_fld_lookup(lmv, &op_data->op_fid2, &mds);
2033 CDEBUG(D_INODE, "Forward to mds #%x ("DFID")\n",
2034 mds, PFID(&op_data->op_fid1));
2036 op_data->op_fsuid = cfs_curproc_fsuid();
2037 op_data->op_fsgid = cfs_curproc_fsgid();
2038 op_data->op_cap = cfs_curproc_cap_pack();
2039 tgt = lmv_get_target(lmv, mds);
/*
2042 * Cancel UPDATE lock on child (fid1).
 * FID2 (parent) cancellation is left to the MDC layer via the flag.
 */
2044 op_data->op_flags |= MF_MDC_CANCEL_FID2;
2045 rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
2046 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
2048 rc = md_link(tgt->ltd_exp, op_data, request);
2049 if (rc == -ERESTART) {
2050 LASSERT(*request != NULL);
2051 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2052 "Got -ERESTART during link!\n");
2053 ptlrpc_req_finished(*request);
/*
2057 * Directory got split. Time to update local object and repeat
2058 * the request with proper MDS.
 */
2060 rc = lmv_handle_split(exp, &op_data->op_fid2);
/*
 * Rename @old (in dir op_fid1) to @new (in dir op_fid2).  Each parent may be
 * a split directory, in which case the respective name hash selects the
 * stripe fid/MDS; otherwise the MDS comes from an FLD lookup.  The request
 * is issued on the source target; UPDATE/LOOKUP/FULL locks on the other
 * involved fids are pre-cancelled first.  -ERESTART means the source dir
 * split mid-flight: refresh and retry.
 */
2068 static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
2069 const char *old, int oldlen, const char *new, int newlen,
2070 struct ptlrpc_request **request)
2072 struct obd_device *obd = exp->exp_obd;
2073 struct lmv_obd *lmv = &obd->u.lmv;
2074 struct lmv_tgt_desc *src_tgt;
2078 struct lmv_object *obj;
2083 LASSERT(oldlen != 0);
2085 CDEBUG(D_INODE, "RENAME %*s in "DFID" to %*s in "DFID"\n",
2086 oldlen, old, PFID(&op_data->op_fid1),
2087 newlen, new, PFID(&op_data->op_fid2));
2089 rc = lmv_check_connect(obd);
/* Resolve source parent: stripe by old-name hash if split. */
2096 obj = lmv_object_find(obd, &op_data->op_fid1);
2098 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
2099 (char *)old, oldlen);
2100 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
2101 mds1 = obj->lo_stripes[sidx].ls_mds;
2102 CDEBUG(D_INODE, "Parent obj "DFID"\n", PFID(&op_data->op_fid1));
2103 lmv_object_put(obj);
2105 rc = lmv_fld_lookup(lmv, &op_data->op_fid1, &mds1);
/* Resolve target parent the same way, using the new-name hash. */
2110 obj = lmv_object_find(obd, &op_data->op_fid2);
/*
2113 * Directory is already split, so we have to forward request to
 * the stripe holding the new name's hash.
 */
2116 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
2117 (char *)new, newlen);
2119 mds2 = obj->lo_stripes[sidx].ls_mds;
2120 op_data->op_fid2 = obj->lo_stripes[sidx].ls_fid;
2121 CDEBUG(D_INODE, "Parent obj "DFID"\n", PFID(&op_data->op_fid2));
2122 lmv_object_put(obj);
2124 rc = lmv_fld_lookup(lmv, &op_data->op_fid2, &mds2);
2129 op_data->op_fsuid = cfs_curproc_fsuid();
2130 op_data->op_fsgid = cfs_curproc_fsgid();
2131 op_data->op_cap = cfs_curproc_cap_pack();
2133 src_tgt = lmv_get_target(lmv, mds1);
/*
2136 * LOOKUP lock on src child (fid3) should also be cancelled for
2137 * src_tgt in mdc_rename.
 */
2139 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
/*
2142 * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its
 * owning target.
 */
2145 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2146 LCK_EX, MDS_INODELOCK_UPDATE,
2147 MF_MDC_CANCEL_FID2);
/*
2150 * Cancel LOOKUP locks on tgt child (fid4) for parent tgt_tgt.
 */
2153 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2154 LCK_EX, MDS_INODELOCK_LOOKUP,
2155 MF_MDC_CANCEL_FID4);
/*
2159 * Cancel all the locks on tgt child (fid4).
 */
2162 rc = lmv_early_cancel(exp, op_data, src_tgt->ltd_idx,
2163 LCK_EX, MDS_INODELOCK_FULL,
2164 MF_MDC_CANCEL_FID4);
2167 rc = md_rename(src_tgt->ltd_exp, op_data, old, oldlen,
2168 new, newlen, request);
2170 if (rc == -ERESTART) {
2171 LASSERT(*request != NULL);
2172 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2173 "Got -ERESTART during rename!\n");
2174 ptlrpc_req_finished(*request);
/*
2178 * Directory got split. Time to update local object and repeat
2179 * the request with proper MDS.
 */
2181 rc = lmv_handle_split(exp, &op_data->op_fid1);
/*
 * setattr through LMV.  For a split directory the attribute change is
 * applied to every stripe; only the reply for the master stripe (the one
 * whose fid equals the master fid) is handed back to llite, the others are
 * released.  A plain object goes straight to its owning target.
 */
2188 static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
2189 void *ea, int ealen, void *ea2, int ea2len,
2190 struct ptlrpc_request **request,
2191 struct md_open_data **mod)
2193 struct obd_device *obd = exp->exp_obd;
2194 struct lmv_obd *lmv = &obd->u.lmv;
2195 struct ptlrpc_request *req;
2196 struct lmv_tgt_desc *tgt;
2197 struct lmv_object *obj;
2202 rc = lmv_check_connect(obd);
2206 obj = lmv_object_find(obd, &op_data->op_fid1);
2208 CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x%s\n",
2209 PFID(&op_data->op_fid1), op_data->op_attr.ia_valid,
2210 obj ? ", split" : "");
2212 op_data->op_flags |= MF_MDC_CANCEL_FID1;
/* Split case: fan the setattr out to every stripe in turn. */
2214 for (i = 0; i < obj->lo_objcount; i++) {
2215 op_data->op_fid1 = obj->lo_stripes[i].ls_fid;
2217 tgt = lmv_get_target(lmv, obj->lo_stripes[i].ls_mds);
2223 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen,
2224 ea2, ea2len, &req, mod);
2226 if (lu_fid_eq(&obj->lo_fid, &obj->lo_stripes[i].ls_fid)) {
/*
2228 * This is master object and this request should
2229 * be returned back to llite.
 */
/* Non-master replies are not needed by the caller. */
2233 ptlrpc_req_finished(req);
2239 lmv_object_put(obj);
/* Non-split case: single setattr on the owning target. */
2241 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2243 RETURN(PTR_ERR(tgt));
2245 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, ea2,
2246 ea2len, request, mod);
/*
 * Sync a single object: route md_sync() to the MDC target owning @fid.
 */
2251 static int lmv_sync(struct obd_export *exp, const struct lu_fid *fid,
2252 struct obd_capa *oc, struct ptlrpc_request **request)
2254 struct obd_device *obd = exp->exp_obd;
2255 struct lmv_obd *lmv = &obd->u.lmv;
2256 struct lmv_tgt_desc *tgt;
2260 rc = lmv_check_connect(obd);
2264 tgt = lmv_find_target(lmv, fid);
2266 RETURN(PTR_ERR(tgt));
2268 rc = md_sync(tgt->ltd_exp, fid, oc, request);
/*
2273 * Main purpose of LMV blocking ast is to remove split directory LMV
2274 * presentation object (struct lmv_object) attached to the lock being revoked.
 *
 * LDLM_CB_BLOCKING: cancel the lock; LDLM_CB_CANCELING: drop the
 * lmv_object reference stashed in l_ast_data.
 */
2276 int lmv_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
2277 void *data, int flag)
2279 struct lustre_handle lockh;
2280 struct lmv_object *obj;
2285 case LDLM_CB_BLOCKING:
2286 ldlm_lock2handle(lock, &lockh);
2287 rc = ldlm_cli_cancel(&lockh);
2289 CDEBUG(D_INODE, "ldlm_cli_cancel: %d\n", rc);
2293 case LDLM_CB_CANCELING:
/*
2295 * Time to drop cached attrs for split directory object
 */
2297 obj = lock->l_ast_data;
2299 CDEBUG(D_INODE, "Cancel %s on "LPU64"/"LPU64
2300 ", master "DFID"\n",
/* lr_name.name[3] distinguishes LOOKUP (1) from UPDATE bits. */
2301 lock->l_resource->lr_name.name[3] == 1 ?
2302 "LOOKUP" : "UPDATE",
2303 lock->l_resource->lr_name.name[0],
2304 lock->l_resource->lr_name.name[1],
2305 PFID(&obj->lo_fid));
2306 lmv_object_put(obj);
/*
 * Shift a little-endian readdir hash down by @hash_adj (mod MAX_HASH_SIZE),
 * leaving the MDS_DIR_END_OFF end-of-directory sentinel untouched.
 * Operates in place on the on-wire (LE) value.
 */
2315 static void lmv_hash_adjust(__u64 *hash, __u64 hash_adj)
2319 val = le64_to_cpu(*hash);
2321 val += MAX_HASH_SIZE;
2322 if (val != MDS_DIR_END_OFF)
2323 *hash = cpu_to_le64(val - hash_adj);
/*
 * Derive a pseudo-random per-(client,dir) rank by mixing the client's own
 * NID with the flattened fid; used to stagger which stripe different
 * clients start readdir from (see lmv_readpage).
 */
2326 static __u32 lmv_node_rank(struct obd_export *exp, const struct lu_fid *fid)
2329 struct obd_import *imp;
/*
2332 * XXX: to get nid we assume that underlying obd device is mdc.
 */
2334 imp = class_exp2cliimp(exp);
2335 id = imp->imp_connection->c_self + fid_flatten(fid);
2337 CDEBUG(D_INODE, "Readpage node rank: "LPX64" "DFID" "LPX64" "LPX64"\n",
2338 imp->imp_connection->c_self, PFID(fid), id, id ^ (id >> 32));
/* Fold 64 bits to 32 by xoring the halves. */
2340 return id ^ (id >> 32);
/*
 * Read directory pages through LMV.  For a split directory the hash space
 * [0, MAX_HASH_SIZE) is partitioned into nr segments, one per stripe; the
 * client-specific rank rotates which stripe serves a given hash offset, and
 * hash values in the returned pages are shifted back (hash_adj) so llite
 * sees a single contiguous hash space.  Non-split dirs go straight to the
 * owning target.  NOTE(review): listing is line-sampled; several
 * declarations and error branches are elided in this view.
 */
2343 static int lmv_readpage(struct obd_export *exp, struct md_op_data *op_data,
2344 struct page **pages, struct ptlrpc_request **request)
2346 struct obd_device *obd = exp->exp_obd;
2347 struct lmv_obd *lmv = &obd->u.lmv;
2348 struct lmv_object *obj;
2349 struct lu_fid rid = op_data->op_fid1;
2350 __u64 offset = op_data->op_offset;
2360 /* number of pages read, in CFS_PAGE_SIZE */
2362 /* number of pages transferred in LU_PAGE_SIZE */
2364 struct lmv_stripe *los;
2365 struct lmv_tgt_desc *tgt;
2366 struct lu_dirpage *dp;
2367 struct lu_dirent *ent;
2370 rc = lmv_check_connect(obd);
2374 CDEBUG(D_INODE, "READPAGE at "LPX64" from "DFID"\n", offset, PFID(&rid));
/*
2377 * This case handle directory lookup in clustered metadata case (i.e.
2378 * split directory is located on multiple md servers.)
2379 * each server keeps directory entries for certain range of hashes.
2380 * E.g. we have N server and suppose hash range is 0 to MAX_HASH.
2381 * first server will keep records with hashes [ 0 ... MAX_HASH / N - 1],
2382 * second one with hashes [MAX_HASH / N ... 2 * MAX_HASH / N] and
2384 * readdir can simply start reading entries from 0 - N server in
2385 * order but that will not scale well as all client will request dir in
2386 * to server in same order.
2387 * Following algorithm does optimization:
2388 * Instead of doing readdir in 1, 2, ...., N order, client with a
2389 * rank R does readdir in R, R + 1, ..., N, 1, ... R - 1 order.
2390 * (every client has rank R)
2391 * But ll_readdir() expect offset range [0 to MAX_HASH/N) but
2392 * since client ask dir from MDS{R} client has pages with offsets
2393 * [R*MAX_HASH/N ... (R + 1)*MAX_HASH/N] there for we do hash_adj
2394 * on hash values that we get.
 */
2396 obj = lmv_object_find_lock(obd, &rid);
2398 nr = obj->lo_objcount;
/* seg_size = MAX_HASH_SIZE / nr: hash width served by each stripe. */
2400 seg_size = MAX_HASH_SIZE;
2401 do_div(seg_size, nr);
2402 los = obj->lo_stripes;
2403 tgt = lmv_get_target(lmv, los[0].ls_mds);
2404 rank = lmv_node_rank(tgt->ltd_exp, &rid) % nr;
/* tgt0_idx: stripe that naturally owns @offset; tgt_idx: rotated one. */
2406 do_div(tgt_tmp, seg_size);
2407 tgt0_idx = do_div(tgt_tmp, nr);
2408 tgt_idx = (tgt0_idx + rank) % nr;
2410 if (tgt_idx < tgt0_idx)
/*
2414 * Last segment has unusual length due to division
 * remainder; compensate in the adjustment.
 */
2417 hash_adj = MAX_HASH_SIZE - seg_size * nr;
2421 hash_adj += rank * seg_size;
2423 CDEBUG(D_INODE, "Readpage hash adjustment: %x "LPX64" "
2424 LPX64"/%x -> "LPX64"/%x\n", rank, hash_adj,
2425 offset, tgt0_idx, offset + hash_adj, tgt_idx);
2427 offset = (offset + hash_adj) & MAX_HASH_SIZE;
2428 rid = obj->lo_stripes[tgt_idx].ls_fid;
2429 tgt = lmv_get_target(lmv, los[tgt_idx].ls_mds);
2431 CDEBUG(D_INODE, "Forward to "DFID" with offset %lu i %d\n",
2432 PFID(&rid), (unsigned long)offset, tgt_idx);
2434 tgt = lmv_find_target(lmv, &rid);
2437 GOTO(cleanup, rc = PTR_ERR(tgt));
2439 op_data->op_fid1 = rid;
2440 rc = md_readpage(tgt->ltd_exp, op_data, pages, request);
2444 nrdpgs = ((*request)->rq_bulk->bd_nob_transferred + CFS_PAGE_SIZE - 1)
2446 nlupgs = (*request)->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
2447 LASSERT(!((*request)->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
2448 LASSERT(nrdpgs > 0 && nrdpgs <= op_data->op_npages);
2450 CDEBUG(D_INODE, "read %d(%d)/%d pages\n", nrdpgs, nlupgs,
2451 op_data->op_npages);
/* Walk each client page, fixing up hashes in every embedded lu_dirpage. */
2453 for (i = 0; i < nrdpgs; i++) {
2454 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
2455 struct lu_dirpage *first;
2459 struct lu_dirent *tmp = NULL;
2461 dp = cfs_kmap(pages[i]);
2463 lmv_hash_adjust(&dp->ldp_hash_start, hash_adj);
2464 lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
2465 LASSERT(le64_to_cpu(dp->ldp_hash_start) <=
2466 op_data->op_offset);
/* Non-last stripe: clamp END_OFF to this stripe's segment boundary. */
2468 if ((tgt0_idx != nr - 1) &&
2469 (le64_to_cpu(dp->ldp_hash_end) == MDS_DIR_END_OFF))
/* NOTE(review): ldp_hash_end is 64-bit but converted with
 * cpu_to_le32 here — looks like it should be cpu_to_le64;
 * confirm against upstream fix. */
2471 dp->ldp_hash_end = cpu_to_le32(seg_size *
2474 ""DFID" reset end "LPX64" tgt %d\n",
2476 (__u64)le64_to_cpu(dp->ldp_hash_end),
2481 ent = lu_dirent_start(dp);
2482 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
2484 hash_end = dp->ldp_hash_end;
/* Adjust the hash of every dirent in this lu_dirpage. */
2488 for (tmp = ent; ent != NULL;
2489 tmp = ent, ent = lu_dirent_next(ent)) {
2491 lmv_hash_adjust(&ent->lde_hash, hash_adj);
2494 #if CFS_PAGE_SIZE > LU_PAGE_SIZE
/* Multiple lu_dirpages per client page: stitch them together. */
2495 dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
2496 if (((unsigned long)dp & ~CFS_PAGE_MASK) && nlupgs > 0) {
2497 ent = lu_dirent_start(dp);
2500 lmv_hash_adjust(&dp->ldp_hash_end, hash_adj);
2501 if ((tgt0_idx != nr - 1) &&
2502 (le64_to_cpu(dp->ldp_hash_end) ==
/* NOTE(review): same 32-bit truncation concern as above. */
2504 hash_end = cpu_to_le32(seg_size *
2507 ""DFID" reset end "LPX64" tgt %d\n",
2509 (__u64)le64_to_cpu(hash_end),
2513 hash_end = dp->ldp_hash_end;
2514 flags = dp->ldp_flags;
2517 /* enlarge the end entry lde_reclen from 0 to
2518 * first entry of next lu_dirpage, in this way
2519 * several lu_dirpages can be stored into one
2520 * client page on client. */
2521 tmp = ((void *)tmp) +
2522 le16_to_cpu(tmp->lde_reclen);
2524 cpu_to_le16((char *)(dp->ldp_entries) -
/* Propagate the last sub-page's end/flags to the first header. */
2529 first->ldp_hash_end = hash_end;
2530 first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
2531 first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
2533 SET_BUT_UNUSED(tmp);
2535 cfs_kunmap(pages[i]);
2540 lmv_object_put_unlock(obj);
/*
 * Unlink @op_name from directory op_fid1.  Split directories route by name
 * hash to the owning stripe (MDS_CHECK_SPLIT cleared); otherwise the fid's
 * own target is used with MDS_CHECK_SPLIT set.  FULL locks on the child
 * (fid3) are pre-cancelled.  -ERESTART → directory split mid-flight:
 * refresh the local object and retry.
 */
2544 static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
2545 struct ptlrpc_request **request)
2547 struct obd_device *obd = exp->exp_obd;
2548 struct lmv_obd *lmv = &obd->u.lmv;
2549 struct lmv_tgt_desc *tgt = NULL;
2550 struct lmv_object *obj;
2556 rc = lmv_check_connect(obd);
2563 LASSERT(op_data->op_namelen != 0);
2565 obj = lmv_object_find(obd, &op_data->op_fid1);
2567 sidx = raw_name2idx(obj->lo_hashtype,
2570 op_data->op_namelen);
2571 op_data->op_bias &= ~MDS_CHECK_SPLIT;
2572 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
2573 tgt = lmv_get_target(lmv,
2574 obj->lo_stripes[sidx].ls_mds);
2575 lmv_object_put(obj);
2576 CDEBUG(D_INODE, "UNLINK '%*s' in "DFID" -> %u\n",
2577 op_data->op_namelen, op_data->op_name,
2578 PFID(&op_data->op_fid1), sidx);
2582 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2584 RETURN(PTR_ERR(tgt));
2585 op_data->op_bias |= MDS_CHECK_SPLIT;
2588 op_data->op_fsuid = cfs_curproc_fsuid();
2589 op_data->op_fsgid = cfs_curproc_fsgid();
2590 op_data->op_cap = cfs_curproc_cap_pack();
/*
2593 * If child's fid is given, cancel unused locks for it if it is from
2594 * another export than parent.
2596 * LOOKUP lock for child (fid3) should also be cancelled on parent
2597 * tgt_tgt in mdc_unlink().
 */
2599 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
/*
2602 * Cancel FULL locks on child (fid3).
 */
2604 rc = lmv_early_cancel(exp, op_data, tgt->ltd_idx, LCK_EX,
2605 MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
2608 rc = md_unlink(tgt->ltd_exp, op_data, request);
2610 if (rc == -ERESTART) {
2611 LASSERT(*request != NULL);
2612 DEBUG_REQ(D_WARNING|D_RPCTRACE, *request,
2613 "Got -ERESTART during unlink!\n");
2614 ptlrpc_req_finished(*request);
/*
2618 * Directory got split. Time to update local object and repeat
2619 * the request with proper MDS.
 */
2621 rc = lmv_handle_split(exp, &op_data->op_fid1);
/*
 * Per-stage pre-cleanup: at OBD_CLEANUP_EXPORTS tear down FLD procfs
 * entries, lprocfs, and the llog subsystem for this device.
 */
2628 static int lmv_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2630 struct lmv_obd *lmv = &obd->u.lmv;
2634 case OBD_CLEANUP_EARLY:
2635 /* XXX: here should be calling obd_precleanup() down to
2638 case OBD_CLEANUP_EXPORTS:
2639 fld_client_proc_fini(&lmv->lmv_fld);
2640 lprocfs_obd_cleanup(obd);
2641 rc = obd_llog_finish(obd, 0);
2643 CERROR("failed to cleanup llogging subsystems\n");
/*
 * obd_get_info handler.  "remote_flag" is answered from the first target
 * that reports it; KEY_MAX_EASIZE / KEY_CONN_DATA are forwarded to target 0
 * (all MDCs share the same values); KEY_TGT_COUNT is answered locally.
 * Unknown keys return an error.
 */
2651 static int lmv_get_info(struct obd_export *exp, __u32 keylen,
2652 void *key, __u32 *vallen, void *val,
2653 struct lov_stripe_md *lsm)
2655 struct obd_device *obd;
2656 struct lmv_obd *lmv;
2660 obd = class_exp2obd(exp);
2662 CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
2663 exp->exp_handle.h_cookie);
2668 if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
2669 struct lmv_tgt_desc *tgts;
2672 rc = lmv_check_connect(obd);
2676 LASSERT(*vallen == sizeof(__u32));
2677 for (i = 0, tgts = lmv->tgts; i < lmv->desc.ld_tgt_count;
/*
2681 * All tgts should be connected when this gets called.
 */
2683 if (!tgts || !tgts->ltd_exp) {
2684 CERROR("target not setup?\n");
2688 if (!obd_get_info(tgts->ltd_exp, keylen, key,
2693 } else if (KEY_IS(KEY_MAX_EASIZE) || KEY_IS(KEY_CONN_DATA)) {
2694 rc = lmv_check_connect(obd);
/*
2699 * Forwarding this request to first MDS, it should know LOV
 * configuration for the whole filesystem.
 */
2702 rc = obd_get_info(lmv->tgts[0].ltd_exp, keylen, key,
/* Mirror the MDC's connect flags onto our own export. */
2704 if (!rc && KEY_IS(KEY_CONN_DATA)) {
2705 exp->exp_connect_flags =
2706 ((struct obd_connect_data *)val)->ocd_connect_flags;
2709 } else if (KEY_IS(KEY_TGT_COUNT)) {
2710 *((int *)val) = lmv->desc.ld_tgt_count;
2714 CDEBUG(D_IOCTL, "Invalid key\n");
/*
 * obd_set_info_async handler: KEY_READ_ONLY and KEY_FLUSH_CTX are fanned
 * out to every connected target; the first error is remembered.
 */
2718 int lmv_set_info_async(struct obd_export *exp, obd_count keylen,
2719 void *key, obd_count vallen, void *val,
2720 struct ptlrpc_request_set *set)
2722 struct lmv_tgt_desc *tgt;
2723 struct obd_device *obd;
2724 struct lmv_obd *lmv;
2728 obd = class_exp2obd(exp);
2730 CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
2731 exp->exp_handle.h_cookie);
2736 if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX)) {
2739 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2740 tgt = &lmv->tgts[i];
2745 err = obd_set_info_async(tgt->ltd_exp,
2746 keylen, key, vallen, val, set);
2757 int lmv_packmd(struct obd_export *exp, struct lov_mds_md **lmmp,
2758 struct lov_stripe_md *lsm)
2760 struct obd_device *obd = class_exp2obd(exp);
2761 struct lmv_obd *lmv = &obd->u.lmv;
2762 struct lmv_stripe_md *meap;
2763 struct lmv_stripe_md *lsmp;
2768 mea_size = lmv_get_easize(lmv);
2772 if (*lmmp && !lsm) {
2773 OBD_FREE_LARGE(*lmmp, mea_size);
2778 if (*lmmp == NULL) {
2779 OBD_ALLOC_LARGE(*lmmp, mea_size);
2787 lsmp = (struct lmv_stripe_md *)lsm;
2788 meap = (struct lmv_stripe_md *)*lmmp;
2790 if (lsmp->mea_magic != MEA_MAGIC_LAST_CHAR &&
2791 lsmp->mea_magic != MEA_MAGIC_ALL_CHARS)
2794 meap->mea_magic = cpu_to_le32(lsmp->mea_magic);
2795 meap->mea_count = cpu_to_le32(lsmp->mea_count);
2796 meap->mea_master = cpu_to_le32(lsmp->mea_master);
2798 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2799 meap->mea_ids[i] = meap->mea_ids[i];
2800 fid_cpu_to_le(&meap->mea_ids[i], &meap->mea_ids[i]);
/*
 * Unpack an on-wire LMV EA (@lmm) into an in-memory stripe descriptor at
 * *@lsmp, converting fields from little-endian.  Mirrors lmv_packmd:
 * NULL @lmm with an existing *@lsmp frees the descriptor; old (unswabbed
 * unknown-magic) EAs are rejected.
 */
2806 int lmv_unpackmd(struct obd_export *exp, struct lov_stripe_md **lsmp,
2807 struct lov_mds_md *lmm, int lmm_size)
2809 struct obd_device *obd = class_exp2obd(exp);
2810 struct lmv_stripe_md **tmea = (struct lmv_stripe_md **)lsmp;
2811 struct lmv_stripe_md *mea = (struct lmv_stripe_md *)lmm;
2812 struct lmv_obd *lmv = &obd->u.lmv;
2818 mea_size = lmv_get_easize(lmv);
/* Free path: existing descriptor, no incoming EA. */
2822 if (*lsmp != NULL && lmm == NULL) {
2823 OBD_FREE_LARGE(*tmea, mea_size);
2828 LASSERT(mea_size == lmm_size);
2830 OBD_ALLOC_LARGE(*tmea, mea_size);
/* Magic may arrive already in host order (same-endian peer) or LE. */
2837 if (mea->mea_magic == MEA_MAGIC_LAST_CHAR ||
2838 mea->mea_magic == MEA_MAGIC_ALL_CHARS ||
2839 mea->mea_magic == MEA_MAGIC_HASH_SEGMENT)
2841 magic = le32_to_cpu(mea->mea_magic);
/*
2844 * Old mea is not handled here.
 */
2846 CERROR("Old not supportable EA is found\n");
2850 (*tmea)->mea_magic = magic;
2851 (*tmea)->mea_count = le32_to_cpu(mea->mea_count);
2852 (*tmea)->mea_master = le32_to_cpu(mea->mea_master);
2854 for (i = 0; i < (*tmea)->mea_count; i++) {
2855 (*tmea)->mea_ids[i] = mea->mea_ids[i];
2856 fid_le_to_cpu(&(*tmea)->mea_ids[i], &(*tmea)->mea_ids[i]);
/*
 * Cancel unused locks on @fid across ALL active targets — with CMD an
 * object's locks may live in several MDS namespaces.  First error wins.
 */
2861 static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
2862 ldlm_policy_data_t *policy, ldlm_mode_t mode,
2863 ldlm_cancel_flags_t flags, void *opaque)
2865 struct obd_device *obd = exp->exp_obd;
2866 struct lmv_obd *lmv = &obd->u.lmv;
2872 LASSERT(fid != NULL);
2874 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2875 if (!lmv->tgts[i].ltd_exp || !lmv->tgts[i].ltd_active)
2878 err = md_cancel_unused(lmv->tgts[i].ltd_exp, fid,
2879 policy, mode, flags, opaque);
/*
 * Attach @data to the lock referenced by @lockh; delegated to target 0's
 * MDC (the lock handle is namespace-independent at this layer).
 */
2886 int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
2889 struct obd_device *obd = exp->exp_obd;
2890 struct lmv_obd *lmv = &obd->u.lmv;
2894 rc = md_set_lock_data(lmv->tgts[0].ltd_exp, lockh, data, bits);
/*
 * Match an existing lock on @fid.  Probes every target, since with CMD the
 * LOOKUP lock (direntry MDS) and UPDATE/OPEN lock (inode MDS) can live in
 * different namespaces.  Returns the first matching mode found.
 */
2898 ldlm_mode_t lmv_lock_match(struct obd_export *exp, int flags,
2899 const struct lu_fid *fid, ldlm_type_t type,
2900 ldlm_policy_data_t *policy, ldlm_mode_t mode,
2901 struct lustre_handle *lockh)
2903 struct obd_device *obd = exp->exp_obd;
2904 struct lmv_obd *lmv = &obd->u.lmv;
2909 CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
/*
2912 * With CMD every object can have two locks in different namespaces:
2913 * lookup lock in space of mds storing direntry and update/open lock in
2914 * space of mds storing inode. Thus we check all targets, not only that
2915 * one fid was created in.
 */
2917 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2918 rc = md_lock_match(lmv->tgts[i].ltd_exp, flags, fid,
2919 type, policy, mode, lockh);
/*
 * Unpack metadata from a reply into @md; delegated to target 0's MDC.
 */
2927 int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
2928 struct obd_export *dt_exp, struct obd_export *md_exp,
2929 struct lustre_md *md)
2931 struct obd_device *obd = exp->exp_obd;
2932 struct lmv_obd *lmv = &obd->u.lmv;
2935 rc = md_get_lustre_md(lmv->tgts[0].ltd_exp, req, dt_exp, md_exp, md);
/*
 * Release a lustre_md: free the mea descriptor (if any) via obd_free_memmd,
 * then delegate the rest to target 0's MDC.
 */
2939 int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
2941 struct obd_device *obd = exp->exp_obd;
2942 struct lmv_obd *lmv = &obd->u.lmv;
2946 obd_free_memmd(exp, (void *)&md->mea);
2947 RETURN(md_free_lustre_md(lmv->tgts[0].ltd_exp, md));
/*
 * Register open-replay data with the MDC owning the opened file's fid.
 */
2950 int lmv_set_open_replay_data(struct obd_export *exp,
2951 struct obd_client_handle *och,
2952 struct ptlrpc_request *open_req)
2954 struct obd_device *obd = exp->exp_obd;
2955 struct lmv_obd *lmv = &obd->u.lmv;
2956 struct lmv_tgt_desc *tgt;
2959 tgt = lmv_find_target(lmv, &och->och_fid);
2961 RETURN(PTR_ERR(tgt));
2963 RETURN(md_set_open_replay_data(tgt->ltd_exp, och, open_req));
/*
 * Clear open-replay data on the MDC owning the file's fid (inverse of
 * lmv_set_open_replay_data).
 */
2966 int lmv_clear_open_replay_data(struct obd_export *exp,
2967 struct obd_client_handle *och)
2969 struct obd_device *obd = exp->exp_obd;
2970 struct lmv_obd *lmv = &obd->u.lmv;
2971 struct lmv_tgt_desc *tgt;
2974 tgt = lmv_find_target(lmv, &och->och_fid);
2976 RETURN(PTR_ERR(tgt));
2978 RETURN(md_clear_open_replay_data(tgt->ltd_exp, och));
/*
 * Fetch remote permission bits for @fid from its owning MDC target.
 */
2981 static int lmv_get_remote_perm(struct obd_export *exp,
2982 const struct lu_fid *fid,
2983 struct obd_capa *oc, __u32 suppgid,
2984 struct ptlrpc_request **request)
2986 struct obd_device *obd = exp->exp_obd;
2987 struct lmv_obd *lmv = &obd->u.lmv;
2988 struct lmv_tgt_desc *tgt;
2992 rc = lmv_check_connect(obd);
2996 tgt = lmv_find_target(lmv, fid);
2998 RETURN(PTR_ERR(tgt));
3000 rc = md_get_remote_perm(tgt->ltd_exp, fid, oc, suppgid, request);
/*
 * Renew the capability @oc on the MDC owning the capability's fid.
 */
3004 static int lmv_renew_capa(struct obd_export *exp, struct obd_capa *oc,
3007 struct obd_device *obd = exp->exp_obd;
3008 struct lmv_obd *lmv = &obd->u.lmv;
3009 struct lmv_tgt_desc *tgt;
3013 rc = lmv_check_connect(obd);
3017 tgt = lmv_find_target(lmv, &oc->c_capa.lc_fid);
3019 RETURN(PTR_ERR(tgt));
3021 rc = md_renew_capa(tgt->ltd_exp, oc, cb);
/*
 * Unpack a capability from a reply buffer; delegated to target 0's MDC.
 */
3025 int lmv_unpack_capa(struct obd_export *exp, struct ptlrpc_request *req,
3026 const struct req_msg_field *field, struct obd_capa **oc)
3028 struct obd_device *obd = exp->exp_obd;
3029 struct lmv_obd *lmv = &obd->u.lmv;
3033 rc = md_unpack_capa(lmv->tgts[0].ltd_exp, req, field, oc);
/*
 * Async intent-getattr (statahead).  A named lookup into a split directory
 * is routed to the stripe chosen by the name hash; otherwise the fid's own
 * target handles it.
 */
3037 int lmv_intent_getattr_async(struct obd_export *exp,
3038 struct md_enqueue_info *minfo,
3039 struct ldlm_enqueue_info *einfo)
3041 struct md_op_data *op_data = &minfo->mi_data;
3042 struct obd_device *obd = exp->exp_obd;
3043 struct lmv_obd *lmv = &obd->u.lmv;
3044 struct lmv_object *obj;
3045 struct lmv_tgt_desc *tgt = NULL;
3050 rc = lmv_check_connect(obd);
3054 if (op_data->op_namelen) {
3055 obj = lmv_object_find(obd, &op_data->op_fid1);
3057 sidx = raw_name2idx(obj->lo_hashtype, obj->lo_objcount,
3058 (char *)op_data->op_name,
3059 op_data->op_namelen);
3060 op_data->op_fid1 = obj->lo_stripes[sidx].ls_fid;
3061 tgt = lmv_get_target(lmv, obj->lo_stripes[sidx].ls_mds);
3062 lmv_object_put(obj);
3067 tgt = lmv_find_target(lmv, &op_data->op_fid1);
3070 RETURN(PTR_ERR(tgt));
3072 rc = md_intent_getattr_async(tgt->ltd_exp, minfo, einfo);
3076 int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
3077 struct lu_fid *fid, __u64 *bits)
3079 struct obd_device *obd = exp->exp_obd;
3080 struct lmv_obd *lmv = &obd->u.lmv;
3081 struct lmv_tgt_desc *tgt;
3085 rc = lmv_check_connect(obd);
3089 tgt = lmv_find_target(lmv, fid);
3091 RETURN(PTR_ERR(tgt));
3093 rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
3098 * For lmv, only need to send request to master MDT, and the master MDT will
3099 * process with other slave MDTs. The only exception is Q_GETOQUOTA for which
3100 * we directly fetch data from the slave MDTs.
3102 int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
3103 struct obd_quotactl *oqctl)
3105 struct obd_device *obd = class_exp2obd(exp);
3106 struct lmv_obd *lmv = &obd->u.lmv;
3107 struct lmv_tgt_desc *tgt = &lmv->tgts[0];
3109 __u64 curspace, curinodes;
3112 if (!lmv->desc.ld_tgt_count || !tgt->ltd_active) {
3113 CERROR("master lmv inactive\n");
3117 if (oqctl->qc_cmd != Q_GETOQUOTA) {
3118 rc = obd_quotactl(tgt->ltd_exp, oqctl);
3122 curspace = curinodes = 0;
3123 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
3125 tgt = &lmv->tgts[i];
3127 if (tgt->ltd_exp == NULL)
3129 if (!tgt->ltd_active) {
3130 CDEBUG(D_HA, "mdt %d is inactive.\n", i);
3134 err = obd_quotactl(tgt->ltd_exp, oqctl);
3136 CERROR("getquota on mdt %d failed. %d\n", i, err);
3140 curspace += oqctl->qc_dqblk.dqb_curspace;
3141 curinodes += oqctl->qc_dqblk.dqb_curinodes;
3144 oqctl->qc_dqblk.dqb_curspace = curspace;
3145 oqctl->qc_dqblk.dqb_curinodes = curinodes;
3150 int lmv_quotacheck(struct obd_device *unused, struct obd_export *exp,
3151 struct obd_quotactl *oqctl)
3153 struct obd_device *obd = class_exp2obd(exp);
3154 struct lmv_obd *lmv = &obd->u.lmv;
3155 struct lmv_tgt_desc *tgt;
3159 for (i = 0, tgt = lmv->tgts; i < lmv->desc.ld_tgt_count; i++, tgt++) {
3162 if (!tgt->ltd_active) {
3163 CERROR("lmv idx %d inactive\n", i);
3167 err = obd_quotacheck(tgt->ltd_exp, oqctl);
3175 struct obd_ops lmv_obd_ops = {
3176 .o_owner = THIS_MODULE,
3177 .o_setup = lmv_setup,
3178 .o_cleanup = lmv_cleanup,
3179 .o_precleanup = lmv_precleanup,
3180 .o_process_config = lmv_process_config,
3181 .o_connect = lmv_connect,
3182 .o_disconnect = lmv_disconnect,
3183 .o_statfs = lmv_statfs,
3184 .o_get_info = lmv_get_info,
3185 .o_set_info_async = lmv_set_info_async,
3186 .o_packmd = lmv_packmd,
3187 .o_unpackmd = lmv_unpackmd,
3188 .o_notify = lmv_notify,
3189 .o_get_uuid = lmv_get_uuid,
3190 .o_iocontrol = lmv_iocontrol,
3191 .o_fid_delete = lmv_fid_delete,
3192 .o_quotacheck = lmv_quotacheck,
3193 .o_quotactl = lmv_quotactl
3196 struct md_ops lmv_md_ops = {
3197 .m_getstatus = lmv_getstatus,
3198 .m_change_cbdata = lmv_change_cbdata,
3199 .m_find_cbdata = lmv_find_cbdata,
3200 .m_close = lmv_close,
3201 .m_create = lmv_create,
3202 .m_done_writing = lmv_done_writing,
3203 .m_enqueue = lmv_enqueue,
3204 .m_getattr = lmv_getattr,
3205 .m_getxattr = lmv_getxattr,
3206 .m_getattr_name = lmv_getattr_name,
3207 .m_intent_lock = lmv_intent_lock,
3209 .m_rename = lmv_rename,
3210 .m_setattr = lmv_setattr,
3211 .m_setxattr = lmv_setxattr,
3213 .m_readpage = lmv_readpage,
3214 .m_unlink = lmv_unlink,
3215 .m_init_ea_size = lmv_init_ea_size,
3216 .m_cancel_unused = lmv_cancel_unused,
3217 .m_set_lock_data = lmv_set_lock_data,
3218 .m_lock_match = lmv_lock_match,
3219 .m_get_lustre_md = lmv_get_lustre_md,
3220 .m_free_lustre_md = lmv_free_lustre_md,
3221 .m_set_open_replay_data = lmv_set_open_replay_data,
3222 .m_clear_open_replay_data = lmv_clear_open_replay_data,
3223 .m_renew_capa = lmv_renew_capa,
3224 .m_unpack_capa = lmv_unpack_capa,
3225 .m_get_remote_perm = lmv_get_remote_perm,
3226 .m_intent_getattr_async = lmv_intent_getattr_async,
3227 .m_revalidate_lock = lmv_revalidate_lock
3230 int __init lmv_init(void)
3232 struct lprocfs_static_vars lvars;
3235 lmv_object_cache = cfs_mem_cache_create("lmv_objects",
3236 sizeof(struct lmv_object),
3238 if (!lmv_object_cache) {
3239 CERROR("Error allocating lmv objects cache\n");
3243 lprocfs_lmv_init_vars(&lvars);
3245 rc = class_register_type(&lmv_obd_ops, &lmv_md_ops,
3246 lvars.module_vars, LUSTRE_LMV_NAME, NULL);
3248 cfs_mem_cache_destroy(lmv_object_cache);
3254 static void lmv_exit(void)
3256 class_unregister_type(LUSTRE_LMV_NAME);
3258 LASSERTF(cfs_atomic_read(&lmv_object_count) == 0,
3259 "Can't free lmv objects cache, %d object(s) busy\n",
3260 cfs_atomic_read(&lmv_object_count));
3261 cfs_mem_cache_destroy(lmv_object_cache);
3264 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
3265 MODULE_DESCRIPTION("Lustre Logical Metadata Volume OBD driver");
3266 MODULE_LICENSE("GPL");
3268 module_init(lmv_init);
3269 module_exit(lmv_exit);