4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
30 * Lustre is a trademark of Sun Microsystems, Inc.
33 #define DEBUG_SUBSYSTEM S_LMV
35 #include <linux/file.h>
36 #include <linux/module.h>
37 #include <linux/init.h>
38 #include <linux/user_namespace.h>
39 #ifdef HAVE_UIDGID_HEADER
40 # include <linux/uidgid.h>
42 #include <linux/slab.h>
43 #include <linux/pagemap.h>
45 #include <linux/math64.h>
46 #include <linux/seq_file.h>
47 #include <linux/namei.h>
49 #include <obd_support.h>
50 #include <lustre_lib.h>
51 #include <lustre_net.h>
52 #include <obd_class.h>
53 #include <lustre_lmv.h>
54 #include <lprocfs_status.h>
55 #include <cl_object.h>
56 #include <lustre_fid.h>
57 #include <uapi/linux/lustre/lustre_ioctl.h>
58 #include <lustre_kernelcomm.h>
59 #include "lmv_internal.h"
61 static int lmv_check_connect(struct obd_device *obd);
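/*
 * Mark an LMV target (MDC) active or inactive: update tgt->ltd_active,
 * adjust the active target count, and mirror the new state into the MDC
 * export's obd_inactive flag.
 */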
63 static void lmv_activate_target(struct lmv_obd *lmv,
64 struct lmv_tgt_desc *tgt,
67 if (tgt->ltd_active == activate)
70 tgt->ltd_active = activate;
71 lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
73 tgt->ltd_exp->exp_obd->obd_inactive = !activate;
79 * -EINVAL : UUID can't be found in the LMV's target list
80 * -ENOTCONN: The UUID is found, but the target connection is bad (!)
81 * -EBADF   : The UUID is found, but the OBD is of the wrong type (!)
83 static int lmv_set_mdc_active(struct lmv_obd *lmv,
84 const struct obd_uuid *uuid,
87 struct lmv_tgt_desc *tgt = NULL;
88 struct obd_device *obd;
93 CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
94 lmv, uuid->uuid, activate);
96 spin_lock(&lmv->lmv_lock);
97 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
99 if (tgt == NULL || tgt->ltd_exp == NULL)
102 CDEBUG(D_INFO, "Target idx %d is %s conn %#llx\n", i,
103 tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
105 if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
109 if (i == lmv->desc.ld_tgt_count)
110 GOTO(out_lmv_lock, rc = -EINVAL);
112 obd = class_exp2obd(tgt->ltd_exp);
114 GOTO(out_lmv_lock, rc = -ENOTCONN);
116 CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n",
117 obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd,
118 obd->obd_type->typ_name, i);
119 LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
121 if (tgt->ltd_active == activate) {
122 CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
123 activate ? "" : "in");
124 GOTO(out_lmv_lock, rc);
127 CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
128 activate ? "" : "in");
129 lmv_activate_target(lmv, tgt, activate);
133 spin_unlock(&lmv->lmv_lock);
137 struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
139 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
140 struct lmv_tgt_desc *tgt = lmv->tgts[0];
142 return (tgt == NULL) ? NULL : obd_get_uuid(tgt->ltd_exp);
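/*
 * Handle notifications from the MDC targets: activate/deactivate the
 * matching target on OBD_NOTIFY_ACTIVE/INACTIVE, snapshot the connect data
 * on OBD_NOTIFY_OCD, then pass the event up to the registered observer.
 */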
145 static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
146 enum obd_notify_event ev)
148 struct obd_connect_data *conn_data;
149 struct lmv_obd *lmv = &obd->u.lmv;
150 struct obd_uuid *uuid;
154 if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
155 CERROR("unexpected notification of %s %s!\n",
156 watched->obd_type->typ_name,
161 uuid = &watched->u.cli.cl_target_uuid;
162 if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
164 * Set MDC as active before notifying the observer, so the
165 * observer can use the MDC normally.
167 rc = lmv_set_mdc_active(lmv, uuid,
168 ev == OBD_NOTIFY_ACTIVE);
170 CERROR("%sactivation of %s failed: %d\n",
171 ev == OBD_NOTIFY_ACTIVE ? "" : "de",
175 } else if (ev == OBD_NOTIFY_OCD) {
176 conn_data = &watched->u.cli.cl_import->imp_connect_data;
178 * XXX: Make sure that ocd_connect_flags from all targets are
179 * the same. Otherwise one of MDTs runs wrong version or
180 * something like this. --umka
182 obd->obd_self_export->exp_connect_data = *conn_data;
186 * Pass the notification up the chain.
188 if (obd->obd_observer)
189 rc = obd_notify(obd->obd_observer, watched, ev);
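/*
 * Connect a client to the LMV device: set up the local export, remember the
 * connect data, create the "target_obds" sysfs directory and connect the
 * configured MDC targets via lmv_check_connect().
 */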
194 static int lmv_connect(const struct lu_env *env,
195 struct obd_export **pexp, struct obd_device *obd,
196 struct obd_uuid *cluuid, struct obd_connect_data *data,
199 struct lmv_obd *lmv = &obd->u.lmv;
200 struct lustre_handle conn = { 0 };
201 struct obd_export *exp;
205 rc = class_connect(&conn, obd, cluuid);
207 CERROR("class_connection() returned %d\n", rc);
211 exp = class_conn2export(&conn);
214 lmv->conn_data = *data;
216 lmv->lmv_tgts_kobj = kobject_create_and_add("target_obds",
217 &obd->obd_kset.kobj);
218 if (!lmv->lmv_tgts_kobj) {
219 CERROR("%s: cannot create /sys/fs/lustre/%s/%s/target_obds\n",
220 obd->obd_name, obd->obd_type->typ_name, obd->obd_name);
223 rc = lmv_check_connect(obd);
232 if (lmv->lmv_tgts_kobj)
233 kobject_put(lmv->lmv_tgts_kobj);
235 class_disconnect(exp);
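/*
 * Remember the largest EA sizes seen so far and, once connected, propagate
 * them to every MDC target via md_init_ea_size().
 */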
240 static int lmv_init_ea_size(struct obd_export *exp, __u32 easize,
243 struct obd_device *obd = exp->exp_obd;
244 struct lmv_obd *lmv = &obd->u.lmv;
250 if (lmv->max_easize < easize) {
251 lmv->max_easize = easize;
254 if (lmv->max_def_easize < def_easize) {
255 lmv->max_def_easize = def_easize;
262 if (lmv->connected == 0)
265 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
266 struct lmv_tgt_desc *tgt = lmv->tgts[i];
268 if (tgt == NULL || tgt->ltd_exp == NULL) {
269 CWARN("%s: NULL export for %d\n", obd->obd_name, i);
272 if (!tgt->ltd_active)
275 rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
277 CERROR("%s: obd_init_ea_size() failed on MDT target %d:"
278 " rc = %d\n", obd->obd_name, i, rc);
285 #define MAX_STRING_SIZE 128
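/*
 * Connect one MDC target: find the attached MDC obd, connect with the saved
 * connect data, set up its FID client and FLD target, register LMV as its
 * observer, notify the upper observer and link it under target_obds in sysfs.
 */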
287 int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
289 struct lmv_obd *lmv = &obd->u.lmv;
290 struct obd_device *mdc_obd;
291 struct obd_export *mdc_exp;
292 struct lu_fld_target target;
296 mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
299 CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
303 CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s\n",
304 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
305 tgt->ltd_uuid.uuid, obd->obd_uuid.uuid);
307 if (!mdc_obd->obd_set_up) {
308 CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
312 rc = obd_connect(NULL, &mdc_exp, mdc_obd, &obd->obd_uuid,
313 &lmv->conn_data, NULL);
315 CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
320 * Init fid sequence client for this mdc and add new fld target.
322 rc = obd_fid_init(mdc_obd, mdc_exp, LUSTRE_SEQ_METADATA);
326 target.ft_srv = NULL;
327 target.ft_exp = mdc_exp;
328 target.ft_idx = tgt->ltd_idx;
330 fld_client_add_target(&lmv->lmv_fld, &target);
332 rc = obd_register_observer(mdc_obd, obd);
334 obd_disconnect(mdc_exp);
335 CERROR("target %s register_observer error %d\n",
336 tgt->ltd_uuid.uuid, rc);
340 if (obd->obd_observer) {
342 * Tell the observer about the new target.
344 rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd,
347 obd_disconnect(mdc_exp);
353 tgt->ltd_exp = mdc_exp;
354 lmv->desc.ld_active_tgt_count++;
356 md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize);
358 CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
359 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
360 atomic_read(&obd->obd_refcount));
362 if (lmv->lmv_tgts_kobj)
363 /* Even if we failed to create the link, that's fine */
364 rc = sysfs_create_link(lmv->lmv_tgts_kobj,
365 &mdc_obd->obd_kset.kobj,
370 static void lmv_del_target(struct lmv_obd *lmv, int index)
372 if (lmv->tgts[index] == NULL)
375 OBD_FREE_PTR(lmv->tgts[index]);
376 lmv->tgts[index] = NULL;
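/*
 * Add a target at the given index, doubling the lmv->tgts array if the index
 * does not fit; if LMV is already connected, connect the new target right
 * away and update the EA size for the larger target count.
 */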
380 static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
381 __u32 index, int gen)
383 struct obd_device *mdc_obd;
384 struct lmv_obd *lmv = &obd->u.lmv;
385 struct lmv_tgt_desc *tgt;
386 int orig_tgt_count = 0;
390 CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
391 mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
394 CERROR("%s: Target %s not attached: rc = %d\n",
395 obd->obd_name, uuidp->uuid, -EINVAL);
399 mutex_lock(&lmv->lmv_init_mutex);
400 if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) {
401 tgt = lmv->tgts[index];
402 CERROR("%s: UUID %s already assigned at LMV target index %d:"
403 " rc = %d\n", obd->obd_name,
404 obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
405 mutex_unlock(&lmv->lmv_init_mutex);
409 if (index >= lmv->tgts_size) {
410 /* We need to reallocate the lmv target array. */
411 struct lmv_tgt_desc **newtgts, **old = NULL;
415 while (newsize < index + 1)
416 newsize = newsize << 1;
417 OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
418 if (newtgts == NULL) {
419 mutex_unlock(&lmv->lmv_init_mutex);
423 if (lmv->tgts_size) {
424 memcpy(newtgts, lmv->tgts,
425 sizeof(*newtgts) * lmv->tgts_size);
427 oldsize = lmv->tgts_size;
431 lmv->tgts_size = newsize;
434 OBD_FREE(old, sizeof(*old) * oldsize);
436 CDEBUG(D_CONFIG, "tgts: %p size: %d\n", lmv->tgts,
442 mutex_unlock(&lmv->lmv_init_mutex);
446 mutex_init(&tgt->ltd_fid_mutex);
447 tgt->ltd_idx = index;
448 tgt->ltd_uuid = *uuidp;
450 lmv->tgts[index] = tgt;
451 if (index >= lmv->desc.ld_tgt_count) {
452 orig_tgt_count = lmv->desc.ld_tgt_count;
453 lmv->desc.ld_tgt_count = index + 1;
456 if (lmv->connected == 0) {
457 /* lmv_check_connect() will connect this target. */
458 mutex_unlock(&lmv->lmv_init_mutex);
462 /* Otherwise let's connect it ourselves */
463 mutex_unlock(&lmv->lmv_init_mutex);
464 rc = lmv_connect_mdc(obd, tgt);
466 spin_lock(&lmv->lmv_lock);
467 if (lmv->desc.ld_tgt_count == index + 1)
468 lmv->desc.ld_tgt_count = orig_tgt_count;
469 memset(tgt, 0, sizeof(*tgt));
470 spin_unlock(&lmv->lmv_lock);
472 int easize = sizeof(struct lmv_stripe_md) +
473 lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
474 lmv_init_ea_size(obd->obd_self_export, easize, 0);
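/*
 * Connect all configured MDC targets on first use; on error, disconnect the
 * targets that were already connected before returning.
 */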
480 static int lmv_check_connect(struct obd_device *obd)
482 struct lmv_obd *lmv = &obd->u.lmv;
483 struct lmv_tgt_desc *tgt;
492 mutex_lock(&lmv->lmv_init_mutex);
493 if (lmv->connected) {
494 mutex_unlock(&lmv->lmv_init_mutex);
498 if (lmv->desc.ld_tgt_count == 0) {
499 mutex_unlock(&lmv->lmv_init_mutex);
500 CERROR("%s: no targets configured.\n", obd->obd_name);
504 LASSERT(lmv->tgts != NULL);
506 if (lmv->tgts[0] == NULL) {
507 mutex_unlock(&lmv->lmv_init_mutex);
508 CERROR("%s: no target configured for index 0.\n",
513 CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
514 obd->obd_uuid.uuid, obd->obd_name);
516 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
520 rc = lmv_connect_mdc(obd, tgt);
526 easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC);
527 lmv_init_ea_size(obd->obd_self_export, easize, 0);
528 mutex_unlock(&lmv->lmv_init_mutex);
539 --lmv->desc.ld_active_tgt_count;
540 rc2 = obd_disconnect(tgt->ltd_exp);
542 CERROR("LMV target %s disconnect on "
543 "MDC idx %d: error %d\n",
544 tgt->ltd_uuid.uuid, i, rc2);
549 mutex_unlock(&lmv->lmv_init_mutex);
554 static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
556 struct lmv_obd *lmv = &obd->u.lmv;
557 struct obd_device *mdc_obd;
561 LASSERT(tgt != NULL);
562 LASSERT(obd != NULL);
564 mdc_obd = class_exp2obd(tgt->ltd_exp);
567 mdc_obd->obd_force = obd->obd_force;
568 mdc_obd->obd_fail = obd->obd_fail;
569 mdc_obd->obd_no_recov = obd->obd_no_recov;
571 if (lmv->lmv_tgts_kobj)
572 sysfs_remove_link(lmv->lmv_tgts_kobj,
576 rc = obd_fid_fini(tgt->ltd_exp->exp_obd);
578 CERROR("Can't finalize fids factory\n");
580 CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
581 tgt->ltd_exp->exp_obd->obd_name,
582 tgt->ltd_exp->exp_obd->obd_uuid.uuid);
584 obd_register_observer(tgt->ltd_exp->exp_obd, NULL);
585 rc = obd_disconnect(tgt->ltd_exp);
587 if (tgt->ltd_active) {
588 CERROR("Target %s disconnect error %d\n",
589 tgt->ltd_uuid.uuid, rc);
593 lmv_activate_target(lmv, tgt, 0);
598 static int lmv_disconnect(struct obd_export *exp)
600 struct obd_device *obd = class_exp2obd(exp);
601 struct lmv_obd *lmv = &obd->u.lmv;
609 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
610 if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
613 lmv_disconnect_mdc(obd, lmv->tgts[i]);
616 if (lmv->lmv_tgts_kobj)
617 kobject_put(lmv->lmv_tgts_kobj);
621 * This is the case when no real connection is established by
622 * lmv_check_connect().
625 class_export_put(exp);
626 rc = class_disconnect(exp);
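/*
 * OBD_IOC_FID2PATH handler: resolve a FID to a path, following -EREMOTE
 * replies to other MDTs and prepending each closer-to-root path segment to
 * the result until the full path is built.
 */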
632 static int lmv_fid2path(struct obd_export *exp, int len, void *karg,
635 struct obd_device *obddev = class_exp2obd(exp);
636 struct lmv_obd *lmv = &obddev->u.lmv;
637 struct getinfo_fid2path *gf;
638 struct lmv_tgt_desc *tgt;
639 struct getinfo_fid2path *remote_gf = NULL;
640 struct lu_fid root_fid;
641 int remote_gf_size = 0;
645 tgt = lmv_find_target(lmv, &gf->gf_fid);
647 RETURN(PTR_ERR(tgt));
649 root_fid = *gf->gf_u.gf_root_fid;
650 LASSERT(fid_is_sane(&root_fid));
653 rc = obd_iocontrol(OBD_IOC_FID2PATH, tgt->ltd_exp, len, gf, uarg);
654 if (rc != 0 && rc != -EREMOTE)
655 GOTO(out_fid2path, rc);
657 /* If remote_gf != NULL, it means we have just built the
658 * path on the remote MDT; copy this path segment to gf */
659 if (remote_gf != NULL) {
660 struct getinfo_fid2path *ori_gf;
664 ori_gf = (struct getinfo_fid2path *)karg;
665 if (strlen(ori_gf->gf_u.gf_path) + 1 +
666 strlen(gf->gf_u.gf_path) + 1 > ori_gf->gf_pathlen)
667 GOTO(out_fid2path, rc = -EOVERFLOW);
669 ptr = ori_gf->gf_u.gf_path;
671 len = strlen(gf->gf_u.gf_path);
672 /* move the current path to the right to release space
673 * for closer-to-root part */
674 memmove(ptr + len + 1, ptr, strlen(ori_gf->gf_u.gf_path));
675 memcpy(ptr, gf->gf_u.gf_path, len);
679 CDEBUG(D_INFO, "%s: get path %s "DFID" rec: %llu ln: %u\n",
680 tgt->ltd_exp->exp_obd->obd_name,
681 gf->gf_u.gf_path, PFID(&gf->gf_fid), gf->gf_recno,
685 GOTO(out_fid2path, rc);
687 /* sigh, has to go to another MDT to do path building further */
688 if (remote_gf == NULL) {
689 remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
690 OBD_ALLOC(remote_gf, remote_gf_size);
691 if (remote_gf == NULL)
692 GOTO(out_fid2path, rc = -ENOMEM);
693 remote_gf->gf_pathlen = PATH_MAX;
696 if (!fid_is_sane(&gf->gf_fid)) {
697 CERROR("%s: invalid FID "DFID": rc = %d\n",
698 tgt->ltd_exp->exp_obd->obd_name,
699 PFID(&gf->gf_fid), -EINVAL);
700 GOTO(out_fid2path, rc = -EINVAL);
703 tgt = lmv_find_target(lmv, &gf->gf_fid);
705 GOTO(out_fid2path, rc = -EINVAL);
707 remote_gf->gf_fid = gf->gf_fid;
708 remote_gf->gf_recno = -1;
709 remote_gf->gf_linkno = -1;
710 memset(remote_gf->gf_u.gf_path, 0, remote_gf->gf_pathlen);
711 *remote_gf->gf_u.gf_root_fid = root_fid;
713 goto repeat_fid2path;
716 if (remote_gf != NULL)
717 OBD_FREE(remote_gf, remote_gf_size);
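/*
 * HSM requests may carry FIDs that live on different MDTs; the helpers below
 * count the items destined for a given target and build the per-target
 * hsm_user_request used when lmv_iocontrol() splits a request.
 */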
721 static int lmv_hsm_req_count(struct lmv_obd *lmv,
722 const struct hsm_user_request *hur,
723 const struct lmv_tgt_desc *tgt_mds)
727 struct lmv_tgt_desc *curr_tgt;
729 /* count how many requests must be sent to the given target */
730 for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
731 curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
732 if (IS_ERR(curr_tgt))
733 RETURN(PTR_ERR(curr_tgt));
734 if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
740 static int lmv_hsm_req_build(struct lmv_obd *lmv,
741 struct hsm_user_request *hur_in,
742 const struct lmv_tgt_desc *tgt_mds,
743 struct hsm_user_request *hur_out)
746 struct lmv_tgt_desc *curr_tgt;
748 /* build the hsm_user_request for the given target */
749 hur_out->hur_request = hur_in->hur_request;
751 for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
752 curr_tgt = lmv_find_target(lmv,
753 &hur_in->hur_user_item[i].hui_fid);
754 if (IS_ERR(curr_tgt))
755 RETURN(PTR_ERR(curr_tgt));
756 if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
757 hur_out->hur_user_item[nr_out] =
758 hur_in->hur_user_item[i];
762 hur_out->hur_request.hr_itemcount = nr_out;
763 memcpy(hur_data(hur_out), hur_data(hur_in),
764 hur_in->hur_request.hr_data_len);
769 static int lmv_hsm_ct_unregister(struct obd_device *obd, unsigned int cmd,
770 int len, struct lustre_kernelcomm *lk,
773 struct lmv_obd *lmv = &obd->u.lmv;
778 /* unregister request (call from llapi_hsm_copytool_fini) */
779 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
780 struct lmv_tgt_desc *tgt = lmv->tgts[i];
782 if (tgt == NULL || tgt->ltd_exp == NULL)
784 /* best effort: try to clean as much as possible
785 * (continue on error) */
786 obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
789 /* Whatever the result, remove copytool from kuc groups.
790 * Unreached coordinators will get EPIPE on next requests
791 * and will unregister automatically.
793 rc = libcfs_kkuc_group_rem(&obd->obd_uuid, lk->lk_uid, lk->lk_group);
798 static int lmv_hsm_ct_register(struct obd_device *obd, unsigned int cmd,
799 int len, struct lustre_kernelcomm *lk,
802 struct lmv_obd *lmv = &obd->u.lmv;
806 bool any_set = false;
807 struct kkuc_ct_data *kcd;
812 filp = fget(lk->lk_wfd);
816 if (lk->lk_flags & LK_FLG_DATANR)
817 kcd_size = offsetof(struct kkuc_ct_data,
818 kcd_archives[lk->lk_data_count]);
820 kcd_size = sizeof(*kcd);
822 OBD_ALLOC(kcd, kcd_size);
824 GOTO(err_fput, rc = -ENOMEM);
826 kcd->kcd_nr_archives = lk->lk_data_count;
827 if (lk->lk_flags & LK_FLG_DATANR) {
828 kcd->kcd_magic = KKUC_CT_DATA_ARRAY_MAGIC;
829 if (lk->lk_data_count > 0)
830 memcpy(kcd->kcd_archives, lk->lk_data,
831 sizeof(*kcd->kcd_archives) * lk->lk_data_count);
833 kcd->kcd_magic = KKUC_CT_DATA_BITMAP_MAGIC;
836 rc = libcfs_kkuc_group_add(filp, &obd->obd_uuid, lk->lk_uid,
837 lk->lk_group, kcd, kcd_size);
838 OBD_FREE(kcd, kcd_size);
842 /* All or nothing: try to register to all MDS.
843 * In case of failure, unregister from previous MDS,
844 * except if it is because of an inactive target. */
845 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
846 struct lmv_tgt_desc *tgt = lmv->tgts[i];
848 if (tgt == NULL || tgt->ltd_exp == NULL)
851 err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
853 if (tgt->ltd_active) {
854 /* permanent error */
855 CERROR("%s: iocontrol MDC %s on MDT"
856 " idx %d cmd %x: err = %d\n",
857 lmv2obd_dev(lmv)->obd_name,
858 tgt->ltd_uuid.uuid, i, cmd, err);
860 lk->lk_flags |= LK_FLG_STOP;
861 /* unregister from previous MDS */
862 for (j = 0; j < i; j++) {
864 if (tgt == NULL || tgt->ltd_exp == NULL)
866 obd_iocontrol(cmd, tgt->ltd_exp, len,
869 GOTO(err_kkuc_rem, rc);
871 /* else: transient error.
872 * kuc will register to the missing MDT
880 /* no registration done: return error */
881 GOTO(err_kkuc_rem, rc = -ENOTCONN);
886 libcfs_kkuc_group_rem(&obd->obd_uuid, lk->lk_uid, lk->lk_group);
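/*
 * Dispatch ioctls on the LMV export: per-FID commands are forwarded to the
 * MDC owning the FID, while commands such as HSM requests or copytool
 * registration are split across, or broadcast to, all targets.
 */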
896 static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
897 int len, void *karg, void __user *uarg)
899 struct obd_device *obddev = class_exp2obd(exp);
900 struct lmv_obd *lmv = &obddev->u.lmv;
901 struct lmv_tgt_desc *tgt = NULL;
905 __u32 count = lmv->desc.ld_tgt_count;
912 case IOC_OBD_STATFS: {
913 struct obd_ioctl_data *data = karg;
914 struct obd_device *mdc_obd;
915 struct obd_statfs stat_buf = {0};
918 memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
919 if ((index >= count))
922 tgt = lmv->tgts[index];
923 if (tgt == NULL || !tgt->ltd_active)
926 mdc_obd = class_exp2obd(tgt->ltd_exp);
931 if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
932 min((int) data->ioc_plen2,
933 (int) sizeof(struct obd_uuid))))
936 rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf,
937 ktime_get_seconds() - OBD_STATFS_CACHE_SECONDS,
941 if (copy_to_user(data->ioc_pbuf1, &stat_buf,
942 min((int) data->ioc_plen1,
943 (int) sizeof(stat_buf))))
947 case OBD_IOC_QUOTACTL: {
948 struct if_quotactl *qctl = karg;
949 struct obd_quotactl *oqctl;
951 if (qctl->qc_valid == QC_MDTIDX) {
952 if (count <= qctl->qc_idx)
955 tgt = lmv->tgts[qctl->qc_idx];
956 if (tgt == NULL || tgt->ltd_exp == NULL)
958 } else if (qctl->qc_valid == QC_UUID) {
959 for (i = 0; i < count; i++) {
963 if (!obd_uuid_equals(&tgt->ltd_uuid,
967 if (tgt->ltd_exp == NULL)
979 LASSERT(tgt != NULL && tgt->ltd_exp != NULL);
980 OBD_ALLOC_PTR(oqctl);
984 QCTL_COPY(oqctl, qctl);
985 rc = obd_quotactl(tgt->ltd_exp, oqctl);
987 QCTL_COPY(qctl, oqctl);
988 qctl->qc_valid = QC_MDTIDX;
989 qctl->obd_uuid = tgt->ltd_uuid;
994 case LL_IOC_GET_CONNECT_FLAGS: {
996 if (tgt == NULL || tgt->ltd_exp == NULL)
998 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1001 case LL_IOC_FID2MDTIDX: {
1002 struct lu_fid *fid = karg;
1005 rc = lmv_fld_lookup(lmv, fid, &mdt_index);
1009 /* Note: this is from llite(see ll_dir_ioctl()), @uarg does not
1010 * point to user space memory for FID2MDTIDX. */
1011 *(__u32 *)uarg = mdt_index;
1014 case OBD_IOC_FID2PATH: {
1015 rc = lmv_fid2path(exp, len, karg, uarg);
1018 case LL_IOC_HSM_STATE_GET:
1019 case LL_IOC_HSM_STATE_SET:
1020 case LL_IOC_HSM_ACTION: {
1021 struct md_op_data *op_data = karg;
1023 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1025 RETURN(PTR_ERR(tgt));
1027 if (tgt->ltd_exp == NULL)
1030 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1033 case LL_IOC_HSM_PROGRESS: {
1034 const struct hsm_progress_kernel *hpk = karg;
1036 tgt = lmv_find_target(lmv, &hpk->hpk_fid);
1038 RETURN(PTR_ERR(tgt));
1039 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1042 case LL_IOC_HSM_REQUEST: {
1043 struct hsm_user_request *hur = karg;
1044 unsigned int reqcount = hur->hur_request.hr_itemcount;
1049 /* if the request is about a single fid
1050 * or if there is a single MDS, no need to split
1052 if (reqcount == 1 || count == 1) {
1053 tgt = lmv_find_target(lmv,
1054 &hur->hur_user_item[0].hui_fid);
1056 RETURN(PTR_ERR(tgt));
1057 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1059 /* split fid list to their respective MDS */
1060 for (i = 0; i < count; i++) {
1063 struct hsm_user_request *req;
1066 if (tgt == NULL || tgt->ltd_exp == NULL)
1069 nr = lmv_hsm_req_count(lmv, hur, tgt);
1072 if (nr == 0) /* nothing for this MDS */
1075 /* build a request with fids for this MDS */
1076 reqlen = offsetof(typeof(*hur),
1078 + hur->hur_request.hr_data_len;
1079 OBD_ALLOC_LARGE(req, reqlen);
1082 rc1 = lmv_hsm_req_build(lmv, hur, tgt, req);
1084 GOTO(hsm_req_err, rc1);
1085 rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
1088 if (rc1 != 0 && rc == 0)
1090 OBD_FREE_LARGE(req, reqlen);
1095 case LL_IOC_LOV_SWAP_LAYOUTS: {
1096 struct md_op_data *op_data = karg;
1097 struct lmv_tgt_desc *tgt1, *tgt2;
1099 tgt1 = lmv_find_target(lmv, &op_data->op_fid1);
1101 RETURN(PTR_ERR(tgt1));
1103 tgt2 = lmv_find_target(lmv, &op_data->op_fid2);
1105 RETURN(PTR_ERR(tgt2));
1107 if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL))
1110 /* only files on same MDT can have their layouts swapped */
1111 if (tgt1->ltd_idx != tgt2->ltd_idx)
1114 rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg);
1117 case LL_IOC_HSM_CT_START: {
1118 struct lustre_kernelcomm *lk = karg;
1119 if (lk->lk_flags & LK_FLG_STOP)
1120 rc = lmv_hsm_ct_unregister(obddev, cmd, len, lk, uarg);
1122 rc = lmv_hsm_ct_register(obddev, cmd, len, lk, uarg);
1126 for (i = 0; i < count; i++) {
1127 struct obd_device *mdc_obd;
1131 if (tgt == NULL || tgt->ltd_exp == NULL)
1133 /* ll_umount_begin() sets force flag but for lmv, not
1134 * mdc. Let's pass it through */
1135 mdc_obd = class_exp2obd(tgt->ltd_exp);
1136 mdc_obd->obd_force = obddev->obd_force;
1137 err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1139 if (tgt->ltd_active) {
1140 CERROR("error: iocontrol MDC %s on MDT"
1141 " idx %d cmd %x: err = %d\n",
1142 tgt->ltd_uuid.uuid, i, cmd, err);
1156 * This is the _inode_ placement policy function (not the name-based one).
1158 static int lmv_placement_policy(struct obd_device *obd,
1159 struct md_op_data *op_data, u32 *mds)
1161 struct lmv_obd *lmv = &obd->u.lmv;
1162 struct lmv_user_md *lum;
1166 LASSERT(mds != NULL);
1168 if (lmv->desc.ld_tgt_count == 1) {
1173 lum = op_data->op_data;
1175 * 1. See if the stripe offset is specified by lum.
1176 * 2. Then check if there is a default stripe offset.
1177 * 3. Finally choose the MDS by name hash if the parent
1178 * is a striped directory (see lmv_locate_tgt()). */
1179 if (op_data->op_cli_flags & CLI_SET_MEA && lum != NULL &&
1180 le32_to_cpu(lum->lum_stripe_offset) != (__u32)-1) {
1181 *mds = le32_to_cpu(lum->lum_stripe_offset);
1182 } else if (op_data->op_default_stripe_offset != (__u32)-1) {
1183 *mds = op_data->op_default_stripe_offset;
1184 op_data->op_mds = *mds;
1185 /* Correct the stripe offset in lum */
1187 lum->lum_stripe_offset = cpu_to_le32(*mds);
1189 *mds = op_data->op_mds;
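/*
 * Allocate a new FID from the target that owns the given MDS index; the
 * per-target fid mutex keeps sequence allocation and FLD setup atomic.
 */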
1195 int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
1197 struct lmv_tgt_desc *tgt;
1201 tgt = lmv_get_target(lmv, mds, NULL);
1203 RETURN(PTR_ERR(tgt));
1206 * New seq alloc and FLD setup should be atomic. Otherwise the server
1207 * may find that the sequence in the newly allocated FID is not yet known.
1209 mutex_lock(&tgt->ltd_fid_mutex);
1211 if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL)
1212 GOTO(out, rc = -ENODEV);
1215 * Ask the underlying tgt layer to allocate a new FID.
1217 rc = obd_fid_alloc(NULL, tgt->ltd_exp, fid, NULL);
1219 LASSERT(fid_is_sane(fid));
1225 mutex_unlock(&tgt->ltd_fid_mutex);
1229 int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
1230 struct lu_fid *fid, struct md_op_data *op_data)
1232 struct obd_device *obd = class_exp2obd(exp);
1233 struct lmv_obd *lmv = &obd->u.lmv;
1238 LASSERT(op_data != NULL);
1239 LASSERT(fid != NULL);
1241 rc = lmv_placement_policy(obd, op_data, &mds);
1243 CERROR("Can't get target for allocating fid, "
1248 rc = __lmv_fid_alloc(lmv, fid, mds);
1250 CERROR("Can't alloc new fid, rc %d\n", rc);
1257 static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
1259 struct lmv_obd *lmv = &obd->u.lmv;
1260 struct lmv_desc *desc;
1264 if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
1265 CERROR("LMV setup requires a descriptor\n");
1269 desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
1270 if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
1271 CERROR("Lmv descriptor size wrong: %d > %d\n",
1272 (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
1276 lmv->tgts_size = 32U;
1277 OBD_ALLOC(lmv->tgts, sizeof(*lmv->tgts) * lmv->tgts_size);
1278 if (lmv->tgts == NULL)
1281 obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
1282 lmv->desc.ld_tgt_count = 0;
1283 lmv->desc.ld_active_tgt_count = 0;
1284 lmv->max_def_easize = 0;
1285 lmv->max_easize = 0;
1287 spin_lock_init(&lmv->lmv_lock);
1288 mutex_init(&lmv->lmv_init_mutex);
1290 rc = lmv_tunables_init(obd);
1292 CWARN("%s: error adding LMV sysfs/debugfs files: rc = %d\n",
1295 rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
1296 LUSTRE_CLI_FLD_HASH_DHT);
1298 CERROR("Can't init FLD, err %d\n", rc);
1308 static int lmv_cleanup(struct obd_device *obd)
1310 struct lmv_obd *lmv = &obd->u.lmv;
1313 fld_client_fini(&lmv->lmv_fld);
1314 if (lmv->tgts != NULL) {
1316 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1317 if (lmv->tgts[i] == NULL)
1319 lmv_del_target(lmv, i);
1321 OBD_FREE(lmv->tgts, sizeof(*lmv->tgts) * lmv->tgts_size);
1327 static int lmv_process_config(struct obd_device *obd, size_t len, void *buf)
1329 struct lustre_cfg *lcfg = buf;
1330 struct obd_uuid obd_uuid;
1336 switch (lcfg->lcfg_command) {
1338 /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID
1339 * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */
1340 if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid))
1341 GOTO(out, rc = -EINVAL);
1343 obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
1345 if (sscanf(lustre_cfg_buf(lcfg, 2), "%u", &index) != 1)
1346 GOTO(out, rc = -EINVAL);
1347 if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", &gen) != 1)
1348 GOTO(out, rc = -EINVAL);
1349 rc = lmv_add_target(obd, &obd_uuid, index, gen);
1352 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
1353 GOTO(out, rc = -EINVAL);
1359 static int lmv_select_statfs_mdt(struct lmv_obd *lmv, __u32 flags)
1363 if (flags & OBD_STATFS_FOR_MDT0)
1366 if (lmv->lmv_statfs_start || lmv->desc.ld_tgt_count == 1)
1367 return lmv->lmv_statfs_start;
1369 /* choose initial MDT for this client */
1371 struct lnet_process_id lnet_id;
1372 if (LNetGetId(i, &lnet_id) == -ENOENT)
1375 if (LNET_NETTYP(LNET_NIDNET(lnet_id.nid)) != LOLND) {
1376 /* We don't need a full 64-bit modulus, just enough
1377 * to distribute the requests across MDTs evenly. */
1379 lmv->lmv_statfs_start =
1380 (u32)lnet_id.nid % lmv->desc.ld_tgt_count;
1385 return lmv->lmv_statfs_start;
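/*
 * statfs across the MDTs, starting from a per-client index picked by
 * lmv_select_statfs_mdt() to spread the load; per-MDT counters are summed
 * unless aggregated (OS_STATE_SUM) values or an MDT0-only query are used.
 */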
1388 static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
1389 struct obd_statfs *osfs, time64_t max_age, __u32 flags)
1391 struct obd_device *obd = class_exp2obd(exp);
1392 struct lmv_obd *lmv = &obd->u.lmv;
1393 struct obd_statfs *temp;
1398 OBD_ALLOC(temp, sizeof(*temp));
1402 /* distribute statfs among MDTs */
1403 idx = lmv_select_statfs_mdt(lmv, flags);
1405 for (i = 0; i < lmv->desc.ld_tgt_count; i++, idx++) {
1406 idx = idx % lmv->desc.ld_tgt_count;
1407 if (lmv->tgts[idx] == NULL || lmv->tgts[idx]->ltd_exp == NULL)
1410 rc = obd_statfs(env, lmv->tgts[idx]->ltd_exp, temp,
1413 CERROR("%s: can't stat MDS #%d: rc = %d\n",
1414 lmv->tgts[idx]->ltd_exp->exp_obd->obd_name, i,
1416 GOTO(out_free_temp, rc);
1419 if (temp->os_state & OS_STATE_SUM ||
1420 flags == OBD_STATFS_FOR_MDT0) {
1421 /* reset to the last aggregated values
1422 * and don't sum with non-aggregated data */
1423 /* If the statfs is from mount, it needs to retrieve
1424 * necessary information from MDT0, i.e. mount does
1425 * not need the merged osfs from all of the MDTs. Also
1426 * clients can be mounted as long as MDT0 is in service. */
1435 osfs->os_bavail += temp->os_bavail;
1436 osfs->os_blocks += temp->os_blocks;
1437 osfs->os_ffree += temp->os_ffree;
1438 osfs->os_files += temp->os_files;
1439 osfs->os_granted += temp->os_granted;
1445 OBD_FREE(temp, sizeof(*temp));
1449 static int lmv_get_root(struct obd_export *exp, const char *fileset,
1452 struct obd_device *obd = exp->exp_obd;
1453 struct lmv_obd *lmv = &obd->u.lmv;
1457 rc = md_get_root(lmv->tgts[0]->ltd_exp, fileset, fid);
1461 static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
1462 u64 obd_md_valid, const char *name, size_t buf_size,
1463 struct ptlrpc_request **req)
1465 struct obd_device *obd = exp->exp_obd;
1466 struct lmv_obd *lmv = &obd->u.lmv;
1467 struct lmv_tgt_desc *tgt;
1471 tgt = lmv_find_target(lmv, fid);
1473 RETURN(PTR_ERR(tgt));
1475 rc = md_getxattr(tgt->ltd_exp, fid, obd_md_valid, name, buf_size, req);
1480 static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
1481 u64 obd_md_valid, const char *name,
1482 const void *value, size_t value_size,
1483 unsigned int xattr_flags, u32 suppgid,
1484 struct ptlrpc_request **req)
1486 struct obd_device *obd = exp->exp_obd;
1487 struct lmv_obd *lmv = &obd->u.lmv;
1488 struct lmv_tgt_desc *tgt;
1492 tgt = lmv_find_target(lmv, fid);
1494 RETURN(PTR_ERR(tgt));
1496 rc = md_setxattr(tgt->ltd_exp, fid, obd_md_valid, name,
1497 value, value_size, xattr_flags, suppgid, req);
1502 static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
1503 struct ptlrpc_request **request)
1505 struct obd_device *obd = exp->exp_obd;
1506 struct lmv_obd *lmv = &obd->u.lmv;
1507 struct lmv_tgt_desc *tgt;
1511 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1513 RETURN(PTR_ERR(tgt));
1515 if (op_data->op_flags & MF_GET_MDT_IDX) {
1516 op_data->op_mds = tgt->ltd_idx;
1520 rc = md_getattr(tgt->ltd_exp, op_data, request);
1525 static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
1527 struct obd_device *obd = exp->exp_obd;
1528 struct lmv_obd *lmv = &obd->u.lmv;
1532 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1535 * With DNE every object can have two locks in different namespaces:
1536 * a lookup lock in the namespace of the MDT storing the direntry and an
1537 * update/open lock in the namespace of the MDT storing the inode.
1539 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1540 if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
1542 md_null_inode(lmv->tgts[i]->ltd_exp, fid);
1548 static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
1549 struct md_open_data *mod, struct ptlrpc_request **request)
1551 struct obd_device *obd = exp->exp_obd;
1552 struct lmv_obd *lmv = &obd->u.lmv;
1553 struct lmv_tgt_desc *tgt;
1557 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1559 RETURN(PTR_ERR(tgt));
1561 CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
1562 rc = md_close(tgt->ltd_exp, op_data, mod, request);
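/*
 * Resolve the target for a (parent, name) pair: a plain directory resolves
 * by FID, while a striped directory hashes the name to pick the stripe and
 * returns that stripe's FID and MDS index to the caller.
 */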
1566 struct lmv_tgt_desc*
1567 __lmv_locate_tgt(struct lmv_obd *lmv, struct lmv_stripe_md *lsm,
1568 const char *name, int namelen, struct lu_fid *fid, u32 *mds,
1571 struct lmv_tgt_desc *tgt;
1572 const struct lmv_oinfo *oinfo;
1574 if (lsm == NULL || namelen == 0) {
1575 tgt = lmv_find_target(lmv, fid);
1580 *mds = tgt->ltd_idx;
1584 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NAME_HASH)) {
1585 if (cfs_fail_val >= lsm->lsm_md_stripe_count)
1586 return ERR_PTR(-EBADF);
1587 oinfo = &lsm->lsm_md_oinfo[cfs_fail_val];
1589 oinfo = lsm_name_to_stripe_info(lsm, name, namelen,
1592 return ERR_CAST(oinfo);
1596 *fid = oinfo->lmo_fid;
1598 *mds = oinfo->lmo_mds;
1600 tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL);
1602 CDEBUG(D_INFO, "locate on mds %u "DFID"\n", oinfo->lmo_mds,
1603 PFID(&oinfo->lmo_fid));
1610 * Locate MDT by FID or name
1612 * For a striped directory, locate the stripe by name hash; if the hash type
1613 * is unknown, return the stripe specified by 'op_data->op_stripe_index',
1614 * which is set outside, and if the dir is migrating, 'op_data->op_post_migrate'
1615 * indicates whether the old or the new layout is used to locate.
1617 * For a normal directory, locate the MDS by FID directly.
1619 * \param[in] lmv LMV device
1620 * \param[in] op_data client MD stack parameters, name, namelen
1622 * \param[in] fid object FID used to locate the MDS.
1624 * \retval pointer to the lmv_tgt_desc on success.
1625 * ERR_PTR(errno) on failure.
1627 struct lmv_tgt_desc*
1628 lmv_locate_tgt(struct lmv_obd *lmv, struct md_op_data *op_data,
1631 struct lmv_stripe_md *lsm = op_data->op_mea1;
1632 struct lmv_oinfo *oinfo;
1633 struct lmv_tgt_desc *tgt;
1635 /* When creating a VOLATILE file, honor the MDT index if the file
1636 * under the striped dir is being restored. */
1638 if (op_data->op_bias & MDS_CREATE_VOLATILE &&
1639 (int)op_data->op_mds != -1) {
1640 tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
1647 /* refill the right parent fid */
1648 for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1649 oinfo = &lsm->lsm_md_oinfo[i];
1650 if (oinfo->lmo_mds == op_data->op_mds) {
1651 *fid = oinfo->lmo_fid;
1656 if (i == lsm->lsm_md_stripe_count)
1657 *fid = lsm->lsm_md_oinfo[0].lmo_fid;
1659 } else if (lmv_is_dir_bad_hash(lsm)) {
1660 LASSERT(op_data->op_stripe_index < lsm->lsm_md_stripe_count);
1661 oinfo = &lsm->lsm_md_oinfo[op_data->op_stripe_index];
1663 *fid = oinfo->lmo_fid;
1664 op_data->op_mds = oinfo->lmo_mds;
1665 tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL);
1667 tgt = __lmv_locate_tgt(lmv, lsm, op_data->op_name,
1668 op_data->op_namelen, fid,
1670 op_data->op_post_migrate);
1676 int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
1677 const void *data, size_t datalen, umode_t mode, uid_t uid,
1678 gid_t gid, cfs_cap_t cap_effective, __u64 rdev,
1679 struct ptlrpc_request **request)
1681 struct obd_device *obd = exp->exp_obd;
1682 struct lmv_obd *lmv = &obd->u.lmv;
1683 struct lmv_tgt_desc *tgt;
1687 if (!lmv->desc.ld_active_tgt_count)
1690 if (lmv_is_dir_bad_hash(op_data->op_mea1))
1693 if (lmv_is_dir_migrating(op_data->op_mea1)) {
1695 * if parent is migrating, create() needs to lookup existing
1696 * name, to avoid creating new file under old layout of
1697 * migrating directory, check old layout here.
1699 tgt = lmv_locate_tgt(lmv, op_data, &op_data->op_fid1);
1701 RETURN(PTR_ERR(tgt));
1703 rc = md_getattr_name(tgt->ltd_exp, op_data, request);
1705 ptlrpc_req_finished(*request);
1713 op_data->op_post_migrate = true;
1716 tgt = lmv_locate_tgt(lmv, op_data, &op_data->op_fid1);
1718 RETURN(PTR_ERR(tgt));
1720 CDEBUG(D_INODE, "CREATE name '%.*s' on "DFID" -> mds #%x\n",
1721 (int)op_data->op_namelen, op_data->op_name,
1722 PFID(&op_data->op_fid1), op_data->op_mds);
1724 rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
1728 if (exp_connect_flags(exp) & OBD_CONNECT_DIR_STRIPE) {
1729 /* Send the create request to the MDT where the object
1730 * will be located */
1731 tgt = lmv_find_target(lmv, &op_data->op_fid2);
1733 RETURN(PTR_ERR(tgt));
1735 op_data->op_mds = tgt->ltd_idx;
1737 CDEBUG(D_CONFIG, "Server doesn't support striped dirs\n");
1740 CDEBUG(D_INODE, "CREATE obj "DFID" -> mds #%x\n",
1741 PFID(&op_data->op_fid2), op_data->op_mds);
1743 op_data->op_flags |= MF_MDC_CANCEL_FID1;
1744 rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
1745 cap_effective, rdev, request);
1747 if (*request == NULL)
1749 CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
1755 lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1756 const union ldlm_policy_data *policy, struct md_op_data *op_data,
1757 struct lustre_handle *lockh, __u64 extra_lock_flags)
1759 struct obd_device *obd = exp->exp_obd;
1760 struct lmv_obd *lmv = &obd->u.lmv;
1761 struct lmv_tgt_desc *tgt;
1765 CDEBUG(D_INODE, "ENQUEUE on "DFID"\n", PFID(&op_data->op_fid1));
1767 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1769 RETURN(PTR_ERR(tgt));
1771 CDEBUG(D_INODE, "ENQUEUE on "DFID" -> mds #%u\n",
1772 PFID(&op_data->op_fid1), tgt->ltd_idx);
1774 rc = md_enqueue(tgt->ltd_exp, einfo, policy, op_data, lockh,
1781 lmv_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
1782 struct ptlrpc_request **preq)
1784 struct obd_device *obd = exp->exp_obd;
1785 struct lmv_obd *lmv = &obd->u.lmv;
1786 struct lmv_tgt_desc *tgt;
1787 struct mdt_body *body;
1793 tgt = lmv_locate_tgt(lmv, op_data, &op_data->op_fid1);
1795 RETURN(PTR_ERR(tgt));
1797 CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" -> mds #%d\n",
1798 (int)op_data->op_namelen, op_data->op_name,
1799 PFID(&op_data->op_fid1), tgt->ltd_idx);
1801 rc = md_getattr_name(tgt->ltd_exp, op_data, preq);
1802 if (rc == -ENOENT && lmv_dir_retry_check_update(op_data)) {
1803 ptlrpc_req_finished(*preq);
1811 body = req_capsule_server_get(&(*preq)->rq_pill, &RMF_MDT_BODY);
1812 LASSERT(body != NULL);
1814 if (body->mbo_valid & OBD_MD_MDS) {
1815 op_data->op_fid1 = body->mbo_fid1;
1816 op_data->op_valid |= OBD_MD_FLCROSSREF;
1817 op_data->op_namelen = 0;
1818 op_data->op_name = NULL;
1820 ptlrpc_req_finished(*preq);
1829 #define md_op_data_fid(op_data, fl) \
1830 (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
1831 fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
1832 fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
1833 fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
1836 static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
1837 struct md_op_data *op_data, __u32 op_tgt,
1838 enum ldlm_mode mode, int bits, int flag)
1840 struct lu_fid *fid = md_op_data_fid(op_data, flag);
1841 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
1842 union ldlm_policy_data policy = { { 0 } };
1846 if (!fid_is_sane(fid))
1850 tgt = lmv_find_target(lmv, fid);
1852 RETURN(PTR_ERR(tgt));
1855 if (tgt->ltd_idx != op_tgt) {
1856 CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
1857 policy.l_inodebits.bits = bits;
1858 rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
1859 mode, LCF_ASYNC, NULL);
1862 "EARLY_CANCEL skip operation target %d on "DFID"\n",
1864 op_data->op_flags |= flag;
1872 * llite passes the FID of the target inode in op_data->op_fid1 and the FID of the directory in op_data->op_fid2.
1875 static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
1876 struct ptlrpc_request **request)
1878 struct obd_device *obd = exp->exp_obd;
1879 struct lmv_obd *lmv = &obd->u.lmv;
1880 struct lmv_tgt_desc *tgt;
1884 LASSERT(op_data->op_namelen != 0);
1886 CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n",
1887 PFID(&op_data->op_fid2), (int)op_data->op_namelen,
1888 op_data->op_name, PFID(&op_data->op_fid1));
1890 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
1891 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
1892 op_data->op_cap = cfs_curproc_cap_pack();
1894 if (lmv_is_dir_migrating(op_data->op_mea2)) {
1895 struct lu_fid fid1 = op_data->op_fid1;
1896 struct lmv_stripe_md *lsm1 = op_data->op_mea1;
1899 * avoid creating new file under old layout of migrating
1900 * directory, check it here.
1902 tgt = __lmv_locate_tgt(lmv, op_data->op_mea2, op_data->op_name,
1903 op_data->op_namelen, &op_data->op_fid2,
1904 &op_data->op_mds, false);
1905 tgt = lmv_locate_tgt(lmv, op_data, &op_data->op_fid1);
1907 RETURN(PTR_ERR(tgt));
1909 op_data->op_fid1 = op_data->op_fid2;
1910 op_data->op_mea1 = op_data->op_mea2;
1911 rc = md_getattr_name(tgt->ltd_exp, op_data, request);
1912 op_data->op_fid1 = fid1;
1913 op_data->op_mea1 = lsm1;
1915 ptlrpc_req_finished(*request);
1924 tgt = __lmv_locate_tgt(lmv, op_data->op_mea2, op_data->op_name,
1925 op_data->op_namelen, &op_data->op_fid2,
1926 &op_data->op_mds, true);
1928 RETURN(PTR_ERR(tgt));
1931 * Cancel UPDATE lock on child (fid1).
1933 op_data->op_flags |= MF_MDC_CANCEL_FID2;
1934 rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX,
1935 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
1939 rc = md_link(tgt->ltd_exp, op_data, request);
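/*
 * Handle a CLI_MIGRATE rename, which moves an entry to another MDT: locate
 * the source and target parent stripes, allocate a FID for the migrated
 * object, cancel the locks invalidated by the move, and send the migrate
 * request to the chosen MDT.
 */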
1944 static int lmv_migrate(struct obd_export *exp, struct md_op_data *op_data,
1945 const char *name, size_t namelen,
1946 struct ptlrpc_request **request)
1948 struct obd_device *obd = exp->exp_obd;
1949 struct lmv_obd *lmv = &obd->u.lmv;
1950 struct lmv_stripe_md *lsm = op_data->op_mea1;
1951 struct lmv_tgt_desc *parent_tgt;
1952 struct lmv_tgt_desc *sp_tgt;
1953 struct lmv_tgt_desc *tp_tgt = NULL;
1954 struct lmv_tgt_desc *child_tgt;
1955 struct lmv_tgt_desc *tgt;
1956 struct lu_fid target_fid;
1961 LASSERT(op_data->op_cli_flags & CLI_MIGRATE);
1963 CDEBUG(D_INODE, "MIGRATE "DFID"/%.*s\n",
1964 PFID(&op_data->op_fid1), (int)namelen, name);
1966 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
1967 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
1968 op_data->op_cap = cfs_curproc_cap_pack();
1970 parent_tgt = lmv_find_target(lmv, &op_data->op_fid1);
1971 if (IS_ERR(parent_tgt))
1972 RETURN(PTR_ERR(parent_tgt));
1975 __u32 hash_type = lsm->lsm_md_hash_type;
1976 __u32 stripe_count = lsm->lsm_md_stripe_count;
1979 * old stripes are appended after new stripes for a migrating directory.
1982 if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION) {
1983 hash_type = lsm->lsm_md_migrate_hash;
1984 stripe_count -= lsm->lsm_md_migrate_offset;
1987 rc = lmv_name_to_stripe_index(hash_type, stripe_count, name,
1992 if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION)
1993 rc += lsm->lsm_md_migrate_offset;
1995 /* save it in fid4 temporarily for early cancel */
1996 op_data->op_fid4 = lsm->lsm_md_oinfo[rc].lmo_fid;
1997 sp_tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[rc].lmo_mds,
2000 RETURN(PTR_ERR(sp_tgt));
2003 * if parent is being migrated too, fill op_fid2 with target
2004 * stripe fid, otherwise the target stripe is not created yet.
2006 if (lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION) {
2007 hash_type = lsm->lsm_md_hash_type &
2008 ~LMV_HASH_FLAG_MIGRATION;
2009 stripe_count = lsm->lsm_md_migrate_offset;
2011 rc = lmv_name_to_stripe_index(hash_type, stripe_count,
2016 op_data->op_fid2 = lsm->lsm_md_oinfo[rc].lmo_fid;
2017 tp_tgt = lmv_get_target(lmv,
2018 lsm->lsm_md_oinfo[rc].lmo_mds,
2021 RETURN(PTR_ERR(tp_tgt));
2024 sp_tgt = parent_tgt;
2027 child_tgt = lmv_find_target(lmv, &op_data->op_fid3);
2028 if (IS_ERR(child_tgt))
2029 RETURN(PTR_ERR(child_tgt));
2031 if (!S_ISDIR(op_data->op_mode) && tp_tgt)
2032 rc = __lmv_fid_alloc(lmv, &target_fid, tp_tgt->ltd_idx);
2034 rc = lmv_fid_alloc(NULL, exp, &target_fid, op_data);
2039 * for directory, send migrate request to the MDT where the object will
2040 * be migrated to, because we can't create a striped directory remotely.
2042 * otherwise, send to the MDT where the source is located because a regular
2043 * file may have an open lease.
2045 * NB. if MDT doesn't support DIR_MIGRATE, send to source MDT too for
2046 * backward compatibility.
2048 if (S_ISDIR(op_data->op_mode) &&
2049 (exp_connect_flags2(exp) & OBD_CONNECT2_DIR_MIGRATE)) {
2050 tgt = lmv_find_target(lmv, &target_fid);
2052 RETURN(PTR_ERR(tgt));
2057 /* cancel UPDATE lock of parent master object */
2058 rc = lmv_early_cancel(exp, parent_tgt, op_data, tgt->ltd_idx, LCK_EX,
2059 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
2063 /* cancel UPDATE lock of source parent */
2064 if (sp_tgt != parent_tgt) {
2066 * migrate RPC packs master object FID, because we can only pack
2067 * two FIDs in reint RPC, but MDS needs to know both source
2068 * parent and target parent, and it will obtain them from master
2069 * FID and LMV, the other FID in RPC is kept for target.
2071 * since this FID is not passed to MDC, cancel it anyway.
2073 rc = lmv_early_cancel(exp, sp_tgt, op_data, -1, LCK_EX,
2074 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID4);
2078 op_data->op_flags &= ~MF_MDC_CANCEL_FID4;
2080 op_data->op_fid4 = target_fid;
2082 /* cancel UPDATE locks of target parent */
2083 rc = lmv_early_cancel(exp, tp_tgt, op_data, tgt->ltd_idx, LCK_EX,
2084 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID2);
2088 /* cancel LOOKUP lock of source if source is remote object */
2089 if (child_tgt != sp_tgt) {
2090 rc = lmv_early_cancel(exp, sp_tgt, op_data, tgt->ltd_idx,
2091 LCK_EX, MDS_INODELOCK_LOOKUP,
2092 MF_MDC_CANCEL_FID3);
2097 /* cancel ELC locks of source */
2098 rc = lmv_early_cancel(exp, child_tgt, op_data, tgt->ltd_idx, LCK_EX,
2099 MDS_INODELOCK_ELC, MF_MDC_CANCEL_FID3);
2103 rc = md_rename(tgt->ltd_exp, op_data, name, namelen, NULL, 0, request);
2108 static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
2109 const char *old, size_t oldlen,
2110 const char *new, size_t newlen,
2111 struct ptlrpc_request **request)
2113 struct obd_device *obd = exp->exp_obd;
2114 struct lmv_obd *lmv = &obd->u.lmv;
2115 struct lmv_tgt_desc *sp_tgt;
2116 struct lmv_tgt_desc *tp_tgt = NULL;
2117 struct lmv_tgt_desc *src_tgt = NULL;
2118 struct lmv_tgt_desc *tgt;
2119 struct mdt_body *body;
2124 LASSERT(oldlen != 0);
2126 if (op_data->op_cli_flags & CLI_MIGRATE) {
2127 rc = lmv_migrate(exp, op_data, old, oldlen, request);
2131 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2132 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2133 op_data->op_cap = cfs_curproc_cap_pack();
2135 if (lmv_is_dir_migrating(op_data->op_mea2)) {
2136 struct lu_fid fid1 = op_data->op_fid1;
2137 struct lmv_stripe_md *lsm1 = op_data->op_mea1;
2140 * we avoid creating a new file under the old layout of a migrating
2141 * directory; if there is an existing file with the new name under the
2142 * old layout, we can't unlink the file in the old layout and rename to
2143 * the new layout in one transaction, so return -EBUSY here.
2145 tgt = __lmv_locate_tgt(lmv, op_data->op_mea2, new, newlen,
2146 &op_data->op_fid2, &op_data->op_mds,
2149 RETURN(PTR_ERR(tgt));
2151 op_data->op_fid1 = op_data->op_fid2;
2152 op_data->op_mea1 = op_data->op_mea2;
2153 op_data->op_name = new;
2154 op_data->op_namelen = newlen;
2155 rc = md_getattr_name(tgt->ltd_exp, op_data, request);
2156 op_data->op_fid1 = fid1;
2157 op_data->op_mea1 = lsm1;
2158 op_data->op_name = NULL;
2159 op_data->op_namelen = 0;
2161 ptlrpc_req_finished(*request);
2170 /* rename to new layout for migrating directory */
2171 tp_tgt = __lmv_locate_tgt(lmv, op_data->op_mea2, new, newlen,
2172 &op_data->op_fid2, &op_data->op_mds, true);
2174 RETURN(PTR_ERR(tp_tgt));
2176 /* The target child might be destroyed and become an orphan, and orphans
2177 * can only be checked on the local MDT right now, so send the rename
2178 * request to the MDT where the target child is located. If the target
2179 * child does not exist, the request is sent to the target parent. */
2181 if (fid_is_sane(&op_data->op_fid4)) {
2182 tgt = lmv_find_target(lmv, &op_data->op_fid4);
2184 RETURN(PTR_ERR(tgt));
2189 op_data->op_flags |= MF_MDC_CANCEL_FID4;
2191 /* cancel UPDATE locks of target parent */
2192 rc = lmv_early_cancel(exp, tp_tgt, op_data, tgt->ltd_idx, LCK_EX,
2193 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID2);
2197 if (fid_is_sane(&op_data->op_fid4)) {
2198 /* cancel LOOKUP lock of target on target parent */
2199 if (tgt != tp_tgt) {
2200 rc = lmv_early_cancel(exp, tp_tgt, op_data,
2201 tgt->ltd_idx, LCK_EX,
2202 MDS_INODELOCK_LOOKUP,
2203 MF_MDC_CANCEL_FID4);
2209 if (fid_is_sane(&op_data->op_fid3)) {
2210 src_tgt = lmv_find_target(lmv, &op_data->op_fid3);
2211 if (IS_ERR(src_tgt))
2212 RETURN(PTR_ERR(src_tgt));
2214 /* cancel ELC locks of source */
2215 rc = lmv_early_cancel(exp, src_tgt, op_data, tgt->ltd_idx,
2216 LCK_EX, MDS_INODELOCK_ELC,
2217 MF_MDC_CANCEL_FID3);
2223 sp_tgt = __lmv_locate_tgt(lmv, op_data->op_mea1, old, oldlen,
2224 &op_data->op_fid1, &op_data->op_mds,
2225 op_data->op_post_migrate);
2227 RETURN(PTR_ERR(sp_tgt));
2229 /* cancel UPDATE locks of source parent */
2230 rc = lmv_early_cancel(exp, sp_tgt, op_data, tgt->ltd_idx, LCK_EX,
2231 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
2235 if (fid_is_sane(&op_data->op_fid3)) {
2236 /* cancel LOOKUP lock of source on source parent */
2237 if (src_tgt != sp_tgt) {
2238 rc = lmv_early_cancel(exp, sp_tgt, op_data,
2239 tgt->ltd_idx, LCK_EX,
2240 MDS_INODELOCK_LOOKUP,
2241 MF_MDC_CANCEL_FID3);
2248 CDEBUG(D_INODE, "RENAME "DFID"/%.*s to "DFID"/%.*s\n",
2249 PFID(&op_data->op_fid1), (int)oldlen, old,
2250 PFID(&op_data->op_fid2), (int)newlen, new);
2252 rc = md_rename(tgt->ltd_exp, op_data, old, oldlen, new, newlen,
2254 if (rc == -ENOENT && lmv_dir_retry_check_update(op_data)) {
2255 ptlrpc_req_finished(*request);
2260 if (rc && rc != -EXDEV)
2263 body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
2267 /* Not cross-ref case, just get out of here. */
2268 if (likely(!(body->mbo_valid & OBD_MD_MDS)))
2271 op_data->op_fid4 = body->mbo_fid1;
2273 ptlrpc_req_finished(*request);
2276 tgt = lmv_find_target(lmv, &op_data->op_fid4);
2278 RETURN(PTR_ERR(tgt));
2280 if (fid_is_sane(&op_data->op_fid4)) {
2281 /* cancel LOOKUP lock of target on target parent */
2282 if (tgt != tp_tgt) {
2283 rc = lmv_early_cancel(exp, tp_tgt, op_data,
2284 tgt->ltd_idx, LCK_EX,
2285 MDS_INODELOCK_LOOKUP,
2286 MF_MDC_CANCEL_FID4);
2295 static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
2296 void *ea, size_t ealen, struct ptlrpc_request **request)
2298 struct obd_device *obd = exp->exp_obd;
2299 struct lmv_obd *lmv = &obd->u.lmv;
2300 struct lmv_tgt_desc *tgt;
2304 CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x/0x%x\n",
2305 PFID(&op_data->op_fid1), op_data->op_attr.ia_valid,
2306 op_data->op_xvalid);
2308 op_data->op_flags |= MF_MDC_CANCEL_FID1;
2309 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2311 RETURN(PTR_ERR(tgt));
2313 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, request);
2318 static int lmv_fsync(struct obd_export *exp, const struct lu_fid *fid,
2319 struct ptlrpc_request **request)
2321 struct obd_device *obd = exp->exp_obd;
2322 struct lmv_obd *lmv = &obd->u.lmv;
2323 struct lmv_tgt_desc *tgt;
2327 tgt = lmv_find_target(lmv, fid);
2329 RETURN(PTR_ERR(tgt));
2331 rc = md_fsync(tgt->ltd_exp, fid, request);
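/*
 * Striped readdir support: a stripe_dirent per stripe caches the current dir
 * page and entry, and lmv_dir_ctxt merges entries from all stripes in hash
 * order when building a page for the client.
 */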
2335 struct stripe_dirent {
2336 struct page *sd_page;
2337 struct lu_dirpage *sd_dp;
2338 struct lu_dirent *sd_ent;
2342 struct lmv_dir_ctxt {
2343 struct lmv_obd *ldc_lmv;
2344 struct md_op_data *ldc_op_data;
2345 struct md_callback *ldc_cb_op;
2348 struct stripe_dirent ldc_stripes[0];
2351 static inline void stripe_dirent_unload(struct stripe_dirent *stripe)
2353 if (stripe->sd_page) {
2354 kunmap(stripe->sd_page);
2355 put_page(stripe->sd_page);
2356 stripe->sd_page = NULL;
2357 stripe->sd_ent = NULL;
2361 static inline void put_lmv_dir_ctxt(struct lmv_dir_ctxt *ctxt)
2365 for (i = 0; i < ctxt->ldc_count; i++)
2366 stripe_dirent_unload(&ctxt->ldc_stripes[i]);
2369 /* if @ent is a dummy entry, or . or .., get the next one */
2370 static struct lu_dirent *stripe_dirent_get(struct lmv_dir_ctxt *ctxt,
2371 struct lu_dirent *ent,
2374 for (; ent; ent = lu_dirent_next(ent)) {
2375 /* Skip dummy entry */
2376 if (le16_to_cpu(ent->lde_namelen) == 0)
2379 /* skip . and .. for other stripes */
2381 (strncmp(ent->lde_name, ".",
2382 le16_to_cpu(ent->lde_namelen)) == 0 ||
2383 strncmp(ent->lde_name, "..",
2384 le16_to_cpu(ent->lde_namelen)) == 0))
2387 if (le64_to_cpu(ent->lde_hash) >= ctxt->ldc_hash)
2394 static struct lu_dirent *stripe_dirent_load(struct lmv_dir_ctxt *ctxt,
2395 struct stripe_dirent *stripe,
2398 struct md_op_data *op_data = ctxt->ldc_op_data;
2399 struct lmv_oinfo *oinfo;
2400 struct lu_fid fid = op_data->op_fid1;
2401 struct inode *inode = op_data->op_data;
2402 struct lmv_tgt_desc *tgt;
2403 struct lu_dirent *ent = stripe->sd_ent;
2404 __u64 hash = ctxt->ldc_hash;
2409 LASSERT(stripe == &ctxt->ldc_stripes[stripe_index]);
2413 if (stripe->sd_page) {
2414 __u64 end = le64_to_cpu(stripe->sd_dp->ldp_hash_end);
2416 /* @hash should be the last dirent hash */
2417 LASSERTF(hash <= end,
2418 "ctxt@%p stripe@%p hash %llx end %llx\n",
2419 ctxt, stripe, hash, end);
2420 /* unload last page */
2421 stripe_dirent_unload(stripe);
2423 if (end == MDS_DIR_END_OFF) {
2424 stripe->sd_eof = true;
2430 oinfo = &op_data->op_mea1->lsm_md_oinfo[stripe_index];
2431 tgt = lmv_get_target(ctxt->ldc_lmv, oinfo->lmo_mds, NULL);
2437 /* op_data is shared by stripes, reset after use */
2438 op_data->op_fid1 = oinfo->lmo_fid;
2439 op_data->op_fid2 = oinfo->lmo_fid;
2440 op_data->op_data = oinfo->lmo_root;
2442 rc = md_read_page(tgt->ltd_exp, op_data, ctxt->ldc_cb_op, hash,
2445 op_data->op_fid1 = fid;
2446 op_data->op_fid2 = fid;
2447 op_data->op_data = inode;
2452 stripe->sd_dp = page_address(stripe->sd_page);
2453 ent = stripe_dirent_get(ctxt, lu_dirent_start(stripe->sd_dp),
2455 /* in case the page is filled with only ., .. and dummy entries, read the next one */
2458 stripe->sd_ent = ent;
2461 /* treat error as eof, so dir can be partially accessed */
2462 stripe->sd_eof = true;
2463 LCONSOLE_WARN("dir "DFID" stripe %d readdir failed: %d, "
2464 "directory is partially accessed!\n",
2465 PFID(&ctxt->ldc_op_data->op_fid1), stripe_index,
2472 static int lmv_file_resync(struct obd_export *exp, struct md_op_data *data)
2474 struct obd_device *obd = exp->exp_obd;
2475 struct lmv_obd *lmv = &obd->u.lmv;
2476 struct lmv_tgt_desc *tgt;
2480 rc = lmv_check_connect(obd);
2484 tgt = lmv_find_target(lmv, &data->op_fid1);
2486 RETURN(PTR_ERR(tgt));
2488 data->op_flags |= MF_MDC_CANCEL_FID1;
2489 rc = md_file_resync(tgt->ltd_exp, data);
2494 * Get dirent with the closest hash for striped directory
2496 * This function searches for the dir entry whose hash value is the
2497 * closest (>=) to the given hash among all sub-stripes; it is only called
2498 * for striped directories.
2500 * \param[in] ctxt dir read context
2502 * \retval dirent the entry was found successfully
2503 * NULL the entry was not found; normally this means the
2504 * end of the directory was reached, while stripe dirent
2505 * read errors are ignored to allow partial access
2508 static struct lu_dirent *lmv_dirent_next(struct lmv_dir_ctxt *ctxt)
2510 struct stripe_dirent *stripe;
2511 struct lu_dirent *ent = NULL;
2515 /* TODO: optimize with k-way merge sort */
2516 for (i = 0; i < ctxt->ldc_count; i++) {
2517 stripe = &ctxt->ldc_stripes[i];
2521 if (!stripe->sd_ent) {
2522 stripe_dirent_load(ctxt, stripe, i);
2523 if (!stripe->sd_ent) {
2524 LASSERT(stripe->sd_eof);
2530 le64_to_cpu(ctxt->ldc_stripes[min].sd_ent->lde_hash) >
2531 le64_to_cpu(stripe->sd_ent->lde_hash)) {
2533 if (le64_to_cpu(stripe->sd_ent->lde_hash) ==
2540 stripe = &ctxt->ldc_stripes[min];
2541 ent = stripe->sd_ent;
2542 /* pop found dirent */
2543 stripe->sd_ent = stripe_dirent_get(ctxt, lu_dirent_next(ent),
2551 * Build dir entry page for striped directory
2553 * This function gets one entry by @offset from a striped directory. It
2554 * reads entries from all stripes and chooses the one closest to the
2555 * required offset (@offset). A few notes:
2556 * 1. Skip . and .. for non-zero stripes, because there can only be one .
2557 * and one .. in a directory.
2558 * 2. op_data is shared by all stripes instead of allocating a new one,
2559 * so it needs to be restored before being reused.
2561 * \param[in] exp obd export refer to LMV
2562 * \param[in] op_data hold those MD parameters of read_entry
2563 * \param[in] cb_op ldlm callback being used in enqueue in mdc_read_entry
2564 * \param[in] offset starting hash offset
2565 * \param[out] ppage the page holding the entry. Note: because the entry
2566 * will be accessed in upper layer, so we need hold the
2567 * page until the usages of entry is finished, see
2568 * ll_dir_entry_next.
2570 * retval =0 if get entry successfully
2571 * <0 cannot get entry
2573 static int lmv_striped_read_page(struct obd_export *exp,
2574 struct md_op_data *op_data,
2575 struct md_callback *cb_op,
2576 __u64 offset, struct page **ppage)
2578 struct page *page = NULL;
2579 struct lu_dirpage *dp;
2581 struct lu_dirent *ent;
2582 struct lu_dirent *last_ent;
2584 struct lmv_dir_ctxt *ctxt;
2585 struct lu_dirent *next = NULL;
2591 /* Allocate a page, read entries from all of the stripes, and fill
2592 * the page in hash order */
2593 page = alloc_page(GFP_KERNEL);
2597 /* Initialize the entry page */
2599 memset(dp, 0, sizeof(*dp));
2600 dp->ldp_hash_start = cpu_to_le64(offset);
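/* dir entries will be packed right after the lu_dirpage header */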
2603 left_bytes = PAGE_SIZE - sizeof(*dp);
2607 /* initialize dir read context */
2608 stripe_count = op_data->op_mea1->lsm_md_stripe_count;
2609 OBD_ALLOC(ctxt, offsetof(typeof(*ctxt), ldc_stripes[stripe_count]));
2611 GOTO(free_page, rc = -ENOMEM);
2612 ctxt->ldc_lmv = &exp->exp_obd->u.lmv;
2613 ctxt->ldc_op_data = op_data;
2614 ctxt->ldc_cb_op = cb_op;
2615 ctxt->ldc_hash = offset;
2616 ctxt->ldc_count = stripe_count;
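/* merge loop: repeatedly take the dirent with the smallest hash across all stripes and pack it into the new page until the page is full or the directory ends */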
2619 next = lmv_dirent_next(ctxt);
2621 /* end of directory */
2623 ctxt->ldc_hash = MDS_DIR_END_OFF;
2626 ctxt->ldc_hash = le64_to_cpu(next->lde_hash);
2628 ent_size = le16_to_cpu(next->lde_reclen);
2630 /* the last entry's lde_reclen is 0, but it might not be the last
2631 * one of this temporary dir page */
2633 ent_size = lu_dirent_calc_size(
2634 le16_to_cpu(next->lde_namelen),
2635 le32_to_cpu(next->lde_attrs));
2637 if (ent_size > left_bytes)
2640 memcpy(ent, next, ent_size);
2642 /* Replace . with the master FID and replace .. with the parent FID
2643 * of the master object */
2644 if (strncmp(ent->lde_name, ".",
2645 le16_to_cpu(ent->lde_namelen)) == 0 &&
2646 le16_to_cpu(ent->lde_namelen) == 1)
2647 fid_cpu_to_le(&ent->lde_fid, &op_data->op_fid1);
2648 else if (strncmp(ent->lde_name, "..",
2649 le16_to_cpu(ent->lde_namelen)) == 0 &&
2650 le16_to_cpu(ent->lde_namelen) == 2)
2651 fid_cpu_to_le(&ent->lde_fid, &op_data->op_fid3);
2653 CDEBUG(D_INODE, "entry %.*s hash %#llx\n",
2654 le16_to_cpu(ent->lde_namelen), ent->lde_name,
2655 le64_to_cpu(ent->lde_hash));
2657 left_bytes -= ent_size;
2658 ent->lde_reclen = cpu_to_le16(ent_size);
2660 ent = (void *)ent + ent_size;
2663 last_ent->lde_reclen = 0;
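/* flag an empty page when no entry was packed, or a hash collision when the page's end hash equals the last packed entry's hash */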
2666 dp->ldp_flags |= LDF_EMPTY;
2667 else if (ctxt->ldc_hash == le64_to_cpu(last_ent->lde_hash))
2668 dp->ldp_flags |= LDF_COLLIDE;
2669 dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
2670 dp->ldp_hash_end = cpu_to_le64(ctxt->ldc_hash);
2672 put_lmv_dir_ctxt(ctxt);
2673 OBD_FREE(ctxt, offsetof(typeof(*ctxt), ldc_stripes[stripe_count]));
2686 int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data,
2687 struct md_callback *cb_op, __u64 offset,
2688 struct page **ppage)
2690 struct obd_device *obd = exp->exp_obd;
2691 struct lmv_obd *lmv = &obd->u.lmv;
2692 struct lmv_stripe_md *lsm = op_data->op_mea1;
2693 struct lmv_tgt_desc *tgt;
2697 if (unlikely(lsm != NULL)) {
2698 rc = lmv_striped_read_page(exp, op_data, cb_op, offset, ppage);
2702 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2704 RETURN(PTR_ERR(tgt));
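/* non-striped directory: all entries live on the single MDT that owns the FID */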
2706 rc = md_read_page(tgt->ltd_exp, op_data, cb_op, offset, ppage);
2712 * Unlink a file/directory
2714 * Unlink a file or directory under the parent dir. The unlink request is
2715 * usually sent to the MDT where the child is located, but if the client
2716 * does not have the child FID then the request is sent to the MDT where
2717 * the parent is located.
2719 * If the parent is a striped directory then it also needs to locate in which
2720 * stripe the name of the child is located, and to replace the parent FID
2721 * (@op->op_fid1) with the stripe FID. Note: if the stripe is unknown,
2722 * it will walk through all of the sub-stripes until the child is being
2725 * \param[in] exp export referring to the LMV
2726 * \param[in] op_data different parameters transferred between client
2727 * MD stacks: name, namelen, FIDs etc.
2728 * op_fid1 is the parent FID, op_fid2 is the child
2730 * \param[out] request points to the unlink request.
2732 * \retval 0 on success
2733 * \retval negative negated errno on failure.
2735 static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
2736 struct ptlrpc_request **request)
2738 struct obd_device *obd = exp->exp_obd;
2739 struct lmv_obd *lmv = &obd->u.lmv;
2740 struct lmv_tgt_desc *tgt;
2741 struct lmv_tgt_desc *parent_tgt;
2742 struct mdt_body *body;
2747 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2748 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2749 op_data->op_cap = cfs_curproc_cap_pack();
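/* locate the MDT (or, for a striped parent, the stripe) that holds the name being unlinked */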
2752 parent_tgt = lmv_locate_tgt(lmv, op_data, &op_data->op_fid1);
2753 if (IS_ERR(parent_tgt))
2754 RETURN(PTR_ERR(parent_tgt));
2756 if (likely(!fid_is_zero(&op_data->op_fid2))) {
2757 tgt = lmv_find_target(lmv, &op_data->op_fid2);
2759 RETURN(PTR_ERR(tgt));
2765 * If the child's FID is given, cancel unused locks for it if it is from
2766 * another export than the parent's.
2768 * The LOOKUP lock for the child (fid3) should also be cancelled on the
2769 * parent tgt in mdc_unlink().
2771 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
2773 if (parent_tgt != tgt)
2774 rc = lmv_early_cancel(exp, parent_tgt, op_data, tgt->ltd_idx,
2775 LCK_EX, MDS_INODELOCK_LOOKUP,
2776 MF_MDC_CANCEL_FID3);
2778 rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX,
2779 MDS_INODELOCK_ELC, MF_MDC_CANCEL_FID3);
2783 CDEBUG(D_INODE, "unlink with fid="DFID"/"DFID" -> mds #%u\n",
2784 PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx);
2786 rc = md_unlink(tgt->ltd_exp, op_data, request);
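/* -ENOENT may mean the name lives in another stripe (e.g. the dir layout changed); re-check and retry if so */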
2787 if (rc == -ENOENT && lmv_dir_retry_check_update(op_data)) {
2788 ptlrpc_req_finished(*request);
2796 body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
2800 /* Not cross-ref case, just get out of here. */
2801 if (likely(!(body->mbo_valid & OBD_MD_MDS)))
2804 /* This is a remote object, try remote MDT. */
2805 op_data->op_fid2 = body->mbo_fid1;
2806 ptlrpc_req_finished(*request);
2809 tgt = lmv_find_target(lmv, &op_data->op_fid2);
2811 RETURN(PTR_ERR(tgt));
2816 static int lmv_precleanup(struct obd_device *obd)
2819 libcfs_kkuc_group_rem(&obd->obd_uuid, 0, KUC_GRP_HSM);
2820 fld_client_debugfs_fini(&obd->u.lmv.lmv_fld);
2821 lprocfs_obd_cleanup(obd);
2822 lprocfs_free_md_stats(obd);
2827 * Get by key a value associated with an LMV device.
2829 * Dispatch request to lower-layer devices as needed.
2831 * \param[in] env execution environment for this thread
2832 * \param[in] exp export for the LMV device
2833 * \param[in] keylen length of key identifier
2834 * \param[in] key identifier of key to get value for
2835 * \param[in] vallen size of \a val
2836 * \param[out] val pointer to storage location for value
2839 * \retval 0 on success
2840 * \retval negative negated errno on failure
2842 static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
2843 __u32 keylen, void *key, __u32 *vallen, void *val)
2845 struct obd_device *obd;
2846 struct lmv_obd *lmv;
2850 obd = class_exp2obd(exp);
2852 CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
2853 exp->exp_handle.h_cookie);
2858 if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
2861 LASSERT(*vallen == sizeof(__u32));
2862 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2863 struct lmv_tgt_desc *tgt = lmv->tgts[i];
2865 * All tgts should be connected when this gets called.
2867 if (tgt == NULL || tgt->ltd_exp == NULL)
2870 if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
2875 } else if (KEY_IS(KEY_MAX_EASIZE) ||
2876 KEY_IS(KEY_DEFAULT_EASIZE) ||
2877 KEY_IS(KEY_CONN_DATA)) {
2879 * Forward this request to the first MDS; it should know the LOV
2882 rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
2884 if (!rc && KEY_IS(KEY_CONN_DATA))
2885 exp->exp_connect_data = *(struct obd_connect_data *)val;
2887 } else if (KEY_IS(KEY_TGT_COUNT)) {
2888 *((int *)val) = lmv->desc.ld_tgt_count;
2892 CDEBUG(D_IOCTL, "Invalid key\n");
2897 * Asynchronously set by key a value associated with an LMV device.
2899 * Dispatch request to lower-layer devices as needed.
2901 * \param[in] env execution environment for this thread
2902 * \param[in] exp export for the LMV device
2903 * \param[in] keylen length of key identifier
2904 * \param[in] key identifier of key to store value for
2905 * \param[in] vallen size of value to store
2906 * \param[in] val pointer to data to be stored
2907 * \param[in] set optional list of related ptlrpc requests
2909 * \retval 0 on success
2910 * \retval negative negated errno on failure
2912 int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
2913 __u32 keylen, void *key, __u32 vallen, void *val,
2914 struct ptlrpc_request_set *set)
2916 struct lmv_tgt_desc *tgt = NULL;
2917 struct obd_device *obd;
2918 struct lmv_obd *lmv;
2922 obd = class_exp2obd(exp);
2924 CDEBUG(D_IOCTL, "Invalid client cookie %#llx\n",
2925 exp->exp_handle.h_cookie);
2930 if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX) ||
2931 KEY_IS(KEY_DEFAULT_EASIZE)) {
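/* broadcast the setting to every connected MDC target */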
2934 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2937 if (tgt == NULL || tgt->ltd_exp == NULL)
2940 err = obd_set_info_async(env, tgt->ltd_exp,
2941 keylen, key, vallen, val, set);
2952 static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
2953 const struct lmv_mds_md_v1 *lmm1)
2955 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
2962 lsm->lsm_md_magic = le32_to_cpu(lmm1->lmv_magic);
2963 lsm->lsm_md_stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
2964 lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index);
2965 if (OBD_FAIL_CHECK(OBD_FAIL_UNKNOWN_LMV_STRIPE))
2966 lsm->lsm_md_hash_type = LMV_HASH_TYPE_UNKNOWN;
2968 lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type);
2969 lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version);
2970 lsm->lsm_md_migrate_offset = le32_to_cpu(lmm1->lmv_migrate_offset);
2971 lsm->lsm_md_migrate_hash = le32_to_cpu(lmm1->lmv_migrate_hash);
2972 cplen = strlcpy(lsm->lsm_md_pool_name, lmm1->lmv_pool_name,
2973 sizeof(lsm->lsm_md_pool_name));
2975 if (cplen >= sizeof(lsm->lsm_md_pool_name))
2978 CDEBUG(D_INFO, "unpack lsm count %d, master %d hash_type %#x "
2979 "layout_version %d\n", lsm->lsm_md_stripe_count,
2980 lsm->lsm_md_master_mdt_index, lsm->lsm_md_hash_type,
2981 lsm->lsm_md_layout_version);
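/* convert each stripe FID to CPU byte order and look up the MDT it resides on */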
2983 stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
2984 for (i = 0; i < stripe_count; i++) {
2985 fid_le_to_cpu(&lsm->lsm_md_oinfo[i].lmo_fid,
2986 &lmm1->lmv_stripe_fids[i]);
2987 rc = lmv_fld_lookup(lmv, &lsm->lsm_md_oinfo[i].lmo_fid,
2988 &lsm->lsm_md_oinfo[i].lmo_mds);
2991 CDEBUG(D_INFO, "unpack fid #%d "DFID"\n", i,
2992 PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
2998 static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
2999 const union lmv_mds_md *lmm, size_t lmm_size)
3001 struct lmv_stripe_md *lsm;
3004 bool allocated = false;
3007 LASSERT(lsmp != NULL);
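/* a NULL lmm together with an existing *lsmp means release the in-memory stripe metadata */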
3011 if (lsm != NULL && lmm == NULL) {
3014 for (i = 0; i < lsm->lsm_md_stripe_count; i++)
3015 iput(lsm->lsm_md_oinfo[i].lmo_root);
3016 lsm_size = lmv_stripe_md_size(lsm->lsm_md_stripe_count);
3017 OBD_FREE(lsm, lsm_size);
3022 if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
3026 if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 &&
3027 le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) {
3028 CERROR("%s: invalid lmv magic %x: rc = %d\n",
3029 exp->exp_obd->obd_name, le32_to_cpu(lmm->lmv_magic),
3034 if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1)
3035 lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm));
3038 * Unpack the default dir stripe (lmv_user_md) to lmv_stripe_md;
3039 * the stripe count should be 0 in that case.
3041 lsm_size = lmv_stripe_md_size(0);
3043 lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm));
3045 OBD_ALLOC(lsm, lsm_size);
3052 switch (le32_to_cpu(lmm->lmv_magic)) {
3054 rc = lmv_unpack_md_v1(exp, lsm, &lmm->lmv_md_v1);
3057 CERROR("%s: unrecognized magic %x\n", exp->exp_obd->obd_name,
3058 le32_to_cpu(lmm->lmv_magic));
3063 if (rc != 0 && allocated) {
3064 OBD_FREE(lsm, lsm_size);
3071 void lmv_free_memmd(struct lmv_stripe_md *lsm)
3073 lmv_unpackmd(NULL, &lsm, NULL, 0);
3075 EXPORT_SYMBOL(lmv_free_memmd);
3077 static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
3078 union ldlm_policy_data *policy,
3079 enum ldlm_mode mode, enum ldlm_cancel_flags flags,
3082 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
3087 LASSERT(fid != NULL);
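/* fan the cancel request out to every active target */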
3089 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
3090 struct lmv_tgt_desc *tgt = lmv->tgts[i];
3093 if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active)
3096 err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags,
3104 static int lmv_set_lock_data(struct obd_export *exp,
3105 const struct lustre_handle *lockh,
3106 void *data, __u64 *bits)
3108 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
3109 struct lmv_tgt_desc *tgt = lmv->tgts[0];
3113 if (tgt == NULL || tgt->ltd_exp == NULL)
3115 rc = md_set_lock_data(tgt->ltd_exp, lockh, data, bits);
3119 enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
3120 const struct lu_fid *fid, enum ldlm_type type,
3121 union ldlm_policy_data *policy,
3122 enum ldlm_mode mode, struct lustre_handle *lockh)
3124 struct obd_device *obd = exp->exp_obd;
3125 struct lmv_obd *lmv = &obd->u.lmv;
3131 CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
3134 * With DNE every object can have two locks in different namespaces: the
3135 * lookup lock in the namespace of the MDT storing the direntry, and the
3136 * update/open lock in the namespace of the MDT storing the inode. Try the
3137 * MDT that the FID maps to first, since it is easily found, and only try others if that fails.
3139 for (i = 0, tgt = lmv_find_target_index(lmv, fid);
3140 i < lmv->desc.ld_tgt_count;
3141 i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
3143 CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
3144 obd->obd_name, PFID(fid), tgt);
3148 if (lmv->tgts[tgt] == NULL ||
3149 lmv->tgts[tgt]->ltd_exp == NULL ||
3150 lmv->tgts[tgt]->ltd_active == 0)
3153 rc = md_lock_match(lmv->tgts[tgt]->ltd_exp, flags, fid,
3154 type, policy, mode, lockh);
3162 int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
3163 struct obd_export *dt_exp, struct obd_export *md_exp,
3164 struct lustre_md *md)
3166 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
3167 struct lmv_tgt_desc *tgt = lmv->tgts[0];
3169 if (tgt == NULL || tgt->ltd_exp == NULL)
3172 return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md);
3175 int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
3177 struct obd_device *obd = exp->exp_obd;
3178 struct lmv_obd *lmv = &obd->u.lmv;
3179 struct lmv_tgt_desc *tgt = lmv->tgts[0];
3182 if (md->lmv != NULL) {
3183 lmv_free_memmd(md->lmv);
3186 if (tgt == NULL || tgt->ltd_exp == NULL)
3188 RETURN(md_free_lustre_md(lmv->tgts[0]->ltd_exp, md));
3191 int lmv_set_open_replay_data(struct obd_export *exp,
3192 struct obd_client_handle *och,
3193 struct lookup_intent *it)
3195 struct obd_device *obd = exp->exp_obd;
3196 struct lmv_obd *lmv = &obd->u.lmv;
3197 struct lmv_tgt_desc *tgt;
3200 tgt = lmv_find_target(lmv, &och->och_fid);
3202 RETURN(PTR_ERR(tgt));
3204 RETURN(md_set_open_replay_data(tgt->ltd_exp, och, it));
3207 int lmv_clear_open_replay_data(struct obd_export *exp,
3208 struct obd_client_handle *och)
3210 struct obd_device *obd = exp->exp_obd;
3211 struct lmv_obd *lmv = &obd->u.lmv;
3212 struct lmv_tgt_desc *tgt;
3215 tgt = lmv_find_target(lmv, &och->och_fid);
3217 RETURN(PTR_ERR(tgt));
3219 RETURN(md_clear_open_replay_data(tgt->ltd_exp, och));
3222 int lmv_intent_getattr_async(struct obd_export *exp,
3223 struct md_enqueue_info *minfo)
3225 struct md_op_data *op_data = &minfo->mi_data;
3226 struct obd_device *obd = exp->exp_obd;
3227 struct lmv_obd *lmv = &obd->u.lmv;
3228 struct lmv_tgt_desc *tgt = NULL;
3232 if (!fid_is_sane(&op_data->op_fid2))
3235 tgt = lmv_find_target(lmv, &op_data->op_fid1);
3237 RETURN(PTR_ERR(tgt));
3240 * There is no special handling for a remote dir, which would need to fetch
3241 * both the LOOKUP lock on the parent and then the UPDATE lock on the child
3242 * MDT; that is complicated because this is done asynchronously. So only the
3243 * LOOKUP lock is fetched for a remote dir, but since a remote dir is a rare
3244 * case and not supporting it in statahead causes no issue, just leave
3248 rc = md_intent_getattr_async(tgt->ltd_exp, minfo);
3252 int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
3253 struct lu_fid *fid, __u64 *bits)
3255 struct obd_device *obd = exp->exp_obd;
3256 struct lmv_obd *lmv = &obd->u.lmv;
3257 struct lmv_tgt_desc *tgt;
3261 tgt = lmv_find_target(lmv, fid);
3263 RETURN(PTR_ERR(tgt));
3265 rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
3269 int lmv_get_fid_from_lsm(struct obd_export *exp,
3270 const struct lmv_stripe_md *lsm,
3271 const char *name, int namelen, struct lu_fid *fid)
3273 const struct lmv_oinfo *oinfo;
3275 LASSERT(lsm != NULL);
3276 oinfo = lsm_name_to_stripe_info(lsm, name, namelen, false);
3278 return PTR_ERR(oinfo);
3280 *fid = oinfo->lmo_fid;
3286 * For the LMV it is only necessary to send the request to the master MDT, and
3287 * the master MDT will handle the other slave MDTs. The only exception is
3288 * Q_GETOQUOTA, for which data is fetched directly from the slave MDTs.
3290 int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
3291 struct obd_quotactl *oqctl)
3293 struct obd_device *obd = class_exp2obd(exp);
3294 struct lmv_obd *lmv = &obd->u.lmv;
3295 struct lmv_tgt_desc *tgt = lmv->tgts[0];
3298 __u64 curspace, curinodes;
3302 tgt->ltd_exp == NULL ||
3304 lmv->desc.ld_tgt_count == 0) {
3305 CERROR("master lmv inactive\n");
3309 if (oqctl->qc_cmd != Q_GETOQUOTA) {
3310 rc = obd_quotactl(tgt->ltd_exp, oqctl);
3314 curspace = curinodes = 0;
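/* Q_GETOQUOTA: query each active MDT and sum the space and inode usage */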
3315 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
3319 if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active)
3322 err = obd_quotactl(tgt->ltd_exp, oqctl);
3324 CERROR("getquota on mdt %d failed. %d\n", i, err);
3328 curspace += oqctl->qc_dqblk.dqb_curspace;
3329 curinodes += oqctl->qc_dqblk.dqb_curinodes;
3332 oqctl->qc_dqblk.dqb_curspace = curspace;
3333 oqctl->qc_dqblk.dqb_curinodes = curinodes;
3338 static int lmv_merge_attr(struct obd_export *exp,
3339 const struct lmv_stripe_md *lsm,
3340 struct cl_attr *attr,
3341 ldlm_blocking_callback cb_blocking)
3346 rc = lmv_revalidate_slaves(exp, lsm, cb_blocking, 0);
3350 for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
3351 struct inode *inode = lsm->lsm_md_oinfo[i].lmo_root;
3353 CDEBUG(D_INFO, ""DFID" size %llu, blocks %llu nlink %u,"
3354 " atime %lu ctime %lu, mtime %lu.\n",
3355 PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
3356 i_size_read(inode), (unsigned long long)inode->i_blocks,
3357 inode->i_nlink, LTIME_S(inode->i_atime),
3358 LTIME_S(inode->i_ctime), LTIME_S(inode->i_mtime));
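/* sizes and blocks are summed across stripes; each timestamp takes the maximum */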
3360 /* for a slave stripe, subtract the nlink contributed by . and .. */
3362 attr->cat_nlink += inode->i_nlink - 2;
3364 attr->cat_nlink = inode->i_nlink;
3366 attr->cat_size += i_size_read(inode);
3367 attr->cat_blocks += inode->i_blocks;
3369 if (attr->cat_atime < LTIME_S(inode->i_atime))
3370 attr->cat_atime = LTIME_S(inode->i_atime);
3372 if (attr->cat_ctime < LTIME_S(inode->i_ctime))
3373 attr->cat_ctime = LTIME_S(inode->i_ctime);
3375 if (attr->cat_mtime < LTIME_S(inode->i_mtime))
3376 attr->cat_mtime = LTIME_S(inode->i_mtime);
3381 struct obd_ops lmv_obd_ops = {
3382 .o_owner = THIS_MODULE,
3383 .o_setup = lmv_setup,
3384 .o_cleanup = lmv_cleanup,
3385 .o_precleanup = lmv_precleanup,
3386 .o_process_config = lmv_process_config,
3387 .o_connect = lmv_connect,
3388 .o_disconnect = lmv_disconnect,
3389 .o_statfs = lmv_statfs,
3390 .o_get_info = lmv_get_info,
3391 .o_set_info_async = lmv_set_info_async,
3392 .o_notify = lmv_notify,
3393 .o_get_uuid = lmv_get_uuid,
3394 .o_iocontrol = lmv_iocontrol,
3395 .o_quotactl = lmv_quotactl
3398 struct md_ops lmv_md_ops = {
3399 .m_get_root = lmv_get_root,
3400 .m_null_inode = lmv_null_inode,
3401 .m_close = lmv_close,
3402 .m_create = lmv_create,
3403 .m_enqueue = lmv_enqueue,
3404 .m_getattr = lmv_getattr,
3405 .m_getxattr = lmv_getxattr,
3406 .m_getattr_name = lmv_getattr_name,
3407 .m_intent_lock = lmv_intent_lock,
3409 .m_rename = lmv_rename,
3410 .m_setattr = lmv_setattr,
3411 .m_setxattr = lmv_setxattr,
3412 .m_fsync = lmv_fsync,
3413 .m_file_resync = lmv_file_resync,
3414 .m_read_page = lmv_read_page,
3415 .m_unlink = lmv_unlink,
3416 .m_init_ea_size = lmv_init_ea_size,
3417 .m_cancel_unused = lmv_cancel_unused,
3418 .m_set_lock_data = lmv_set_lock_data,
3419 .m_lock_match = lmv_lock_match,
3420 .m_get_lustre_md = lmv_get_lustre_md,
3421 .m_free_lustre_md = lmv_free_lustre_md,
3422 .m_merge_attr = lmv_merge_attr,
3423 .m_set_open_replay_data = lmv_set_open_replay_data,
3424 .m_clear_open_replay_data = lmv_clear_open_replay_data,
3425 .m_intent_getattr_async = lmv_intent_getattr_async,
3426 .m_revalidate_lock = lmv_revalidate_lock,
3427 .m_get_fid_from_lsm = lmv_get_fid_from_lsm,
3428 .m_unpackmd = lmv_unpackmd,
3431 static int __init lmv_init(void)
3433 return class_register_type(&lmv_obd_ops, &lmv_md_ops, true, NULL,
3434 LUSTRE_LMV_NAME, NULL);
3437 static void __exit lmv_exit(void)
3439 class_unregister_type(LUSTRE_LMV_NAME);
3442 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3443 MODULE_DESCRIPTION("Lustre Logical Metadata Volume");
3444 MODULE_VERSION(LUSTRE_VERSION_STRING);
3445 MODULE_LICENSE("GPL");
3447 module_init(lmv_init);
3448 module_exit(lmv_exit);