4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
20 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
21 * CA 95054 USA or visit www.sun.com if you need additional information or
27 * Copyright (c) 2004, 2010, Oracle and/or its affiliates. All rights reserved.
28 * Use is subject to license terms.
30 * Copyright (c) 2011, 2015, Intel Corporation.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
37 #define DEBUG_SUBSYSTEM S_LMV
38 #include <linux/slab.h>
39 #include <linux/module.h>
40 #include <linux/init.h>
41 #include <linux/user_namespace.h>
42 #ifdef HAVE_UIDGID_HEADER
43 # include <linux/uidgid.h>
44 #endif
45 #include <linux/slab.h>
46 #include <linux/pagemap.h>
48 #include <linux/math64.h>
49 #include <linux/seq_file.h>
50 #include <linux/namei.h>
52 #include <lustre/lustre_idl.h>
53 #include <obd_support.h>
54 #include <lustre_lib.h>
55 #include <lustre_net.h>
56 #include <obd_class.h>
57 #include <lustre_lmv.h>
58 #include <lprocfs_status.h>
59 #include <cl_object.h>
60 #include <lustre_fid.h>
61 #include <lustre_ioctl.h>
62 #include <lustre_kernelcomm.h>
63 #include "lmv_internal.h"
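/*
 * This file implements the client-side LMV (Logical Metadata Volume)
 * device.  LMV aggregates the per-MDT MDC devices into a single
 * metadata target for llite: requests are routed to the proper MDC
 * either by FID (through the FLD client) or, for striped directories,
 * by the hash of the name within the directory's stripe layout.
 */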
65 static void lmv_activate_target(struct lmv_obd *lmv,
66 struct lmv_tgt_desc *tgt,
69 if (tgt->ltd_active == activate)
72 tgt->ltd_active = activate;
73 lmv->desc.ld_active_tgt_count += (activate ? 1 : -1);
75 tgt->ltd_exp->exp_obd->obd_inactive = !activate;
81 * -EINVAL : UUID can't be found in the LMV's target list
82 * -ENOTCONN: The UUID is found, but the target connection is bad (!)
83 * -EBADF : The UUID is found, but the OBD is of the wrong type (!)
85 static int lmv_set_mdc_active(struct lmv_obd *lmv,
86 const struct obd_uuid *uuid,
89 struct lmv_tgt_desc *tgt = NULL;
90 struct obd_device *obd;
95 CDEBUG(D_INFO, "Searching in lmv %p for uuid %s (activate=%d)\n",
96 lmv, uuid->uuid, activate);
98 spin_lock(&lmv->lmv_lock);
99 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
101 if (tgt == NULL || tgt->ltd_exp == NULL)
104 CDEBUG(D_INFO, "Target idx %d is %s conn "LPX64"\n", i,
105 tgt->ltd_uuid.uuid, tgt->ltd_exp->exp_handle.h_cookie);
107 if (obd_uuid_equals(uuid, &tgt->ltd_uuid))
111 if (i == lmv->desc.ld_tgt_count)
112 GOTO(out_lmv_lock, rc = -EINVAL);
114 obd = class_exp2obd(tgt->ltd_exp);
116 GOTO(out_lmv_lock, rc = -ENOTCONN);
118 CDEBUG(D_INFO, "Found OBD %s=%s device %d (%p) type %s at LMV idx %d\n",
119 obd->obd_name, obd->obd_uuid.uuid, obd->obd_minor, obd,
120 obd->obd_type->typ_name, i);
121 LASSERT(strcmp(obd->obd_type->typ_name, LUSTRE_MDC_NAME) == 0);
123 if (tgt->ltd_active == activate) {
124 CDEBUG(D_INFO, "OBD %p already %sactive!\n", obd,
125 activate ? "" : "in");
126 GOTO(out_lmv_lock, rc);
129 CDEBUG(D_INFO, "Marking OBD %p %sactive\n", obd,
130 activate ? "" : "in");
131 lmv_activate_target(lmv, tgt, activate);
135 spin_unlock(&lmv->lmv_lock);
139 struct obd_uuid *lmv_get_uuid(struct obd_export *exp)
141 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
142 struct lmv_tgt_desc *tgt = lmv->tgts[0];
144 return (tgt == NULL) ? NULL : obd_get_uuid(tgt->ltd_exp);
147 static int lmv_notify(struct obd_device *obd, struct obd_device *watched,
148 enum obd_notify_event ev, void *data)
150 struct obd_connect_data *conn_data;
151 struct lmv_obd *lmv = &obd->u.lmv;
152 struct obd_uuid *uuid;
156 if (strcmp(watched->obd_type->typ_name, LUSTRE_MDC_NAME)) {
157 CERROR("unexpected notification of %s %s!\n",
158 watched->obd_type->typ_name,
163 uuid = &watched->u.cli.cl_target_uuid;
164 if (ev == OBD_NOTIFY_ACTIVE || ev == OBD_NOTIFY_INACTIVE) {
166 * Set MDC as active before notifying the observer, so the
167 * observer can use the MDC normally.
169 rc = lmv_set_mdc_active(lmv, uuid,
170 ev == OBD_NOTIFY_ACTIVE);
172 CERROR("%sactivation of %s failed: %d\n",
173 ev == OBD_NOTIFY_ACTIVE ? "" : "de",
177 } else if (ev == OBD_NOTIFY_OCD) {
178 conn_data = &watched->u.cli.cl_import->imp_connect_data;
180 * XXX: Make sure that ocd_connect_flags from all targets are
181 * the same. Otherwise one of the MDTs is running the wrong version or
182 * something like this. --umka
184 obd->obd_self_export->exp_connect_data = *conn_data;
187 else if (ev == OBD_NOTIFY_DISCON) {
189 * For disconnect event, flush fld cache for failout MDS case.
191 fld_client_flush(&lmv->lmv_fld);
195 * Pass the notification up the chain.
197 if (obd->obd_observer)
198 rc = obd_notify(obd->obd_observer, watched, ev, data);
204 * This is a fake connect function. Its purpose is to initialize the LMV and
205 * tell the caller that everything is okay. The real connection is performed later.
207 static int lmv_connect(const struct lu_env *env,
208 struct obd_export **exp, struct obd_device *obd,
209 struct obd_uuid *cluuid, struct obd_connect_data *data,
212 struct lmv_obd *lmv = &obd->u.lmv;
213 struct lustre_handle conn = { 0 };
218 * We don't want to actually do the underlying connections more than
219 * once, so keep track.
222 if (lmv->refcount > 1) {
227 rc = class_connect(&conn, obd, cluuid);
229 CERROR("class_connection() returned %d\n", rc);
233 *exp = class_conn2export(&conn);
234 class_export_get(*exp);
238 lmv->cluuid = *cluuid;
241 lmv->conn_data = *data;
243 if (lmv->targets_proc_entry == NULL) {
244 lmv->targets_proc_entry = lprocfs_register("target_obds",
247 if (IS_ERR(lmv->targets_proc_entry)) {
248 CERROR("%s: cannot register "
249 "/proc/fs/lustre/%s/%s/target_obds\n",
250 obd->obd_name, obd->obd_type->typ_name,
252 lmv->targets_proc_entry = NULL;
257 * All real clients should perform the actual connection right away, because
258 * it is possible that LMV will not have an opportunity to connect the targets
259 * and the MDC code will be called directly, for instance while reading the
260 * ../mdc/../kbytesfree procfs file, etc.
262 if (data != NULL && (data->ocd_connect_flags & OBD_CONNECT_REAL))
263 rc = lmv_check_connect(obd);
265 if (rc && lmv->targets_proc_entry != NULL)
266 lprocfs_remove(&lmv->targets_proc_entry);
270 static int lmv_init_ea_size(struct obd_export *exp, __u32 easize,
273 struct obd_device *obd = exp->exp_obd;
274 struct lmv_obd *lmv = &obd->u.lmv;
280 if (lmv->max_easize < easize) {
281 lmv->max_easize = easize;
284 if (lmv->max_def_easize < def_easize) {
285 lmv->max_def_easize = def_easize;
292 if (lmv->connected == 0)
295 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
296 struct lmv_tgt_desc *tgt = lmv->tgts[i];
298 if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active) {
299 CWARN("%s: NULL export for %d\n", obd->obd_name, i);
303 rc = md_init_ea_size(tgt->ltd_exp, easize, def_easize);
305 CERROR("%s: obd_init_ea_size() failed on MDT target %d:"
306 " rc = %d\n", obd->obd_name, i, rc);
313 #define MAX_STRING_SIZE 128
315 int lmv_connect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
317 struct lmv_obd *lmv = &obd->u.lmv;
318 struct obd_uuid *cluuid = &lmv->cluuid;
319 struct obd_uuid lmv_mdc_uuid = { "LMV_MDC_UUID" };
320 struct obd_device *mdc_obd;
321 struct obd_export *mdc_exp;
322 struct lu_fld_target target;
326 mdc_obd = class_find_client_obd(&tgt->ltd_uuid, LUSTRE_MDC_NAME,
329 CERROR("target %s not attached\n", tgt->ltd_uuid.uuid);
333 CDEBUG(D_CONFIG, "connect to %s(%s) - %s, %s FOR %s\n",
334 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
335 tgt->ltd_uuid.uuid, obd->obd_uuid.uuid,
338 if (!mdc_obd->obd_set_up) {
339 CERROR("target %s is not set up\n", tgt->ltd_uuid.uuid);
343 rc = obd_connect(NULL, &mdc_exp, mdc_obd, &lmv_mdc_uuid,
344 &lmv->conn_data, NULL);
346 CERROR("target %s connect error %d\n", tgt->ltd_uuid.uuid, rc);
351 * Init fid sequence client for this mdc and add new fld target.
353 rc = obd_fid_init(mdc_obd, mdc_exp, LUSTRE_SEQ_METADATA);
357 target.ft_srv = NULL;
358 target.ft_exp = mdc_exp;
359 target.ft_idx = tgt->ltd_idx;
361 fld_client_add_target(&lmv->lmv_fld, &target);
363 rc = obd_register_observer(mdc_obd, obd);
365 obd_disconnect(mdc_exp);
366 CERROR("target %s register_observer error %d\n",
367 tgt->ltd_uuid.uuid, rc);
371 if (obd->obd_observer) {
373 * Tell the observer about the new target.
375 rc = obd_notify(obd->obd_observer, mdc_exp->exp_obd,
377 (void *)(tgt - lmv->tgts[0]));
379 obd_disconnect(mdc_exp);
385 tgt->ltd_exp = mdc_exp;
386 lmv->desc.ld_active_tgt_count++;
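/* Propagate the largest EA sizes seen so far to the newly connected
 * MDC so that its reply buffers are large enough for the current
 * (possibly striped) layouts. */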
388 md_init_ea_size(tgt->ltd_exp, lmv->max_easize, lmv->max_def_easize);
390 CDEBUG(D_CONFIG, "Connected to %s(%s) successfully (%d)\n",
391 mdc_obd->obd_name, mdc_obd->obd_uuid.uuid,
392 atomic_read(&obd->obd_refcount));
394 if (lmv->targets_proc_entry != NULL) {
395 struct proc_dir_entry *mdc_symlink;
397 LASSERT(mdc_obd->obd_type != NULL);
398 LASSERT(mdc_obd->obd_type->typ_name != NULL);
399 mdc_symlink = lprocfs_add_symlink(mdc_obd->obd_name,
400 lmv->targets_proc_entry,
402 mdc_obd->obd_type->typ_name,
404 if (mdc_symlink == NULL) {
405 CERROR("cannot register LMV target "
406 "/proc/fs/lustre/%s/%s/target_obds/%s\n",
407 obd->obd_type->typ_name, obd->obd_name,
414 static void lmv_del_target(struct lmv_obd *lmv, int index)
416 if (lmv->tgts[index] == NULL)
419 OBD_FREE_PTR(lmv->tgts[index]);
420 lmv->tgts[index] = NULL;
424 static int lmv_add_target(struct obd_device *obd, struct obd_uuid *uuidp,
425 __u32 index, int gen)
427 struct obd_device *mdc_obd;
428 struct lmv_obd *lmv = &obd->u.lmv;
429 struct lmv_tgt_desc *tgt;
430 int orig_tgt_count = 0;
434 CDEBUG(D_CONFIG, "Target uuid: %s. index %d\n", uuidp->uuid, index);
435 mdc_obd = class_find_client_obd(uuidp, LUSTRE_MDC_NAME,
438 CERROR("%s: Target %s not attached: rc = %d\n",
439 obd->obd_name, uuidp->uuid, -EINVAL);
443 mutex_lock(&lmv->lmv_init_mutex);
444 if ((index < lmv->tgts_size) && (lmv->tgts[index] != NULL)) {
445 tgt = lmv->tgts[index];
446 CERROR("%s: UUID %s already assigned at LOV target index %d:"
447 " rc = %d\n", obd->obd_name,
448 obd_uuid2str(&tgt->ltd_uuid), index, -EEXIST);
449 mutex_unlock(&lmv->lmv_init_mutex);
453 if (index >= lmv->tgts_size) {
454 /* We need to reallocate the lmv target array. */
455 struct lmv_tgt_desc **newtgts, **old = NULL;
459 while (newsize < index + 1)
460 newsize = newsize << 1;
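/* Grow the target array in power-of-two steps so that adding targets
 * one at a time does not force a reallocation on every addition. */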
461 OBD_ALLOC(newtgts, sizeof(*newtgts) * newsize);
462 if (newtgts == NULL) {
463 mutex_unlock(&lmv->lmv_init_mutex);
467 if (lmv->tgts_size) {
468 memcpy(newtgts, lmv->tgts,
469 sizeof(*newtgts) * lmv->tgts_size);
471 oldsize = lmv->tgts_size;
475 lmv->tgts_size = newsize;
478 OBD_FREE(old, sizeof(*old) * oldsize);
480 CDEBUG(D_CONFIG, "tgts: %p size: %d\n", lmv->tgts,
486 mutex_unlock(&lmv->lmv_init_mutex);
490 mutex_init(&tgt->ltd_fid_mutex);
491 tgt->ltd_idx = index;
492 tgt->ltd_uuid = *uuidp;
494 lmv->tgts[index] = tgt;
495 if (index >= lmv->desc.ld_tgt_count) {
496 orig_tgt_count = lmv->desc.ld_tgt_count;
497 lmv->desc.ld_tgt_count = index + 1;
500 if (lmv->connected == 0) {
501 /* lmv_check_connect() will connect this target. */
502 mutex_unlock(&lmv->lmv_init_mutex);
506 /* Otherwise let's connect it ourselves */
507 mutex_unlock(&lmv->lmv_init_mutex);
508 rc = lmv_connect_mdc(obd, tgt);
510 spin_lock(&lmv->lmv_lock);
511 if (lmv->desc.ld_tgt_count == index + 1)
512 lmv->desc.ld_tgt_count = orig_tgt_count;
513 memset(tgt, 0, sizeof(*tgt));
514 spin_unlock(&lmv->lmv_lock);
516 int easize = sizeof(struct lmv_stripe_md) +
517 lmv->desc.ld_tgt_count * sizeof(struct lu_fid);
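/* A directory striped over every configured target needs an LMV EA
 * with one lu_fid per target, so refresh the cached maximum EA size
 * each time a target is added. */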
518 lmv_init_ea_size(obd->obd_self_export, easize, 0);
524 int lmv_check_connect(struct obd_device *obd)
526 struct lmv_obd *lmv = &obd->u.lmv;
527 struct lmv_tgt_desc *tgt;
536 mutex_lock(&lmv->lmv_init_mutex);
537 if (lmv->connected) {
538 mutex_unlock(&lmv->lmv_init_mutex);
542 if (lmv->desc.ld_tgt_count == 0) {
543 mutex_unlock(&lmv->lmv_init_mutex);
544 CERROR("%s: no targets configured.\n", obd->obd_name);
548 LASSERT(lmv->tgts != NULL);
550 if (lmv->tgts[0] == NULL) {
551 mutex_unlock(&lmv->lmv_init_mutex);
552 CERROR("%s: no target configured for index 0.\n",
557 CDEBUG(D_CONFIG, "Time to connect %s to %s\n",
558 lmv->cluuid.uuid, obd->obd_name);
560 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
564 rc = lmv_connect_mdc(obd, tgt);
569 class_export_put(lmv->exp);
571 easize = lmv_mds_md_size(lmv->desc.ld_tgt_count, LMV_MAGIC);
572 lmv_init_ea_size(obd->obd_self_export, easize, 0);
573 mutex_unlock(&lmv->lmv_init_mutex);
584 --lmv->desc.ld_active_tgt_count;
585 rc2 = obd_disconnect(tgt->ltd_exp);
587 CERROR("LMV target %s disconnect on "
588 "MDC idx %d: error %d\n",
589 tgt->ltd_uuid.uuid, i, rc2);
593 class_disconnect(lmv->exp);
594 mutex_unlock(&lmv->lmv_init_mutex);
598 static int lmv_disconnect_mdc(struct obd_device *obd, struct lmv_tgt_desc *tgt)
600 struct lmv_obd *lmv = &obd->u.lmv;
601 struct obd_device *mdc_obd;
605 LASSERT(tgt != NULL);
606 LASSERT(obd != NULL);
608 mdc_obd = class_exp2obd(tgt->ltd_exp);
611 mdc_obd->obd_force = obd->obd_force;
612 mdc_obd->obd_fail = obd->obd_fail;
613 mdc_obd->obd_no_recov = obd->obd_no_recov;
615 if (lmv->targets_proc_entry != NULL)
616 lprocfs_remove_proc_entry(mdc_obd->obd_name,
617 lmv->targets_proc_entry);
620 rc = obd_fid_fini(tgt->ltd_exp->exp_obd);
622 CERROR("Can't finanize fids factory\n");
624 CDEBUG(D_INFO, "Disconnected from %s(%s) successfully\n",
625 tgt->ltd_exp->exp_obd->obd_name,
626 tgt->ltd_exp->exp_obd->obd_uuid.uuid);
628 obd_register_observer(tgt->ltd_exp->exp_obd, NULL);
629 rc = obd_disconnect(tgt->ltd_exp);
631 if (tgt->ltd_active) {
632 CERROR("Target %s disconnect error %d\n",
633 tgt->ltd_uuid.uuid, rc);
637 lmv_activate_target(lmv, tgt, 0);
642 static int lmv_disconnect(struct obd_export *exp)
644 struct obd_device *obd = class_exp2obd(exp);
645 struct lmv_obd *lmv = &obd->u.lmv;
654 * Only disconnect the underlying layers on the final disconnect.
657 if (lmv->refcount != 0)
660 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
661 if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
664 lmv_disconnect_mdc(obd, lmv->tgts[i]);
667 if (lmv->targets_proc_entry != NULL)
668 lprocfs_remove(&lmv->targets_proc_entry);
670 CERROR("/proc/fs/lustre/%s/%s/target_obds missing\n",
671 obd->obd_type->typ_name, obd->obd_name);
675 * This is the case when no real connection is established by
676 * lmv_check_connect().
679 class_export_put(exp);
680 rc = class_disconnect(exp);
681 if (lmv->refcount == 0)
686 static int lmv_fid2path(struct obd_export *exp, int len, void *karg, void *uarg)
688 struct obd_device *obddev = class_exp2obd(exp);
689 struct lmv_obd *lmv = &obddev->u.lmv;
690 struct getinfo_fid2path *gf;
691 struct lmv_tgt_desc *tgt;
692 struct getinfo_fid2path *remote_gf = NULL;
693 int remote_gf_size = 0;
696 gf = (struct getinfo_fid2path *)karg;
697 tgt = lmv_find_target(lmv, &gf->gf_fid);
699 RETURN(PTR_ERR(tgt));
702 rc = obd_iocontrol(OBD_IOC_FID2PATH, tgt->ltd_exp, len, gf, uarg);
703 if (rc != 0 && rc != -EREMOTE)
704 GOTO(out_fid2path, rc);
706 /* If remote_gf != NULL, it means we have just built the
707  * path on the remote MDT; copy this path segment into gf. */
708 if (remote_gf != NULL) {
709 struct getinfo_fid2path *ori_gf;
712 ori_gf = (struct getinfo_fid2path *)karg;
713 if (strlen(ori_gf->gf_path) +
714 strlen(gf->gf_path) > ori_gf->gf_pathlen)
715 GOTO(out_fid2path, rc = -EOVERFLOW);
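/* Prepend the path segment just built on the remote MDT in front of
 * the path assembled so far, leaving room for a '/' separator. */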
717 ptr = ori_gf->gf_path;
719 memmove(ptr + strlen(gf->gf_path) + 1, ptr,
720 strlen(ori_gf->gf_path));
722 strncpy(ptr, gf->gf_path, strlen(gf->gf_path));
723 ptr += strlen(gf->gf_path);
727 CDEBUG(D_INFO, "%s: get path %s "DFID" rec: "LPU64" ln: %u\n",
728 tgt->ltd_exp->exp_obd->obd_name,
729 gf->gf_path, PFID(&gf->gf_fid), gf->gf_recno,
733 GOTO(out_fid2path, rc);
735 /* Sigh, we have to go to another MDT to continue building the path. */
736 if (remote_gf == NULL) {
737 remote_gf_size = sizeof(*remote_gf) + PATH_MAX;
738 OBD_ALLOC(remote_gf, remote_gf_size);
739 if (remote_gf == NULL)
740 GOTO(out_fid2path, rc = -ENOMEM);
741 remote_gf->gf_pathlen = PATH_MAX;
744 if (!fid_is_sane(&gf->gf_fid)) {
745 CERROR("%s: invalid FID "DFID": rc = %d\n",
746 tgt->ltd_exp->exp_obd->obd_name,
747 PFID(&gf->gf_fid), -EINVAL);
748 GOTO(out_fid2path, rc = -EINVAL);
751 tgt = lmv_find_target(lmv, &gf->gf_fid);
753 GOTO(out_fid2path, rc = -EINVAL);
755 remote_gf->gf_fid = gf->gf_fid;
756 remote_gf->gf_recno = -1;
757 remote_gf->gf_linkno = -1;
758 memset(remote_gf->gf_path, 0, remote_gf->gf_pathlen);
760 goto repeat_fid2path;
763 if (remote_gf != NULL)
764 OBD_FREE(remote_gf, remote_gf_size);
768 static int lmv_hsm_req_count(struct lmv_obd *lmv,
769 const struct hsm_user_request *hur,
770 const struct lmv_tgt_desc *tgt_mds)
774 struct lmv_tgt_desc *curr_tgt;
776 /* count how many requests must be sent to the given target */
777 for (i = 0; i < hur->hur_request.hr_itemcount; i++) {
778 curr_tgt = lmv_find_target(lmv, &hur->hur_user_item[i].hui_fid);
779 if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid))
785 static void lmv_hsm_req_build(struct lmv_obd *lmv,
786 struct hsm_user_request *hur_in,
787 const struct lmv_tgt_desc *tgt_mds,
788 struct hsm_user_request *hur_out)
791 struct lmv_tgt_desc *curr_tgt;
793 /* build the hsm_user_request for the given target */
794 hur_out->hur_request = hur_in->hur_request;
796 for (i = 0; i < hur_in->hur_request.hr_itemcount; i++) {
797 curr_tgt = lmv_find_target(lmv,
798 &hur_in->hur_user_item[i].hui_fid);
799 if (obd_uuid_equals(&curr_tgt->ltd_uuid, &tgt_mds->ltd_uuid)) {
800 hur_out->hur_user_item[nr_out] =
801 hur_in->hur_user_item[i];
805 hur_out->hur_request.hr_itemcount = nr_out;
806 memcpy(hur_data(hur_out), hur_data(hur_in),
807 hur_in->hur_request.hr_data_len);
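/* The opaque data following the item array is copied verbatim; hur_out
 * is assumed to have been sized using lmv_hsm_req_count() so that both
 * the selected items and this data fit. */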
810 static int lmv_hsm_ct_unregister(struct lmv_obd *lmv, unsigned int cmd, int len,
811 struct lustre_kernelcomm *lk, void *uarg)
817 /* unregister request (call from llapi_hsm_copytool_fini) */
818 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
819 struct lmv_tgt_desc *tgt = lmv->tgts[i];
821 if (tgt == NULL || tgt->ltd_exp == NULL)
823 /* best effort: try to clean as much as possible
824 * (continue on error) */
825 obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
828 /* Whatever the result, remove copytool from kuc groups.
829 * Unreached coordinators will get EPIPE on next requests
830 * and will unregister automatically.
832 rc = libcfs_kkuc_group_rem(lk->lk_uid, lk->lk_group);
837 static int lmv_hsm_ct_register(struct lmv_obd *lmv, unsigned int cmd, int len,
838 struct lustre_kernelcomm *lk, void *uarg)
843 bool any_set = false;
844 struct kkuc_ct_data kcd = { 0 };
847 /* All or nothing: try to register with all MDSs.
848  * In case of failure, unregister from the previous MDSs,
849  * except if the failure was because of an inactive target. */
850 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
851 struct lmv_tgt_desc *tgt = lmv->tgts[i];
853 if (tgt == NULL || tgt->ltd_exp == NULL)
855 err = obd_iocontrol(cmd, tgt->ltd_exp, len, lk, uarg);
857 if (tgt->ltd_active) {
858 /* permanent error */
859 CERROR("%s: iocontrol MDC %s on MDT"
860 " idx %d cmd %x: err = %d\n",
861 class_exp2obd(lmv->exp)->obd_name,
862 tgt->ltd_uuid.uuid, i, cmd, err);
864 lk->lk_flags |= LK_FLG_STOP;
865 /* unregister from previous MDS */
866 for (j = 0; j < i; j++) {
868 if (tgt == NULL || tgt->ltd_exp == NULL)
870 obd_iocontrol(cmd, tgt->ltd_exp, len,
875 /* else: transient error.
876 * kuc will register to the missing MDT
884 /* no registration done: return error */
887 /* at least one registration done, with no failure */
888 filp = fget(lk->lk_wfd);
892 kcd.kcd_magic = KKUC_CT_DATA_MAGIC;
893 kcd.kcd_uuid = lmv->cluuid;
894 kcd.kcd_archive = lk->lk_data;
896 rc = libcfs_kkuc_group_add(filp, lk->lk_uid, lk->lk_group,
907 static int lmv_iocontrol(unsigned int cmd, struct obd_export *exp,
908 int len, void *karg, void *uarg)
910 struct obd_device *obddev = class_exp2obd(exp);
911 struct lmv_obd *lmv = &obddev->u.lmv;
912 struct lmv_tgt_desc *tgt = NULL;
916 __u32 count = lmv->desc.ld_tgt_count;
923 case IOC_OBD_STATFS: {
924 struct obd_ioctl_data *data = karg;
925 struct obd_device *mdc_obd;
926 struct obd_statfs stat_buf = {0};
929 memcpy(&index, data->ioc_inlbuf2, sizeof(__u32));
930 if ((index >= count))
933 tgt = lmv->tgts[index];
934 if (tgt == NULL || !tgt->ltd_active)
937 mdc_obd = class_exp2obd(tgt->ltd_exp);
942 if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(mdc_obd),
943 min((int) data->ioc_plen2,
944 (int) sizeof(struct obd_uuid))))
947 rc = obd_statfs(NULL, tgt->ltd_exp, &stat_buf,
948 cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
952 if (copy_to_user(data->ioc_pbuf1, &stat_buf,
953 min((int) data->ioc_plen1,
954 (int) sizeof(stat_buf))))
958 case OBD_IOC_QUOTACTL: {
959 struct if_quotactl *qctl = karg;
960 struct obd_quotactl *oqctl;
962 if (qctl->qc_valid == QC_MDTIDX) {
963 if (count <= qctl->qc_idx)
966 tgt = lmv->tgts[qctl->qc_idx];
967 if (tgt == NULL || tgt->ltd_exp == NULL)
969 } else if (qctl->qc_valid == QC_UUID) {
970 for (i = 0; i < count; i++) {
974 if (!obd_uuid_equals(&tgt->ltd_uuid,
978 if (tgt->ltd_exp == NULL)
990 LASSERT(tgt != NULL && tgt->ltd_exp != NULL);
991 OBD_ALLOC_PTR(oqctl);
995 QCTL_COPY(oqctl, qctl);
996 rc = obd_quotactl(tgt->ltd_exp, oqctl);
998 QCTL_COPY(qctl, oqctl);
999 qctl->qc_valid = QC_MDTIDX;
1000 qctl->obd_uuid = tgt->ltd_uuid;
1002 OBD_FREE_PTR(oqctl);
1005 case OBD_IOC_CHANGELOG_SEND:
1006 case OBD_IOC_CHANGELOG_CLEAR: {
1007 struct ioc_changelog *icc = karg;
1009 if (icc->icc_mdtindex >= count)
1012 tgt = lmv->tgts[icc->icc_mdtindex];
1013 if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active)
1015 rc = obd_iocontrol(cmd, tgt->ltd_exp, sizeof(*icc), icc, NULL);
1018 case LL_IOC_GET_CONNECT_FLAGS: {
1020 if (tgt == NULL || tgt->ltd_exp == NULL)
1022 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1025 case LL_IOC_FID2MDTIDX: {
1026 struct lu_fid *fid = karg;
1029 rc = lmv_fld_lookup(lmv, fid, &mdt_index);
1033 /* Note: this is from llite (see ll_dir_ioctl()); @uarg does not
1034  * point to user space memory for FID2MDTIDX. */
1035 *(__u32 *)uarg = mdt_index;
1038 case OBD_IOC_FID2PATH: {
1039 rc = lmv_fid2path(exp, len, karg, uarg);
1042 case LL_IOC_HSM_STATE_GET:
1043 case LL_IOC_HSM_STATE_SET:
1044 case LL_IOC_HSM_ACTION: {
1045 struct md_op_data *op_data = karg;
1047 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1049 RETURN(PTR_ERR(tgt));
1051 if (tgt->ltd_exp == NULL)
1054 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1057 case LL_IOC_HSM_PROGRESS: {
1058 const struct hsm_progress_kernel *hpk = karg;
1060 tgt = lmv_find_target(lmv, &hpk->hpk_fid);
1062 RETURN(PTR_ERR(tgt));
1063 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1066 case LL_IOC_HSM_REQUEST: {
1067 struct hsm_user_request *hur = karg;
1068 unsigned int reqcount = hur->hur_request.hr_itemcount;
1073 /* if the request is about a single fid
1074 * or if there is a single MDS, no need to split
1076 if (reqcount == 1 || count == 1) {
1077 tgt = lmv_find_target(lmv,
1078 &hur->hur_user_item[0].hui_fid);
1080 RETURN(PTR_ERR(tgt));
1081 rc = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1083 /* split fid list to their respective MDS */
1084 for (i = 0; i < count; i++) {
1085 unsigned int nr, reqlen;
1087 struct hsm_user_request *req;
1090 if (tgt == NULL || tgt->ltd_exp == NULL)
1093 nr = lmv_hsm_req_count(lmv, hur, tgt);
1094 if (nr == 0) /* nothing for this MDS */
1097 /* build a request with fids for this MDS */
1098 reqlen = offsetof(typeof(*hur),
1100 + hur->hur_request.hr_data_len;
1101 OBD_ALLOC_LARGE(req, reqlen);
1105 lmv_hsm_req_build(lmv, hur, tgt, req);
1107 rc1 = obd_iocontrol(cmd, tgt->ltd_exp, reqlen,
1109 if (rc1 != 0 && rc == 0)
1111 OBD_FREE_LARGE(req, reqlen);
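/* Keep only the first error, but continue issuing the split requests
 * to the remaining MDTs. */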
1116 case LL_IOC_LOV_SWAP_LAYOUTS: {
1117 struct md_op_data *op_data = karg;
1118 struct lmv_tgt_desc *tgt1, *tgt2;
1120 tgt1 = lmv_find_target(lmv, &op_data->op_fid1);
1122 RETURN(PTR_ERR(tgt1));
1124 tgt2 = lmv_find_target(lmv, &op_data->op_fid2);
1126 RETURN(PTR_ERR(tgt2));
1128 if ((tgt1->ltd_exp == NULL) || (tgt2->ltd_exp == NULL))
1131 /* only files on same MDT can have their layouts swapped */
1132 if (tgt1->ltd_idx != tgt2->ltd_idx)
1135 rc = obd_iocontrol(cmd, tgt1->ltd_exp, len, karg, uarg);
1138 case LL_IOC_HSM_CT_START: {
1139 struct lustre_kernelcomm *lk = karg;
1140 if (lk->lk_flags & LK_FLG_STOP)
1141 rc = lmv_hsm_ct_unregister(lmv, cmd, len, lk, uarg);
1143 rc = lmv_hsm_ct_register(lmv, cmd, len, lk, uarg);
1147 for (i = 0; i < count; i++) {
1148 struct obd_device *mdc_obd;
1152 if (tgt == NULL || tgt->ltd_exp == NULL)
1154 /* ll_umount_begin() sets the force flag on the lmv, not the
1155  * mdc. Let's pass it through. */
1156 mdc_obd = class_exp2obd(tgt->ltd_exp);
1157 mdc_obd->obd_force = obddev->obd_force;
1158 err = obd_iocontrol(cmd, tgt->ltd_exp, len, karg, uarg);
1160 if (tgt->ltd_active) {
1161 CERROR("error: iocontrol MDC %s on MDT"
1162 " idx %d cmd %x: err = %d\n",
1163 tgt->ltd_uuid.uuid, i, cmd, err);
1177 static int lmv_all_chars_policy(int count, const char *name,
1188 static int lmv_nid_policy(struct lmv_obd *lmv)
1190 struct obd_import *imp;
1194 * XXX: To get the nid we assume that the underlying obd device is an mdc.
1196 imp = class_exp2cliimp(lmv->tgts[0]->ltd_exp);
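/* Fold the client's own 64-bit NID down to 32 bits and use it to pick
 * a target, so that different clients spread their inode creations
 * across the available MDTs. */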
1197 id = imp->imp_connection->c_self ^ (imp->imp_connection->c_self >> 32);
1198 return id % lmv->desc.ld_tgt_count;
1201 static int lmv_choose_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
1202 placement_policy_t placement)
1204 switch (placement) {
1205 case PLACEMENT_CHAR_POLICY:
1206 return lmv_all_chars_policy(lmv->desc.ld_tgt_count,
1208 op_data->op_namelen);
1209 case PLACEMENT_NID_POLICY:
1210 return lmv_nid_policy(lmv);
1216 CERROR("Unsupported placement policy %x\n", placement);
1222 * This is the _inode_ placement policy function (not the name placement one).
1224 static int lmv_placement_policy(struct obd_device *obd,
1225 struct md_op_data *op_data, u32 *mds)
1227 struct lmv_obd *lmv = &obd->u.lmv;
1230 LASSERT(mds != NULL);
1232 if (lmv->desc.ld_tgt_count == 1) {
1237 if (op_data->op_default_stripe_offset != -1) {
1238 *mds = op_data->op_default_stripe_offset;
1243 * If stripe_offset is provided during setdirstripe
1244 * (setdirstripe -i xx), the xx MDS will be chosen.
1246 if (op_data->op_cli_flags & CLI_SET_MEA && op_data->op_data != NULL) {
1247 struct lmv_user_md *lum;
1249 lum = op_data->op_data;
1251 if (le32_to_cpu(lum->lum_stripe_offset) != (__u32)-1) {
1252 *mds = le32_to_cpu(lum->lum_stripe_offset);
1254 /* -1 means default, which will be in the same MDT with
1256 *mds = op_data->op_mds;
1257 lum->lum_stripe_offset = cpu_to_le32(op_data->op_mds);
1260 /* Allocate new fid on target according to operation type and
1261 * parent home mds. */
1262 *mds = op_data->op_mds;
1268 int __lmv_fid_alloc(struct lmv_obd *lmv, struct lu_fid *fid, u32 mds)
1270 struct lmv_tgt_desc *tgt;
1274 tgt = lmv_get_target(lmv, mds, NULL);
1276 RETURN(PTR_ERR(tgt));
1279 * New seq allocation and FLD setup should be atomic. Otherwise we may find
1280 * on the server that the seq in the newly allocated fid is not yet known.
1282 mutex_lock(&tgt->ltd_fid_mutex);
1284 if (tgt->ltd_active == 0 || tgt->ltd_exp == NULL)
1285 GOTO(out, rc = -ENODEV);
1288 * Ask the underlying tgt layer to allocate a new fid.
1290 rc = obd_fid_alloc(NULL, tgt->ltd_exp, fid, NULL);
1292 LASSERT(fid_is_sane(fid));
1298 mutex_unlock(&tgt->ltd_fid_mutex);
1302 int lmv_fid_alloc(const struct lu_env *env, struct obd_export *exp,
1303 struct lu_fid *fid, struct md_op_data *op_data)
1305 struct obd_device *obd = class_exp2obd(exp);
1306 struct lmv_obd *lmv = &obd->u.lmv;
1311 LASSERT(op_data != NULL);
1312 LASSERT(fid != NULL);
1314 rc = lmv_placement_policy(obd, op_data, &mds);
1316 CERROR("Can't get target for allocating fid, "
1321 rc = __lmv_fid_alloc(lmv, fid, mds);
1323 CERROR("Can't alloc new fid, rc %d\n", rc);
1330 static int lmv_setup(struct obd_device *obd, struct lustre_cfg *lcfg)
1332 struct lmv_obd *lmv = &obd->u.lmv;
1333 struct lmv_desc *desc;
1337 if (LUSTRE_CFG_BUFLEN(lcfg, 1) < 1) {
1338 CERROR("LMV setup requires a descriptor\n");
1342 desc = (struct lmv_desc *)lustre_cfg_buf(lcfg, 1);
1343 if (sizeof(*desc) > LUSTRE_CFG_BUFLEN(lcfg, 1)) {
1344 CERROR("Lmv descriptor size wrong: %d > %d\n",
1345 (int)sizeof(*desc), LUSTRE_CFG_BUFLEN(lcfg, 1));
1349 lmv->tgts_size = 32U;
1350 OBD_ALLOC(lmv->tgts, sizeof(*lmv->tgts) * lmv->tgts_size);
1351 if (lmv->tgts == NULL)
1354 obd_str2uuid(&lmv->desc.ld_uuid, desc->ld_uuid.uuid);
1355 lmv->desc.ld_tgt_count = 0;
1356 lmv->desc.ld_active_tgt_count = 0;
1357 lmv->max_def_easize = 0;
1358 lmv->max_easize = 0;
1359 lmv->lmv_placement = PLACEMENT_CHAR_POLICY;
1361 spin_lock_init(&lmv->lmv_lock);
1362 mutex_init(&lmv->lmv_init_mutex);
1364 #ifdef CONFIG_PROC_FS
1365 obd->obd_vars = lprocfs_lmv_obd_vars;
1366 lprocfs_obd_setup(obd);
1367 lprocfs_alloc_md_stats(obd, 0);
1368 rc = lprocfs_seq_create(obd->obd_proc_entry, "target_obd",
1369 0444, &lmv_proc_target_fops, obd);
1371 CWARN("%s: error adding LMV target_obd file: rc = %d\n",
1374 rc = fld_client_init(&lmv->lmv_fld, obd->obd_name,
1375 LUSTRE_CLI_FLD_HASH_DHT);
1377 CERROR("Can't init FLD, err %d\n", rc);
1387 static int lmv_cleanup(struct obd_device *obd)
1389 struct lmv_obd *lmv = &obd->u.lmv;
1392 fld_client_fini(&lmv->lmv_fld);
1393 if (lmv->tgts != NULL) {
1395 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1396 if (lmv->tgts[i] == NULL)
1398 lmv_del_target(lmv, i);
1400 OBD_FREE(lmv->tgts, sizeof(*lmv->tgts) * lmv->tgts_size);
1406 static int lmv_process_config(struct obd_device *obd, size_t len, void *buf)
1408 struct lustre_cfg *lcfg = buf;
1409 struct obd_uuid obd_uuid;
1415 switch (lcfg->lcfg_command) {
1417 /* modify_mdc_tgts add 0:lustre-clilmv 1:lustre-MDT0000_UUID
1418 * 2:0 3:1 4:lustre-MDT0000-mdc_UUID */
1419 if (LUSTRE_CFG_BUFLEN(lcfg, 1) > sizeof(obd_uuid.uuid))
1420 GOTO(out, rc = -EINVAL);
1422 obd_str2uuid(&obd_uuid, lustre_cfg_buf(lcfg, 1));
1424 if (sscanf(lustre_cfg_buf(lcfg, 2), "%u", &index) != 1)
1425 GOTO(out, rc = -EINVAL);
1426 if (sscanf(lustre_cfg_buf(lcfg, 3), "%d", &gen) != 1)
1427 GOTO(out, rc = -EINVAL);
1428 rc = lmv_add_target(obd, &obd_uuid, index, gen);
1431 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
1432 GOTO(out, rc = -EINVAL);
1438 static int lmv_statfs(const struct lu_env *env, struct obd_export *exp,
1439 struct obd_statfs *osfs, __u64 max_age, __u32 flags)
1441 struct obd_device *obd = class_exp2obd(exp);
1442 struct lmv_obd *lmv = &obd->u.lmv;
1443 struct obd_statfs *temp;
1448 rc = lmv_check_connect(obd);
1452 OBD_ALLOC(temp, sizeof(*temp));
1456 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1457 if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
1460 rc = obd_statfs(env, lmv->tgts[i]->ltd_exp, temp,
1463 CERROR("can't stat MDS #%d (%s), error %d\n", i,
1464 lmv->tgts[i]->ltd_exp->exp_obd->obd_name,
1466 GOTO(out_free_temp, rc);
1471 /* If the statfs is from mount, it only needs to
1472  * retrieve the necessary information from MDT0,
1473  * i.e. mount does not need the merged osfs.
1475  * This also means clients can be mounted as long as
1476  * MDT0 is in service. */
1477 if (flags & OBD_STATFS_FOR_MDT0)
1478 GOTO(out_free_temp, rc);
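/* Aggregate the space and inode counters across all MDTs. */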
1480 osfs->os_bavail += temp->os_bavail;
1481 osfs->os_blocks += temp->os_blocks;
1482 osfs->os_ffree += temp->os_ffree;
1483 osfs->os_files += temp->os_files;
1489 OBD_FREE(temp, sizeof(*temp));
1493 static int lmv_getstatus(struct obd_export *exp, struct lu_fid *fid)
1495 struct obd_device *obd = exp->exp_obd;
1496 struct lmv_obd *lmv = &obd->u.lmv;
1500 rc = lmv_check_connect(obd);
1504 rc = md_getstatus(lmv->tgts[0]->ltd_exp, fid);
1508 static int lmv_getxattr(struct obd_export *exp, const struct lu_fid *fid,
1509 u64 valid, const char *name,
1510 const char *input, int input_size, int output_size,
1511 int flags, struct ptlrpc_request **request)
1513 struct obd_device *obd = exp->exp_obd;
1514 struct lmv_obd *lmv = &obd->u.lmv;
1515 struct lmv_tgt_desc *tgt;
1519 rc = lmv_check_connect(obd);
1523 tgt = lmv_find_target(lmv, fid);
1525 RETURN(PTR_ERR(tgt));
1527 rc = md_getxattr(tgt->ltd_exp, fid, valid, name, input,
1528 input_size, output_size, flags, request);
1533 static int lmv_setxattr(struct obd_export *exp, const struct lu_fid *fid,
1534 u64 valid, const char *name,
1535 const char *input, int input_size, int output_size,
1536 int flags, __u32 suppgid,
1537 struct ptlrpc_request **request)
1539 struct obd_device *obd = exp->exp_obd;
1540 struct lmv_obd *lmv = &obd->u.lmv;
1541 struct lmv_tgt_desc *tgt;
1545 rc = lmv_check_connect(obd);
1549 tgt = lmv_find_target(lmv, fid);
1551 RETURN(PTR_ERR(tgt));
1553 rc = md_setxattr(tgt->ltd_exp, fid, valid, name, input,
1554 input_size, output_size, flags, suppgid,
1560 static int lmv_getattr(struct obd_export *exp, struct md_op_data *op_data,
1561 struct ptlrpc_request **request)
1563 struct obd_device *obd = exp->exp_obd;
1564 struct lmv_obd *lmv = &obd->u.lmv;
1565 struct lmv_tgt_desc *tgt;
1569 rc = lmv_check_connect(obd);
1573 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1575 RETURN(PTR_ERR(tgt));
1577 if (op_data->op_flags & MF_GET_MDT_IDX) {
1578 op_data->op_mds = tgt->ltd_idx;
1582 rc = md_getattr(tgt->ltd_exp, op_data, request);
1587 static int lmv_null_inode(struct obd_export *exp, const struct lu_fid *fid)
1589 struct obd_device *obd = exp->exp_obd;
1590 struct lmv_obd *lmv = &obd->u.lmv;
1595 rc = lmv_check_connect(obd);
1599 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1602 * With DNE every object can have two locks in different namespaces:
1603 * a lookup lock on the MDT storing the direntry and an update/open
1604 * lock on the MDT storing the inode.
1606 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
1607 if (lmv->tgts[i] == NULL || lmv->tgts[i]->ltd_exp == NULL)
1609 md_null_inode(lmv->tgts[i]->ltd_exp, fid);
1615 static int lmv_find_cbdata(struct obd_export *exp, const struct lu_fid *fid,
1616 ldlm_iterator_t it, void *data)
1618 struct obd_device *obd = exp->exp_obd;
1619 struct lmv_obd *lmv = &obd->u.lmv;
1625 rc = lmv_check_connect(obd);
1629 CDEBUG(D_INODE, "CBDATA for "DFID"\n", PFID(fid));
1632 * With DNE every object can have two locks in different namespaces:
1633 * a lookup lock on the MDT storing the direntry and an update/open lock
1634 * on the MDT storing the inode. Try the MDT that the FID maps to first,
1635 * since this can be easily found, and only try the others if that fails.
1637 for (i = 0, tgt = lmv_find_target_index(lmv, fid);
1638 i < lmv->desc.ld_tgt_count;
1639 i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
1641 CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
1642 obd->obd_name, PFID(fid), tgt);
1646 if (lmv->tgts[tgt] == NULL ||
1647 lmv->tgts[tgt]->ltd_exp == NULL)
1650 rc = md_find_cbdata(lmv->tgts[tgt]->ltd_exp, fid, it, data);
1659 static int lmv_close(struct obd_export *exp, struct md_op_data *op_data,
1660 struct md_open_data *mod, struct ptlrpc_request **request)
1662 struct obd_device *obd = exp->exp_obd;
1663 struct lmv_obd *lmv = &obd->u.lmv;
1664 struct lmv_tgt_desc *tgt;
1668 rc = lmv_check_connect(obd);
1672 tgt = lmv_find_target(lmv, &op_data->op_fid1);
1674 RETURN(PTR_ERR(tgt));
1676 CDEBUG(D_INODE, "CLOSE "DFID"\n", PFID(&op_data->op_fid1));
1677 rc = md_close(tgt->ltd_exp, op_data, mod, request);
1682 * Choose the MDT by name or FID in @op_data.
1683 * For a non-striped directory, it locates the MDT by fid.
1684 * For a striped directory, it locates the MDT by name, and it also
1685 * resets op_fid1 to the FID of the chosen stripe.
1687 struct lmv_tgt_desc *
1688 lmv_locate_target_for_name(struct lmv_obd *lmv, struct lmv_stripe_md *lsm,
1689 const char *name, int namelen, struct lu_fid *fid,
1692 struct lmv_tgt_desc *tgt;
1693 const struct lmv_oinfo *oinfo;
1695 if (OBD_FAIL_CHECK(OBD_FAIL_LFSCK_BAD_NAME_HASH)) {
1696 if (cfs_fail_val >= lsm->lsm_md_stripe_count)
1697 RETURN(ERR_PTR(-EBADF));
1698 oinfo = &lsm->lsm_md_oinfo[cfs_fail_val];
1700 oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
1702 RETURN(ERR_CAST(oinfo));
1706 *fid = oinfo->lmo_fid;
1708 *mds = oinfo->lmo_mds;
1710 tgt = lmv_get_target(lmv, oinfo->lmo_mds, NULL);
1712 CDEBUG(D_INFO, "locate on mds %u "DFID"\n", oinfo->lmo_mds,
1713 PFID(&oinfo->lmo_fid));
1718 * Locate the mds by fid or name
1720 * For a striped directory (lsm != NULL), it will locate the stripe
1721 * by name hash (see lsm_name_to_stripe_info()). Note: if the hash_type
1722 * is unknown, it will return -EBADFD, and lmv_intent_lookup might need to
1723 * walk through all of the stripes to locate the entry.
1725 * For a normal directory, it will locate the MDS by FID directly.
1726 * \param[in] lmv LMV device
1727 * \param[in] op_data client MD stack parameters, name, namelen
1729 * \param[in] fid object FID used to locate MDS.
1731 * \retval pointer to the lmv_tgt_desc on success,
1732 *         ERR_PTR(errno) on failure.
1734 struct lmv_tgt_desc*
1735 lmv_locate_mds(struct lmv_obd *lmv, struct md_op_data *op_data,
1738 struct lmv_stripe_md *lsm = op_data->op_mea1;
1739 struct lmv_tgt_desc *tgt;
1741 /* During creation of a VOLATILE file, it should honor the mdt
1742  * index if the file under the striped dir is being restored, see
1744 if (op_data->op_bias & MDS_CREATE_VOLATILE &&
1745 (int)op_data->op_mds != -1 && lsm != NULL) {
1747 tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
1751 /* refill the right parent fid */
1752 for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
1753 struct lmv_oinfo *oinfo;
1755 oinfo = &lsm->lsm_md_oinfo[i];
1756 if (oinfo->lmo_mds == op_data->op_mds) {
1757 *fid = oinfo->lmo_fid;
1762 /* Hmm, cannot find the stripe by mdt_index (op_mds) */
1763 if (i == lsm->lsm_md_stripe_count)
1764 tgt = ERR_PTR(-EINVAL);
1769 if (lsm == NULL || op_data->op_namelen == 0) {
1770 tgt = lmv_find_target(lmv, fid);
1774 op_data->op_mds = tgt->ltd_idx;
1778 return lmv_locate_target_for_name(lmv, lsm, op_data->op_name,
1779 op_data->op_namelen, fid,
1783 int lmv_create(struct obd_export *exp, struct md_op_data *op_data,
1784 const void *data, size_t datalen, umode_t mode, uid_t uid,
1785 gid_t gid, cfs_cap_t cap_effective, __u64 rdev,
1786 struct ptlrpc_request **request)
1788 struct obd_device *obd = exp->exp_obd;
1789 struct lmv_obd *lmv = &obd->u.lmv;
1790 struct lmv_tgt_desc *tgt;
1794 rc = lmv_check_connect(obd);
1798 if (!lmv->desc.ld_active_tgt_count)
1801 tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
1803 RETURN(PTR_ERR(tgt));
1805 CDEBUG(D_INODE, "CREATE name '%.*s' on "DFID" -> mds #%x\n",
1806 (int)op_data->op_namelen, op_data->op_name,
1807 PFID(&op_data->op_fid1), op_data->op_mds);
1809 rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
1812 if (exp_connect_flags(exp) & OBD_CONNECT_DIR_STRIPE) {
1813 /* Send the create request to the MDT where the object
1814 * will be located */
1815 tgt = lmv_find_target(lmv, &op_data->op_fid2);
1817 RETURN(PTR_ERR(tgt));
1819 op_data->op_mds = tgt->ltd_idx;
1821 CDEBUG(D_CONFIG, "Server doesn't support striped dirs\n");
1824 CDEBUG(D_INODE, "CREATE obj "DFID" -> mds #%x\n",
1825 PFID(&op_data->op_fid2), op_data->op_mds);
1827 op_data->op_flags |= MF_MDC_CANCEL_FID1;
1828 rc = md_create(tgt->ltd_exp, op_data, data, datalen, mode, uid, gid,
1829 cap_effective, rdev, request);
1831 if (*request == NULL)
1833 CDEBUG(D_INODE, "Created - "DFID"\n", PFID(&op_data->op_fid2));
1839 lmv_enqueue(struct obd_export *exp, struct ldlm_enqueue_info *einfo,
1840 const union ldlm_policy_data *policy,
1841 struct lookup_intent *it, struct md_op_data *op_data,
1842 struct lustre_handle *lockh, __u64 extra_lock_flags)
1844 struct obd_device *obd = exp->exp_obd;
1845 struct lmv_obd *lmv = &obd->u.lmv;
1846 struct lmv_tgt_desc *tgt;
1850 rc = lmv_check_connect(obd);
1854 CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID"\n",
1855 LL_IT2STR(it), PFID(&op_data->op_fid1));
1857 tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
1859 RETURN(PTR_ERR(tgt));
1861 CDEBUG(D_INODE, "ENQUEUE '%s' on "DFID" -> mds #%u\n",
1862 LL_IT2STR(it), PFID(&op_data->op_fid1), tgt->ltd_idx);
1864 rc = md_enqueue(tgt->ltd_exp, einfo, policy, it, op_data, lockh,
1871 lmv_getattr_name(struct obd_export *exp,struct md_op_data *op_data,
1872 struct ptlrpc_request **preq)
1874 struct ptlrpc_request *req = NULL;
1875 struct obd_device *obd = exp->exp_obd;
1876 struct lmv_obd *lmv = &obd->u.lmv;
1877 struct lmv_tgt_desc *tgt;
1878 struct mdt_body *body;
1882 rc = lmv_check_connect(obd);
1886 tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
1888 RETURN(PTR_ERR(tgt));
1890 CDEBUG(D_INODE, "GETATTR_NAME for %*s on "DFID" -> mds #%d\n",
1891 (int)op_data->op_namelen, op_data->op_name,
1892 PFID(&op_data->op_fid1), tgt->ltd_idx);
1894 rc = md_getattr_name(tgt->ltd_exp, op_data, preq);
1898 body = req_capsule_server_get(&(*preq)->rq_pill, &RMF_MDT_BODY);
1899 LASSERT(body != NULL);
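/* A reply flagged with OBD_MD_MDS means the name resolved to a remote
 * object on another MDT; repeat the getattr on that MDT using the
 * returned FID with OBD_MD_FLCROSSREF set. */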
1901 if (body->mbo_valid & OBD_MD_MDS) {
1902 struct lu_fid rid = body->mbo_fid1;
1903 CDEBUG(D_INODE, "Request attrs for "DFID"\n",
1906 tgt = lmv_find_target(lmv, &rid);
1908 ptlrpc_req_finished(*preq);
1910 RETURN(PTR_ERR(tgt));
1913 op_data->op_fid1 = rid;
1914 op_data->op_valid |= OBD_MD_FLCROSSREF;
1915 op_data->op_namelen = 0;
1916 op_data->op_name = NULL;
1917 rc = md_getattr_name(tgt->ltd_exp, op_data, &req);
1918 ptlrpc_req_finished(*preq);
1925 #define md_op_data_fid(op_data, fl) \
1926 (fl == MF_MDC_CANCEL_FID1 ? &op_data->op_fid1 : \
1927 fl == MF_MDC_CANCEL_FID2 ? &op_data->op_fid2 : \
1928 fl == MF_MDC_CANCEL_FID3 ? &op_data->op_fid3 : \
1929 fl == MF_MDC_CANCEL_FID4 ? &op_data->op_fid4 : \
1930 NULL)
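/*
 * Cancel locks on an inode that lives on a different MDT than the one
 * that will execute the current operation (op_tgt).  If the inode is
 * local to op_tgt, the cancel is left to that MDT and the corresponding
 * MF_MDC_CANCEL_* flag is set in op_data instead.
 */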
1932 static int lmv_early_cancel(struct obd_export *exp, struct lmv_tgt_desc *tgt,
1933 struct md_op_data *op_data, __u32 op_tgt,
1934 enum ldlm_mode mode, int bits, int flag)
1936 struct lu_fid *fid = md_op_data_fid(op_data, flag);
1937 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
1938 union ldlm_policy_data policy = { { 0 } };
1942 if (!fid_is_sane(fid))
1946 tgt = lmv_find_target(lmv, fid);
1948 RETURN(PTR_ERR(tgt));
1951 if (tgt->ltd_idx != op_tgt) {
1952 CDEBUG(D_INODE, "EARLY_CANCEL on "DFID"\n", PFID(fid));
1953 policy.l_inodebits.bits = bits;
1954 rc = md_cancel_unused(tgt->ltd_exp, fid, &policy,
1955 mode, LCF_ASYNC, NULL);
1958 "EARLY_CANCEL skip operation target %d on "DFID"\n",
1960 op_data->op_flags |= flag;
1968 * llite passes the fid of the target inode in op_data->op_fid1 and the fid of the directory in
1971 static int lmv_link(struct obd_export *exp, struct md_op_data *op_data,
1972 struct ptlrpc_request **request)
1974 struct obd_device *obd = exp->exp_obd;
1975 struct lmv_obd *lmv = &obd->u.lmv;
1976 struct lmv_tgt_desc *tgt;
1980 rc = lmv_check_connect(obd);
1984 LASSERT(op_data->op_namelen != 0);
1986 CDEBUG(D_INODE, "LINK "DFID":%*s to "DFID"\n",
1987 PFID(&op_data->op_fid2), (int)op_data->op_namelen,
1988 op_data->op_name, PFID(&op_data->op_fid1));
1990 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
1991 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
1992 op_data->op_cap = cfs_curproc_cap_pack();
1993 if (op_data->op_mea2 != NULL) {
1994 struct lmv_stripe_md *lsm = op_data->op_mea2;
1995 const struct lmv_oinfo *oinfo;
1997 oinfo = lsm_name_to_stripe_info(lsm, op_data->op_name,
1998 op_data->op_namelen);
2000 RETURN(PTR_ERR(oinfo));
2002 op_data->op_fid2 = oinfo->lmo_fid;
2005 tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
2007 RETURN(PTR_ERR(tgt));
2010 * Cancel UPDATE lock on child (fid1).
2012 op_data->op_flags |= MF_MDC_CANCEL_FID2;
2013 rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX,
2014 MDS_INODELOCK_UPDATE, MF_MDC_CANCEL_FID1);
2018 rc = md_link(tgt->ltd_exp, op_data, request);
2023 static int lmv_rename(struct obd_export *exp, struct md_op_data *op_data,
2024 const char *old, size_t oldlen,
2025 const char *new, size_t newlen,
2026 struct ptlrpc_request **request)
2028 struct obd_device *obd = exp->exp_obd;
2029 struct lmv_obd *lmv = &obd->u.lmv;
2030 struct lmv_tgt_desc *src_tgt;
2031 struct lmv_tgt_desc *tgt_tgt;
2032 struct obd_export *target_exp;
2033 struct mdt_body *body;
2037 LASSERT(oldlen != 0);
2039 CDEBUG(D_INODE, "RENAME %.*s in "DFID":%d to %.*s in "DFID":%d\n",
2040 (int)oldlen, old, PFID(&op_data->op_fid1),
2041 op_data->op_mea1 ? op_data->op_mea1->lsm_md_stripe_count : 0,
2042 (int)newlen, new, PFID(&op_data->op_fid2),
2043 op_data->op_mea2 ? op_data->op_mea2->lsm_md_stripe_count : 0);
2045 rc = lmv_check_connect(obd);
2049 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2050 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2051 op_data->op_cap = cfs_curproc_cap_pack();
2052 if (op_data->op_cli_flags & CLI_MIGRATE) {
2053 LASSERTF(fid_is_sane(&op_data->op_fid3), "invalid FID "DFID"\n",
2054 PFID(&op_data->op_fid3));
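/* Directory entry migration comes in through the rename path: op_fid3
 * is the object being migrated, a new FID (op_fid2) is allocated for
 * it, and the request is sent to the MDT currently holding op_fid3. */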
2056 if (op_data->op_mea1 != NULL) {
2057 struct lmv_stripe_md *lsm = op_data->op_mea1;
2058 struct lmv_tgt_desc *tmp;
2060 /* Fix the parent fid for striped dir */
2061 tmp = lmv_locate_target_for_name(lmv, lsm, old,
2066 RETURN(PTR_ERR(tmp));
2069 rc = lmv_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
2073 src_tgt = lmv_find_target(lmv, &op_data->op_fid3);
2074 if (IS_ERR(src_tgt))
2075 RETURN(PTR_ERR(src_tgt));
2077 target_exp = src_tgt->ltd_exp;
2079 if (op_data->op_mea1 != NULL) {
2080 struct lmv_stripe_md *lsm = op_data->op_mea1;
2082 src_tgt = lmv_locate_target_for_name(lmv, lsm, old,
2087 src_tgt = lmv_find_target(lmv, &op_data->op_fid1);
2089 if (IS_ERR(src_tgt))
2090 RETURN(PTR_ERR(src_tgt));
2093 if (op_data->op_mea2 != NULL) {
2094 struct lmv_stripe_md *lsm = op_data->op_mea2;
2096 tgt_tgt = lmv_locate_target_for_name(lmv, lsm, new,
2101 tgt_tgt = lmv_find_target(lmv, &op_data->op_fid2);
2104 if (IS_ERR(tgt_tgt))
2105 RETURN(PTR_ERR(tgt_tgt));
2107 target_exp = tgt_tgt->ltd_exp;
2111 * LOOKUP lock on src child (fid3) should also be cancelled for
2112 * src_tgt in mdc_rename.
2114 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
2117 * Cancel UPDATE locks on tgt parent (fid2), tgt_tgt is its
2120 rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
2121 LCK_EX, MDS_INODELOCK_UPDATE,
2122 MF_MDC_CANCEL_FID2);
2127 * Cancel LOOKUP locks on source child (fid3) for parent tgt_tgt.
2129 if (fid_is_sane(&op_data->op_fid3)) {
2130 struct lmv_tgt_desc *tgt;
2132 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2134 RETURN(PTR_ERR(tgt));
2136 /* Cancel LOOKUP lock on its parent */
2137 rc = lmv_early_cancel(exp, tgt, op_data, src_tgt->ltd_idx,
2138 LCK_EX, MDS_INODELOCK_LOOKUP,
2139 MF_MDC_CANCEL_FID3);
2143 rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
2144 LCK_EX, MDS_INODELOCK_FULL,
2145 MF_MDC_CANCEL_FID3);
2152 * Cancel all the locks on tgt child (fid4).
2154 if (fid_is_sane(&op_data->op_fid4)) {
2155 struct lmv_tgt_desc *tgt;
2157 rc = lmv_early_cancel(exp, NULL, op_data, src_tgt->ltd_idx,
2158 LCK_EX, MDS_INODELOCK_FULL,
2159 MF_MDC_CANCEL_FID4);
2163 tgt = lmv_find_target(lmv, &op_data->op_fid4);
2165 RETURN(PTR_ERR(tgt));
2167 /* Since the target child might be destroyed and might become
2168  * an orphan, and we can only check for orphans on the local
2169  * MDT right now, send the rename request to the MDT where the
2170  * target child is located. If the target child does not exist,
2171  * the request will be sent to the target parent instead. */
2172 target_exp = tgt->ltd_exp;
2175 rc = md_rename(target_exp, op_data, old, oldlen, new, newlen,
2178 if (rc != 0 && rc != -EXDEV)
2181 body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
2185 /* Not cross-ref case, just get out of here. */
2186 if (likely(!(body->mbo_valid & OBD_MD_MDS)))
2189 CDEBUG(D_INODE, "%s: try rename to another MDT for "DFID"\n",
2190 exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
2192 op_data->op_fid4 = body->mbo_fid1;
2193 ptlrpc_req_finished(*request);
2198 static int lmv_setattr(struct obd_export *exp, struct md_op_data *op_data,
2199 void *ea, size_t ealen, struct ptlrpc_request **request)
2201 struct obd_device *obd = exp->exp_obd;
2202 struct lmv_obd *lmv = &obd->u.lmv;
2203 struct lmv_tgt_desc *tgt;
2207 rc = lmv_check_connect(obd);
2211 CDEBUG(D_INODE, "SETATTR for "DFID", valid 0x%x\n",
2212 PFID(&op_data->op_fid1), op_data->op_attr.ia_valid);
2214 op_data->op_flags |= MF_MDC_CANCEL_FID1;
2215 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2217 RETURN(PTR_ERR(tgt));
2219 rc = md_setattr(tgt->ltd_exp, op_data, ea, ealen, request);
2224 static int lmv_fsync(struct obd_export *exp, const struct lu_fid *fid,
2225 struct ptlrpc_request **request)
2227 struct obd_device *obd = exp->exp_obd;
2228 struct lmv_obd *lmv = &obd->u.lmv;
2229 struct lmv_tgt_desc *tgt;
2233 rc = lmv_check_connect(obd);
2237 tgt = lmv_find_target(lmv, fid);
2239 RETURN(PTR_ERR(tgt));
2241 rc = md_fsync(tgt->ltd_exp, fid, request);
2246 * Get the current minimum entry from a striped directory
2248 * This function searches all of the sub-stripes for the dir entry whose
2249 * hash value is the closest (>=) to @hash_offset; it is only called
2250 * for striped directories.
2252 * \param[in] exp export of the LMV
2253 * \param[in] op_data parameters transferred between client MD stacks;
2254 * stripe information will be included in this
2256 * \param[in] cb_op ldlm callback being used in enqueue in
2258 * \param[in] hash_offset the hash value, which is used to locate the
2259 * minimum (closest) dir entry
2260 * \param[in|out] stripe_offset the caller uses this to indicate the stripe
2261 * index of the last entry, so as to avoid hash conflicts
2262 * between stripes. It will also be used to
2263 * return the stripe index of the current dir entry.
2264 * \param[in|out] entp the minimum entry; it is also used
2265 * to input the last dir entry to resolve the
2268 * \param[out] ppage the page which holds the minimum entry
2270 * \retval = 0 the entry was retrieved successfully
2271 * negative errno (< 0) the entry could not be retrieved
2273 static int lmv_get_min_striped_entry(struct obd_export *exp,
2274 struct md_op_data *op_data,
2275 struct md_callback *cb_op,
2276 __u64 hash_offset, int *stripe_offset,
2277 struct lu_dirent **entp,
2278 struct page **ppage)
2280 struct obd_device *obd = exp->exp_obd;
2281 struct lmv_obd *lmv = &obd->u.lmv;
2282 struct lmv_stripe_md *lsm = op_data->op_mea1;
2283 struct lmv_tgt_desc *tgt;
2285 struct lu_dirent *min_ent = NULL;
2286 struct page *min_page = NULL;
2292 stripe_count = lsm->lsm_md_stripe_count;
2293 for (i = 0; i < stripe_count; i++) {
2294 struct lu_dirent *ent = NULL;
2295 struct page *page = NULL;
2296 struct lu_dirpage *dp;
2297 __u64 stripe_hash = hash_offset;
2299 tgt = lmv_get_target(lmv, lsm->lsm_md_oinfo[i].lmo_mds, NULL);
2301 GOTO(out, rc = PTR_ERR(tgt));
2303 /* op_data will be shared by each stripe, so we need to
2304  * reset these values for each stripe. */
2305 op_data->op_fid1 = lsm->lsm_md_oinfo[i].lmo_fid;
2306 op_data->op_fid2 = lsm->lsm_md_oinfo[i].lmo_fid;
2307 op_data->op_data = lsm->lsm_md_oinfo[i].lmo_root;
2309 rc = md_read_page(tgt->ltd_exp, op_data, cb_op, stripe_hash,
2314 dp = page_address(page);
2315 for (ent = lu_dirent_start(dp); ent != NULL;
2316 ent = lu_dirent_next(ent)) {
2317 /* Skip dummy entry */
2318 if (le16_to_cpu(ent->lde_namelen) == 0)
2321 if (le64_to_cpu(ent->lde_hash) < hash_offset)
2324 if (le64_to_cpu(ent->lde_hash) == hash_offset &&
2325 (*entp == ent || i < *stripe_offset))
2328 /* skip . and .. for other stripes */
2330 (strncmp(ent->lde_name, ".",
2331 le16_to_cpu(ent->lde_namelen)) == 0 ||
2332 strncmp(ent->lde_name, "..",
2333 le16_to_cpu(ent->lde_namelen)) == 0))
2339 stripe_hash = le64_to_cpu(dp->ldp_hash_end);
2342 page_cache_release(page);
2345 /* reached the end of the current stripe, go to the next stripe */
2346 if (stripe_hash == MDS_DIR_END_OFF)
2352 if (min_ent != NULL) {
2353 if (le64_to_cpu(min_ent->lde_hash) >
2354 le64_to_cpu(ent->lde_hash)) {
2357 page_cache_release(min_page);
2362 page_cache_release(page);
2373 if (*ppage != NULL) {
2375 page_cache_release(*ppage);
2377 *stripe_offset = min_idx;
2384 * Build a dir entry page from a striped directory
2386 * This function gets one entry by @offset from a striped directory. It will
2387 * read entries from all of the stripes and choose the one closest to the
2388 * required offset (@offset). A few notes:
2389 * 1. skip . and .. for non-zero stripes, because there can only be one .
2390 * and one .. in a directory.
2391 * 2. op_data is shared by all of the stripes instead of allocating a new
2392 * one, so it needs to be restored before reuse.
2393 * 3. release the entry page if it is not the one being chosen.
2395 * \param[in] exp obd export referring to LMV
2396 * \param[in] op_data holds the MD parameters of read_entry
2397 * \param[in] cb_op ldlm callback being used in enqueue in mdc_read_entry
2398 * \param[out] ldp the entry being read
2399 * \param[out] ppage the page holding the entry. Note: because the entry
2400 * will be accessed in the upper layer, we need to hold the
2401 * page until all uses of the entry are finished, see
2402 * ll_dir_entry_next.
2404 * \retval =0 if the entry is read successfully
2405 * <0 if the entry cannot be read
2407 static int lmv_read_striped_page(struct obd_export *exp,
2408 struct md_op_data *op_data,
2409 struct md_callback *cb_op,
2410 __u64 offset, struct page **ppage)
2412 struct obd_device *obd = exp->exp_obd;
2413 struct lu_fid master_fid = op_data->op_fid1;
2414 struct inode *master_inode = op_data->op_data;
2415 __u64 hash_offset = offset;
2416 struct lu_dirpage *dp;
2417 struct page *min_ent_page = NULL;
2418 struct page *ent_page = NULL;
2419 struct lu_dirent *ent;
2422 struct lu_dirent *min_ent = NULL;
2423 struct lu_dirent *last_ent;
2428 rc = lmv_check_connect(obd);
2432 /* Allocate a page, read entries from all of the stripes and fill
2433  * the page in hash order. */
2434 ent_page = alloc_page(GFP_KERNEL);
2435 if (ent_page == NULL)
2438 /* Initialize the entry page */
2439 dp = kmap(ent_page);
2440 memset(dp, 0, sizeof(*dp));
2441 dp->ldp_hash_start = cpu_to_le64(offset);
2442 dp->ldp_flags |= LDF_COLLIDE;
2445 left_bytes = PAGE_CACHE_SIZE - sizeof(*dp);
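/* Repeatedly pull the entry with the smallest hash from the
 * sub-stripes and append it to this page until the page is full or all
 * stripes are exhausted. */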
2451 /* Find the minimum entry from all sub-stripes */
2452 rc = lmv_get_min_striped_entry(exp, op_data, cb_op, hash_offset,
2458 /* If no minimum entry could be found, the end of this
2459  * directory has already been reached. */
2460 if (min_ent == NULL) {
2461 last_ent->lde_reclen = 0;
2462 hash_offset = MDS_DIR_END_OFF;
2466 ent_size = le16_to_cpu(min_ent->lde_reclen);
2468 /* the last entry's lde_reclen is 0, but it is not necessarily
2469  * the last entry of this temporary merged page */
2471 ent_size = lu_dirent_calc_size(
2472 le16_to_cpu(min_ent->lde_namelen),
2473 le32_to_cpu(min_ent->lde_attrs));
2474 if (ent_size > left_bytes) {
2475 last_ent->lde_reclen = cpu_to_le16(0);
2476 hash_offset = le64_to_cpu(min_ent->lde_hash);
2480 memcpy(ent, min_ent, ent_size);
2482 /* Replace . with the master FID and replace .. with the parent
2483  * FID of the master object */
2484 if (strncmp(ent->lde_name, ".",
2485 le16_to_cpu(ent->lde_namelen)) == 0 &&
2486 le16_to_cpu(ent->lde_namelen) == 1)
2487 fid_cpu_to_le(&ent->lde_fid, &master_fid);
2488 else if (strncmp(ent->lde_name, "..",
2489 le16_to_cpu(ent->lde_namelen)) == 0 &&
2490 le16_to_cpu(ent->lde_namelen) == 2)
2491 fid_cpu_to_le(&ent->lde_fid, &op_data->op_fid3);
2493 left_bytes -= ent_size;
2494 ent->lde_reclen = cpu_to_le16(ent_size);
2496 ent = (void *)ent + ent_size;
2497 hash_offset = le64_to_cpu(min_ent->lde_hash);
2498 if (hash_offset == MDS_DIR_END_OFF) {
2499 last_ent->lde_reclen = 0;
2504 if (min_ent_page != NULL) {
2505 kunmap(min_ent_page);
2506 page_cache_release(min_ent_page);
2509 if (unlikely(rc != 0)) {
2510 __free_page(ent_page);
2514 dp->ldp_flags |= LDF_EMPTY;
2515 dp->ldp_flags = cpu_to_le32(dp->ldp_flags);
2516 dp->ldp_hash_end = cpu_to_le64(hash_offset);
2519 /* We do not want to allocate md_op_data for each
2520  * dir entry read, so op_data is shared by every stripe;
2521  * we therefore need to restore it to its original values
2522  * before returning to the upper layer. */
2523 op_data->op_fid1 = master_fid;
2524 op_data->op_fid2 = master_fid;
2525 op_data->op_data = master_inode;
2532 int lmv_read_page(struct obd_export *exp, struct md_op_data *op_data,
2533 struct md_callback *cb_op, __u64 offset,
2534 struct page **ppage)
2536 struct obd_device *obd = exp->exp_obd;
2537 struct lmv_obd *lmv = &obd->u.lmv;
2538 struct lmv_stripe_md *lsm = op_data->op_mea1;
2539 struct lmv_tgt_desc *tgt;
2543 rc = lmv_check_connect(obd);
2547 if (unlikely(lsm != NULL)) {
2548 rc = lmv_read_striped_page(exp, op_data, cb_op, offset, ppage);
2552 tgt = lmv_find_target(lmv, &op_data->op_fid1);
2554 RETURN(PTR_ERR(tgt));
2556 rc = md_read_page(tgt->ltd_exp, op_data, cb_op, offset, ppage);
2562 * Unlink a file/directory
2564 * Unlink a file or directory under the parent dir. The unlink request
2565 * is usually sent to the MDT where the child is located, but if the
2566 * client does not have the child FID then the request is sent to the
2567 * MDT where the parent is located.
2569 * If the parent is a striped directory then it also needs to locate the
2570 * stripe in which the child name is located, and replace the parent FID
2571 * (@op->op_fid1) with the FID of that stripe. Note: if the stripe is
2572 * unknown, it will walk through all of the sub-stripes until the child is found.
2575 * \param[in] exp export referring to LMV
2576 * \param[in] op_data different parameters transferred between client
2577 * MD stacks: name, namelen, FIDs, etc.
2578 * op_fid1 is the parent FID, op_fid2 is the child FID
2580 * \param[out] request pointer to the unlink request
2582 * \retval 0 on success
2583 * \retval negative negated errno on failure
2585 static int lmv_unlink(struct obd_export *exp, struct md_op_data *op_data,
2586 struct ptlrpc_request **request)
2588 struct obd_device *obd = exp->exp_obd;
2589 struct lmv_obd *lmv = &obd->u.lmv;
2590 struct lmv_tgt_desc *tgt = NULL;
2591 struct lmv_tgt_desc *parent_tgt = NULL;
2592 struct mdt_body *body;
2594 int stripe_index = 0;
2595 struct lmv_stripe_md *lsm = op_data->op_mea1;
2598 rc = lmv_check_connect(obd);
2602 /* For striped dir, we need to locate the parent as well */
2604 struct lmv_tgt_desc *tmp;
2606 LASSERT(op_data->op_name != NULL &&
2607 op_data->op_namelen != 0);
2609 tmp = lmv_locate_target_for_name(lmv, lsm,
2611 op_data->op_namelen,
2615 /* A return of -EBADFD means an unknown hash type; we may
2616 * need to try all sub-stripes here */
2617 if (IS_ERR(tmp) && PTR_ERR(tmp) != -EBADFD)
2618 RETURN(PTR_ERR(tmp));
2620 /* Note: both a migrating dir and an unknown-hash dir need to
2621 * try all of the sub-stripes, so the name search must start
2622 * from stripe 0; the migrating dir case is already handled
2623 * inside lmv_locate_target_for_name(), so only the unknown
2624 * hash type directory is checked here */
2625 if (!lmv_is_known_hash_type(lsm->lsm_md_hash_type)) {
2626 struct lmv_oinfo *oinfo;
2628 oinfo = &lsm->lsm_md_oinfo[stripe_index];
2630 op_data->op_fid1 = oinfo->lmo_fid;
2631 op_data->op_mds = oinfo->lmo_mds;
2636 /* Send the unlink request to the MDT where the child is located */
2637 if (likely(!fid_is_zero(&op_data->op_fid2)))
2638 tgt = lmv_find_target(lmv, &op_data->op_fid2);
2639 else if (lsm != NULL)
2640 tgt = lmv_get_target(lmv, op_data->op_mds, NULL);
2642 tgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
2645 RETURN(PTR_ERR(tgt));
2647 op_data->op_fsuid = from_kuid(&init_user_ns, current_fsuid());
2648 op_data->op_fsgid = from_kgid(&init_user_ns, current_fsgid());
2649 op_data->op_cap = cfs_curproc_cap_pack();
2652 * If the child's FID is given, cancel unused locks for it if it is from
2653 * an export other than the parent's.
2655 * The LOOKUP lock for the child (fid3) should also be cancelled on the
2656 * parent target in mdc_unlink().
2658 op_data->op_flags |= MF_MDC_CANCEL_FID1 | MF_MDC_CANCEL_FID3;
2661 * Cancel FULL locks on child (fid3).
2663 parent_tgt = lmv_find_target(lmv, &op_data->op_fid1);
2664 if (IS_ERR(parent_tgt))
2665 RETURN(PTR_ERR(parent_tgt));
2667 if (parent_tgt != tgt) {
2668 rc = lmv_early_cancel(exp, parent_tgt, op_data, tgt->ltd_idx,
2669 LCK_EX, MDS_INODELOCK_LOOKUP,
2670 MF_MDC_CANCEL_FID3);
2673 rc = lmv_early_cancel(exp, NULL, op_data, tgt->ltd_idx, LCK_EX,
2674 MDS_INODELOCK_FULL, MF_MDC_CANCEL_FID3);
2678 CDEBUG(D_INODE, "unlink with fid="DFID"/"DFID" -> mds #%u\n",
2679 PFID(&op_data->op_fid1), PFID(&op_data->op_fid2), tgt->ltd_idx);
2681 rc = md_unlink(tgt->ltd_exp, op_data, request);
2682 if (rc != 0 && rc != -EREMOTE && rc != -ENOENT)
2685 /* Try next stripe if it is needed. */
2686 if (rc == -ENOENT && lsm != NULL && lmv_need_try_all_stripes(lsm)) {
2687 struct lmv_oinfo *oinfo;
2690 if (stripe_index >= lsm->lsm_md_stripe_count)
2693 oinfo = &lsm->lsm_md_oinfo[stripe_index];
2695 op_data->op_fid1 = oinfo->lmo_fid;
2696 op_data->op_mds = oinfo->lmo_mds;
2698 ptlrpc_req_finished(*request);
2701 goto try_next_stripe;
2704 body = req_capsule_server_get(&(*request)->rq_pill, &RMF_MDT_BODY);
2708 /* Not cross-ref case, just get out of here. */
2709 if (likely(!(body->mbo_valid & OBD_MD_MDS)))
2712 CDEBUG(D_INODE, "%s: try unlink to another MDT for "DFID"\n",
2713 exp->exp_obd->obd_name, PFID(&body->mbo_fid1));
2715 /* This is a remote object; try the remote MDT. Note: it may
2716 * retry more than once here. Consider the following case:
2717 * /mnt/lustre is the root on MDT0, remote1 is on MDT1.
2718 * 1. Initially A does not know where remote1 is; it sends the
2719 * unlink RPC to MDT0, MDT0 returns -EREMOTE, and A
2720 * resends the unlink RPC to MDT1 (1st retry).
2722 * 2. While that unlink RPC is in flight,
2723 * client B does mv /mnt/lustre/remote1 /mnt/lustre/remote2
2724 * and creates a new remote1, but on MDT0.
2726 * 3. MDT1 gets the unlink RPC (from A), takes a remote lock on
2727 * /mnt/lustre, looks up the FID of remote1, finds that it is
2728 * a remote dir again, and replies -EREMOTE again.
2730 * 4. A then resends the unlink RPC to MDT0 (2nd retry).
2732 * In theory, this could retry an unlimited number of times, but
2733 * it should be a very rare case. */
2734 op_data->op_fid2 = body->mbo_fid1;
2735 ptlrpc_req_finished(*request);
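/* Release FLD client and procfs state before the LMV device is cleaned up. */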
2741 static int lmv_precleanup(struct obd_device *obd)
2744 fld_client_proc_fini(&obd->u.lmv.lmv_fld);
2745 lprocfs_obd_cleanup(obd);
2746 lprocfs_free_md_stats(obd);
2751 * Get by key a value associated with a LMV device.
2753 * Dispatch request to lower-layer devices as needed.
2755 * \param[in] env execution environment for this thread
2756 * \param[in] exp export for the LMV device
2757 * \param[in] keylen length of key identifier
2758 * \param[in] key identifier of key to get value for
2759 * \param[in] vallen size of \a val
2760 * \param[out] val pointer to storage location for value
2761 * \param[in] lsm optional striping metadata of object
2763 * \retval 0 on success
2764 * \retval negative negated errno on failure
2766 static int lmv_get_info(const struct lu_env *env, struct obd_export *exp,
2767 __u32 keylen, void *key, __u32 *vallen, void *val)
2769 struct obd_device *obd;
2770 struct lmv_obd *lmv;
2774 obd = class_exp2obd(exp);
2776 CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
2777 exp->exp_handle.h_cookie);
2782 if (keylen >= strlen("remote_flag") && !strcmp(key, "remote_flag")) {
2785 rc = lmv_check_connect(obd);
2789 LASSERT(*vallen == sizeof(__u32));
2790 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2791 struct lmv_tgt_desc *tgt = lmv->tgts[i];
2793 * All tgts should be connected when this gets called.
2795 if (tgt == NULL || tgt->ltd_exp == NULL)
2798 if (!obd_get_info(env, tgt->ltd_exp, keylen, key,
2803 } else if (KEY_IS(KEY_MAX_EASIZE) ||
2804 KEY_IS(KEY_DEFAULT_EASIZE) ||
2805 KEY_IS(KEY_CONN_DATA)) {
2806 rc = lmv_check_connect(obd);
2811 * Forward this request to the first MDS; it should know the LOV desc.
2814 rc = obd_get_info(env, lmv->tgts[0]->ltd_exp, keylen, key,
2816 if (!rc && KEY_IS(KEY_CONN_DATA))
2817 exp->exp_connect_data = *(struct obd_connect_data *)val;
2819 } else if (KEY_IS(KEY_TGT_COUNT)) {
2820 *((int *)val) = lmv->desc.ld_tgt_count;
2824 CDEBUG(D_IOCTL, "Invalid key\n");
2829 * Asynchronously set by key a value associated with a LMV device.
2831 * Dispatch request to lower-layer devices as needed.
2833 * \param[in] env execution environment for this thread
2834 * \param[in] exp export for the LMV device
2835 * \param[in] keylen length of key identifier
2836 * \param[in] key identifier of key to store value for
2837 * \param[in] vallen size of value to store
2838 * \param[in] val pointer to data to be stored
2839 * \param[in] set optional list of related ptlrpc requests
2841 * \retval 0 on success
2842 * \retval negative negated errno on failure
2844 int lmv_set_info_async(const struct lu_env *env, struct obd_export *exp,
2845 __u32 keylen, void *key, __u32 vallen, void *val,
2846 struct ptlrpc_request_set *set)
2848 struct lmv_tgt_desc *tgt = NULL;
2849 struct obd_device *obd;
2850 struct lmv_obd *lmv;
2854 obd = class_exp2obd(exp);
2856 CDEBUG(D_IOCTL, "Invalid client cookie "LPX64"\n",
2857 exp->exp_handle.h_cookie);
2862 if (KEY_IS(KEY_READ_ONLY) || KEY_IS(KEY_FLUSH_CTX) ||
2863 KEY_IS(KEY_DEFAULT_EASIZE)) {
2866 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
2869 if (tgt == NULL || tgt->ltd_exp == NULL)
2872 err = obd_set_info_async(env, tgt->ltd_exp,
2873 keylen, key, vallen, val, set);
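/*
 * Unpack an lmv_mds_md_v1 wire buffer into the in-memory lmv_stripe_md:
 * convert the header fields from little-endian and resolve the MDT index
 * of every stripe FID through the FLD client.
 */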
2884 static int lmv_unpack_md_v1(struct obd_export *exp, struct lmv_stripe_md *lsm,
2885 const struct lmv_mds_md_v1 *lmm1)
2887 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
2894 lsm->lsm_md_magic = le32_to_cpu(lmm1->lmv_magic);
2895 lsm->lsm_md_stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
2896 lsm->lsm_md_master_mdt_index = le32_to_cpu(lmm1->lmv_master_mdt_index);
2897 if (OBD_FAIL_CHECK(OBD_FAIL_UNKNOWN_LMV_STRIPE))
2898 lsm->lsm_md_hash_type = LMV_HASH_TYPE_UNKNOWN;
2900 lsm->lsm_md_hash_type = le32_to_cpu(lmm1->lmv_hash_type);
2901 lsm->lsm_md_layout_version = le32_to_cpu(lmm1->lmv_layout_version);
2902 cplen = strlcpy(lsm->lsm_md_pool_name, lmm1->lmv_pool_name,
2903 sizeof(lsm->lsm_md_pool_name));
2905 if (cplen >= sizeof(lsm->lsm_md_pool_name))
2908 CDEBUG(D_INFO, "unpack lsm count %d, master %d hash_type %d "
2909 "layout_version %d\n", lsm->lsm_md_stripe_count,
2910 lsm->lsm_md_master_mdt_index, lsm->lsm_md_hash_type,
2911 lsm->lsm_md_layout_version);
2913 stripe_count = le32_to_cpu(lmm1->lmv_stripe_count);
2914 for (i = 0; i < stripe_count; i++) {
2915 fid_le_to_cpu(&lsm->lsm_md_oinfo[i].lmo_fid,
2916 &lmm1->lmv_stripe_fids[i]);
2917 rc = lmv_fld_lookup(lmv, &lsm->lsm_md_oinfo[i].lmo_fid,
2918 &lsm->lsm_md_oinfo[i].lmo_mds);
2921 CDEBUG(D_INFO, "unpack fid #%d "DFID"\n", i,
2922 PFID(&lsm->lsm_md_oinfo[i].lmo_fid));
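/*
 * Unpack (or release) in-memory stripe metadata.
 *
 * When @lmm is NULL the existing *lsmp is freed, dropping the root inode
 * reference of each stripe.  Otherwise the wire buffer is validated and,
 * if necessary, a new lmv_stripe_md is allocated and filled from it.
 */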
2928 static int lmv_unpackmd(struct obd_export *exp, struct lmv_stripe_md **lsmp,
2929 const union lmv_mds_md *lmm, size_t lmm_size)
2931 struct lmv_stripe_md *lsm;
2934 bool allocated = false;
2937 LASSERT(lsmp != NULL);
2941 if (lsm != NULL && lmm == NULL) {
2943 for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
2944 /* For a migrating inode, the master stripe and the master
2945 * object are the same, so no iput() is needed; see
2946 * ll_update_lsm_md() */
2947 if (!(lsm->lsm_md_hash_type & LMV_HASH_FLAG_MIGRATION &&
2948 i == 0) && lsm->lsm_md_oinfo[i].lmo_root != NULL)
2949 iput(lsm->lsm_md_oinfo[i].lmo_root);
2951 lsm_size = lmv_stripe_md_size(lsm->lsm_md_stripe_count);
2952 OBD_FREE(lsm, lsm_size);
2957 if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_STRIPE)
2961 if (le32_to_cpu(lmm->lmv_magic) != LMV_MAGIC_V1 &&
2962 le32_to_cpu(lmm->lmv_magic) != LMV_USER_MAGIC) {
2963 CERROR("%s: invalid lmv magic %x: rc = %d\n",
2964 exp->exp_obd->obd_name, le32_to_cpu(lmm->lmv_magic),
2969 if (le32_to_cpu(lmm->lmv_magic) == LMV_MAGIC_V1)
2970 lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm));
2973 * Unpack the default dir stripe (lmv_user_md) into lmv_stripe_md;
2974 * the stripe count should then be 0.
2976 lsm_size = lmv_stripe_md_size(0);
2978 lsm_size = lmv_stripe_md_size(lmv_mds_md_stripe_count_get(lmm));
2980 OBD_ALLOC(lsm, lsm_size);
2987 switch (le32_to_cpu(lmm->lmv_magic)) {
2989 rc = lmv_unpack_md_v1(exp, lsm, &lmm->lmv_md_v1);
2992 CERROR("%s: unrecognized magic %x\n", exp->exp_obd->obd_name,
2993 le32_to_cpu(lmm->lmv_magic));
2998 if (rc != 0 && allocated) {
2999 OBD_FREE(lsm, lsm_size);
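/* Convenience wrapper to free in-memory stripe metadata via lmv_unpackmd(). */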
3006 void lmv_free_memmd(struct lmv_stripe_md *lsm)
3008 lmv_unpackmd(NULL, &lsm, NULL, 0);
3010 EXPORT_SYMBOL(lmv_free_memmd);
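/* Cancel unused locks matching @fid on every active target. */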
3012 static int lmv_cancel_unused(struct obd_export *exp, const struct lu_fid *fid,
3013 union ldlm_policy_data *policy,
3014 enum ldlm_mode mode, enum ldlm_cancel_flags flags,
3017 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
3022 LASSERT(fid != NULL);
3024 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
3025 struct lmv_tgt_desc *tgt = lmv->tgts[i];
3028 if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active)
3031 err = md_cancel_unused(tgt->ltd_exp, fid, policy, mode, flags,
3039 int lmv_set_lock_data(struct obd_export *exp, __u64 *lockh, void *data,
3042 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
3043 struct lmv_tgt_desc *tgt = lmv->tgts[0];
3047 if (tgt == NULL || tgt->ltd_exp == NULL)
3049 rc = md_set_lock_data(tgt->ltd_exp, lockh, data, bits);
3053 enum ldlm_mode lmv_lock_match(struct obd_export *exp, __u64 flags,
3054 const struct lu_fid *fid, enum ldlm_type type,
3055 union ldlm_policy_data *policy,
3056 enum ldlm_mode mode, struct lustre_handle *lockh)
3058 struct obd_device *obd = exp->exp_obd;
3059 struct lmv_obd *lmv = &obd->u.lmv;
3065 CDEBUG(D_INODE, "Lock match for "DFID"\n", PFID(fid));
3068 * With DNE every object can have two locks in different namespaces:
3069 * lookup lock in space of MDT storing direntry and update/open lock in
3070 * space of MDT storing inode. Try the MDT that the FID maps to first,
3071 * since this can be easily found, and only try others if that fails.
3073 for (i = 0, tgt = lmv_find_target_index(lmv, fid);
3074 i < lmv->desc.ld_tgt_count;
3075 i++, tgt = (tgt + 1) % lmv->desc.ld_tgt_count) {
3077 CDEBUG(D_HA, "%s: "DFID" is inaccessible: rc = %d\n",
3078 obd->obd_name, PFID(fid), tgt);
3082 if (lmv->tgts[tgt] == NULL ||
3083 lmv->tgts[tgt]->ltd_exp == NULL ||
3084 lmv->tgts[tgt]->ltd_active == 0)
3087 rc = md_lock_match(lmv->tgts[tgt]->ltd_exp, flags, fid,
3088 type, policy, mode, lockh);
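/* Unpack a reply into struct lustre_md through the MDC layer of target 0. */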
3096 int lmv_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
3097 struct obd_export *dt_exp, struct obd_export *md_exp,
3098 struct lustre_md *md)
3100 struct lmv_obd *lmv = &exp->exp_obd->u.lmv;
3101 struct lmv_tgt_desc *tgt = lmv->tgts[0];
3103 if (tgt == NULL || tgt->ltd_exp == NULL)
3106 return md_get_lustre_md(lmv->tgts[0]->ltd_exp, req, dt_exp, md_exp, md);
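/* Free a struct lustre_md, releasing any attached LMV stripe metadata first. */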
3109 int lmv_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
3111 struct obd_device *obd = exp->exp_obd;
3112 struct lmv_obd *lmv = &obd->u.lmv;
3113 struct lmv_tgt_desc *tgt = lmv->tgts[0];
3116 if (md->lmv != NULL) {
3117 lmv_free_memmd(md->lmv);
3120 if (tgt == NULL || tgt->ltd_exp == NULL)
3122 RETURN(md_free_lustre_md(lmv->tgts[0]->ltd_exp, md));
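/* Store open replay data with the MDC that manages the opened file's FID. */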
3125 int lmv_set_open_replay_data(struct obd_export *exp,
3126 struct obd_client_handle *och,
3127 struct lookup_intent *it)
3129 struct obd_device *obd = exp->exp_obd;
3130 struct lmv_obd *lmv = &obd->u.lmv;
3131 struct lmv_tgt_desc *tgt;
3134 tgt = lmv_find_target(lmv, &och->och_fid);
3136 RETURN(PTR_ERR(tgt));
3138 RETURN(md_set_open_replay_data(tgt->ltd_exp, och, it));
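/* Clear open replay data on the MDC that manages the file's FID. */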
3141 int lmv_clear_open_replay_data(struct obd_export *exp,
3142 struct obd_client_handle *och)
3144 struct obd_device *obd = exp->exp_obd;
3145 struct lmv_obd *lmv = &obd->u.lmv;
3146 struct lmv_tgt_desc *tgt;
3149 tgt = lmv_find_target(lmv, &och->och_fid);
3151 RETURN(PTR_ERR(tgt));
3153 RETURN(md_clear_open_replay_data(tgt->ltd_exp, och));
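/* Fetch remote permissions for @fid from the MDT that stores the object. */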
3156 static int lmv_get_remote_perm(struct obd_export *exp, const struct lu_fid *fid,
3157 u32 suppgid, struct ptlrpc_request **request)
3159 struct obd_device *obd = exp->exp_obd;
3160 struct lmv_obd *lmv = &obd->u.lmv;
3161 struct lmv_tgt_desc *tgt;
3165 rc = lmv_check_connect(obd);
3169 tgt = lmv_find_target(lmv, fid);
3171 RETURN(PTR_ERR(tgt));
3173 rc = md_get_remote_perm(tgt->ltd_exp, fid, suppgid, request);
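/*
 * Issue an asynchronous getattr intent (used by statahead) through the
 * MDT of the parent directory.
 */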
3177 int lmv_intent_getattr_async(struct obd_export *exp,
3178 struct md_enqueue_info *minfo)
3180 struct md_op_data *op_data = &minfo->mi_data;
3181 struct obd_device *obd = exp->exp_obd;
3182 struct lmv_obd *lmv = &obd->u.lmv;
3183 struct lmv_tgt_desc *ptgt = NULL;
3184 struct lmv_tgt_desc *ctgt = NULL;
3188 if (!fid_is_sane(&op_data->op_fid2))
3191 rc = lmv_check_connect(obd);
3195 ptgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid1);
3197 RETURN(PTR_ERR(ptgt));
3199 ctgt = lmv_locate_mds(lmv, op_data, &op_data->op_fid2);
3201 RETURN(PTR_ERR(ctgt));
3204 * If the child is on a remote MDT, we need two async RPCs to fetch both
3205 * the LOOKUP lock on the parent and the UPDATE lock on the child MDT,
3206 * which makes everything complicated. Since a remote dir is a rare case,
3207 * and not supporting it in statahead causes no issue, drop its support for now.
3212 rc = md_intent_getattr_async(ptgt->ltd_exp, minfo);
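/* Revalidate a cached lock for @fid on the MDT the FID maps to. */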
3216 int lmv_revalidate_lock(struct obd_export *exp, struct lookup_intent *it,
3217 struct lu_fid *fid, __u64 *bits)
3219 struct obd_device *obd = exp->exp_obd;
3220 struct lmv_obd *lmv = &obd->u.lmv;
3221 struct lmv_tgt_desc *tgt;
3225 rc = lmv_check_connect(obd);
3229 tgt = lmv_find_target(lmv, fid);
3231 RETURN(PTR_ERR(tgt));
3233 rc = md_revalidate_lock(tgt->ltd_exp, it, fid, bits);
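/*
 * Look up @name within a striped directory and return the FID of the
 * stripe that holds it.
 */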
3237 int lmv_get_fid_from_lsm(struct obd_export *exp,
3238 const struct lmv_stripe_md *lsm,
3239 const char *name, int namelen, struct lu_fid *fid)
3241 const struct lmv_oinfo *oinfo;
3243 LASSERT(lsm != NULL);
3244 oinfo = lsm_name_to_stripe_info(lsm, name, namelen);
3246 return PTR_ERR(oinfo);
3248 *fid = oinfo->lmo_fid;
3254 * For LMV we only need to send the request to the master MDT, and the
3255 * master MDT will handle the other slave MDTs. The only exception is
3256 * Q_GETOQUOTA, for which we fetch data directly from the slave MDTs.
3258 int lmv_quotactl(struct obd_device *unused, struct obd_export *exp,
3259 struct obd_quotactl *oqctl)
3261 struct obd_device *obd = class_exp2obd(exp);
3262 struct lmv_obd *lmv = &obd->u.lmv;
3263 struct lmv_tgt_desc *tgt = lmv->tgts[0];
3266 __u64 curspace, curinodes;
3270 tgt->ltd_exp == NULL ||
3272 lmv->desc.ld_tgt_count == 0) {
3273 CERROR("master lmv inactive\n");
3277 if (oqctl->qc_cmd != Q_GETOQUOTA) {
3278 rc = obd_quotactl(tgt->ltd_exp, oqctl);
3282 curspace = curinodes = 0;
3283 for (i = 0; i < lmv->desc.ld_tgt_count; i++) {
3287 if (tgt == NULL || tgt->ltd_exp == NULL || !tgt->ltd_active)
3290 err = obd_quotactl(tgt->ltd_exp, oqctl);
3292 CERROR("getquota on mdt %d failed. %d\n", i, err);
3296 curspace += oqctl->qc_dqblk.dqb_curspace;
3297 curinodes += oqctl->qc_dqblk.dqb_curinodes;
3300 oqctl->qc_dqblk.dqb_curspace = curspace;
3301 oqctl->qc_dqblk.dqb_curinodes = curinodes;
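/*
 * Merge the per-stripe attributes of a striped directory into one cl_attr:
 * sizes and block counts are summed, nlink is accumulated (minus the
 * "." and ".." contribution of slave stripes) and the newest timestamps win.
 */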
3306 static int lmv_merge_attr(struct obd_export *exp,
3307 const struct lmv_stripe_md *lsm,
3308 struct cl_attr *attr,
3309 ldlm_blocking_callback cb_blocking)
3314 rc = lmv_revalidate_slaves(exp, lsm, cb_blocking, 0);
3318 for (i = 0; i < lsm->lsm_md_stripe_count; i++) {
3319 struct inode *inode = lsm->lsm_md_oinfo[i].lmo_root;
3321 CDEBUG(D_INFO, ""DFID" size %llu, blocks %llu nlink %u,"
3322 " atime %lu ctime %lu, mtime %lu.\n",
3323 PFID(&lsm->lsm_md_oinfo[i].lmo_fid),
3324 i_size_read(inode), (unsigned long long)inode->i_blocks,
3325 inode->i_nlink, LTIME_S(inode->i_atime),
3326 LTIME_S(inode->i_ctime), LTIME_S(inode->i_mtime));
3328 /* for a slave stripe, subtract the nlink contributed by . and .. */
3330 attr->cat_nlink += inode->i_nlink - 2;
3332 attr->cat_nlink = inode->i_nlink;
3334 attr->cat_size += i_size_read(inode);
3335 attr->cat_blocks += inode->i_blocks;
3337 if (attr->cat_atime < LTIME_S(inode->i_atime))
3338 attr->cat_atime = LTIME_S(inode->i_atime);
3340 if (attr->cat_ctime < LTIME_S(inode->i_ctime))
3341 attr->cat_ctime = LTIME_S(inode->i_ctime);
3343 if (attr->cat_mtime < LTIME_S(inode->i_mtime))
3344 attr->cat_mtime = LTIME_S(inode->i_mtime);
3349 struct obd_ops lmv_obd_ops = {
3350 .o_owner = THIS_MODULE,
3351 .o_setup = lmv_setup,
3352 .o_cleanup = lmv_cleanup,
3353 .o_precleanup = lmv_precleanup,
3354 .o_process_config = lmv_process_config,
3355 .o_connect = lmv_connect,
3356 .o_disconnect = lmv_disconnect,
3357 .o_statfs = lmv_statfs,
3358 .o_get_info = lmv_get_info,
3359 .o_set_info_async = lmv_set_info_async,
3360 .o_notify = lmv_notify,
3361 .o_get_uuid = lmv_get_uuid,
3362 .o_iocontrol = lmv_iocontrol,
3363 .o_quotactl = lmv_quotactl
3366 struct md_ops lmv_md_ops = {
3367 .m_getstatus = lmv_getstatus,
3368 .m_null_inode = lmv_null_inode,
3369 .m_find_cbdata = lmv_find_cbdata,
3370 .m_close = lmv_close,
3371 .m_create = lmv_create,
3372 .m_enqueue = lmv_enqueue,
3373 .m_getattr = lmv_getattr,
3374 .m_getxattr = lmv_getxattr,
3375 .m_getattr_name = lmv_getattr_name,
3376 .m_intent_lock = lmv_intent_lock,
3378 .m_rename = lmv_rename,
3379 .m_setattr = lmv_setattr,
3380 .m_setxattr = lmv_setxattr,
3381 .m_fsync = lmv_fsync,
3382 .m_read_page = lmv_read_page,
3383 .m_unlink = lmv_unlink,
3384 .m_init_ea_size = lmv_init_ea_size,
3385 .m_cancel_unused = lmv_cancel_unused,
3386 .m_set_lock_data = lmv_set_lock_data,
3387 .m_lock_match = lmv_lock_match,
3388 .m_get_lustre_md = lmv_get_lustre_md,
3389 .m_free_lustre_md = lmv_free_lustre_md,
3390 .m_merge_attr = lmv_merge_attr,
3391 .m_set_open_replay_data = lmv_set_open_replay_data,
3392 .m_clear_open_replay_data = lmv_clear_open_replay_data,
3393 .m_get_remote_perm = lmv_get_remote_perm,
3394 .m_intent_getattr_async = lmv_intent_getattr_async,
3395 .m_revalidate_lock = lmv_revalidate_lock,
3396 .m_get_fid_from_lsm = lmv_get_fid_from_lsm,
3397 .m_unpackmd = lmv_unpackmd,
3400 static int __init lmv_init(void)
3402 return class_register_type(&lmv_obd_ops, &lmv_md_ops, true, NULL,
3403 LUSTRE_LMV_NAME, NULL);
3406 static void __exit lmv_exit(void)
3408 class_unregister_type(LUSTRE_LMV_NAME);
3411 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3412 MODULE_DESCRIPTION("Lustre Logical Metadata Volume");
3413 MODULE_VERSION(LUSTRE_VERSION_STRING);
3414 MODULE_LICENSE("GPL");
3416 module_init(lmv_init);
3417 module_exit(lmv_exit);