 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2017, Intel Corporation.
 *
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
#define DEBUG_SUBSYSTEM S_MDC

#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/user_namespace.h>
#include <linux/utsname.h>
#include <linux/delay.h>
#include <linux/uidgid.h>
#include <linux/device.h>

#include <lustre_errno.h>

#include <cl_object.h>
#include <llog_swab.h>
#include <lprocfs_status.h>
#include <lustre_acl.h>
#include <lustre_fid.h>
#include <uapi/linux/lustre/lustre_ioctl.h>
#include <lustre_kernelcomm.h>
#include <lustre_lmv.h>
#include <lustre_log.h>
#include <lustre_swab.h>
#include <obd_class.h>
#include <lustre_osc.h>

#include "mdc_internal.h"

#define REQUEST_MINOR 244

static int mdc_cleanup(struct obd_device *obd);
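
/* Send \a req synchronously while consuming one of this client's request
 * slots, so that RPCs sent through this helper never exceed
 * cl_max_rpcs_in_flight. */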
static inline int mdc_queue_wait(struct ptlrpc_request *req)
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;

	/* obd_get_request_slot() ensures that this client has no more
	 * than cl_max_rpcs_in_flight RPCs simultaneously in flight
	rc = obd_get_request_slot(cli);

	rc = ptlrpc_queue_wait(req);
	obd_put_request_slot(cli);
 * Send MDS_GET_ROOT RPC to fetch root FID.
 *
 * If \a fileset is not NULL it should contain a subdirectory off
 * the ROOT/ directory to be mounted on the client. Return the FID
 * of the subdirectory to the client to mount onto its mountpoint.
 *
 * \param[in] exp	MDC export
 * \param[in] fileset	fileset name, which could be NULL
 * \param[out] rootfid	root FID of this mountpoint
 *
 * \retval 0 on success, negative errno on failure
static int mdc_get_root(struct obd_export *exp, const char *fileset,
			struct lu_fid *rootfid)
	struct ptlrpc_request *req;
	struct mdt_body *body;

	if (fileset && !(exp_connect_flags(exp) & OBD_CONNECT_SUBTREE))

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),

		req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
				     strlen(fileset) + 1);
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_ROOT);
		ptlrpc_request_free(req);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);
	if (fileset != NULL) {
		char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);

		memcpy(name, fileset, strlen(fileset));

	lustre_msg_add_flags(req->rq_reqmsg, LUSTRE_IMP_FULL);
	req->rq_send_state = LUSTRE_IMP_FULL;
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		GOTO(out, rc = -EPROTO);

	*rootfid = body->mbo_fid1;
	CDEBUG(D_NET, "root fid="DFID", last_committed=%llu\n",
	       PFID(rootfid), lustre_msg_get_last_committed(req->rq_repmsg));

	ptlrpc_req_finished(req);
 * This function is known to always say that it will receive 4 buffers
 * from the server. Even when acl_size and md_size are zero, the RPC header
 * will contain 4 fields and the RPC itself will contain zero-size fields.
 * This is because mdt_getattr*() _always_ returns 4 fields, but if the ACL
 * is not needed and thus has zero size, the field is shrunk to zero. The
 * same goes for md_size. This is the cause of problems when the client
 * waits for a smaller number of fields. This issue will be fixed later
 * when the client becomes aware of the RPC
static int mdc_getattr_common(struct obd_export *exp,
			      struct ptlrpc_request *req)
	struct req_capsule *pill = &req->rq_pill;
	struct mdt_body *body;

	/* Request message already built. */
	rc = ptlrpc_queue_wait(req);

	/* sanity check for the reply */
	body = req_capsule_server_get(pill, &RMF_MDT_BODY);

	CDEBUG(D_NET, "mode: %o\n", body->mbo_mode);

	mdc_update_max_ea_from_body(exp, body);
	if (body->mbo_eadatasize != 0) {
		eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
						      body->mbo_eadatasize);
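
/* Discard the reply buffers of \a req under rq_early_free_lock so that the
 * request can be re-sent, e.g. after the guessed ACL reply buffer turned
 * out to be too small and must be enlarged. */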
static void mdc_reset_acl_req(struct ptlrpc_request *req)
	spin_lock(&req->rq_early_free_lock);
	sptlrpc_cli_free_repbuf(req);
	req->rq_repbuf = NULL;
	req->rq_repbuf_len = 0;
	req->rq_repdata = NULL;
	req->rq_reqdata_len = 0;
	spin_unlock(&req->rq_early_free_lock);
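
/* Fetch attributes for the FID in \a op_data. The ACL reply buffer starts
 * at the old default size; if the server's ACL turns out to be larger, the
 * reply buffers are reset with mdc_reset_acl_req() and the request is
 * re-sent with a bigger buffer, bounded by ocd_max_easize. */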
static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
		       struct ptlrpc_request **request)
	struct ptlrpc_request *req;
	struct obd_import *imp = class_exp2cliimp(exp);
	__u32 acl_bufsize = LUSTRE_POSIX_ACL_MAX_SIZE_OLD;

	/* Single MDS without an LMV case */
	if (op_data->op_flags & MF_GET_MDT_IDX) {

	req = ptlrpc_request_alloc(imp, &RQF_MDS_GETATTR);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
		ptlrpc_request_free(req);

	mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid,
		      op_data->op_mode, -1, 0);
	req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, acl_bufsize);
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,

	ptlrpc_request_set_replen(req);

	rc = mdc_getattr_common(exp, req);
		acl_bufsize = min_t(__u32,
				    imp->imp_connect_data.ocd_max_easize,
		mdc_reset_acl_req(req);

	ptlrpc_req_finished(req);
static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
			    struct ptlrpc_request **request)
	struct ptlrpc_request *req;
	struct obd_import *imp = class_exp2cliimp(exp);
	__u32 acl_bufsize = LUSTRE_POSIX_ACL_MAX_SIZE_OLD;

	req = ptlrpc_request_alloc(imp, &RQF_MDS_GETATTR_NAME);

	req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
			     op_data->op_namelen + 1);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME);
		ptlrpc_request_free(req);

	if (op_data->op_name) {
		char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);

		LASSERT(strnlen(op_data->op_name, op_data->op_namelen) ==
			op_data->op_namelen);
		memcpy(name, op_data->op_name, op_data->op_namelen);

	mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid,
		      op_data->op_mode, op_data->op_suppgids[0], 0);
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,

	req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, acl_bufsize);
	ptlrpc_request_set_replen(req);

	rc = mdc_getattr_common(exp, req);
		acl_bufsize = min_t(__u32,
				    imp->imp_connect_data.ocd_max_easize,
		mdc_reset_acl_req(req);

	ptlrpc_req_finished(req);
static int mdc_xattr_common(struct obd_export *exp, const struct req_format *fmt,
			    const struct lu_fid *fid, int opcode, u64 valid,
			    const char *xattr_name, const char *input,
			    int input_size, int output_size, int flags,
			    __u32 suppgid, struct ptlrpc_request **request)
	struct ptlrpc_request *req;
	int xattr_namelen = 0;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);

		xattr_namelen = strlen(xattr_name) + 1;
		req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,

		req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,

	/* get SELinux policy info if any */
	rc = sptlrpc_get_sepol(req);
		ptlrpc_request_free(req);

	req_capsule_set_size(&req->rq_pill, &RMF_SELINUX_POL, RCL_CLIENT,
			     strlen(req->rq_sepol) ?
			     strlen(req->rq_sepol) + 1 : 0);

	/* Flush local XATTR locks to get rid of a possible cancel RPC */
	if (opcode == MDS_REINT && fid_is_sane(fid) &&
	    exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {

		/* Without this, packing would fail */
		req_capsule_set_size(&req->rq_pill, &RMF_EADATA,

		count = mdc_resource_get_unused(exp, fid,
						MDS_INODELOCK_XATTR);

		rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
			ptlrpc_request_free(req);

		rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
			ptlrpc_request_free(req);

	if (opcode == MDS_REINT) {
		struct mdt_rec_setxattr *rec;

		BUILD_BUG_ON(sizeof(struct mdt_rec_setxattr) !=
			     sizeof(struct mdt_rec_reint));
		rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
		rec->sx_opcode = REINT_SETXATTR;
		rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
		rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid());
		rec->sx_cap = cfs_curproc_cap_pack();
		rec->sx_suppgid1 = suppgid;
		rec->sx_suppgid2 = -1;

		rec->sx_valid = valid | OBD_MD_FLCTIME;
		rec->sx_time = ktime_get_real_seconds();
		rec->sx_size = output_size;
		rec->sx_flags = flags;

		mdc_pack_body(req, fid, valid, output_size, suppgid, flags);

		tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
		memcpy(tmp, xattr_name, xattr_namelen);

		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, input, input_size);

	mdc_file_sepol_pack(req);

	if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER))
		req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
				     RCL_SERVER, output_size);
	ptlrpc_request_set_replen(req);

	if (opcode == MDS_REINT)
		ptlrpc_get_mod_rpc_slot(req);

	rc = ptlrpc_queue_wait(req);

	if (opcode == MDS_REINT)
		ptlrpc_put_mod_rpc_slot(req);

	ptlrpc_req_finished(req);
static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
			u64 obd_md_valid, const char *name,
			const void *value, size_t value_size,
			unsigned int xattr_flags, u32 suppgid,
			struct ptlrpc_request **req)
	LASSERT(obd_md_valid == OBD_MD_FLXATTR ||
		obd_md_valid == OBD_MD_FLXATTRRM);

	return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
				fid, MDS_REINT, obd_md_valid, name,
				value, value_size, 0, xattr_flags, suppgid,

static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
			u64 obd_md_valid, const char *name, size_t buf_size,
			struct ptlrpc_request **req)
	struct mdt_body *body;

	LASSERT(obd_md_valid == OBD_MD_FLXATTR ||
		obd_md_valid == OBD_MD_FLXATTRLS);

	/* The below message is checked in sanity-selinux.sh test_20d */
	CDEBUG(D_INFO, "%s: get xattr '%s' for "DFID"\n",
	       exp->exp_obd->obd_name, name, PFID(fid));
	rc = mdc_xattr_common(exp, &RQF_MDS_GETXATTR, fid, MDS_GETXATTR,
			      obd_md_valid, name, NULL, 0, buf_size, 0, -1,

	body = req_capsule_server_get(&(*req)->rq_pill, &RMF_MDT_BODY);
		GOTO(out, rc = -EPROTO);

	/* only detect the xattr size */
		/* LU-11109: Older MDTs do not distinguish
		 * between nonexistent xattrs and zero length
		 * values in this case. Newer MDTs will return
		 * -ENODATA or set OBD_MD_FLXATTR. */
		GOTO(out, rc = body->mbo_eadatasize);

	if (body->mbo_eadatasize == 0) {
		/* LU-11109: Newer MDTs set OBD_MD_FLXATTR on
		 * success so that we can distinguish between
		 * zero length value and nonexistent xattr.
		 *
		 * If OBD_MD_FLXATTR is not set then we keep
		 * the old behavior and return -ENODATA for
		 * getxattr() when mbo_eadatasize is 0. But
		 * -ENODATA only makes sense for getxattr()
		 * and not for listxattr(). */
		if (body->mbo_valid & OBD_MD_FLXATTR)
		else if (obd_md_valid == OBD_MD_FLXATTR)
			GOTO(out, rc = -ENODATA);

	GOTO(out, rc = body->mbo_eadatasize);

	ptlrpc_req_finished(*req);

#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
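/* Unpack the POSIX ACL carried in the RMF_ACL reply buffer into a
 * posix_acl, validating it before it is attached to \a md; returns 0
 * when the body carries no ACL at all. */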
static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
	struct req_capsule *pill = &req->rq_pill;
	struct mdt_body *body = md->body;
	struct posix_acl *acl;

	if (!body->mbo_aclsize)

	buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->mbo_aclsize);

	acl = posix_acl_from_xattr(&init_user_ns, buf, body->mbo_aclsize);

		CERROR("convert xattr to acl: %d\n", rc);

	rc = posix_acl_valid(&init_user_ns, acl);
		CERROR("validate acl: %d\n", rc);
		posix_acl_release(acl);

#define mdc_unpack_acl(req, md) 0

#ifdef HAVE_XA_IS_VALUE
# include <linux/xarray.h>
int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
		      struct obd_export *dt_exp, struct obd_export *md_exp,
		      struct lustre_md *md)
	struct req_capsule *pill = &req->rq_pill;

	memset(md, 0, sizeof(*md));

	md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
	LASSERT(md->body != NULL);

	if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
		if (!S_ISREG(md->body->mbo_mode)) {
			CDEBUG(D_INFO,
			       "OBD_MD_FLEASIZE set, should be a regular file, but is not\n");
			GOTO(out, rc = -EPROTO);

		if (md->body->mbo_eadatasize == 0) {
			CDEBUG(D_INFO,
			       "OBD_MD_FLEASIZE set, but eadatasize 0\n");
			GOTO(out, rc = -EPROTO);

		md->layout.lb_len = md->body->mbo_eadatasize;
		md->layout.lb_buf = req_capsule_server_sized_get(pill,

		if (md->layout.lb_buf == NULL)
			GOTO(out, rc = -EPROTO);
	} else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
		const union lmv_mds_md *lmv;

		if (!S_ISDIR(md->body->mbo_mode)) {
			CDEBUG(D_INFO,
			       "OBD_MD_FLDIREA set, should be a directory, but is not\n");
			GOTO(out, rc = -EPROTO);

		if (md->body->mbo_valid & OBD_MD_MEA) {
			lmv_size = md->body->mbo_eadatasize;
				CDEBUG(D_INFO,
				       "OBD_MD_FLDIREA is set, but eadatasize 0\n");

			lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,

				GOTO(out, rc = -EPROTO);

			rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size);

			if (rc < (int)sizeof(*md->lmv)) {
				struct lmv_foreign_md *lfm = md->lfm;

				/* short (< sizeof(struct lmv_stripe_md))

				if (lfm->lfm_magic != LMV_MAGIC_FOREIGN) {
					       "lmv size too small: %d < %d\n",
					       rc, (int)sizeof(*md->lmv));
					GOTO(out, rc = -EPROTO);

		/* since 2.12.58 intent_getattr fetches default LMV */
		if (md->body->mbo_valid & OBD_MD_DEFAULT_MEA) {
			lmv_size = sizeof(struct lmv_user_md);
			lmv = req_capsule_server_sized_get(pill,

				GOTO(out, rc = -EPROTO);

			rc = md_unpackmd(md_exp, &md->default_lmv, lmv,

			if (rc < (int)sizeof(*md->default_lmv)) {
				       "default lmv size too small: %d < %d\n",
				       rc, (int)sizeof(*md->default_lmv));
				GOTO(out, rc = -EPROTO);

	if (md->body->mbo_valid & OBD_MD_FLACL) {
		/* for ACL, it is possible that FLACL is set but aclsize is
		 * zero; only when aclsize != 0 is there an actual segment
		 * for the ACL

		if (md->body->mbo_aclsize) {
			rc = mdc_unpack_acl(req, md);

#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
			md->posix_acl = NULL;

#ifdef CONFIG_LUSTRE_FS_POSIX_ACL
	posix_acl_release(md->posix_acl);
int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
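
/* Replay callback for open requests: after recovery the server returns a
 * new open handle, so update the handle saved in the obd_client_handle and
 * in any pending close request to match the replayed reply. */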
void mdc_replay_open(struct ptlrpc_request *req)
	struct md_open_data *mod = req->rq_cb_data;
	struct ptlrpc_request *close_req;
	struct obd_client_handle *och;
	struct lustre_handle old_open_handle = { };
	struct mdt_body *body;

		DEBUG_REQ(D_ERROR, req,
			  "cannot properly replay without open data");

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	LASSERT(body != NULL);

	spin_lock(&req->rq_lock);
	if (och && och->och_open_handle.cookie)
		req->rq_early_free_repbuf = 1;
		req->rq_early_free_repbuf = 0;
	spin_unlock(&req->rq_lock);

	if (req->rq_early_free_repbuf) {
		struct lustre_handle *file_open_handle;

		LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);

		file_open_handle = &och->och_open_handle;
		CDEBUG(D_HA, "updating handle from %#llx to %#llx\n",
		       file_open_handle->cookie, body->mbo_open_handle.cookie);
		old_open_handle = *file_open_handle;
		*file_open_handle = body->mbo_open_handle;

	close_req = mod->mod_close_req;
		__u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
		struct mdt_ioepoch *epoch;

		LASSERT(opc == MDS_CLOSE);
		epoch = req_capsule_client_get(&close_req->rq_pill,

		if (req->rq_early_free_repbuf)
			LASSERT(old_open_handle.cookie ==
				epoch->mio_open_handle.cookie);

		DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
		epoch->mio_open_handle = body->mbo_open_handle;
void mdc_commit_open(struct ptlrpc_request *req)
	struct md_open_data *mod = req->rq_cb_data;

	 * No need to touch md_open_data::mod_och, it holds a reference on
	 * \var mod; they will drop their references to each other, and
	 * \var mod will be freed when md_open_data::mod_och puts its
	 * reference.

	 * Do not let the open request disappear, as it may still be needed
	 * for the close RPC to happen (that can occur on evict only;
	 * otherwise ptlrpc_request::rq_replay does not let mdc_commit_open()
	 * be called). Just mark this RPC as committed to distinguish these 2
	 * cases, see mdc_close() for details. The open request reference will
	 * be put along with freeing \var mod.
	ptlrpc_request_addref(req);
	spin_lock(&req->rq_lock);
	req->rq_committed = 1;
	spin_unlock(&req->rq_lock);
	req->rq_cb_data = NULL;
int mdc_set_open_replay_data(struct obd_export *exp,
			     struct obd_client_handle *och,
			     struct lookup_intent *it)
	struct md_open_data *mod;
	struct mdt_rec_create *rec;
	struct mdt_body *body;
	struct ptlrpc_request *open_req = it->it_request;
	struct obd_import *imp = open_req->rq_import;

	if (!open_req->rq_replay)

	rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
	body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
	LASSERT(rec != NULL);
	/* Incoming message in my byte order (it's been swabbed). */
	/* Outgoing messages always in my byte order. */
	LASSERT(body != NULL);

	/* Only if the import is replayable do we set the replay_open data */
	if (och && imp->imp_replayable) {
		mod = obd_mod_alloc();
			DEBUG_REQ(D_ERROR, open_req,
				  "cannot allocate md_open_data");

		 * Take a reference on \var mod, to be freed on mdc_close().
		 * It protects \var mod from being freed on eviction (commit
		 * callback is called despite rq_replay flag).
		 * Another reference for \var och.

		spin_lock(&open_req->rq_lock);

		mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) ||
				     it_disposition(it, DISP_OPEN_STRIPE);
		mod->mod_open_req = open_req;
		open_req->rq_cb_data = mod;
		open_req->rq_commit_cb = mdc_commit_open;
		open_req->rq_early_free_repbuf = 1;
		spin_unlock(&open_req->rq_lock);

	rec->cr_fid2 = body->mbo_fid1;
	rec->cr_open_handle_old = body->mbo_open_handle;
	open_req->rq_replay_cb = mdc_replay_open;
	if (!fid_is_sane(&body->mbo_fid1)) {
		DEBUG_REQ(D_ERROR, open_req,
			  "saving replay request with insane FID " DFID,
			  PFID(&body->mbo_fid1));

	DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data");
static void mdc_free_open(struct md_open_data *mod)
	if (mod->mod_is_create == 0 &&
	    imp_connect_disp_stripe(mod->mod_open_req->rq_import))

	 * No reason to assert here if the open request has
	 * rq_replay == 1. It means that mdc_close failed, and the
	 * close request wasn't sent. It is not fatal to the client.
	 * The worst thing is eviction if the client gets an open lock

	DEBUG_REQ(D_RPCTRACE, mod->mod_open_req,
		  "free open request, rq_replay=%d",
		  mod->mod_open_req->rq_replay);

	ptlrpc_request_committed(mod->mod_open_req, committed);
	if (mod->mod_close_req)
		ptlrpc_request_committed(mod->mod_close_req, committed);
int mdc_clear_open_replay_data(struct obd_export *exp,
			       struct obd_client_handle *och)
	struct md_open_data *mod = och->och_mod;

	 * It is possible not to have \var mod in case of eviction between
	 * lookup and ll_file_open().

	LASSERT(mod != LP_POISON);
	LASSERT(mod->mod_open_req != NULL);

	spin_lock(&mod->mod_open_req->rq_lock);
	mod->mod_och->och_open_handle.cookie = 0;
	mod->mod_open_req->rq_early_free_repbuf = 0;
	spin_unlock(&mod->mod_open_req->rq_lock);
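
/* Send an MDS_CLOSE (or close-intent) request for the file referenced by
 * \a op_data. On an HSM release intent a volatile FID is allocated first;
 * the RPC is sent to MDS_READPAGE_PORTAL and takes a modify-RPC slot, and
 * the matching open request is dropped from replay once the close is
 * under way. */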
static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
		     struct md_open_data *mod, struct ptlrpc_request **request)
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req;
	struct req_format *req_fmt;
	size_t u32_count = 0;

	CDEBUG(D_INODE, "%s: "DFID" file closed with intent: %x\n",
	       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),

	if (op_data->op_bias & MDS_CLOSE_INTENT) {
		req_fmt = &RQF_MDS_CLOSE_INTENT;
		if (op_data->op_bias & MDS_HSM_RELEASE) {
			/* allocate a FID for volatile file */
			rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2,
				CERROR("%s: "DFID" allocating FID: rc = %d\n",
				       obd->obd_name, PFID(&op_data->op_fid1),
				/* save the errcode and proceed to close */

		if (op_data->op_bias & MDS_CLOSE_RESYNC_DONE) {
			size_t count = op_data->op_data_size / sizeof(__u32);

			if (count > INLINE_RESYNC_ARRAY_SIZE)

		req_fmt = &RQF_MDS_CLOSE;

	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_CLOSE))

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);

	/* Ensure that this close's handle is fixed up during replay. */
	if (likely(mod != NULL)) {
		LASSERTF(mod->mod_open_req != NULL &&
			 mod->mod_open_req->rq_type != LI_POISON,
			 "POISONED open %p!\n", mod->mod_open_req);

		mod->mod_close_req = req;

		DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, "matched open");
		/* We no longer want to preserve this open for replay even
		 * though the open was committed. b=3632, b=3633 */
		spin_lock(&mod->mod_open_req->rq_lock);
		mod->mod_open_req->rq_replay = 0;
		spin_unlock(&mod->mod_open_req->rq_lock);
		CDEBUG(D_HA, "couldn't find open req; expecting close error\n");

	 * TODO: repeat close after errors
		CWARN("%s: close of FID "DFID" failed, file reference will be dropped when this client unmounts or is evicted\n",
		      obd->obd_name, PFID(&op_data->op_fid1));
		GOTO(out, rc = -ENOMEM);

		req_capsule_set_size(&req->rq_pill, &RMF_U32, RCL_CLIENT,
				     u32_count * sizeof(__u32));

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
		ptlrpc_request_free(req);

	/* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
	 * portal whose threads are not taking any DLM locks and are therefore
	 * always progressing */
	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	if (!(exp_connect_flags2(exp) & OBD_CONNECT2_LSOM))
		op_data->op_xvalid &= ~(OP_XVALID_LAZYSIZE |
					OP_XVALID_LAZYBLOCKS);

	mdc_close_pack(req, op_data);

	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     obd->u.cli.cl_default_mds_easize);

	ptlrpc_request_set_replen(req);

	ptlrpc_get_mod_rpc_slot(req);
	rc = ptlrpc_queue_wait(req);
	ptlrpc_put_mod_rpc_slot(req);

	if (req->rq_repmsg == NULL) {
		CDEBUG(D_RPCTRACE, "request %p failed to send: rc = %d\n", req,

		rc = req->rq_status ?: -EIO;
	} else if (rc == 0 || rc == -EAGAIN) {
		struct mdt_body *body;

		rc = lustre_msg_get_status(req->rq_repmsg);
		if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
			DEBUG_REQ(D_ERROR, req,
				  "type = PTL_RPC_MSG_ERR: rc = %d", rc);

		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	} else if (rc == -ESTALE) {
		 * This can be an allowed error after bug 3633 if the open was
		 * committed and the server failed before the close was sent.
		 * Check whether mod exists and return no error in that case.
		DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
		LASSERT(mod->mod_open_req != NULL);
		if (mod->mod_open_req->rq_committed)

		mod->mod_close_req = NULL;
		/* From now on, mod is accessed through open_req only, so
		 * the close req no longer keeps a reference on mod. */

	RETURN(rc < 0 ? rc : saved_rc);
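
/* Fetch up to \a npages directory pages from the MDS in a single
 * MDS_READPAGE bulk RPC, resending with a growing backoff on -ETIMEDOUT
 * until the client's resend limit is reached. */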
static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
		       u64 offset, struct page **pages, int npages,
		       struct ptlrpc_request **request)
	struct ptlrpc_request *req;
	struct ptlrpc_bulk_desc *desc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
		ptlrpc_request_free(req);

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
				    PTLRPC_BULK_PUT_SINK,
				    &ptlrpc_bulk_kiov_pin_ops);
		ptlrpc_req_finished(req);

	/* NB req now owns desc and will free it when it gets freed */
	for (i = 0; i < npages; i++)
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,

	mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);

	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
		ptlrpc_req_finished(req);
		if (rc != -ETIMEDOUT)

		if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
			CERROR("%s: too many resend retries: rc = %d\n",
			       exp->exp_obd->obd_name, -EIO);

		/* If a signal interrupts the sleep, the remaining timeout
		 * returned will be non-zero. In that case return -EINTR.
		if (msleep_interruptible(resends * 1000))

	rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
					  req->rq_bulk->bd_nob_transferred);
		ptlrpc_req_finished(req);

	if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
		CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n",
		       exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred,
		       PAGE_SIZE * npages);
		ptlrpc_req_finished(req);
static void mdc_release_page(struct page *page, int remove)
		if (likely(page->mapping != NULL))
			delete_from_page_cache(page);

static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
				    __u64 *start, __u64 *end, int hash64)
	 * The complement of the hash is used as an index so that
	 * radix_tree_gang_lookup() can be used to find a page with starting
	 * hash _smaller_ than the one we are looking for.
	unsigned long offset = hash_x_index(*hash, hash64);

	unsigned long flags;

	xa_lock_irqsave(&mapping->i_pages, flags);
	found = radix_tree_gang_lookup(&mapping->page_tree,
				       (void **)&page, offset, 1);
	if (found > 0 && !xa_is_value(page)) {
		struct lu_dirpage *dp;

		xa_unlock_irqrestore(&mapping->i_pages, flags);
		 * In contrast to find_lock_page() we are sure that the
		 * directory page cannot be truncated (while the DLM lock is
		 * held) and, hence, can avoid a restart.
		 *
		 * In fact, the page cannot be locked here at all, because
		 * mdc_read_page_remote() does synchronous I/O.
		wait_on_page_locked(page);
		if (PageUptodate(page)) {
			if (BITS_PER_LONG == 32 && hash64) {
				*start = le64_to_cpu(dp->ldp_hash_start) >> 32;
				*end = le64_to_cpu(dp->ldp_hash_end) >> 32;
				*hash = *hash >> 32;
				*start = le64_to_cpu(dp->ldp_hash_start);
				*end = le64_to_cpu(dp->ldp_hash_end);
			if (unlikely(*start == 1 && *hash == 0))

			LASSERTF(*start <= *hash,
				 "start = %#llx, end = %#llx, hash = %#llx\n",
				 *start, *end, *hash);
			CDEBUG(D_VFSTRACE,
			       "offset %lx [%#llx %#llx], hash %#llx\n",
			       offset, *start, *end, *hash);

			mdc_release_page(page, 0);

		} else if (*end != *start && *hash == *end) {
			 * upon hash collision, remove this page;
			 * otherwise put the page reference, and
			 * mdc_read_page_remote() will issue an RPC to
			 * fetch the page we want.
			mdc_release_page(page,
					 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);

		page = ERR_PTR(-EIO);

		xa_unlock_irqrestore(&mapping->i_pages, flags);
 * Adjust a set of pages, each page containing an array of lu_dirpages,
 * so that each page can be used as a single logical lu_dirpage.
 *
 * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
 * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
 * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end
 * value is used as a cookie to request the next lu_dirpage in a
 * directory listing that spans multiple pages (two in this example):
 *
 *   .|--------v------- -----.
 *   |s|e|f|p|ent|ent| ... |ent|
 *   '--|-------------- -----'   Each PAGE contains a single
 *      '------.                 lu_dirpage.
 *   .---------v------- -----.
 *   |s|e|f|p|ent| 0 | ... | 0 |
 *   '----------------- -----'
 *
 * However, on hosts where the native VM page size (PAGE_SIZE) is
 * larger than LU_PAGE_SIZE, a single host page may contain multiple
 * lu_dirpages. After reading the lu_dirpages from the MDS, the
 * ldp_hash_end of the first lu_dirpage refers to the one immediately
 * after it in the same PAGE (arrows simplified for brevity, but
 * in general e0==s1, e1==s2, etc.):
 *
 * .-------------------- -----.
 * |s0|e0|f0|p|ent|ent| ... |ent|
 * |---v---------------- -----|
 * |s1|e1|f1|p|ent|ent| ... |ent|
 * |---v---------------- -----|   Here, each PAGE contains
 *             ...                multiple lu_dirpages.
 * |---v---------------- -----|
 * |s'|e'|f'|p|ent|ent| ... |ent|
 * '---|---------------- -----'
 *
 * .----------------------------.
 *
 * This structure is transformed into a single logical lu_dirpage as follows:
 *
 * - Replace e0 with e' so the request for the next lu_dirpage gets the page
 *   labeled 'next PAGE'.
 *
 * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
 *   a hash collision with the next page exists.
 *
 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
 *   to the first entry of the next lu_dirpage.
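 *
 * For example, on a host with a 64KiB PAGE_SIZE and the usual 4KiB
 * LU_PAGE_SIZE, each host page holds up to 16 lu_dirpages, and
 * mdc_adjust_dirpages() below collapses them into one logical lu_dirpage
 * per host page.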
#if PAGE_SIZE > LU_PAGE_SIZE
static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs)
	for (i = 0; i < cfs_pgs; i++) {
		struct lu_dirpage *dp = kmap(pages[i]);
		struct lu_dirpage *first = dp;
		struct lu_dirent *end_dirent = NULL;
		struct lu_dirent *ent;
		__u64 hash_end = dp->ldp_hash_end;
		__u32 flags = dp->ldp_flags;

		while (--lu_pgs > 0) {
			ent = lu_dirent_start(dp);
			for (end_dirent = ent; ent != NULL;
			     end_dirent = ent, ent = lu_dirent_next(ent));

			/* Advance dp to next lu_dirpage. */
			dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);

			/* Check if we've reached the end of the PAGE. */
			if (!((unsigned long)dp & ~PAGE_MASK))

			/* Save the hash and flags of this lu_dirpage. */
			hash_end = dp->ldp_hash_end;
			flags = dp->ldp_flags;

			/* Check if lu_dirpage contains no entries. */
			if (end_dirent == NULL)

			/* Enlarge the end entry lde_reclen from 0 to
			 * span to the first entry of the next lu_dirpage. */
			LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0);
			end_dirent->lde_reclen =
				cpu_to_le16((char *)(dp->ldp_entries) -
					    (char *)end_dirent);

		first->ldp_hash_end = hash_end;
		first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
		first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);

	LASSERTF(lu_pgs == 0, "left = %d\n", lu_pgs);

#define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0)
#endif	/* PAGE_SIZE > LU_PAGE_SIZE */
/* parameters for readdir page */
struct readpage_param {
	struct md_op_data	*rp_mod;
	struct obd_export	*rp_exp;
	struct md_callback	*rp_cb;
 * Read pages from the server.
 *
 * Pages in the MDS_READPAGE RPC are packed in units of LU_PAGE_SIZE, and
 * each carries an lu_dirpage header which describes the start/end hash,
 * and whether the page is empty (contains no dir entry) or hash collides
 * with the next page. After the client receives the reply, several pages
 * are integrated into one dir page of PAGE_SIZE (if PAGE_SIZE is greater
 * than LU_PAGE_SIZE), and the lu_dirpage of this integrated page is
 * adjusted accordingly.
static int mdc_read_page_remote(void *data, struct page *page0)
	struct readpage_param *rp = data;
	struct page **page_pool;

	struct lu_dirpage *dp;
	struct md_op_data *op_data = rp->rp_mod;
	struct ptlrpc_request *req;

	struct inode *inode;

	int rd_pgs = 0; /* number of pages actually read */

	max_pages = rp->rp_exp->exp_obd->u.cli.cl_max_pages_per_rpc;
	inode = op_data->op_data;
	fid = &op_data->op_fid1;
	LASSERT(inode != NULL);

	OBD_ALLOC(page_pool, sizeof(page_pool[0]) * max_pages);
	if (page_pool != NULL) {
		page_pool[0] = page0;

	for (npages = 1; npages < max_pages; npages++) {
		page = page_cache_alloc(inode->i_mapping);
		page_pool[npages] = page;

	rc = mdc_getpage(rp->rp_exp, fid, rp->rp_off, page_pool, npages, &req);
		/* page0 is special: it was added into the page cache early */
		delete_from_page_cache(page0);

		rd_pgs = (req->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1) >>

		lu_pgs = req->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
		LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));

		CDEBUG(D_INODE, "read %d(%d) pages\n", rd_pgs, lu_pgs);

		mdc_adjust_dirpages(page_pool, rd_pgs, lu_pgs);

		SetPageUptodate(page0);

	ptlrpc_req_finished(req);
	CDEBUG(D_CACHE, "read %d/%d pages\n", rd_pgs, npages);
	for (i = 1; i < npages; i++) {
		unsigned long offset;

		page = page_pool[i];

		if (rc < 0 || i >= rd_pgs) {

		SetPageUptodate(page);

		hash = le64_to_cpu(dp->ldp_hash_start);

		offset = hash_x_index(hash, rp->rp_hash64);

		prefetchw(&page->flags);
		ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
			CDEBUG(D_VFSTRACE,
			       "page %lu add to page cache failed: rc = %d\n",
			       offset, ret);

	if (page_pool != &page0)
		OBD_FREE(page_pool, sizeof(page_pool[0]) * max_pages);
 * Read a dir page from the cache first; if it cannot be found there, read
 * it from the server and add it into the cache.
 *
 * \param[in] exp	MDC export
 * \param[in] op_data	client MD stack parameters, transferring parameters
 *			between different layers of the client MD stack
 * \param[in] cb_op	callback required for ldlm lock enqueue during
 * \param[in] hash_offset	the hash offset of the page to be read
 * \param[out] ppage	the page to be read
 *
 * \retval 0 on getting the page successfully
 * \retval negative errno on failure to get the page
static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
			 struct md_callback *cb_op, __u64 hash_offset,
			 struct page **ppage)
	struct lookup_intent it = { .it_op = IT_READDIR };

	struct inode *dir = op_data->op_data;
	struct address_space *mapping;
	struct lu_dirpage *dp;

	struct lustre_handle lockh;
	struct ptlrpc_request *enq_req = NULL;
	struct readpage_param rp_param;

	LASSERT(dir != NULL);
	mapping = dir->i_mapping;

	rc = mdc_intent_lock(exp, op_data, &it, &enq_req,
			     cb_op->md_blocking_ast, 0);
	if (enq_req != NULL)
		ptlrpc_req_finished(enq_req);

		CERROR("%s: "DFID" lock enqueue fails: rc = %d\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1), rc);

	lockh.cookie = it.it_lock_handle;
	mdc_set_lock_data(exp, &lockh, dir, NULL);

	rp_param.rp_off = hash_offset;
	rp_param.rp_hash64 = op_data->op_cli_flags & CLI_HASH64;
	page = mdc_page_locate(mapping, &rp_param.rp_off, &start, &end,
			       rp_param.rp_hash64);
		CERROR("%s: dir page locate: "DFID" at %llu: rc %ld\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
		       rp_param.rp_off, PTR_ERR(page));
		GOTO(out_unlock, rc = PTR_ERR(page));
	} else if (page != NULL) {
		 * XXX nikita: not entirely correct handling of a corner case:
		 * suppose hash chain of entries with hash value HASH crosses
		 * border between pages P0 and P1. First both P0 and P1 are
		 * cached, seekdir() is called for some entry from the P0 part
		 * of the chain. Later P0 goes out of cache. telldir(HASH)
		 * happens and finds P1, as it starts with matching hash
		 * value. Remaining entries from P0 part of the chain are
		 * skipped. (Is that really a bug?)
		 *
		 * Possible solutions: 0. don't cache P1 in such a case,
		 * handle it as an "overflow" page. 1. invalidate all pages
		 * at once. 2. use HASH|1 as an index for P1.
		GOTO(hash_collision, page);

	rp_param.rp_exp = exp;
	rp_param.rp_mod = op_data;
	page = read_cache_page(mapping,
			       hash_x_index(rp_param.rp_off,
					    rp_param.rp_hash64),
			       mdc_read_page_remote, &rp_param);
		CDEBUG(D_INFO, "%s: read cache page: "DFID" at %llu: %ld\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
		       rp_param.rp_off, PTR_ERR(page));
		GOTO(out_unlock, rc = PTR_ERR(page));

	wait_on_page_locked(page);

	if (!PageUptodate(page)) {
		CERROR("%s: page not updated: "DFID" at %llu: rc %d\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
		       rp_param.rp_off, -5);

	if (!PageChecked(page))
		SetPageChecked(page);
	if (PageError(page)) {
		CERROR("%s: page error: "DFID" at %llu: rc %d\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
		       rp_param.rp_off, -5);

	dp = page_address(page);
	if (BITS_PER_LONG == 32 && rp_param.rp_hash64) {
		start = le64_to_cpu(dp->ldp_hash_start) >> 32;
		end = le64_to_cpu(dp->ldp_hash_end) >> 32;
		rp_param.rp_off = hash_offset >> 32;
		start = le64_to_cpu(dp->ldp_hash_start);
		end = le64_to_cpu(dp->ldp_hash_end);
		rp_param.rp_off = hash_offset;

	LASSERT(start == rp_param.rp_off);
	CWARN("Page-wide hash collision: %#lx\n", (unsigned long)end);
#if BITS_PER_LONG == 32
	CWARN("Real page-wide hash collision at [%llu %llu] with hash %llu\n",
	      le64_to_cpu(dp->ldp_hash_start),
	      le64_to_cpu(dp->ldp_hash_end), hash_offset);

	 * Fetch whole overflow chain...

	ldlm_lock_decref(&lockh, it.it_lock_mode);

	mdc_release_page(page, 1);
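
/* Completion callback for an asynchronous MDS_STATFS request: unpack the
 * obd_statfs reply and hand it to the caller through oi_cb_up. */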
static int mdc_statfs_interpret(const struct lu_env *env,
				struct ptlrpc_request *req, void *args, int rc)
	struct obd_info *oinfo = args;
	struct obd_statfs *osfs;

	osfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);

	oinfo->oi_osfs = osfs;

	CDEBUG(D_CACHE,
	       "blocks=%llu free=%llu avail=%llu objects=%llu free=%llu state=%x\n",
	       osfs->os_blocks, osfs->os_bfree, osfs->os_bavail,
	       osfs->os_files, osfs->os_ffree, osfs->os_state);

	oinfo->oi_cb_up(oinfo, rc);

static int mdc_statfs_async(struct obd_export *exp,
			    struct obd_info *oinfo, time64_t max_age,
			    struct ptlrpc_request_set *unused)
	struct ptlrpc_request *req;
	struct obd_info *aa;

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_STATFS,
					LUSTRE_MDS_VERSION, MDS_STATFS);

	ptlrpc_request_set_replen(req);
	req->rq_interpret_reply = mdc_statfs_interpret;

	aa = ptlrpc_req_async_args(aa, req);

	ptlrpcd_add_req(req);
static int mdc_statfs(const struct lu_env *env,
		      struct obd_export *exp, struct obd_statfs *osfs,
		      time64_t max_age, __u32 flags)
	struct obd_device *obd = class_exp2obd(exp);
	struct req_format *fmt;
	struct ptlrpc_request *req;
	struct obd_statfs *msfs;
	struct obd_import *imp = NULL;

	 * Since the request might also come from lprocfs, we need to sync
	 * this with client_disconnect_export() (Bug 15684).
	down_read(&obd->u.cli.cl_sem);
	if (obd->u.cli.cl_import)
		imp = class_import_get(obd->u.cli.cl_import);
	up_read(&obd->u.cli.cl_sem);

	fmt = &RQF_MDS_STATFS;
	if ((exp_connect_flags2(exp) & OBD_CONNECT2_SUM_STATFS) &&
	    (flags & OBD_STATFS_SUM))
		fmt = &RQF_MDS_STATFS_NEW;
	req = ptlrpc_request_alloc_pack(imp, fmt, LUSTRE_MDS_VERSION,
		GOTO(output, rc = -ENOMEM);

	if ((flags & OBD_STATFS_SUM) &&
	    (exp_connect_flags2(exp) & OBD_CONNECT2_SUM_STATFS)) {
		/* request aggregated states */
		struct mdt_body *body;

		body = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
			GOTO(out, rc = -EPROTO);
		body->mbo_valid = OBD_MD_FLAGSTATFS;

	ptlrpc_request_set_replen(req);

	if (flags & OBD_STATFS_NODELAY) {
		/* procfs requests should not block waiting, to avoid deadlock */
		req->rq_no_resend = 1;
		req->rq_no_delay = 1;

	rc = ptlrpc_queue_wait(req);
		/* check connection error first */
		if (imp->imp_connect_error)
			rc = imp->imp_connect_error;

	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
		GOTO(out, rc = -EPROTO);

	ptlrpc_req_finished(req);

	class_import_put(imp);
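
/* Resolve a FID to a path with MDS_GET_INFO: the key buffer holds
 * KEY_FID2PATH, the getinfo_fid2path descriptor and the root FID, and
 * the value buffer returns the descriptor followed by the path itself. */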
static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
	__u32 keylen, vallen;

	if (gf->gf_pathlen > PATH_MAX)
		RETURN(-ENAMETOOLONG);
	if (gf->gf_pathlen < 2)

	/* Key is KEY_FID2PATH + getinfo_fid2path description */
	keylen = cfs_size_round(sizeof(KEY_FID2PATH) + sizeof(*gf) +
				sizeof(struct lu_fid));
	OBD_ALLOC(key, keylen);

	memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
	memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
	memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf),
	       gf->gf_u.gf_root_fid, sizeof(struct lu_fid));
	CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n",
	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);

	if (!fid_is_sane(&gf->gf_fid))
		GOTO(out, rc = -EINVAL);

	/* Val is struct getinfo_fid2path result plus path */
	vallen = sizeof(*gf) + gf->gf_pathlen;

	rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf);
	if (rc != 0 && rc != -EREMOTE)

	if (vallen <= sizeof(*gf))
		GOTO(out, rc = -EPROTO);
	if (vallen > sizeof(*gf) + gf->gf_pathlen)
		GOTO(out, rc = -EOVERFLOW);

	CDEBUG(D_IOCTL, "path got "DFID" from %llu #%d: %s\n",
	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno,
	       gf->gf_pathlen < 512 ? gf->gf_u.gf_path :
	       /* only log the last 512 characters of the path */
	       gf->gf_u.gf_path + gf->gf_pathlen - 512);

	OBD_FREE(key, keylen);
static int mdc_ioc_hsm_progress(struct obd_export *exp,
				struct hsm_progress_kernel *hpk)
	struct obd_import *imp = class_exp2cliimp(exp);
	struct hsm_progress_kernel *req_hpk;
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
					LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
		GOTO(out, rc = -ENOMEM);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);

	/* Copy hsm_progress struct */
	req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
	if (req_hpk == NULL)
		GOTO(out, rc = -EPROTO);

	req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval);

	ptlrpc_request_set_replen(req);

	ptlrpc_get_mod_rpc_slot(req);
	rc = ptlrpc_queue_wait(req);
	ptlrpc_put_mod_rpc_slot(req);

	ptlrpc_req_finished(req);
 * Send hsm_ct_register to MDS
 *
 * \param[in] imp		import
 * \param[in] archive_count	if in bitmap format, it is the bitmap;
 *				otherwise it is the count of archive_ids
 * \param[in] archives		if in bitmap format, it is NULL;
 *				otherwise it is the list of archive_ids
static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archive_count,
	struct ptlrpc_request *req;
	__u32 *archive_array;
	size_t archives_size;

	req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_CT_REGISTER);

	if (archives != NULL)
		archives_size = sizeof(*archive_array) * archive_count;
		archives_size = sizeof(archive_count);

	req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_ARCHIVE,
			     RCL_CLIENT, archives_size);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_CT_REGISTER);
		ptlrpc_request_free(req);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);

	archive_array = req_capsule_client_get(&req->rq_pill,
					       &RMF_MDS_HSM_ARCHIVE);
	if (archive_array == NULL)
		GOTO(out, rc = -EPROTO);

	if (archives != NULL)
		memcpy(archive_array, archives, archives_size);
		*archive_array = archive_count;

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);

	ptlrpc_req_finished(req);
static int mdc_ioc_hsm_current_action(struct obd_export *exp,
				      struct md_op_data *op_data)
	struct hsm_current_action *hca = op_data->op_data;
	struct hsm_current_action *req_hca;
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_ACTION);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
		ptlrpc_request_free(req);

	mdc_pack_body(req, &op_data->op_fid1, 0, 0,
		      op_data->op_suppgids[0], 0);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);

	req_hca = req_capsule_server_get(&req->rq_pill,
					 &RMF_MDS_HSM_CURRENT_ACTION);
	if (req_hca == NULL)
		GOTO(out, rc = -EPROTO);

	ptlrpc_req_finished(req);

static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
					MDS_HSM_CT_UNREGISTER);
		GOTO(out, rc = -ENOMEM);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
	ptlrpc_req_finished(req);

static int mdc_ioc_hsm_state_get(struct obd_export *exp,
				 struct md_op_data *op_data)
	struct hsm_user_state *hus = op_data->op_data;
	struct hsm_user_state *req_hus;
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_STATE_GET);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
		ptlrpc_request_free(req);

	mdc_pack_body(req, &op_data->op_fid1, 0, 0,
		      op_data->op_suppgids[0], 0);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);

	req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
	if (req_hus == NULL)
		GOTO(out, rc = -EPROTO);

	ptlrpc_req_finished(req);
static int mdc_ioc_hsm_state_set(struct obd_export *exp,
				 struct md_op_data *op_data)
	struct hsm_state_set *hss = op_data->op_data;
	struct hsm_state_set *req_hss;
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_STATE_SET);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
		ptlrpc_request_free(req);

	mdc_pack_body(req, &op_data->op_fid1, 0, 0,
		      op_data->op_suppgids[0], 0);

	req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
	if (req_hss == NULL)
		GOTO(out, rc = -EPROTO);

	ptlrpc_request_set_replen(req);

	ptlrpc_get_mod_rpc_slot(req);
	rc = ptlrpc_queue_wait(req);
	ptlrpc_put_mod_rpc_slot(req);

	ptlrpc_req_finished(req);

static int mdc_ioc_hsm_request(struct obd_export *exp,
			       struct hsm_user_request *hur)
	struct obd_import *imp = class_exp2cliimp(exp);
	struct ptlrpc_request *req;
	struct hsm_request *req_hr;
	struct hsm_user_item *req_hui;

	req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
		GOTO(out, rc = -ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM, RCL_CLIENT,
			     hur->hur_request.hr_itemcount
			     * sizeof(struct hsm_user_item));
	req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT,
			     hur->hur_request.hr_data_len);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST);
		ptlrpc_request_free(req);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);

	/* Copy hsm_request struct */
	req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
		GOTO(out, rc = -EPROTO);
	*req_hr = hur->hur_request;

	/* Copy hsm_user_item structs */
	req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
	if (req_hui == NULL)
		GOTO(out, rc = -EPROTO);
	memcpy(req_hui, hur->hur_user_item,
	       hur->hur_request.hr_itemcount * sizeof(struct hsm_user_item));

	/* Copy opaque field */
	req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
	if (req_opaque == NULL)
		GOTO(out, rc = -EPROTO);
	memcpy(req_opaque, hur_data(hur), hur->hur_request.hr_data_len);

	ptlrpc_request_set_replen(req);

	ptlrpc_get_mod_rpc_slot(req);
	rc = ptlrpc_queue_wait(req);
	ptlrpc_put_mod_rpc_slot(req);

	ptlrpc_req_finished(req);

static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
				struct lustre_kernelcomm *lk);
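
/* Send an MDS_QUOTACTL request and, on success, copy the quota control
 * reply from the server back into \a oqctl. */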
static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
			struct obd_quotactl *oqctl)
	struct ptlrpc_request *req;
	struct obd_quotactl *oqc;

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
					&RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,

	oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);

	ptlrpc_request_set_replen(req);
	ptlrpc_at_set_req_timeout(req);

	rc = ptlrpc_queue_wait(req);
		CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);

	if (req->rq_repmsg &&
	    (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {

		CERROR("Can't unpack obd_quotactl\n");

	ptlrpc_req_finished(req);
static int mdc_ioc_swap_layouts(struct obd_export *exp,
				struct md_op_data *op_data)
	struct ptlrpc_request *req;

	struct mdc_swap_layouts *msl, *payload;

	msl = op_data->op_data;

	/* When the MDT gets the MDS_SWAP_LAYOUTS RPC, the first thing it
	 * will do is cancel the 2 layout locks held by this client.
	 * So the client must cancel its layout locks on the 2 FIDs
	 * with the request RPC to avoid extra RPC round trips.
	count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
					LCK_EX, MDS_INODELOCK_LAYOUT |
					MDS_INODELOCK_XATTR);
	count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
					 LCK_EX, MDS_INODELOCK_LAYOUT |
					 MDS_INODELOCK_XATTR);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_SWAP_LAYOUTS);
		ldlm_lock_list_put(&cancels, l_bl_ast, count);

	rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count);
		ptlrpc_request_free(req);

	mdc_swap_layouts_pack(req, op_data);

	payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);

	ptlrpc_req_finished(req);
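
/* Main MDC ioctl dispatcher. A module reference is held across the
 * handler so the module cannot be unloaded while an ioctl is in flight. */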
static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
			 void *karg, void __user *uarg)
	struct obd_device *obd = exp->exp_obd;
	struct obd_ioctl_data *data = karg;
	struct obd_import *imp = obd->u.cli.cl_import;

	if (!try_module_get(THIS_MODULE)) {
		CERROR("%s: cannot get module '%s'\n", obd->obd_name,
		       module_name(THIS_MODULE));

	case OBD_IOC_FID2PATH:
		rc = mdc_ioc_fid2path(exp, karg);
	case LL_IOC_HSM_CT_START:
		rc = mdc_ioc_hsm_ct_start(exp, karg);
		/* ignore if it was already registered on this MDS. */

	case LL_IOC_HSM_PROGRESS:
		rc = mdc_ioc_hsm_progress(exp, karg);
	case LL_IOC_HSM_STATE_GET:
		rc = mdc_ioc_hsm_state_get(exp, karg);
	case LL_IOC_HSM_STATE_SET:
		rc = mdc_ioc_hsm_state_set(exp, karg);
	case LL_IOC_HSM_ACTION:
		rc = mdc_ioc_hsm_current_action(exp, karg);
	case LL_IOC_HSM_REQUEST:
		rc = mdc_ioc_hsm_request(exp, karg);
	case OBD_IOC_CLIENT_RECOVER:
		rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0);

	case IOC_OSC_SET_ACTIVE:
		rc = ptlrpc_set_import_active(imp, data->ioc_offset);
	 * Normally the IOC_OBD_STATFS and OBD_IOC_QUOTACTL iocontrols are
	 * handled by LMV instead of MDC. But when the cluster is upgraded
	 * from 1.8, there may be no LMV layer, so we might be called here.
	 * Eventually this code should be removed.
	case IOC_OBD_STATFS: {
		struct obd_statfs stat_buf = {0};

		if (*((__u32 *) data->ioc_inlbuf2) != 0)
			GOTO(out, rc = -ENODEV);

		if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
				 min((int)data->ioc_plen2,
				     (int)sizeof(struct obd_uuid))))
			GOTO(out, rc = -EFAULT);

		rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
				ktime_get_seconds() - OBD_STATFS_CACHE_SECONDS,

		if (copy_to_user(data->ioc_pbuf1, &stat_buf,
				 min((int)data->ioc_plen1,
				     (int)sizeof(stat_buf))))
			GOTO(out, rc = -EFAULT);

	case OBD_IOC_QUOTACTL: {
		struct if_quotactl *qctl = karg;
		struct obd_quotactl *oqctl;

		OBD_ALLOC_PTR(oqctl);
			GOTO(out, rc = -ENOMEM);

		QCTL_COPY(oqctl, qctl);
		rc = obd_quotactl(exp, oqctl);
			QCTL_COPY(qctl, oqctl);
			qctl->qc_valid = QC_MDTIDX;
			qctl->obd_uuid = obd->u.cli.cl_target_uuid;

		OBD_FREE_PTR(oqctl);
	case LL_IOC_GET_CONNECT_FLAGS:
		if (copy_to_user(uarg, exp_connect_flags_ptr(exp),
				 sizeof(*exp_connect_flags_ptr(exp))))
			GOTO(out, rc = -EFAULT);

	case LL_IOC_LOV_SWAP_LAYOUTS:
		rc = mdc_ioc_swap_layouts(exp, karg);

		CERROR("unrecognised ioctl: cmd = %#x\n", cmd);
		GOTO(out, rc = -ENOTTY);

	module_put(THIS_MODULE);
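
/* Generic MDS_GET_INFO RPC: pack the key and the expected value length,
 * then copy the reply value back into \a val, swabbing known key types
 * when the reply comes from a server of opposite endianness. */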
static int mdc_get_info_rpc(struct obd_export *exp,
			    u32 keylen, void *key,
			    u32 vallen, void *val)
	struct obd_import *imp = class_exp2cliimp(exp);
	struct ptlrpc_request *req;

	req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);

	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
			     RCL_CLIENT, keylen);
	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN,
			     RCL_CLIENT, sizeof(vallen));

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
		ptlrpc_request_free(req);

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
	memcpy(tmp, key, keylen);
	tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN);
	memcpy(tmp, &vallen, sizeof(vallen));

	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL,
			     RCL_SERVER, vallen);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	/* -EREMOTE means the get_info result is partial, and it needs to
	 * continue on another MDT, see fid2path part in lmv_iocontrol */
	if (rc == 0 || rc == -EREMOTE) {
		tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
		memcpy(val, tmp, vallen);
		if (ptlrpc_rep_need_swab(req)) {
			if (KEY_IS(KEY_FID2PATH))
				lustre_swab_fid2path(val);

	ptlrpc_req_finished(req);
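
/* Byte-swap helpers for HSM action lists delivered through the
 * kernel-userspace channel; used below when a KUC message arrives with a
 * swabbed magic, i.e. from a peer of opposite endianness. */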
static void lustre_swab_hai(struct hsm_action_item *h)
	__swab32s(&h->hai_len);
	__swab32s(&h->hai_action);
	lustre_swab_lu_fid(&h->hai_fid);
	lustre_swab_lu_fid(&h->hai_dfid);
	__swab64s(&h->hai_cookie);
	__swab64s(&h->hai_extent.offset);
	__swab64s(&h->hai_extent.length);
	__swab64s(&h->hai_gid);

static void lustre_swab_hal(struct hsm_action_list *h)
	struct hsm_action_item *hai;

	__swab32s(&h->hal_version);
	__swab32s(&h->hal_count);
	__swab32s(&h->hal_archive_id);
	__swab64s(&h->hal_flags);

	for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
		lustre_swab_hai(hai);

static void lustre_swab_kuch(struct kuc_hdr *l)
	__swab16s(&l->kuc_magic);
	/* __u8 l->kuc_transport */
	__swab16s(&l->kuc_msgtype);
	__swab16s(&l->kuc_msglen);

static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
				struct lustre_kernelcomm *lk)
	struct obd_import *imp = class_exp2cliimp(exp);

	if (lk->lk_group != KUC_GRP_HSM) {
		CERROR("Bad copytool group %d\n", lk->lk_group);

	CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, lk->lk_wfd,
	       lk->lk_uid, lk->lk_group, lk->lk_flags);

	if (lk->lk_flags & LK_FLG_STOP) {
		/* Unregister with the coordinator */
		rc = mdc_ioc_hsm_ct_unregister(imp);
		__u32 *archives = NULL;

		if ((lk->lk_flags & LK_FLG_DATANR) && lk->lk_data_count > 0)
			archives = lk->lk_data;

		rc = mdc_ioc_hsm_ct_register(imp, lk->lk_data_count, archives);
2368 * Send a message to any listening copytools
2369 * @param val KUC message (kuc_hdr + hsm_action_list)
2370 * @param len total length of message
2372 static int mdc_hsm_copytool_send(const struct obd_uuid *uuid,
2373 size_t len, void *val)
2375 struct kuc_hdr *lh = (struct kuc_hdr *)val;
2376 struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
2380 if (len < sizeof(*lh) + sizeof(*hal)) {
2381 CERROR("Short HSM message %zu < %zu\n", len,
2382 sizeof(*lh) + sizeof(*hal));
2385 if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
2386 lustre_swab_kuch(lh);
2387 lustre_swab_hal(hal);
2388 } else if (lh->kuc_magic != KUC_MAGIC) {
2389 CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
2393 CDEBUG(D_HSM, " Received message mg=%x t=%d m=%d l=%d actions=%d "
2395 lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype,
2396 lh->kuc_msglen, hal->hal_count, hal->hal_fsname);
2398 /* Broadcast to HSM listeners */
2399 rc = libcfs_kkuc_group_put(uuid, KUC_GRP_HSM, lh);
/**
 * Callback function passed to kuc for re-registering each HSM copytool
 * running on the MDC, after MDT shutdown/recovery.
 * @param data copytool registration data
 * @param cb_arg callback argument (obd_import)
 */
static int mdc_hsm_ct_reregister(void *data, void *cb_arg)
{
	struct obd_import *imp = (struct obd_import *)cb_arg;
	struct kkuc_ct_data *kcd = data;
	__u32 *archives = NULL;
	int rc;

	if (!kcd ||
	    (kcd->kcd_magic != KKUC_CT_DATA_ARRAY_MAGIC &&
	     kcd->kcd_magic != KKUC_CT_DATA_BITMAP_MAGIC))
		return -EPROTO;

	if (kcd->kcd_magic == KKUC_CT_DATA_BITMAP_MAGIC) {
		CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n",
		       imp->imp_obd->obd_name, kcd->kcd_nr_archives);
	} else {
		CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive nr = %u)\n",
		       imp->imp_obd->obd_name, kcd->kcd_nr_archives);
		if (kcd->kcd_nr_archives != 0)
			archives = kcd->kcd_archives;
	}

	rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_nr_archives, archives);
	/* ignore error if the copytool is already registered */
	return (rc == -EEXIST) ? 0 : rc;
}

/**
 * Re-establish all kuc contexts with MDT
 * after MDT shutdown/recovery.
 */
static int mdc_kuc_reregister(struct obd_import *imp)
{
	/* re-register HSM agents */
	return libcfs_kkuc_group_foreach(&imp->imp_obd->obd_uuid, KUC_GRP_HSM,
					 mdc_hsm_ct_reregister, imp);
}

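/*
 * Client-side entry point for obd_set_info_async() on the MDC. Keys that
 * only affect local state (e.g. KEY_DEFAULT_EASIZE) are handled here;
 * keys the server must see (KEY_READ_ONLY, KEY_CHANGELOG_CLEAR) are
 * forwarded as MDS_SET_INFO RPCs; anything unrecognized falls through to
 * the shared OSC handler.
 */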
static int mdc_set_info_async(const struct lu_env *env,
			      struct obd_export *exp,
			      u32 keylen, void *key,
			      u32 vallen, void *val,
			      struct ptlrpc_request_set *set)
{
	struct obd_import *imp = class_exp2cliimp(exp);
	int rc;

	if (KEY_IS(KEY_READ_ONLY)) {
		if (vallen != sizeof(int))
			return -EINVAL;

		spin_lock(&imp->imp_lock);
		if (*((int *)val)) {
			imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
			imp->imp_connect_data.ocd_connect_flags |=
				OBD_CONNECT_RDONLY;
		} else {
			imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
			imp->imp_connect_data.ocd_connect_flags &=
				~OBD_CONNECT_RDONLY;
		}
		spin_unlock(&imp->imp_lock);

		rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
				       keylen, key, vallen, val, set);
		return rc;
	}
	if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
		rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
				       keylen, key, vallen, val, set);
		return rc;
	}
	if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
		rc = mdc_hsm_copytool_send(&imp->imp_obd->obd_uuid, vallen,
					   val);
		return rc;
	}
	if (KEY_IS(KEY_DEFAULT_EASIZE)) {
		__u32 *default_easize = val;

		exp->exp_obd->u.cli.cl_default_mds_easize = *default_easize;
		return 0;
	}

	rc = osc_set_info_async(env, exp, keylen, key, vallen, val, set);
	return rc;
}

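/*
 * Synchronous obd_get_info() handler. EA sizes, connect data and the
 * target count are answered from client-side state; any other key is
 * sent to the MDS via mdc_get_info_rpc().
 */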
static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
			__u32 keylen, void *key, __u32 *vallen, void *val)
{
	int rc;

	if (KEY_IS(KEY_MAX_EASIZE)) {
		__u32 mdsize, *max_easize;

		if (*vallen != sizeof(int))
			return -EINVAL;
		mdsize = *(__u32 *)val;
		if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
			exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
		max_easize = val;
		*max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
		return 0;
	} else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
		__u32 *default_easize;

		if (*vallen != sizeof(int))
			return -EINVAL;
		default_easize = val;
		*default_easize = exp->exp_obd->u.cli.cl_default_mds_easize;
		return 0;
	} else if (KEY_IS(KEY_CONN_DATA)) {
		struct obd_import *imp = class_exp2cliimp(exp);
		struct obd_connect_data *data = val;

		if (*vallen != sizeof(*data))
			return -EINVAL;

		*data = imp->imp_connect_data;
		return 0;
	} else if (KEY_IS(KEY_TGT_COUNT)) {
		*((__u32 *)val) = 1;
		return 0;
	}

	rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);

	return rc;
}

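/*
 * Ask the MDT to commit metadata for \a fid to stable storage with a
 * synchronous MDS_SYNC RPC; on success the reply request is handed back
 * to the caller through \a request.
 */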
static int mdc_fsync(struct obd_export *exp, const struct lu_fid *fid,
		     struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int rc;

	*request = NULL;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
	if (req == NULL)
		return -ENOMEM;

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_pack_body(req, fid, 0, 0, -1, 0);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		ptlrpc_req_finished(req);
	else
		*request = req;
	return rc;
}

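/*
 * Async state and completion handler for MDS_RMFID. mdc_rmfid() packs an
 * array of FIDs into a single RPC and queues it on the caller's request
 * set; when the reply arrives, mdc_rmfid_interpret() copies the per-FID
 * result codes from the RMF_RCS reply buffer into the caller's rcs array.
 *
 * A caller (e.g. the LMV layer) would do roughly the following
 * (hypothetical sketch, not taken from this file):
 *
 *	set = ptlrpc_prep_set();
 *	rc = md_rmfid(exp, fa, rcs, set);
 *	rc = ptlrpc_set_wait(env, set);
 *	ptlrpc_set_destroy(set);
 */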
struct mdc_rmfid_args {
	int *mra_rcs;
	int mra_nr;
};

int mdc_rmfid_interpret(const struct lu_env *env, struct ptlrpc_request *req,
			void *args, int rc)
{
	struct mdc_rmfid_args *aa;
	int *rcs, size;

	if (!rc) {
		aa = ptlrpc_req_async_args(aa, req);

		size = req_capsule_get_size(&req->rq_pill, &RMF_RCS,
					    RCL_SERVER);
		LASSERT(size == sizeof(int) * aa->mra_nr);
		rcs = req_capsule_server_get(&req->rq_pill, &RMF_RCS);
		LASSERT(rcs);
		LASSERT(aa->mra_rcs);
		LASSERT(aa->mra_nr);
		memcpy(aa->mra_rcs, rcs, size);
	}

	return rc;
}

static int mdc_rmfid(struct obd_export *exp, struct fid_array *fa,
		     int *rcs, struct ptlrpc_request_set *set)
{
	struct ptlrpc_request *req;
	struct mdc_rmfid_args *aa;
	struct mdt_body *b;
	struct lu_fid *tmp;
	int rc, flen;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_RMFID);
	if (req == NULL)
		return -ENOMEM;

	flen = fa->fa_nr * sizeof(struct lu_fid);
	req_capsule_set_size(&req->rq_pill, &RMF_FID_ARRAY,
			     RCL_CLIENT, flen);
	req_capsule_set_size(&req->rq_pill, &RMF_FID_ARRAY,
			     RCL_SERVER, flen);
	req_capsule_set_size(&req->rq_pill, &RMF_RCS,
			     RCL_SERVER, fa->fa_nr * sizeof(__u32));
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_RMFID);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_FID_ARRAY);
	memcpy(tmp, fa->fa_fids, flen);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);
	b = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
	b->mbo_ctime = ktime_get_real_seconds();

	ptlrpc_request_set_replen(req);

	aa = ptlrpc_req_async_args(aa, req);
	aa->mra_rcs = rcs;
	aa->mra_nr = fa->fa_nr;
	req->rq_interpret_reply = mdc_rmfid_interpret;

	ptlrpc_set_add_req(set, req);
	ptlrpc_check_set(NULL, set);

	return 0;
}

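/*
 * React to import state changes: drop grant and flush the FID sequence
 * on disconnect/inactivation, invalidate cached locks and pages when the
 * import is invalidated, and redo the kuc/HSM copytool registration once
 * the import becomes active again.
 */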
static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
			    enum obd_import_event event)
{
	struct client_obd *cli = &obd->u.cli;
	int rc = 0;

	LASSERT(imp->imp_obd == obd);

	switch (event) {
	case IMP_EVENT_DISCON:
		spin_lock(&cli->cl_loi_list_lock);
		cli->cl_avail_grant = 0;
		cli->cl_lost_grant = 0;
		spin_unlock(&cli->cl_loi_list_lock);
		break;
	case IMP_EVENT_INACTIVE:
		/*
		 * Flush current sequence to make client obtain new one
		 * from server in case of disconnect/reconnect.
		 */
		down_read(&cli->cl_seq_rwsem);
		if (cli->cl_seq)
			seq_client_flush(cli->cl_seq);
		up_read(&cli->cl_seq_rwsem);

		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
		break;
	case IMP_EVENT_INVALIDATE: {
		struct ldlm_namespace *ns = obd->obd_namespace;
		struct lu_env *env;
		__u16 refcheck;

		ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);

		env = cl_env_get(&refcheck);
		if (!IS_ERR(env)) {
			/* Reset grants. All pages go to failing rpcs due to
			 * the invalid import.
			 */
			osc_io_unplug(env, cli, NULL);

			cfs_hash_for_each_nolock(ns->ns_rs_hash,
						 osc_ldlm_resource_invalidate,
						 env, 0);
			cl_env_put(env, &refcheck);
			ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
		} else {
			rc = PTR_ERR(env);
		}
		break;
	}
	case IMP_EVENT_ACTIVE:
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
		/* redo the kuc registration after reconnecting */
		if (rc == 0)
			rc = mdc_kuc_reregister(imp);
		break;
	case IMP_EVENT_OCD: {
		struct obd_connect_data *ocd = &imp->imp_connect_data;

		if (OCD_HAS_FLAG(ocd, GRANT))
			osc_init_grant(cli, ocd);

		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
		break;
	}
	case IMP_EVENT_DEACTIVATE:
	case IMP_EVENT_ACTIVATE:
		break;
	default:
		CERROR("Unknown import event %x\n", event);
		LBUG();
	}
	return rc;
}

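/*
 * Allocate a new FID for a metadata object from this target's client-side
 * sequence. The sequence client may not be set up (e.g. during teardown),
 * hence the check under cl_seq_rwsem.
 */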
int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
		  struct lu_fid *fid, struct md_op_data *op_data)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	int rc = -EIO;

	down_read(&cli->cl_seq_rwsem);
	if (cli->cl_seq)
		rc = seq_client_alloc_fid(env, cli->cl_seq, fid);
	up_read(&cli->cl_seq_rwsem);

	return rc;
}

static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;

	return &cli->cl_target_uuid;
}

/**
 * Determine whether the lock can be canceled before replaying it during
 * recovery. Return a non-zero value if the lock can be canceled, or
 * zero if it cannot.
 */
static int mdc_cancel_weight(struct ldlm_lock *lock)
{
	if (lock->l_resource->lr_type != LDLM_IBITS)
		return 0;

	/* FIXME: if we ever get into a situation where there are too many
	 * opened files with open locks on a single node, then we really
	 * should replay these open locks to re-get them.
	 */
	if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
		return 0;

	/* Special case for DoM locks: cancel only unused and granted locks */
	if (ldlm_has_dom(lock) &&
	    (lock->l_granted_mode != lock->l_req_mode ||
	     osc_ldlm_weigh_ast(lock) != 0))
		return 0;

	return 1;
}

static int mdc_resource_inode_free(struct ldlm_resource *res)
{
	if (res->lr_lvb_inode)
		res->lr_lvb_inode = NULL;

	return 0;
}

static struct ldlm_valblock_ops inode_lvbo = {
	.lvbo_free = mdc_resource_inode_free
};

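/*
 * Set up the changelog replicator llog context so this client can read
 * changelog records from the MDT; mdc_llog_finish() tears it down again.
 */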
static int mdc_llog_init(struct obd_device *obd)
{
	struct obd_llog_group *olg = &obd->obd_olg;
	struct llog_ctxt *ctxt;
	int rc;

	rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, obd,
			&llog_client_ops);
	if (rc < 0)
		return rc;

	ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT);
	llog_initiator_connect(ctxt);
	llog_ctxt_put(ctxt);

	return 0;
}

static void mdc_llog_finish(struct obd_device *obd)
{
	struct llog_ctxt *ctxt;

	ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
	if (ctxt)
		llog_cleanup(NULL, ctxt);
}

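/*
 * Device setup: initialize the shared OSC state, register tunables,
 * install the cancel-weight policy and LVB operations on the namespace,
 * and bring up the changelog llog context and character device. Errors
 * unwind in reverse order through the err_* labels.
 */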
int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
	int rc;

	rc = osc_setup_common(obd, cfg);
	if (rc < 0)
		return rc;

	rc = mdc_tunables_init(obd);
	if (rc)
		GOTO(err_osc_cleanup, rc);

	obd->u.cli.cl_dom_min_inline_repsize = MDC_DOM_DEF_INLINE_REPSIZE;

	ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);

	obd->obd_namespace->ns_lvbo = &inode_lvbo;

	rc = mdc_llog_init(obd);
	if (rc) {
		CERROR("%s: failed to setup llogging subsystems: rc = %d\n",
		       obd->obd_name, rc);
		GOTO(err_llog_cleanup, rc);
	}

	rc = mdc_changelog_cdev_init(obd);
	if (rc) {
		CERROR("%s: failed to setup changelog char device: rc = %d\n",
		       obd->obd_name, rc);
		GOTO(err_changelog_cleanup, rc);
	}

	return 0;

err_changelog_cleanup:
	mdc_llog_finish(obd);
err_llog_cleanup:
	lprocfs_free_md_stats(obd);
	ptlrpc_lprocfs_unregister_obd(obd);
err_osc_cleanup:
	osc_cleanup_common(obd);

	return rc;
}

/* Initialize the default and maximum LOV EA sizes. This allows
 * us to make MDS RPCs with large enough reply buffers to hold a default
 * sized EA without having to calculate this (via a call into the
 * LOV + OSCs) each time we make an RPC. The maximum size is also tracked
 * but not used to avoid wastefully vmalloc()'ing large reply buffers when
 * a large number of stripes is possible. If a larger reply buffer is
 * required it will be reallocated in the ptlrpc layer due to overflow.
 */
static int mdc_init_ea_size(struct obd_export *exp, __u32 easize,
			    __u32 def_easize)
{
	struct obd_device *obd = exp->exp_obd;
	struct client_obd *cli = &obd->u.cli;

	if (cli->cl_max_mds_easize < easize)
		cli->cl_max_mds_easize = easize;

	if (cli->cl_default_mds_easize < def_easize)
		cli->cl_default_mds_easize = def_easize;

	return 0;
}

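/*
 * Pre-cleanup: quiesce in-flight activity, shut down the changelog
 * device and llog context, and detach procfs/stats before the final
 * osc_cleanup_common() in mdc_cleanup().
 */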
static int mdc_precleanup(struct obd_device *obd)
{
	osc_precleanup_common(obd);
	mdc_changelog_cdev_finish(obd);

	obd_cleanup_client_import(obd);
	ptlrpc_lprocfs_unregister_obd(obd);
	lprocfs_free_md_stats(obd);
	mdc_llog_finish(obd);

	return 0;
}

static int mdc_cleanup(struct obd_device *obd)
{
	return osc_cleanup_common(obd);
}

static const struct obd_ops mdc_obd_ops = {
	.o_owner = THIS_MODULE,
	.o_setup = mdc_setup,
	.o_precleanup = mdc_precleanup,
	.o_cleanup = mdc_cleanup,
	.o_add_conn = client_import_add_conn,
	.o_del_conn = client_import_del_conn,
	.o_connect = client_connect_import,
	.o_reconnect = osc_reconnect,
	.o_disconnect = osc_disconnect,
	.o_iocontrol = mdc_iocontrol,
	.o_set_info_async = mdc_set_info_async,
	.o_statfs = mdc_statfs,
	.o_statfs_async = mdc_statfs_async,
	.o_fid_init = client_fid_init,
	.o_fid_fini = client_fid_fini,
	.o_fid_alloc = mdc_fid_alloc,
	.o_import_event = mdc_import_event,
	.o_get_info = mdc_get_info,
	.o_get_uuid = mdc_get_uuid,
	.o_quotactl = mdc_quotactl,
};

static const struct md_ops mdc_md_ops = {
	.m_get_root = mdc_get_root,
	.m_null_inode = mdc_null_inode,
	.m_close = mdc_close,
	.m_create = mdc_create,
	.m_enqueue = mdc_enqueue,
	.m_getattr = mdc_getattr,
	.m_getattr_name = mdc_getattr_name,
	.m_intent_lock = mdc_intent_lock,
	.m_link = mdc_link,
	.m_rename = mdc_rename,
	.m_setattr = mdc_setattr,
	.m_setxattr = mdc_setxattr,
	.m_getxattr = mdc_getxattr,
	.m_fsync = mdc_fsync,
	.m_file_resync = mdc_file_resync,
	.m_read_page = mdc_read_page,
	.m_unlink = mdc_unlink,
	.m_cancel_unused = mdc_cancel_unused,
	.m_init_ea_size = mdc_init_ea_size,
	.m_set_lock_data = mdc_set_lock_data,
	.m_lock_match = mdc_lock_match,
	.m_get_lustre_md = mdc_get_lustre_md,
	.m_free_lustre_md = mdc_free_lustre_md,
	.m_set_open_replay_data = mdc_set_open_replay_data,
	.m_clear_open_replay_data = mdc_clear_open_replay_data,
	.m_intent_getattr_async = mdc_intent_getattr_async,
	.m_revalidate_lock = mdc_revalidate_lock,
	.m_rmfid = mdc_rmfid,
};

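/*
 * Module init/exit. A chrdev region and device class are reserved for
 * the per-device changelog character devices (set up in
 * mdc_changelog_cdev_init() above) before the MDC obd type itself is
 * registered.
 */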
dev_t mdc_changelog_dev;
struct class *mdc_changelog_class;

static int __init mdc_init(void)
{
	int rc;

	rc = alloc_chrdev_region(&mdc_changelog_dev, 0,
				 MDC_CHANGELOG_DEV_COUNT,
				 MDC_CHANGELOG_DEV_NAME);
	if (rc)
		return rc;

	mdc_changelog_class = class_create(THIS_MODULE, MDC_CHANGELOG_DEV_NAME);
	if (IS_ERR(mdc_changelog_class)) {
		rc = PTR_ERR(mdc_changelog_class);
		goto out_dev;
	}

	rc = class_register_type(&mdc_obd_ops, &mdc_md_ops, true, NULL,
				 LUSTRE_MDC_NAME, &mdc_device_type);
	if (rc)
		goto out_class;

	return 0;

out_class:
	class_destroy(mdc_changelog_class);
out_dev:
	unregister_chrdev_region(mdc_changelog_dev, MDC_CHANGELOG_DEV_COUNT);
	return rc;
}

static void __exit mdc_exit(void)
{
	class_destroy(mdc_changelog_class);
	unregister_chrdev_region(mdc_changelog_dev, MDC_CHANGELOG_DEV_COUNT);
	class_unregister_type(LUSTRE_MDC_NAME);
}

MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Metadata Client");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(mdc_init);
module_exit(mdc_exit);