4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 only,
8 * as published by the Free Software Foundation.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
13 * General Public License version 2 for more details (a copy is included
14 * in the LICENSE file that accompanied this code).
16 * You should have received a copy of the GNU General Public License
17 * version 2 along with this program; If not, see
18 * http://www.gnu.org/licenses/gpl-2.0.html
23 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
24 * Use is subject to license terms.
26 * Copyright (c) 2011, 2017, Intel Corporation.
29 * This file is part of Lustre, http://www.lustre.org/
32 #define DEBUG_SUBSYSTEM S_MDC
34 #include <linux/init.h>
35 #include <linux/kthread.h>
36 #include <linux/module.h>
37 #include <linux/pagemap.h>
38 #include <linux/user_namespace.h>
39 #include <linux/utsname.h>
40 #include <linux/delay.h>
41 #include <linux/uidgid.h>
42 #include <linux/device.h>
43 #include <linux/xarray.h>
45 #include <lustre_errno.h>
47 #include <cl_object.h>
48 #include <llog_swab.h>
49 #include <lprocfs_status.h>
50 #include <lustre_acl.h>
51 #include <lustre_compat.h>
52 #include <lustre_fid.h>
53 #include <uapi/linux/lustre/lustre_ioctl.h>
54 #include <lustre_ioctl_old.h>
55 #include <lustre_kernelcomm.h>
56 #include <lustre_lmv.h>
57 #include <lustre_log.h>
58 #include <lustre_osc.h>
59 #include <lustre_swab.h>
60 #include <lustre_quota.h>
61 #include <obd_class.h>
63 #include "mdc_internal.h"
65 #define REQUEST_MINOR 244
67 static int mdc_cleanup(struct obd_device *obd);
69 static inline int mdc_queue_wait(struct ptlrpc_request *req)
71 struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
74 /* obd_get_request_slot() ensures that this client has no more
75 * than cl_max_rpcs_in_flight RPCs simultaneously in flight
77 rc = obd_get_request_slot(cli);
81 rc = ptlrpc_queue_wait(req);
82 obd_put_request_slot(cli);
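/*
 * A sketch of how this helper is used elsewhere in this file (see the HSM
 * helpers below for the real thing):
 *
 *	req = ptlrpc_request_alloc_pack(imp, &RQF_..., LUSTRE_MDS_VERSION, opc);
 *	ptlrpc_request_set_replen(req);
 *	rc = mdc_queue_wait(req);        throttled by cl_max_rpcs_in_flight
 *	ptlrpc_req_put(req);
 *
 * so that these synchronous requests share the client's in-flight RPC limit
 * rather than bypassing it.
 */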
88 * Send MDS_GET_ROOT RPC to fetch root FID.
90 * If \a fileset is not NULL it should contain a subdirectory off
91 * the ROOT/ directory to be mounted on the client. Return the FID
92 * of the subdirectory to the client to mount onto its mountpoint.
94 * \param[in] exp MDC export
95 * \param[in] fileset fileset name, which could be NULL
96 * \param[out] rootfid root FID of this mountpoint
99 * \retval 0 on success, negative errno on failure
101 static int mdc_get_root(struct obd_export *exp, const char *fileset,
102 struct lu_fid *rootfid)
104 struct ptlrpc_request *req;
105 struct mdt_body *body;
110 if (fileset && !(exp_connect_flags(exp) & OBD_CONNECT_SUBTREE))
113 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
119 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
120 strlen(fileset) + 1);
121 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_ROOT);
123 ptlrpc_request_free(req);
126 mdc_pack_body(&req->rq_pill, NULL, 0, 0, -1, 0);
127 if (fileset != NULL) {
128 char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
130 memcpy(name, fileset, strlen(fileset));
132 lustre_msg_add_flags(req->rq_reqmsg, LUSTRE_IMP_FULL);
133 req->rq_send_state = LUSTRE_IMP_FULL;
135 ptlrpc_request_set_replen(req);
137 rc = ptlrpc_queue_wait(req);
141 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
143 GOTO(out, rc = -EPROTO);
145 *rootfid = body->mbo_fid1;
146 CDEBUG(D_NET, "root fid="DFID", last_committed=%llu\n",
147 PFID(rootfid), lustre_msg_get_last_committed(req->rq_repmsg));
156 * This function is known to always announce that it will receive 4 buffers
157 * from the server. Even when acl_size and md_size are zero, the RPC reply
158 * header still describes 4 fields and the RPC itself carries zero-sized
159 * fields. This is because mdt_getattr*() _always_ returns 4 fields; if the
160 * ACL is not needed it simply shrinks that field to zero size, and the same
161 * goes for md_size. This becomes a problem when the client expects a smaller
162 * number of fields. It will be fixed once the client is aware of the RPC layout.
165 static int mdc_getattr_common(struct obd_export *exp,
166 struct ptlrpc_request *req,
167 struct md_op_data *op_data)
169 struct req_capsule *pill = &req->rq_pill;
170 struct mdt_body *body;
175 /* Request message already built. */
176 rc = ptlrpc_queue_wait(req);
180 /* sanity check for the reply */
181 body = req_capsule_server_get(pill, &RMF_MDT_BODY);
185 CDEBUG(D_NET, "mode: %o\n", body->mbo_mode);
187 mdc_update_max_ea_from_body(exp, body);
188 if (body->mbo_eadatasize != 0) {
189 eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
190 body->mbo_eadatasize);
195 /* If encryption context was returned by MDT, put it in op_data
196 * so that caller can set it on inode and save an extra getxattr.
198 if (op_data && op_data->op_valid & OBD_MD_ENCCTX &&
199 body->mbo_valid & OBD_MD_ENCCTX) {
200 op_data->op_file_encctx =
201 req_capsule_server_get(pill, &RMF_FILE_ENCCTX);
202 op_data->op_file_encctx_size =
203 req_capsule_get_size(pill, &RMF_FILE_ENCCTX,
210 static void mdc_reset_acl_req(struct ptlrpc_request *req)
212 spin_lock(&req->rq_early_free_lock);
213 sptlrpc_cli_free_repbuf(req);
214 req->rq_repbuf = NULL;
215 req->rq_repbuf_len = 0;
216 req->rq_repdata = NULL;
217 req->rq_reqdata_len = 0;
218 spin_unlock(&req->rq_early_free_lock);
221 static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
222 struct ptlrpc_request **request)
224 struct ptlrpc_request *req;
225 struct obd_device *obd = class_exp2obd(exp);
226 struct obd_import *imp = class_exp2cliimp(exp);
227 __u32 acl_bufsize = LUSTRE_POSIX_ACL_MAX_SIZE_OLD;
231 /* Single MDS without an LMV case */
232 if (op_data->op_flags & MF_GET_MDT_IDX) {
238 req = ptlrpc_request_alloc(imp, &RQF_MDS_GETATTR);
242 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
244 ptlrpc_request_free(req);
248 /* LU-15245: avoid deadlock with modifying RPCs on MDS_REQUEST_PORTAL */
249 req->rq_request_portal = MDS_READPAGE_PORTAL;
252 mdc_pack_body(&req->rq_pill, &op_data->op_fid1, op_data->op_valid,
253 op_data->op_mode, -1, 0);
254 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, acl_bufsize);
255 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
257 if (exp_connect_encrypt(exp) && op_data->op_valid & OBD_MD_ENCCTX)
258 req_capsule_set_size(&req->rq_pill, &RMF_FILE_ENCCTX,
260 obd->u.cli.cl_max_mds_easize);
262 req_capsule_set_size(&req->rq_pill, &RMF_FILE_ENCCTX,
264 ptlrpc_request_set_replen(req);
266 rc = mdc_getattr_common(exp, req, op_data);
269 acl_bufsize = min_t(__u32,
270 imp->imp_connect_data.ocd_max_easize,
272 mdc_reset_acl_req(req);
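/* If the default ACL reply buffer turns out to be too small (presumably
 * signalled by -ERANGE), acl_bufsize is enlarged from
 * LUSTRE_POSIX_ACL_MAX_SIZE_OLD toward the server's ocd_max_easize, the
 * reply buffers are dropped by mdc_reset_acl_req(), and the getattr is
 * presumably retried with the larger size. mdc_getattr_name() below follows
 * the same pattern.
 */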
284 static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
285 struct ptlrpc_request **request)
287 struct ptlrpc_request *req;
288 struct obd_import *imp = class_exp2cliimp(exp);
289 __u32 acl_bufsize = LUSTRE_POSIX_ACL_MAX_SIZE_OLD;
294 req = ptlrpc_request_alloc(imp, &RQF_MDS_GETATTR_NAME);
298 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
299 op_data->op_namelen + 1);
301 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME);
303 ptlrpc_request_free(req);
307 if (op_data->op_name) {
308 char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
309 LASSERT(strnlen(op_data->op_name, op_data->op_namelen) ==
310 op_data->op_namelen);
311 memcpy(name, op_data->op_name, op_data->op_namelen);
315 mdc_pack_body(&req->rq_pill, &op_data->op_fid1, op_data->op_valid,
316 op_data->op_mode, op_data->op_suppgids[0], 0);
317 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
319 req_capsule_set_size(&req->rq_pill, &RMF_ACL, RCL_SERVER, acl_bufsize);
320 req_capsule_set_size(&req->rq_pill, &RMF_FILE_ENCCTX, RCL_SERVER, 0);
321 ptlrpc_request_set_replen(req);
322 if (op_data->op_bias & MDS_FID_OP) {
323 struct mdt_body *b = req_capsule_client_get(&req->rq_pill,
327 b->mbo_valid |= OBD_MD_NAMEHASH;
328 b->mbo_fid2 = op_data->op_fid2;
332 rc = mdc_getattr_common(exp, req, NULL);
335 acl_bufsize = min_t(__u32,
336 imp->imp_connect_data.ocd_max_easize,
338 mdc_reset_acl_req(req);
350 static int mdc_xattr_common(struct obd_export *exp,const struct req_format *fmt,
351 const struct lu_fid *fid, int opcode, u64 valid,
352 const char *xattr_name, const char *input,
353 int input_size, int output_size, int flags,
354 __u32 suppgid, struct ptlrpc_request **request)
356 struct ptlrpc_request *req;
357 struct sptlrpc_sepol *sepol;
358 int xattr_namelen = 0;
364 req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
369 xattr_namelen = strlen(xattr_name) + 1;
370 req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
375 req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
378 /* get SELinux policy info if any */
379 sepol = sptlrpc_sepol_get(req);
381 GOTO(err_free_rq, rc = PTR_ERR(sepol));
383 req_capsule_set_size(&req->rq_pill, &RMF_SELINUX_POL, RCL_CLIENT,
384 sptlrpc_sepol_size(sepol));
386 /* Flush local XATTR locks to get rid of a possible cancel RPC */
387 if (opcode == MDS_REINT && fid_is_sane(fid) &&
388 exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
392 /* Without this, packing would fail */
394 req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
397 count = mdc_resource_get_unused(exp, fid,
399 MDS_INODELOCK_XATTR);
401 rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
403 GOTO(err_put_sepol, rc);
405 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
407 GOTO(err_put_sepol, rc);
410 if (opcode == MDS_REINT) {
411 struct mdt_rec_setxattr *rec;
413 BUILD_BUG_ON(sizeof(struct mdt_rec_setxattr) !=
414 sizeof(struct mdt_rec_reint));
415 rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
416 rec->sx_opcode = REINT_SETXATTR;
417 rec->sx_fsuid = from_kuid(&init_user_ns, current_fsuid());
418 rec->sx_fsgid = from_kgid(&init_user_ns, current_fsgid());
419 rec->sx_cap = ll_capability_u32(current_cap());
420 rec->sx_suppgid1 = suppgid;
421 rec->sx_suppgid2 = -1;
423 rec->sx_valid = valid | OBD_MD_FLCTIME;
424 rec->sx_time = ktime_get_real_seconds();
425 rec->sx_size = output_size;
426 rec->sx_flags = flags;
428 mdc_pack_body(&req->rq_pill, fid, valid, output_size,
430 /* Avoid deadlock with modifying RPCs on MDS_REQUEST_PORTAL.
433 req->rq_request_portal = MDS_READPAGE_PORTAL;
437 tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
438 memcpy(tmp, xattr_name, xattr_namelen);
441 tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
442 memcpy(tmp, input, input_size);
445 mdc_file_sepol_pack(&req->rq_pill, sepol);
446 sptlrpc_sepol_put(sepol);
448 if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER))
449 req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
450 RCL_SERVER, output_size);
451 ptlrpc_request_set_replen(req);
454 if (opcode == MDS_REINT)
455 ptlrpc_get_mod_rpc_slot(req);
457 rc = ptlrpc_queue_wait(req);
459 if (opcode == MDS_REINT)
460 ptlrpc_put_mod_rpc_slot(req);
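/* Only the modifying case (MDS_REINT, i.e. setxattr) takes a "mod" RPC slot
 * around ptlrpc_queue_wait() so that the number of modifying requests in
 * flight stays within the client's limit; MDS_GETXATTR needs no such slot.
 */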
469 sptlrpc_sepol_put(sepol);
471 ptlrpc_request_free(req);
476 static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
477 u64 obd_md_valid, const char *name,
478 const void *value, size_t value_size,
479 unsigned int xattr_flags, u32 suppgid,
480 struct ptlrpc_request **req)
482 LASSERT(obd_md_valid == OBD_MD_FLXATTR ||
483 obd_md_valid == OBD_MD_FLXATTRRM);
485 return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
486 fid, MDS_REINT, obd_md_valid, name,
487 value, value_size, 0, xattr_flags, suppgid,
491 static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
492 u64 obd_md_valid, const char *name, size_t buf_size,
493 struct ptlrpc_request **req)
495 struct mdt_body *body;
498 LASSERT(obd_md_valid == OBD_MD_FLXATTR ||
499 obd_md_valid == OBD_MD_FLXATTRLS);
501 /* Message below is checked in sanity-selinux test_20d
502 * and sanity-sec test_49
504 CDEBUG(D_INFO, "%s: get xattr '%s' for "DFID"\n",
505 exp->exp_obd->obd_name, name, PFID(fid));
506 rc = mdc_xattr_common(exp, &RQF_MDS_GETXATTR, fid, MDS_GETXATTR,
507 obd_md_valid, name, NULL, 0, buf_size, 0, -1,
512 body = req_capsule_server_get(&(*req)->rq_pill, &RMF_MDT_BODY);
514 GOTO(out, rc = -EPROTO);
516 /* only detect the xattr size */
518 /* LU-11109: Older MDTs do not distinguish
519 * between nonexistent xattrs and zero length
520 * values in this case. Newer MDTs will return
521 * -ENODATA or set OBD_MD_FLXATTR. */
522 GOTO(out, rc = body->mbo_eadatasize);
525 if (body->mbo_eadatasize == 0) {
526 /* LU-11109: Newer MDTs set OBD_MD_FLXATTR on
527 * success so that we can distinguish between
528 * zero length value and nonexistent xattr.
530 * If OBD_MD_FLXATTR is not set then we keep
531 * the old behavior and return -ENODATA for
532 * getxattr() when mbo_eadatasize is 0. But
533 * -ENODATA only makes sense for getxattr()
534 * and not for listxattr(). */
535 if (body->mbo_valid & OBD_MD_FLXATTR)
537 else if (obd_md_valid == OBD_MD_FLXATTR)
538 GOTO(out, rc = -ENODATA);
543 GOTO(out, rc = body->mbo_eadatasize);
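/* Net effect of the LU-11109 handling above: a non-negative return value is
 * the xattr (or xattr list) size in bytes; -ENODATA is only returned for a
 * getxattr() of a missing xattr on an MDT new enough to distinguish that
 * case from a zero-length value.
 */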
546 ptlrpc_req_put(*req);
553 static int mdc_get_lustre_md(struct obd_export *exp, struct req_capsule *pill,
554 struct obd_export *dt_exp,
555 struct obd_export *md_exp,
556 struct lustre_md *md)
562 memset(md, 0, sizeof(*md));
564 md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
565 LASSERT(md->body != NULL);
567 if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
568 if (!S_ISREG(md->body->mbo_mode)) {
569 CDEBUG(D_INFO, "OBD_MD_FLEASIZE set, should be a "
570 "regular file, but is not\n");
571 GOTO(out, rc = -EPROTO);
574 if (md->body->mbo_eadatasize == 0) {
575 CDEBUG(D_INFO, "OBD_MD_FLEASIZE set, "
576 "but eadatasize 0\n");
577 GOTO(out, rc = -EPROTO);
580 md->layout.lb_len = md->body->mbo_eadatasize;
581 md->layout.lb_buf = req_capsule_server_sized_get(pill,
584 if (md->layout.lb_buf == NULL)
585 GOTO(out, rc = -EPROTO);
586 } else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
587 const union lmv_mds_md *lmv;
590 if (!S_ISDIR(md->body->mbo_mode)) {
591 CDEBUG(D_INFO, "OBD_MD_FLDIREA set, should be a "
592 "directory, but is not\n");
593 GOTO(out, rc = -EPROTO);
596 if (md_exp->exp_obd->obd_type->typ_lu == &mdc_device_type) {
597 CERROR("%s: no LMV, upgrading from old version?\n",
598 md_exp->exp_obd->obd_name);
600 GOTO(out_acl, rc = 0);
603 if (md->body->mbo_valid & OBD_MD_MEA) {
604 lmv_size = md->body->mbo_eadatasize;
606 CDEBUG(D_INFO, "OBD_MD_FLDIREA is set, "
607 "but eadatasize 0\n");
611 lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
614 GOTO(out, rc = -EPROTO);
616 rc = md_stripe_object_create(md_exp, &md->lsm_obj,
622 /* since 2.12.58 intent_getattr fetches default LMV */
623 if (md->body->mbo_valid & OBD_MD_DEFAULT_MEA) {
624 lmv_size = req_capsule_get_size(pill,
627 lmv = req_capsule_server_sized_get(pill,
631 GOTO(out, rc = -EPROTO);
633 rc = md_stripe_object_create(md_exp, &md->def_lsm_obj,
642 if (md->body->mbo_valid & OBD_MD_FLACL) {
643 /* For ACLs it is possible that OBD_MD_FLACL is set while aclsize is zero;
644 * only when aclsize != 0 is there an actual ACL segment in the reply.
647 rc = mdc_unpack_acl(pill, md);
656 md_put_lustre_md(md_exp, md);
662 void mdc_replay_open(struct ptlrpc_request *req)
664 struct md_open_data *mod = req->rq_cb_data;
665 struct ptlrpc_request *close_req;
666 struct obd_client_handle *och;
667 struct lustre_handle old_open_handle = { };
668 struct mdt_body *body;
669 struct ldlm_reply *rep;
673 DEBUG_REQ(D_ERROR, req,
674 "cannot properly replay without open data");
679 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
680 LASSERT(body != NULL);
682 rep = req_capsule_server_get(&req->rq_pill, &RMF_DLM_REP);
683 if (rep != NULL && rep->lock_policy_res2 != 0)
684 DEBUG_REQ(D_ERROR, req, "Open request replay failed with %ld ",
685 (long int)rep->lock_policy_res2);
687 spin_lock(&req->rq_lock);
689 if (och && och->och_open_handle.cookie)
690 req->rq_early_free_repbuf = 1;
692 req->rq_early_free_repbuf = 0;
693 spin_unlock(&req->rq_lock);
695 if (req->rq_early_free_repbuf) {
696 struct lustre_handle *file_open_handle;
698 LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);
700 file_open_handle = &och->och_open_handle;
701 CDEBUG(D_HA, "updating handle from %#llx to %#llx\n",
702 file_open_handle->cookie, body->mbo_open_handle.cookie);
703 old_open_handle = *file_open_handle;
704 *file_open_handle = body->mbo_open_handle;
707 close_req = mod->mod_close_req;
709 __u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
710 struct mdt_ioepoch *epoch;
712 LASSERT(opc == MDS_CLOSE);
713 epoch = req_capsule_client_get(&close_req->rq_pill,
717 if (req->rq_early_free_repbuf)
718 LASSERT(old_open_handle.cookie ==
719 epoch->mio_open_handle.cookie);
721 DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
722 epoch->mio_open_handle = body->mbo_open_handle;
727 void mdc_commit_open(struct ptlrpc_request *req)
729 struct md_open_data *mod = req->rq_cb_data;
734 * No need to touch md_open_data::mod_och here: it holds a reference on
735 * \var mod, the two drop their references to each other, and \var mod is
736 * freed afterwards once md_open_data::mod_och puts its reference.
740 * Do not let the open request disappear, since it may still be needed
741 * for the close RPC to happen (that can only occur on eviction; otherwise
742 * ptlrpc_request::rq_replay prevents mdc_commit_open() from being
743 * called). Just mark this RPC as committed to distinguish the two
744 * cases; see mdc_close() for details. The open request reference is
745 * put when \var mod is freed.
747 ptlrpc_request_addref(req);
748 spin_lock(&req->rq_lock);
749 req->rq_committed = 1;
750 spin_unlock(&req->rq_lock);
751 req->rq_cb_data = NULL;
755 int mdc_set_open_replay_data(struct obd_export *exp,
756 struct obd_client_handle *och,
757 struct lookup_intent *it)
759 struct md_open_data *mod;
760 struct mdt_rec_create *rec;
761 struct mdt_body *body;
762 struct ptlrpc_request *open_req = it->it_request;
763 struct obd_import *imp = open_req->rq_import;
766 if (!open_req->rq_replay)
769 rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
770 body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
771 LASSERT(rec != NULL);
772 /* Incoming message in my byte order (it's been swabbed). */
773 /* Outgoing messages always in my byte order. */
774 LASSERT(body != NULL);
776 /* Only if the import is replayable do we set up the open replay data */
777 if (och && imp->imp_replayable) {
778 mod = obd_mod_alloc();
780 DEBUG_REQ(D_ERROR, open_req,
781 "cannot allocate md_open_data");
786 * Take a reference on \var mod, to be freed on mdc_close().
787 * It protects \var mod from being freed on eviction (commit
788 * callback is called despite rq_replay flag).
789 * Another reference for \var och.
794 spin_lock(&open_req->rq_lock);
797 mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) ||
798 it_disposition(it, DISP_OPEN_STRIPE);
799 mod->mod_open_req = open_req;
800 open_req->rq_cb_data = mod;
801 open_req->rq_commit_cb = mdc_commit_open;
802 open_req->rq_early_free_repbuf = 1;
803 spin_unlock(&open_req->rq_lock);
806 rec->cr_fid2 = body->mbo_fid1;
807 rec->cr_open_handle_old = body->mbo_open_handle;
808 open_req->rq_replay_cb = mdc_replay_open;
809 if (!fid_is_sane(&body->mbo_fid1)) {
810 DEBUG_REQ(D_ERROR, open_req,
811 "saving replay request with insane FID " DFID,
812 PFID(&body->mbo_fid1));
816 DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data");
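/*
 * In short: the md_open_data set up above links the open request with its
 * eventual close request. rq_replay_cb = mdc_replay_open() rewrites the
 * open handle in both after an open replay, and rq_commit_cb =
 * mdc_commit_open() marks the open as committed so mdc_close() can tell a
 * harmless -ESTALE from a real failure.
 */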
820 static void mdc_free_open(struct md_open_data *mod)
824 if (mod->mod_is_create == 0 &&
825 imp_connect_disp_stripe(mod->mod_open_req->rq_import))
829 * No reason to assert here if the open request still has
830 * rq_replay == 1. It means that mdc_close() failed and the
831 * close request wasn't sent. That is not fatal to the client;
832 * the worst case is eviction if the client holds an open lock
835 DEBUG_REQ(D_RPCTRACE, mod->mod_open_req,
836 "free open request, rq_replay=%d",
837 mod->mod_open_req->rq_replay);
839 ptlrpc_request_committed(mod->mod_open_req, committed);
840 if (mod->mod_close_req)
841 ptlrpc_request_committed(mod->mod_close_req, committed);
844 static int mdc_clear_open_replay_data(struct obd_export *exp,
845 struct obd_client_handle *och)
847 struct md_open_data *mod = och->och_mod;
851 * It is possible not to have \var mod in the case of an eviction between
852 * lookup and ll_file_open().
857 LASSERT(mod != LP_POISON);
858 LASSERT(mod->mod_open_req != NULL);
860 spin_lock(&mod->mod_open_req->rq_lock);
862 mod->mod_och->och_open_handle.cookie = 0;
863 mod->mod_open_req->rq_early_free_repbuf = 0;
864 spin_unlock(&mod->mod_open_req->rq_lock);
874 static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
875 struct md_open_data *mod, struct ptlrpc_request **request)
877 struct obd_device *obd = class_exp2obd(exp);
878 struct ptlrpc_request *req;
879 struct req_format *req_fmt;
880 size_t u32_count = 0;
885 CDEBUG(D_INODE, "%s: "DFID" file closed with intent: %x\n",
886 exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
889 if (op_data->op_bias & MDS_CLOSE_INTENT) {
890 req_fmt = &RQF_MDS_CLOSE_INTENT;
891 if (op_data->op_bias & MDS_HSM_RELEASE) {
892 /* allocate a FID for volatile file */
893 rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2,
896 CERROR("%s: "DFID" allocating FID: rc = %d\n",
897 obd->obd_name, PFID(&op_data->op_fid1),
899 /* save the errcode and proceed to close */
903 if (op_data->op_bias & MDS_CLOSE_RESYNC_DONE) {
904 size_t count = op_data->op_data_size / sizeof(__u32);
906 if (count > INLINE_RESYNC_ARRAY_SIZE)
910 req_fmt = &RQF_MDS_CLOSE;
914 if (CFS_FAIL_CHECK(OBD_FAIL_MDC_CLOSE))
917 req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);
919 /* Ensure that this close's handle is fixed up during replay. */
920 if (likely(mod != NULL)) {
921 LASSERTF(mod->mod_open_req != NULL &&
922 mod->mod_open_req->rq_type != LI_POISON,
923 "POISONED open %px!\n", mod->mod_open_req);
925 mod->mod_close_req = req;
927 DEBUG_REQ(D_RPCTRACE, mod->mod_open_req, "matched open");
928 /* We no longer want to preserve this open for replay even
929 * though the open was committed. b=3632, b=3633 */
930 spin_lock(&mod->mod_open_req->rq_lock);
931 mod->mod_open_req->rq_replay = 0;
932 spin_unlock(&mod->mod_open_req->rq_lock);
934 CDEBUG(D_HA, "couldn't find open req; expecting close error\n");
938 * TODO: repeat close after errors
940 CWARN("%s: close of FID "DFID" failed, file reference will be "
941 "dropped when this client unmounts or is evicted\n",
942 obd->obd_name, PFID(&op_data->op_fid1));
943 GOTO(out, rc = -ENOMEM);
947 req_capsule_set_size(&req->rq_pill, &RMF_U32, RCL_CLIENT,
948 u32_count * sizeof(__u32));
950 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
952 ptlrpc_request_free(req);
957 /* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
958 * portal whose threads are not taking any DLM locks and are therefore
959 * always progressing */
960 req->rq_request_portal = MDS_READPAGE_PORTAL;
961 ptlrpc_at_set_req_timeout(req);
963 if (!obd->u.cli.cl_lsom_update ||
964 !(exp_connect_flags2(exp) & OBD_CONNECT2_LSOM))
965 op_data->op_xvalid &= ~(OP_XVALID_LAZYSIZE |
966 OP_XVALID_LAZYBLOCKS);
968 mdc_close_pack(&req->rq_pill, op_data);
970 req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
971 obd->u.cli.cl_default_mds_easize);
973 ptlrpc_request_set_replen(req);
975 ptlrpc_get_mod_rpc_slot(req);
976 rc = ptlrpc_queue_wait(req);
977 ptlrpc_put_mod_rpc_slot(req);
979 if (req->rq_repmsg == NULL) {
980 CDEBUG(D_RPCTRACE, "request %p failed to send: rc = %d\n", req,
983 rc = req->rq_status ?: -EIO;
984 } else if (rc == 0 || rc == -EAGAIN) {
985 struct mdt_body *body;
987 rc = lustre_msg_get_status(req->rq_repmsg);
988 if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
989 DEBUG_REQ(D_ERROR, req,
990 "type = PTL_RPC_MSG_ERR: rc = %d", rc);
994 body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
997 } else if (rc == -ESTALE) {
999 * -ESTALE is an allowed error after bug 3633 if the open was committed
1000 * and the server failed before the close was sent. Check whether mod
1001 * exists and return no error in that case.
1004 DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
1005 LASSERT(mod->mod_open_req != NULL);
1006 if (mod->mod_open_req->rq_committed)
1014 mod->mod_close_req = NULL;
1015 if (mod->mod_close_req)
1016 ptlrpc_request_addref(mod->mod_close_req);
1017 /* From now on, mod is accessed through open_req only,
1018 * so the close req no longer keeps a reference on mod. */
1023 RETURN(rc < 0 ? rc : saved_rc);
1026 static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
1027 u64 offset, struct page **pages, int npages,
1028 struct ptlrpc_request **request)
1030 struct ptlrpc_request *req;
1031 struct ptlrpc_bulk_desc *desc;
1040 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
1044 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
1046 ptlrpc_request_free(req);
1050 req->rq_request_portal = MDS_READPAGE_PORTAL;
1051 ptlrpc_at_set_req_timeout(req);
1053 desc = ptlrpc_prep_bulk_imp(req, npages, 1,
1054 PTLRPC_BULK_PUT_SINK,
1056 &ptlrpc_bulk_kiov_pin_ops);
1058 ptlrpc_req_put(req);
1062 /* NB req now owns desc and will free it when it gets freed */
1063 for (i = 0; i < npages; i++)
1064 desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
1067 mdc_readdir_pack(&req->rq_pill, offset, PAGE_SIZE * npages, fid);
1069 ptlrpc_request_set_replen(req);
1070 rc = ptlrpc_queue_wait(req);
1072 ptlrpc_req_put(req);
1073 if (rc != -ETIMEDOUT)
1077 if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
1078 CERROR("%s: too many resend retries: rc = %d\n",
1079 exp->exp_obd->obd_name, -EIO);
1083 /* If a signal interrupts the sleep, the remaining timeout returned is
1084 * non-zero. In that case return -EINTR.
1086 if (msleep_interruptible(resends * 1000))
1092 rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
1093 req->rq_bulk->bd_nob_transferred);
1095 ptlrpc_req_put(req);
1099 if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
1100 CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n",
1101 exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred,
1102 PAGE_SIZE * npages);
1103 ptlrpc_req_put(req);
1111 static void mdc_release_page(struct page *page, int remove)
1115 if (likely(page->mapping != NULL))
1116 cfs_delete_from_page_cache(page);
1122 static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
1123 __u64 *start, __u64 *end, int hash64)
1126 * Complement of hash is used as an index so that
1127 * radix_tree_gang_lookup() can be used to find a page with starting
1128 * hash _smaller_ than one we are looking for.
1130 unsigned long offset = hash_x_index(*hash, hash64);
1132 unsigned long flags;
1135 ll_xa_lock_irqsave(&mapping->i_pages, flags);
1136 found = radix_tree_gang_lookup(&mapping->page_tree,
1137 (void **)&page, offset, 1);
1138 if (found > 0 && !ll_xa_is_value(page)) {
1139 struct lu_dirpage *dp;
1142 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
1144 * In contrast to find_lock_page() we are sure that directory
1145 * page cannot be truncated (while DLM lock is held) and,
1146 * hence, can avoid restart.
1148 * In fact, page cannot be locked here at all, because
1149 * mdc_read_page_remote does synchronous io.
1151 wait_on_page_locked(page);
1152 if (PageUptodate(page)) {
1154 if (BITS_PER_LONG == 32 && hash64) {
1155 *start = le64_to_cpu(dp->ldp_hash_start) >> 32;
1156 *end = le64_to_cpu(dp->ldp_hash_end) >> 32;
1157 *hash = *hash >> 32;
1159 *start = le64_to_cpu(dp->ldp_hash_start);
1160 *end = le64_to_cpu(dp->ldp_hash_end);
1162 if (unlikely(*start == 1 && *hash == 0))
1165 LASSERTF(*start <= *hash, "start = %#llx"
1166 ",end = %#llx,hash = %#llx\n",
1167 *start, *end, *hash);
1168 CDEBUG(D_VFSTRACE, "offset %lx [%#llx %#llx],"
1169 " hash %#llx\n", offset, *start, *end, *hash);
1172 mdc_release_page(page, 0);
1174 } else if (*end != *start && *hash == *end) {
1176 * upon hash collision, remove this page,
1177 * otherwise put page reference, and
1178 * mdc_read_page_remote() will issue RPC to
1179 * fetch the page we want.
1182 mdc_release_page(page,
1183 le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
1188 page = ERR_PTR(-EIO);
1191 ll_xa_unlock_irqrestore(&mapping->i_pages, flags);
1198 * Adjust a set of pages, each page containing an array of lu_dirpages,
1199 * so that each page can be used as a single logical lu_dirpage.
1201 * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
1202 * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
1203 * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end
1204 * value is used as a cookie to request the next lu_dirpage in a
1205 * directory listing that spans multiple pages (two in this example):
1208 * .|--------v------- -----.
1209 * |s|e|f|p|ent|ent| ... |ent|
1210 * '--|-------------- -----' Each PAGE contains a single
1211 * '------. lu_dirpage.
1212 * .---------v------- -----.
1213 * |s|e|f|p|ent| 0 | ... | 0 |
1214 * '----------------- -----'
1216 * However, on hosts where the native VM page size (PAGE_SIZE) is
1217 * larger than LU_PAGE_SIZE, a single host page may contain multiple
1218 * lu_dirpages. After reading the lu_dirpages from the MDS, the
1219 * ldp_hash_end of the first lu_dirpage refers to the one immediately
1220 * after it in the same PAGE (arrows simplified for brevity, but
1221 * in general e0==s1, e1==s2, etc.):
1223 * .-------------------- -----.
1224 * |s0|e0|f0|p|ent|ent| ... |ent|
1225 * |---v---------------- -----|
1226 * |s1|e1|f1|p|ent|ent| ... |ent|
1227 * |---v---------------- -----| Here, each PAGE contains
1228 * ... multiple lu_dirpages.
1229 * |---v---------------- -----|
1230 * |s'|e'|f'|p|ent|ent| ... |ent|
1231 * '---|---------------- -----'
1233 * .----------------------------.
1234 * |          next PAGE          |
1236 * This structure is transformed into a single logical lu_dirpage as follows:
1238 * - Replace e0 with e' so the request for the next lu_dirpage gets the page
1239 * labeled 'next PAGE'.
1241 * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
1242 * a hash collision with the next page exists.
1244 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
1245 * to the first entry of the next lu_dirpage.
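 *
 * For example (illustrative numbers only): with a 64KiB PAGE_SIZE and the
 * fixed 4KiB LU_PAGE_SIZE, one host page holds 16 lu_dirpages. After the
 * adjustment only the first lu_dirpage header matters: its ldp_hash_end is
 * taken from the last populated lu_dirpage in the page, so the next
 * MDS_READPAGE request continues right where this host page ends.
 */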
1247 #if PAGE_SIZE > LU_PAGE_SIZE
1248 static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs)
1252 for (i = 0; i < cfs_pgs; i++) {
1253 struct lu_dirpage *dp = kmap(pages[i]);
1254 struct lu_dirpage *first = dp;
1255 struct lu_dirent *end_dirent = NULL;
1256 struct lu_dirent *ent;
1257 __u64 hash_end = dp->ldp_hash_end;
1258 __u32 flags = dp->ldp_flags;
1260 while (--lu_pgs > 0) {
1261 ent = lu_dirent_start(dp);
1262 for (end_dirent = ent; ent != NULL;
1263 end_dirent = ent, ent = lu_dirent_next(ent));
1265 /* Advance dp to next lu_dirpage. */
1266 dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);
1268 /* Check if we've reached the end of the PAGE. */
1269 if (!((unsigned long)dp & ~PAGE_MASK))
1272 /* Save the hash and flags of this lu_dirpage. */
1273 hash_end = dp->ldp_hash_end;
1274 flags = dp->ldp_flags;
1276 /* Check if lu_dirpage contains no entries. */
1277 if (end_dirent == NULL)
1280 /* Enlarge the end entry lde_reclen from 0 to
1281 * first entry of next lu_dirpage. */
1282 LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0);
1283 end_dirent->lde_reclen =
1284 cpu_to_le16((char *)(dp->ldp_entries) -
1285 (char *)end_dirent);
1288 first->ldp_hash_end = hash_end;
1289 first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
1290 first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);
1294 LASSERTF(lu_pgs == 0, "left = %d\n", lu_pgs);
1297 #define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0)
1298 #endif /* PAGE_SIZE > LU_PAGE_SIZE */
1300 /* parameters for readdir page */
1301 struct readpage_param {
1302 struct md_op_data *rp_mod;
1305 struct obd_export *rp_exp;
1309 * Read directory pages from the server.
1311 * Pages in the MDS_READPAGE RPC are packed in LU_PAGE_SIZE units; each one
1312 * starts with a lu_dirpage header that describes the start/end hash, whether
1313 * the page is empty (contains no dir entries), and whether it hash-collides
1314 * with the next page. After the client receives the reply, several such pages
1315 * are combined into a single PAGE_SIZE directory page (if PAGE_SIZE is
1316 * greater than LU_PAGE_SIZE), and the lu_dirpage header of the combined page is adjusted.
1318 static int ll_mdc_read_page_remote(void *data, struct page *page0)
1320 struct readpage_param *rp = data;
1321 struct page **page_pool;
1323 struct lu_dirpage *dp;
1324 struct md_op_data *op_data = rp->rp_mod;
1325 struct ptlrpc_request *req;
1327 struct inode *inode;
1329 int rd_pgs = 0; /* number of pages actually read */
1335 max_pages = rp->rp_exp->exp_obd->u.cli.cl_max_pages_per_rpc;
1336 inode = op_data->op_data;
1337 fid = &op_data->op_fid1;
1338 LASSERT(inode != NULL);
1340 OBD_ALLOC_PTR_ARRAY_LARGE(page_pool, max_pages);
1341 if (page_pool != NULL) {
1342 page_pool[0] = page0;
1348 for (npages = 1; npages < max_pages; npages++) {
1349 page = page_cache_alloc(inode->i_mapping);
1352 page_pool[npages] = page;
1355 rc = mdc_getpage(rp->rp_exp, fid, rp->rp_off, page_pool, npages, &req);
1357 /* page0 is special: it was added to the page cache earlier */
1358 cfs_delete_from_page_cache(page0);
1362 rd_pgs = (req->rq_bulk->bd_nob_transferred + PAGE_SIZE - 1) >>
1364 lu_pgs = req->rq_bulk->bd_nob_transferred >> LU_PAGE_SHIFT;
1365 LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));
1367 CDEBUG(D_INODE, "read %d(%d) pages\n", rd_pgs, lu_pgs);
1369 mdc_adjust_dirpages(page_pool, rd_pgs, lu_pgs);
1371 SetPageUptodate(page0);
1375 ptlrpc_req_put(req);
1376 CDEBUG(D_CACHE, "read %d/%d pages\n", rd_pgs, npages);
1377 for (i = 1; i < npages; i++) {
1378 unsigned long offset;
1382 page = page_pool[i];
1384 if (rc < 0 || i >= rd_pgs) {
1389 SetPageUptodate(page);
1392 hash = le64_to_cpu(dp->ldp_hash_start);
1395 offset = hash_x_index(hash, rp->rp_hash64);
1397 prefetchw(&page->flags);
1398 ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
1403 CDEBUG(D_VFSTRACE, "page %lu add to page cache failed:"
1404 " rc = %d\n", offset, ret);
1408 if (page_pool != &page0)
1409 OBD_FREE_PTR_ARRAY_LARGE(page_pool, max_pages);
1414 #ifdef HAVE_READ_CACHE_PAGE_WANTS_FILE
1415 static inline int mdc_read_folio_remote(struct file *file, struct folio *folio)
1417 return ll_mdc_read_page_remote(file->private_data,
1418 folio_page(folio, 0));
1421 #define mdc_read_folio_remote ll_mdc_read_page_remote
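/* On kernels where read_cache_page() passes a struct file and a folio to the
 * filler (HAVE_READ_CACHE_PAGE_WANTS_FILE), the wrapper above unpacks the
 * readpage_param from file->private_data; otherwise the filler is called with
 * the opaque data pointer and a page directly, so the plain alias is enough.
 */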
1425 * Read a dir page from the cache first; if it cannot be found there, read
1426 * it from the server and add it to the cache.
1428 * \param[in] exp MDC export
1429 * \param[in] op_data client MD stack parameters, transferring parameters
1430 * between different layers of the client MD stack
1431 * \param[in] mrinfo callback required for ldlm lock enqueue during
1433 * \param[in] hash_offset the hash offset of the page to be read
1434 * \param[out] ppage the page that was read
1436 * \retval 0 the page was retrieved successfully
1437 * \retval negative errno the page could not be retrieved
1439 static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
1440 struct md_readdir_info *mrinfo, __u64 hash_offset,
1441 struct page **ppage)
1443 struct lookup_intent it = { .it_op = IT_READDIR };
1445 struct inode *dir = op_data->op_data;
1446 struct address_space *mapping;
1447 struct lu_dirpage *dp;
1450 struct lustre_handle lockh;
1451 struct ptlrpc_request *enq_req = NULL;
1452 struct readpage_param rp_param;
1459 LASSERT(dir != NULL);
1460 mapping = dir->i_mapping;
1462 rc = mdc_intent_lock(exp, op_data, &it, &enq_req,
1463 mrinfo->mr_blocking_ast, 0);
1464 if (enq_req != NULL)
1465 ptlrpc_req_put(enq_req);
1468 CERROR("%s: "DFID" lock enqueue fails: rc = %d\n",
1469 exp->exp_obd->obd_name, PFID(&op_data->op_fid1), rc);
1474 lockh.cookie = it.it_lock_handle;
1475 mdc_set_lock_data(exp, &lockh, dir, NULL);
1477 rp_param.rp_off = hash_offset;
1478 rp_param.rp_hash64 = op_data->op_cli_flags & CLI_HASH64;
1479 page = mdc_page_locate(mapping, &rp_param.rp_off, &start, &end,
1480 rp_param.rp_hash64);
1482 CERROR("%s: dir page locate: "DFID" at %llu: rc %ld\n",
1483 exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
1484 rp_param.rp_off, PTR_ERR(page));
1485 GOTO(out_unlock, rc = PTR_ERR(page));
1486 } else if (page != NULL) {
1488 * XXX nikita: not entirely correct handling of a corner case:
1489 * suppose hash chain of entries with hash value HASH crosses
1490 * border between pages P0 and P1. First both P0 and P1 are
1491 * cached, seekdir() is called for some entry from the P0 part
1492 * of the chain. Later P0 goes out of cache. telldir(HASH)
1493 * happens and finds P1, as it starts with matching hash
1494 * value. Remaining entries from P0 part of the chain are
1495 * skipped. (Is that really a bug?)
1497 * Possible solutions: 0. don't cache P1 is such case, handle
1498 * it as an "overflow" page. 1. invalidate all pages at
1499 * once. 2. use HASH|1 as an index for P1.
1501 GOTO(hash_collision, page);
1504 rp_param.rp_exp = exp;
1505 rp_param.rp_mod = op_data;
1506 page = ll_read_cache_page(mapping,
1507 hash_x_index(rp_param.rp_off,
1508 rp_param.rp_hash64),
1509 mdc_read_folio_remote, &rp_param);
1511 CDEBUG(D_INFO, "%s: read cache page: "DFID" at %llu: %ld\n",
1512 exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
1513 rp_param.rp_off, PTR_ERR(page));
1514 GOTO(out_unlock, rc = PTR_ERR(page));
1517 wait_on_page_locked(page);
1519 if (!PageUptodate(page)) {
1520 CERROR("%s: page not updated: "DFID" at %llu: rc %d\n",
1521 exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
1522 rp_param.rp_off, -5);
1525 if (!PageChecked(page))
1526 SetPageChecked(page);
1527 if (PageError(page)) {
1528 CERROR("%s: page error: "DFID" at %llu: rc %d\n",
1529 exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
1530 rp_param.rp_off, -5);
1535 dp = page_address(page);
1536 if (BITS_PER_LONG == 32 && rp_param.rp_hash64) {
1537 start = le64_to_cpu(dp->ldp_hash_start) >> 32;
1538 end = le64_to_cpu(dp->ldp_hash_end) >> 32;
1539 rp_param.rp_off = hash_offset >> 32;
1541 start = le64_to_cpu(dp->ldp_hash_start);
1542 end = le64_to_cpu(dp->ldp_hash_end);
1543 rp_param.rp_off = hash_offset;
1546 LASSERT(start == rp_param.rp_off);
1547 CWARN("Page-wide hash collision: %#lx\n", (unsigned long)end);
1548 #if BITS_PER_LONG == 32
1549 CWARN("Real page-wide hash collision at [%llu %llu] with "
1550 "hash %llu\n", le64_to_cpu(dp->ldp_hash_start),
1551 le64_to_cpu(dp->ldp_hash_end), hash_offset);
1555 * Fetch whole overflow chain...
1563 ldlm_lock_decref(&lockh, it.it_lock_mode);
1567 mdc_release_page(page, 1);
1572 static int mdc_statfs_interpret(const struct lu_env *env,
1573 struct ptlrpc_request *req, void *args, int rc)
1575 struct obd_info *oinfo = args;
1576 struct obd_statfs *osfs;
1579 osfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
1583 oinfo->oi_osfs = osfs;
1585 CDEBUG(D_CACHE, "blocks=%llu free=%llu avail=%llu "
1586 "objects=%llu free=%llu state=%x\n",
1587 osfs->os_blocks, osfs->os_bfree, osfs->os_bavail,
1588 osfs->os_files, osfs->os_ffree, osfs->os_state);
1591 oinfo->oi_cb_up(oinfo, rc);
1596 static int mdc_statfs_async(struct obd_export *exp,
1597 struct obd_info *oinfo, time64_t max_age,
1598 struct ptlrpc_request_set *unused)
1600 struct ptlrpc_request *req;
1601 struct obd_info *aa;
1603 req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp), &RQF_MDS_STATFS,
1604 LUSTRE_MDS_VERSION, MDS_STATFS);
1606 return PTR_ERR(req);
1608 ptlrpc_request_set_replen(req);
1609 req->rq_interpret_reply = mdc_statfs_interpret;
1611 aa = ptlrpc_req_async_args(aa, req);
1614 ptlrpcd_add_req(req);
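/* The reply is handled asynchronously by mdc_statfs_interpret(), which points
 * oinfo->oi_osfs at the statfs buffer in the reply and then invokes the
 * caller's oi_cb_up() callback with the final status.
 */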
1619 static int mdc_statfs(const struct lu_env *env,
1620 struct obd_export *exp, struct obd_statfs *osfs,
1621 time64_t max_age, __u32 flags)
1623 struct obd_device *obd = class_exp2obd(exp);
1624 struct req_format *fmt;
1625 struct ptlrpc_request *req;
1626 struct obd_statfs *msfs;
1627 struct obd_import *imp, *imp0;
1632 * Since the request might also come from lprocfs, we need to
1633 * synchronize this with client_disconnect_export() (bug 15684).
1635 with_imp_locked(obd, imp0, rc)
1636 imp = class_import_get(imp0);
1640 fmt = &RQF_MDS_STATFS;
1641 if ((exp_connect_flags2(exp) & OBD_CONNECT2_SUM_STATFS) &&
1642 (flags & OBD_STATFS_SUM))
1643 fmt = &RQF_MDS_STATFS_NEW;
1644 req = ptlrpc_request_alloc_pack(imp, fmt, LUSTRE_MDS_VERSION,
1647 GOTO(output, rc = PTR_ERR(req));
1648 req->rq_allow_intr = 1;
1650 if ((flags & OBD_STATFS_SUM) &&
1651 (exp_connect_flags2(exp) & OBD_CONNECT2_SUM_STATFS)) {
1652 /* request aggregated states */
1653 struct mdt_body *body;
1655 body = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
1657 GOTO(out, rc = -EPROTO);
1658 body->mbo_valid = OBD_MD_FLAGSTATFS;
1661 ptlrpc_request_set_replen(req);
1663 if (flags & OBD_STATFS_NODELAY) {
1664 /* procfs requests must not block waiting, to avoid deadlock */
1665 req->rq_no_resend = 1;
1666 req->rq_no_delay = 1;
1669 rc = ptlrpc_queue_wait(req);
1671 /* check connection error first */
1672 if (imp->imp_connect_error)
1673 rc = imp->imp_connect_error;
1677 msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
1679 GOTO(out, rc = -EPROTO);
1684 ptlrpc_req_put(req);
1686 class_import_put(imp);
1690 static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
1692 __u32 keylen, vallen;
1696 if (gf->gf_pathlen < 2)
1699 /* Key is KEY_FID2PATH + getinfo_fid2path description */
1700 keylen = round_up(sizeof(KEY_FID2PATH) + sizeof(*gf) +
1701 sizeof(struct lu_fid), 8);
1702 OBD_ALLOC(key, keylen);
1705 memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
1706 memcpy(key + round_up(sizeof(KEY_FID2PATH), 8), gf, sizeof(*gf));
1707 memcpy(key + round_up(sizeof(KEY_FID2PATH), 8) + sizeof(*gf),
1708 gf->gf_u.gf_root_fid, sizeof(struct lu_fid));
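/* The key built above is laid out as
 *	[ KEY_FID2PATH padded to 8 bytes ][ struct getinfo_fid2path ][ root lu_fid ]
 * and is handed to the MDT through obd_get_info() below; the result, a
 * getinfo_fid2path plus the path itself, comes back in the caller's gf buffer.
 */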
1709 CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n",
1710 PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);
1712 if (!fid_is_sane(&gf->gf_fid))
1713 GOTO(out, rc = -EINVAL);
1715 /* Val is struct getinfo_fid2path result plus path */
1716 vallen = sizeof(*gf) + gf->gf_pathlen;
1718 rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf);
1719 if (rc != 0 && rc != -EREMOTE)
1722 if (vallen <= sizeof(*gf))
1723 GOTO(out, rc = -EPROTO);
1724 if (vallen > sizeof(*gf) + gf->gf_pathlen)
1725 GOTO(out, rc = -EOVERFLOW);
1727 CDEBUG(D_IOCTL, "path got "DFID" from %llu #%d: %.*s\n",
1728 PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno,
1729 /* only log the first 512 characters of the path */
1730 512, gf->gf_u.gf_path);
1733 OBD_FREE(key, keylen);
1737 static int mdc_ioc_hsm_progress(struct obd_export *exp,
1738 struct hsm_progress_kernel *hpk)
1740 struct obd_import *imp = class_exp2cliimp(exp);
1741 struct hsm_progress_kernel *req_hpk;
1742 struct ptlrpc_request *req;
1746 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
1747 LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
1749 RETURN(PTR_ERR(req));
1751 mdc_pack_body(&req->rq_pill, NULL, 0, 0, -1, 0);
1753 /* Copy hsm_progress struct */
1754 req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
1755 if (req_hpk == NULL)
1756 GOTO(out, rc = -EPROTO);
1759 req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval);
1761 ptlrpc_request_set_replen(req);
1763 ptlrpc_get_mod_rpc_slot(req);
1764 rc = ptlrpc_queue_wait(req);
1765 ptlrpc_put_mod_rpc_slot(req);
1769 ptlrpc_req_put(req);
1774 * Send hsm_ct_register to MDS
1776 * \param[in] imp import
1777 * \param[in] archive_count if in bitmap format, it is the bitmap,
1778 * else it is the count of archive_ids
1779 * \param[in] archives if in bitmap format, it is NULL,
1780 * else it is archive_id lists
1782 static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archive_count,
1785 struct ptlrpc_request *req;
1786 __u32 *archive_array;
1787 size_t archives_size;
1791 req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_CT_REGISTER);
1795 if (archives != NULL)
1796 archives_size = sizeof(*archive_array) * archive_count;
1798 archives_size = sizeof(archive_count);
1800 req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_ARCHIVE,
1801 RCL_CLIENT, archives_size);
1803 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_CT_REGISTER);
1805 ptlrpc_request_free(req);
1809 mdc_pack_body(&req->rq_pill, NULL, 0, 0, -1, 0);
1811 archive_array = req_capsule_client_get(&req->rq_pill,
1812 &RMF_MDS_HSM_ARCHIVE);
1813 if (archive_array == NULL)
1814 GOTO(out, rc = -EPROTO);
1816 if (archives != NULL)
1817 memcpy(archive_array, archives, archives_size);
1819 *archive_array = archive_count;
1821 ptlrpc_request_set_replen(req);
1823 rc = mdc_queue_wait(req);
1826 ptlrpc_req_put(req);
1830 static int mdc_ioc_hsm_current_action(struct obd_export *exp,
1831 struct md_op_data *op_data)
1833 struct hsm_current_action *hca = op_data->op_data;
1834 struct hsm_current_action *req_hca;
1835 struct ptlrpc_request *req;
1839 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
1840 &RQF_MDS_HSM_ACTION);
1844 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
1846 ptlrpc_request_free(req);
1850 mdc_pack_body(&req->rq_pill, &op_data->op_fid1, 0, 0,
1851 op_data->op_suppgids[0], 0);
1853 ptlrpc_request_set_replen(req);
1855 rc = mdc_queue_wait(req);
1859 req_hca = req_capsule_server_get(&req->rq_pill,
1860 &RMF_MDS_HSM_CURRENT_ACTION);
1861 if (req_hca == NULL)
1862 GOTO(out, rc = -EPROTO);
1868 ptlrpc_req_put(req);
1872 static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
1874 struct ptlrpc_request *req;
1878 req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
1880 MDS_HSM_CT_UNREGISTER);
1882 RETURN(PTR_ERR(req));
1884 mdc_pack_body(&req->rq_pill, NULL, 0, 0, -1, 0);
1886 ptlrpc_request_set_replen(req);
1888 rc = mdc_queue_wait(req);
1890 ptlrpc_req_put(req);
1895 static int mdc_ioc_hsm_state_get(struct obd_export *exp,
1896 struct md_op_data *op_data)
1898 struct hsm_user_state *hus = op_data->op_data;
1899 struct hsm_user_state *req_hus;
1900 struct ptlrpc_request *req;
1904 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
1905 &RQF_MDS_HSM_STATE_GET);
1909 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
1911 ptlrpc_request_free(req);
1915 mdc_pack_body(&req->rq_pill, &op_data->op_fid1, 0, 0,
1916 op_data->op_suppgids[0], 0);
1918 ptlrpc_request_set_replen(req);
1920 rc = mdc_queue_wait(req);
1924 req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
1925 if (req_hus == NULL)
1926 GOTO(out, rc = -EPROTO);
1932 ptlrpc_req_put(req);
1936 static int mdc_ioc_hsm_state_set(struct obd_export *exp,
1937 struct md_op_data *op_data)
1939 struct hsm_state_set *hss = op_data->op_data;
1940 struct hsm_state_set *req_hss;
1941 struct ptlrpc_request *req;
1945 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
1946 &RQF_MDS_HSM_STATE_SET);
1950 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
1952 ptlrpc_request_free(req);
1956 mdc_pack_body(&req->rq_pill, &op_data->op_fid1, 0, 0,
1957 op_data->op_suppgids[0], 0);
1960 req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
1961 if (req_hss == NULL)
1962 GOTO(out, rc = -EPROTO);
1965 ptlrpc_request_set_replen(req);
1967 ptlrpc_get_mod_rpc_slot(req);
1968 rc = ptlrpc_queue_wait(req);
1969 ptlrpc_put_mod_rpc_slot(req);
1973 ptlrpc_req_put(req);
1977 /* For RESTORE and RELEASE the MDT will take an EX lock on the file layout,
1978 * so we can use early cancel on the client-side locks for that resource.
1980 static inline int mdc_hsm_request_lock_to_cancel(struct obd_export *exp,
1981 struct hsm_user_request *hur,
1982 struct list_head *cancels)
1984 struct hsm_user_item *hui = &hur->hur_user_item[0];
1985 struct hsm_request *req_hr = &hur->hur_request;
1989 if (req_hr->hr_action != HUA_RESTORE &&
1990 req_hr->hr_action != HUA_RELEASE)
1993 for (i = 0; i < req_hr->hr_itemcount; i++, hui++) {
1994 if (!fid_is_sane(&hui->hui_fid))
1996 count += mdc_resource_get_unused(exp, &hui->hui_fid, cancels,
1997 LCK_EX, MDS_INODELOCK_LAYOUT);
2003 static int mdc_ioc_hsm_request(struct obd_export *exp,
2004 struct hsm_user_request *hur)
2006 struct obd_import *imp = class_exp2cliimp(exp);
2007 struct ptlrpc_request *req;
2008 struct hsm_request *req_hr;
2009 struct hsm_user_item *req_hui;
2016 req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
2020 req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM, RCL_CLIENT,
2021 hur->hur_request.hr_itemcount
2022 * sizeof(struct hsm_user_item));
2023 req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT,
2024 hur->hur_request.hr_data_len);
2026 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST);
2028 ptlrpc_request_free(req);
2032 /* Cancel existing locks */
2033 count = mdc_hsm_request_lock_to_cancel(exp, hur, &cancels);
2034 ldlm_cli_cancel_list(&cancels, count, NULL, 0);
2035 mdc_pack_body(&req->rq_pill, NULL, 0, 0, -1, 0);
2037 /* Copy hsm_request struct */
2038 req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
2040 GOTO(out, rc = -EPROTO);
2041 *req_hr = hur->hur_request;
2043 /* Copy hsm_user_item structs */
2044 req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
2045 if (req_hui == NULL)
2046 GOTO(out, rc = -EPROTO);
2047 memcpy(req_hui, hur->hur_user_item,
2048 hur->hur_request.hr_itemcount * sizeof(struct hsm_user_item));
2050 /* Copy opaque field */
2051 req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
2052 if (req_opaque == NULL)
2053 GOTO(out, rc = -EPROTO);
2054 memcpy(req_opaque, hur_data(hur), hur->hur_request.hr_data_len);
2056 ptlrpc_request_set_replen(req);
2058 ptlrpc_get_mod_rpc_slot(req);
2059 rc = ptlrpc_queue_wait(req);
2060 ptlrpc_put_mod_rpc_slot(req);
2065 ptlrpc_req_put(req);
2069 static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
2070 struct lustre_kernelcomm *lk);
2071 static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
2072 struct obd_quotactl *oqctl)
2074 struct ptlrpc_request *req;
2075 struct obd_quotactl *oqc;
2079 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_QUOTACTL);
2084 if (LUSTRE_Q_CMD_IS_POOL(oqctl->qc_cmd))
2085 req_capsule_set_size(&req->rq_pill,
2088 sizeof(*oqc) + LOV_MAXPOOLNAME + 1);
2090 if (oqctl->qc_cmd == LUSTRE_Q_ITERQUOTA ||
2091 oqctl->qc_cmd == LUSTRE_Q_ITEROQUOTA)
2092 req_capsule_set_size(&req->rq_pill, &RMF_OBD_QUOTA_ITER,
2093 RCL_SERVER, LQUOTA_ITER_BUFLEN);
2095 req_capsule_set_size(&req->rq_pill, &RMF_OBD_QUOTA_ITER,
2098 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION,
2101 ptlrpc_request_free(req);
2105 oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
2106 QCTL_COPY(oqc, oqctl);
2108 ptlrpc_request_set_replen(req);
2109 ptlrpc_at_set_req_timeout(req);
2111 rc = ptlrpc_queue_wait(req);
2113 CERROR("%s: ptlrpc_queue_wait failed: rc = %d\n",
2114 exp->exp_obd->obd_name, rc);
2118 if (req->rq_repmsg &&
2119 (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
2120 struct list_head *lst = (struct list_head *)oqctl->qc_iter_list;
2122 QCTL_COPY(oqctl, oqc);
2124 if (oqctl->qc_cmd == LUSTRE_Q_ITERQUOTA ||
2125 oqctl->qc_cmd == LUSTRE_Q_ITEROQUOTA) {
2127 struct lquota_iter *iter;
2129 buffer = req_capsule_server_get(&req->rq_pill,
2130 &RMF_OBD_QUOTA_ITER);
2132 if (buffer == NULL) {
2133 CDEBUG(D_QUOTA, "%s: no buffer in iter req\n",
2134 exp->exp_obd->obd_name);
2140 OBD_ALLOC_LARGE(iter,
2141 sizeof(struct lquota_iter) + LQUOTA_ITER_BUFLEN);
2143 GOTO(out, rc = -ENOMEM);
2145 INIT_LIST_HEAD(&iter->li_link);
2146 list_add(&iter->li_link, lst);
2148 memcpy(iter->li_buffer, buffer, LQUOTA_ITER_BUFLEN);
2149 iter->li_md_size = oqctl->qc_iter_md_buflen;
2150 if (oqctl->qc_cmd == LUSTRE_Q_ITERQUOTA)
2151 iter->li_dt_size = oqctl->qc_iter_dt_buflen;
2153 oqctl->qc_iter_md_buflen = 0;
2154 oqctl->qc_iter_dt_buflen = 0;
2158 CERROR("%s: cannot unpack obd_quotactl: rc = %d\n",
2159 exp->exp_obd->obd_name, rc);
2162 ptlrpc_req_put(req);
2167 static int mdc_ioc_swap_layouts(struct obd_export *exp,
2168 struct md_op_data *op_data)
2171 struct ptlrpc_request *req;
2173 struct mdc_swap_layouts *msl, *payload;
2176 msl = op_data->op_data;
2178 /* When the MDT receives the MDS_SWAP_LAYOUTS RPC, the
2179 * first thing it does is cancel the 2 layout
2180 * locks held by this client.
2181 * So the client should cancel its layout locks on the 2 FIDs
2182 * together with the request RPC to avoid extra RPC round trips.
2184 count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
2185 LCK_EX, MDS_INODELOCK_LAYOUT |
2186 MDS_INODELOCK_XATTR);
2187 count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
2188 LCK_EX, MDS_INODELOCK_LAYOUT |
2189 MDS_INODELOCK_XATTR);
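/* The unused LAYOUT/XATTR locks collected above are packed as early lock
 * cancels into the swap-layouts request by mdc_prep_elc_req() below, which
 * saves the separate cancel RPCs the MDT would otherwise have to trigger.
 */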
2191 req = ptlrpc_request_alloc(class_exp2cliimp(exp),
2192 &RQF_MDS_SWAP_LAYOUTS);
2194 ldlm_lock_list_put(&cancels, l_bl_ast, count);
2198 rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count);
2200 ptlrpc_request_free(req);
2204 mdc_swap_layouts_pack(&req->rq_pill, op_data);
2206 payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS);
2211 ptlrpc_request_set_replen(req);
2213 rc = ptlrpc_queue_wait(req);
2219 ptlrpc_req_put(req);
2223 static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
2224 void *karg, void __user *uarg)
2226 struct obd_device *obd = exp->exp_obd;
2227 struct obd_ioctl_data *data;
2228 struct obd_import *imp = obd->u.cli.cl_import;
2232 CDEBUG(D_IOCTL, "%s: cmd=%x len=%u karg=%pK uarg=%pK\n",
2233 obd->obd_name, cmd, len, karg, uarg);
2235 /* handle commands that do not need @karg first */
2237 case LL_IOC_GET_CONNECT_FLAGS:
2238 if (copy_to_user(uarg, exp_connect_flags_ptr(exp),
2239 sizeof(*exp_connect_flags_ptr(exp))))
2244 if (unlikely(karg == NULL))
2245 RETURN(OBD_IOC_ERROR(obd->obd_name, cmd, "karg=NULL", -EINVAL));
2248 if (!try_module_get(THIS_MODULE)) {
2249 CERROR("%s: cannot get module '%s'\n", obd->obd_name,
2250 module_name(THIS_MODULE));
2254 case OBD_IOC_FID2PATH:
2255 rc = mdc_ioc_fid2path(exp, karg);
2257 case LL_IOC_HSM_CT_START:
2258 rc = mdc_ioc_hsm_ct_start(exp, karg);
2259 /* ignore if it was already registered on this MDS. */
2263 case LL_IOC_HSM_PROGRESS:
2264 rc = mdc_ioc_hsm_progress(exp, karg);
2266 case LL_IOC_HSM_STATE_GET:
2267 rc = mdc_ioc_hsm_state_get(exp, karg);
2269 case LL_IOC_HSM_STATE_SET:
2270 rc = mdc_ioc_hsm_state_set(exp, karg);
2272 case LL_IOC_HSM_ACTION:
2273 rc = mdc_ioc_hsm_current_action(exp, karg);
2275 case LL_IOC_HSM_REQUEST:
2276 rc = mdc_ioc_hsm_request(exp, karg);
2278 case OBD_IOC_CLIENT_RECOVER:
2279 rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0);
2283 #ifdef IOC_OSC_SET_ACTIVE
2284 case_OBD_IOC_DEPRECATED_FT(IOC_OSC_SET_ACTIVE, obd->obd_name, 2, 17);
2286 case OBD_IOC_SET_ACTIVE:
2287 rc = ptlrpc_set_import_active(imp, data->ioc_offset);
2290 * Normally the IOC_OBD_STATFS and OBD_IOC_QUOTACTL ioctls are handled by
2291 * LMV instead of MDC. But when the cluster was upgraded from 1.8 there may
2292 * be no LMV layer, so we can be called here. Eventually
2293 * this code should be removed.
2296 case IOC_OBD_STATFS: {
2297 struct obd_statfs stat_buf = {0};
2299 if (*((__u32 *) data->ioc_inlbuf2) != 0)
2300 GOTO(out, rc = -ENODEV);
2303 if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
2304 min((int)data->ioc_plen2,
2305 (int)sizeof(struct obd_uuid))))
2306 GOTO(out, rc = -EFAULT);
2308 rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
2309 ktime_get_seconds() - OBD_STATFS_CACHE_SECONDS,
2314 if (copy_to_user(data->ioc_pbuf1, &stat_buf,
2315 min((int) data->ioc_plen1,
2316 (int) sizeof(stat_buf))))
2317 GOTO(out, rc = -EFAULT);
2321 case OBD_IOC_QUOTACTL: {
2322 struct if_quotactl *qctl = karg;
2323 struct obd_quotactl *oqctl;
2325 OBD_ALLOC_PTR(oqctl);
2327 GOTO(out, rc = -ENOMEM);
2329 QCTL_COPY(oqctl, qctl);
2330 rc = obd_quotactl(exp, oqctl);
2332 QCTL_COPY_NO_PNAME(qctl, oqctl);
2333 qctl->qc_valid = QC_MDTIDX;
2334 qctl->obd_uuid = obd->u.cli.cl_target_uuid;
2337 OBD_FREE_PTR(oqctl);
2340 case LL_IOC_LOV_SWAP_LAYOUTS:
2341 rc = mdc_ioc_swap_layouts(exp, karg);
2344 rc = OBD_IOC_ERROR(obd->obd_name, cmd, "unrecognized", -ENOTTY);
2348 module_put(THIS_MODULE);
2353 static int mdc_get_info_rpc(struct obd_export *exp,
2354 u32 keylen, void *key,
2355 u32 vallen, void *val)
2357 struct obd_import *imp = class_exp2cliimp(exp);
2358 struct ptlrpc_request *req;
2363 req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
2367 req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
2368 RCL_CLIENT, keylen);
2369 req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN,
2370 RCL_CLIENT, sizeof(vallen));
2372 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
2374 ptlrpc_request_free(req);
2378 tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
2379 memcpy(tmp, key, keylen);
2380 tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN);
2381 memcpy(tmp, &vallen, sizeof(vallen));
2383 req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL,
2384 RCL_SERVER, vallen);
2385 ptlrpc_request_set_replen(req);
2387 	/* If the server fails to resolve the FID and OI scrub cannot fix it, it
2388 	 * will return -EINPROGRESS and ptlrpc_queue_wait() will keep retrying;
2389 	 * make the request interruptible to avoid a deadlock.
2391 if (KEY_IS(KEY_FID2PATH))
2392 req->rq_allow_intr = 1;
2394 rc = ptlrpc_queue_wait(req);
2395 	/* -EREMOTE means the get_info result is partial and the lookup needs to
2396 	 * continue on another MDT; see the fid2path handling in lmv_iocontrol() */
2397 if (rc == 0 || rc == -EREMOTE) {
2398 tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
2399 memcpy(val, tmp, vallen);
2400 if (req_capsule_rep_need_swab(&req->rq_pill)) {
2401 if (KEY_IS(KEY_FID2PATH))
2402 lustre_swab_fid2path(val);
2405 ptlrpc_req_put(req);
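/* Byte-swap helpers for HSM KUC messages (hsm_action_item, hsm_action_list
 * and kuc_hdr) received from a peer of opposite endianness. */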
2410 static void lustre_swab_hai(struct hsm_action_item *h)
2412 __swab32s(&h->hai_len);
2413 __swab32s(&h->hai_action);
2414 lustre_swab_lu_fid(&h->hai_fid);
2415 lustre_swab_lu_fid(&h->hai_dfid);
2416 __swab64s(&h->hai_cookie);
2417 __swab64s(&h->hai_extent.offset);
2418 __swab64s(&h->hai_extent.length);
2419 __swab64s(&h->hai_gid);
2422 static void lustre_swab_hal(struct hsm_action_list *h)
2424 struct hsm_action_item *hai;
2427 __swab32s(&h->hal_version);
2428 __swab32s(&h->hal_count);
2429 __swab32s(&h->hal_archive_id);
2430 __swab64s(&h->hal_flags);
2432 for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
2433 lustre_swab_hai(hai);
2436 static void lustre_swab_kuch(struct kuc_hdr *l)
2438 __swab16s(&l->kuc_magic);
2439 /* __u8 l->kuc_transport */
2440 __swab16s(&l->kuc_msgtype);
2441 __swab16s(&l->kuc_msglen);
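/*
 * Handle LL_IOC_HSM_CT_START: register a copytool with the HSM coordinator,
 * or unregister it when LK_FLG_STOP is set. If LK_FLG_DATANR is set, the
 * archive IDs are taken from the lk_data array.
 */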
2444 static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
2445 struct lustre_kernelcomm *lk)
2447 struct obd_import *imp = class_exp2cliimp(exp);
2450 if (lk->lk_group != KUC_GRP_HSM) {
2451 CERROR("Bad copytool group %d\n", lk->lk_group);
2455 CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, lk->lk_wfd,
2456 lk->lk_uid, lk->lk_group, lk->lk_flags);
2458 if (lk->lk_flags & LK_FLG_STOP) {
2459 /* Unregister with the coordinator */
2460 rc = mdc_ioc_hsm_ct_unregister(imp);
2462 __u32 *archives = NULL;
2464 if ((lk->lk_flags & LK_FLG_DATANR) && lk->lk_data_count > 0)
2465 archives = lk->lk_data;
2467 rc = mdc_ioc_hsm_ct_register(imp, lk->lk_data_count, archives);
2474  * Send a message to any listening copytools.
2475  * \param[in] len total length of message
2476  * \param[in] val KUC message (kuc_hdr + hsm_action_list)
2478 static int mdc_hsm_copytool_send(const struct obd_uuid *uuid,
2479 size_t len, void *val)
2481 struct kuc_hdr *lh = (struct kuc_hdr *)val;
2482 struct hsm_action_list *hal = (struct hsm_action_list *)(lh + 1);
2486 if (len < sizeof(*lh) + sizeof(*hal)) {
2487 CERROR("Short HSM message %zu < %zu\n", len,
2488 sizeof(*lh) + sizeof(*hal));
2491 if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
2492 lustre_swab_kuch(lh);
2493 lustre_swab_hal(hal);
2494 } else if (lh->kuc_magic != KUC_MAGIC) {
2495 CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
2499 CDEBUG(D_HSM, " Received message mg=%x t=%d m=%d l=%d actions=%d "
2501 lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype,
2502 lh->kuc_msglen, hal->hal_count, hal->hal_fsname);
2504 /* Broadcast to HSM listeners */
2505 rc = libcfs_kkuc_group_put(uuid, KUC_GRP_HSM, lh);
2511  * Callback passed to the KUC layer to re-register each HSM copytool
2512  * registered with this MDC after MDT shutdown/recovery.
2513  * \param[in] data copytool registration data (struct kkuc_ct_data)
2514  * \param[in] cb_arg callback argument (obd_import)
2516 static int mdc_hsm_ct_reregister(void *data, void *cb_arg)
2518 struct obd_import *imp = (struct obd_import *)cb_arg;
2519 struct kkuc_ct_data *kcd = data;
2520 __u32 *archives = NULL;
2524 (kcd->kcd_magic != KKUC_CT_DATA_ARRAY_MAGIC &&
2525 kcd->kcd_magic != KKUC_CT_DATA_BITMAP_MAGIC))
2528 if (kcd->kcd_magic == KKUC_CT_DATA_BITMAP_MAGIC) {
2529 CDEBUG(D_HA, "%s: recover copytool registration to MDT "
2530 "(archive=%#x)\n", imp->imp_obd->obd_name,
2531 kcd->kcd_nr_archives);
2533 CDEBUG(D_HA, "%s: recover copytool registration to MDT "
2534 "(archive nr = %u)\n",
2535 imp->imp_obd->obd_name, kcd->kcd_nr_archives);
2536 if (kcd->kcd_nr_archives != 0)
2537 archives = kcd->kcd_archives;
2540 rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_nr_archives, archives);
2541 /* ignore error if the copytool is already registered */
2542 return (rc == -EEXIST) ? 0 : rc;
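/*
 * Kernel thread that walks the KUC HSM group and re-registers every known
 * copytool with the MDT, then drops the import reference taken by
 * mdc_kuc_reregister().
 */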
2545 static int mdc_kuc_reregister_thread(void *data)
2547 struct obd_import *imp = data;
2551 /* re-register HSM agents */
2552 rc = libcfs_kkuc_group_foreach(&imp->imp_obd->obd_uuid, KUC_GRP_HSM,
2553 mdc_hsm_ct_reregister, imp);
2554 if (rc < 0 && rc != -EEXIST)
2555 CWARN("%s: Failed to re-register HSM agents (uuid: %s): rc = %d\n",
2556 imp->imp_obd->obd_name,
2557 obd_uuid2str(&imp->imp_obd->obd_uuid), rc);
2559 class_import_put(imp);
2564  * Re-establish all KUC contexts with the MDT
2565  * after an MDT shutdown/recovery.
2566  * This is done in the background.
2568 static int mdc_kuc_reregister(struct obd_import *imp)
2570 struct task_struct *task;
2573 class_import_get(imp);
2574 task = kthread_run(mdc_kuc_reregister_thread, imp, "kuc_reregister");
2577 class_import_put(imp);
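/*
 * Handle obd_set_info_async() keys for the MDC: KEY_READ_ONLY toggles
 * OBD_CONNECT_RDONLY on the import, KEY_CHANGELOG_CLEAR is forwarded to the
 * MDS, KEY_HSM_COPYTOOL_SEND broadcasts an HSM message to local copytools,
 * and KEY_DEFAULT_EASIZE caches the default EA size; any other key falls
 * through to osc_set_info_async().
 */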
2584 static int mdc_set_info_async(const struct lu_env *env,
2585 struct obd_export *exp,
2586 u32 keylen, void *key,
2587 u32 vallen, void *val,
2588 struct ptlrpc_request_set *set)
2590 struct obd_import *imp = class_exp2cliimp(exp);
2594 if (KEY_IS(KEY_READ_ONLY)) {
2595 if (vallen != sizeof(int))
2598 spin_lock(&imp->imp_lock);
2599 if (*((int *)val)) {
2600 imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
2601 imp->imp_connect_data.ocd_connect_flags |=
2604 imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
2605 imp->imp_connect_data.ocd_connect_flags &=
2606 ~OBD_CONNECT_RDONLY;
2608 spin_unlock(&imp->imp_lock);
2610 rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
2611 keylen, key, vallen, val, set);
2614 if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
2615 rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
2616 keylen, key, vallen, val, set);
2619 if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
2620 rc = mdc_hsm_copytool_send(&imp->imp_obd->obd_uuid, vallen,
2625 if (KEY_IS(KEY_DEFAULT_EASIZE)) {
2626 __u32 *default_easize = val;
2628 exp->exp_obd->u.cli.cl_default_mds_easize = *default_easize;
2632 rc = osc_set_info_async(env, exp, keylen, key, vallen, val, set);
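/*
 * Serve a small set of keys locally (KEY_MAX_EASIZE, KEY_DEFAULT_EASIZE,
 * KEY_CONN_DATA, KEY_TGT_COUNT); any other key is fetched from the MDS via
 * mdc_get_info_rpc().
 *
 * A minimal usage sketch (illustrative only, not code from this file):
 * callers normally go through the obd_get_info() wrapper, e.g.
 *
 *	__u32 easize;
 *	__u32 vallen = sizeof(easize);
 *	int rc;
 *
 *	rc = obd_get_info(NULL, exp, sizeof(KEY_DEFAULT_EASIZE),
 *			  KEY_DEFAULT_EASIZE, &vallen, &easize);
 */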
2636 static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
2637 __u32 keylen, void *key, __u32 *vallen, void *val)
2641 if (KEY_IS(KEY_MAX_EASIZE)) {
2642 __u32 mdsize, *max_easize;
2644 if (*vallen != sizeof(int))
2646 mdsize = *(__u32 *)val;
2647 if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
2648 exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
2650 *max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
2652 } else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
2653 __u32 *default_easize;
2655 if (*vallen != sizeof(int))
2657 default_easize = val;
2658 *default_easize = exp->exp_obd->u.cli.cl_default_mds_easize;
2660 } else if (KEY_IS(KEY_CONN_DATA)) {
2661 struct obd_import *imp = class_exp2cliimp(exp);
2662 struct obd_connect_data *data = val;
2664 if (*vallen != sizeof(*data))
2667 *data = imp->imp_connect_data;
2669 } else if (KEY_IS(KEY_TGT_COUNT)) {
2670 *((__u32 *)val) = 1;
2674 rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);
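/* Ask the MDT to commit outstanding metadata updates for \a fid by sending
 * a synchronous MDS_SYNC request. */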
2679 static int mdc_fsync(struct obd_export *exp, const struct lu_fid *fid,
2680 struct ptlrpc_request **request)
2682 struct ptlrpc_request *req;
2687 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
2691 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
2693 ptlrpc_request_free(req);
2697 mdc_pack_body(&req->rq_pill, fid, 0, 0, -1, 0);
2699 ptlrpc_request_set_replen(req);
2701 rc = ptlrpc_queue_wait(req);
2703 ptlrpc_req_put(req);
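/*
 * Async arguments and interpret callback for MDS_RMFID: when the reply
 * arrives, copy the per-FID result codes from the RMF_RCS buffer into the
 * caller-supplied array.
 */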
2709 struct mdc_rmfid_args {
2714 static int mdc_rmfid_interpret(const struct lu_env *env,
2715 struct ptlrpc_request *req,
2718 struct mdc_rmfid_args *aa;
2723 aa = ptlrpc_req_async_args(aa, req);
2725 size = req_capsule_get_size(&req->rq_pill, &RMF_RCS,
2727 LASSERT(size == sizeof(int) * aa->mra_nr);
2728 rcs = req_capsule_server_get(&req->rq_pill, &RMF_RCS);
2730 LASSERT(aa->mra_rcs);
2731 LASSERT(aa->mra_nr);
2732 memcpy(aa->mra_rcs, rcs, size);
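/*
 * Pack an array of FIDs into an MDS_RMFID request and add it to \a set;
 * the per-FID results are copied into \a rcs by mdc_rmfid_interpret().
 */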
2738 static int mdc_rmfid(struct obd_export *exp, struct fid_array *fa,
2739 int *rcs, struct ptlrpc_request_set *set)
2741 struct ptlrpc_request *req;
2742 struct mdc_rmfid_args *aa;
2748 req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_RMFID);
2752 flen = fa->fa_nr * sizeof(struct lu_fid);
2753 req_capsule_set_size(&req->rq_pill, &RMF_FID_ARRAY,
2755 req_capsule_set_size(&req->rq_pill, &RMF_FID_ARRAY,
2757 req_capsule_set_size(&req->rq_pill, &RMF_RCS,
2758 RCL_SERVER, fa->fa_nr * sizeof(__u32));
2759 rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_RMFID);
2761 ptlrpc_request_free(req);
2764 tmp = req_capsule_client_get(&req->rq_pill, &RMF_FID_ARRAY);
2765 memcpy(tmp, fa->fa_fids, flen);
2767 mdc_pack_body(&req->rq_pill, NULL, 0, 0, -1, 0);
2768 b = req_capsule_client_get(&req->rq_pill, &RMF_MDT_BODY);
2769 b->mbo_ctime = ktime_get_real_seconds();
2771 ptlrpc_request_set_replen(req);
2774 aa = ptlrpc_req_async_args(aa, req);
2776 aa->mra_nr = fa->fa_nr;
2777 req->rq_interpret_reply = mdc_rmfid_interpret;
2779 ptlrpc_set_add_req(set, req);
2780 ptlrpc_check_set(NULL, set);
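/*
 * React to import state changes: reset grants on disconnect, flush the FID
 * sequence when the import goes inactive, clean up the lock namespace and
 * unplug cached pages on invalidation, redo the KUC registration once the
 * import is active again, and refresh grants and EA sizes when new connect
 * data arrives.
 */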
2785 static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
2786 enum obd_import_event event)
2788 struct client_obd *cli;
2792 if (WARN_ON_ONCE(!obd || !imp || imp->imp_obd != obd))
2800 case IMP_EVENT_DISCON:
2801 spin_lock(&cli->cl_loi_list_lock);
2802 cli->cl_avail_grant = 0;
2803 cli->cl_lost_grant = 0;
2804 spin_unlock(&cli->cl_loi_list_lock);
2806 case IMP_EVENT_INACTIVE:
2808 		 * Flush the current sequence so the client obtains a new one
2809 		 * from the server after a disconnect/reconnect.
2811 down_read(&cli->cl_seq_rwsem);
2813 seq_client_flush(cli->cl_seq);
2814 up_read(&cli->cl_seq_rwsem);
2816 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
2818 case IMP_EVENT_INVALIDATE: {
2819 struct ldlm_namespace *ns = obd->obd_namespace;
2823 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
2825 env = cl_env_get(&refcheck);
2827 		/* Reset grants. All pages go to failing RPCs due to
2828 		 * the invalid import.
2830 osc_io_unplug(env, cli, NULL);
2832 cfs_hash_for_each_nolock(ns->ns_rs_hash,
2833 osc_ldlm_resource_invalidate,
2835 cl_env_put(env, &refcheck);
2836 ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
2842 case IMP_EVENT_ACTIVE:
2843 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
2844 /* redo the kuc registration after reconnecting */
2846 rc = mdc_kuc_reregister(imp);
2848 case IMP_EVENT_OCD: {
2849 struct obd_connect_data *ocd = &imp->imp_connect_data;
2851 if (OCD_HAS_FLAG(ocd, GRANT))
2852 osc_init_grant(cli, ocd);
2854 md_init_ea_size(obd->obd_self_export, ocd->ocd_max_easize, 0);
2855 rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
2858 case IMP_EVENT_DEACTIVATE:
2859 case IMP_EVENT_ACTIVATE:
2862 CERROR("Unknown import event %x\n", event);
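/* Allocate a new FID for a client object from the sequence controller,
 * taking cl_seq_rwsem to serialise against sequence teardown. */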
2868 int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
2869 struct lu_fid *fid, struct md_op_data *op_data)
2871 struct client_obd *cli = &exp->exp_obd->u.cli;
2876 down_read(&cli->cl_seq_rwsem);
2878 rc = seq_client_alloc_fid(env, cli->cl_seq, fid);
2879 up_read(&cli->cl_seq_rwsem);
2884 static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
2886 struct client_obd *cli = &exp->exp_obd->u.cli;
2887 return &cli->cl_target_uuid;
2891  * Determine whether the lock can be canceled instead of being replayed
2892  * during recovery. A non-zero value is returned if the lock can be
2893  * canceled; zero means it must be replayed.
2895 static int mdc_cancel_weight(struct ldlm_lock *lock)
2897 if (lock->l_resource->lr_type != LDLM_IBITS)
2900 	/* FIXME: if we ever get into a situation where there are too many
2901 	 * opened files with open locks on a single node, then we really
2902 	 * should replay these open locks to re-acquire them */
2903 if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
2906 /* Special case for DoM locks, cancel only unused and granted locks */
2907 if (ldlm_has_dom(lock) &&
2908 (lock->l_granted_mode != lock->l_req_mode ||
2909 osc_ldlm_weigh_ast(lock) != 0))
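/* LVB operation: drop the cached VFS inode pointer when an ldlm resource
 * is freed. */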
2915 static int mdc_resource_inode_free(struct ldlm_resource *res)
2917 if (res->lr_lvb_inode)
2918 res->lr_lvb_inode = NULL;
2923 static struct ldlm_valblock_ops inode_lvbo = {
2924 .lvbo_free = mdc_resource_inode_free
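/* Set up the changelog replicator llog context and connect it to the MDT
 * llog initiator. */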
2927 static int mdc_llog_init(struct obd_device *obd)
2929 struct obd_llog_group *olg = &obd->obd_olg;
2930 struct llog_ctxt *ctxt;
2935 rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, obd,
2940 ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT);
2941 llog_initiator_connect(ctxt);
2942 llog_ctxt_put(ctxt);
2947 static void mdc_llog_finish(struct obd_device *obd)
2949 struct llog_ctxt *ctxt;
2953 ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
2955 llog_cleanup(NULL, ctxt);
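/*
 * MDC device setup: perform the common OSC client setup, register tunables,
 * set the DoM inline reply size and LSOM defaults, install the lock cancel
 * weight callback and inode LVB operations, and initialise the changelog
 * llog context and character device.
 */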
2960 int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
2966 rc = osc_setup_common(obd, cfg);
2970 rc = mdc_tunables_init(obd);
2972 GOTO(err_osc_cleanup, rc);
2974 obd->u.cli.cl_dom_min_inline_repsize = MDC_DOM_DEF_INLINE_REPSIZE;
2975 obd->u.cli.cl_lsom_update = true;
2977 ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);
2979 obd->obd_namespace->ns_lvbo = &inode_lvbo;
2981 rc = mdc_llog_init(obd);
2983 CERROR("%s: failed to setup llogging subsystems: rc = %d\n",
2985 GOTO(err_llog_cleanup, rc);
2988 rc = mdc_changelog_cdev_init(obd);
2990 CERROR("%s: failed to setup changelog char device: rc = %d\n",
2992 GOTO(err_changelog_cleanup, rc);
2997 err_changelog_cleanup:
2998 mdc_llog_finish(obd);
3000 lprocfs_free_md_stats(obd);
3001 ptlrpc_lprocfs_unregister_obd(obd);
3003 osc_cleanup_common(obd);
3007 /* Initialize the default and maximum LOV EA sizes. This allows
3008  * us to make MDS RPCs with reply buffers large enough to hold a
3009  * default-sized EA without having to calculate this (via a call into the
3010  * LOV + OSCs) each time we make an RPC. The maximum size is tracked
3011  * but not used, to avoid wastefully vmalloc()'ing large reply buffers
3012  * when a large number of stripes is possible. If a larger reply buffer
3013  * is required, it will be reallocated in the ptlrpc layer due to overflow.
3015 static int mdc_init_ea_size(struct obd_export *exp, __u32 easize,
3018 struct obd_device *obd = exp->exp_obd;
3019 struct client_obd *cli = &obd->u.cli;
3022 if (cli->cl_max_mds_easize < easize)
3023 cli->cl_max_mds_easize = easize;
3025 if (cli->cl_default_mds_easize < def_easize)
3026 cli->cl_default_mds_easize = def_easize;
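/* Undo mdc_setup(): common OSC precleanup, then shut down the changelog
 * character device, the llog context, and the procfs/stats state. */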
3031 static int mdc_precleanup(struct obd_device *obd)
3035 osc_precleanup_common(obd);
3037 mdc_changelog_cdev_finish(obd);
3038 mdc_llog_finish(obd);
3039 lprocfs_free_md_stats(obd);
3040 ptlrpc_lprocfs_unregister_obd(obd);
3045 static int mdc_cleanup(struct obd_device *obd)
3047 struct client_obd *cli = &obd->u.cli;
3048 LASSERT(cli->cl_mod_rpcs_in_flight == 0);
3049 return osc_cleanup_common(obd);
3052 static const struct obd_ops mdc_obd_ops = {
3053 .o_owner = THIS_MODULE,
3054 .o_setup = mdc_setup,
3055 .o_precleanup = mdc_precleanup,
3056 .o_cleanup = mdc_cleanup,
3057 .o_add_conn = client_import_add_conn,
3058 .o_del_conn = client_import_del_conn,
3059 .o_connect = client_connect_import,
3060 .o_reconnect = osc_reconnect,
3061 .o_disconnect = osc_disconnect,
3062 .o_iocontrol = mdc_iocontrol,
3063 .o_set_info_async = mdc_set_info_async,
3064 .o_statfs = mdc_statfs,
3065 .o_statfs_async = mdc_statfs_async,
3066 .o_fid_init = client_fid_init,
3067 .o_fid_fini = client_fid_fini,
3068 .o_fid_alloc = mdc_fid_alloc,
3069 .o_import_event = mdc_import_event,
3070 .o_get_info = mdc_get_info,
3071 .o_get_uuid = mdc_get_uuid,
3072 .o_quotactl = mdc_quotactl,
3075 static const struct md_ops mdc_md_ops = {
3076 .m_get_root = mdc_get_root,
3077 .m_null_inode = mdc_null_inode,
3078 .m_close = mdc_close,
3079 .m_create = mdc_create,
3080 .m_enqueue = mdc_enqueue,
3081 .m_getattr = mdc_getattr,
3082 .m_getattr_name = mdc_getattr_name,
3083 .m_intent_lock = mdc_intent_lock,
3085 .m_rename = mdc_rename,
3086 .m_setattr = mdc_setattr,
3087 .m_setxattr = mdc_setxattr,
3088 .m_getxattr = mdc_getxattr,
3089 .m_fsync = mdc_fsync,
3090 .m_file_resync = mdc_file_resync,
3091 .m_read_page = mdc_read_page,
3092 .m_unlink = mdc_unlink,
3093 .m_cancel_unused = mdc_cancel_unused,
3094 .m_init_ea_size = mdc_init_ea_size,
3095 .m_set_lock_data = mdc_set_lock_data,
3096 .m_lock_match = mdc_lock_match,
3097 .m_get_lustre_md = mdc_get_lustre_md,
3098 .m_set_open_replay_data = mdc_set_open_replay_data,
3099 .m_clear_open_replay_data = mdc_clear_open_replay_data,
3100 .m_intent_getattr_async = mdc_intent_getattr_async,
3101 .m_revalidate_lock = mdc_revalidate_lock,
3102 .m_rmfid = mdc_rmfid,
3103 .m_batch_create = cli_batch_create,
3104 .m_batch_stop = cli_batch_stop,
3105 .m_batch_flush = cli_batch_flush,
3106 .m_batch_add = mdc_batch_add,
3109 dev_t mdc_changelog_dev;
3110 struct class *mdc_changelog_class;
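/*
 * Module init: set up libcfs, allocate the changelog character device
 * region and class, then register the MDC OBD type with its md_ops.
 */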
3111 static int __init mdc_init(void)
3115 rc = libcfs_setup();
3119 rc = alloc_chrdev_region(&mdc_changelog_dev, 0,
3120 MDC_CHANGELOG_DEV_COUNT,
3121 MDC_CHANGELOG_DEV_NAME);
3125 mdc_changelog_class = ll_class_create(MDC_CHANGELOG_DEV_NAME);
3126 if (IS_ERR(mdc_changelog_class)) {
3127 rc = PTR_ERR(mdc_changelog_class);
3131 rc = class_register_type(&mdc_obd_ops, &mdc_md_ops, true,
3132 LUSTRE_MDC_NAME, &mdc_device_type);
3139 class_destroy(mdc_changelog_class);
3141 unregister_chrdev_region(mdc_changelog_dev, MDC_CHANGELOG_DEV_COUNT);
3145 static void __exit mdc_exit(void)
3147 class_unregister_type(LUSTRE_MDC_NAME);
3148 class_destroy(mdc_changelog_class);
3149 unregister_chrdev_region(mdc_changelog_dev, MDC_CHANGELOG_DEV_COUNT);
3150 idr_destroy(&mdc_changelog_minor_idr);
3153 MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
3154 MODULE_DESCRIPTION("Lustre Metadata Client");
3155 MODULE_VERSION(LUSTRE_VERSION_STRING);
3156 MODULE_LICENSE("GPL");
3158 module_init(mdc_init);
3159 module_exit(mdc_exit);