/*
 * GPL HEADER START
 *
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 only,
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License version 2 for more details (a copy is included
 * in the LICENSE file that accompanied this code).
 *
 * You should have received a copy of the GNU General Public License
 * version 2 along with this program; If not, see
 * http://www.gnu.org/licenses/gpl-2.0.html
 *
 * GPL HEADER END
 */
/*
 * Copyright (c) 2001, 2010, Oracle and/or its affiliates. All rights reserved.
 * Use is subject to license terms.
 *
 * Copyright (c) 2011, 2016, Intel Corporation.
 */
/*
 * This file is part of Lustre, http://www.lustre.org/
 * Lustre is a trademark of Sun Microsystems, Inc.
 */

#define DEBUG_SUBSYSTEM S_MDC

#include <linux/init.h>
#include <linux/kthread.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/user_namespace.h>
#include <linux/utsname.h>
#ifdef HAVE_UIDGID_HEADER
# include <linux/uidgid.h>
#endif

#include <lustre/lustre_errno.h>

#include <cl_object.h>
#include <llog_swab.h>
#include <lprocfs_status.h>
#include <lustre_acl.h>
#include <lustre_fid.h>
#include <lustre_ioctl.h>
#include <lustre_kernelcomm.h>
#include <lustre_lmv.h>
#include <lustre_log.h>
#include <lustre_param.h>
#include <lustre_swab.h>
#include <obd_class.h>

#include "mdc_internal.h"

#define REQUEST_MINOR 244

static int mdc_cleanup(struct obd_device *obd);
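
/*
 * Issue an RPC while respecting this client's cl_max_rpcs_in_flight limit:
 * a thin wrapper that brackets ptlrpc_queue_wait() between
 * obd_get_request_slot() and obd_put_request_slot(). The HSM handlers
 * below (e.g. mdc_ioc_hsm_ct_register()) use it so unbounded userspace
 * ioctls cannot flood the MDT.
 */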
static inline int mdc_queue_wait(struct ptlrpc_request *req)
{
	struct client_obd *cli = &req->rq_import->imp_obd->u.cli;
	int rc;

	/* obd_get_request_slot() ensures that this client has no more
	 * than cl_max_rpcs_in_flight RPCs simultaneously in flight
	 * against an MDT. */
	rc = obd_get_request_slot(cli);
	if (rc != 0)
		return rc;

	rc = ptlrpc_queue_wait(req);
	obd_put_request_slot(cli);

	return rc;
}

/**
 * Send MDS_GET_ROOT RPC to fetch the root FID.
 *
 * If \a fileset is not NULL it should contain a subdirectory off
 * the ROOT/ directory to be mounted on the client. Return the FID
 * of the subdirectory to the client to mount onto its mountpoint.
 *
 * \param[in]	exp	MDC export
 * \param[in]	fileset	fileset name, which could be NULL
 * \param[out]	rootfid	root FID of this mountpoint
 *
 * \retval	0 on success, negative errno on failure
 */
static int mdc_get_root(struct obd_export *exp, const char *fileset,
			struct lu_fid *rootfid)
{
	struct ptlrpc_request	*req;
	struct mdt_body		*body;
	int			 rc;
	ENTRY;

	if (fileset && !(exp_connect_flags(exp) & OBD_CONNECT_SUBTREE))
		RETURN(-ENOTSUPP);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_GET_ROOT);
	if (req == NULL)
		RETURN(-ENOMEM);

	if (fileset != NULL)
		req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
				     strlen(fileset) + 1);
	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_ROOT);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}
	mdc_pack_body(req, NULL, 0, 0, -1, 0);
	if (fileset != NULL) {
		char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);

		memcpy(name, fileset, strlen(fileset));
	}
	lustre_msg_add_flags(req->rq_reqmsg, LUSTRE_IMP_FULL);
	req->rq_send_state = LUSTRE_IMP_FULL;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	if (body == NULL)
		GOTO(out, rc = -EPROTO);

	*rootfid = body->mbo_fid1;
	CDEBUG(D_NET, "root fid="DFID", last_committed=%llu\n",
	       PFID(rootfid), lustre_msg_get_last_committed(req->rq_repmsg));
	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}

/*
 * This function is known to always claim that it will receive 4 buffers
 * from the server. Even in cases where acl_size and md_size are zero,
 * the RPC header will contain 4 fields and the RPC itself will contain
 * zero-size fields. This is because mdt_getattr*() _always_ returns
 * 4 fields, but if the ACL is not needed and thus zero, it shrinks that
 * field, making it zero size. The same applies to md_size. This is the
 * cause of problems when the client waits for a smaller number of
 * fields. The issue will be fixed later when the client becomes aware
 * of the RPC layouts.
 */
static int mdc_getattr_common(struct obd_export *exp,
			      struct ptlrpc_request *req)
{
	struct req_capsule *pill = &req->rq_pill;
	struct mdt_body    *body;
	void		   *eadata;
	int		    rc;

	/* Request message already built. */
	rc = ptlrpc_queue_wait(req);
	if (rc != 0)
		RETURN(rc);

	/* sanity check for the reply */
	body = req_capsule_server_get(pill, &RMF_MDT_BODY);
	if (body == NULL)
		RETURN(-EPROTO);

	CDEBUG(D_NET, "mode: %o\n", body->mbo_mode);

	mdc_update_max_ea_from_body(exp, body);
	if (body->mbo_eadatasize != 0) {
		eadata = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
						      body->mbo_eadatasize);
		if (eadata == NULL)
			RETURN(-EPROTO);
	}

	RETURN(0);
}

static int mdc_getattr(struct obd_export *exp, struct md_op_data *op_data,
		       struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int rc;

	/* Single MDS without an LMV case */
	if (op_data->op_flags & MF_GET_MDT_IDX) {
		op_data->op_mds = 0;
		RETURN(0);
	}

	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_GETATTR);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid,
		      op_data->op_mode, -1, 0);
	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     op_data->op_mode);
	ptlrpc_request_set_replen(req);

	rc = mdc_getattr_common(exp, req);
	if (rc != 0)
		ptlrpc_req_finished(req);
	else
		*request = req;
	RETURN(rc);
}

static int mdc_getattr_name(struct obd_export *exp, struct md_op_data *op_data,
			    struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int rc;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_GETATTR_NAME);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
			     op_data->op_namelen + 1);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GETATTR_NAME);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	mdc_pack_body(req, &op_data->op_fid1, op_data->op_valid,
		      op_data->op_mode, op_data->op_suppgids[0], 0);

	if (op_data->op_name) {
		char *name = req_capsule_client_get(&req->rq_pill, &RMF_NAME);

		LASSERT(strnlen(op_data->op_name, op_data->op_namelen) ==
			op_data->op_namelen);
		memcpy(name, op_data->op_name, op_data->op_namelen);
	}

	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     op_data->op_mode);
	ptlrpc_request_set_replen(req);

	rc = mdc_getattr_common(exp, req);
	if (rc != 0)
		ptlrpc_req_finished(req);
	else
		*request = req;
	RETURN(rc);
}

static int mdc_xattr_common(struct obd_export *exp,
			    const struct req_format *fmt,
			    const struct lu_fid *fid, int opcode, u64 valid,
			    const char *xattr_name, const char *input,
			    int input_size, int output_size, int flags,
			    __u32 suppgid, struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int xattr_namelen = 0;
	char *tmp;
	int rc;

	*request = NULL;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), fmt);
	if (req == NULL)
		RETURN(-ENOMEM);

	if (xattr_name) {
		xattr_namelen = strlen(xattr_name) + 1;
		req_capsule_set_size(&req->rq_pill, &RMF_NAME, RCL_CLIENT,
				     xattr_namelen);
	}
	if (input_size)
		req_capsule_set_size(&req->rq_pill, &RMF_EADATA, RCL_CLIENT,
				     input_size);

	/* Flush local XATTR locks to get rid of a possible cancel RPC */
	if (opcode == MDS_REINT && fid_is_sane(fid) &&
	    exp->exp_connect_data.ocd_ibits_known & MDS_INODELOCK_XATTR) {
		struct list_head cancels = LIST_HEAD_INIT(cancels);
		int count;

		/* Without this, packing would fail */
		req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
				     RCL_CLIENT, 0);

		count = mdc_resource_get_unused(exp, fid, &cancels, LCK_EX,
						MDS_INODELOCK_XATTR);

		rc = mdc_prep_elc_req(exp, req, MDS_REINT, &cancels, count);
		if (rc) {
			ptlrpc_request_free(req);
			RETURN(rc);
		}
	} else {
		rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, opcode);
		if (rc) {
			ptlrpc_request_free(req);
			RETURN(rc);
		}
	}

	if (opcode == MDS_REINT) {
		struct mdt_rec_setxattr *rec;

		CLASSERT(sizeof(struct mdt_rec_setxattr) ==
			 sizeof(struct mdt_rec_reint));
		rec = req_capsule_client_get(&req->rq_pill, &RMF_REC_REINT);
		rec->sx_opcode = REINT_SETXATTR;
		rec->sx_fsuid  = from_kuid(&init_user_ns, current_fsuid());
		rec->sx_fsgid  = from_kgid(&init_user_ns, current_fsgid());
		rec->sx_cap    = cfs_curproc_cap_pack();
		rec->sx_suppgid1 = suppgid;
		rec->sx_suppgid2 = -1;
		rec->sx_fid    = *fid;
		rec->sx_valid  = valid | OBD_MD_FLCTIME;
		rec->sx_time   = ktime_get_real_seconds();
		rec->sx_size   = output_size;
		rec->sx_flags  = flags;
	} else {
		mdc_pack_body(req, fid, valid, output_size, suppgid, flags);
	}

	if (xattr_name) {
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_NAME);
		memcpy(tmp, xattr_name, xattr_namelen);
	}
	if (input_size) {
		tmp = req_capsule_client_get(&req->rq_pill, &RMF_EADATA);
		memcpy(tmp, input, input_size);
	}

	if (req_capsule_has_field(&req->rq_pill, &RMF_EADATA, RCL_SERVER))
		req_capsule_set_size(&req->rq_pill, &RMF_EADATA,
				     RCL_SERVER, output_size);
	ptlrpc_request_set_replen(req);

	/* make rpc */
	if (opcode == MDS_REINT)
		mdc_get_mod_rpc_slot(req, NULL);

	rc = ptlrpc_queue_wait(req);

	if (opcode == MDS_REINT)
		mdc_put_mod_rpc_slot(req, NULL);

	/* For XATTR_LUSTRE_LOV.add, we'd save the LOVEA for replay. */
	if (opcode == MDS_REINT && rc == 0) {
		struct mdt_body *body;
		struct req_capsule *pill = &req->rq_pill;

		body = req_capsule_server_get(pill, &RMF_MDT_BODY);
		if (body == NULL)
			GOTO(out, rc = -EPROTO);

		if (body->mbo_valid & OBD_MD_FLEASIZE) {
			void *eadata;
			void *lmm;

			eadata = req_capsule_server_sized_get(pill,
						&RMF_MDT_MD,
						body->mbo_eadatasize);
			if (eadata == NULL)
				GOTO(out, rc = -EPROTO);

			if (req_capsule_get_size(pill, &RMF_EADATA,
						 RCL_CLIENT) <
			    body->mbo_eadatasize) {
				rc = sptlrpc_cli_enlarge_reqbuf(req, 4,
						body->mbo_eadatasize);
				if (rc < 0)
					GOTO(out, rc = -ENOMEM);
			} else {
				req_capsule_shrink(pill, &RMF_EADATA,
						   body->mbo_eadatasize,
						   RCL_CLIENT);
			}

			req_capsule_set_size(pill, &RMF_EADATA, RCL_CLIENT,
					     body->mbo_eadatasize);

			lmm = req_capsule_client_get(pill, &RMF_EADATA);
			if (lmm != NULL)
				memcpy(lmm, eadata, body->mbo_eadatasize);
		}
	}
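	/* At this point the request buffer also carries the layout the MDT
	 * actually assigned, so a replayed or resent setxattr recreates the
	 * same LOVEA rather than the client's original input. */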
out:
	if (rc)
		ptlrpc_req_finished(req);
	else
		*request = req;

	RETURN(rc);
}

static int mdc_setxattr(struct obd_export *exp, const struct lu_fid *fid,
			u64 valid, const char *xattr_name,
			const char *input, int input_size, int output_size,
			int flags, __u32 suppgid,
			struct ptlrpc_request **request)
{
	return mdc_xattr_common(exp, &RQF_MDS_REINT_SETXATTR,
				fid, MDS_REINT, valid, xattr_name,
				input, input_size, output_size, flags,
				suppgid, request);
}

static int mdc_getxattr(struct obd_export *exp, const struct lu_fid *fid,
			u64 valid, const char *xattr_name,
			const char *input, int input_size, int output_size,
			int flags, struct ptlrpc_request **request)
{
	return mdc_xattr_common(exp, &RQF_MDS_GETXATTR,
				fid, MDS_GETXATTR, valid, xattr_name,
				input, input_size, output_size, flags,
				-1, request);
}
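
/*
 * Both wrappers above funnel into mdc_xattr_common(): mdc_setxattr()
 * issues a MDS_REINT request carrying a mdt_rec_setxattr (and may first
 * cancel cached MDS_INODELOCK_XATTR locks), while mdc_getxattr() is a
 * plain MDS_GETXATTR request packed via mdc_pack_body().
 */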

#ifdef CONFIG_FS_POSIX_ACL
static int mdc_unpack_acl(struct ptlrpc_request *req, struct lustre_md *md)
{
	struct req_capsule *pill = &req->rq_pill;
	struct mdt_body *body = md->body;
	struct posix_acl *acl;
	void *buf;
	int rc;

	if (!body->mbo_aclsize)
		return 0;

	buf = req_capsule_server_sized_get(pill, &RMF_ACL, body->mbo_aclsize);
	if (buf == NULL)
		return -EPROTO;

	acl = posix_acl_from_xattr(&init_user_ns, buf, body->mbo_aclsize);
	if (IS_ERR_OR_NULL(acl)) {
		rc = acl ? PTR_ERR(acl) : 0;
		CERROR("convert xattr to acl: %d\n", rc);
		return rc;
	}

	rc = posix_acl_valid(&init_user_ns, acl);
	if (rc) {
		CERROR("validate acl: %d\n", rc);
		posix_acl_release(acl);
		return rc;
	}

	md->posix_acl = acl;
	return 0;
}
#else
#define mdc_unpack_acl(req, md) 0
#endif /* CONFIG_FS_POSIX_ACL */

int mdc_get_lustre_md(struct obd_export *exp, struct ptlrpc_request *req,
		      struct obd_export *dt_exp, struct obd_export *md_exp,
		      struct lustre_md *md)
{
	struct req_capsule *pill = &req->rq_pill;
	int rc;
	ENTRY;

	memset(md, 0, sizeof(*md));

	md->body = req_capsule_server_get(pill, &RMF_MDT_BODY);
	LASSERT(md->body != NULL);

	if (md->body->mbo_valid & OBD_MD_FLEASIZE) {
		if (!S_ISREG(md->body->mbo_mode)) {
			CDEBUG(D_INFO, "OBD_MD_FLEASIZE set, should be a "
			       "regular file, but is not\n");
			GOTO(out, rc = -EPROTO);
		}

		if (md->body->mbo_eadatasize == 0) {
			CDEBUG(D_INFO, "OBD_MD_FLEASIZE set, "
			       "but eadatasize 0\n");
			GOTO(out, rc = -EPROTO);
		}

		md->layout.lb_len = md->body->mbo_eadatasize;
		md->layout.lb_buf = req_capsule_server_sized_get(pill,
							&RMF_MDT_MD,
							md->layout.lb_len);
		if (md->layout.lb_buf == NULL)
			GOTO(out, rc = -EPROTO);
	} else if (md->body->mbo_valid & OBD_MD_FLDIREA) {
		const union lmv_mds_md *lmv;
		size_t lmv_size;

		if (!S_ISDIR(md->body->mbo_mode)) {
			CDEBUG(D_INFO, "OBD_MD_FLDIREA set, should be a "
			       "directory, but is not\n");
			GOTO(out, rc = -EPROTO);
		}

		lmv_size = md->body->mbo_eadatasize;
		if (lmv_size == 0) {
			CDEBUG(D_INFO, "OBD_MD_FLDIREA is set, "
			       "but eadatasize 0\n");
			GOTO(out, rc = -EPROTO);
		}

		if (md->body->mbo_valid & OBD_MD_MEA) {
			lmv = req_capsule_server_sized_get(pill, &RMF_MDT_MD,
							   lmv_size);
			if (lmv == NULL)
				GOTO(out, rc = -EPROTO);

			rc = md_unpackmd(md_exp, &md->lmv, lmv, lmv_size);
			if (rc < 0)
				GOTO(out, rc);

			if (rc < (typeof(rc))sizeof(*md->lmv)) {
				CDEBUG(D_INFO, "size too small: "
				       "rc < sizeof(*md->lmv) (%d < %d)\n",
				       rc, (int)sizeof(*md->lmv));
				GOTO(out, rc = -EPROTO);
			}
		}
	}
	rc = 0;

	if (md->body->mbo_valid & OBD_MD_FLACL) {
		/* For ACL, it is possible that FLACL is set but aclsize is
		 * zero. Only when aclsize != 0 is there an actual segment
		 * for the ACL in the reply buffer. */
		if (md->body->mbo_aclsize) {
			rc = mdc_unpack_acl(req, md);
			if (rc)
				GOTO(out, rc);
#ifdef CONFIG_FS_POSIX_ACL
		} else {
			md->posix_acl = NULL;
#endif
		}
	}

	EXIT;
out:
	if (rc) {
#ifdef CONFIG_FS_POSIX_ACL
		posix_acl_release(md->posix_acl);
#endif
	}
	return rc;
}

int mdc_free_lustre_md(struct obd_export *exp, struct lustre_md *md)
{
	ENTRY;
	RETURN(0);
}

void mdc_replay_open(struct ptlrpc_request *req)
{
	struct md_open_data *mod = req->rq_cb_data;
	struct ptlrpc_request *close_req;
	struct obd_client_handle *och;
	struct lustre_handle old;
	struct mdt_body *body;
	ENTRY;

	if (mod == NULL) {
		DEBUG_REQ(D_ERROR, req,
			  "Can't properly replay without open data.");
		EXIT;
		return;
	}

	body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
	LASSERT(body != NULL);

	och = mod->mod_och;
	if (och != NULL) {
		struct lustre_handle *file_fh;

		LASSERT(och->och_magic == OBD_CLIENT_HANDLE_MAGIC);

		file_fh = &och->och_fh;
		CDEBUG(D_HA, "updating handle from %#llx to %#llx\n",
		       file_fh->cookie, body->mbo_handle.cookie);
		old = *file_fh;
		*file_fh = body->mbo_handle;
	}
	close_req = mod->mod_close_req;
	if (close_req != NULL) {
		__u32 opc = lustre_msg_get_opc(close_req->rq_reqmsg);
		struct mdt_ioepoch *epoch;

		LASSERT(opc == MDS_CLOSE);
		epoch = req_capsule_client_get(&close_req->rq_pill,
					       &RMF_MDT_EPOCH);
		LASSERT(epoch);

		if (och != NULL)
			LASSERT(!memcmp(&old, &epoch->mio_handle, sizeof(old)));

		DEBUG_REQ(D_HA, close_req, "updating close body with new fh");
		epoch->mio_handle = body->mbo_handle;
	}
	EXIT;
}

void mdc_commit_open(struct ptlrpc_request *req)
{
	struct md_open_data *mod = req->rq_cb_data;

	if (mod == NULL)
		return;

	/**
	 * No need to touch md_open_data::mod_och, it holds a reference on
	 * \var mod. The two drop their references to each other, and \var mod
	 * will be freed when md_open_data::mod_och puts its reference.
	 */

	/**
	 * Do not let the open request disappear; it may still be needed
	 * for the close RPC to happen (this occurs on eviction only, since
	 * otherwise ptlrpc_request::rq_replay does not let mdc_commit_open()
	 * be called). Just mark this RPC as committed to distinguish the
	 * two cases; see mdc_close() for details. The open request reference
	 * will be put along with freeing \var mod.
	 */
	ptlrpc_request_addref(req);
	spin_lock(&req->rq_lock);
	req->rq_committed = 1;
	spin_unlock(&req->rq_lock);
	req->rq_cb_data = NULL;
	obd_mod_put(mod);
}

int mdc_set_open_replay_data(struct obd_export *exp,
			     struct obd_client_handle *och,
			     struct lookup_intent *it)
{
	struct md_open_data *mod;
	struct mdt_rec_create *rec;
	struct mdt_body *body;
	struct ptlrpc_request *open_req = it->it_request;
	struct obd_import *imp = open_req->rq_import;
	ENTRY;

	if (!open_req->rq_replay)
		RETURN(0);

	rec = req_capsule_client_get(&open_req->rq_pill, &RMF_REC_REINT);
	body = req_capsule_server_get(&open_req->rq_pill, &RMF_MDT_BODY);
	LASSERT(rec != NULL);
	/* Incoming message in my byte order (it's been swabbed). */
	/* Outgoing messages always in my byte order. */
	LASSERT(body != NULL);

	/* We set the replay_open data only if the import is replayable */
	if (och && imp->imp_replayable) {
		mod = obd_mod_alloc();
		if (mod == NULL) {
			DEBUG_REQ(D_ERROR, open_req,
				  "Can't allocate md_open_data");
			RETURN(0);
		}

		/**
		 * Take a reference on \var mod, to be freed on mdc_close().
		 * It protects \var mod from being freed on eviction (commit
		 * callback is called despite rq_replay flag).
		 * Another reference for \var och.
		 */
		obd_mod_get(mod);
		obd_mod_get(mod);

		spin_lock(&open_req->rq_lock);
		och->och_mod = mod;
		mod->mod_och = och;
		mod->mod_is_create = it_disposition(it, DISP_OPEN_CREATE) ||
				     it_disposition(it, DISP_OPEN_STRIPE);
		mod->mod_open_req = open_req;
		open_req->rq_cb_data = mod;
		open_req->rq_commit_cb = mdc_commit_open;
		spin_unlock(&open_req->rq_lock);
	}

	rec->cr_fid2 = body->mbo_fid1;
	rec->cr_ioepoch = body->mbo_ioepoch;
	rec->cr_old_handle.cookie = body->mbo_handle.cookie;
	open_req->rq_replay_cb = mdc_replay_open;
	if (!fid_is_sane(&body->mbo_fid1)) {
		DEBUG_REQ(D_ERROR, open_req, "Saving replay request with "
			  "insane fid");
		LBUG();
	}

	DEBUG_REQ(D_RPCTRACE, open_req, "Set up open replay data");
	RETURN(0);
}

static void mdc_free_open(struct md_open_data *mod)
{
	int committed = 0;

	if (mod->mod_is_create == 0 &&
	    imp_connect_disp_stripe(mod->mod_open_req->rq_import))
		committed = 1;

	/**
	 * No reason to assert here if the open request has rq_replay == 1.
	 * It means that mdc_close failed, and the close request wasn't sent.
	 * It is not fatal to the client. The worst that can happen is an
	 * eviction if the client gets an open lock.
	 */
	DEBUG_REQ(D_RPCTRACE, mod->mod_open_req,
		  "free open request, rq_replay = %d",
		  mod->mod_open_req->rq_replay);

	ptlrpc_request_committed(mod->mod_open_req, committed);
	if (mod->mod_close_req)
		ptlrpc_request_committed(mod->mod_close_req, committed);
}

int mdc_clear_open_replay_data(struct obd_export *exp,
			       struct obd_client_handle *och)
{
	struct md_open_data *mod = och->och_mod;
	ENTRY;

	/**
	 * It is possible for \var mod to be absent in the case of an eviction
	 * between lookup and ll_file_open().
	 */
	if (mod == NULL)
		RETURN(0);

	LASSERT(mod != LP_POISON);
	LASSERT(mod->mod_open_req != NULL);

	mdc_free_open(mod);
	mod->mod_och = NULL;
	och->och_mod = NULL;
	obd_mod_put(mod);

	RETURN(0);
}

static int mdc_close(struct obd_export *exp, struct md_op_data *op_data,
		     struct md_open_data *mod, struct ptlrpc_request **request)
{
	struct obd_device *obd = class_exp2obd(exp);
	struct ptlrpc_request *req;
	struct req_format *req_fmt;
	int rc;
	int saved_rc = 0;
	ENTRY;

	if (op_data->op_bias & MDS_HSM_RELEASE) {
		req_fmt = &RQF_MDS_INTENT_CLOSE;

		/* allocate a FID for volatile file */
		rc = mdc_fid_alloc(NULL, exp, &op_data->op_fid2, op_data);
		if (rc < 0) {
			CERROR("%s: "DFID" failed to allocate FID: %d\n",
			       obd->obd_name, PFID(&op_data->op_fid1), rc);
			/* save the errcode and proceed to close */
			saved_rc = rc;
		}
	} else if (op_data->op_bias & MDS_CLOSE_LAYOUT_SWAP) {
		req_fmt = &RQF_MDS_INTENT_CLOSE;
	} else {
		req_fmt = &RQF_MDS_CLOSE;
	}

	*request = NULL;
	if (OBD_FAIL_CHECK(OBD_FAIL_MDC_CLOSE))
		req = NULL;
	else
		req = ptlrpc_request_alloc(class_exp2cliimp(exp), req_fmt);

	/* Ensure that this close's handle is fixed up during replay. */
	if (likely(mod != NULL)) {
		LASSERTF(mod->mod_open_req != NULL &&
			 mod->mod_open_req->rq_type != LI_POISON,
			 "POISONED open %p!\n", mod->mod_open_req);

		mod->mod_close_req = req;

		DEBUG_REQ(D_HA, mod->mod_open_req, "matched open");
		/* We no longer want to preserve this open for replay even
		 * though the open was committed. b=3632, b=3633 */
		spin_lock(&mod->mod_open_req->rq_lock);
		mod->mod_open_req->rq_replay = 0;
		spin_unlock(&mod->mod_open_req->rq_lock);
	} else {
		CDEBUG(D_HA, "couldn't find open req; expecting close error\n");
	}
	if (req == NULL) {
		/**
		 * TODO: repeat close after errors
		 */
		CWARN("%s: close of FID "DFID" failed, file reference will be "
		      "dropped when this client unmounts or is evicted\n",
		      obd->obd_name, PFID(&op_data->op_fid1));
		GOTO(out, rc = -ENOMEM);
	}

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_CLOSE);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	/* To avoid a livelock (bug 7034), we need to send CLOSE RPCs to a
	 * portal whose threads are not taking any DLM locks and are therefore
	 * always progressing */
	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	mdc_close_pack(req, op_data);

	req_capsule_set_size(&req->rq_pill, &RMF_MDT_MD, RCL_SERVER,
			     obd->u.cli.cl_default_mds_easize);

	ptlrpc_request_set_replen(req);

	mdc_get_mod_rpc_slot(req, NULL);
	rc = ptlrpc_queue_wait(req);
	mdc_put_mod_rpc_slot(req, NULL);

	if (req->rq_repmsg == NULL) {
		CDEBUG(D_RPCTRACE, "request failed to send: %p, %d\n", req,
		       req->rq_status);
		rc = req->rq_status ?: -EIO;
	} else if (rc == 0 || rc == -EAGAIN) {
		struct mdt_body *body;

		rc = lustre_msg_get_status(req->rq_repmsg);
		if (lustre_msg_get_type(req->rq_repmsg) == PTL_RPC_MSG_ERR) {
			DEBUG_REQ(D_ERROR, req,
				  "type == PTL_RPC_MSG_ERR, err = %d", rc);
			if (rc > 0)
				rc = -rc;
		}
		body = req_capsule_server_get(&req->rq_pill, &RMF_MDT_BODY);
		if (body == NULL)
			rc = -EPROTO;
	} else if (rc == -ESTALE) {
		/**
		 * ESTALE is an allowed error after bug 3633 if the open was
		 * committed and the server failed before the close was sent.
		 * Check whether mod exists and return no error in that case.
		 */
		if (mod) {
			DEBUG_REQ(D_HA, req, "Reset ESTALE = %d", rc);
			LASSERT(mod->mod_open_req != NULL);
			if (mod->mod_open_req->rq_committed)
				rc = 0;
		}
	}

out:
	if (mod) {
		if (rc != 0)
			mod->mod_close_req = NULL;
		/* From now on, mod is accessed through open_req only;
		 * the close request no longer keeps a reference on mod. */
		obd_mod_put(mod);
	}
	*request = req;

	RETURN(rc < 0 ? rc : saved_rc);
}
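
/*
 * Fetch up to \a npages directory pages starting at hash \a offset from
 * the MDT via an MDS_READPAGE bulk transfer. On -ETIMEDOUT the request
 * is re-sent for as long as client_should_resend() allows.
 */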
static int mdc_getpage(struct obd_export *exp, const struct lu_fid *fid,
		       u64 offset, struct page **pages, int npages,
		       struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	struct ptlrpc_bulk_desc *desc;
	int i;
	wait_queue_head_t waitq;
	int resends = 0;
	struct l_wait_info lwi;
	int rc;
	ENTRY;

	*request = NULL;
	init_waitqueue_head(&waitq);

restart_bulk:
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_READPAGE);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_READPAGE);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	req->rq_request_portal = MDS_READPAGE_PORTAL;
	ptlrpc_at_set_req_timeout(req);

	desc = ptlrpc_prep_bulk_imp(req, npages, 1,
				    PTLRPC_BULK_PUT_SINK | PTLRPC_BULK_BUF_KIOV,
				    MDS_BULK_PORTAL,
				    &ptlrpc_bulk_kiov_pin_ops);
	if (desc == NULL) {
		ptlrpc_request_free(req);
		RETURN(-ENOMEM);
	}

	/* NB req now owns desc and will free it when it gets freed */
	for (i = 0; i < npages; i++)
		desc->bd_frag_ops->add_kiov_frag(desc, pages[i], 0,
						 PAGE_SIZE);

	mdc_readdir_pack(req, offset, PAGE_SIZE * npages, fid);

	ptlrpc_request_set_replen(req);
	rc = ptlrpc_queue_wait(req);
	if (rc) {
		ptlrpc_req_finished(req);
		if (rc != -ETIMEDOUT)
			RETURN(rc);

		resends++;
		if (!client_should_resend(resends, &exp->exp_obd->u.cli)) {
			CERROR("%s: too many resend retries: rc = %d\n",
			       exp->exp_obd->obd_name, -EIO);
			RETURN(-EIO);
		}
		lwi = LWI_TIMEOUT_INTR(cfs_time_seconds(resends), NULL, NULL,
				       NULL);
		l_wait_event(waitq, 0, &lwi);

		goto restart_bulk;
	}

	rc = sptlrpc_cli_unwrap_bulk_read(req, req->rq_bulk,
					  req->rq_bulk->bd_nob_transferred);
	if (rc < 0) {
		ptlrpc_req_finished(req);
		RETURN(rc);
	}

	if (req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK) {
		CERROR("%s: unexpected bytes transferred: %d (%ld expected)\n",
		       exp->exp_obd->obd_name, req->rq_bulk->bd_nob_transferred,
		       PAGE_SIZE * npages);
		ptlrpc_req_finished(req);
		RETURN(-EPROTO);
	}

	*request = req;
	RETURN(0);
}
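
/*
 * Release a directory page obtained from the cache: drop the page
 * reference and, when \a remove is set, truncate it out of the mapping
 * so the next reader goes back to the MDT for it.
 */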
static void mdc_release_page(struct page *page, int remove)
{
	if (remove) {
		lock_page(page);
		if (likely(page->mapping != NULL))
			truncate_complete_page(page->mapping, page);
		unlock_page(page);
	}
	put_page(page);
}

static struct page *mdc_page_locate(struct address_space *mapping, __u64 *hash,
				    __u64 *start, __u64 *end, int hash64)
{
	/*
	 * Complement of hash is used as an index so that
	 * radix_tree_gang_lookup() can be used to find a page with starting
	 * hash _smaller_ than one we are looking for.
	 */
	unsigned long offset = hash_x_index(*hash, hash64);
	struct page *page;
	int found;

	spin_lock_irq(&mapping->tree_lock);
	found = radix_tree_gang_lookup(&mapping->page_tree,
				       (void **)&page, offset, 1);
	if (found > 0 && !radix_tree_exceptional_entry(page)) {
		struct lu_dirpage *dp;

		get_page(page);
		spin_unlock_irq(&mapping->tree_lock);
		/*
		 * In contrast to find_lock_page() we are sure that directory
		 * page cannot be truncated (while DLM lock is held) and,
		 * hence, can avoid restart.
		 *
		 * In fact, page cannot be locked here at all, because
		 * mdc_read_page_remote() does synchronous I/O.
		 */
		wait_on_page_locked(page);
		if (PageUptodate(page)) {
			dp = kmap(page);
			if (BITS_PER_LONG == 32 && hash64) {
				*start = le64_to_cpu(dp->ldp_hash_start) >> 32;
				*end   = le64_to_cpu(dp->ldp_hash_end) >> 32;
				*hash  = *hash >> 32;
			} else {
				*start = le64_to_cpu(dp->ldp_hash_start);
				*end   = le64_to_cpu(dp->ldp_hash_end);
			}
			if (unlikely(*start == 1 && *hash == 0))
				*hash = *start;
			else
				LASSERTF(*start <= *hash, "start = %#llx"
					 ",end = %#llx,hash = %#llx\n",
					 *start, *end, *hash);
			CDEBUG(D_VFSTRACE, "offset %lx [%#llx %#llx],"
			       " hash %#llx\n", offset, *start, *end, *hash);
			if (*hash > *end) {
				kunmap(page);
				mdc_release_page(page, 0);
				page = NULL;
			} else if (*end != *start && *hash == *end) {
				/*
				 * upon hash collision, remove this page,
				 * otherwise put page reference, and
				 * mdc_read_page_remote() will issue RPC to
				 * fetch the page we want.
				 */
				kunmap(page);
				mdc_release_page(page,
				    le32_to_cpu(dp->ldp_flags) & LDF_COLLIDE);
				page = NULL;
			}
		} else {
			put_page(page);
			page = ERR_PTR(-EIO);
		}
	} else {
		spin_unlock_irq(&mapping->tree_lock);
		page = NULL;
	}
	return page;
}

/**
 * Adjust a set of pages, each page containing an array of lu_dirpages,
 * so that each page can be used as a single logical lu_dirpage.
 *
 * A lu_dirpage is laid out as follows, where s = ldp_hash_start,
 * e = ldp_hash_end, f = ldp_flags, p = padding, and each "ent" is a
 * struct lu_dirent. It has size up to LU_PAGE_SIZE. The ldp_hash_end
 * value is used as a cookie to request the next lu_dirpage in a
 * directory listing that spans multiple pages (two in this example):
 *
 *    offset hash _end_ of the first lu_dirpage
 *  .|--------v------- -----.
 *  |s|e|f|p|ent|ent| ... |ent|
 *  '--|-------------- -----'   Each PAGE contains a single
 *     '------.                 lu_dirpage.
 *  .---------v------- -----.
 *  |s|e|f|p|ent| 0 | ... | 0 |
 *  '----------------- -----'
 *
 * However, on hosts where the native VM page size (PAGE_SIZE) is
 * larger than LU_PAGE_SIZE, a single host page may contain multiple
 * lu_dirpages. After reading the lu_dirpages from the MDS, the
 * ldp_hash_end of the first lu_dirpage refers to the one immediately
 * after it in the same PAGE (arrows simplified for brevity, but
 * in general e0==s1, e1==s2, etc.):
 *
 * .-------------------- -----.
 * |s0|e0|f0|p|ent|ent| ... |ent|
 * |---v---------------- -----|
 * |s1|e1|f1|p|ent|ent| ... |ent|
 * |---v---------------- -----|   Here, each PAGE contains
 *             ...                multiple lu_dirpages.
 * |---v---------------- -----|
 * |s'|e'|f'|p|ent|ent| ... |ent|
 * '---|---------------- -----'
 *     v
 * .----------------------------.
 * |          next PAGE         |
 *
 * This structure is transformed into a single logical lu_dirpage as
 * follows:
 *
 * - Replace e0 with e' so the request for the next lu_dirpage gets the page
 *   labeled 'next PAGE'.
 *
 * - Copy the LDF_COLLIDE flag from f' to f0 to correctly reflect whether
 *   a hash collision with the next page exists.
 *
 * - Adjust the lde_reclen of the ending entry of each lu_dirpage to span
 *   to the first entry of the next lu_dirpage.
 */
#if PAGE_SIZE > LU_PAGE_SIZE
static void mdc_adjust_dirpages(struct page **pages, int cfs_pgs, int lu_pgs)
{
	int i;

	for (i = 0; i < cfs_pgs; i++) {
		struct lu_dirpage	*dp = kmap(pages[i]);
		struct lu_dirpage	*first = dp;
		struct lu_dirent	*end_dirent = NULL;
		struct lu_dirent	*ent;
		__u64			 hash_end = le64_to_cpu(dp->ldp_hash_end);
		__u32			 flags = le32_to_cpu(dp->ldp_flags);

		while (--lu_pgs > 0) {
			ent = lu_dirent_start(dp);
			for (end_dirent = ent; ent != NULL;
			     end_dirent = ent, ent = lu_dirent_next(ent));

			/* Advance dp to next lu_dirpage. */
			dp = (struct lu_dirpage *)((char *)dp + LU_PAGE_SIZE);

			/* Check if we've reached the end of the PAGE. */
			if (!((unsigned long)dp & ~PAGE_MASK))
				break;

			/* Save the hash and flags of this lu_dirpage. */
			hash_end = le64_to_cpu(dp->ldp_hash_end);
			flags = le32_to_cpu(dp->ldp_flags);

			/* Check if lu_dirpage contains no entries. */
			if (end_dirent == NULL)
				break;

			/* Enlarge the end entry lde_reclen from 0 to
			 * first entry of next lu_dirpage. */
			LASSERT(le16_to_cpu(end_dirent->lde_reclen) == 0);
			end_dirent->lde_reclen =
				cpu_to_le16((char *)(dp->ldp_entries) -
					    (char *)end_dirent);
		}

		first->ldp_hash_end = hash_end;
		first->ldp_flags &= ~cpu_to_le32(LDF_COLLIDE);
		first->ldp_flags |= flags & cpu_to_le32(LDF_COLLIDE);

		kunmap(pages[i]);
	}
	LASSERTF(lu_pgs == 0, "left = %d\n", lu_pgs);
}
#else
#define mdc_adjust_dirpages(pages, cfs_pgs, lu_pgs) do {} while (0)
#endif	/* PAGE_SIZE > LU_PAGE_SIZE */
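
/*
 * For example, assuming the usual 4KB LU_PAGE_SIZE wire format, a host
 * with a 64KB PAGE_SIZE receives up to 16 lu_dirpages per PAGE from
 * MDS_READPAGE, and mdc_adjust_dirpages() above stitches them into one
 * logical lu_dirpage spanning the whole PAGE.
 */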

/* parameters for readdir page */
struct readpage_param {
	struct md_op_data	*rp_mod;
	__u64			 rp_off;
	int			 rp_hash64;
	struct obd_export	*rp_exp;
	struct md_callback	*rp_cb;
};

#ifndef HAVE_DELETE_FROM_PAGE_CACHE
static inline void delete_from_page_cache(struct page *page)
{
	remove_from_page_cache(page);
	put_page(page);
}
#endif

/**
 * Read pages from the server.
 *
 * Pages in an MDS_READPAGE RPC are packed in LU_PAGE_SIZE units, and each
 * page contains a lu_dirpage header which describes the start/end hash, and
 * whether this page is empty (contains no dir entry) or hash-collides with
 * the next page. After the client receives the reply, several pages will be
 * integrated into a dir page in PAGE_SIZE (if PAGE_SIZE is greater than
 * LU_PAGE_SIZE), and the lu_dirpage for this integrated page will be
 * adjusted.
 */
static int mdc_read_page_remote(void *data, struct page *page0)
{
	struct readpage_param *rp = data;
	struct page **page_pool;
	struct page *page;
	struct lu_dirpage *dp;
	struct md_op_data *op_data = rp->rp_mod;
	struct ptlrpc_request *req;
	int max_pages = op_data->op_max_pages;
	struct inode *inode;
	struct lu_fid *fid;
	int rd_pgs = 0; /* number of pages actually read */
	int npages;
	int i;
	int rc;
	ENTRY;

	LASSERT(max_pages > 0 && max_pages <= PTLRPC_MAX_BRW_PAGES);
	inode = op_data->op_data;
	fid = &op_data->op_fid1;
	LASSERT(inode != NULL);

	OBD_ALLOC(page_pool, sizeof(page_pool[0]) * max_pages);
	if (page_pool != NULL) {
		page_pool[0] = page0;
	} else {
		page_pool = &page0;
		max_pages = 1;
	}

	for (npages = 1; npages < max_pages; npages++) {
		page = page_cache_alloc_cold(inode->i_mapping);
		if (page == NULL)
			break;
		page_pool[npages] = page;
	}

	rc = mdc_getpage(rp->rp_exp, fid, rp->rp_off, page_pool, npages, &req);
	if (rc < 0) {
		/* page0 is special: it was added into the page cache early */
		delete_from_page_cache(page0);
	} else {
		int lu_pgs;

		rd_pgs = (req->rq_bulk->bd_nob_transferred +
			  PAGE_SIZE - 1) >> PAGE_SHIFT;
		lu_pgs = req->rq_bulk->bd_nob_transferred >>
			 LU_PAGE_SHIFT;
		LASSERT(!(req->rq_bulk->bd_nob_transferred & ~LU_PAGE_MASK));

		CDEBUG(D_INODE, "read %d(%d) pages\n", rd_pgs, lu_pgs);

		mdc_adjust_dirpages(page_pool, rd_pgs, lu_pgs);

		SetPageUptodate(page0);
	}
	unlock_page(page0);

	ptlrpc_req_finished(req);
	CDEBUG(D_CACHE, "read %d/%d pages\n", rd_pgs, npages);
	for (i = 1; i < npages; i++) {
		unsigned long	offset;
		__u64		hash;
		int		ret;

		page = page_pool[i];

		if (rc < 0 || i >= rd_pgs) {
			put_page(page);
			continue;
		}

		SetPageUptodate(page);

		dp = kmap(page);
		hash = le64_to_cpu(dp->ldp_hash_start);
		kunmap(page);

		offset = hash_x_index(hash, rp->rp_hash64);

		prefetchw(&page->flags);
		ret = add_to_page_cache_lru(page, inode->i_mapping, offset,
					    GFP_KERNEL);
		if (ret == 0)
			unlock_page(page);
		else
			CDEBUG(D_VFSTRACE, "page %lu add to page cache failed:"
			       " rc = %d\n", offset, ret);
		put_page(page);
	}

	if (page_pool != &page0)
		OBD_FREE(page_pool, sizeof(page_pool[0]) * max_pages);

	RETURN(rc);
}

/**
 * Read a dir page from the cache first; if it cannot be found there, read
 * it from the server and add it into the cache.
 *
 * \param[in] exp		MDC export
 * \param[in] op_data		client MD stack parameters, transferring
 *				parameters between different layers of
 *				the client MD stack
 * \param[in] cb_op		callback required for ldlm lock enqueue during
 *				read page
 * \param[in] hash_offset	the hash offset of the page to be read
 * \param[out] ppage		the page to be read
 *
 * \retval	= 0 got the page successfully
 *		errno(<0) failed to get the page
 */
static int mdc_read_page(struct obd_export *exp, struct md_op_data *op_data,
			 struct md_callback *cb_op, __u64 hash_offset,
			 struct page **ppage)
{
	struct lookup_intent	 it = { .it_op = IT_READDIR };
	struct page		*page;
	struct inode		*dir = op_data->op_data;
	struct address_space	*mapping;
	struct lu_dirpage	*dp;
	__u64			 start = 0;
	__u64			 end = 0;
	struct lustre_handle	 lockh;
	struct ptlrpc_request	*enq_req = NULL;
	struct readpage_param	 rp_param;
	int rc;
	ENTRY;

	*ppage = NULL;

	LASSERT(dir != NULL);
	mapping = dir->i_mapping;

	rc = mdc_intent_lock(exp, op_data, &it, &enq_req,
			     cb_op->md_blocking_ast, 0);
	if (enq_req != NULL)
		ptlrpc_req_finished(enq_req);

	if (rc < 0) {
		CERROR("%s: "DFID" lock enqueue fails: rc = %d\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1), rc);
		RETURN(rc);
	}

	rc = 0;
	lockh.cookie = it.it_lock_handle;
	mdc_set_lock_data(exp, &lockh, dir, NULL);

	rp_param.rp_off = hash_offset;
	rp_param.rp_hash64 = op_data->op_cli_flags & CLI_HASH64;
	page = mdc_page_locate(mapping, &rp_param.rp_off, &start, &end,
			       rp_param.rp_hash64);
	if (IS_ERR(page)) {
		CERROR("%s: dir page locate: "DFID" at %llu: rc %ld\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
		       rp_param.rp_off, PTR_ERR(page));
		GOTO(out_unlock, rc = PTR_ERR(page));
	} else if (page != NULL) {
		/*
		 * XXX nikita: not entirely correct handling of a corner case:
		 * suppose hash chain of entries with hash value HASH crosses
		 * border between pages P0 and P1. First both P0 and P1 are
		 * cached, seekdir() is called for some entry from the P0 part
		 * of the chain. Later P0 goes out of cache. telldir(HASH)
		 * happens and finds P1, as it starts with matching hash
		 * value. Remaining entries from P0 part of the chain are
		 * skipped. (Is that really a bug?)
		 *
		 * Possible solutions: 0. don't cache P1 in such a case,
		 * handle it as an "overflow" page. 1. invalidate all pages
		 * at once. 2. use HASH|1 as an index for P1.
		 */
		GOTO(hash_collision, page);
	}

	rp_param.rp_exp = exp;
	rp_param.rp_mod = op_data;
	page = read_cache_page(mapping,
			       hash_x_index(rp_param.rp_off,
					    rp_param.rp_hash64),
			       mdc_read_page_remote, &rp_param);
	if (IS_ERR(page)) {
		CDEBUG(D_INFO, "%s: read cache page: "DFID" at %llu: %ld\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
		       rp_param.rp_off, PTR_ERR(page));
		GOTO(out_unlock, rc = PTR_ERR(page));
	}

	wait_on_page_locked(page);
	(void)kmap(page);
	if (!PageUptodate(page)) {
		CERROR("%s: page not updated: "DFID" at %llu: rc %d\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
		       rp_param.rp_off, -5);
		goto fail;
	}
	if (!PageChecked(page))
		SetPageChecked(page);
	if (PageError(page)) {
		CERROR("%s: page error: "DFID" at %llu: rc %d\n",
		       exp->exp_obd->obd_name, PFID(&op_data->op_fid1),
		       rp_param.rp_off, -5);
		goto fail;
	}

hash_collision:
	dp = page_address(page);
	if (BITS_PER_LONG == 32 && rp_param.rp_hash64) {
		start = le64_to_cpu(dp->ldp_hash_start) >> 32;
		end = le64_to_cpu(dp->ldp_hash_end) >> 32;
		rp_param.rp_off = hash_offset >> 32;
	} else {
		start = le64_to_cpu(dp->ldp_hash_start);
		end = le64_to_cpu(dp->ldp_hash_end);
		rp_param.rp_off = hash_offset;
	}
	if (end == start) {
		LASSERT(start == rp_param.rp_off);
		CWARN("Page-wide hash collision: %#lx\n", (unsigned long)end);
#if BITS_PER_LONG == 32
		CWARN("Real page-wide hash collision at [%llu %llu] with "
		      "hash %llu\n", le64_to_cpu(dp->ldp_hash_start),
		      le64_to_cpu(dp->ldp_hash_end), hash_offset);
#endif
		/*
		 * Fetch whole overflow chain...
		 * XXX not yet.
		 */
		goto fail;
	}
	*ppage = page;
out_unlock:
	ldlm_lock_decref(&lockh, it.it_lock_mode);
	return rc;
fail:
	kunmap(page);
	mdc_release_page(page, 1);
	rc = -EIO;
	goto out_unlock;
}

static int mdc_statfs(const struct lu_env *env,
		      struct obd_export *exp, struct obd_statfs *osfs,
		      __u64 max_age, __u32 flags)
{
	struct obd_device	*obd = class_exp2obd(exp);
	struct ptlrpc_request	*req;
	struct obd_statfs	*msfs;
	struct obd_import	*imp = NULL;
	int			 rc;
	ENTRY;

	/*
	 * Since the request might also come from lprocfs, we need
	 * to sync this with client_disconnect_export(); see bug 15684.
	 */
	down_read(&obd->u.cli.cl_sem);
	if (obd->u.cli.cl_import)
		imp = class_import_get(obd->u.cli.cl_import);
	up_read(&obd->u.cli.cl_sem);
	if (imp == NULL)
		RETURN(-ENODEV);

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_STATFS,
					LUSTRE_MDS_VERSION, MDS_STATFS);
	if (req == NULL)
		GOTO(output, rc = -ENOMEM);

	ptlrpc_request_set_replen(req);

	if (flags & OBD_STATFS_NODELAY) {
		/* procfs requests should not wait here, to avoid a deadlock */
		req->rq_no_resend = 1;
		req->rq_no_delay = 1;
	}

	rc = ptlrpc_queue_wait(req);
	if (rc) {
		/* check connection error first */
		if (imp->imp_connect_error)
			rc = imp->imp_connect_error;
		GOTO(out, rc);
	}

	msfs = req_capsule_server_get(&req->rq_pill, &RMF_OBD_STATFS);
	if (msfs == NULL)
		GOTO(out, rc = -EPROTO);

	*osfs = *msfs;
	EXIT;
out:
	ptlrpc_req_finished(req);
output:
	class_import_put(imp);
	return rc;
}

static int mdc_ioc_fid2path(struct obd_export *exp, struct getinfo_fid2path *gf)
{
	__u32 keylen, vallen;
	void *key;
	int rc;

	if (gf->gf_pathlen > PATH_MAX)
		RETURN(-ENAMETOOLONG);
	if (gf->gf_pathlen < 2)
		RETURN(-EOVERFLOW);

	/* Key is KEY_FID2PATH + getinfo_fid2path description */
	keylen = cfs_size_round(sizeof(KEY_FID2PATH) + sizeof(*gf) +
				sizeof(struct lu_fid));
	OBD_ALLOC(key, keylen);
	if (key == NULL)
		RETURN(-ENOMEM);
	memcpy(key, KEY_FID2PATH, sizeof(KEY_FID2PATH));
	memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)), gf, sizeof(*gf));
	memcpy(key + cfs_size_round(sizeof(KEY_FID2PATH)) + sizeof(*gf),
	       gf->gf_u.gf_root_fid, sizeof(struct lu_fid));
	CDEBUG(D_IOCTL, "path get "DFID" from %llu #%d\n",
	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno);

	if (!fid_is_sane(&gf->gf_fid))
		GOTO(out, rc = -EINVAL);

	/* Val is struct getinfo_fid2path result plus path */
	vallen = sizeof(*gf) + gf->gf_pathlen;

	rc = obd_get_info(NULL, exp, keylen, key, &vallen, gf);
	if (rc != 0 && rc != -EREMOTE)
		GOTO(out, rc);

	if (vallen <= sizeof(*gf))
		GOTO(out, rc = -EPROTO);
	if (vallen > sizeof(*gf) + gf->gf_pathlen)
		GOTO(out, rc = -EOVERFLOW);

	CDEBUG(D_IOCTL, "path got "DFID" from %llu #%d: %s\n",
	       PFID(&gf->gf_fid), gf->gf_recno, gf->gf_linkno,
	       gf->gf_pathlen < 512 ? gf->gf_u.gf_path :
	       /* only log the last 512 characters of the path */
	       gf->gf_u.gf_path + gf->gf_pathlen - 512);

out:
	OBD_FREE(key, keylen);
	return rc;
}

static int mdc_ioc_hsm_progress(struct obd_export *exp,
				struct hsm_progress_kernel *hpk)
{
	struct obd_import		*imp = class_exp2cliimp(exp);
	struct hsm_progress_kernel	*req_hpk;
	struct ptlrpc_request		*req;
	int				 rc;
	ENTRY;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_PROGRESS,
					LUSTRE_MDS_VERSION, MDS_HSM_PROGRESS);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);

	/* Copy hsm_progress struct */
	req_hpk = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_PROGRESS);
	if (req_hpk == NULL)
		GOTO(out, rc = -EPROTO);

	*req_hpk = *hpk;
	req_hpk->hpk_errval = lustre_errno_hton(hpk->hpk_errval);

	ptlrpc_request_set_replen(req);

	mdc_get_mod_rpc_slot(req, NULL);
	rc = ptlrpc_queue_wait(req);
	mdc_put_mod_rpc_slot(req, NULL);

	GOTO(out, rc);
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int mdc_ioc_hsm_ct_register(struct obd_import *imp, __u32 archives)
{
	__u32			*archive_mask;
	struct ptlrpc_request	*req;
	int			 rc;
	ENTRY;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_REGISTER,
					LUSTRE_MDS_VERSION,
					MDS_HSM_CT_REGISTER);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);

	/* Copy the archive mask */
	archive_mask = req_capsule_client_get(&req->rq_pill,
					      &RMF_MDS_HSM_ARCHIVE);
	if (archive_mask == NULL)
		GOTO(out, rc = -EPROTO);

	*archive_mask = archives;

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
	GOTO(out, rc);
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int mdc_ioc_hsm_current_action(struct obd_export *exp,
				      struct md_op_data *op_data)
{
	struct hsm_current_action	*hca = op_data->op_data;
	struct hsm_current_action	*req_hca;
	struct ptlrpc_request		*req;
	int				 rc;
	ENTRY;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_ACTION);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_ACTION);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	mdc_pack_body(req, &op_data->op_fid1, 0, 0,
		      op_data->op_suppgids[0], 0);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	req_hca = req_capsule_server_get(&req->rq_pill,
					 &RMF_MDS_HSM_CURRENT_ACTION);
	if (req_hca == NULL)
		GOTO(out, rc = -EPROTO);

	*hca = *req_hca;

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int mdc_ioc_hsm_ct_unregister(struct obd_import *imp)
{
	struct ptlrpc_request	*req;
	int			 rc;
	ENTRY;

	req = ptlrpc_request_alloc_pack(imp, &RQF_MDS_HSM_CT_UNREGISTER,
					LUSTRE_MDS_VERSION,
					MDS_HSM_CT_UNREGISTER);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	mdc_pack_body(req, NULL, 0, 0, -1, 0);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
	GOTO(out, rc);
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int mdc_ioc_hsm_state_get(struct obd_export *exp,
				 struct md_op_data *op_data)
{
	struct hsm_user_state	*hus = op_data->op_data;
	struct hsm_user_state	*req_hus;
	struct ptlrpc_request	*req;
	int			 rc;
	ENTRY;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_STATE_GET);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_GET);
	if (rc != 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	mdc_pack_body(req, &op_data->op_fid1, 0, 0,
		      op_data->op_suppgids[0], 0);

	ptlrpc_request_set_replen(req);

	rc = mdc_queue_wait(req);
	if (rc)
		GOTO(out, rc);

	req_hus = req_capsule_server_get(&req->rq_pill, &RMF_HSM_USER_STATE);
	if (req_hus == NULL)
		GOTO(out, rc = -EPROTO);

	*hus = *req_hus;

	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int mdc_ioc_hsm_state_set(struct obd_export *exp,
				 struct md_op_data *op_data)
{
	struct hsm_state_set	*hss = op_data->op_data;
	struct hsm_state_set	*req_hss;
	struct ptlrpc_request	*req;
	int			 rc;
	ENTRY;

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_HSM_STATE_SET);
	if (req == NULL)
		RETURN(-ENOMEM);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_STATE_SET);
	if (rc != 0) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	mdc_pack_body(req, &op_data->op_fid1, 0, 0,
		      op_data->op_suppgids[0], 0);

	/* Copy states */
	req_hss = req_capsule_client_get(&req->rq_pill, &RMF_HSM_STATE_SET);
	if (req_hss == NULL)
		GOTO(out, rc = -EPROTO);
	*req_hss = *hss;

	ptlrpc_request_set_replen(req);

	mdc_get_mod_rpc_slot(req, NULL);
	rc = ptlrpc_queue_wait(req);
	mdc_put_mod_rpc_slot(req, NULL);

	GOTO(out, rc);
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int mdc_ioc_hsm_request(struct obd_export *exp,
			       struct hsm_user_request *hur)
{
	struct obd_import	*imp = class_exp2cliimp(exp);
	struct ptlrpc_request	*req;
	struct hsm_request	*req_hr;
	struct hsm_user_item	*req_hui;
	char			*req_opaque;
	int			 rc;
	ENTRY;

	req = ptlrpc_request_alloc(imp, &RQF_MDS_HSM_REQUEST);
	if (req == NULL)
		GOTO(out, rc = -ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM, RCL_CLIENT,
			     hur->hur_request.hr_itemcount
			     * sizeof(struct hsm_user_item));
	req_capsule_set_size(&req->rq_pill, &RMF_GENERIC_DATA, RCL_CLIENT,
			     hur->hur_request.hr_data_len);

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_HSM_REQUEST);
	if (rc) {
		ptlrpc_request_free(req);
		GOTO(out, rc);
	}

	mdc_pack_body(req, NULL, 0, 0, -1, 0);

	/* Copy hsm_request struct */
	req_hr = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_REQUEST);
	if (req_hr == NULL)
		GOTO(out, rc = -EPROTO);
	*req_hr = hur->hur_request;

	/* Copy hsm_user_item structs */
	req_hui = req_capsule_client_get(&req->rq_pill, &RMF_MDS_HSM_USER_ITEM);
	if (req_hui == NULL)
		GOTO(out, rc = -EPROTO);
	memcpy(req_hui, hur->hur_user_item,
	       hur->hur_request.hr_itemcount * sizeof(struct hsm_user_item));

	/* Copy opaque field */
	req_opaque = req_capsule_client_get(&req->rq_pill, &RMF_GENERIC_DATA);
	if (req_opaque == NULL)
		GOTO(out, rc = -EPROTO);
	memcpy(req_opaque, hur_data(hur), hur->hur_request.hr_data_len);

	ptlrpc_request_set_replen(req);

	mdc_get_mod_rpc_slot(req, NULL);
	rc = ptlrpc_queue_wait(req);
	mdc_put_mod_rpc_slot(req, NULL);

	GOTO(out, rc);

out:
	ptlrpc_req_finished(req);
	return rc;
}
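
/*
 * Initialize a KUC header in \a buf for a changelog message of \a len
 * bytes bound for userspace over the KUC_TRANSPORT_CHANGELOG transport.
 */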
static struct kuc_hdr *changelog_kuc_hdr(char *buf, size_t len, __u32 flags)
{
	struct kuc_hdr *lh = (struct kuc_hdr *)buf;

	LASSERT(len <= KUC_CHANGELOG_MSG_MAXSIZE);

	lh->kuc_magic = KUC_MAGIC;
	lh->kuc_transport = KUC_TRANSPORT_CHANGELOG;
	lh->kuc_flags = flags;
	lh->kuc_msgtype = CL_RECORD;
	lh->kuc_msglen = len;

	return lh;
}

struct changelog_show {
	__u64			 cs_startrec;
	enum changelog_send_flag cs_flags;
	struct file		*cs_fp;
	char			*cs_buf;
	struct obd_device	*cs_obd;
};

static inline char *cs_obd_name(struct changelog_show *cs)
{
	return cs->cs_obd->obd_name;
}

static int changelog_kkuc_cb(const struct lu_env *env, struct llog_handle *llh,
			     struct llog_rec_hdr *hdr, void *data)
{
	struct changelog_show		*cs = data;
	struct llog_changelog_rec	*rec = (struct llog_changelog_rec *)hdr;
	struct kuc_hdr			*lh;
	size_t				 len;
	int				 rc;
	ENTRY;

	if (rec->cr_hdr.lrh_type != CHANGELOG_REC) {
		rc = -EINVAL;
		CERROR("%s: not a changelog rec %x/%d: rc = %d\n",
		       cs_obd_name(cs), rec->cr_hdr.lrh_type,
		       rec->cr.cr_type, rc);
		RETURN(rc);
	}

	if (rec->cr.cr_index < cs->cs_startrec) {
		/* Skip entries earlier than what we are interested in */
		CDEBUG(D_HSM, "rec=%llu start=%llu\n",
		       rec->cr.cr_index, cs->cs_startrec);
		RETURN(0);
	}

	CDEBUG(D_HSM, "%llu %02d%-5s %llu 0x%x t="DFID" p="DFID" %.*s\n",
	       rec->cr.cr_index, rec->cr.cr_type,
	       changelog_type2str(rec->cr.cr_type), rec->cr.cr_time,
	       rec->cr.cr_flags & CLF_FLAGMASK,
	       PFID(&rec->cr.cr_tfid), PFID(&rec->cr.cr_pfid),
	       rec->cr.cr_namelen, changelog_rec_name(&rec->cr));

	len = sizeof(*lh) + changelog_rec_size(&rec->cr) + rec->cr.cr_namelen;

	/* Set up the message */
	lh = changelog_kuc_hdr(cs->cs_buf, len, cs->cs_flags);
	memcpy(lh + 1, &rec->cr, len - sizeof(*lh));

	rc = libcfs_kkuc_msg_put(cs->cs_fp, lh);
	CDEBUG(D_HSM, "kucmsg fp %p len %zu rc %d\n", cs->cs_fp, len, rc);

	RETURN(rc);
}

static int mdc_changelog_send_thread(void *csdata)
{
	struct changelog_show	*cs = csdata;
	struct llog_ctxt	*ctxt = NULL;
	struct llog_handle	*llh = NULL;
	struct kuc_hdr		*kuch;
	enum llog_flag		 flags = LLOG_F_IS_CAT;
	int			 rc;

	CDEBUG(D_HSM, "changelog to fp=%p start %llu\n",
	       cs->cs_fp, cs->cs_startrec);

	OBD_ALLOC(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
	if (cs->cs_buf == NULL)
		GOTO(out, rc = -ENOMEM);

	/* Set up the remote catalog handle */
	ctxt = llog_get_context(cs->cs_obd, LLOG_CHANGELOG_REPL_CTXT);
	if (ctxt == NULL)
		GOTO(out, rc = -ENOENT);
	rc = llog_open(NULL, ctxt, &llh, NULL, CHANGELOG_CATALOG,
		       LLOG_OPEN_EXISTS);
	if (rc) {
		CERROR("%s: fail to open changelog catalog: rc = %d\n",
		       cs_obd_name(cs), rc);
		GOTO(out, rc);
	}

	if (cs->cs_flags & CHANGELOG_FLAG_JOBID)
		flags |= LLOG_F_EXT_JOBID;

	rc = llog_init_handle(NULL, llh, flags, NULL);
	if (rc) {
		CERROR("llog_init_handle failed %d\n", rc);
		GOTO(out, rc);
	}

	rc = llog_cat_process(NULL, llh, changelog_kkuc_cb, cs, 0, 0);

	/* Send EOF no matter what our result */
	kuch = changelog_kuc_hdr(cs->cs_buf, sizeof(*kuch), cs->cs_flags);
	kuch->kuc_msgtype = CL_EOF;
	libcfs_kkuc_msg_put(cs->cs_fp, kuch);

out:
	fput(cs->cs_fp);
	if (llh)
		llog_cat_close(NULL, llh);
	if (ctxt)
		llog_ctxt_put(ctxt);
	if (cs->cs_buf)
		OBD_FREE(cs->cs_buf, KUC_CHANGELOG_MSG_MAXSIZE);
	OBD_FREE_PTR(cs);
	return rc;
}

static int mdc_ioc_changelog_send(struct obd_device *obd,
				  struct ioc_changelog *icc)
{
	struct changelog_show *cs;
	struct task_struct *task;
	int rc;

	/* Freed in mdc_changelog_send_thread */
	OBD_ALLOC_PTR(cs);
	if (cs == NULL)
		return -ENOMEM;

	cs->cs_obd = obd;
	cs->cs_startrec = icc->icc_recno;
	/* matching fput in mdc_changelog_send_thread */
	cs->cs_fp = fget(icc->icc_id);
	cs->cs_flags = icc->icc_flags;

	/*
	 * Start a new thread because we should return to the user app
	 * before writing into our pipe
	 */
	task = kthread_run(mdc_changelog_send_thread, cs,
			   "mdc_clg_send_thread");
	if (IS_ERR(task)) {
		rc = PTR_ERR(task);
		CERROR("%s: cannot start changelog thread: rc = %d\n",
		       cs_obd_name(cs), rc);
		OBD_FREE_PTR(cs);
	} else {
		rc = 0;
		CDEBUG(D_HSM, "%s: started changelog thread\n",
		       cs_obd_name(cs));
	}

	return rc;
}

static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
				struct lustre_kernelcomm *lk);

static int mdc_quotactl(struct obd_device *unused, struct obd_export *exp,
			struct obd_quotactl *oqctl)
{
	struct ptlrpc_request	*req;
	struct obd_quotactl	*oqc;
	int			 rc;
	ENTRY;

	req = ptlrpc_request_alloc_pack(class_exp2cliimp(exp),
					&RQF_MDS_QUOTACTL, LUSTRE_MDS_VERSION,
					MDS_QUOTACTL);
	if (req == NULL)
		RETURN(-ENOMEM);

	oqc = req_capsule_client_get(&req->rq_pill, &RMF_OBD_QUOTACTL);
	*oqc = *oqctl;

	ptlrpc_request_set_replen(req);
	ptlrpc_at_set_req_timeout(req);
	req->rq_no_resend = 1;

	rc = ptlrpc_queue_wait(req);
	if (rc)
		CERROR("ptlrpc_queue_wait failed, rc: %d\n", rc);

	if (req->rq_repmsg &&
	    (oqc = req_capsule_server_get(&req->rq_pill, &RMF_OBD_QUOTACTL))) {
		*oqctl = *oqc;
	} else if (!rc) {
		CERROR("Can't unpack obd_quotactl\n");
		rc = -EPROTO;
	}
	ptlrpc_req_finished(req);

	RETURN(rc);
}

static int mdc_ioc_swap_layouts(struct obd_export *exp,
				struct md_op_data *op_data)
{
	struct list_head	 cancels = LIST_HEAD_INIT(cancels);
	struct ptlrpc_request	*req;
	int			 rc, count;
	struct mdc_swap_layouts *msl, *payload;
	ENTRY;

	msl = op_data->op_data;

	/*
	 * When the MDT gets the MDS_SWAP_LAYOUTS RPC, the first thing it
	 * will do is cancel the 2 layout locks held by this client.
	 * So the client must cancel its layout locks on the 2 FIDs with
	 * the request RPC to avoid extra RPC round trips.
	 */
	count = mdc_resource_get_unused(exp, &op_data->op_fid1, &cancels,
					LCK_EX, MDS_INODELOCK_LAYOUT |
					MDS_INODELOCK_XATTR);
	count += mdc_resource_get_unused(exp, &op_data->op_fid2, &cancels,
					 LCK_EX, MDS_INODELOCK_LAYOUT |
					 MDS_INODELOCK_XATTR);

	req = ptlrpc_request_alloc(class_exp2cliimp(exp),
				   &RQF_MDS_SWAP_LAYOUTS);
	if (req == NULL) {
		ldlm_lock_list_put(&cancels, l_bl_ast, count);
		RETURN(-ENOMEM);
	}

	rc = mdc_prep_elc_req(exp, req, MDS_SWAP_LAYOUTS, &cancels, count);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	mdc_swap_layouts_pack(req, op_data);

	payload = req_capsule_client_get(&req->rq_pill, &RMF_SWAP_LAYOUTS);
	LASSERT(payload);
	*payload = *msl;

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		GOTO(out, rc);
	EXIT;
out:
	ptlrpc_req_finished(req);
	return rc;
}

static int mdc_iocontrol(unsigned int cmd, struct obd_export *exp, int len,
			 void *karg, void __user *uarg)
{
	struct obd_device *obd = exp->exp_obd;
	struct obd_ioctl_data *data = karg;
	struct obd_import *imp = obd->u.cli.cl_import;
	int rc;
	ENTRY;

	if (!try_module_get(THIS_MODULE)) {
		CERROR("%s: cannot get module '%s'\n", obd->obd_name,
		       module_name(THIS_MODULE));
		return -EINVAL;
	}
	switch (cmd) {
	case OBD_IOC_CHANGELOG_SEND:
		rc = mdc_ioc_changelog_send(obd, karg);
		GOTO(out, rc);
	case OBD_IOC_CHANGELOG_CLEAR: {
		struct ioc_changelog *icc = karg;
		struct changelog_setinfo cs =
			{.cs_recno = icc->icc_recno, .cs_id = icc->icc_id};

		rc = obd_set_info_async(NULL, exp, strlen(KEY_CHANGELOG_CLEAR),
					KEY_CHANGELOG_CLEAR, sizeof(cs), &cs,
					NULL);
		GOTO(out, rc);
	}
	case OBD_IOC_FID2PATH:
		rc = mdc_ioc_fid2path(exp, karg);
		GOTO(out, rc);
	case LL_IOC_HSM_CT_START:
		rc = mdc_ioc_hsm_ct_start(exp, karg);
		/* ignore if it was already registered on this MDS. */
		if (rc == -EEXIST)
			rc = 0;
		GOTO(out, rc);
	case LL_IOC_HSM_PROGRESS:
		rc = mdc_ioc_hsm_progress(exp, karg);
		GOTO(out, rc);
	case LL_IOC_HSM_STATE_GET:
		rc = mdc_ioc_hsm_state_get(exp, karg);
		GOTO(out, rc);
	case LL_IOC_HSM_STATE_SET:
		rc = mdc_ioc_hsm_state_set(exp, karg);
		GOTO(out, rc);
	case LL_IOC_HSM_ACTION:
		rc = mdc_ioc_hsm_current_action(exp, karg);
		GOTO(out, rc);
	case LL_IOC_HSM_REQUEST:
		rc = mdc_ioc_hsm_request(exp, karg);
		GOTO(out, rc);
	case OBD_IOC_CLIENT_RECOVER:
		rc = ptlrpc_recover_import(imp, data->ioc_inlbuf1, 0);
		if (rc < 0)
			GOTO(out, rc);
		GOTO(out, rc = 0);
	case IOC_OSC_SET_ACTIVE:
		rc = ptlrpc_set_import_active(imp, data->ioc_offset);
		GOTO(out, rc);
	case OBD_IOC_PING_TARGET:
		rc = ptlrpc_obd_ping(obd);
		GOTO(out, rc);
	/*
	 * Normally the IOC_OBD_STATFS and OBD_IOC_QUOTACTL iocontrols are
	 * handled by LMV instead of MDC. But when the cluster is upgraded
	 * from 1.8, there may be no LMV layer, and thus we might be called
	 * here. Eventually this code should be removed.
	 */
	case IOC_OBD_STATFS: {
		struct obd_statfs stat_buf = {0};

		if (*((__u32 *) data->ioc_inlbuf2) != 0)
			GOTO(out, rc = -ENODEV);

		/* copy UUID */
		if (copy_to_user(data->ioc_pbuf2, obd2cli_tgt(obd),
				 min((int)data->ioc_plen2,
				     (int)sizeof(struct obd_uuid))))
			GOTO(out, rc = -EFAULT);

		rc = mdc_statfs(NULL, obd->obd_self_export, &stat_buf,
				cfs_time_shift_64(-OBD_STATFS_CACHE_SECONDS),
				0);
		if (rc != 0)
			GOTO(out, rc);

		if (copy_to_user(data->ioc_pbuf1, &stat_buf,
				 min((int) data->ioc_plen1,
				     (int) sizeof(stat_buf))))
			GOTO(out, rc = -EFAULT);

		GOTO(out, rc = 0);
	}
	case OBD_IOC_QUOTACTL: {
		struct if_quotactl *qctl = karg;
		struct obd_quotactl *oqctl;

		OBD_ALLOC_PTR(oqctl);
		if (oqctl == NULL)
			GOTO(out, rc = -ENOMEM);

		QCTL_COPY(oqctl, qctl);
		rc = obd_quotactl(exp, oqctl);
		if (rc == 0) {
			QCTL_COPY(qctl, oqctl);
			qctl->qc_valid = QC_MDTIDX;
			qctl->obd_uuid = obd->u.cli.cl_target_uuid;
		}

		OBD_FREE_PTR(oqctl);
		GOTO(out, rc);
	}
	case LL_IOC_GET_CONNECT_FLAGS:
		if (copy_to_user(uarg, exp_connect_flags_ptr(exp),
				 sizeof(*exp_connect_flags_ptr(exp))))
			GOTO(out, rc = -EFAULT);

		GOTO(out, rc = 0);
	case LL_IOC_LOV_SWAP_LAYOUTS:
		rc = mdc_ioc_swap_layouts(exp, karg);
		GOTO(out, rc);
	default:
		CERROR("unrecognised ioctl: cmd = %#x\n", cmd);
		GOTO(out, rc = -ENOTTY);
	}
out:
	module_put(THIS_MODULE);

	return rc;
}

static int mdc_get_info_rpc(struct obd_export *exp,
			    u32 keylen, void *key,
			    u32 vallen, void *val)
{
	struct obd_import	*imp = class_exp2cliimp(exp);
	struct ptlrpc_request	*req;
	char			*tmp;
	int			 rc = -EINVAL;
	ENTRY;

	req = ptlrpc_request_alloc(imp, &RQF_MDS_GET_INFO);
	if (req == NULL)
		RETURN(-ENOMEM);

	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_KEY,
			     RCL_CLIENT, keylen);
	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VALLEN,
			     RCL_CLIENT, sizeof(vallen));

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_GET_INFO);
	if (rc) {
		ptlrpc_request_free(req);
		RETURN(rc);
	}

	tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_KEY);
	memcpy(tmp, key, keylen);
	tmp = req_capsule_client_get(&req->rq_pill, &RMF_GETINFO_VALLEN);
	memcpy(tmp, &vallen, sizeof(vallen));

	req_capsule_set_size(&req->rq_pill, &RMF_GETINFO_VAL,
			     RCL_SERVER, vallen);
	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	/* -EREMOTE means the get_info result is partial, and it needs to
	 * continue on another MDT; see the fid2path part of lmv_iocontrol() */
	if (rc == 0 || rc == -EREMOTE) {
		tmp = req_capsule_server_get(&req->rq_pill, &RMF_GETINFO_VAL);
		memcpy(val, tmp, vallen);
		if (ptlrpc_rep_need_swab(req)) {
			if (KEY_IS(KEY_FID2PATH))
				lustre_swab_fid2path(val);
		}
	}
	ptlrpc_req_finished(req);

	RETURN(rc);
}
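
/*
 * Byte-swap helpers for the HSM structures passed through
 * KEY_HSM_COPYTOOL_SEND; they are applied when the KUC message arrives
 * with a swabbed magic, see mdc_hsm_copytool_send() below.
 */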
static void lustre_swab_hai(struct hsm_action_item *h)
{
	__swab32s(&h->hai_len);
	__swab32s(&h->hai_action);
	lustre_swab_lu_fid(&h->hai_fid);
	lustre_swab_lu_fid(&h->hai_dfid);
	__swab64s(&h->hai_cookie);
	__swab64s(&h->hai_extent.offset);
	__swab64s(&h->hai_extent.length);
	__swab64s(&h->hai_gid);
}

static void lustre_swab_hal(struct hsm_action_list *h)
{
	struct hsm_action_item	*hai;
	__u32			 i;

	__swab32s(&h->hal_version);
	__swab32s(&h->hal_count);
	__swab32s(&h->hal_archive_id);
	__swab64s(&h->hal_flags);
	hai = hai_first(h);
	for (i = 0; i < h->hal_count; i++, hai = hai_next(hai))
		lustre_swab_hai(hai);
}
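
/* Note that in lustre_swab_hal() above the records are variable-size:
 * hai_next() advances by each item's hai_len (suitably rounded), so the
 * whole list is swabbed in place. */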

static void lustre_swab_kuch(struct kuc_hdr *l)
{
	__swab16s(&l->kuc_magic);
	/* __u8 l->kuc_transport */
	__swab16s(&l->kuc_msgtype);
	__swab16s(&l->kuc_msglen);
}

static int mdc_ioc_hsm_ct_start(struct obd_export *exp,
				struct lustre_kernelcomm *lk)
{
	struct obd_import *imp = class_exp2cliimp(exp);
	__u32 archive = lk->lk_data;
	int rc = 0;

	if (lk->lk_group != KUC_GRP_HSM) {
		CERROR("Bad copytool group %d\n", lk->lk_group);
		return -EINVAL;
	}

	CDEBUG(D_HSM, "CT start r%d w%d u%d g%d f%#x\n", lk->lk_rfd, lk->lk_wfd,
	       lk->lk_uid, lk->lk_group, lk->lk_flags);

	if (lk->lk_flags & LK_FLG_STOP) {
		/* Unregister with the coordinator */
		rc = mdc_ioc_hsm_ct_unregister(imp);
	} else {
		rc = mdc_ioc_hsm_ct_register(imp, archive);
	}

	return rc;
}

/**
 * Send a message to any listening copytools
 * @param val KUC message (kuc_hdr + hsm_action_list)
 * @param len total length of message
 */
static int mdc_hsm_copytool_send(size_t len, void *val)
{
	struct kuc_hdr		*lh = (struct kuc_hdr *)val;
	struct hsm_action_list	*hal = (struct hsm_action_list *)(lh + 1);
	int			 rc;
	ENTRY;

	if (len < sizeof(*lh) + sizeof(*hal)) {
		CERROR("Short HSM message %zu < %zu\n", len,
		       sizeof(*lh) + sizeof(*hal));
		RETURN(-EPROTO);
	}
	if (lh->kuc_magic == __swab16(KUC_MAGIC)) {
		lustre_swab_kuch(lh);
		lustre_swab_hal(hal);
	} else if (lh->kuc_magic != KUC_MAGIC) {
		CERROR("Bad magic %x!=%x\n", lh->kuc_magic, KUC_MAGIC);
		RETURN(-EPROTO);
	}

	CDEBUG(D_HSM, " Received message mg=%x t=%d m=%d l=%d actions=%d "
	       "on %s\n",
	       lh->kuc_magic, lh->kuc_transport, lh->kuc_msgtype,
	       lh->kuc_msglen, hal->hal_count, hal->hal_fsname);

	/* Broadcast to HSM listeners */
	rc = libcfs_kkuc_group_put(KUC_GRP_HSM, lh);

	RETURN(rc);
}

/**
 * callback function passed to kuc for re-registering each HSM copytool
 * running on MDC, after MDT shutdown/recovery.
 * @param data copytool registration data
 * @param cb_arg callback argument (obd_import)
 */
static int mdc_hsm_ct_reregister(void *data, void *cb_arg)
{
	struct kkuc_ct_data	*kcd = data;
	struct obd_import	*imp = (struct obd_import *)cb_arg;
	int			 rc;

	if (kcd == NULL || kcd->kcd_magic != KKUC_CT_DATA_MAGIC)
		return -EPROTO;

	if (!obd_uuid_equals(&kcd->kcd_uuid, &imp->imp_obd->obd_uuid))
		return 0;

	CDEBUG(D_HA, "%s: recover copytool registration to MDT (archive=%#x)\n",
	       imp->imp_obd->obd_name, kcd->kcd_archive);
	rc = mdc_ioc_hsm_ct_register(imp, kcd->kcd_archive);

	/* ignore error if the copytool is already registered */
	return (rc == -EEXIST) ? 0 : rc;
}
/**
 * Re-establish all kuc contexts with MDT
 * after MDT shutdown/recovery.
 */
static int mdc_kuc_reregister(struct obd_import *imp)
{
	/* re-register HSM agents */
	return libcfs_kkuc_group_foreach(KUC_GRP_HSM, mdc_hsm_ct_reregister,
					 (void *)imp);
}
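/*
 * Recovery flow implied by the code above and by mdc_import_event() below:
 * on IMP_EVENT_ACTIVE the client calls mdc_kuc_reregister(), which walks
 * every kkuc_ct_data record saved at registration time and replays
 * mdc_ioc_hsm_ct_register() for those whose kcd_uuid matches this import,
 * so running copytools survive an MDT restart without being restarted.
 */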
static int mdc_set_info_async(const struct lu_env *env,
			      struct obd_export *exp,
			      u32 keylen, void *key,
			      u32 vallen, void *val,
			      struct ptlrpc_request_set *set)
{
	struct obd_import *imp = class_exp2cliimp(exp);
	int rc;

	if (KEY_IS(KEY_READ_ONLY)) {
		if (vallen != sizeof(int))
			return -EINVAL;

		spin_lock(&imp->imp_lock);
		if (*((int *)val)) {
			imp->imp_connect_flags_orig |= OBD_CONNECT_RDONLY;
			imp->imp_connect_data.ocd_connect_flags |=
				OBD_CONNECT_RDONLY;
		} else {
			imp->imp_connect_flags_orig &= ~OBD_CONNECT_RDONLY;
			imp->imp_connect_data.ocd_connect_flags &=
				~OBD_CONNECT_RDONLY;
		}
		spin_unlock(&imp->imp_lock);

		rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
				       keylen, key, vallen, val, set);
		return rc;
	}
	if (KEY_IS(KEY_SPTLRPC_CONF)) {
		sptlrpc_conf_client_adapt(exp->exp_obd);
		return 0;
	}
	if (KEY_IS(KEY_FLUSH_CTX)) {
		sptlrpc_import_flush_my_ctx(imp);
		return 0;
	}
	if (KEY_IS(KEY_CHANGELOG_CLEAR)) {
		rc = do_set_info_async(imp, MDS_SET_INFO, LUSTRE_MDS_VERSION,
				       keylen, key, vallen, val, set);
		return rc;
	}
	if (KEY_IS(KEY_HSM_COPYTOOL_SEND)) {
		rc = mdc_hsm_copytool_send(vallen, val);
		return rc;
	}
	if (KEY_IS(KEY_DEFAULT_EASIZE)) {
		__u32 *default_easize = val;

		exp->exp_obd->u.cli.cl_default_mds_easize = *default_easize;
		return 0;
	}

	CERROR("Unknown key %s\n", (char *)key);
	return -EINVAL;
}
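/*
 * Minimal caller sketch (illustrative only, assuming a valid MDC export):
 * marking the import read-only through the generic obd_set_info_async()
 * entry point, which dispatches to mdc_set_info_async() above:
 *
 *	int on = 1;
 *
 *	rc = obd_set_info_async(env, exp, sizeof(KEY_READ_ONLY),
 *				KEY_READ_ONLY, sizeof(on), &on, NULL);
 */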
static int mdc_get_info(const struct lu_env *env, struct obd_export *exp,
			__u32 keylen, void *key, __u32 *vallen, void *val)
{
	int rc;

	if (KEY_IS(KEY_MAX_EASIZE)) {
		__u32 mdsize, *max_easize;

		if (*vallen != sizeof(int))
			return -EINVAL;

		mdsize = *(__u32 *)val;
		if (mdsize > exp->exp_obd->u.cli.cl_max_mds_easize)
			exp->exp_obd->u.cli.cl_max_mds_easize = mdsize;
		max_easize = val;
		*max_easize = exp->exp_obd->u.cli.cl_max_mds_easize;
		return 0;
	} else if (KEY_IS(KEY_DEFAULT_EASIZE)) {
		__u32 *default_easize;

		if (*vallen != sizeof(int))
			return -EINVAL;

		default_easize = val;
		*default_easize = exp->exp_obd->u.cli.cl_default_mds_easize;
		return 0;
	} else if (KEY_IS(KEY_CONN_DATA)) {
		struct obd_import *imp = class_exp2cliimp(exp);
		struct obd_connect_data *data = val;

		if (*vallen != sizeof(*data))
			return -EINVAL;

		*data = imp->imp_connect_data;
		return 0;
	} else if (KEY_IS(KEY_TGT_COUNT)) {
		*((__u32 *)val) = 1;
		return 0;
	}

	rc = mdc_get_info_rpc(exp, keylen, key, *vallen, val);

	return rc;
}
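/*
 * Minimal caller sketch (illustrative only): fetching the cached connect
 * data through obd_get_info(), which lands in mdc_get_info() above:
 *
 *	struct obd_connect_data ocd;
 *	__u32 vallen = sizeof(ocd);
 *
 *	rc = obd_get_info(env, exp, sizeof(KEY_CONN_DATA), KEY_CONN_DATA,
 *			  &vallen, &ocd);
 */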
static int mdc_fsync(struct obd_export *exp, const struct lu_fid *fid,
		     struct ptlrpc_request **request)
{
	struct ptlrpc_request *req;
	int rc;

	*request = NULL;
	req = ptlrpc_request_alloc(class_exp2cliimp(exp), &RQF_MDS_SYNC);
	if (req == NULL)
		return -ENOMEM;

	rc = ptlrpc_request_pack(req, LUSTRE_MDS_VERSION, MDS_SYNC);
	if (rc) {
		ptlrpc_request_free(req);
		return rc;
	}

	mdc_pack_body(req, fid, 0, 0, -1, 0);

	ptlrpc_request_set_replen(req);

	rc = ptlrpc_queue_wait(req);
	if (rc)
		ptlrpc_req_finished(req);
	else
		*request = req;

	return rc;
}
static int mdc_import_event(struct obd_device *obd, struct obd_import *imp,
			    enum obd_import_event event)
{
	int rc = 0;

	LASSERT(imp->imp_obd == obd);

	switch (event) {
	case IMP_EVENT_INACTIVE: {
		struct client_obd *cli = &obd->u.cli;

		/*
		 * Flush current sequence to make client obtain new one
		 * from server in case of disconnect/reconnect.
		 */
		down_read(&cli->cl_seq_rwsem);
		if (cli->cl_seq != NULL)
			seq_client_flush(cli->cl_seq);
		up_read(&cli->cl_seq_rwsem);

		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_INACTIVE);
		break;
	}
	case IMP_EVENT_INVALIDATE: {
		struct ldlm_namespace *ns = obd->obd_namespace;

		ldlm_namespace_cleanup(ns, LDLM_FL_LOCAL_ONLY);
		break;
	}
	case IMP_EVENT_ACTIVE:
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_ACTIVE);
		/* redo the kuc registration after reconnecting */
		if (rc == 0)
			rc = mdc_kuc_reregister(imp);
		break;
	case IMP_EVENT_OCD:
		rc = obd_notify_observer(obd, obd, OBD_NOTIFY_OCD);
		break;
	case IMP_EVENT_DISCON:
	case IMP_EVENT_DEACTIVATE:
	case IMP_EVENT_ACTIVATE:
		break;
	default:
		CERROR("Unknown import event %x\n", event);
		LBUG();
	}

	return rc;
}
int mdc_fid_alloc(const struct lu_env *env, struct obd_export *exp,
		  struct lu_fid *fid, struct md_op_data *op_data)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;
	int rc = -ENOTCONN;

	down_read(&cli->cl_seq_rwsem);
	if (cli->cl_seq != NULL)
		rc = seq_client_alloc_fid(env, cli->cl_seq, fid);
	up_read(&cli->cl_seq_rwsem);

	return rc;
}
static struct obd_uuid *mdc_get_uuid(struct obd_export *exp)
{
	struct client_obd *cli = &exp->exp_obd->u.cli;

	return &cli->cl_target_uuid;
}
/**
 * Determine whether the lock can be canceled before replaying it during
 * recovery. Return non-zero if the lock can be canceled, zero otherwise.
 */
static int mdc_cancel_weight(struct ldlm_lock *lock)
{
	if (lock->l_resource->lr_type != LDLM_IBITS)
		return 0;

	/* FIXME: if we ever get into a situation where there are too many
	 * opened files with open locks on a single node, then we really
	 * should replay these open locks to re-get them. */
	if (lock->l_policy_data.l_inodebits.bits & MDS_INODELOCK_OPEN)
		return 0;

	return 1;
}
static int mdc_resource_inode_free(struct ldlm_resource *res)
{
	if (res->lr_lvb_inode)
		res->lr_lvb_inode = NULL;

	return 0;
}

static struct ldlm_valblock_ops inode_lvbo = {
	.lvbo_free = mdc_resource_inode_free,
};
static int mdc_llog_init(struct obd_device *obd)
{
	struct obd_llog_group *olg = &obd->obd_olg;
	struct llog_ctxt *ctxt;
	int rc;

	rc = llog_setup(NULL, obd, olg, LLOG_CHANGELOG_REPL_CTXT, obd,
			&llog_client_ops);
	if (rc < 0)
		return rc;

	ctxt = llog_group_get_ctxt(olg, LLOG_CHANGELOG_REPL_CTXT);
	llog_initiator_connect(ctxt);
	llog_ctxt_put(ctxt);

	return 0;
}
static void mdc_llog_finish(struct obd_device *obd)
{
	struct llog_ctxt *ctxt;

	ctxt = llog_get_context(obd, LLOG_CHANGELOG_REPL_CTXT);
	if (ctxt != NULL)
		llog_cleanup(NULL, ctxt);
}
static int mdc_setup(struct obd_device *obd, struct lustre_cfg *cfg)
{
	int rc;

	rc = ptlrpcd_addref();
	if (rc < 0)
		return rc;

	rc = client_obd_setup(obd, cfg);
	if (rc)
		GOTO(err_ptlrpcd_decref, rc);
#ifdef CONFIG_PROC_FS
	obd->obd_vars = lprocfs_mdc_obd_vars;
	lprocfs_obd_setup(obd);
	lprocfs_alloc_md_stats(obd, 0);
#endif
	sptlrpc_lprocfs_cliobd_attach(obd);
	ptlrpc_lprocfs_register_obd(obd);

	ns_register_cancel(obd->obd_namespace, mdc_cancel_weight);

	obd->obd_namespace->ns_lvbo = &inode_lvbo;

	rc = mdc_llog_init(obd);
	if (rc) {
		mdc_cleanup(obd);
		CERROR("failed to setup llogging subsystems\n");
		return rc;
	}

	return rc;

err_ptlrpcd_decref:
	ptlrpcd_decref();
	return rc;
}
/*
 * Initialize the default and maximum LOV EA sizes. This allows
 * us to make MDS RPCs with large enough reply buffers to hold a default
 * sized EA without having to calculate this (via a call into the
 * LOV + OSCs) each time we make an RPC. The maximum size is also tracked
 * but not used, to avoid wastefully vmalloc()'ing large reply buffers when
 * a large number of stripes is possible. If a larger reply buffer is
 * required it will be reallocated in the ptlrpc layer due to overflow.
 */
static int mdc_init_ea_size(struct obd_export *exp, __u32 easize,
			    __u32 def_easize)
{
	struct obd_device *obd = exp->exp_obd;
	struct client_obd *cli = &obd->u.cli;

	if (cli->cl_max_mds_easize < easize)
		cli->cl_max_mds_easize = easize;

	if (cli->cl_default_mds_easize < def_easize)
		cli->cl_default_mds_easize = def_easize;

	return 0;
}
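/*
 * Worked example (sizes are assumptions based on the LOV v1 layout, not
 * stated in this file): a v1 striped layout costs roughly
 * sizeof(struct lov_mds_md_v1) + stripe_count * sizeof(struct lov_ost_data_v1),
 * about 32 + N * 24 bytes, so a 4-stripe default layout would drive
 * def_easize to roughly 128 bytes, while easize reflects the largest
 * stripe count the filesystem can produce.
 */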
static int mdc_precleanup(struct obd_device *obd)
{
	/* Failsafe, ok if racy */
	if (obd->obd_type->typ_refcnt <= 1)
		libcfs_kkuc_group_rem(0, KUC_GRP_HSM);

	obd_cleanup_client_import(obd);
	ptlrpc_lprocfs_unregister_obd(obd);
	lprocfs_obd_cleanup(obd);
	lprocfs_free_md_stats(obd);
	mdc_llog_finish(obd);

	return 0;
}
static int mdc_cleanup(struct obd_device *obd)
{
	ptlrpcd_decref();

	return client_obd_cleanup(obd);
}
static int mdc_process_config(struct obd_device *obd, size_t len, void *buf)
{
	struct lustre_cfg *lcfg = buf;
	int rc;

	rc = class_process_proc_param(PARAM_MDC, obd->obd_vars, lcfg, obd);
	return rc > 0 ? 0 : rc;
}
static struct obd_ops mdc_obd_ops = {
	.o_owner = THIS_MODULE,
	.o_setup = mdc_setup,
	.o_precleanup = mdc_precleanup,
	.o_cleanup = mdc_cleanup,
	.o_add_conn = client_import_add_conn,
	.o_del_conn = client_import_del_conn,
	.o_connect = client_connect_import,
	.o_disconnect = client_disconnect_export,
	.o_iocontrol = mdc_iocontrol,
	.o_set_info_async = mdc_set_info_async,
	.o_statfs = mdc_statfs,
	.o_fid_init = client_fid_init,
	.o_fid_fini = client_fid_fini,
	.o_fid_alloc = mdc_fid_alloc,
	.o_import_event = mdc_import_event,
	.o_get_info = mdc_get_info,
	.o_process_config = mdc_process_config,
	.o_get_uuid = mdc_get_uuid,
	.o_quotactl = mdc_quotactl,
};
static struct md_ops mdc_md_ops = {
	.m_get_root = mdc_get_root,
	.m_null_inode = mdc_null_inode,
	.m_close = mdc_close,
	.m_create = mdc_create,
	.m_enqueue = mdc_enqueue,
	.m_getattr = mdc_getattr,
	.m_getattr_name = mdc_getattr_name,
	.m_intent_lock = mdc_intent_lock,
	.m_link = mdc_link,
	.m_rename = mdc_rename,
	.m_setattr = mdc_setattr,
	.m_setxattr = mdc_setxattr,
	.m_getxattr = mdc_getxattr,
	.m_fsync = mdc_fsync,
	.m_read_page = mdc_read_page,
	.m_unlink = mdc_unlink,
	.m_cancel_unused = mdc_cancel_unused,
	.m_init_ea_size = mdc_init_ea_size,
	.m_set_lock_data = mdc_set_lock_data,
	.m_lock_match = mdc_lock_match,
	.m_get_lustre_md = mdc_get_lustre_md,
	.m_free_lustre_md = mdc_free_lustre_md,
	.m_set_open_replay_data = mdc_set_open_replay_data,
	.m_clear_open_replay_data = mdc_clear_open_replay_data,
	.m_intent_getattr_async = mdc_intent_getattr_async,
	.m_revalidate_lock = mdc_revalidate_lock,
};
static int __init mdc_init(void)
{
	return class_register_type(&mdc_obd_ops, &mdc_md_ops, true, NULL,
				   LUSTRE_MDC_NAME, NULL);
}

static void __exit mdc_exit(void)
{
	class_unregister_type(LUSTRE_MDC_NAME);
}
MODULE_AUTHOR("OpenSFS, Inc. <http://www.lustre.org/>");
MODULE_DESCRIPTION("Lustre Metadata Client");
MODULE_VERSION(LUSTRE_VERSION_STRING);
MODULE_LICENSE("GPL");

module_init(mdc_init);
module_exit(mdc_exit);