1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
6 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 only,
10 * as published by the Free Software Foundation.
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License version 2 for more details (a copy is included
16 * in the LICENSE file that accompanied this code).
18 * You should have received a copy of the GNU General Public License
19 * version 2 along with this program; If not, see
20 * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf
22 * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
23 * CA 95054 USA or visit www.sun.com if you need additional information or
29 * Copyright 2008 Sun Microsystems, Inc. All rights reserved
30 * Use is subject to license terms.
33 * This file is part of Lustre, http://www.lustre.org/
34 * Lustre is a trademark of Sun Microsystems, Inc.
36 * lustre/mds/handler.c
38 * Author: Peter Braam <braam@clusterfs.com>
39 * Author: Andreas Dilger <adilger@clusterfs.com>
40 * Author: Phil Schwan <phil@clusterfs.com>
41 * Author: Mike Shaver <shaver@clusterfs.com>
45 # define EXPORT_SYMTAB
47 #define DEBUG_SUBSYSTEM S_MDS
49 #include <lustre_mds.h>
50 #include <linux/module.h>
51 #include <linux/init.h>
52 #include <linux/random.h>
54 #include <linux/jbd.h>
55 # include <linux/smp_lock.h>
56 # include <linux/buffer_head.h>
57 # include <linux/workqueue.h>
58 # include <linux/mount.h>
60 #include <obd_class.h>
61 #include <lustre_dlm.h>
63 #include <lustre_fsfilt.h>
64 #include <lprocfs_status.h>
65 #include <lustre_quota.h>
66 #include <lustre_disk.h>
67 #include <lustre_param.h>
69 #include "mds_internal.h"
/* Tunable: number of MDS service threads; read-only module parameter (0444). */
72 CFS_MODULE_PARM(mds_num_threads, "i", int, 0444,
73 "number of MDS service threads to start");
/* Forward declarations for handlers defined later in this file. */
75 static int mds_intent_policy(struct ldlm_namespace *ns,
76 struct ldlm_lock **lockp, void *req_cookie,
77 ldlm_mode_t mode, int flags, void *data);
78 static int mds_postsetup(struct obd_device *obd);
79 static int mds_cleanup(struct obd_device *obd);
81 /* Assumes caller has already pushed into the kernel filesystem context */
/*
 * mds_sendpage(): bulk-send @count bytes read from directory @file at
 * @offset back to the client, using a BULK_PUT_SOURCE descriptor of
 * CFS_PAGE_SIZE-sized pages.
 *
 * NOTE(review): this listing elides lines (embedded numbering is gappy) —
 * several braces, declarations (e.g. `pages`, `timeout`) and error checks
 * are not visible here; comments describe only the visible statements.
 */
82 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
83 loff_t offset, int count)
85 struct ptlrpc_bulk_desc *desc;
86 struct l_wait_info lwi;
89 int rc = 0, npages, i, tmpcount, tmpsize = 0;
/* Offset must be page-aligned (asserted); the "dubious" remark is original. */
92 LASSERT((offset & ~CFS_PAGE_MASK) == 0); /* I'm dubious about this */
/* Round the byte count up to whole pages. */
94 npages = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
95 OBD_ALLOC(pages, sizeof(*pages) * npages);
97 GOTO(out, rc = -ENOMEM);
99 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
102 GOTO(out_free, rc = -ENOMEM);
/* First pass: allocate pages and attach them to the bulk descriptor. */
104 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
105 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
107 OBD_PAGE_ALLOC(pages[i], CFS_ALLOC_STD);
108 if (pages[i] == NULL)
109 GOTO(cleanup_buf, rc = -ENOMEM);
111 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* Second pass: fill each page from the directory via fsfilt_readpage().
 * fsfilt_readpage() advances @offset as it reads. */
114 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
115 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
116 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
117 tmpsize, offset, file->f_dentry->d_inode->i_ino,
118 i_size_read(file->f_dentry->d_inode));
120 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
121 kmap(pages[i]), tmpsize, &offset);
125 GOTO(cleanup_buf, rc = -EIO);
128 LASSERT(desc->bd_nob == count);
130 rc = ptlrpc_start_bulk_transfer(desc);
132 GOTO(cleanup_buf, rc);
/* Fault-injection hook for testing: pretend the send failed. */
134 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
135 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
136 OBD_FAIL_MDS_SENDPAGE, rc);
137 GOTO(abort_bulk, rc);
/* Wait for bulk completion, bounded by the request deadline. */
140 timeout = (int)req->rq_deadline - (int)cfs_time_current_sec();
142 CERROR("Req deadline already passed %lu (now: %lu)\n",
143 req->rq_deadline, cfs_time_current_sec());
/* Wait at least 1 second even if the deadline already passed. */
145 lwi = LWI_TIMEOUT(cfs_time_seconds(max(timeout, 1)), NULL, NULL);
146 rc = l_wait_event(desc->bd_waitq, !ptlrpc_server_bulk_active(desc), &lwi);
147 LASSERT (rc == 0 || rc == -ETIMEDOUT);
/* Success only if the full byte count actually moved. */
150 if (desc->bd_success &&
151 desc->bd_nob_transferred == count)
152 GOTO(cleanup_buf, rc);
154 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
/* Bulk failed or timed out: log and evict the client export. */
157 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
158 (rc == -ETIMEDOUT) ? "timeout" : "network error",
159 desc->bd_nob_transferred, count,
160 req->rq_export->exp_client_uuid.uuid,
161 req->rq_export->exp_connection->c_remote_uuid.uuid);
163 class_fail_export(req->rq_export);
/* Cleanup ladder: abort bulk, free pages, free descriptor, free array. */
167 ptlrpc_abort_bulk(desc);
169 for (i = 0; i < npages; i++)
171 OBD_PAGE_FREE(pages[i]);
173 ptlrpc_free_bulk(desc);
175 OBD_FREE(pages, sizeof(*pages) * npages);
180 /* only valid locked dentries or errors should be returned */
/*
 * mds_fid2locked_dentry(): look up the dentry for @fid and take a local
 * inodebits DLM lock (bits in @lockpart, mode @lock_mode) on its resource.
 * Returns the locked dentry, or ERR_PTR on failure.
 * NOTE(review): the tail of this function (error-path cleanup, RETURN)
 * is elided in this listing.
 */
181 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
182 struct vfsmount **mnt, int lock_mode,
183 struct lustre_handle *lockh,
184 char *name, int namelen, __u64 lockpart)
186 struct mds_obd *mds = &obd->u.mds;
187 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
188 struct ldlm_res_id res_id = { .name = {0} };
189 int flags = LDLM_FL_ATOMIC_CB, rc;
190 ldlm_policy_data_t policy = { .l_inodebits = { lockpart} };
/* Resource name is (ino, generation) of the resolved inode. */
196 res_id.name[0] = de->d_inode->i_ino;
197 res_id.name[1] = de->d_inode->i_generation;
198 rc = ldlm_cli_enqueue_local(obd->obd_namespace, &res_id,
199 LDLM_IBITS, &policy, lock_mode, &flags,
200 ldlm_blocking_ast, ldlm_completion_ast,
201 NULL, NULL, 0, NULL, lockh);
202 if (rc != ELDLM_OK) {
204 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
210 /* Look up an entry by inode number. */
211 /* this function ONLY returns valid dget'd dentries with an initialized inode
/*
 * mds_fid2dentry(): resolve @fid (ino + generation) to a dget'd dentry via
 * a "0x<ino>" name lookup under the fid directory.  Returns ERR_PTR(-ENOENT)
 * for missing/unlinked inodes or generation mismatch.
 * NOTE(review): several lines (fid_name/inode declarations, intermediate
 * checks, final RETURN) are elided in this listing.
 */
213 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
214 struct vfsmount **mnt)
216 struct obd_device *obd = container_of(mds, struct obd_device, u.mds);
218 unsigned long ino = fid->id;
219 __u32 generation = fid->generation;
221 struct dentry *result;
224 RETURN(ERR_PTR(-ESTALE));
/* Build the hex-ino name used by the fid-directory lookup. */
226 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
228 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino/gen %lu/%u, sb %p\n",
229 ino, generation, mds->mds_obt.obt_sb);
231 /* under ext3 this is neither supposed to return bad inodes
233 result = mds_lookup(obd, fid_name, mds->mds_fid_de, strlen(fid_name));
237 inode = result->d_inode;
239 RETURN(ERR_PTR(-ENOENT));
/* nlink == 0: inode is unlinked; an all-zero mode/ctime suggests on-disk
 * corruption rather than a normal orphan, so warn loudly. */
241 if (inode->i_nlink == 0) {
242 if (inode->i_mode == 0 &&
243 LTIME_S(inode->i_ctime) == 0 ) {
244 LCONSOLE_WARN("Found inode with zero nlink, mode and "
245 "ctime -- this may indicate disk"
246 "corruption (device %s, inode %lu, link:"
247 " %lu, count: %d)\n", obd->obd_name, inode->i_ino,
248 (unsigned long)inode->i_nlink,
249 atomic_read(&inode->i_count));
252 RETURN(ERR_PTR(-ENOENT));
/* Generation mismatch: the ino was reused; this is not the inode the
 * client asked about. */
255 if (generation && inode->i_generation != generation) {
256 /* we didn't find the right inode.. */
257 CDEBUG(D_INODE, "found wrong generation: inode %lu, link: %lu, "
258 "count: %d, generation %u/%u\n", inode->i_ino,
259 (unsigned long)inode->i_nlink,
260 atomic_read(&inode->i_count), inode->i_generation,
263 RETURN(ERR_PTR(-ENOENT));
267 *mnt = mds->mds_vfsmnt;
/*
 * mds_connect_internal(): negotiate connect flags with the client.
 * Masks @data down to what this MDS supports, strips IBITS/ACL/XATTR
 * when unsupported, and records the result on the export.
 * NOTE(review): the tail (return path) is elided in this listing.
 */
274 static int mds_connect_internal(struct obd_export *exp,
275 struct obd_connect_data *data)
277 struct obd_device *obd = exp->exp_obd;
/* Intersect client-requested capabilities with server-supported sets. */
279 data->ocd_connect_flags &= MDS_CONNECT_SUPPORTED;
280 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
282 /* If no known bits (which should not happen, probably,
283 as everybody should support LOOKUP and UPDATE bits at least)
284 revert to compat mode with plain locks. */
285 if (!data->ocd_ibits_known &&
286 data->ocd_connect_flags & OBD_CONNECT_IBITS)
287 data->ocd_connect_flags &= ~OBD_CONNECT_IBITS;
/* Drop ACL / user-xattr support bits when disabled on this MDS. */
289 if (!obd->u.mds.mds_fl_acl)
290 data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
292 if (!obd->u.mds.mds_fl_user_xattr)
293 data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
295 exp->exp_connect_flags = data->ocd_connect_flags;
296 data->ocd_version = LUSTRE_VERSION_CODE;
297 exp->exp_mds_data.med_ibits_known = data->ocd_ibits_known;
/* Server mandates ACLs but the client cannot do them: warn (the elided
 * code presumably rejects the connection — TODO confirm). */
300 if (obd->u.mds.mds_fl_acl &&
301 ((exp->exp_connect_flags & OBD_CONNECT_ACL) == 0)) {
302 CWARN("%s: MDS requires ACL support but client does not\n",
/*
 * mds_reconnect(): re-negotiate flags on an existing export and refresh
 * its per-nid stats.  NOTE(review): parameter list and body are partially
 * elided in this listing.
 */
309 static int mds_reconnect(struct obd_export *exp, struct obd_device *obd,
310 struct obd_uuid *cluuid,
311 struct obd_connect_data *data,
/* Defensive NULL checks on all required arguments. */
317 if (exp == NULL || obd == NULL || cluuid == NULL)
320 rc = mds_connect_internal(exp, data);
322 mds_export_stats_init(obd, exp, 1, localdata);
327 /* Establish a connection to the MDS.
329 * This will set up an export structure for the client to hold state data
330 * about that client, like open files, the last operation number it did
331 * on the server, etc.
/*
 * NOTE(review): this listing elides lines (variable declarations, several
 * error checks, success return); comments describe only visible statements.
 */
333 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
334 struct obd_uuid *cluuid, struct obd_connect_data *data,
337 struct obd_export *exp;
338 struct mds_export_data *med;
339 struct lsd_client_data *lcd = NULL;
343 if (!conn || !obd || !cluuid)
346 /* Check for aborted recovery. */
347 target_recovery_check_and_stop(obd);
349 /* XXX There is a small race between checking the list and adding a
350 * new connection for the same UUID, but the real threat (list
351 * corruption when multiple different clients connect) is solved.
353 * There is a second race between adding the export to the list,
354 * and filling in the client data below. Hence skipping the case
355 * of NULL lcd above. We should already be controlling multiple
356 * connects at the client, and we can't hold the spinlock over
357 * memory allocations without risk of deadlocking.
/* Create the export and resolve the connection handle to it. */
359 rc = class_connect(conn, obd, cluuid);
362 exp = class_conn2export(conn);
364 med = &exp->exp_mds_data;
366 rc = mds_connect_internal(exp, data);
/* Allocate per-client last-rcvd data (lcd); allocation failure elided. */
372 GOTO(out, rc = -ENOMEM);
374 memcpy(lcd->lcd_uuid, cluuid, sizeof(lcd->lcd_uuid));
377 rc = mds_client_add(obd, exp, -1, localdata);
/* Error path: tear the export back down and drop references. */
386 class_disconnect(exp);
387 /* release nid stat refererence */
388 lprocfs_exp_cleanup(exp);
390 class_export_put(exp);
/*
 * mds_init_export(): initialize MDS-private export state (open-file list
 * and its lock), mark the export as connecting, and set up LDLM state.
 */
396 int mds_init_export(struct obd_export *exp)
398 struct mds_export_data *med = &exp->exp_mds_data;
401 INIT_LIST_HEAD(&med->med_open_head);
402 spin_lock_init(&med->med_open_lock);
/* exp_connecting is written under exp_lock. */
404 spin_lock(&exp->exp_lock);
405 exp->exp_connecting = 1;
406 spin_unlock(&exp->exp_lock);
408 RETURN(ldlm_init_export(exp));
/*
 * mds_destroy_export(): release target/LDLM state for a dying export and
 * free its last-rcvd client slot.  Asserts the open-file list was already
 * drained (see mds_cleanup_mfd()).
 */
411 static int mds_destroy_export(struct obd_export *exp)
415 target_destroy_export(exp);
416 ldlm_destroy_export(exp);
418 LASSERT(list_empty(&exp->exp_mds_data.med_open_head));
419 mds_client_free(exp);
/*
 * mds_cleanup_mfd(): force-close every open file handle (mfd) held by
 * @exp, destroying resulting orphans.  Called on disconnect.
 * NOTE(review): this listing elides lines (early-return when the list is
 * empty, some error checks, final return); comments describe only the
 * visible statements.
 */
424 static int mds_cleanup_mfd(struct obd_export *exp)
426 struct mds_export_data *med;
427 struct obd_device *obd = exp->exp_obd;
428 struct mds_obd *mds = &obd->u.mds;
429 struct lvfs_run_ctxt saved;
430 struct lov_mds_md *lmm;
431 __u32 lmm_sz, cookie_sz;
432 struct llog_cookie *logcookies;
433 struct list_head closing_list;
434 struct mds_file_data *mfd, *n;
438 med = &exp->exp_mds_data;
/* Fast path: nothing open, nothing to do. */
440 spin_lock(&med->med_open_lock);
441 if (list_empty(&med->med_open_head)) {
442 spin_unlock(&med->med_open_lock);
/* Move every mfd to a private closing_list under the lock so it can be
 * processed afterwards without holding med_open_lock. */
446 CFS_INIT_LIST_HEAD(&closing_list);
447 while (!list_empty(&med->med_open_head)) {
448 struct list_head *tmp = med->med_open_head.next;
449 struct mds_file_data *mfd =
450 list_entry(tmp, struct mds_file_data, mfd_list);
452 /* Remove mfd handle so it can't be found again.
453 * We are consuming the mfd_list reference here. */
454 mds_mfd_unlink(mfd, 0);
455 list_add_tail(&mfd->mfd_list, &closing_list);
457 spin_unlock(&med->med_open_lock);
/* Scratch buffers for striping metadata and unlink llog cookies. */
459 lmm_sz = mds->mds_max_mdsize;
460 OBD_ALLOC(lmm, lmm_sz);
462 CWARN("%s: allocation failure during cleanup; can not force "
463 "close file handles on this service.\n", obd->obd_name);
464 GOTO(out, rc = -ENOMEM);
467 cookie_sz = mds->mds_max_cookiesize;
468 OBD_ALLOC(logcookies, cookie_sz);
469 if (logcookies == NULL) {
470 CWARN("%s: allocation failure during cleanup; can not force "
471 "close file handles on this service.\n", obd->obd_name);
472 OBD_FREE(lmm, lmm_sz);
473 GOTO(out, rc = -ENOMEM);
/* Enter server filesystem context for the VFS/fsfilt operations below. */
476 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
477 /* Close any open files (which may also cause orphan unlinking). */
478 list_for_each_entry_safe(mfd, n, &closing_list, mfd_list) {
479 int lmm_size = lmm_sz;
480 umode_t mode = mfd->mfd_dentry->d_inode->i_mode;
483 /* If you change this message, be sure to update
484 * replay_single:test_46 */
485 CDEBUG(D_INODE|D_IOCTL, "%s: force closing file handle for "
486 "%.*s (ino %lu)\n", obd->obd_name,
487 mfd->mfd_dentry->d_name.len,mfd->mfd_dentry->d_name.name,
488 mfd->mfd_dentry->d_inode->i_ino);
/* Fetch the file's LOV EA so orphan objects can be destroyed. */
490 rc = mds_get_md(obd, mfd->mfd_dentry->d_inode, lmm,
493 CWARN("mds_get_md failure, rc=%d\n", rc);
495 valid |= OBD_MD_FLEASIZE;
497 /* child orphan sem protects orphan_dec_test and
498 * is_orphan race, mds_mfd_close drops it */
499 MDS_DOWN_WRITE_ORPHAN_SEM(mfd->mfd_dentry->d_inode);
501 list_del_init(&mfd->mfd_list);
/* Skip orphan unlink on failover so a reconnecting client can reopen. */
502 rc = mds_mfd_close(NULL, REQ_REC_OFF, obd, mfd,
503 !(exp->exp_flags & OBD_OPT_FAILOVER),
504 lmm, lmm_size, logcookies,
505 mds->mds_max_cookiesize,
509 CDEBUG(D_INODE|D_IOCTL, "Error closing file: %d\n", rc);
/* A returned cookie means OST objects still need destroying. */
511 if (valid & OBD_MD_FLCOOKIE) {
512 rc = mds_osc_destroy_orphan(obd, mode, lmm,
513 lmm_size, logcookies, 1);
515 CDEBUG(D_INODE, "%s: destroy of orphan failed,"
516 " rc = %d\n", obd->obd_name, rc);
519 valid &= ~OBD_MD_FLCOOKIE;
523 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
524 OBD_FREE(logcookies, cookie_sz);
525 OBD_FREE(lmm, lmm_sz);
/*
 * mds_disconnect(): tear down a client connection — disconnect the export
 * first so no new requests use it, cancel its DLM locks, flush outstanding
 * replies, then force-close its open files via mds_cleanup_mfd().
 * NOTE(review): some lines (declarations, final RETURN) are elided here.
 */
530 static int mds_disconnect(struct obd_export *exp)
/* Hold a reference across teardown; dropped at the end. */
536 class_export_get(exp);
538 /* Disconnect early so that clients can't keep using export */
539 rc = class_disconnect(exp);
540 if (exp->exp_obd->obd_namespace != NULL)
541 ldlm_cancel_locks_for_export(exp);
543 /* release nid stat refererence */
544 lprocfs_exp_cleanup(exp);
546 /* complete all outstanding replies */
/* Each reply state is scheduled under its service lock, nested inside
 * exp_lock — preserve this lock ordering. */
547 spin_lock(&exp->exp_lock);
548 while (!list_empty(&exp->exp_outstanding_replies)) {
549 struct ptlrpc_reply_state *rs =
550 list_entry(exp->exp_outstanding_replies.next,
551 struct ptlrpc_reply_state, rs_exp_list);
552 struct ptlrpc_service *svc = rs->rs_service;
554 spin_lock(&svc->srv_lock);
555 list_del_init(&rs->rs_exp_list);
556 ptlrpc_schedule_difficult_reply(rs);
557 spin_unlock(&svc->srv_lock);
559 spin_unlock(&exp->exp_lock);
560 rc = mds_cleanup_mfd(exp);
562 class_export_put(exp);
/*
 * mds_getstatus(): reply with the filesystem root fid.  last_committed /
 * last_xid are filled in generically for all replies (see comment below).
 */
566 static int mds_getstatus(struct ptlrpc_request *req)
568 struct mds_obd *mds = mds_req2mds(req);
569 struct mds_body *body;
570 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
/* Fault-injection: simulate reply-pack failure. */
573 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_PACK, req->rq_status = -ENOMEM);
574 rc = lustre_pack_reply(req, 2, size, NULL);
576 RETURN(req->rq_status = rc);
578 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
579 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
581 /* the last_committed and last_xid fields are filled in for all
582 * replies already - no need to do so here also.
587 /* get the LOV EA from @inode and store it into @md. It can be at most
588 * @size bytes, and @size is updated with the actual EA size.
589 * The EA size is also returned on success, and -ve errno on failure.
590 * If there is no EA then 0 is returned. */
/* NOTE(review): several lines (lmm_size declaration, intermediate logic,
 * return) are elided in this listing. */
591 int mds_get_md(struct obd_device *obd, struct inode *inode, void *md,
592 int *size, int lock, int flags,
/* Takes i_mutex when @lock is set (per the mds_pack_md() contract below). */
599 LOCK_INODE_MUTEX(inode);
600 rc = fsfilt_get_md(obd, inode, md, *size, "lov");
/* No EA on a plain MDS_GETATTR: fall back to the default striping. */
602 if (rc == 0 && flags == MDS_GETATTR)
603 rc = mds_get_default_md(obd, md, &lmm_size);
606 CERROR("Error %d reading eadata for ino %lu\n",
/* Convert on-disk LOV EA to the wire format expected by the client. */
610 rc = mds_convert_lov_ea(obd, inode, md, lmm_size,
623 UNLOCK_INODE_MUTEX(inode);
629 /* Call with lock=1 if you want mds_pack_md to take the i_mutex.
630 * Call with lock=0 if the caller has already taken the i_mutex. */
/*
 * mds_pack_md(): read @inode's LOV EA into the reply buffer at @offset and
 * set the corresponding valid bits / eadatasize in @body.
 * NOTE(review): lines are elided in this listing (lmm declaration, replay
 * early-exit branch, return path).
 */
631 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
632 struct mds_body *body, struct inode *inode, int lock, int flags,
635 struct mds_obd *mds = &obd->u.mds;
641 lmm = lustre_msg_buf(msg, offset, 0);
643 /* Some problem with getting eadata when I sized the reply
645 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
649 /* if this replay request we should be silencely exist without fill md*/
650 lmm_size = lustre_msg_buflen(msg, offset);
654 /* I don't really like this, but it is a sanity check on the client
655 * MD request. However, if the client doesn't know how much space
656 * to reserve for the MD, it shouldn't be bad to have too much space.
658 if (lmm_size > mds->mds_max_mdsize) {
659 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
660 inode->i_ino, lmm_size, mds->mds_max_mdsize);
664 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock, flags,
/* Directories report DIREA; regular files report EASIZE. */
667 if (S_ISDIR(inode->i_mode))
668 body->valid |= OBD_MD_FLDIREA;
670 body->valid |= OBD_MD_FLEASIZE;
671 body->eadatasize = lmm_size;
678 #ifdef CONFIG_FS_POSIX_ACL
/*
 * mds_pack_posix_acl(): read the inode's access ACL xattr into reply
 * buffer @repoff and record its size/valid bit in @repbody.
 * NOTE(review): some lines (buflen/rc declarations, early returns,
 * closing brace) are elided in this listing.
 */
680 int mds_pack_posix_acl(struct inode *inode, struct lustre_msg *repmsg,
681 struct mds_body *repbody, int repoff)
/* Stack dentry wrapper: ->getxattr wants a dentry, we only have an inode. */
683 struct dentry de = { .d_inode = inode };
687 LASSERT(repbody->aclsize == 0);
688 LASSERT(lustre_msg_bufcount(repmsg) > repoff);
690 buflen = lustre_msg_buflen(repmsg, repoff);
/* Filesystem without xattr support: nothing to pack. */
694 if (!inode->i_op || !inode->i_op->getxattr)
697 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
698 lustre_msg_buf(repmsg, repoff, buflen),
701 repbody->aclsize = rc;
/* -ENODATA simply means "no ACL" and is not an error. */
702 } else if (rc != -ENODATA) {
703 CERROR("buflen %d, get acl: %d\n", buflen, rc);
708 repbody->valid |= OBD_MD_FLACL;
/* Without POSIX ACL support this is a no-op returning 0. */
712 #define mds_pack_posix_acl(inode, repmsg, repbody, repoff) 0
/* mds_pack_acl(): thin wrapper over mds_pack_posix_acl(). */
715 int mds_pack_acl(struct mds_export_data *med, struct inode *inode,
716 struct lustre_msg *repmsg, struct mds_body *repbody,
719 return mds_pack_posix_acl(inode, repmsg, repbody, repoff);
/*
 * mds_getattr_internal(): fill the already-packed getattr reply at
 * @reply_off from @dentry's inode: basic attributes, then (depending on
 * reqbody->valid) LOV EA, symlink target, extended flags, and ACL.
 * NOTE(review): lines are elided in this listing (rc/len declarations,
 * several closing braces, final RETURN).
 */
722 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
723 struct ptlrpc_request *req,
724 struct mds_body *reqbody, int reply_off)
726 struct mds_body *body;
727 struct inode *inode = dentry->d_inode;
735 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
736 LASSERT(body != NULL); /* caller prepped reply */
738 body->flags = reqbody->flags; /* copy MDS_BFLAG_EXT_FLAGS if present */
739 mds_pack_inode2body(body, inode);
/* Case 1: client asked for striping data (file EA or dir default EA). */
742 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
743 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
744 if (lustre_msg_get_opc(req->rq_reqmsg) == MDS_GETATTR &&
745 ((S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))))
748 rc = mds_pack_md(obd, req->rq_repmsg, reply_off, body,
750 req->rq_export->exp_connect_flags);
752 /* If we have LOV EA data, the OST holds size, atime, mtime */
753 if (!(body->valid & OBD_MD_FLEASIZE) &&
754 !(body->valid & OBD_MD_FLDIREA))
755 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
756 OBD_MD_FLATIME | OBD_MD_FLMTIME);
/* Case 2: symlink — read the link target straight into the reply. */
759 } else if (S_ISLNK(inode->i_mode) &&
760 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
761 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off, 0);
764 LASSERT (symname != NULL); /* caller prepped reply */
765 len = lustre_msg_buflen(req->rq_repmsg, reply_off);
767 rc = inode->i_op->readlink(dentry, symname, len);
769 CERROR("readlink failed: %d\n", rc);
770 } else if (rc != len - 1) {
771 CERROR ("Unexpected readlink rc %d: expecting %d\n",
/* Fault-injection hook for readlink protocol-error testing. */
775 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READLINK_EPROTO))
778 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
779 body->valid |= OBD_MD_LINKNAME;
780 body->eadatasize = rc + 1;
781 symname[rc] = 0; /* NULL terminate */
/* Case 3: ioctl-style request for the full flag set. */
785 } else if (reqbody->valid == OBD_MD_FLFLAGS &&
786 reqbody->flags & MDS_BFLAG_EXT_FLAGS) {
789 /* We only return the full set of flags on ioctl, otherwise we
790 * get enough flags from the inode in mds_pack_inode2body(). */
791 rc = fsfilt_iocontrol(obd, dentry, FSFILT_IOC_GETFLAGS,
794 body->flags = flags | MDS_BFLAG_EXT_FLAGS;
/* Report server EA/cookie size limits when requested. */
797 if (reqbody->valid & OBD_MD_FLMODEASIZE) {
798 struct mds_obd *mds = mds_req2mds(req);
799 body->max_cookiesize = mds->mds_max_cookiesize;
800 body->max_mdsize = mds->mds_max_mdsize;
801 body->valid |= OBD_MD_FLMODEASIZE;
/* ACL data only if both sides negotiated OBD_CONNECT_ACL. */
807 #ifdef CONFIG_FS_POSIX_ACL
808 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
809 (reqbody->valid & OBD_MD_FLACL)) {
810 rc = mds_pack_acl(&req->rq_export->exp_mds_data,
811 inode, req->rq_repmsg,
/*
 * mds_getattr_pack_msg(): size and pack the getattr reply buffers based on
 * what the (already-swabbed) request body asks for: optional EA buffer,
 * symlink buffer, and ACL buffer after the mds_body.
 * NOTE(review): lines are elided in this listing (size[] initializers,
 * some closing braces, RETURN).
 */
822 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
825 struct mds_obd *mds = mds_req2mds(req);
826 struct mds_body *body;
827 int rc, bufcount = REPLY_REC_OFF + 1;
828 int size[4] = { sizeof(struct ptlrpc_body),
832 LASSERT(offset == REQ_REC_OFF); /* non-intent */
834 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
835 LASSERT(body != NULL); /* checked by caller */
836 LASSERT(lustre_req_swabbed(req, offset)); /* swabbed by caller */
/* Reserve worst-case EA space; shrunk to actual size before sending. */
838 if (body->valid & (OBD_MD_FLEASIZE | OBD_MD_FLDIREA)) {
839 /* this will be shrinked to actual size before size */
840 if (S_ISREG(inode->i_mode) || (S_ISDIR(inode->i_mode)))
841 size[bufcount ++] = mds->mds_max_mdsize;
843 /* we not want LSM for specfial files */
844 body->valid &= ~(OBD_MD_FLEASIZE | OBD_MD_FLDIREA);
/* Symlink: buffer for target, capped at what the client reserved. */
845 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
846 if (i_size_read(inode) > body->eadatasize)
847 CERROR("symlink size: %Lu, reply space: %d\n",
848 i_size_read(inode) + 1, body->eadatasize);
849 size[bufcount ++] = min_t(int, i_size_read(inode) + 1,
851 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
852 i_size_read(inode) + 1, body->eadatasize);
854 #ifdef CONFIG_FS_POSIX_ACL
855 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
856 (body->valid & OBD_MD_FLACL)) {
857 size[bufcount ++] = LUSTRE_POSIX_ACL_MAX_SIZE;
/* Fault-injection: simulate pack failure. */
861 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
862 CERROR("failed MDS_GETATTR_PACK test\n");
863 req->rq_status = -ENOMEM;
867 rc = lustre_pack_reply(req, bufcount, size, NULL);
/*
 * mds_getattr_lock(): getattr-by-name (or revalidate-by-fid) with DLM
 * locking.  Handles both plain requests (REQ_REC_OFF) and intent requests
 * (DLM_INTENT_REC_OFF), plus the resent-request case where the child lock
 * was already granted in fixup_handle_for_resent_req().
 * NOTE(review): this listing elides lines (name/namesize declarations,
 * raw-lookup plumbing, parts of the cleanup switch); comments describe
 * only the visible statements.
 */
876 static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
877 int child_part, struct lustre_handle *child_lockh)
879 struct obd_device *obd = req->rq_export->exp_obd;
880 struct mds_obd *mds = &obd->u.mds;
881 struct ldlm_reply *rep = NULL;
882 struct lvfs_run_ctxt saved;
883 struct mds_body *body;
884 struct dentry *dparent = NULL, *dchild = NULL;
885 struct lvfs_ucred uc = {NULL,};
886 struct lustre_handle parent_lockh;
888 int rc = 0, cleanup_phase = 0, resent_req = 0;
889 int rq_offset = offset;
893 LASSERT(!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME));
894 LASSERT(offset == REQ_REC_OFF || offset == DLM_INTENT_REC_OFF);
895 /* if requests were at offset 2, the getattr reply goes back at 1 */
896 if (offset == DLM_INTENT_REC_OFF) {
897 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
899 offset = DLM_REPLY_REC_OFF;
902 /* Swab now, before anyone looks inside the request */
903 body = lustre_swab_reqbuf(req, rq_offset, sizeof(*body),
904 lustre_swab_mds_body);
906 CERROR("Can't swab mds_body\n");
907 GOTO(cleanup_exit, rc = -EFAULT);
/* The name buffer is a plain string; mark it swabbed and extract it. */
910 lustre_set_req_swabbed(req, rq_offset + 1);
911 name = lustre_msg_string(req->rq_reqmsg, rq_offset + 1, 0);
913 CERROR("Can't unpack name\n");
914 GOTO(cleanup_exit, rc = -EFAULT);
916 namesize = lustre_msg_buflen(req->rq_reqmsg, rq_offset + 1);
917 /* namesize less than 2 means we have empty name, probably came from
918 revalidate by cfid, so no point in having name to be set */
/* Build caller credentials from the request. */
922 rc = mds_init_ucred(&uc, req, rq_offset);
927 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
928 cleanup_phase = 1; /* kernel context */
929 ldlm_reply_set_disposition(rep, DISP_LOOKUP_EXECD);
931 /* FIXME: handle raw lookup */
/* Raw-lookup fast path: client wants only the inode number. */
933 if (body->valid == OBD_MD_FLID) {
934 struct mds_body *mds_reply;
935 int size = sizeof(*mds_reply);
937 // The user requested ONLY the inode number, so do a raw lookup
938 rc = lustre_pack_reply(req, 1, &size, NULL);
940 CERROR("out of memory\n");
944 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
946 mds_reply = lustre_msg_buf(req->rq_repmsg, offset,
948 mds_reply->fid1.id = inum;
949 mds_reply->valid = OBD_MD_FLID;
954 /* child_lockh() is only set in fixup_handle_for_resent_req()
955 * if MSG_RESENT is set */
956 if (lustre_handle_is_used(child_lockh)) {
957 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
/* Normal path: take parent+child locks (by name) or a child UPDATE
 * lock (revalidate by fid). */
961 if (resent_req == 0) {
963 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
964 rc = mds_get_parent_child_locked(obd, &obd->u.mds,
968 MDS_INODELOCK_UPDATE,
970 child_lockh, &dchild,
973 /* For revalidate by fid we always take UPDATE lock */
974 dchild = mds_fid2locked_dentry(obd, &body->fid2, NULL,
976 NULL, 0, child_part);
979 rc = PTR_ERR(dchild);
/* Resent path: the lock was already granted; recover the child fid
 * from the granted lock's resource name instead of re-enqueueing. */
984 struct ldlm_lock *granted_lock;
985 struct ll_fid child_fid;
986 struct ldlm_resource *res;
987 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
988 granted_lock = ldlm_handle2lock(child_lockh);
989 /* lock was granted in fixup_handle_for_resent_req() if
990 * MSG_RESENT is set */
991 LASSERTF(granted_lock != NULL, LPU64"/%u lockh "LPX64"\n",
992 body->fid1.id, body->fid1.generation,
993 child_lockh->cookie);
996 res = granted_lock->l_resource;
997 child_fid.id = res->lr_name.name[0];
998 child_fid.generation = res->lr_name.name[1];
999 dchild = mds_fid2dentry(&obd->u.mds, &child_fid, NULL);
1000 if (IS_ERR(dchild)) {
1001 rc = PTR_ERR(dchild);
1002 LCONSOLE_WARN("Child "LPU64"/%u lookup error %d.",
1003 child_fid.id, child_fid.generation, rc);
1006 LDLM_LOCK_PUT(granted_lock);
1009 cleanup_phase = 2; /* dchild, dparent, locks */
/* Negative dentry: record NEG disposition; intent handling turns the
 * -ENOENT into a normal "not found" answer. */
1011 if (dchild->d_inode == NULL) {
1012 ldlm_reply_set_disposition(rep, DISP_LOOKUP_NEG);
1013 /* in the intent case, the policy clears this error:
1014 the disposition is enough */
1015 GOTO(cleanup, rc = -ENOENT);
1017 ldlm_reply_set_disposition(rep, DISP_LOOKUP_POS);
/* Intent replies are pre-packed; pack here only if not. */
1020 if (req->rq_repmsg == NULL) {
1021 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
1023 CERROR ("mds_getattr_pack_msg: %d\n", rc);
1028 rc = mds_getattr_internal(obd, dchild, req, body, offset);
1029 GOTO(cleanup, rc); /* returns the lock to the client */
/* Cleanup ladder, driven by how far we got (cleanup_phase). */
1032 switch (cleanup_phase) {
1034 if (resent_req == 0) {
1035 if (rc && dchild->d_inode)
1036 ldlm_lock_decref(child_lockh, LCK_CR)
1038 ldlm_lock_decref(&parent_lockh, LCK_CR);
1044 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1046 mds_exit_ucred(&uc, mds);
/* Ensure an (empty) reply exists even on the error path. */
1047 if (!req->rq_packed_final) {
1048 int rc2 = lustre_pack_reply(req, 1, NULL, NULL);
1051 req->rq_status = rc;
/*
 * mds_getattr(): plain getattr-by-fid (no name, no DLM lock): resolve the
 * dentry, size/pack the reply, fill it via mds_getattr_internal().
 * NOTE(review): some lines (dentry declaration, dput, RETURN) are elided
 * in this listing.
 */
1058 static int mds_getattr(struct ptlrpc_request *req, int offset)
1060 struct mds_obd *mds = mds_req2mds(req);
1061 struct obd_device *obd = req->rq_export->exp_obd;
1062 struct lvfs_run_ctxt saved;
1064 struct mds_body *body;
1065 struct lvfs_ucred uc = { NULL, };
1069 OBD_COUNTER_INCREMENT(obd, getattr);
/* Swab the request body before touching any field. */
1071 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1072 lustre_swab_mds_body);
1074 GOTO(cleanup_exit, rc = -EFAULT);
1076 rc = mds_init_ucred(&uc, req, offset);
1078 GOTO(out_ucred, rc);
1080 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1081 de = mds_fid2dentry(mds, &body->fid1, NULL);
1083 req->rq_status = PTR_ERR(de);
1087 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
1089 CERROR("mds_getattr_pack_msg: %d\n", rc);
1093 req->rq_status = mds_getattr_internal(obd, de, req, body,REPLY_REC_OFF);
1098 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/* Guarantee a packed (possibly empty) reply on the error path. */
1100 if (!req->rq_packed_final) {
1101 int rc2 = lustre_pack_reply(req, 1, NULL, NULL);
1105 req->rq_status = rc;
1107 mds_exit_ucred(&uc, mds);
/* Trim over-reserved EA/symlink buffers to their actual size. */
1110 mds_shrink_body_reply(req, offset, REPLY_REC_OFF);
/*
 * mds_obd_statfs(): statfs the backing filesystem under obd_osfs_lock and
 * copy the (possibly cached, per @max_age) result into @osfs.
 * NOTE(review): rc declaration and RETURN are elided in this listing.
 */
1114 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1115 __u64 max_age, __u32 flags)
1119 spin_lock(&obd->obd_osfs_lock);
1120 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
1122 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
1123 spin_unlock(&obd->obd_osfs_lock);
/*
 * mds_statfs(): handle an MDS_STATFS RPC — pack a reply containing an
 * obd_statfs filled from the backing filesystem (cached up to 1 jiffie).
 * NOTE(review): error-branch lines and the final RETURN are elided here.
 */
1128 static int mds_statfs(struct ptlrpc_request *req)
1130 struct obd_device *obd = req->rq_export->exp_obd;
1131 struct ptlrpc_service *svc = req->rq_rqbd->rqbd_service;
1132 int rc, size[2] = { sizeof(struct ptlrpc_body),
1133 sizeof(struct obd_statfs) };
1136 /* This will trigger a watchdog timeout */
1137 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
1138 (MDS_SERVICE_WATCHDOG_FACTOR *
1139 at_get(&svc->srv_at_estimate)) + 1);
1140 OBD_COUNTER_INCREMENT(obd, statfs);
1142 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK))
1143 GOTO(out, rc = -ENOMEM);
1144 rc = lustre_pack_reply(req, 2, size, NULL);
1148 /* We call this so that we can cache a bit - 1 jiffie worth */
1149 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1150 size[REPLY_REC_OFF]),
1151 cfs_time_current_64() - HZ, 0);
1153 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1159 req->rq_status = rc;
/*
 * mds_sync(): handle an MDS_SYNC RPC — sync the whole backing filesystem;
 * if a fid is supplied (fid1.id != 0), also pack the inode's current
 * attributes into the reply.
 * NOTE(review): dentry declaration, dput and RETURN are elided here.
 */
1163 static int mds_sync(struct ptlrpc_request *req, int offset)
1165 struct obd_device *obd = req->rq_export->exp_obd;
1166 struct mds_obd *mds = &obd->u.mds;
1167 struct mds_body *body;
1168 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
1171 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1172 lustre_swab_mds_body);
1174 GOTO(out, rc = -EFAULT);
1176 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK))
1177 GOTO(out, rc = -ENOMEM);
1178 rc = lustre_pack_reply(req, 2, size, NULL);
1182 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
/* Non-zero fid: report the synced inode's attributes back. */
1183 if (rc == 0 && body->fid1.id != 0) {
1186 de = mds_fid2dentry(mds, &body->fid1, NULL);
1188 GOTO(out, rc = PTR_ERR(de));
1190 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1192 mds_pack_inode2body(body, de->d_inode);
1198 req->rq_status = rc;
1202 /* mds_readpage does not take a DLM lock on the inode, because the client must
1203 * already have a PR lock.
1205 * If we were to take another one here, a deadlock will result, if another
1206 * thread is already waiting for a PW lock. */
/*
 * mds_readpage(): handle MDS_READPAGE — open the directory named by
 * fid1 and bulk-send its pages via mds_sendpage().  Note the wire-field
 * reuse: body->size carries the offset and body->nlink the byte count.
 * NOTE(review): de/file declarations, some error checks and the RETURN
 * are elided in this listing.
 */
1207 static int mds_readpage(struct ptlrpc_request *req, int offset)
1209 struct obd_device *obd = req->rq_export->exp_obd;
1210 struct mds_obd *mds = &obd->u.mds;
1211 struct vfsmount *mnt;
1214 struct mds_body *body, *repbody;
1215 struct lvfs_run_ctxt saved;
1216 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) };
1217 struct lvfs_ucred uc = {NULL,};
1220 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_PACK, -ENOMEM);
1221 rc = lustre_pack_reply(req, 2, size, NULL);
1225 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1226 lustre_swab_mds_body);
1228 GOTO (out, rc = -EFAULT);
1230 rc = mds_init_ucred(&uc, req, offset);
1234 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1235 de = mds_fid2dentry(&obd->u.mds, &body->fid1, &mnt);
1237 GOTO(out_pop, rc = PTR_ERR(de));
1239 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1241 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1242 /* note: in case of an error, dentry_open puts dentry */
1244 GOTO(out_pop, rc = PTR_ERR(file));
1246 /* body->size is actually the offset -eeb */
/* Offset must be block-aligned for the underlying readpage path. */
1247 if ((body->size & (de->d_inode->i_sb->s_blocksize - 1)) != 0) {
1248 CERROR("offset "LPU64" not on a block boundary of %lu\n",
1249 body->size, de->d_inode->i_sb->s_blocksize);
1250 GOTO(out_file, rc = -EFAULT);
1253 /* body->nlink is actually the #bytes to read -eeb */
1254 if (body->nlink & (de->d_inode->i_sb->s_blocksize - 1)) {
1255 CERROR("size %u is not multiple of blocksize %lu\n",
1256 body->nlink, de->d_inode->i_sb->s_blocksize);
1257 GOTO(out_file, rc = -EFAULT);
/* Tell the client the directory's current size so it can detect EOF. */
1260 repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1262 repbody->size = i_size_read(file->f_dentry->d_inode);
1263 repbody->valid = OBD_MD_FLSIZE;
1265 /* to make this asynchronous make sure that the handling function
1266 doesn't send a reply when this function completes. Instead a
1267 callback function would send the reply */
1268 /* body->size is actually the offset -eeb */
1269 rc = mds_sendpage(req, file, body->size, body->nlink);
1272 filp_close(file, 0);
1274 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1276 mds_exit_ucred(&uc, mds);
1277 req->rq_status = rc;
/*
 * mds_reint(): generic entry point for reintegration (metadata-modifying)
 * requests — unpack the update record, apply root squash, and dispatch to
 * mds_reint_rec().  The record is heap-allocated to keep it off the stack.
 * NOTE(review): rc declaration, allocation-failure check and RETURN are
 * elided in this listing.
 */
1281 int mds_reint(struct ptlrpc_request *req, int offset,
1282 struct lustre_handle *lockh)
1284 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1287 OBD_ALLOC(rec, sizeof(*rec));
1291 rc = mds_update_unpack(req, offset, rec);
1292 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1293 CERROR("invalid record\n");
1294 GOTO(out, req->rq_status = -EINVAL);
/* Remap root credentials per the root-squash configuration. */
1297 mds_root_squash(&req->rq_export->exp_obd->u.mds, &req->rq_peer.nid,
1298 &rec->ur_uc.luc_fsuid, &rec->ur_uc.luc_fsgid,
1299 &rec->ur_uc.luc_cap, &rec->ur_uc.luc_suppgid1,
1300 &rec->ur_uc.luc_suppgid2);
1302 /* rc will be used to interrupt a for loop over multiple records */
1303 rc = mds_reint_rec(rec, offset, req, lockh);
1305 OBD_FREE(rec, sizeof(*rec));
/*
 * Decide whether a request may be processed while the MDS is still in
 * recovery.  Connect/disconnect (and a few others, partially elided
 * here) are always allowed; recovery-safe requests are queued via
 * target_queue_recovery_request(); anything else is rejected with
 * -EAGAIN so the client retries after recovery completes.
 *
 * \param process [out] set by target_queue_recovery_request() to tell
 *                the caller whether to continue handling the request
 */
1309 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1310                                        struct obd_device *obd, int *process)
1312         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1313         case MDS_CONNECT: /* This will never get here, but for completeness. */
1314         case OST_CONNECT: /* This will never get here, but for completeness. */
1315         case MDS_DISCONNECT:
1316         case OST_DISCONNECT:
1321         case MDS_SYNC: /* used in unmounting */
1326                 *process = target_queue_recovery_request(req, obd);
1330                 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1332                 /* XXX what should we set rq_status to here? */
1333                 req->rq_status = -EAGAIN;
1334                 RETURN(ptlrpc_error(req));
/* Human-readable names for REINT sub-opcodes, indexed by the REINT_*
 * enum; used only for debug logging in mds_handle(). */
1338 static char *reint_names[] = {
1339         [REINT_SETATTR] "setattr",
1340         [REINT_CREATE]  "create",
1341         [REINT_LINK]    "link",
1342         [REINT_UNLINK]  "unlink",
1343         [REINT_RENAME]  "rename",
1344         [REINT_OPEN]    "open",
/*
 * Handle an MDS_SET_INFO request: extract the key/value pair from the
 * request buffers and act on recognized keys.  Currently only the
 * "read-only" key (and its broken 1.6.6 spelling) is handled, toggling
 * OBD_CONNECT_RDONLY on the export.
 */
1347 static int mds_set_info_rpc(struct obd_export *exp, struct ptlrpc_request *req)
1350         int keylen, vallen, rc = 0;
1353         key = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, 1);
                /* key buffer missing: presumably returns an error here
                 * (line elided in this extract) */
1355                 DEBUG_REQ(D_HA, req, "no set_info key");
1358         keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF);
1360         val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, 0);
1361         vallen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF + 1);
1363         rc = lustre_pack_reply(req, 1, NULL, NULL);
1367         lustre_msg_set_status(req->rq_repmsg, 0);
1369         /* Accept the broken "read-only" key from 1.6.6 clients. b=17493 */
1370         if (KEY_IS(KEY_READONLY) || KEY_IS(KEY_READONLY_166COMPAT)) {
1371                 if (val == NULL || vallen < sizeof(__u32)) {
1372                         DEBUG_REQ(D_HA, req, "no set_info val");
                /* nonzero value => mark export read-only, zero => clear */
1377                         exp->exp_connect_flags |= OBD_CONNECT_RDONLY;
1379                         exp->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
1387 #ifdef HAVE_QUOTA_SUPPORT
/*
 * Handle an MDS_QUOTACHECK request: swab the obd_quotactl body, pack a
 * reply, and delegate to obd_quotacheck().  The result is returned to
 * the client via req->rq_status.
 */
1388 static int mds_handle_quotacheck(struct ptlrpc_request *req)
1390         struct obd_quotactl *oqctl;
1394         oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1395                                    lustre_swab_obd_quotactl);
1399         rc = lustre_pack_reply(req, 1, NULL, NULL);
1403         req->rq_status = obd_quotacheck(req->rq_export, oqctl);
/*
 * Handle an MDS_QUOTACTL request: swab the obd_quotactl body, pack a
 * reply with room for the returned quotactl structure, and delegate to
 * obd_quotactl(); the status goes back in req->rq_status.
 */
1407 static int mds_handle_quotactl(struct ptlrpc_request *req)
1409         struct obd_quotactl *oqctl, *repoqc;
1410         int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repoqc) };
1413         oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1414                                    lustre_swab_obd_quotactl);
1418         rc = lustre_pack_reply(req, 2, size, NULL);
1422         repoqc = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*repoqc));
1424         req->rq_status = obd_quotactl(req->rq_export, oqctl);
/*
 * Validate the protocol version of an incoming message against the
 * version expected for its opcode family:
 *   - connect/disconnect       -> LUSTRE_OBD_VERSION
 *   - MDS metadata operations  -> LUSTRE_MDS_VERSION
 *   - LDLM lock traffic        -> LUSTRE_DLM_VERSION
 *   - llog / log-cancel        -> LUSTRE_LOG_VERSION
 * Unknown opcodes are logged as errors.  (Several case labels are
 * elided in this extract.)
 */
1430 static int mds_msg_check_version(struct lustre_msg *msg)
1434         switch (lustre_msg_get_opc(msg)) {
1436         case MDS_DISCONNECT:
1438                 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
1440                         CERROR("bad opc %u version %08x, expecting %08x\n",
1441                                lustre_msg_get_opc(msg),
1442                                lustre_msg_get_version(msg),
1443                                LUSTRE_OBD_VERSION);
1447         case MDS_GETATTR_NAME:
1452         case MDS_DONE_WRITING:
1458         case MDS_QUOTACHECK:
1462                 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
1464                         CERROR("bad opc %u version %08x, expecting %08x\n",
1465                                lustre_msg_get_opc(msg),
1466                                lustre_msg_get_version(msg),
1467                                LUSTRE_MDS_VERSION);
1471         case LDLM_BL_CALLBACK:
1472         case LDLM_CP_CALLBACK:
1473                 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
1475                         CERROR("bad opc %u version %08x, expecting %08x\n",
1476                                lustre_msg_get_opc(msg),
1477                                lustre_msg_get_version(msg),
1478                                LUSTRE_DLM_VERSION);
1480         case OBD_LOG_CANCEL:
1481         case LLOG_ORIGIN_HANDLE_CREATE:
1482         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1483         case LLOG_ORIGIN_HANDLE_READ_HEADER:
1484         case LLOG_ORIGIN_HANDLE_CLOSE:
1485         case LLOG_ORIGIN_HANDLE_DESTROY:
1486         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1488                 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
1490                         CERROR("bad opc %u version %08x, expecting %08x\n",
1491                                lustre_msg_get_opc(msg),
1492                                lustre_msg_get_version(msg),
1493                                LUSTRE_LOG_VERSION);
1496                 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
/*
 * Top-level MDS request dispatcher, registered as the ptlrpc service
 * handler.  Flow:
 *   1. version-check the message;
 *   2. for anything but MDS_CONNECT, require a valid export and detect
 *      resent/replayed requests by xid;
 *   3. if the target is recovering, filter/queue the request;
 *   4. dispatch on opcode to the per-operation handlers;
 *   5. stamp last_xid / committed-transno info into the reply and hand
 *      off to target_handle_reply().
 *
 * NOTE(review): many lines of this function are elided in this extract
 * (original numbering jumps); comments only, code untouched.
 */
1502 int mds_handle(struct ptlrpc_request *req)
1504         int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
1506         struct mds_obd *mds = NULL; /* quell gcc overwarning */
1507         struct obd_device *obd = NULL;
1510         OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
                /* we must not be inside a journal transaction on entry */
1512         LASSERT(current->journal_info == NULL);
1514         rc = mds_msg_check_version(req->rq_reqmsg);
1516                 CERROR("MDS drop mal-formed request\n");
1520         /* XXX identical to OST */
1521         if (lustre_msg_get_opc(req->rq_reqmsg) != MDS_CONNECT) {
1522                 struct mds_export_data *med;
1525                 if (req->rq_export == NULL) {
                        /* only CONNECT may arrive without an export */
1526                         CERROR("operation %d on unconnected MDS from %s\n",
1527                                lustre_msg_get_opc(req->rq_reqmsg),
1528                                libcfs_id2str(req->rq_peer));
1529                         req->rq_status = -ENOTCONN;
1530                         GOTO(out, rc = -ENOTCONN);
1533                 med = &req->rq_export->exp_mds_data;
1534                 obd = req->rq_export->exp_obd;
1537                 /* sanity check: if the xid matches, the request must
1538                  * be marked as a resent or replayed */
1539                 if (req->rq_xid == le64_to_cpu(med->med_lcd->lcd_last_xid) ||
1540                     req->rq_xid == le64_to_cpu(med->med_lcd->lcd_last_close_xid))
1541                         if (!(lustre_msg_get_flags(req->rq_reqmsg) &
1542                               (MSG_RESENT | MSG_REPLAY))) {
1543                                 CERROR("rq_xid "LPU64" matches last_xid, "
1544                                        "expected RESENT flag\n",
1546                                 req->rq_status = -ENOTCONN;
1547                                 GOTO(out, rc = -EFAULT);
1549                 /* else: note the opposite is not always true; a
1550                  * RESENT req after a failover will usually not match
1551                  * the last_xid, since it was likely never
1552                  * committed. A REPLAYed request will almost never
1553                  * match the last xid, however it could for a
1554                  * committed, but still retained, open. */
1556                 /* Check for aborted recovery. */
1557                 spin_lock_bh(&obd->obd_processing_task_lock);
1558                 recovering = obd->obd_recovering;
1559                 spin_unlock_bh(&obd->obd_processing_task_lock);
1561                     target_recovery_check_and_stop(obd) == 0) {
1562                         rc = mds_filter_recovery_request(req, obd,
1564                         if (rc || !should_process)
        /* main opcode dispatch */
1569         switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1571                 DEBUG_REQ(D_INODE, req, "connect");
1572                 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1573                 rc = target_handle_connect(req, mds_handle);
1575                 /* Now that we have an export, set obd. */
1576                 obd = req->rq_export->exp_obd;
1580         case MDS_DISCONNECT:
1581                 DEBUG_REQ(D_INODE, req, "disconnect");
1582                 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1583                 rc = target_handle_disconnect(req);
1584                 req->rq_status = rc; /* superfluous? */
1588                 DEBUG_REQ(D_INODE, req, "getstatus");
1589                 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1590                 rc = mds_getstatus(req);
1594                 DEBUG_REQ(D_INODE, req, "getattr");
1595                 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1596                 rc = mds_getattr(req, REQ_REC_OFF);
1600                 DEBUG_REQ(D_INODE, req, "setxattr");
1601                 OBD_FAIL_RETURN(OBD_FAIL_MDS_SETXATTR_NET, 0);
1602                 rc = mds_setxattr(req);
1606                 DEBUG_REQ(D_INODE, req, "getxattr");
1607                 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETXATTR_NET, 0);
1608                 rc = mds_getxattr(req);
1611         case MDS_GETATTR_NAME: {
1612                 struct lustre_handle lockh = { 0 };
1613                 DEBUG_REQ(D_INODE, req, "getattr_name");
1614                 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1616                 /* If this request gets a reconstructed reply, we won't be
1617                  * acquiring any new locks in mds_getattr_lock, so we don't
1620                 rc = mds_getattr_lock(req, REQ_REC_OFF, MDS_INODELOCK_UPDATE,
1622                 mds_shrink_body_reply(req, REQ_REC_OFF, REPLY_REC_OFF);
1623                 /* this non-intent call (from an ioctl) is special */
1624                 req->rq_status = rc;
1625                 if (rc == 0 && lustre_handle_is_used(&lockh))
                        /* drop the lock immediately: the non-intent caller
                         * does not keep it */
1626                         ldlm_lock_decref(&lockh, LCK_CR);
1630                 DEBUG_REQ(D_INODE, req, "statfs");
1631                 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1632                 rc = mds_statfs(req);
1636                 DEBUG_REQ(D_INODE, req, "readpage");
1637                 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1638                 rc = mds_readpage(req, REQ_REC_OFF);
1640                 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
        /* MDS_REINT: peek at the sub-opcode to size the reply buffers
         * and bump the matching procfs counter before dispatching */
1647                 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF,
1651                 int size[4] = { sizeof(struct ptlrpc_body),
1652                                 sizeof(struct mds_body),
1653                                 mds->mds_max_mdsize,
1654                                 mds->mds_max_cookiesize };
1657                 /* NB only peek inside req now; mds_reint() will swab it */
1659                         CERROR ("Can't inspect opcode\n");
1664                 if (lustre_req_need_swab(req))
1667                 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1668                           (opc < REINT_MAX) ? reint_names[opc] :
1673                         op = PTLRPC_LAST_CNTR + MDS_REINT_CREATE;
1676                         op = PTLRPC_LAST_CNTR + MDS_REINT_LINK;
1679                         op = PTLRPC_LAST_CNTR + MDS_REINT_OPEN;
1682                         op = PTLRPC_LAST_CNTR + MDS_REINT_SETATTR;
1685                         op = PTLRPC_LAST_CNTR + MDS_REINT_RENAME;
1688                         op = PTLRPC_LAST_CNTR + MDS_REINT_UNLINK;
1695                 if (op && req->rq_rqbd->rqbd_service->srv_stats)
1696                         lprocfs_counter_incr(
1697                                 req->rq_rqbd->rqbd_service->srv_stats, op);
1699                 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
1701                 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1703                 else if (opc == REINT_OPEN)
1708                 /* during recovery no reply is sent until MDS state is
                 * restored, so shrink the reply buffers to nothing */
1709                 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
1710                         size[DLM_REPLY_REC_OFF] = 0;
1711                         if (opc == REINT_UNLINK || opc == REINT_RENAME)
1712                                 size[DLM_REPLY_REC_OFF + 1] = 0;
1715                 rc = lustre_pack_reply(req, bufcount, size, NULL);
1719                 rc = mds_reint(req, REQ_REC_OFF, NULL);
1720                 mds_shrink_intent_reply(req, opc, REPLY_REC_OFF);
1721                 fail = OBD_FAIL_MDS_REINT_NET_REP;
1726                 DEBUG_REQ(D_INODE, req, "close");
1727                 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1728                 rc = mds_close(req, REQ_REC_OFF);
1729                 mds_shrink_body_reply(req, REQ_REC_OFF, REPLY_REC_OFF);
1730                 fail = OBD_FAIL_MDS_CLOSE_NET_REP;
1733         case MDS_DONE_WRITING:
1734                 DEBUG_REQ(D_INODE, req, "done_writing");
1735                 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
1736                 rc = mds_done_writing(req, REQ_REC_OFF);
1740                 DEBUG_REQ(D_INODE, req, "pin");
1741                 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
1742                 rc = mds_pin(req, REQ_REC_OFF);
1746                 DEBUG_REQ(D_INODE, req, "sync");
1747                 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
1748                 rc = mds_sync(req, REQ_REC_OFF);
1752                 DEBUG_REQ(D_INODE, req, "set_info");
1753                 rc = mds_set_info_rpc(req->rq_export, req);
1755 #ifdef HAVE_QUOTA_SUPPORT
1756         case MDS_QUOTACHECK:
1757                 DEBUG_REQ(D_INODE, req, "quotacheck");
1758                 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACHECK_NET, 0);
1759                 rc = mds_handle_quotacheck(req);
1763                 DEBUG_REQ(D_INODE, req, "quotactl");
1764                 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACTL_NET, 0);
1765                 rc = mds_handle_quotactl(req);
1769                 DEBUG_REQ(D_INODE, req, "ping");
1770                 rc = target_handle_ping(req);
1771                 if (req->rq_export->exp_delayed)
                        /* delayed export pinged us: refresh its epoch */
1772                         mds_update_client_epoch(req->rq_export);
1775         case OBD_LOG_CANCEL:
1776                 CDEBUG(D_INODE, "log cancel\n");
1777                 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1778                 rc = -ENOTSUPP; /* la la la */
1782                 DEBUG_REQ(D_INODE, req, "enqueue");
1783                 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1784                 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1785                                          ldlm_server_blocking_ast, NULL);
1786                 fail = OBD_FAIL_LDLM_REPLY;
1789                 DEBUG_REQ(D_INODE, req, "convert");
1790                 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1791                 rc = ldlm_handle_convert(req);
1793         case LDLM_BL_CALLBACK:
1794         case LDLM_CP_CALLBACK:
1795                 DEBUG_REQ(D_INODE, req, "callback");
1796                 CERROR("callbacks should not happen on MDS\n");
1798                 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1800         case LLOG_ORIGIN_HANDLE_CREATE:
1801                 DEBUG_REQ(D_INODE, req, "llog_init");
1802                 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1803                 rc = llog_origin_handle_create(req);
1805         case LLOG_ORIGIN_HANDLE_DESTROY:
1806                 DEBUG_REQ(D_INODE, req, "llog_init");
1807                 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1808                 rc = llog_origin_handle_destroy(req);
1810         case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1811                 DEBUG_REQ(D_INODE, req, "llog next block");
1812                 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1813                 rc = llog_origin_handle_next_block(req);
1815         case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1816                 DEBUG_REQ(D_INODE, req, "llog prev block");
1817                 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1818                 rc = llog_origin_handle_prev_block(req);
1820         case LLOG_ORIGIN_HANDLE_READ_HEADER:
1821                 DEBUG_REQ(D_INODE, req, "llog read header");
1822                 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1823                 rc = llog_origin_handle_read_header(req);
1825         case LLOG_ORIGIN_HANDLE_CLOSE:
1826                 DEBUG_REQ(D_INODE, req, "llog close");
1827                 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1828                 rc = llog_origin_handle_close(req);
1831                 DEBUG_REQ(D_INODE, req, "llog catinfo");
1832                 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1833                 rc = llog_catinfo(req);
1836                 req->rq_status = -ENOTSUPP;
1837                 rc = ptlrpc_error(req);
                /* handlers must not leave a transaction open */
1841         LASSERT(current->journal_info == NULL);
1843         /* If we're DISCONNECTing, the mds_export_data is already freed */
1844         if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != MDS_DISCONNECT) {
1845                 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1847                 /* I don't think last_xid is used here anyway, so I'm not
1848                  * sure if we need to care about last_close_xid here. */
1849                 lustre_msg_set_last_xid(req->rq_repmsg,
1850                                         le64_to_cpu(med->med_lcd->lcd_last_xid));
1851                 target_committed_to_req(req);
1856         return target_handle_reply(req, rc, fail);
1859 /* Update the server data on disk. This stores the new mount_count and
1860  * also the last_rcvd value to disk. If we don't have a clean shutdown,
1861  * then the server last_rcvd value may be less than that of the clients.
1862  * This will alert us that we may need to do client recovery.
1864  * Also assumes for mds_last_transno that we are not modifying it (no locking).
 *
 * \param force_sync  passed through to fsfilt_write_record() to force a
 *                    synchronous write of the last_rcvd record
 */
1866 int mds_update_server_data(struct obd_device *obd, int force_sync)
1868         struct mds_obd *mds = &obd->u.mds;
1869         struct lr_server_data *lsd = mds->mds_server_data;
1870         struct file *filp = mds->mds_rcvd_filp;
1871         struct lvfs_run_ctxt saved;
1876         CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
1877                mds->mds_mount_count, mds->mds_last_transno);
        /* snapshot last_transno into the on-disk structure under the lock */
1879         spin_lock(&mds->mds_transno_lock);
1880         lsd->lsd_last_transno = cpu_to_le64(mds->mds_last_transno);
1881         spin_unlock(&mds->mds_transno_lock);
        /* write the record inside the MDS filesystem context */
1883         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1884         rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off,force_sync);
1885         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1887                 CERROR("error writing MDS server data: rc = %d\n", rc);
/*
 * Parse a comma-separated mount-options string and set the matching
 * mds flags: user_xattr/nouser_xattr toggle mds_fl_user_xattr and
 * acl/noacl toggle mds_fl_acl (ACL support requires
 * CONFIG_FS_POSIX_ACL in the kernel).
 *
 * NOTE(review): the loop header and option-advance lines are elided in
 * this extract; comments only, code untouched.
 */
1892 static void fsoptions_to_mds_flags(struct mds_obd *mds, char *options)
                /* find the end of the current comma-separated token */
1902                 while (*p && *p != ',')
1906                 if (len == sizeof("user_xattr") - 1 &&
1907                     memcmp(options, "user_xattr", len) == 0) {
1908                         mds->mds_fl_user_xattr = 1;
1909                         LCONSOLE_INFO("Enabling user_xattr\n");
1910                 } else if (len == sizeof("nouser_xattr") - 1 &&
1911                            memcmp(options, "nouser_xattr", len) == 0) {
1912                         mds->mds_fl_user_xattr = 0;
1913                         LCONSOLE_INFO("Disabling user_xattr\n");
1914                 } else if (len == sizeof("acl") - 1 &&
1915                            memcmp(options, "acl", len) == 0) {
1916 #ifdef CONFIG_FS_POSIX_ACL
1917                         mds->mds_fl_acl = 1;
1918                         LCONSOLE_INFO("Enabling ACL\n");
1920                         CWARN("ignoring unsupported acl mount option\n");
1922                 } else if (len == sizeof("noacl") - 1 &&
1923                            memcmp(options, "noacl", len) == 0) {
1924 #ifdef CONFIG_FS_POSIX_ACL
1925                         mds->mds_fl_acl = 0;
1926                         LCONSOLE_INFO("Disabling ACL\n");
1934 /* mount the file system (secretly). lustre_cfg parameters are:
 * 1 = device, 2 = fstype, 3 = config (lov profile) name,
 * 4 = mount options -- see the example below.
 *
 * Full obd setup for the MDS: attach fsfilt ops, initialize locking and
 * procfs state, set up the backing filesystem, quota, the group-upcall
 * cache, and kick off postsetup (LOV connect).  Error paths unwind in
 * reverse order via the err_* labels.
 *
 * NOTE(review): many lines are elided in this extract (original
 * numbering jumps); comments only, code untouched.
 */
1940 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
1942         struct lprocfs_static_vars lvars;
1943         struct lustre_cfg* lcfg = buf;
1944         struct mds_obd *mds = &obd->u.mds;
1945         struct lustre_sb_info *lsi;
1946         struct lustre_mount_info *lmi;
1947         struct vfsmount *mnt;
1948         struct obd_uuid uuid;
1955         /* setup 1:/dev/loop/0 2:ext3 3:mdsA 4:errors=remount-ro,iopen_nopriv */
        /* the obt member must alias u.mds.mds_obt for shared obd code */
1957         CLASSERT(offsetof(struct obd_device, u.obt) ==
1958                  offsetof(struct obd_device, u.mds.mds_obt));
1960         if (lcfg->lcfg_bufcount < 3)
1963         if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
1966         lmi = server_get_mount(obd->obd_name);
1968                 CERROR("Not mounted in lustre_fill_super?\n");
1972         /* We mounted in lustre_fill_super.
1973            lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
1974         lsi = s2lsi(lmi->lmi_sb);
1975         fsoptions_to_mds_flags(mds, lsi->lsi_ldd->ldd_mount_opts);
1976         fsoptions_to_mds_flags(mds, lsi->lsi_lmd->lmd_opts);
1978         obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
1979         if (IS_ERR(obd->obd_fsops))
1980                 GOTO(err_put, rc = PTR_ERR(obd->obd_fsops));
1982         CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
1984         if (lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb))) {
1985                 CERROR("%s: Underlying device is marked as read-only. "
1986                        "Setup failed\n", obd->obd_name);
1987                 GOTO(err_ops, rc = -EROFS);
        /* basic locking / tunable initialization */
1990         sema_init(&mds->mds_epoch_sem, 1);
1991         spin_lock_init(&mds->mds_transno_lock);
1992         mds->mds_max_mdsize = sizeof(struct lov_mds_md_v3);
1993         mds->mds_max_cookiesize = sizeof(struct llog_cookie);
1994         mds->mds_atime_diff = MAX_ATIME_DIFF;
1995         mds->mds_evict_ost_nids = 1;
1996         /* sync permission changes */
1997         mds->mds_sync_permission = 0;
        /* per-MDS DLM namespace with intent handling */
1999         sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
2000         obd->obd_namespace = ldlm_namespace_new(obd, ns_name, LDLM_NAMESPACE_SERVER,
2001                                                 LDLM_NAMESPACE_GREEDY);
2002         if (obd->obd_namespace == NULL) {
2004                 GOTO(err_ops, rc = -ENOMEM);
2006         ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
        /* procfs counters and exports directory */
2008         lprocfs_mds_init_vars(&lvars);
2009         if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
2010             lprocfs_alloc_obd_stats(obd, LPROC_MDS_LAST) == 0) {
2011                 /* Init private stats here */
2012                 mds_stats_counter_init(obd->obd_stats);
2013 #ifdef HAVE_DELAYED_RECOVERY
2014                 lprocfs_obd_attach_stale_exports(obd);
2016                 obd->obd_proc_exports_entry = proc_mkdir("exports",
2017                                                          obd->obd_proc_entry);
2020         rc = mds_fs_setup(obd, mnt);
2022                 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
2027         if (obd->obd_proc_exports_entry)
2028                 lprocfs_add_simple(obd->obd_proc_exports_entry,
2029                                    "clear", lprocfs_nid_stats_clear_read,
2030                                    lprocfs_nid_stats_clear_write, obd, NULL);
        /* remember the LOV profile name from config buffer 3 */
2032         if (lcfg->lcfg_bufcount >= 4 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
2035         ll_generate_random_uuid(uuid);
2036         class_uuid_unparse(uuid, &mds->mds_lov_uuid);
2038         OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
2039         if (mds->mds_profile == NULL)
2040                 GOTO(err_fs, rc = -ENOMEM);
2042         strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
2043                 LUSTRE_CFG_BUFLEN(lcfg, 3));
2046         ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
2047                            "mds_ldlm_client", &obd->obd_ldlm_client);
2048         obd->obd_replayable = 1;
2050         rc = lquota_setup(mds_quota_interface_ref, obd);
2054         mds->mds_group_hash = upcall_cache_init(obd->obd_name);
2055         if (IS_ERR(mds->mds_group_hash)) {
2056                 rc = PTR_ERR(mds->mds_group_hash);
2057                 mds->mds_group_hash = NULL;
2058                 GOTO(err_qctxt, rc);
2061         /* Don't wait for mds_postrecov trying to clear orphans */
2062         obd->obd_async_recov = 1;
2063         rc = mds_postsetup(obd);
2064         /* Bug 11557 - allow async abort_recov start
2065            FIXME can remove most of this obd_async_recov plumbing
2066         obd->obd_async_recov = 0;
2069                 GOTO(err_qctxt, rc);
2071         uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2072         if (uuid_ptr != NULL) {
2073                 class_uuid_unparse(uuid_ptr, &uuid);
        /* console banner: different text depending on recovery state */
2079         label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
2080         if (obd->obd_recovering) {
2081                 LCONSOLE_WARN("MDT %s now serving %s (%s%s%s), but will be in "
2082                               "recovery for at least %d:%.02d, or until %d "
2083                               "client%s reconnect%s. \n",
2084                               obd->obd_name, lustre_cfg_string(lcfg, 1),
2085                               label ?: "", label ? "/" : "", str,
2086                               obd->obd_recovery_timeout / 60,
2087                               obd->obd_recovery_timeout % 60,
2088                               obd->obd_recoverable_clients,
2089                               (obd->obd_recoverable_clients == 1) ? "":"s",
2090                               (obd->obd_recoverable_clients == 1) ? "s":"");
2092                 LCONSOLE_INFO("MDT %s now serving %s (%s%s%s) with recovery "
2093                               "%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
2094                               label ?: "", label ? "/" : "", str,
2095                               obd->obd_replayable ? "enabled" : "disabled");
2098         /* Reduce the initial timeout on an MDS because it doesn't need such
2099          * a long timeout as an OST does. Adaptive timeouts will adjust this
2100          * value appropriately. */
2101         if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
2102                 ldlm_timeout = MDS_LDLM_TIMEOUT_DEFAULT;
        /* error unwinding, in reverse order of setup */
2107         lquota_cleanup(mds_quota_interface_ref, obd);
2109         /* No extra cleanup needed for llog_init_commit_thread() */
2110         mds_fs_cleanup(obd);
2111         upcall_cache_cleanup(mds->mds_group_hash);
2112         mds->mds_group_hash = NULL;
2113         remove_proc_entry("clear", obd->obd_proc_exports_entry);
2115         lprocfs_free_per_client_stats(obd);
2116         lprocfs_free_obd_stats(obd);
2117         lprocfs_obd_cleanup(obd);
2118         ldlm_namespace_free(obd->obd_namespace, NULL, 0);
2119         obd->obd_namespace = NULL;
2121         fsfilt_put_ops(obd->obd_fsops);
2123         server_put_mount(obd->obd_name, mnt);
2124         obd->u.obt.obt_sb = NULL;
/*
 * Tear down the MDS->LOV connection: free the stored profile name,
 * detach ourselves as observer, propagate the force/fail shutdown flags
 * to the LOV obd, disconnect the export and clean the LOV up manually.
 */
2128 static int mds_lov_clean(struct obd_device *obd)
2130         struct mds_obd *mds = &obd->u.mds;
2131         struct obd_device *osc = mds->mds_osc_obd;
2134         if (mds->mds_profile) {
2135                 class_del_profile(mds->mds_profile);
2136                 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
2137                 mds->mds_profile = NULL;
2140         /* There better be a lov */
2144                 RETURN(PTR_ERR(osc));
2146         obd_register_observer(osc, NULL);
2148         /* Give lov our same shutdown flags */
2149         osc->obd_force = obd->obd_force;
2150         osc->obd_fail = obd->obd_fail;
2152         /* Cleanup the lov */
2153         obd_disconnect(mds->mds_osc_exp);
2154         class_manual_cleanup(osc);
2155         mds->mds_osc_exp = NULL;
/*
 * Second-phase setup run after mds_setup(): create the config and
 * LOV-EA llog contexts and, if a profile was configured, connect to
 * the LOV named in that profile.  The err_cleanup path releases the
 * llog contexts taken above.
 */
2160 static int mds_postsetup(struct obd_device *obd)
2162         struct mds_obd *mds = &obd->u.mds;
2163         struct llog_ctxt *ctxt;
2167         rc = llog_setup(obd, LLOG_CONFIG_ORIG_CTXT, obd, 0, NULL,
2172         rc = llog_setup(obd, LLOG_LOVEA_ORIG_CTXT, obd, 0, NULL,
2177         if (mds->mds_profile) {
2178                 struct lustre_profile *lprof;
2179                 /* The profile defines which osc and mdc to connect to, for a
2180                    client. We reuse that here to figure out the name of the
2181                    lov to use (and ignore lprof->lp_mdc).
2182                    The profile was set in the config log with
2183                    LCFG_MOUNTOPT profilenm oscnm mdcnm */
2184                 lprof = class_get_profile(mds->mds_profile);
2185                 if (lprof == NULL) {
2186                         CERROR("No profile found: %s\n", mds->mds_profile);
2187                         GOTO(err_cleanup, rc = -ENOENT);
2189                 rc = mds_lov_connect(obd, lprof->lp_osc);
2191                         GOTO(err_cleanup, rc);
        /* error path: drop the llog contexts acquired above */
2198         ctxt = llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT);
2202         ctxt = llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT);
/*
 * Post-recovery processing: update the boot epoch (VBR), clear the
 * PENDING directory of orphans, notify the LOV so each target syncs,
 * and start quota recovery unless the obd is stopping.
 */
2208 int mds_postrecov(struct obd_device *obd)
2216         LASSERT(!obd->obd_recovering);
2218         /* VBR: update boot epoch after recovery */
2219         mds_update_last_epoch(obd);
2221         /* clean PENDING dir */
2222         rc = mds_cleanup_pending(obd);
2225         /* FIXME Does target_finish_recovery really need this to block? */
2226         /* Notify the LOV, which will in turn call mds_notify for each tgt */
2227         /* This means that we have to hack obd_notify to think we're obd_set_up
2228            during mds_lov_connect. */
2229         obd_notify(obd->u.mds.mds_osc_obd, NULL,
2230                    obd->obd_async_recov ? OBD_NOTIFY_SYNC_NONBLOCK :
2231                    OBD_NOTIFY_SYNC, NULL);
2233         /* quota recovery */
2234         if (likely(obd->obd_stopping == 0))
2235                 lquota_recovery(mds_quota_interface_ref, obd);
2241 /* We need to be able to stop an mds_lov_synchronize */
/* Abort in-flight LOV synchronization early, but only for a forced or
 * failed shutdown; a clean shutdown lets it finish. */
2242 static int mds_lov_early_clean(struct obd_device *obd)
2244         struct mds_obd *mds = &obd->u.mds;
2245         struct obd_device *osc = mds->mds_osc_obd;
2247         if (!osc || (!obd->obd_force && !obd->obd_fail))
2250         CDEBUG(D_HA, "abort inflight\n");
2251         return (obd_precleanup(osc, OBD_CLEANUP_EARLY));
/*
 * Staged pre-cleanup hook.  At EXPORTS stage: stop recovery and abort
 * in-flight LOV sync; at SELF_EXP: disconnect the LOV and tear down the
 * llog contexts.  (Some statements are elided in this extract.)
 */
2254 static int mds_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2260         case OBD_CLEANUP_EARLY:
2262         case OBD_CLEANUP_EXPORTS:
2263                 target_cleanup_recovery(obd);
2264                 mds_lov_early_clean(obd);
2266         case OBD_CLEANUP_SELF_EXP:
2267                 mds_lov_disconnect(obd);
2269                 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
2270                 llog_cleanup(llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT));
2271                 rc = obd_llog_finish(obd, 0);
2273         case OBD_CLEANUP_OBD:
/*
 * Final teardown of the MDS obd: drop the LOV export ref, free procfs
 * state, stop quota, flush server data, clean the backing filesystem,
 * release the mount and DLM namespace, and cancel any stale recovery
 * timer.  Roughly the reverse of mds_setup().
 */
2279 static int mds_cleanup(struct obd_device *obd)
2281         struct mds_obd *mds = &obd->u.mds;
2282         lvfs_sbdev_type save_dev;
2285         if (obd->u.obt.obt_sb == NULL)
2287         save_dev = lvfs_sbdev(obd->u.obt.obt_sb);
2289         if (mds->mds_osc_exp)
2290                 /* lov export was disconnected by mds_lov_clean;
2291                    we just need to drop our ref */
2292                 class_export_put(mds->mds_osc_exp);
2294         remove_proc_entry("clear", obd->obd_proc_exports_entry);
2295         lprocfs_free_per_client_stats(obd);
2296         lprocfs_free_obd_stats(obd);
2297         lprocfs_obd_cleanup(obd);
2299         lquota_cleanup(mds_quota_interface_ref, obd);
        /* force-sync last_rcvd before unmounting */
2301         mds_update_server_data(obd, 1);
2302         mds_fs_cleanup(obd);
2304         upcall_cache_cleanup(mds->mds_group_hash);
2305         mds->mds_group_hash = NULL;
2307         server_put_mount(obd->obd_name, mds->mds_vfsmnt);
2308         obd->u.obt.obt_sb = NULL;
2310         ldlm_namespace_free(obd->obd_namespace, NULL, obd->obd_force);
2311         obd->obd_namespace = NULL;
        /* stop the recovery timer if we went down mid-recovery */
2313         spin_lock_bh(&obd->obd_processing_task_lock);
2314         if (obd->obd_recovering) {
2315                 target_cancel_recovery_timer(obd);
2316                 obd->obd_recovering = 0;
2318         spin_unlock_bh(&obd->obd_processing_task_lock);
2320         fsfilt_put_ops(obd->obd_fsops);
2322         LCONSOLE_INFO("MDT %s has stopped.\n", obd->obd_name);
/*
 * For a MSG_RESENT request, try to find the lock we granted for the
 * original send (keyed by the client's remote lock handle in the
 * export's lock hash) and restore its cookie into @lockh, so the resend
 * reuses the existing lock instead of enqueueing a duplicate.  If no
 * matching lock exists and the xid doesn't match the last (close) xid,
 * the RESENT flag is cleared and the request is handled as new.
 *
 * \param old_lock [out] if non-NULL, receives a referenced pointer to
 *                 the previously-granted lock
 */
2327 static void fixup_handle_for_resent_req(struct ptlrpc_request *req, int offset,
2328                                         struct ldlm_lock *new_lock,
2329                                         struct ldlm_lock **old_lock,
2330                                         struct lustre_handle *lockh)
2332         struct obd_export *exp = req->rq_export;
2333         struct ldlm_request *dlmreq =
2334                 lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*dlmreq));
2335         struct lustre_handle remote_hdl = dlmreq->lock_handle[0];
2336         struct ldlm_lock *lock;
2338         if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
2341         lock = lustre_hash_lookup(exp->exp_lock_hash, &remote_hdl);
2343                 if (lock != new_lock) {
2344                         lockh->cookie = lock->l_handle.h_cookie;
2345                         LDLM_DEBUG(lock, "restoring lock cookie");
2346                         DEBUG_REQ(D_DLMTRACE, req, "restoring lock cookie "
2347                                   LPX64, lockh->cookie);
2349                                 *old_lock = LDLM_LOCK_GET(lock);
2351                         lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
2354                 lh_put(exp->exp_lock_hash, &lock->l_exp_hash);
2357         /* If the xid matches, then we know this is a resent request,
2358          * and allow it. (It's probably an OPEN, for which we don't
2361             le64_to_cpu(exp->exp_mds_data.med_lcd->lcd_last_xid))
2365             le64_to_cpu(exp->exp_mds_data.med_lcd->lcd_last_close_xid))
2368         /* This remote handle isn't enqueued, so we never received or
2369          * processed this request. Clear MSG_RESENT, because it can
2370          * be handled like any normal request now. */
2372         lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2374         DEBUG_REQ(D_DLMTRACE, req, "no existing lock with rhandle "LPX64,
/* True when @error is a client-disconnect errno (-ENOTCONN or -ENODEV);
 * such errors are reported straight to the ptlrpc layer rather than via
 * lock_policy_res2.  The parameter is parenthesized so that a compound
 * argument expression cannot change the comparison's meaning. */
#define IS_CLIENT_DISCONNECT_ERROR(error) \
        ((error) == -ENOTCONN || (error) == -ENODEV)
/*
 * LDLM intent policy for the MDS namespace, registered via
 * ldlm_register_intent().  Decodes the ldlm_intent from the request,
 * sizes and packs the reply, executes the intent (open/create,
 * getattr/lookup, ...), and then either aborts the lock
 * (ELDLM_LOCK_ABORTED) or replaces the client's lock with the one
 * obtained by the intent handler (ELDLM_LOCK_REPLACED).
 *
 * NOTE(review): several lines (case labels, EXIT paths) are elided in
 * this extract; comments only, code untouched.
 */
2381 static int mds_intent_policy(struct ldlm_namespace *ns,
2382                              struct ldlm_lock **lockp, void *req_cookie,
2383                              ldlm_mode_t mode, int flags, void *data)
2385         struct ptlrpc_request *req = req_cookie;
2386         struct ldlm_lock *lock = *lockp;
2387         struct ldlm_intent *it;
2388         struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
2389         struct ldlm_reply *rep;
2390         struct lustre_handle lockh = { 0 };
2391         struct ldlm_lock *new_lock = NULL;
2392         int getattr_part = MDS_INODELOCK_UPDATE;
2393         int repsize[5] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
2394                            [DLM_LOCKREPLY_OFF]   = sizeof(struct ldlm_reply),
2395                            [DLM_REPLY_REC_OFF]   = sizeof(struct mds_body),
2396                            [DLM_REPLY_REC_OFF+1] = mds->mds_max_mdsize };
2397         int repbufcnt = 4, rc;
2400         LASSERT(req != NULL);
2402         if (lustre_msg_bufcount(req->rq_reqmsg) <= DLM_INTENT_IT_OFF) {
2403                 /* No intent was provided */
2404                 rc = lustre_pack_reply(req, 2, repsize, NULL);
2410         it = lustre_swab_reqbuf(req, DLM_INTENT_IT_OFF, sizeof(*it),
2411                                 lustre_swab_ldlm_intent);
2413                 CERROR("Intent missing\n");
2414                 RETURN(req->rq_status = -EFAULT);
2417         LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
        /* size the optional reply buffer: ACL data for lookup-type
         * intents, unlink cookies for IT_UNLINK */
2419         if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
2420             (it->opc & (IT_OPEN | IT_GETATTR | IT_LOOKUP)))
2421                 /* we should never allow OBD_CONNECT_ACL if not configured */
2422                 repsize[repbufcnt++] = LUSTRE_POSIX_ACL_MAX_SIZE;
2423         else if (it->opc & IT_UNLINK)
2424                 repsize[repbufcnt++] = mds->mds_max_cookiesize;
2426         /* during recovery no reply is sent until MDS state is restored,
         * so shrink the optional reply buffers to nothing */
2427         if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
2428                 repsize[DLM_REPLY_REC_OFF+1] = 0;
2429                 if (it->opc & IT_UNLINK)
2430                         repsize[DLM_REPLY_REC_OFF+2] = 0;
2433         rc = lustre_pack_reply(req, repbufcnt, repsize, NULL);
2435                 RETURN(req->rq_status = rc);
2437         rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
2438         ldlm_reply_set_disposition(rep, DISP_IT_EXECD);
2440         /* execute policy */
2441         switch ((long)it->opc) {
2443         case IT_CREAT|IT_OPEN:
2444                 mds_counter_incr(req->rq_export, LPROC_MDS_OPEN);
2445                 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock, NULL,
2447                 /* XXX swab here to assert that an mds_open reint
2448                  * packet is following */
2449                 rep->lock_policy_res2 = mds_reint(req, DLM_INTENT_REC_OFF,
2451                 mds_shrink_intent_reply(req, REINT_OPEN, DLM_REPLY_REC_OFF);
2453                 /* We abort the lock if the lookup was negative and
2454                  * we did not make it to the OPEN portion */
2455                 if (!ldlm_reply_disposition(rep, DISP_LOOKUP_EXECD))
2456                         RETURN(ELDLM_LOCK_ABORTED);
2457                 if (ldlm_reply_disposition(rep, DISP_LOOKUP_NEG) &&
2458                     !ldlm_reply_disposition(rep, DISP_OPEN_OPEN))
2461                 /* If there was an error of some sort or if we are not
2462                  * returning any locks */
2463                 if (rep->lock_policy_res2 ||
2464                     !ldlm_reply_disposition(rep, DISP_OPEN_LOCK)) {
2465                         /* If it is the disconnect error (ENODEV & ENOCONN)
2466                          * ptlrpc layer should know this imediately, it should
2467                          * be replied by rq_stats, otherwise, return it by
2470                         /* if VBR failure then return error in rq_stats too */
2471                         if (IS_CLIENT_DISCONNECT_ERROR(rep->lock_policy_res2) ||
2472                             rep->lock_policy_res2 == -EOVERFLOW)
2473                                 RETURN(rep->lock_policy_res2);
2475                         RETURN(ELDLM_LOCK_ABORTED);
        /* getattr/lookup intents (case labels elided in this extract) */
2479                 getattr_part = MDS_INODELOCK_LOOKUP;
2481                 getattr_part |= MDS_INODELOCK_LOOKUP;
2482                 OBD_COUNTER_INCREMENT(req->rq_export->exp_obd, getattr);
2484                 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock,
2487                 /* INODEBITS_INTEROP: if this lock was converted from a
2488                  * plain lock (client does not support inodebits), then
2489                  * child lock must be taken with both lookup and update
2490                  * bits set for all operations.
2492                 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS))
2493                         getattr_part = MDS_INODELOCK_LOOKUP |
2494                                        MDS_INODELOCK_UPDATE;
2496                 rep->lock_policy_res2 = mds_getattr_lock(req,DLM_INTENT_REC_OFF,
2497                                                          getattr_part, &lockh);
2498                 mds_shrink_body_reply(req,DLM_INTENT_REC_OFF, DLM_REPLY_REC_OFF);
2499                 /* FIXME: LDLM can set req->rq_status. MDS sets
2500                    policy_res{1,2} with disposition and status.
2501                    - replay: returns 0 & req->status is old status
2502                    - otherwise: returns req->status */
2503                 if (ldlm_reply_disposition(rep, DISP_LOOKUP_NEG))
2504                         rep->lock_policy_res2 = 0;
2505                 if (!ldlm_reply_disposition(rep, DISP_LOOKUP_POS) ||
2506                     rep->lock_policy_res2)
2507                         RETURN(ELDLM_LOCK_ABORTED);
2508                 if (req->rq_status != 0) {
2510                         rep->lock_policy_res2 = req->rq_status;
2511                         RETURN(ELDLM_LOCK_ABORTED);
2515                 CERROR("Unhandled intent "LPD64"\n", it->opc);
2519         /* By this point, whatever function we called above must have either
2520          * filled in 'lockh', been an intent replay, or returned an error. We
2521          * want to allow replayed RPCs to not get a lock, since we would just
2522          * drop it below anyways because lock replay is done separately by the
2523          * client afterwards. For regular RPCs we want to give the new lock to
2524          * the client instead of whatever lock it was about to get. */
2525         if (new_lock == NULL)
2526                 new_lock = ldlm_handle2lock(&lockh);
2527         if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
2530         LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
2531                  it->opc, lockh.cookie);
2533         /* If we've already given this lock to a client once, then we should
2534          * have no readers or writers. Otherwise, we should have one reader
2535          * _or_ writer ref (which will be zeroed below) before returning the
2536          * lock to a client. */
2537         if (new_lock->l_export == req->rq_export) {
2538                 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2540                 LASSERT(new_lock->l_export == NULL);
2541                 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2546         if (new_lock->l_export == req->rq_export) {
2547                 /* Already gave this to the client, which means that we
2548                  * reconstructed a reply. */
2549                 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
2551                 RETURN(ELDLM_LOCK_REPLACED);
2554         /* Fixup the lock to be given to the client */
2555         lock_res_and_lock(new_lock);
2556         new_lock->l_readers = 0;
2557         new_lock->l_writers = 0;
2559         new_lock->l_export = class_export_get(req->rq_export);
2560         new_lock->l_blocking_ast = lock->l_blocking_ast;
2561         new_lock->l_completion_ast = lock->l_completion_ast;
2562         new_lock->l_flags &= ~LDLM_FL_LOCAL;
2564         memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
2565                sizeof(lock->l_remote_handle));
2567         unlock_res_and_lock(new_lock);
        /* make the lock findable by the client's remote handle on resend */
2569         lustre_hash_add(new_lock->l_export->exp_lock_hash,
2570                         &new_lock->l_remote_handle,
2571                         &new_lock->l_exp_hash);
2572         LDLM_LOCK_PUT(new_lock);
2574         RETURN(ELDLM_LOCK_REPLACED);
/*
 * mdt_setup(): create and start the MDS request-processing ptlrpc services
 * for this obd: the main MDS service, a dedicated setattr service, and a
 * dedicated readpage service, each on its own request portal.
 *
 * Thread counts come from the mds_num_threads module parameter when set
 * (clamped to [MDS_THREADS_MIN, MDS_THREADS_MAX]); otherwise min/max are
 * derived from a CPU/memory heuristic.  On failure, services started so
 * far are unregistered in reverse creation order.
 */
2577 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
2579         struct mds_obd *mds = &obd->u.mds;
2580         struct lprocfs_static_vars lvars;
2581         int mds_min_threads;
2582         int mds_max_threads;
        /* Register this device's /proc entries before any service starts. */
2586         lprocfs_mdt_init_vars(&lvars);
2587         lprocfs_obd_setup(obd, lvars.obd_vars);
        /* Serializes the mds_*_service pointers against health checks
         * and teardown (see mdt_health_check()/mdt_cleanup()). */
2589         sema_init(&mds->mds_health_sem, 1);
2591         if (mds_num_threads) {
2592                 /* If mds_num_threads is set, it is the min and the max. */
2593                 if (mds_num_threads > MDS_THREADS_MAX)
2594                         mds_num_threads = MDS_THREADS_MAX;
2595                 if (mds_num_threads < MDS_THREADS_MIN)
2596                         mds_num_threads = MDS_THREADS_MIN;
2597                 mds_max_threads = mds_min_threads = mds_num_threads;
2599                 /* Base min threads on memory and cpus */
2600                 mds_min_threads = num_possible_cpus() * CFS_NUM_CACHEPAGES >>
2601                 (27 - CFS_PAGE_SHIFT);
2602                 if (mds_min_threads < MDS_THREADS_MIN)
2603                         mds_min_threads = MDS_THREADS_MIN;
2604                 /* Largest auto threads start value */
2605                 if (mds_min_threads > 32)
2606                         mds_min_threads = 32;
2607                 mds_max_threads = min(MDS_THREADS_MAX, mds_min_threads * 4);
        /* Main MDS service: handles metadata RPCs via mds_handle(). */
2611         ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2612                         MDS_MAXREPSIZE, MDS_REQUEST_PORTAL,
2613                         MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2614                         mds_handle, LUSTRE_MDS_NAME,
2615                         obd->obd_proc_entry, target_print_req,
2616                         mds_min_threads, mds_max_threads, "ll_mdt",
2619         if (!mds->mds_service) {
2620                 CERROR("failed to start service\n");
2621                 GOTO(err_lprocfs, rc = -ENOMEM);
2624         rc = ptlrpc_start_threads(obd, mds->mds_service);
2626                 GOTO(err_thread, rc);
        /* Setattr RPCs get their own service on MDS_SETATTR_PORTAL,
         * also dispatched through mds_handle(). */
2628         mds->mds_setattr_service =
2629                 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2630                                 MDS_MAXREPSIZE, MDS_SETATTR_PORTAL,
2631                                 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2632                                 mds_handle, "mds_setattr",
2633                                 obd->obd_proc_entry, target_print_req,
2634                                 mds_min_threads, mds_max_threads,
2635                                 "ll_mdt_attr", NULL);
2636         if (!mds->mds_setattr_service) {
2637                 CERROR("failed to start getattr service\n");
2638                 GOTO(err_thread, rc = -ENOMEM);
2641         rc = ptlrpc_start_threads(obd, mds->mds_setattr_service);
2643                 GOTO(err_thread2, rc);
        /* Readpage (directory readdir) service on MDS_READPAGE_PORTAL;
         * note it uses its own minimum, MDS_THREADS_MIN_READPAGE. */
2645         mds->mds_readpage_service =
2646                 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2647                                 MDS_MAXREPSIZE, MDS_READPAGE_PORTAL,
2648                                 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_FACTOR,
2649                                 mds_handle, "mds_readpage",
2650                                 obd->obd_proc_entry, target_print_req,
2651                                 MDS_THREADS_MIN_READPAGE, mds_max_threads,
2652                                 "ll_mdt_rdpg", NULL);
2653         if (!mds->mds_readpage_service) {
2654                 CERROR("failed to start readpage service\n");
2655                 GOTO(err_thread2, rc = -ENOMEM);
2658         rc = ptlrpc_start_threads(obd, mds->mds_readpage_service);
2661                 GOTO(err_thread3, rc);
        /* Everything is up: begin evicting clients that stop pinging. */
2663         ping_evictor_start();
        /* Error unwind: tear down services in reverse creation order and
         * clear the pointers so health checks never see stale services. */
2668         ptlrpc_unregister_service(mds->mds_readpage_service);
2669         mds->mds_readpage_service = NULL;
2671         ptlrpc_unregister_service(mds->mds_setattr_service);
2672         mds->mds_setattr_service = NULL;
2674         ptlrpc_unregister_service(mds->mds_service);
2675         mds->mds_service = NULL;
2677         lprocfs_obd_cleanup(obd);
/*
 * mdt_cleanup(): stop and unregister the three MDS ptlrpc services.
 * Holds mds_health_sem across the teardown so mdt_health_check() never
 * probes a half-destroyed service; the service pointers are cleared
 * before the semaphore is released.
 */
2681 static int mdt_cleanup(struct obd_device *obd)
2683         struct mds_obd *mds = &obd->u.mds;
        /* Stop the ping evictor first — services are about to go away. */
2686         ping_evictor_stop();
2688         down(&mds->mds_health_sem);
2689         ptlrpc_unregister_service(mds->mds_readpage_service);
2690         ptlrpc_unregister_service(mds->mds_setattr_service);
2691         ptlrpc_unregister_service(mds->mds_service);
2692         mds->mds_readpage_service = NULL;
2693         mds->mds_setattr_service = NULL;
2694         mds->mds_service = NULL;
2695         up(&mds->mds_health_sem);
2697         lprocfs_obd_cleanup(obd);
/*
 * mdt_health_check(): OR together the health status of the readpage,
 * setattr and main MDS services, under mds_health_sem so the pointers
 * cannot be torn down underneath us (see mdt_cleanup()).
 */
2702 static int mdt_health_check(struct obd_device *obd)
2704         struct mds_obd *mds = &obd->u.mds;
2707         down(&mds->mds_health_sem);
2708         rc |= ptlrpc_service_health_check(mds->mds_readpage_service);
2709         rc |= ptlrpc_service_health_check(mds->mds_setattr_service);
2710         rc |= ptlrpc_service_health_check(mds->mds_service);
2711         up(&mds->mds_health_sem);
2714          * health_check to return 0 on healthy
2715          * and 1 on unhealthy.
/*
 * mds_lvfs_fid2dentry(): lvfs l_fid2dentry callback.  Packs the raw
 * id/generation pair into an ll_fid and resolves it to a dentry via
 * mds_fid2dentry(); 'data' is the obd_device registered with lvfs.
 * NOTE(review): 'gr' (presumably the fid group) is not used in the
 * visible body — confirm against the l_fid2dentry callback contract.
 */
2723 static struct dentry *mds_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
2726         struct obd_device *obd = data;
2729         fid.generation = gen;
2730         return mds_fid2dentry(&obd->u.mds, &fid, NULL);
/*
 * mds_health_check(): device-level health probe for the "mds" obd type.
 * Reports unhealthy when the backing superblock has gone read-only;
 * when built with USE_HEALTH_CHECK_WRITE it additionally exercises I/O
 * through the pre-opened health-check file.
 */
2733 static int mds_health_check(struct obd_device *obd)
2735         struct obd_device_target *odt = &obd->u.obt;
2736 #ifdef USE_HEALTH_CHECK_WRITE
2737         struct mds_obd *mds = &obd->u.mds;
        /* A read-only backing fs means the MDS can no longer commit updates. */
2741         if (odt->obt_sb->s_flags & MS_RDONLY)
2744 #ifdef USE_HEALTH_CHECK_WRITE
2745         LASSERT(mds->mds_obt.obt_health_check_filp != NULL);
        /* !! collapses the helper's status to the 0/1 health convention. */
2746         rc |= !!lvfs_check_io_health(obd, mds->mds_obt.obt_health_check_filp);
/*
 * mds_process_config(): dispatch a configuration-log record for this
 * device.  The visible case forwards "mdt.*" tunables (PARAM_MDT) to the
 * generic /proc parameter handler using the mds lprocfs variable table.
 */
2752 static int mds_process_config(struct obd_device *obd, obd_count len, void *buf)
2754         struct lustre_cfg *lcfg = buf;
2757         switch(lcfg->lcfg_command) {
2759                 struct lprocfs_static_vars lvars;
2760                 lprocfs_mds_init_vars(&lvars);
2762                 rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, lcfg, obd);
/* lvfs callback table: lets the lvfs layer map a raw fid back to a
 * dentry via mds_lvfs_fid2dentry().  Uses the standard C99 designated
 * initializer (".field =") instead of the obsolete GNU "field:" form,
 * for consistency with mds_obd_ops/mdt_obd_ops below. */
2772 struct lvfs_callback_ops mds_lvfs_ops = {
2773         .l_fid2dentry = mds_lvfs_fid2dentry,
2776 /* use obd ops to offer management infrastructure */
/* Method table for the full "mds" obd type: export/connection lifecycle,
 * device setup/teardown, statfs/ioctl, object create/destroy, llog,
 * notification, health and config hooks.  Quota methods are wired in
 * later by init_obd_quota_ops() from mds_init(); the table is registered
 * with class_register_type() under LUSTRE_MDS_NAME. */
2777 static struct obd_ops mds_obd_ops = {
2778         .o_owner           = THIS_MODULE,
2779         .o_connect         = mds_connect,
2780         .o_reconnect       = mds_reconnect,
2781         .o_init_export     = mds_init_export,
2782         .o_destroy_export  = mds_destroy_export,
2783         .o_disconnect      = mds_disconnect,
2784         .o_setup           = mds_setup,
2785         .o_precleanup      = mds_precleanup,
2786         .o_cleanup         = mds_cleanup,
2787         .o_postrecov       = mds_postrecov,
2788         .o_statfs          = mds_obd_statfs,
2789         .o_iocontrol       = mds_iocontrol,
2790         .o_create          = mds_obd_create,
2791         .o_destroy         = mds_obd_destroy,
2792         .o_llog_init       = mds_llog_init,
2793         .o_llog_finish     = mds_llog_finish,
2794         .o_notify          = mds_notify,
2795         .o_health_check    = mds_health_check,
2796         .o_process_config  = mds_process_config,
/* Method table for the service-only "mdt" obd type: just service
 * setup/teardown and health checking — no metadata methods.  Registered
 * with class_register_type() under LUSTRE_MDT_NAME. */
2799 static struct obd_ops mdt_obd_ops = {
2800         .o_owner           = THIS_MODULE,
2801         .o_setup           = mdt_setup,
2802         .o_cleanup         = mdt_cleanup,
2803         .o_health_check    = mdt_health_check,
/* Handle on the quota interface exported by the separate "lquota"
 * module: the symbol reference is taken in mds_init() via
 * PORTAL_SYMBOL_GET() and dropped in mds_exit(). */
2806 quota_interface_t *mds_quota_interface_ref;
2807 extern quota_interface_t mds_quota_interface;
/*
 * mds_init(): module entry point.  Loads the lquota module, binds its
 * interface into mds_obd_ops, then registers the "mds" and "mdt" obd
 * types with their respective lprocfs variable tables.
 */
2809 static int __init mds_init(void)
2812         struct lprocfs_static_vars lvars;
        /* lquota lives in a separate module; load it and take a symbol
         * reference on its interface before initializing quota. */
2814         request_module("lquota");
2815         mds_quota_interface_ref = PORTAL_SYMBOL_GET(mds_quota_interface);
2816         rc = lquota_init(mds_quota_interface_ref);
        /* Presumably the lquota_init() failure path: drop the symbol
         * reference taken above (guarding rc check not visible here). */
2818         if (mds_quota_interface_ref)
2819                 PORTAL_SYMBOL_PUT(mds_quota_interface);
        /* Wire quota methods into the mds method table before it is
         * registered, so every new mds obd gets them. */
2822         init_obd_quota_ops(mds_quota_interface_ref, &mds_obd_ops);
2824         lprocfs_mds_init_vars(&lvars);
2825         class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
2826         lprocfs_mdt_init_vars(&lvars);
2827         class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
/*
 * mds_exit(): module unload — shut down quota, release the lquota
 * symbol reference, and unregister both obd types.  The commented-out
 * __exit annotation suggests it is deliberately callable outside the
 * unload path — TODO(review): confirm why.
 */
2832 static void /*__exit*/ mds_exit(void)
2834         lquota_exit(mds_quota_interface_ref);
2835         if (mds_quota_interface_ref)
2836                 PORTAL_SYMBOL_PUT(mds_quota_interface);
2838         class_unregister_type(LUSTRE_MDS_NAME);
2839         class_unregister_type(LUSTRE_MDT_NAME);
/* Module metadata and entry/exit registration. */
2842 MODULE_AUTHOR("Sun Microsystems, Inc. <http://www.lustre.org/>");
2843 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
2844 MODULE_LICENSE("GPL");
2846 module_init(mds_init);
2847 module_exit(mds_exit);