1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001-2005 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of the Lustre file system, http://www.lustre.org
14 * Lustre is a trademark of Cluster File Systems, Inc.
16 * You may have signed or agreed to another license before downloading
17 * this software. If so, you are bound by the terms and conditions
18 * of that agreement, and the following does not apply to you. See the
19 * LICENSE file included with this distribution for more information.
21 * If you did not agree to a different license, then this copy of Lustre
22 * is open source software; you can redistribute it and/or modify it
23 * under the terms of version 2 of the GNU General Public License as
24 * published by the Free Software Foundation.
26 * In either case, Lustre is distributed in the hope that it will be
27 * useful, but WITHOUT ANY WARRANTY; without even the implied warranty
28 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
29 * license text for more details.
33 # define EXPORT_SYMTAB
35 #define DEBUG_SUBSYSTEM S_MDS
37 #include <lustre_mds.h>
38 #include <linux/module.h>
39 #include <linux/init.h>
40 #include <linux/random.h>
42 #include <linux/jbd.h>
43 #include <linux/ext3_fs.h>
44 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
45 # include <linux/smp_lock.h>
46 # include <linux/buffer_head.h>
47 # include <linux/workqueue.h>
48 # include <linux/mount.h>
50 # include <linux/locks.h>
53 #include <obd_class.h>
54 #include <lustre_dlm.h>
56 #include <lustre_fsfilt.h>
57 #include <lprocfs_status.h>
58 #include <lustre_commit_confd.h>
59 #include <lustre_quota.h>
60 #include <lustre_disk.h>
61 #include <lustre_param.h>
63 #include "mds_internal.h"
66 CFS_MODULE_PARM(mds_num_threads, "i", int, 0444,
67 "number of MDS service threads to start");
69 static int mds_intent_policy(struct ldlm_namespace *ns,
70 struct ldlm_lock **lockp, void *req_cookie,
71 ldlm_mode_t mode, int flags, void *data);
72 static int mds_postsetup(struct obd_device *obd);
73 static int mds_cleanup(struct obd_device *obd);
75 /* Assumes caller has already pushed into the kernel filesystem context */
/*
 * Bulk-send @count bytes of directory data from @file, starting at
 * @offset, back to the requesting client via a BULK_PUT_SOURCE descriptor.
 * Allocates a temporary page array, fills it with fsfilt_readpage(),
 * starts the bulk transfer, then waits (bounded by obd_timeout/4) for
 * completion.  On bulk failure the client export is evicted.
 * NOTE(review): some interior lines (error checks, cleanup labels) are
 * elided in this view; comments describe only what is visible here.
 */
76 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
77 loff_t offset, int count)
79 struct ptlrpc_bulk_desc *desc;
80 struct l_wait_info lwi;
82 int rc = 0, npages, i, tmpcount, tmpsize = 0;
85 LASSERT((offset & ~CFS_PAGE_MASK) == 0); /* I'm dubious about this */
/* round the requested byte count up to whole pages */
87 npages = (count + CFS_PAGE_SIZE - 1) >> CFS_PAGE_SHIFT;
88 OBD_ALLOC(pages, sizeof(*pages) * npages);
90 GOTO(out, rc = -ENOMEM);
92 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
95 GOTO(out_free, rc = -ENOMEM);
/* first pass: allocate one page per chunk and attach it to the bulk */
97 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
/* last chunk may be a partial page */
98 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
100 pages[i] = alloc_pages(GFP_KERNEL, 0);
101 if (pages[i] == NULL)
102 GOTO(cleanup_buf, rc = -ENOMEM);
104 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* second pass: read directory contents into the kmapped pages;
 * fsfilt_readpage() advances @offset as it reads */
107 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
108 tmpsize = tmpcount > CFS_PAGE_SIZE ? CFS_PAGE_SIZE : tmpcount;
109 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
110 tmpsize, offset, file->f_dentry->d_inode->i_ino,
111 file->f_dentry->d_inode->i_size);
113 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
114 kmap(pages[i]), tmpsize, &offset);
118 GOTO(cleanup_buf, rc = -EIO);
121 LASSERT(desc->bd_nob == count);
123 rc = ptlrpc_start_bulk_transfer(desc);
125 GOTO(cleanup_buf, rc);
/* fault-injection hook: simulate a failed bulk send */
127 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
128 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
129 OBD_FAIL_MDS_SENDPAGE, rc);
130 GOTO(abort_bulk, rc);
/* wait up to a quarter of obd_timeout for the bulk to finish */
133 lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
134 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), &lwi);
135 LASSERT (rc == 0 || rc == -ETIMEDOUT);
138 if (desc->bd_success &&
139 desc->bd_nob_transferred == count)
140 GOTO(cleanup_buf, rc);
142 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
145 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
146 (rc == -ETIMEDOUT) ? "timeout" : "network error",
147 desc->bd_nob_transferred, count,
148 req->rq_export->exp_client_uuid.uuid,
149 req->rq_export->exp_connection->c_remote_uuid.uuid);
/* client failed to take the bulk -- fail its export so it reconnects */
151 class_fail_export(req->rq_export);
155 ptlrpc_abort_bulk (desc);
/* unwind: free pages, the bulk descriptor, then the page array */
157 for (i = 0; i < npages; i++)
159 __free_pages(pages[i], 0);
161 ptlrpc_free_bulk(desc);
163 OBD_FREE(pages, sizeof(*pages) * npages);
168 /* only valid locked dentries or errors should be returned */
/*
 * Resolve @fid to a dentry via mds_fid2dentry(), then take a local
 * inodebits DLM lock (@lockpart bits, @lock_mode) on the inode's
 * resource (ino/generation), returning the handle in @lockh.
 * On enqueue failure the return value becomes ERR_PTR(-EIO).
 * NOTE(review): the @name/@namelen parameters are not used in the
 * visible lines -- elided lines may use them; confirm before relying.
 */
169 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
170 struct vfsmount **mnt, int lock_mode,
171 struct lustre_handle *lockh,
172 char *name, int namelen, __u64 lockpart)
174 struct mds_obd *mds = &obd->u.mds;
175 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
176 struct ldlm_res_id res_id = { .name = {0} };
177 int flags = LDLM_FL_ATOMIC_CB, rc;
/* lock only the requested inodebits (e.g. LOOKUP/UPDATE) */
178 ldlm_policy_data_t policy = { .l_inodebits = { lockpart} };
/* resource name is the inode number + generation */
184 res_id.name[0] = de->d_inode->i_ino;
185 res_id.name[1] = de->d_inode->i_generation;
186 rc = ldlm_cli_enqueue_local(obd->obd_namespace, res_id,
187 LDLM_IBITS, &policy, lock_mode, &flags,
188 ldlm_blocking_ast, ldlm_completion_ast,
189 NULL, NULL, 0, NULL, lockh);
190 if (rc != ELDLM_OK) {
192 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
198 /* Look up an entry by inode number. */
199 /* this function ONLY returns valid dget'd dentries with an initialized inode
/*
 * Translate a client-supplied fid (inode number + generation) into a
 * dget'd dentry by looking up "0x<ino>" under the special fid directory
 * dentry (mds->mds_fid_de).  Rejects unlinked inodes (nlink == 0) and
 * generation mismatches with -ENOENT; optionally returns the MDS
 * vfsmount through @mnt.
 */
201 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
202 struct vfsmount **mnt)
205 unsigned long ino = fid->id;
206 __u32 generation = fid->generation;
208 struct dentry *result;
/* ino 0 can never be valid -- elided check presumably guards this */
211 RETURN(ERR_PTR(-ESTALE));
/* build the name used by the fid pseudo-directory lookup */
213 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
215 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino/gen %lu/%u, sb %p\n",
216 ino, generation, mds->mds_obt.obt_sb);
218 /* under ext3 this is neither supposed to return bad inodes
220 result = ll_lookup_one_len(fid_name, mds->mds_fid_de, strlen(fid_name));
224 inode = result->d_inode;
226 RETURN(ERR_PTR(-ENOENT));
/* inode exists but has been unlinked: treat as not found */
228 if (inode->i_nlink == 0) {
/* zero mode+ctime as well usually means a cleared/garbage inode */
229 if (inode->i_mode == 0 &&
230 LTIME_S(inode->i_ctime) == 0 ) {
231 struct obd_device *obd = container_of(mds, struct
233 LCONSOLE_WARN("Found inode with zero nlink, mode and "
234 "ctime -- this may indicate disk"
235 "corruption (device %s, inode %lu, link:"
236 " %lu, count: %d)\n", obd->obd_name, inode->i_ino,
237 (unsigned long)inode->i_nlink,
238 atomic_read(&inode->i_count));
241 RETURN(ERR_PTR(-ENOENT));
/* inode number was reused: generation no longer matches the fid */
244 if (generation && inode->i_generation != generation) {
245 /* we didn't find the right inode.. */
246 CDEBUG(D_INODE, "found wrong generation: inode %lu, link: %lu, "
247 "count: %d, generation %u/%u\n", inode->i_ino,
248 (unsigned long)inode->i_nlink,
249 atomic_read(&inode->i_count), inode->i_generation,
252 RETURN(ERR_PTR(-ENOENT));
256 *mnt = mds->mds_vfsmnt;
/*
 * Negotiate connect flags between client and MDS: mask the client's
 * requested flags/ibits down to what this MDS supports, drop IBITS /
 * ACL / XATTR support where either side lacks it, and record the
 * negotiated result in the export.
 */
263 static int mds_connect_internal(struct obd_export *exp,
264 struct obd_connect_data *data)
266 struct obd_device *obd = exp->exp_obd;
/* intersect client-requested capabilities with MDS-supported ones */
268 data->ocd_connect_flags &= MDS_CONNECT_SUPPORTED;
269 data->ocd_ibits_known &= MDS_INODELOCK_FULL;
271 /* If no known bits (which should not happen, probably,
272 as everybody should support LOOKUP and UPDATE bits at least)
273 revert to compat mode with plain locks. */
274 if (!data->ocd_ibits_known &&
275 data->ocd_connect_flags & OBD_CONNECT_IBITS)
276 data->ocd_connect_flags &= ~OBD_CONNECT_IBITS;
/* strip ACL/XATTR support when disabled on this MDS */
278 if (!obd->u.mds.mds_fl_acl)
279 data->ocd_connect_flags &= ~OBD_CONNECT_ACL;
281 if (!obd->u.mds.mds_fl_user_xattr)
282 data->ocd_connect_flags &= ~OBD_CONNECT_XATTR;
/* remember the negotiated flags/ibits on the export for later RPCs */
284 exp->exp_connect_flags = data->ocd_connect_flags;
285 data->ocd_version = LUSTRE_VERSION_CODE;
286 exp->exp_mds_data.med_ibits_known = data->ocd_ibits_known;
/* MDS mandates ACLs but the client did not negotiate them: warn
 * (elided lines presumably reject the connection -- confirm) */
289 if (obd->u.mds.mds_fl_acl &&
290 ((exp->exp_connect_flags & OBD_CONNECT_ACL) == 0)) {
291 CWARN("%s: MDS requires ACL support but client does not\n",
/*
 * Handle a client reconnect to an existing export: validate arguments
 * and re-run the connect-flag negotiation against the (possibly
 * changed) client connect data.
 */
298 static int mds_reconnect(struct obd_export *exp, struct obd_device *obd,
299 struct obd_uuid *cluuid,
300 struct obd_connect_data *data)
305 if (exp == NULL || obd == NULL || cluuid == NULL)
308 rc = mds_connect_internal(exp, data);
313 /* Establish a connection to the MDS.
315 * This will set up an export structure for the client to hold state data
316 * about that client, like open files, the last operation number it did
317 * on the server, etc.
/*
 * Full connect path: abort recovery if requested, create the export
 * via class_connect(), negotiate flags, allocate per-client data (mcd)
 * and register the client; on failure the export is disconnected and
 * the mcd freed.
 */
319 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
320 struct obd_uuid *cluuid, struct obd_connect_data *data)
322 struct obd_export *exp;
323 struct mds_export_data *med;
324 struct mds_client_data *mcd = NULL;
325 int rc, abort_recovery;
328 if (!conn || !obd || !cluuid)
331 /* Check for aborted recovery. */
332 spin_lock_bh(&obd->obd_processing_task_lock);
333 abort_recovery = obd->obd_abort_recovery;
334 spin_unlock_bh(&obd->obd_processing_task_lock);
336 target_abort_recovery(obd);
338 /* XXX There is a small race between checking the list and adding a
339 * new connection for the same UUID, but the real threat (list
340 * corruption when multiple different clients connect) is solved.
342 * There is a second race between adding the export to the list,
343 * and filling in the client data below. Hence skipping the case
344 * of NULL mcd above. We should already be controlling multiple
345 * connects at the client, and we can't hold the spinlock over
346 * memory allocations without risk of deadlocking.
348 rc = class_connect(conn, obd, cluuid);
351 exp = class_conn2export(conn);
353 med = &exp->exp_mds_data;
355 rc = mds_connect_internal(exp, data);
/* per-client persistent data: records the client UUID for recovery */
359 OBD_ALLOC(mcd, sizeof(*mcd));
361 GOTO(out, rc = -ENOMEM);
363 memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
/* -1: let mds_client_add pick a free slot in the last_rcvd file */
366 rc = mds_client_add(obd, exp, -1);
/* error path: free client data and tear the export back down */
372 OBD_FREE(mcd, sizeof(*mcd));
375 class_disconnect(exp);
377 class_export_put(exp);
/*
 * Initialize MDS-specific export state: the per-client open-files list
 * and its lock, then flag the export as connecting (under exp_lock).
 */
383 int mds_init_export(struct obd_export *exp)
385 struct mds_export_data *med = &exp->exp_mds_data;
387 INIT_LIST_HEAD(&med->med_open_head);
388 spin_lock_init(&med->med_open_lock);
390 spin_lock(&exp->exp_lock);
391 exp->exp_connecting = 1;
392 spin_unlock(&exp->exp_lock);
/*
 * Tear down a client export: force-close every file handle the client
 * still holds (which may trigger orphan unlinking / OST object
 * destruction), then free its persistent client slot.  A self-export
 * (UUID equal to the obd's own) skips the file-close pass.
 * NOTE(review): several cleanup/branch lines are elided in this view.
 */
397 static int mds_destroy_export(struct obd_export *export)
399 struct mds_export_data *med;
400 struct obd_device *obd = export->exp_obd;
401 struct mds_obd *mds = &obd->u.mds;
402 struct lvfs_run_ctxt saved;
403 struct lov_mds_md *lmm;
404 struct llog_cookie *logcookies;
408 med = &export->exp_mds_data;
409 target_destroy_export(export);
/* self-export: nothing to force-close */
411 if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
414 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
415 /* Close any open files (which may also cause orphan unlinking). */
/* scratch buffers for striping EA + unlink log cookies, reused for
 * every file handle closed below */
417 OBD_ALLOC(lmm, mds->mds_max_mdsize);
419 CWARN("%s: allocation failure during cleanup; can not force "
420 "close file handles on this service.\n", obd->obd_name);
421 GOTO(out, rc = -ENOMEM);
424 OBD_ALLOC(logcookies, mds->mds_max_cookiesize);
425 if (logcookies == NULL) {
426 CWARN("%s: allocation failure during cleanup; can not force "
427 "close file handles on this service.\n", obd->obd_name);
428 OBD_FREE(lmm, mds->mds_max_mdsize);
429 GOTO(out, rc = -ENOMEM);
/* walk the client's open-file list; the lock is dropped around each
 * close and retaken, so the list head is re-read every iteration */
432 spin_lock(&med->med_open_lock);
433 while (!list_empty(&med->med_open_head)) {
434 struct list_head *tmp = med->med_open_head.next;
435 struct mds_file_data *mfd =
436 list_entry(tmp, struct mds_file_data, mfd_list);
437 int lmm_size = mds->mds_max_mdsize;
438 umode_t mode = mfd->mfd_dentry->d_inode->i_mode;
441 /* Remove mfd handle so it can't be found again.
442 * We are consuming the mfd_list reference here. */
443 mds_mfd_unlink(mfd, 0);
444 spin_unlock(&med->med_open_lock);
446 /* If you change this message, be sure to update
447 * replay_single:test_46 */
448 CDEBUG(D_INODE|D_IOCTL, "%s: force closing file handle for "
449 "%.*s (ino %lu)\n", obd->obd_name,
450 mfd->mfd_dentry->d_name.len,mfd->mfd_dentry->d_name.name,
451 mfd->mfd_dentry->d_inode->i_ino);
/* fetch the file's striping EA so orphan objects can be destroyed */
453 rc = mds_get_md(obd, mfd->mfd_dentry->d_inode, lmm,&lmm_size,1);
455 CWARN("mds_get_md failure, rc=%d\n", rc);
457 valid |= OBD_MD_FLEASIZE;
459 /* child orphan sem protects orphan_dec_test and
460 * is_orphan race, mds_mfd_close drops it */
461 MDS_DOWN_WRITE_ORPHAN_SEM(mfd->mfd_dentry->d_inode);
/* skip cleanup work if the client is failing over */
463 rc = mds_mfd_close(NULL, REQ_REC_OFF, obd, mfd,
464 !(export->exp_flags & OBD_OPT_FAILOVER),
465 lmm, lmm_size, logcookies,
466 mds->mds_max_cookiesize,
470 CDEBUG(D_INODE|D_IOCTL, "Error closing file: %d\n", rc);
472 if (valid & OBD_MD_FLCOOKIE) {
/* destroy the orphaned OST objects for this file */
473 rc = mds_osc_destroy_orphan(obd, mode, lmm,
474 lmm_size, logcookies, 1);
476 CDEBUG(D_INODE, "%s: destroy of orphan failed,"
477 " rc = %d\n", obd->obd_name, rc);
480 valid &= ~OBD_MD_FLCOOKIE;
483 spin_lock(&med->med_open_lock);
486 OBD_FREE(logcookies, mds->mds_max_cookiesize);
487 OBD_FREE(lmm, mds->mds_max_mdsize);
489 spin_unlock(&med->med_open_lock);
491 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* release the client's slot in the last_rcvd file */
492 mds_client_free(export);
/*
 * Disconnect a client: invalidate the export first so no new requests
 * can use it, cancel all DLM locks the client holds, then flush any
 * outstanding difficult replies back through their services.
 */
498 static int mds_disconnect(struct obd_export *exp)
/* hold an extra ref so the export survives until we finish below */
504 class_export_get(exp);
506 /* Disconnect early so that clients can't keep using export */
507 rc = class_disconnect(exp);
508 if (exp->exp_obd->obd_namespace != NULL)
509 ldlm_cancel_locks_for_export(exp);
511 /* complete all outstanding replies */
512 spin_lock(&exp->exp_lock);
513 while (!list_empty(&exp->exp_outstanding_replies)) {
514 struct ptlrpc_reply_state *rs =
515 list_entry(exp->exp_outstanding_replies.next,
516 struct ptlrpc_reply_state, rs_exp_list);
517 struct ptlrpc_service *svc = rs->rs_service;
/* reschedule the reply under its owning service's lock */
519 spin_lock(&svc->srv_lock);
520 list_del_init(&rs->rs_exp_list);
521 ptlrpc_schedule_difficult_reply(rs);
522 spin_unlock(&svc->srv_lock);
524 spin_unlock(&exp->exp_lock);
526 class_export_put(exp);
/*
 * MDS_GETSTATUS handler: reply with the filesystem root fid so the
 * client can start pathname resolution.
 */
530 static int mds_getstatus(struct ptlrpc_request *req)
532 struct mds_obd *mds = mds_req2mds(req);
533 struct mds_body *body;
534 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
537 rc = lustre_pack_reply(req, 2, size, NULL);
/* OBD_FAIL hook lets tests simulate a reply-packing failure */
538 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
539 CERROR("mds: out of memory for message\n");
540 req->rq_status = -ENOMEM; /* superfluous? */
544 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*body));
545 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
547 /* the last_committed and last_xid fields are filled in for all
548 * replies already - no need to do so here also.
553 /* get the LOV EA from @inode and store it into @md. It can be at most
554 * @size bytes, and @size is updated with the actual EA size.
555 * The EA size is also returned on success, and -ve errno on failure.
556 * If there is no EA then 0 is returned. */
557 int mds_get_md(struct obd_device *obd, struct inode *inode, void *md,
/* serialize EA access against concurrent setattr/setstripe */
564 LOCK_INODE_MUTEX(inode);
565 rc = fsfilt_get_md(obd, inode, md, *size, "lov");
568 CERROR("Error %d reading eadata for ino %lu\n",
/* convert older on-disk LOV EA formats to the current one in place */
572 rc = mds_convert_lov_ea(obd, inode, md, lmm_size);
584 UNLOCK_INODE_MUTEX(inode);
590 /* Call with lock=1 if you want mds_pack_md to take the i_mutex.
591 * Call with lock=0 if the caller has already taken the i_mutex. */
/*
 * Copy @inode's LOV EA into reply buffer @offset of @msg and set the
 * matching valid bits (FLDIREA for directories, FLEASIZE for files)
 * and eadatasize in @body.  The reply buffer must have been sized by
 * the caller; an oversized request is only warned about.
 */
592 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
593 struct mds_body *body, struct inode *inode, int lock)
595 struct mds_obd *mds = &obd->u.mds;
601 lmm = lustre_msg_buf(msg, offset, 0);
603 /* Some problem with getting eadata when I sized the reply
605 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
609 lmm_size = lustre_msg_buflen(msg, offset);
611 /* I don't really like this, but it is a sanity check on the client
612 * MD request. However, if the client doesn't know how much space
613 * to reserve for the MD, it shouldn't be bad to have too much space.
615 if (lmm_size > mds->mds_max_mdsize) {
616 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
617 inode->i_ino, lmm_size, mds->mds_max_mdsize);
621 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock);
/* mark which kind of EA is present in the reply */
623 if (S_ISDIR(inode->i_mode))
624 body->valid |= OBD_MD_FLDIREA;
626 body->valid |= OBD_MD_FLEASIZE;
627 body->eadatasize = lmm_size;
634 #ifdef CONFIG_FS_POSIX_ACL
/*
 * Read the inode's POSIX access ACL xattr into reply buffer @repoff,
 * recording its size in repbody->aclsize and setting OBD_MD_FLACL.
 * -ENODATA (no ACL set) is not an error.  When CONFIG_FS_POSIX_ACL is
 * off this compiles to a no-op macro returning 0.
 */
636 int mds_pack_posix_acl(struct inode *inode, struct lustre_msg *repmsg,
637 struct mds_body *repbody, int repoff)
/* stack dentry: getxattr only needs d_inode */
639 struct dentry de = { .d_inode = inode };
643 LASSERT(repbody->aclsize == 0);
644 LASSERT(lustre_msg_bufcount(repmsg) > repoff);
646 buflen = lustre_msg_buflen(repmsg, repoff);
/* filesystem without xattr support: nothing to pack */
650 if (!inode->i_op || !inode->i_op->getxattr)
654 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
655 lustre_msg_buf(repmsg, repoff, buflen),
660 repbody->aclsize = rc;
661 else if (rc != -ENODATA) {
662 CERROR("buflen %d, get acl: %d\n", buflen, rc);
667 repbody->valid |= OBD_MD_FLACL;
671 #define mds_pack_posix_acl(inode, repmsg, repbody, repoff) 0
/* Wrapper kept for future ACL flavors; currently POSIX ACLs only. */
674 int mds_pack_acl(struct mds_export_data *med, struct inode *inode,
675 struct lustre_msg *repmsg, struct mds_body *repbody,
678 return mds_pack_posix_acl(inode, repmsg, repbody, repoff);
/*
 * Fill a prepared getattr reply for @dentry: pack fid + inode
 * attributes, then whichever optional payload the request asked for --
 * LOV EA (regular files/dirs), symlink target, extended flags, or
 * per-MDS max EA/cookie sizes -- and finally the POSIX ACL when the
 * client negotiated OBD_CONNECT_ACL.  The caller must already have
 * packed the reply buffers (see mds_getattr_pack_msg).
 */
681 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
682 struct ptlrpc_request *req,
683 struct mds_body *reqbody, int reply_off)
685 struct mds_body *body;
686 struct inode *inode = dentry->d_inode;
693 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
694 LASSERT(body != NULL); /* caller prepped reply */
696 mds_pack_inode2fid(&body->fid1, inode);
697 body->flags = reqbody->flags; /* copy MDS_BFLAG_EXT_FLAGS if present */
698 mds_pack_inode2body(body, inode);
/* client asked for striping info (file EA or directory default EA) */
701 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
702 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
703 rc = mds_pack_md(obd, req->rq_repmsg, reply_off, body,
706 /* If we have LOV EA data, the OST holds size, atime, mtime */
707 if (!(body->valid & OBD_MD_FLEASIZE) &&
708 !(body->valid & OBD_MD_FLDIREA))
709 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
710 OBD_MD_FLATIME | OBD_MD_FLMTIME);
/* shrink the reply to the actual EA size to save wire bytes */
712 lustre_shrink_reply(req, reply_off, body->eadatasize, 0);
713 if (body->eadatasize)
715 } else if (S_ISLNK(inode->i_mode) &&
716 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
/* read the symlink target straight into the reply buffer */
717 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off, 0);
720 LASSERT (symname != NULL); /* caller prepped reply */
721 len = lustre_msg_buflen(req->rq_repmsg, reply_off);
723 rc = inode->i_op->readlink(dentry, symname, len);
725 CERROR("readlink failed: %d\n", rc);
726 } else if (rc != len - 1) {
727 CERROR ("Unexpected readlink rc %d: expecting %d\n",
731 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
732 body->valid |= OBD_MD_LINKNAME;
733 body->eadatasize = rc + 1;
734 symname[rc] = 0; /* NULL terminate */
738 } else if (reqbody->valid == OBD_MD_FLFLAGS &&
739 reqbody->flags & MDS_BFLAG_EXT_FLAGS) {
742 /* We only return the full set of flags on ioctl, otherwise we
743 * get enough flags from the inode in mds_pack_inode2body(). */
744 rc = fsfilt_iocontrol(obd, inode, NULL, EXT3_IOC_GETFLAGS,
747 body->flags = flags | MDS_BFLAG_EXT_FLAGS;
/* tell the client how big its EA/cookie buffers need to be */
750 if (reqbody->valid & OBD_MD_FLMODEASIZE) {
751 struct mds_obd *mds = mds_req2mds(req);
752 body->max_cookiesize = mds->mds_max_cookiesize;
753 body->max_mdsize = mds->mds_max_mdsize;
754 body->valid |= OBD_MD_FLMODEASIZE;
760 #ifdef CONFIG_FS_POSIX_ACL
761 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
762 (reqbody->valid & OBD_MD_FLACL)) {
763 rc = mds_pack_acl(&req->rq_export->exp_mds_data,
764 inode, req->rq_repmsg,
767 lustre_shrink_reply(req, reply_off, body->aclsize, 0);
/*
 * Size and pack the getattr reply buffers before the attributes are
 * filled in: probe the inode for its LOV EA size, symlink length or
 * ACL size (whichever the request body asked for) so the variable
 * buffers are large enough, then call lustre_pack_reply().
 */
776 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
779 struct mds_obd *mds = mds_req2mds(req);
780 struct mds_body *body;
781 int rc, bufcount = 2;
782 int size[4] = { sizeof(struct ptlrpc_body), sizeof(*body) };
785 LASSERT(offset == REQ_REC_OFF); /* non-intent */
787 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
788 LASSERT(body != NULL); /* checked by caller */
789 LASSERT_REQSWABBED(req, offset); /* swabbed by caller */
/* probe (NULL buffer) for the EA size without copying the data */
791 if ((S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
792 (S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))) {
793 LOCK_INODE_MUTEX(inode);
794 rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0,
796 UNLOCK_INODE_MUTEX(inode);
797 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
/* -ENODATA just means no striping EA; anything else is fatal */
800 if (rc != -ENODATA) {
801 CERROR("error getting inode %lu MD: rc = %d\n",
806 } else if (rc > mds->mds_max_mdsize) {
808 CERROR("MD size %d larger than maximum possible %u\n",
809 rc, mds->mds_max_mdsize);
/* symlink: reserve space for the target plus NUL terminator */
814 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
815 if (inode->i_size + 1 != body->eadatasize)
816 CERROR("symlink size: %Lu, reply space: %d\n",
817 inode->i_size + 1, body->eadatasize);
818 size[bufcount] = min_t(int, inode->i_size+1, body->eadatasize);
820 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
821 inode->i_size + 1, body->eadatasize);
824 #ifdef CONFIG_FS_POSIX_ACL
825 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
826 (body->valid & OBD_MD_FLACL)) {
/* stack dentry: getxattr only needs d_inode */
827 struct dentry de = { .d_inode = inode };
830 if (inode->i_op && inode->i_op->getxattr) {
832 rc = inode->i_op->getxattr(&de, MDS_XATTR_NAME_ACL_ACCESS,
837 if (rc != -ENODATA) {
838 CERROR("got acl size: %d\n", rc);
/* fault-injection hook for reply-packing failures */
848 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
849 CERROR("failed MDS_GETATTR_PACK test\n");
850 req->rq_status = -ENOMEM;
854 rc = lustre_pack_reply(req, bufcount, size, NULL);
856 CERROR("lustre_pack_reply failed: rc %d\n", rc);
/*
 * getattr-by-name with locking (used both standalone and from the
 * intent policy): swab the request, set up the user credential, take
 * parent+child DLM locks (or, for a RESENT request, reuse the lock the
 * client already holds in @child_lockh), then pack the child's
 * attributes via mds_getattr_internal().  For intent requests the
 * reply record moves from offset 2 to DLM_REPLY_REC_OFF.
 * NOTE(review): a number of interior lines (error branches, cleanup
 * cases) are elided in this view; comments describe the visible flow.
 */
864 static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
865 int child_part, struct lustre_handle *child_lockh)
867 struct obd_device *obd = req->rq_export->exp_obd;
868 struct mds_obd *mds = &obd->u.mds;
869 struct ldlm_reply *rep = NULL;
870 struct lvfs_run_ctxt saved;
871 struct mds_body *body;
872 struct dentry *dparent = NULL, *dchild = NULL;
873 struct lvfs_ucred uc = {NULL,};
874 struct lustre_handle parent_lockh;
876 int rc = 0, cleanup_phase = 0, resent_req = 0;
880 LASSERT(!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME));
882 /* Swab now, before anyone looks inside the request */
883 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
884 lustre_swab_mds_body);
886 CERROR("Can't swab mds_body\n");
890 LASSERT_REQSWAB(req, offset + 1);
891 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
893 CERROR("Can't unpack name\n");
896 namesize = lustre_msg_buflen(req->rq_reqmsg, offset + 1);
897 /* namesize less than 2 means we have empty name, probably came from
898 revalidate by cfid, so no point in having name to be set */
902 rc = mds_init_ucred(&uc, req, offset);
906 LASSERT(offset == REQ_REC_OFF || offset == DLM_INTENT_REC_OFF);
907 /* if requests were at offset 2, the getattr reply goes back at 1 */
908 if (offset == DLM_INTENT_REC_OFF) {
909 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF,
911 offset = DLM_REPLY_REC_OFF;
914 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
915 cleanup_phase = 1; /* kernel context */
916 intent_set_disposition(rep, DISP_LOOKUP_EXECD);
918 /* FIXME: handle raw lookup */
920 if (body->valid == OBD_MD_FLID) {
921 struct mds_body *mds_reply;
922 int size = sizeof(*mds_reply);
924 // The user requested ONLY the inode number, so do a raw lookup
925 rc = lustre_pack_reply(req, 1, &size, NULL);
927 CERROR("out of memory\n");
931 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
933 mds_reply = lustre_msg_buf(req->rq_repmsg, offset,
935 mds_reply->fid1.id = inum;
936 mds_reply->valid = OBD_MD_FLID;
/* a used child lock handle can only mean a RESENT request */
941 if (lustre_handle_is_used(child_lockh)) {
942 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
946 if (resent_req == 0) {
948 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_RESEND, obd_timeout*2);
/* lookup by name: lock parent (UPDATE) and child together */
949 rc = mds_get_parent_child_locked(obd, &obd->u.mds,
953 MDS_INODELOCK_UPDATE,
955 child_lockh, &dchild,
958 /* For revalidate by fid we always take UPDATE lock */
959 dchild = mds_fid2locked_dentry(obd, &body->fid2, NULL,
961 NULL, 0, child_part);
964 rc = PTR_ERR(dchild);
969 struct ldlm_lock *granted_lock;
970 struct ll_fid child_fid;
971 struct ldlm_resource *res;
972 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
973 granted_lock = ldlm_handle2lock(child_lockh);
974 LASSERTF(granted_lock != NULL, LPU64"/%u lockh "LPX64"\n",
975 body->fid1.id, body->fid1.generation,
976 child_lockh->cookie);
/* recover the child fid from the already-granted lock's resource */
979 res = granted_lock->l_resource;
980 child_fid.id = res->lr_name.name[0];
981 child_fid.generation = res->lr_name.name[1];
982 dchild = mds_fid2dentry(&obd->u.mds, &child_fid, NULL);
983 LASSERT(!IS_ERR(dchild));
984 LDLM_LOCK_PUT(granted_lock);
987 cleanup_phase = 2; /* dchild, dparent, locks */
989 if (dchild->d_inode == NULL) {
990 intent_set_disposition(rep, DISP_LOOKUP_NEG);
991 /* in the intent case, the policy clears this error:
992 the disposition is enough */
993 GOTO(cleanup, rc = -ENOENT);
995 intent_set_disposition(rep, DISP_LOOKUP_POS);
/* reply not packed yet (non-intent path): size it for this inode */
998 if (req->rq_repmsg == NULL) {
999 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
1001 CERROR ("mds_getattr_pack_msg: %d\n", rc);
1006 rc = mds_getattr_internal(obd, dchild, req, body, offset);
1007 GOTO(cleanup, rc); /* returns the lock to the client */
1010 switch (cleanup_phase) {
1012 if (resent_req == 0) {
/* on error, don't leave the child lock granted to the client */
1013 if (rc && dchild->d_inode)
1014 ldlm_lock_decref(child_lockh, LCK_CR);
1016 ldlm_lock_decref(&parent_lockh, LCK_CR);
1022 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1024 mds_exit_ucred(&uc, mds);
/* ensure an error reply exists even if packing failed earlier */
1025 if (req->rq_reply_state == NULL) {
1026 req->rq_status = rc;
1027 lustre_pack_reply(req, 1, NULL, NULL);
/*
 * Simple getattr-by-fid (no name lookup, no DLM locking): resolve the
 * fid to a dentry under the caller's credentials, size and pack the
 * reply, then fill it via mds_getattr_internal().
 */
1033 static int mds_getattr(struct ptlrpc_request *req, int offset)
1035 struct mds_obd *mds = mds_req2mds(req);
1036 struct obd_device *obd = req->rq_export->exp_obd;
1037 struct lvfs_run_ctxt saved;
1039 struct mds_body *body;
1040 struct lvfs_ucred uc = { NULL, };
1044 OBD_COUNTER_INCREMENT(obd, getattr);
1046 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1047 lustre_swab_mds_body);
1051 rc = mds_init_ucred(&uc, req, offset);
1053 GOTO(out_ucred, rc);
/* operate in the MDS filesystem context with the user's credentials */
1055 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1056 de = mds_fid2dentry(mds, &body->fid1, NULL);
1058 rc = req->rq_status = PTR_ERR(de);
1062 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
1064 CERROR("mds_getattr_pack_msg: %d\n", rc);
1068 req->rq_status = mds_getattr_internal(obd, de, req, body,REPLY_REC_OFF);
1073 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/* ensure an error reply exists even if packing failed earlier */
1075 if (req->rq_reply_state == NULL) {
1076 req->rq_status = rc;
1077 lustre_pack_reply(req, 1, NULL, NULL);
1079 mds_exit_ucred(&uc, mds);
/*
 * statfs helper: refresh obd_osfs (no more often than @max_age allows)
 * under obd_osfs_lock and copy the cached result into @osfs.
 */
1083 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1088 spin_lock(&obd->obd_osfs_lock);
1089 rc = fsfilt_statfs(obd, obd->u.obt.obt_sb, max_age);
1091 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
1092 spin_unlock(&obd->obd_osfs_lock);
/*
 * MDS_STATFS handler: pack a reply and fill it from the (briefly
 * cached) filesystem statistics.
 */
1097 static int mds_statfs(struct ptlrpc_request *req)
1099 struct obd_device *obd = req->rq_export->exp_obd;
1100 int rc, size[2] = { sizeof(struct ptlrpc_body),
1101 sizeof(struct obd_statfs) };
1104 /* This will trigger a watchdog timeout */
1105 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
1106 (MDS_SERVICE_WATCHDOG_TIMEOUT / 1000) + 1);
1107 OBD_COUNTER_INCREMENT(obd, statfs);
1109 rc = lustre_pack_reply(req, 2, size, NULL);
1110 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
1111 CERROR("mds: statfs lustre_pack_reply failed: rc = %d\n", rc);
1115 /* We call this so that we can cache a bit - 1 jiffie worth */
1116 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1117 size[REPLY_REC_OFF]),
1118 cfs_time_current_64() - HZ);
1120 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1126 req->rq_status = rc;
/*
 * MDS_SYNC handler: fid 0 means sync the whole filesystem; otherwise
 * fsync the named inode and return its refreshed attributes.
 */
1130 static int mds_sync(struct ptlrpc_request *req, int offset)
1132 struct obd_device *obd = req->rq_export->exp_obd;
1133 struct mds_obd *mds = &obd->u.mds;
1134 struct mds_body *body;
1135 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*body) };
1138 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1139 lustre_swab_mds_body);
1141 GOTO(out, rc = -EFAULT);
1143 rc = lustre_pack_reply(req, 2, size, NULL);
1144 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK)) {
1145 CERROR("fsync lustre_pack_reply failed: rc = %d\n", rc);
1149 if (body->fid1.id == 0) {
1150 /* a fid of zero is taken to mean "sync whole filesystem" */
1151 rc = fsfilt_sync(obd, obd->u.obt.obt_sb);
1156 de = mds_fid2dentry(mds, &body->fid1, NULL);
1158 GOTO(out, rc = PTR_ERR(de));
1160 /* The file parameter isn't used for anything */
1161 if (de->d_inode->i_fop && de->d_inode->i_fop->fsync)
1162 rc = de->d_inode->i_fop->fsync(NULL, de, 1);
/* return the post-sync attributes to the client */
1164 body = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1166 mds_pack_inode2fid(&body->fid1, de->d_inode);
1167 mds_pack_inode2body(body, de->d_inode);
1174 req->rq_status = rc;
1178 /* mds_readpage does not take a DLM lock on the inode, because the client must
1179 * already have a PR lock.
1181 * If we were to take another one here, a deadlock will result, if another
1182 * thread is already waiting for a PW lock. */
/*
 * MDS_READPAGE handler: open the directory named by fid1, validate the
 * (overloaded) offset/count fields against the inode's blocksize, and
 * bulk-send the requested range to the client via mds_sendpage().
 * Historical field reuse: body->size carries the offset and
 * body->nlink carries the byte count (see the "-eeb" comments).
 */
1183 static int mds_readpage(struct ptlrpc_request *req, int offset)
1185 struct obd_device *obd = req->rq_export->exp_obd;
1186 struct mds_obd *mds = &obd->u.mds;
1187 struct vfsmount *mnt;
1190 struct mds_body *body, *repbody;
1191 struct lvfs_run_ctxt saved;
1192 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repbody) };
1193 struct lvfs_ucred uc = {NULL,};
1196 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
1199 rc = lustre_pack_reply(req, 2, size, NULL);
1201 CERROR("error packing readpage reply: rc %d\n", rc);
1205 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1206 lustre_swab_mds_body);
1208 GOTO (out, rc = -EFAULT);
1210 rc = mds_init_ucred(&uc, req, offset);
1214 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1215 de = mds_fid2dentry(&obd->u.mds, &body->fid1, &mnt);
1217 GOTO(out_pop, rc = PTR_ERR(de));
1219 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1221 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1222 /* note: in case of an error, dentry_open puts dentry */
1224 GOTO(out_pop, rc = PTR_ERR(file));
1226 /* body->size is actually the offset -eeb */
/* both offset and count must be block-aligned for the dir read */
1227 if ((body->size & (de->d_inode->i_blksize - 1)) != 0) {
1228 CERROR("offset "LPU64" not on a block boundary of %lu\n",
1229 body->size, de->d_inode->i_blksize);
1230 GOTO(out_file, rc = -EFAULT);
1233 /* body->nlink is actually the #bytes to read -eeb */
1234 if (body->nlink & (de->d_inode->i_blksize - 1)) {
1235 CERROR("size %u is not multiple of blocksize %lu\n",
1236 body->nlink, de->d_inode->i_blksize);
1237 GOTO(out_file, rc = -EFAULT);
/* tell the client the current directory size */
1240 repbody = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF,
1242 repbody->size = file->f_dentry->d_inode->i_size;
1243 repbody->valid = OBD_MD_FLSIZE;
1245 /* to make this asynchronous make sure that the handling function
1246 doesn't send a reply when this function completes. Instead a
1247 callback function would send the reply */
1248 /* body->size is actually the offset -eeb */
1249 rc = mds_sendpage(req, file, body->size, body->nlink);
1252 filp_close(file, 0);
1254 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1256 mds_exit_ucred(&uc, mds);
1257 req->rq_status = rc;
/*
 * MDS_REINT dispatcher: unpack the update record from the request
 * (heap-allocated -- too big for the stack) and hand it to
 * mds_reint_rec() for the actual metadata modification.
 */
1261 int mds_reint(struct ptlrpc_request *req, int offset,
1262 struct lustre_handle *lockh)
1264 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1267 OBD_ALLOC(rec, sizeof(*rec));
1271 rc = mds_update_unpack(req, offset, rec);
1272 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1273 CERROR("invalid record\n");
1274 GOTO(out, req->rq_status = -EINVAL);
1277 /* rc will be used to interrupt a for loop over multiple records */
1278 rc = mds_reint_rec(rec, offset, req, lockh);
1280 OBD_FREE(rec, sizeof(*rec));
/*
 * Decide what to do with a request that arrives while the MDS is in
 * recovery: connect/disconnect/sync-style opcodes pass through,
 * replayable requests are queued for recovery processing, and anything
 * else is rejected with -EAGAIN so the client retries later.
 */
1284 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1285 struct obd_device *obd, int *process)
1287 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1288 case MDS_CONNECT: /* This will never get here, but for completeness. */
1289 case OST_CONNECT: /* This will never get here, but for completeness. */
1290 case MDS_DISCONNECT:
1291 case OST_DISCONNECT:
1296 case MDS_SYNC: /* used in unmounting */
/* queue for replay; *process tells the caller whether to continue */
1300 *process = target_queue_recovery_request(req, obd);
1304 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1306 /* XXX what should we set rq_status to here? */
1307 req->rq_status = -EAGAIN;
1308 RETURN(ptlrpc_error(req));
/* Human-readable names for the REINT_* opcodes, indexed by opcode;
 * used only for debug logging in mds_handle().  Entries not listed
 * here remain NULL. */
1312 static char *reint_names[] = {
1313 [REINT_SETATTR] "setattr",
1314 [REINT_CREATE] "create",
1315 [REINT_LINK] "link",
1316 [REINT_UNLINK] "unlink",
1317 [REINT_RENAME] "rename",
1318 [REINT_OPEN] "open",
/*
 * mds_set_info_rpc(): handle an MDS_SET_INFO request.  The only key
 * recognized in this fragment is "read-only", which toggles the
 * OBD_CONNECT_RDONLY flag on the client's export.
 * NOTE(review): memcmp(key, "read-only", keylen) with keylen larger than
 * the literal would read past the string constant; the guard only checks
 * keylen is not *smaller* -- verify against full source / later fixes.
 */
1321 static int mds_set_info_rpc(struct obd_export *exp, struct ptlrpc_request *req)
1328 key = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF, 1);
1330 DEBUG_REQ(D_HA, req, "no set_info key");
1333 keylen = lustre_msg_buflen(req->rq_reqmsg, REQ_REC_OFF);
1335 val = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF + 1, sizeof(*val));
1337 DEBUG_REQ(D_HA, req, "no set_info val");
1341 rc = lustre_pack_reply(req, 1, NULL, NULL);
1344 lustre_msg_set_status(req->rq_repmsg, 0);
1346 if (keylen < strlen("read-only") ||
1347 memcmp(key, "read-only", keylen) != 0)
/* val (presumably non-zero/zero) selects set vs. clear -- TODO confirm */
1351 exp->exp_connect_flags |= OBD_CONNECT_RDONLY;
1353 exp->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
/*
 * mds_handle_quotacheck(): unpack (and byte-swap if needed) an
 * obd_quotactl body, pack an empty reply, and run the quota check via
 * obd_quotacheck(); its result goes into req->rq_status.
 */
1358 static int mds_handle_quotacheck(struct ptlrpc_request *req)
1360 struct obd_quotactl *oqctl;
1364 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1365 lustre_swab_obd_quotactl);
1369 rc = lustre_pack_reply(req, 1, NULL, NULL);
1371 CERROR("mds: out of memory while packing quotacheck reply\n");
1375 req->rq_status = obd_quotacheck(req->rq_export, oqctl);
/*
 * mds_handle_quotactl(): unpack an obd_quotactl request, pack a reply
 * containing a result obd_quotactl buffer, execute obd_quotactl() and
 * store its result in req->rq_status.  NOTE(review): the copy of the
 * result into repoqc is on lines missing from this fragment.
 */
1379 static int mds_handle_quotactl(struct ptlrpc_request *req)
1381 struct obd_quotactl *oqctl, *repoqc;
1382 int rc, size[2] = { sizeof(struct ptlrpc_body), sizeof(*repoqc) };
1385 oqctl = lustre_swab_reqbuf(req, REQ_REC_OFF, sizeof(*oqctl),
1386 lustre_swab_obd_quotactl);
1390 rc = lustre_pack_reply(req, 2, size, NULL);
1394 repoqc = lustre_msg_buf(req->rq_repmsg, REPLY_REC_OFF, sizeof(*repoqc));
1396 req->rq_status = obd_quotactl(req->rq_export, oqctl);
/*
 * mds_msg_check_version(): verify the wire protocol version of an
 * incoming message against the version family its opcode belongs to:
 * OBD (connect/disconnect), MDS, DLM callbacks, or LLOG handling.
 * Unknown opcodes are logged as errors.  Returns the check's rc.
 */
1401 static int mds_msg_check_version(struct lustre_msg *msg)
1405 switch (lustre_msg_get_opc(msg)) {
/* generic OBD opcodes use the OBD version number */
1407 case MDS_DISCONNECT:
1409 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
1411 CERROR("bad opc %u version %08x, expecting %08x\n",
1412 lustre_msg_get_opc(msg),
1413 lustre_msg_get_version(msg),
1414 LUSTRE_OBD_VERSION);
/* MDS-specific opcodes use the MDS version number */
1418 case MDS_GETATTR_NAME:
1423 case MDS_DONE_WRITING:
1429 case MDS_QUOTACHECK:
1433 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
1435 CERROR("bad opc %u version %08x, expecting %08x\n",
1436 lustre_msg_get_opc(msg),
1437 lustre_msg_get_version(msg),
1438 LUSTRE_MDS_VERSION);
/* lock-manager callbacks use the DLM version number */
1442 case LDLM_BL_CALLBACK:
1443 case LDLM_CP_CALLBACK:
1444 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
1446 CERROR("bad opc %u version %08x, expecting %08x\n",
1447 lustre_msg_get_opc(msg),
1448 lustre_msg_get_version(msg),
1449 LUSTRE_DLM_VERSION);
/* llog-handling opcodes use the LOG version number */
1451 case OBD_LOG_CANCEL:
1452 case LLOG_ORIGIN_HANDLE_CREATE:
1453 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1454 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1455 case LLOG_ORIGIN_HANDLE_CLOSE:
1456 case LLOG_ORIGIN_HANDLE_DESTROY:
1457 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1459 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
1461 CERROR("bad opc %u version %08x, expecting %08x\n",
1462 lustre_msg_get_opc(msg),
1463 lustre_msg_get_version(msg),
1464 LUSTRE_LOG_VERSION);
1467 CERROR("MDS unknown opcode %d\n", lustre_msg_get_opc(msg));
/*
 * mds_handle(): top-level MDS request dispatcher.  Checks the message
 * version, validates the export (unless this is a CONNECT), applies
 * recovery filtering, then switches on the opcode to the appropriate
 * handler.  After handling, stamps the reply with last_xid / committed
 * transno info and sends it (or queues the final reply during recovery).
 * NOTE(review): large portions of this function (declarations, case
 * labels, braces, error paths) are missing from this fragment.
 */
1473 int mds_handle(struct ptlrpc_request *req)
1475 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
1477 struct mds_obd *mds = NULL; /* quell gcc overwarning */
1478 struct obd_device *obd = NULL;
1481 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
/* we must not be inside a filesystem transaction on entry */
1483 LASSERT(current->journal_info == NULL);
1485 rc = mds_msg_check_version(req->rq_reqmsg);
1487 CERROR("MDS drop mal-formed request\n");
1491 /* XXX identical to OST */
1492 if (lustre_msg_get_opc(req->rq_reqmsg) != MDS_CONNECT) {
1493 struct mds_export_data *med;
1494 int recovering, abort_recovery;
/* every non-CONNECT request must arrive on an established export */
1496 if (req->rq_export == NULL) {
1497 CERROR("operation %d on unconnected MDS from %s\n",
1498 lustre_msg_get_opc(req->rq_reqmsg),
1499 libcfs_id2str(req->rq_peer));
1500 req->rq_status = -ENOTCONN;
1501 GOTO(out, rc = -ENOTCONN);
1504 med = &req->rq_export->exp_mds_data;
1505 obd = req->rq_export->exp_obd;
1508 /* sanity check: if the xid matches, the request must
1509 * be marked as a resent or replayed */
1510 if (req->rq_xid == le64_to_cpu(med->med_mcd->mcd_last_xid) ||
1511 req->rq_xid == le64_to_cpu(med->med_mcd->mcd_last_close_xid))
1512 if (!(lustre_msg_get_flags(req->rq_reqmsg) &
1513 (MSG_RESENT | MSG_REPLAY))) {
1514 CERROR("rq_xid "LPU64" matches last_xid, "
1515 "expected RESENT flag\n",
1517 req->rq_status = -ENOTCONN;
1518 GOTO(out, rc = -EFAULT);
1520 /* else: note the opposite is not always true; a
1521 * RESENT req after a failover will usually not match
1522 * the last_xid, since it was likely never
1523 * committed. A REPLAYed request will almost never
1524 * match the last xid, however it could for a
1525 * committed, but still retained, open. */
1527 /* Check for aborted recovery. */
1528 spin_lock_bh(&obd->obd_processing_task_lock);
1529 abort_recovery = obd->obd_abort_recovery;
1530 recovering = obd->obd_recovering;
1531 spin_unlock_bh(&obd->obd_processing_task_lock);
1532 if (abort_recovery) {
1533 target_abort_recovery(obd);
1534 } else if (recovering) {
1535 rc = mds_filter_recovery_request(req, obd,
1537 if (rc || !should_process)
/* main opcode dispatch */
1542 switch (lustre_msg_get_opc(req->rq_reqmsg)) {
1544 DEBUG_REQ(D_INODE, req, "connect");
1545 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1546 rc = target_handle_connect(req, mds_handle);
1548 /* Now that we have an export, set mds. */
1549 obd = req->rq_export->exp_obd;
1550 mds = mds_req2mds(req);
1554 case MDS_DISCONNECT:
1555 DEBUG_REQ(D_INODE, req, "disconnect");
1556 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1557 rc = target_handle_disconnect(req);
1558 req->rq_status = rc; /* superfluous? */
1562 DEBUG_REQ(D_INODE, req, "getstatus");
1563 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1564 rc = mds_getstatus(req);
1568 DEBUG_REQ(D_INODE, req, "getattr");
1569 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1570 rc = mds_getattr(req, REQ_REC_OFF);
1574 DEBUG_REQ(D_INODE, req, "setxattr");
1575 OBD_FAIL_RETURN(OBD_FAIL_MDS_SETXATTR_NET, 0);
1576 rc = mds_setxattr(req);
1580 DEBUG_REQ(D_INODE, req, "getxattr");
1581 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETXATTR_NET, 0);
1582 rc = mds_getxattr(req);
1585 case MDS_GETATTR_NAME: {
1586 struct lustre_handle lockh = { 0 };
1587 DEBUG_REQ(D_INODE, req, "getattr_name");
1588 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1590 /* If this request gets a reconstructed reply, we won't be
1591 * acquiring any new locks in mds_getattr_lock, so we don't
1594 rc = mds_getattr_lock(req, REQ_REC_OFF, MDS_INODELOCK_UPDATE,
1596 /* this non-intent call (from an ioctl) is special */
1597 req->rq_status = rc;
/* drop the lock immediately: the caller only wanted the attrs */
1598 if (rc == 0 && lustre_handle_is_used(&lockh))
1599 ldlm_lock_decref(&lockh, LCK_CR);
1603 DEBUG_REQ(D_INODE, req, "statfs");
1604 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1605 rc = mds_statfs(req);
1609 DEBUG_REQ(D_INODE, req, "readpage");
1610 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1611 rc = mds_readpage(req, REQ_REC_OFF);
1613 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
/* MDS_REINT: peek at the reint opcode to size the reply buffers */
1620 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, REQ_REC_OFF,
1623 int size[4] = { sizeof(struct ptlrpc_body),
1624 sizeof(struct mds_body),
1625 mds->mds_max_mdsize,
1626 mds->mds_max_cookiesize };
1629 /* NB only peek inside req now; mds_reint() will swab it */
1631 CERROR ("Can't inspect opcode\n");
1636 if (lustre_msg_swabbed(req->rq_reqmsg))
/* NOTE(review): this ternary looks inverted -- as written it selects
 * reint_names[opc] when the entry is NULL or opc is in range, which
 * would print a NULL name; the fallback-string line is also missing
 * from this fragment.  Confirm against complete source. */
1639 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1640 (opc < sizeof(reint_names) / sizeof(reint_names[0]) ||
1641 reint_names[opc] == NULL) ? reint_names[opc] :
1644 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
/* unlink/rename replies carry unlink cookies; open needs more bufs */
1646 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1648 else if (opc == REINT_OPEN)
1653 rc = lustre_pack_reply(req, bufcount, size, NULL);
1657 rc = mds_reint(req, REQ_REC_OFF, NULL);
1658 fail = OBD_FAIL_MDS_REINT_NET_REP;
1663 DEBUG_REQ(D_INODE, req, "close");
1664 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1665 rc = mds_close(req, REQ_REC_OFF);
1668 case MDS_DONE_WRITING:
1669 DEBUG_REQ(D_INODE, req, "done_writing");
1670 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
1671 rc = mds_done_writing(req, REQ_REC_OFF);
1675 DEBUG_REQ(D_INODE, req, "pin");
1676 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
1677 rc = mds_pin(req, REQ_REC_OFF);
1681 DEBUG_REQ(D_INODE, req, "sync");
1682 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
1683 rc = mds_sync(req, REQ_REC_OFF);
1687 DEBUG_REQ(D_INODE, req, "set_info");
1688 rc = mds_set_info_rpc(req->rq_export, req);
1691 case MDS_QUOTACHECK:
1692 DEBUG_REQ(D_INODE, req, "quotacheck");
1693 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACHECK_NET, 0);
1694 rc = mds_handle_quotacheck(req);
1698 DEBUG_REQ(D_INODE, req, "quotactl");
1699 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACTL_NET, 0);
1700 rc = mds_handle_quotactl(req);
1704 DEBUG_REQ(D_INODE, req, "ping");
1705 rc = target_handle_ping(req);
1708 case OBD_LOG_CANCEL:
1709 CDEBUG(D_INODE, "log cancel\n");
1710 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1711 rc = -ENOTSUPP; /* la la la */
/* lock-manager requests handled on behalf of the LDLM service */
1715 DEBUG_REQ(D_INODE, req, "enqueue");
1716 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1717 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1718 ldlm_server_blocking_ast, NULL);
1719 fail = OBD_FAIL_LDLM_REPLY;
1722 DEBUG_REQ(D_INODE, req, "convert");
1723 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1724 rc = ldlm_handle_convert(req);
1726 case LDLM_BL_CALLBACK:
1727 case LDLM_CP_CALLBACK:
1728 DEBUG_REQ(D_INODE, req, "callback");
1729 CERROR("callbacks should not happen on MDS\n");
1731 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
/* llog origin-side handlers (config/replication logs) */
1733 case LLOG_ORIGIN_HANDLE_CREATE:
1734 DEBUG_REQ(D_INODE, req, "llog_init");
1735 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1736 rc = llog_origin_handle_create(req);
1738 case LLOG_ORIGIN_HANDLE_DESTROY:
1739 DEBUG_REQ(D_INODE, req, "llog_init");
1740 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1741 rc = llog_origin_handle_destroy(req);
1743 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1744 DEBUG_REQ(D_INODE, req, "llog next block");
1745 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1746 rc = llog_origin_handle_next_block(req);
1748 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
1749 DEBUG_REQ(D_INODE, req, "llog prev block");
1750 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1751 rc = llog_origin_handle_prev_block(req);
1753 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1754 DEBUG_REQ(D_INODE, req, "llog read header");
1755 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1756 rc = llog_origin_handle_read_header(req);
1758 case LLOG_ORIGIN_HANDLE_CLOSE:
1759 DEBUG_REQ(D_INODE, req, "llog close");
1760 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1761 rc = llog_origin_handle_close(req);
1764 DEBUG_REQ(D_INODE, req, "llog catinfo");
1765 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1766 rc = llog_catinfo(req);
/* unknown opcode: reject */
1769 req->rq_status = -ENOTSUPP;
1770 rc = ptlrpc_error(req);
/* handlers must leave no transaction open */
1774 LASSERT(current->journal_info == NULL);
1776 /* If we're DISCONNECTing, the mds_export_data is already freed */
1777 if (!rc && lustre_msg_get_opc(req->rq_reqmsg) != MDS_DISCONNECT) {
1778 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1780 /* I don't think last_xid is used for anyway, so I'm not sure
1781 if we need to care about last_close_xid here.*/
1782 lustre_msg_set_last_xid(req->rq_repmsg,
1783 le64_to_cpu(med->med_mcd->mcd_last_xid));
1785 target_committed_to_req(req);
/* during recovery the final replayed reply is queued, not sent */
1791 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LAST_REPLAY) {
1792 if (obd && obd->obd_recovering) {
1793 DEBUG_REQ(D_HA, req, "LAST_REPLAY, queuing reply");
1794 return target_queue_final_reply(req, rc);
1796 /* Lost a race with recovery; let the error path DTRT. */
1797 rc = req->rq_status = -ENOTCONN;
1800 target_send_reply(req, rc, fail);
1804 /* Update the server data on disk. This stores the new mount_count and
1805 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1806 * then the server last_rcvd value may be less than that of the clients.
1807 * This will alert us that we may need to do client recovery.
1809 * Also assumes for mds_last_transno that we are not modifying it (no locking).
1811 int mds_update_server_data(struct obd_device *obd, int force_sync)
1813 struct mds_obd *mds = &obd->u.mds;
1814 struct lr_server_data *lsd = mds->mds_server_data;
1815 struct file *filp = mds->mds_rcvd_filp;
1816 struct lvfs_run_ctxt saved;
1821 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
1822 mds->mds_mount_count, mds->mds_last_transno);
/* snapshot the in-memory last_transno into the on-disk record under lock */
1824 spin_lock(&mds->mds_transno_lock);
1825 lsd->lsd_last_transno = cpu_to_le64(mds->mds_last_transno);
1826 spin_unlock(&mds->mds_transno_lock);
/* switch to the MDS filesystem context to write the last_rcvd file */
1828 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1829 rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off,force_sync);
1830 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1832 CERROR("error writing MDS server data: rc = %d\n", rc);
/*
 * fsoptions_to_mds_flags(): scan a comma-separated mount-options string
 * and set/clear the user_xattr and acl feature flags on the mds_obd.
 * ACL options are honored only when CONFIG_FS_POSIX_ACL is built in.
 * NOTE(review): the loop header and option-advance lines are missing
 * from this fragment.
 */
1837 static void fsoptions_to_mds_flags(struct mds_obd *mds, char *options)
/* find the end of the current comma-separated token */
1847 while (*p && *p != ',')
1851 if (len == sizeof("user_xattr") - 1 &&
1852 memcmp(options, "user_xattr", len) == 0) {
1853 mds->mds_fl_user_xattr = 1;
1854 LCONSOLE_INFO("Enabling user_xattr\n");
1855 } else if (len == sizeof("nouser_xattr") - 1 &&
1856 memcmp(options, "nouser_xattr", len) == 0) {
1857 mds->mds_fl_user_xattr = 0;
1858 LCONSOLE_INFO("Disabling user_xattr\n");
1859 } else if (len == sizeof("acl") - 1 &&
1860 memcmp(options, "acl", len) == 0) {
1861 #ifdef CONFIG_FS_POSIX_ACL
1862 mds->mds_fl_acl = 1;
1863 LCONSOLE_INFO("Enabling ACL\n");
1865 CWARN("ignoring unsupported acl mount option\n");
1867 } else if (len == sizeof("noacl") - 1 &&
1868 memcmp(options, "noacl", len) == 0) {
1869 #ifdef CONFIG_FS_POSIX_ACL
1870 mds->mds_fl_acl = 0;
1871 LCONSOLE_INFO("Disabling ACL\n");
1879 /* mount the file system (secretly). lustre_cfg parameters are:
/*
 * mds_setup(): bring up an MDS obd device.  Acquires the pre-mounted
 * filesystem from lustre_fill_super, initializes fsfilt ops, locks/
 * counters, the LDLM namespace with the MDS intent policy, lprocfs
 * stats, the on-disk state (mds_fs_setup), the commit-confd thread,
 * the optional LOV profile, quota, and the group-upcall cache, then
 * runs mds_postsetup() and logs the serving/recovery banner.
 * Error paths unwind in reverse order via the err_* labels.
 * NOTE(review): many declarations and brace/label lines are missing
 * from this fragment.
 */
1885 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
1887 struct lprocfs_static_vars lvars;
1888 struct lustre_cfg* lcfg = buf;
1889 struct mds_obd *mds = &obd->u.mds;
1890 struct lustre_sb_info *lsi;
1891 struct lustre_mount_info *lmi;
1892 struct vfsmount *mnt;
1893 struct obd_uuid uuid;
1900 /* setup 1:/dev/loop/0 2:ext3 3:mdsA 4:errors=remount-ro,iopen_nopriv */
/* compile-time layout check: obt must alias mds_obt */
1902 CLASSERT(offsetof(struct obd_device, u.obt) ==
1903 offsetof(struct obd_device, u.mds.mds_obt));
1905 if (lcfg->lcfg_bufcount < 3)
1908 if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
/* the filesystem was already mounted by lustre_fill_super */
1911 lmi = server_get_mount(obd->obd_name);
1913 CERROR("Not mounted in lustre_fill_super?\n");
1917 /* We mounted in lustre_fill_super.
1918 lcfg bufs 1, 2, 4 (device, fstype, mount opts) are ignored.*/
1919 lsi = s2lsi(lmi->lmi_sb);
1920 fsoptions_to_mds_flags(mds, lsi->lsi_ldd->ldd_mount_opts);
1921 fsoptions_to_mds_flags(mds, lsi->lsi_lmd->lmd_opts);
1923 obd->obd_fsops = fsfilt_get_ops(MT_STR(lsi->lsi_ldd));
1924 if (IS_ERR(obd->obd_fsops))
1925 GOTO(err_put, rc = PTR_ERR(obd->obd_fsops));
1927 CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
1929 LASSERT(!lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb)));
1931 sema_init(&mds->mds_epoch_sem, 1);
1932 spin_lock_init(&mds->mds_transno_lock);
1933 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
1934 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
1935 mds->mds_atime_diff = MAX_ATIME_DIFF;
/* per-device LDLM namespace, with the MDS intent policy attached */
1937 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
1938 obd->obd_namespace = ldlm_namespace_new(ns_name, LDLM_NAMESPACE_SERVER);
1939 if (obd->obd_namespace == NULL) {
1941 GOTO(err_ops, rc = -ENOMEM);
1943 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
1945 lprocfs_init_vars(mds, &lvars);
1946 if (lprocfs_obd_setup(obd, lvars.obd_vars) == 0 &&
1947 lprocfs_alloc_obd_stats(obd, LPROC_MDS_LAST) == 0) {
1948 /* Init private stats here */
1949 mds_stats_counter_init(obd->obd_stats);
1950 obd->obd_proc_exports = proc_mkdir("exports",
1951 obd->obd_proc_entry);
1954 rc = mds_fs_setup(obd, mnt);
1956 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
1961 rc = llog_start_commit_thread();
/* optional buf 3 names the LOV profile to connect to later */
1965 if (lcfg->lcfg_bufcount >= 4 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
1968 ll_generate_random_uuid(uuid);
1969 class_uuid_unparse(uuid, &mds->mds_lov_uuid);
1971 OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
1972 if (mds->mds_profile == NULL)
1973 GOTO(err_fs, rc = -ENOMEM);
1975 strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
1976 LUSTRE_CFG_BUFLEN(lcfg, 3));
1979 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
1980 "mds_ldlm_client", &obd->obd_ldlm_client);
1981 obd->obd_replayable = 1;
1983 rc = lquota_setup(mds_quota_interface_ref, obd);
/* supplementary-group upcall cache for client credentials */
1987 mds->mds_group_hash = upcall_cache_init(obd->obd_name);
1988 if (IS_ERR(mds->mds_group_hash)) {
1989 rc = PTR_ERR(mds->mds_group_hash);
1990 mds->mds_group_hash = NULL;
1991 GOTO(err_qctxt, rc);
1994 /* Don't wait for mds_postrecov trying to clear orphans */
1995 obd->obd_async_recov = 1;
1996 rc = mds_postsetup(obd);
1997 /* Bug 11557 - allow async abort_recov start
1998 FIXME can remove most of this obd_async_recov plumbing
1999 obd->obd_async_recov = 0;
2002 GOTO(err_qctxt, rc);
2004 uuid_ptr = fsfilt_uuid(obd, obd->u.obt.obt_sb);
2005 if (uuid_ptr != NULL) {
2006 class_uuid_unparse(uuid_ptr, &uuid);
2012 label = fsfilt_get_label(obd, obd->u.obt.obt_sb);
/* announce service start; warn if client recovery is pending */
2013 if (obd->obd_recovering) {
2014 LCONSOLE_WARN("MDT %s now serving %s (%s%s%s), but will be in "
2015 "recovery until %d %s reconnect, or if no clients"
2016 " reconnect for %d:%.02d; during that time new "
2017 "clients will not be allowed to connect. "
2018 "Recovery progress can be monitored by watching "
2019 "/proc/fs/lustre/mds/%s/recovery_status.\n",
2020 obd->obd_name, lustre_cfg_string(lcfg, 1),
2021 label ?: "", label ? "/" : "", str,
2022 obd->obd_recoverable_clients,
2023 (obd->obd_recoverable_clients == 1) ?
2024 "client" : "clients",
2025 (int)(OBD_RECOVERY_TIMEOUT) / 60,
2026 (int)(OBD_RECOVERY_TIMEOUT) % 60,
2029 LCONSOLE_INFO("MDT %s now serving %s (%s%s%s) with recovery "
2030 "%s\n", obd->obd_name, lustre_cfg_string(lcfg, 1),
2031 label ?: "", label ? "/" : "", str,
2032 obd->obd_replayable ? "enabled" : "disabled");
2035 if (ldlm_timeout == LDLM_TIMEOUT_DEFAULT)
/* error unwind: reverse order of the setup steps above */
2041 lquota_cleanup(mds_quota_interface_ref, obd);
2043 /* No extra cleanup needed for llog_init_commit_thread() */
2044 mds_fs_cleanup(obd);
2045 upcall_cache_cleanup(mds->mds_group_hash);
2046 mds->mds_group_hash = NULL;
2048 lprocfs_obd_cleanup(obd);
2049 lprocfs_free_obd_stats(obd);
2050 ldlm_namespace_free(obd->obd_namespace, 0);
2051 obd->obd_namespace = NULL;
2053 fsfilt_put_ops(obd->obd_fsops);
2055 server_put_mount(obd->obd_name, mnt);
2056 obd->u.obt.obt_sb = NULL;
/*
 * mds_lov_clean(): tear down the MDS's connection to its LOV: free the
 * stored profile, detach ourselves as the LOV's observer, propagate the
 * force/fail shutdown flags, disconnect the LOV export and clean it up.
 */
2060 static int mds_lov_clean(struct obd_device *obd)
2062 struct mds_obd *mds = &obd->u.mds;
2063 struct obd_device *osc = mds->mds_osc_obd;
2066 if (mds->mds_profile) {
2067 class_del_profile(mds->mds_profile);
2068 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
2069 mds->mds_profile = NULL;
2072 /* There better be a lov */
2076 RETURN(PTR_ERR(osc));
2078 obd_register_observer(osc, NULL);
2080 /* Give lov our same shutdown flags */
2081 osc->obd_force = obd->obd_force;
2082 osc->obd_fail = obd->obd_fail;
2084 /* Cleanup the lov */
2085 obd_disconnect(mds->mds_osc_exp);
2086 class_manual_cleanup(osc);
2087 mds->mds_osc_exp = NULL;
/*
 * mds_postsetup(): second-stage setup run from mds_setup(): initialize
 * the config and LOV-EA llog contexts, then (if a profile was stored)
 * look it up and connect to the LOV it names.  On error, the llog
 * contexts are cleaned up.
 */
2092 static int mds_postsetup(struct obd_device *obd)
2094 struct mds_obd *mds = &obd->u.mds;
2098 rc = llog_setup(obd, LLOG_CONFIG_ORIG_CTXT, obd, 0, NULL,
2103 rc = llog_setup(obd, LLOG_LOVEA_ORIG_CTXT, obd, 0, NULL,
2108 if (mds->mds_profile) {
2109 struct lustre_profile *lprof;
2110 /* The profile defines which osc and mdc to connect to, for a
2111 client. We reuse that here to figure out the name of the
2112 lov to use (and ignore lprof->lp_mdc).
2113 The profile was set in the config log with
2114 LCFG_MOUNTOPT profilenm oscnm mdcnm */
2115 lprof = class_get_profile(mds->mds_profile);
2116 if (lprof == NULL) {
2117 CERROR("No profile found: %s\n", mds->mds_profile);
2118 GOTO(err_cleanup, rc = -ENOENT);
2120 rc = mds_lov_connect(obd, lprof->lp_osc);
2122 GOTO(err_cleanup, rc);
/* error unwind: drop both llog contexts set up above */
2129 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
2130 llog_cleanup(llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT));
/*
 * mds_postrecov(): work performed after client recovery completes:
 * push the next object ids to the LOV, clean the PENDING (orphan)
 * directory, notify the LOV so each target synchronizes, and kick off
 * quota recovery.  Must not be called while still recovering.
 */
2134 int mds_postrecov(struct obd_device *obd)
2142 LASSERT(!obd->obd_recovering);
2143 LASSERT(llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT) != NULL);
2145 /* FIXME why not put this in the synchronize? */
2146 /* set nextid first, so we are sure it happens */
2147 rc = mds_lov_set_nextid(obd);
2149 CERROR("%s: mds_lov_set_nextid failed %d\n",
2154 /* clean PENDING dir */
2155 rc = mds_cleanup_pending(obd);
2159 /* FIXME Does target_finish_recovery really need this to block? */
2160 /* Notify the LOV, which will in turn call mds_notify for each tgt */
2161 /* This means that we have to hack obd_notify to think we're obd_set_up
2162 during mds_lov_connect. */
2163 obd_notify(obd->u.mds.mds_osc_obd, NULL,
2164 obd->obd_async_recov ? OBD_NOTIFY_SYNC_NONBLOCK :
2165 OBD_NOTIFY_SYNC, NULL);
2167 /* quota recovery */
2168 lquota_recovery(mds_quota_interface_ref, obd);
2174 /* We need to be able to stop an mds_lov_synchronize */
/*
 * mds_lov_early_clean(): during a forced/failed shutdown, abort any
 * in-flight LOV synchronization early via obd_precleanup(EARLY).
 * No-op when there is no LOV or the shutdown is a clean one.
 */
2175 static int mds_lov_early_clean(struct obd_device *obd)
2177 struct mds_obd *mds = &obd->u.mds;
2178 struct obd_device *osc = mds->mds_osc_obd;
2180 if (!osc || (!obd->obd_force && !obd->obd_fail))
2183 CDEBUG(D_HA, "abort inflight\n");
2184 return (obd_precleanup(osc, OBD_CLEANUP_EARLY));
/*
 * mds_precleanup(): staged teardown driven by obd_precleanup().
 * EXPORTS: stop recovery and abort LOV synchronization;
 * SELF_EXP: disconnect the LOV and clean up all llog contexts;
 * EARLY/OBD: nothing visible in this fragment.
 */
2187 static int mds_precleanup(struct obd_device *obd, enum obd_cleanup_stage stage)
2193 case OBD_CLEANUP_EARLY:
2195 case OBD_CLEANUP_EXPORTS:
2196 target_cleanup_recovery(obd);
2197 mds_lov_early_clean(obd);
2199 case OBD_CLEANUP_SELF_EXP:
2200 mds_lov_disconnect(obd);
2202 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
2203 llog_cleanup(llog_get_context(obd, LLOG_LOVEA_ORIG_CTXT));
2204 rc = obd_llog_finish(obd, 0);
2206 case OBD_CLEANUP_OBD:
/*
 * mds_cleanup(): final teardown of the MDS device, mirroring mds_setup():
 * drop the LOV export ref, remove lprocfs entries, stop quota, flush the
 * server data to disk one last time, free LOV object-id state, clean up
 * the filesystem methods and upcall cache, release the mount, free the
 * LDLM namespace, and cancel any still-armed recovery timer.
 */
2212 static int mds_cleanup(struct obd_device *obd)
2214 struct mds_obd *mds = &obd->u.mds;
2215 lvfs_sbdev_type save_dev;
2218 if (obd->u.obt.obt_sb == NULL)
2220 save_dev = lvfs_sbdev(obd->u.obt.obt_sb);
2222 if (mds->mds_osc_exp)
2223 /* lov export was disconnected by mds_lov_clean;
2224 we just need to drop our ref */
2225 class_export_put(mds->mds_osc_exp);
2227 lprocfs_obd_cleanup(obd);
2228 lprocfs_free_obd_stats(obd);
2230 lquota_cleanup(mds_quota_interface_ref, obd);
/* force_sync=1: make sure last_rcvd hits disk before unmount */
2232 mds_update_server_data(obd, 1);
2233 if (mds->mds_lov_objids != NULL)
2234 OBD_FREE(mds->mds_lov_objids, mds->mds_lov_objids_size);
2235 mds_fs_cleanup(obd);
2237 upcall_cache_cleanup(mds->mds_group_hash);
2238 mds->mds_group_hash = NULL;
2240 server_put_mount(obd->obd_name, mds->mds_vfsmnt);
2241 obd->u.obt.obt_sb = NULL;
2243 ldlm_namespace_free(obd->obd_namespace, obd->obd_force);
/* stop a recovery that never finished before shutdown */
2245 spin_lock_bh(&obd->obd_processing_task_lock);
2246 if (obd->obd_recovering) {
2247 target_cancel_recovery_timer(obd);
2248 obd->obd_recovering = 0;
2250 spin_unlock_bh(&obd->obd_processing_task_lock);
2252 fsfilt_put_ops(obd->obd_fsops);
2254 LCONSOLE_INFO("MDT %s has stopped.\n", obd->obd_name);
/*
 * fixup_handle_for_resent_req(): for a RESENT lock request, search the
 * export's held locks for one matching the client's remote handle; if
 * found, restore its local cookie into lockh (and optionally return a
 * referenced lock via old_lock).  If no match and the xid does not match
 * a last-sent xid, clear MSG_RESENT so the request is handled normally.
 */
2259 static void fixup_handle_for_resent_req(struct ptlrpc_request *req, int offset,
2260 struct ldlm_lock *new_lock,
2261 struct ldlm_lock **old_lock,
2262 struct lustre_handle *lockh)
2264 struct obd_export *exp = req->rq_export;
2265 struct ldlm_request *dlmreq =
2266 lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*dlmreq));
2267 struct lustre_handle remote_hdl = dlmreq->lock_handle[0];
2268 struct list_head *iter;
2270 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
/* walk the export's held locks looking for the client's handle */
2273 spin_lock(&exp->exp_ldlm_data.led_lock);
2274 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
2275 struct ldlm_lock *lock;
2276 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
2277 if (lock == new_lock)
2279 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
2280 lockh->cookie = lock->l_handle.h_cookie;
2281 LDLM_DEBUG(lock, "restoring lock cookie");
2282 DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64,
2285 *old_lock = LDLM_LOCK_GET(lock);
2286 spin_unlock(&exp->exp_ldlm_data.led_lock);
2290 spin_unlock(&exp->exp_ldlm_data.led_lock);
2292 /* If the xid matches, then we know this is a resent request,
2293 * and allow it. (It's probably an OPEN, for which we don't
2296 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_xid))
2300 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_close_xid))
2303 /* This remote handle isn't enqueued, so we never received or
2304 * processed this request. Clear MSG_RESENT, because it can
2305 * be handled like any normal request now. */
2307 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
2309 DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64,
/* intent_disposition(): test a DISP_* flag in the intent reply's
 * lock_policy_res1 bitmask (NULL-check line missing in this fragment). */
2313 int intent_disposition(struct ldlm_reply *rep, int flag)
2317 return (rep->lock_policy_res1 & flag);
/* intent_set_disposition(): set a DISP_* flag in the intent reply's
 * lock_policy_res1 bitmask (NULL-check line missing in this fragment). */
2320 void intent_set_disposition(struct ldlm_reply *rep, int flag)
2324 rep->lock_policy_res1 |= flag;
/*
 * mds_intent_policy(): LDLM intent policy for the MDS namespace.
 * Decodes the ldlm_intent from the enqueue request, sizes and packs the
 * reply (optionally with ACL or unlink-cookie buffers), executes the
 * intent (open/create via mds_reint, getattr/lookup via
 * mds_getattr_lock), and then either aborts the lock (negative result,
 * no lock granted) or replaces the client's prospective lock with the
 * one the handler actually took, fixing up ownership, ASTs and the
 * remote handle.  NOTE(review): several case labels, declarations and
 * brace lines are missing from this fragment.
 */
2327 static int mds_intent_policy(struct ldlm_namespace *ns,
2328 struct ldlm_lock **lockp, void *req_cookie,
2329 ldlm_mode_t mode, int flags, void *data)
2331 struct ptlrpc_request *req = req_cookie;
2332 struct ldlm_lock *lock = *lockp;
2333 struct ldlm_intent *it;
2334 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
2335 struct ldlm_reply *rep;
2336 struct lustre_handle lockh = { 0 };
2337 struct ldlm_lock *new_lock = NULL;
2338 int getattr_part = MDS_INODELOCK_UPDATE;
2339 int repsize[5] = { [MSG_PTLRPC_BODY_OFF] = sizeof(struct ptlrpc_body),
2340 [DLM_LOCKREPLY_OFF] = sizeof(struct ldlm_reply),
2341 [DLM_REPLY_REC_OFF] = sizeof(struct mds_body),
2342 [DLM_REPLY_REC_OFF+1] = mds->mds_max_mdsize };
2343 int repbufcnt = 4, rc;
2346 LASSERT(req != NULL);
2348 if (lustre_msg_bufcount(req->rq_reqmsg) <= DLM_INTENT_IT_OFF) {
2349 /* No intent was provided */
2350 rc = lustre_pack_reply(req, 2, repsize, NULL);
2355 it = lustre_swab_reqbuf(req, DLM_INTENT_IT_OFF, sizeof(*it),
2356 lustre_swab_ldlm_intent);
2358 CERROR("Intent missing\n");
2359 RETURN(req->rq_status = -EFAULT);
2362 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
/* size optional reply buffers: ACL data or unlink llog cookies */
2364 if ((req->rq_export->exp_connect_flags & OBD_CONNECT_ACL) &&
2365 (it->opc & (IT_OPEN | IT_GETATTR | IT_LOOKUP)))
2366 /* we should never allow OBD_CONNECT_ACL if not configured */
2367 repsize[repbufcnt++] = LUSTRE_POSIX_ACL_MAX_SIZE;
2368 else if (it->opc & IT_UNLINK)
2369 repsize[repbufcnt++] = mds->mds_max_cookiesize;
2371 rc = lustre_pack_reply(req, repbufcnt, repsize, NULL);
2373 RETURN(req->rq_status = rc);
2375 rep = lustre_msg_buf(req->rq_repmsg, DLM_LOCKREPLY_OFF, sizeof(*rep));
2376 intent_set_disposition(rep, DISP_IT_EXECD);
2379 /* execute policy */
2380 switch ((long)it->opc) {
2382 case IT_CREAT|IT_OPEN:
2383 mds_counter_incr(req->rq_export, LPROC_MDS_OPEN);
2384 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock, NULL,
2386 /* XXX swab here to assert that an mds_open reint
2387 * packet is following */
2388 rep->lock_policy_res2 = mds_reint(req, DLM_INTENT_REC_OFF,
2391 /* We abort the lock if the lookup was negative and
2392 * we did not make it to the OPEN portion */
2393 if (!intent_disposition(rep, DISP_LOOKUP_EXECD))
2394 RETURN(ELDLM_LOCK_ABORTED);
2395 if (intent_disposition(rep, DISP_LOOKUP_NEG) &&
2396 !intent_disposition(rep, DISP_OPEN_OPEN))
2399 /* If there was an error of some sort or if we are not
2400 * returning any locks */
2401 if (rep->lock_policy_res2 ||
2402 !intent_disposition(rep, DISP_OPEN_LOCK))
2403 RETURN(ELDLM_LOCK_ABORTED);
/* getattr/lookup intents: pick the inodelock bits to request */
2406 getattr_part = MDS_INODELOCK_LOOKUP;
2408 getattr_part |= MDS_INODELOCK_LOOKUP;
2409 OBD_COUNTER_INCREMENT(req->rq_export->exp_obd, getattr);
2411 fixup_handle_for_resent_req(req, DLM_LOCKREQ_OFF, lock,
2414 /* INODEBITS_INTEROP: if this lock was converted from a
2415 * plain lock (client does not support inodebits), then
2416 * child lock must be taken with both lookup and update
2417 * bits set for all operations.
2419 if (!(req->rq_export->exp_connect_flags & OBD_CONNECT_IBITS))
2420 getattr_part = MDS_INODELOCK_LOOKUP |
2421 MDS_INODELOCK_UPDATE;
2423 rep->lock_policy_res2 = mds_getattr_lock(req,DLM_INTENT_REC_OFF,
2424 getattr_part, &lockh);
2425 /* FIXME: LDLM can set req->rq_status. MDS sets
2426 policy_res{1,2} with disposition and status.
2427 - replay: returns 0 & req->status is old status
2428 - otherwise: returns req->status */
2429 if (intent_disposition(rep, DISP_LOOKUP_NEG))
2430 rep->lock_policy_res2 = 0;
2431 if (!intent_disposition(rep, DISP_LOOKUP_POS) ||
2432 rep->lock_policy_res2)
2433 RETURN(ELDLM_LOCK_ABORTED);
2434 if (req->rq_status != 0) {
2436 rep->lock_policy_res2 = req->rq_status;
2437 RETURN(ELDLM_LOCK_ABORTED);
2441 CERROR("Unhandled intent "LPD64"\n", it->opc);
2445 /* By this point, whatever function we called above must have either
2446 * filled in 'lockh', been an intent replay, or returned an error. We
2447 * want to allow replayed RPCs to not get a lock, since we would just
2448 * drop it below anyways because lock replay is done separately by the
2449 * client afterwards. For regular RPCs we want to give the new lock to
2450 * the client instead of whatever lock it was about to get. */
2451 if (new_lock == NULL)
2452 new_lock = ldlm_handle2lock(&lockh);
2453 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
2456 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
2457 it->opc, lockh.cookie);
2459 /* If we've already given this lock to a client once, then we should
2460 * have no readers or writers. Otherwise, we should have one reader
2461 * _or_ writer ref (which will be zeroed below) before returning the
2462 * lock to a client. */
2463 if (new_lock->l_export == req->rq_export) {
2464 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2466 LASSERT(new_lock->l_export == NULL);
2467 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2472 if (new_lock->l_export == req->rq_export) {
2473 /* Already gave this to the client, which means that we
2474 * reconstructed a reply. */
2475 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
2477 RETURN(ELDLM_LOCK_REPLACED);
2480 /* Fixup the lock to be given to the client */
2481 lock_res_and_lock(new_lock);
2482 new_lock->l_readers = 0;
2483 new_lock->l_writers = 0;
2485 new_lock->l_export = class_export_get(req->rq_export);
2486 spin_lock(&req->rq_export->exp_ldlm_data.led_lock);
2487 list_add(&new_lock->l_export_chain,
2488 &new_lock->l_export->exp_ldlm_data.led_held_locks);
2489 spin_unlock(&req->rq_export->exp_ldlm_data.led_lock);
2491 new_lock->l_blocking_ast = lock->l_blocking_ast;
2492 new_lock->l_completion_ast = lock->l_completion_ast;
/* client addresses the lock by the handle it sent originally */
2494 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
2495 sizeof(lock->l_remote_handle));
2497 new_lock->l_flags &= ~LDLM_FL_LOCAL;
2499 unlock_res_and_lock(new_lock);
2500 LDLM_LOCK_PUT(new_lock);
2502 RETURN(ELDLM_LOCK_REPLACED);
/*
 * mdt_setup(): start the three MDS ptlrpc services (main request,
 * setattr, readpage) and their thread pools, plus the ping evictor.
 * Thread counts come from the mds_num_threads module parameter when
 * set, otherwise are auto-sized from CPU count and memory.  Error
 * paths unregister any services already started.
 * NOTE(review): assignment targets/labels on some lines are missing
 * from this fragment.
 */
2505 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
2507 struct mds_obd *mds = &obd->u.mds;
2508 struct lprocfs_static_vars lvars;
2509 int mds_min_threads;
2510 int mds_max_threads;
2514 lprocfs_init_vars(mdt, &lvars);
2515 lprocfs_obd_setup(obd, lvars.obd_vars);
2517 sema_init(&mds->mds_health_sem, 1);
2519 if (mds_num_threads) {
2520 /* If mds_num_threads is set, it is the min and the max. */
2521 if (mds_num_threads > MDS_THREADS_MAX)
2522 mds_num_threads = MDS_THREADS_MAX;
2523 if (mds_num_threads < MDS_THREADS_MIN)
2524 mds_num_threads = MDS_THREADS_MIN;
2525 mds_max_threads = mds_min_threads = mds_num_threads;
2527 /* Base min threads on memory and cpus */
2528 mds_min_threads = smp_num_cpus * num_physpages >>
2529 (27 - CFS_PAGE_SHIFT);
2530 if (mds_min_threads < MDS_THREADS_MIN)
2531 mds_min_threads = MDS_THREADS_MIN;
2532 /* Largest auto threads start value */
2533 if (mds_min_threads > 32)
2534 mds_min_threads = 32;
2535 mds_max_threads = min(MDS_THREADS_MAX, mds_min_threads * 4);
/* main MDS request service */
2539 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2540 MDS_MAXREPSIZE, MDS_REQUEST_PORTAL,
2541 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_TIMEOUT,
2542 mds_handle, LUSTRE_MDS_NAME,
2543 obd->obd_proc_entry, NULL,
2544 mds_min_threads, mds_max_threads, "ll_mdt");
2546 if (!mds->mds_service) {
2547 CERROR("failed to start service\n");
2548 GOTO(err_lprocfs, rc = -ENOMEM);
2551 rc = ptlrpc_start_threads(obd, mds->mds_service);
2553 GOTO(err_thread, rc);
/* dedicated setattr service on its own portal */
2555 mds->mds_setattr_service =
2556 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2557 MDS_MAXREPSIZE, MDS_SETATTR_PORTAL,
2558 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_TIMEOUT,
2559 mds_handle, "mds_setattr",
2560 obd->obd_proc_entry, NULL,
2561 mds_min_threads, mds_max_threads,
2563 if (!mds->mds_setattr_service) {
2564 CERROR("failed to start getattr service\n");
2565 GOTO(err_thread, rc = -ENOMEM);
2568 rc = ptlrpc_start_threads(obd, mds->mds_setattr_service);
2570 GOTO(err_thread2, rc);
/* dedicated readpage (readdir) service on its own portal */
2572 mds->mds_readpage_service =
2573 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2574 MDS_MAXREPSIZE, MDS_READPAGE_PORTAL,
2575 MDC_REPLY_PORTAL, MDS_SERVICE_WATCHDOG_TIMEOUT,
2576 mds_handle, "mds_readpage",
2577 obd->obd_proc_entry, NULL,
2578 MDS_THREADS_MIN_READPAGE, mds_max_threads,
2580 if (!mds->mds_readpage_service) {
2581 CERROR("failed to start readpage service\n");
2582 GOTO(err_thread2, rc = -ENOMEM);
2585 rc = ptlrpc_start_threads(obd, mds->mds_readpage_service);
2588 GOTO(err_thread3, rc);
2590 ping_evictor_start();
/* error unwind: unregister services in reverse start order */
2595 ptlrpc_unregister_service(mds->mds_readpage_service);
2596 mds->mds_readpage_service = NULL;
2598 ptlrpc_unregister_service(mds->mds_setattr_service);
2599 mds->mds_setattr_service = NULL;
2601 ptlrpc_unregister_service(mds->mds_service);
2602 mds->mds_service = NULL;
2604 lprocfs_obd_cleanup(obd);
/* mdt_cleanup(): obd_ops cleanup method for the "mdt" device.  Stops the
 * ping evictor, then unregisters the three ptlrpc services started by
 * mdt_setup() while holding mds_health_sem so a concurrent
 * mdt_health_check() can never probe a half-torn-down service, and finally
 * removes the lprocfs entries.  Pointers are NULLed under the semaphore for
 * the same reason.  (ENTRY/RETURN lines are missing from this extract.) */
2608 static int mdt_cleanup(struct obd_device *obd)
2610 struct mds_obd *mds = &obd->u.mds;
2613 ping_evictor_stop();
2615 down(&mds->mds_health_sem);
2616 ptlrpc_unregister_service(mds->mds_readpage_service);
2617 ptlrpc_unregister_service(mds->mds_setattr_service);
2618 ptlrpc_unregister_service(mds->mds_service);
2619 mds->mds_readpage_service = NULL;
2620 mds->mds_setattr_service = NULL;
2621 mds->mds_service = NULL;
2622 up(&mds->mds_health_sem);
2624 lprocfs_obd_cleanup(obd);
/* mdt_health_check(): OR together the health status of the three ptlrpc
 * services.  mds_health_sem serializes against mdt_cleanup() freeing the
 * service structures mid-check.  Per the trailing comment, the convention
 * is 0 = healthy, 1 = unhealthy.  (The declaration/initialization of rc and
 * the final return are missing from this extract.) */
2629 static int mdt_health_check(struct obd_device *obd)
2631 struct mds_obd *mds = &obd->u.mds;
2634 down(&mds->mds_health_sem);
2635 rc |= ptlrpc_service_health_check(mds->mds_readpage_service);
2636 rc |= ptlrpc_service_health_check(mds->mds_setattr_service);
2637 rc |= ptlrpc_service_health_check(mds->mds_service);
2638 up(&mds->mds_health_sem);
2641 * health_check to return 0 on healthy
2642 * and 1 on unhealthy.
/* mds_lvfs_fid2dentry(): lvfs callback that resolves a fid (inode id +
 * generation) to a dentry through mds_fid2dentry(); "data" is the owning
 * obd_device.  NOTE(review): the local fid declaration and the line storing
 * "id" into it are missing from this extract -- only the generation
 * assignment is visible.  "gr" appears unused in the visible lines. */
2650 static struct dentry *mds_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
2653 struct obd_device *obd = data;
2656 fid.generation = gen;
2657 return mds_fid2dentry(&obd->u.mds, &fid, NULL);
/* mds_health_check(): report the "mds" device unhealthy when the backing
 * filesystem superblock has gone read-only (s_flags & MS_RDONLY -- e.g.
 * after a journal abort).  With USE_HEALTH_CHECK_WRITE it additionally does
 * a write probe through mds_health_check_filp via lvfs_check_io_health().
 * (The rc declaration, the read-only branch body, and the return are
 * missing from this extract.) */
2660 static int mds_health_check(struct obd_device *obd)
2662 struct obd_device_target *odt = &obd->u.obt;
2663 #ifdef USE_HEALTH_CHECK_WRITE
2664 struct mds_obd *mds = &obd->u.mds;
2668 if (odt->obt_sb->s_flags & MS_RDONLY)
2671 #ifdef USE_HEALTH_CHECK_WRITE
2672 LASSERT(mds->mds_health_check_filp != NULL);
2673 rc |= !!lvfs_check_io_health(obd, mds->mds_health_check_filp);
/* mds_process_config(): obd_ops config method.  Routes a PARAM_MDT
 * lustre_cfg record to the matching lprocfs tunable in the "mds" obd_vars
 * table via class_process_proc_param().  (The rc declaration and return are
 * missing from this extract.) */
2679 static int mds_process_config(struct obd_device *obd, obd_count len, void *buf)
2681 struct lustre_cfg *lcfg = buf;
2682 struct lprocfs_static_vars lvars;
2685 lprocfs_init_vars(mds, &lvars);
2687 rc = class_process_proc_param(PARAM_MDT, lvars.obd_vars, lcfg, obd);
/* lvfs callback table: lets the generic lvfs layer turn fids into dentries
 * through this MDS (uses the legacy GNU "label:" initializer syntax). */
2692 struct lvfs_callback_ops mds_lvfs_ops = {
2693 l_fid2dentry: mds_lvfs_fid2dentry,
2696 /* use obd ops to offer management infrastructure */
/* Full method table for the "mds" obd type: connection lifecycle, export
 * management, setup/cleanup, statfs/ioctl, object create/destroy, llog
 * hooks, notification, health check and runtime config.  Quota methods may
 * be patched in later by init_obd_quota_ops() in mds_init(). */
2697 static struct obd_ops mds_obd_ops = {
2698 .o_owner = THIS_MODULE,
2699 .o_connect = mds_connect,
2700 .o_reconnect = mds_reconnect,
2701 .o_init_export = mds_init_export,
2702 .o_destroy_export = mds_destroy_export,
2703 .o_disconnect = mds_disconnect,
2704 .o_setup = mds_setup,
2705 .o_precleanup = mds_precleanup,
2706 .o_cleanup = mds_cleanup,
2707 .o_postrecov = mds_postrecov,
2708 .o_statfs = mds_obd_statfs,
2709 .o_iocontrol = mds_iocontrol,
2710 .o_create = mds_obd_create,
2711 .o_destroy = mds_obd_destroy,
2712 .o_llog_init = mds_llog_init,
2713 .o_llog_finish = mds_llog_finish,
2714 .o_notify = mds_notify,
2715 .o_health_check = mds_health_check,
2716 .o_process_config = mds_process_config,
/* Method table for the "mdt" (request-serving) obd type: only lifecycle and
 * health methods -- actual RPC handling goes through the ptlrpc services
 * that mdt_setup() starts. */
2719 static struct obd_ops mdt_obd_ops = {
2720 .o_owner = THIS_MODULE,
2721 .o_setup = mdt_setup,
2722 .o_cleanup = mdt_cleanup,
2723 .o_health_check = mdt_health_check,
/* Quota interface pointer, resolved at module load from the optional
 * "lquota" module via PORTAL_SYMBOL_GET; presumably stays NULL when lquota
 * is absent -- later calls appear NULL-tolerant (TODO confirm). */
2726 quota_interface_t *mds_quota_interface_ref;
2727 extern quota_interface_t mds_quota_interface;
/* mds_init(): module entry point.  Loads/binds the optional quota
 * interface, patches quota methods into mds_obd_ops, then registers both
 * the "mds" and "mdt" obd types with their lprocfs module vars.
 * (Error-path lines around lquota_init failure and the final return are
 * partly missing from this extract.) */
2729 static int __init mds_init(void)
2732 struct lprocfs_static_vars lvars;
2734 request_module("lquota");
2735 mds_quota_interface_ref = PORTAL_SYMBOL_GET(mds_quota_interface);
2736 rc = lquota_init(mds_quota_interface_ref);
/* On lquota_init failure, drop the symbol reference taken above. */
2738 if (mds_quota_interface_ref)
2739 PORTAL_SYMBOL_PUT(mds_quota_interface);
2742 init_obd_quota_ops(mds_quota_interface_ref, &mds_obd_ops);
2744 lprocfs_init_vars(mds, &lvars);
2745 class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
2746 lprocfs_init_vars(mdt, &lvars);
2747 class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
/* mds_exit(): module teardown -- shut down quota, release the lquota symbol
 * reference if it was acquired, and unregister both obd types registered in
 * mds_init().  Marked only nominally __exit (commented out in the
 * original), so it stays callable even when the section would be
 * discarded. */
2752 static void /*__exit*/ mds_exit(void)
2754 lquota_exit(mds_quota_interface_ref);
2755 if (mds_quota_interface_ref)
2756 PORTAL_SYMBOL_PUT(mds_quota_interface);
2758 class_unregister_type(LUSTRE_MDS_NAME);
2759 class_unregister_type(LUSTRE_MDT_NAME);
/* Standard kernel module metadata and entry/exit hook registration. */
2762 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2763 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
2764 MODULE_LICENSE("GPL");
2766 module_init(mds_init);
2767 module_exit(mds_exit);