1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001-2005 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of Lustre, http://www.lustre.org.
15 * Lustre is free software; you can redistribute it and/or
16 * modify it under the terms of version 2 of the GNU General Public
17 * License as published by the Free Software Foundation.
19 * Lustre is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with Lustre; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_MDS
34 #include <linux/module.h>
35 #include <linux/lustre_mds.h>
36 #include <linux/lustre_dlm.h>
37 #include <linux/init.h>
38 #include <linux/obd_class.h>
39 #include <linux/random.h>
41 #include <linux/jbd.h>
42 #include <linux/ext3_fs.h>
43 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
44 # include <linux/smp_lock.h>
45 # include <linux/buffer_head.h>
46 # include <linux/workqueue.h>
47 # include <linux/mount.h>
49 # include <linux/locks.h>
51 #include <linux/obd_lov.h>
52 #include <linux/lustre_mds.h>
53 #include <linux/lustre_fsfilt.h>
54 #include <linux/lprocfs_status.h>
55 #include <linux/lustre_commit_confd.h>
56 #include <linux/lustre_quota.h>
57 #include <linux/lustre_disk.h>
59 #include "mds_internal.h"
61 static int mds_intent_policy(struct ldlm_namespace *ns,
62 struct ldlm_lock **lockp, void *req_cookie,
63 ldlm_mode_t mode, int flags, void *data);
64 static int mds_postsetup(struct obd_device *obd);
65 static int mds_cleanup(struct obd_device *obd);
/*
 * mds_sendpage(): read up to @count bytes of directory data from @file
 * starting at @offset and bulk-PUT the pages to the requesting client.
 * Returns 0 on success or a negative errno; on bulk failure the client
 * export is evicted.
 *
 * NOTE(review): the listing's embedded numbering skips lines, so parts of
 * this body (NULL checks after OBD_ALLOC/alloc_pages, labels, closing
 * braces) appear elided — confirm against the full source before editing.
 */
67 /* Assumes caller has already pushed into the kernel filesystem context */
68 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
69 loff_t offset, int count)
71 struct ptlrpc_bulk_desc *desc;
72 struct l_wait_info lwi;
74 int rc = 0, npages, i, tmpcount, tmpsize = 0;
/* offset must be page-aligned (original author already doubted why) */
77 LASSERT((offset & (PAGE_SIZE - 1)) == 0); /* I'm dubious about this */
/* round the byte count up to whole pages */
79 npages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT;
80 OBD_ALLOC(pages, sizeof(*pages) * npages);
82 GOTO(out, rc = -ENOMEM);
/* server is the source side of a bulk PUT toward the client */
84 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
87 GOTO(out_free, rc = -ENOMEM);
/* first pass: allocate and register one page per chunk (last may be short) */
89 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
90 tmpsize = tmpcount > PAGE_SIZE ? PAGE_SIZE : tmpcount;
92 pages[i] = alloc_pages(GFP_KERNEL, 0);
94 GOTO(cleanup_buf, rc = -ENOMEM);
96 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* second pass: fill each page from the backing fs; fsfilt_readpage
 * advances offset as it reads */
99 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
100 tmpsize = tmpcount > PAGE_SIZE ? PAGE_SIZE : tmpcount;
101 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
102 tmpsize, offset, file->f_dentry->d_inode->i_ino,
103 file->f_dentry->d_inode->i_size);
105 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
106 kmap(pages[i]), tmpsize, &offset);
110 GOTO(cleanup_buf, rc = -EIO);
113 LASSERT(desc->bd_nob == count);
115 rc = ptlrpc_start_bulk_transfer(desc);
117 GOTO(cleanup_buf, rc);
/* injected failure point for testing bulk-abort paths */
119 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
120 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
121 OBD_FAIL_MDS_SENDPAGE, rc);
122 GOTO(abort_bulk, rc);
/* wait (bounded) for the bulk transfer to complete */
125 lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
126 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), &lwi);
127 LASSERT (rc == 0 || rc == -ETIMEDOUT);
130 if (desc->bd_success &&
131 desc->bd_nob_transferred == count)
132 GOTO(cleanup_buf, rc);
134 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
/* bulk did not complete: log and evict the client */
137 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
138 (rc == -ETIMEDOUT) ? "timeout" : "network error",
139 desc->bd_nob_transferred, count,
140 req->rq_export->exp_client_uuid.uuid,
141 req->rq_export->exp_connection->c_remote_uuid.uuid);
143 class_fail_export(req->rq_export);
/* cleanup labels (partially elided in this listing) */
147 ptlrpc_abort_bulk (desc);
149 for (i = 0; i < npages; i++)
151 __free_pages(pages[i], 0);
153 ptlrpc_free_bulk(desc);
155 OBD_FREE(pages, sizeof(*pages) * npages);
/*
 * mds_fid2locked_dentry(): look up the dentry for @fid and take a PLAIN
 * DLM lock (mode @lock_mode) on the resource named by the inode's
 * ino/generation.  Returns the dentry with @lockh filled in, or ERR_PTR.
 * NOTE(review): the tail of this function (error handling after the
 * enqueue failure and the final return) is elided from this listing.
 */
160 /* only valid locked dentries or errors should be returned */
161 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
162 struct vfsmount **mnt, int lock_mode,
163 struct lustre_handle *lockh,
164 char *name, int namelen)
166 struct mds_obd *mds = &obd->u.mds;
167 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
168 struct ldlm_res_id res_id = { .name = {0} };
/* the DLM resource is keyed by inode number + generation */
175 res_id.name[0] = de->d_inode->i_ino;
176 res_id.name[1] = de->d_inode->i_generation;
177 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, res_id,
178 LDLM_PLAIN, NULL, lock_mode, &flags,
179 mds_blocking_ast, ldlm_completion_ast, NULL, NULL,
180 NULL, 0, NULL, lockh)
181 if (rc != ELDLM_OK) {
183 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
/*
 * mds_fid2dentry(): resolve a client-visible fid (ino + generation) to a
 * dget'd dentry via a lookup of "0x<ino>" under the special fid directory.
 * If @generation is non-zero it is checked against the inode; on mismatch
 * -ENOENT is returned.  If @mnt is non-NULL it receives mds_vfsmnt.
 * NOTE(review): several lines (ENTRY, IS_ERR checks, dput on the error
 * paths, final RETURN) are elided from this listing.
 */
189 /* Look up an entry by inode number. */
190 /* this function ONLY returns valid dget'd dentries with an initialized inode
192 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
193 struct vfsmount **mnt)
196 unsigned long ino = fid->id;
197 __u32 generation = fid->generation;
199 struct dentry *result;
/* ino 0 can never be valid — treat as a stale handle */
202 RETURN(ERR_PTR(-ESTALE));
204 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
206 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino/gen %lu/%u, sb %p\n",
207 ino, generation, mds->mds_sb);
209 /* under ext3 this is neither supposed to return bad inodes
211 result = ll_lookup_one_len(fid_name, mds->mds_fid_de, strlen(fid_name));
215 inode = result->d_inode;
217 RETURN(ERR_PTR(-ENOENT));
/* generation check guards against a recycled inode number */
219 if (generation && inode->i_generation != generation) {
220 /* we didn't find the right inode.. */
221 CERROR("bad inode %lu, link: %lu ct: %d or generation %u/%u\n",
222 inode->i_ino, (unsigned long)inode->i_nlink,
223 atomic_read(&inode->i_count), inode->i_generation,
226 RETURN(ERR_PTR(-ENOENT));
230 *mnt = mds->mds_vfsmnt;
/*
 * mds_connect(): obd connect method for the MDS.  Creates an export for
 * the client identified by @cluuid, negotiates connect flags, allocates
 * per-client recovery data (mcd) and registers it in the last_rcvd file
 * via mds_client_add().  Error paths free the mcd and disconnect.
 * NOTE(review): several checks (NULL data, error-label braces, RETURNs)
 * are elided from this listing.
 */
238 /* Establish a connection to the MDS.
240 * This will set up an export structure for the client to hold state data
241 * about that client, like open files, the last operation number it did
242 * on the server, etc.
244 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
245 struct obd_uuid *cluuid, struct obd_connect_data *data)
247 struct obd_export *exp;
248 struct mds_export_data *med;
249 struct mds_client_data *mcd;
250 int rc, abort_recovery;
253 if (!conn || !obd || !cluuid)
256 /* Check for aborted recovery. */
257 spin_lock_bh(&obd->obd_processing_task_lock);
258 abort_recovery = obd->obd_abort_recovery;
259 spin_unlock_bh(&obd->obd_processing_task_lock);
261 target_abort_recovery(obd);
263 /* XXX There is a small race between checking the list and adding a
264 * new connection for the same UUID, but the real threat (list
265 * corruption when multiple different clients connect) is solved.
267 * There is a second race between adding the export to the list,
268 * and filling in the client data below. Hence skipping the case
269 * of NULL mcd above. We should already be controlling multiple
270 * connects at the client, and we can't hold the spinlock over
271 * memory allocations without risk of deadlocking.
273 rc = class_connect(conn, obd, cluuid);
276 exp = class_conn2export(conn);
278 med = &exp->exp_mds_data;
/* keep only the connect flags this MDS actually supports */
281 data->ocd_connect_flags &= MDS_CONNECT_SUPPORTED;
282 exp->exp_connect_flags = data->ocd_connect_flags;
285 OBD_ALLOC(mcd, sizeof(*mcd));
287 CERROR("mds: out of memory for client data\n");
288 GOTO(out, rc = -ENOMEM);
291 memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
/* persist the client record (slot -1 = allocate a new slot) */
294 rc = mds_client_add(obd, &obd->u.mds, med, -1);
/* error path: undo allocation and connection */
300 OBD_FREE(mcd, sizeof(*mcd));
303 class_disconnect(exp);
305 class_export_put(exp);
/*
 * mds_init_export(): initialize the MDS-private part of a new export —
 * the open-files list and its spinlock.
 */
311 static int mds_init_export(struct obd_export *exp)
313 struct mds_export_data *med = &exp->exp_mds_data;
315 INIT_LIST_HEAD(&med->med_open_head);
316 spin_lock_init(&med->med_open_lock);
/*
 * mds_destroy_export(): tear down an export — force-close every file
 * handle still on the export's open list (possibly unlinking orphans),
 * then free the persistent client record.  Self-exports (uuid equal to
 * the obd's own) skip the close loop.
 * NOTE(review): some lines (ENTRY, GOTO for the self-export case,
 * closing braces, RETURN) are elided from this listing.
 */
320 static int mds_destroy_export(struct obd_export *export)
322 struct mds_export_data *med;
323 struct obd_device *obd = export->exp_obd;
324 struct lvfs_run_ctxt saved;
328 med = &export->exp_mds_data;
329 target_destroy_export(export);
331 if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
334 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
335 /* Close any open files (which may also cause orphan unlinking). */
336 spin_lock(&med->med_open_lock);
337 while (!list_empty(&med->med_open_head)) {
338 struct list_head *tmp = med->med_open_head.next;
339 struct mds_file_data *mfd =
340 list_entry(tmp, struct mds_file_data, mfd_list);
341 struct dentry *dentry = mfd->mfd_dentry;
343 /* Remove mfd handle so it can't be found again.
344 * We are consuming the mfd_list reference here. */
345 mds_mfd_unlink(mfd, 0);
/* drop the lock across mds_mfd_close, which may sleep */
346 spin_unlock(&med->med_open_lock);
348 /* If you change this message, be sure to update
349 * replay_single:test_46 */
350 CDEBUG(D_INODE|D_IOCTL, "%s: force closing file handle for "
351 "%.*s (ino %lu)\n", obd->obd_name, dentry->d_name.len,
352 dentry->d_name.name, dentry->d_inode->i_ino);
353 /* child orphan sem protects orphan_dec_test and
354 * is_orphan race, mds_mfd_close drops it */
355 MDS_DOWN_WRITE_ORPHAN_SEM(dentry->d_inode);
356 rc = mds_mfd_close(NULL, obd, mfd,
357 !(export->exp_flags & OBD_OPT_FAILOVER));
360 CDEBUG(D_INODE|D_IOCTL, "Error closing file: %d\n", rc);
361 spin_lock(&med->med_open_lock);
363 spin_unlock(&med->med_open_lock);
364 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* release the client's last_rcvd slot */
366 mds_client_free(export);
/*
 * mds_disconnect(): obd disconnect method.  Disconnects the export first
 * so no new requests can use it, cancels all DLM locks it holds, then
 * completes ("schedules") every outstanding difficult reply still queued
 * on the export.  Holds a temporary export reference across the teardown.
 */
371 static int mds_disconnect(struct obd_export *exp)
373 unsigned long irqflags;
378 class_export_get(exp);
380 /* Disconnect early so that clients can't keep using export */
381 rc = class_disconnect(exp);
382 ldlm_cancel_locks_for_export(exp);
384 /* complete all outstanding replies */
385 spin_lock_irqsave(&exp->exp_lock, irqflags);
386 while (!list_empty(&exp->exp_outstanding_replies)) {
387 struct ptlrpc_reply_state *rs =
388 list_entry(exp->exp_outstanding_replies.next,
389 struct ptlrpc_reply_state, rs_exp_list);
390 struct ptlrpc_service *svc = rs->rs_service;
/* svc->srv_lock nests inside exp_lock here */
392 spin_lock(&svc->srv_lock);
393 list_del_init(&rs->rs_exp_list);
394 ptlrpc_schedule_difficult_reply(rs);
395 spin_unlock(&svc->srv_lock);
397 spin_unlock_irqrestore(&exp->exp_lock, irqflags);
399 class_export_put(exp);
/*
 * mds_getstatus(): reply with the fid of the filesystem root
 * (mds_rootfid) in a single mds_body buffer.
 */
403 static int mds_getstatus(struct ptlrpc_request *req)
405 struct mds_obd *mds = mds_req2mds(req);
406 struct mds_body *body;
407 int rc, size = sizeof(*body);
410 rc = lustre_pack_reply(req, 1, &size, NULL);
411 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
412 CERROR("mds: out of memory for message: size=%d\n", size);
413 req->rq_status = -ENOMEM; /* superfluous? */
417 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
418 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
420 /* the last_committed and last_xid fields are filled in for all
421 * replies already - no need to do so here also.
/*
 * mds_blocking_ast(): DLM blocking callback for MDS-held locks.  On a
 * blocking notification, marks the lock CBPENDING and, if it has no
 * readers/writers left, cancels it immediately via ldlm_cli_cancel.
 * Cancel-time invocations (LDLM_CB_CANCELING) are no-ops.
 */
426 int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
427 void *data, int flag)
432 if (flag == LDLM_CB_CANCELING) {
433 /* Don't need to do anything here. */
437 /* XXX layering violation! -phil */
438 l_lock(&lock->l_resource->lr_namespace->ns_lock);
439 /* Get this: if mds_blocking_ast is racing with mds_intent_policy,
440 * such that mds_blocking_ast is called just before l_i_p takes the
441 * ns_lock, then by the time we get the lock, we might not be the
442 * correct blocking function anymore. So check, and return early, if
444 if (lock->l_blocking_ast != mds_blocking_ast) {
445 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
449 lock->l_flags |= LDLM_FL_CBPENDING;
450 do_ast = (!lock->l_readers && !lock->l_writers);
451 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
/* lock no longer referenced locally: cancel it right away */
454 struct lustre_handle lockh;
457 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
458 ldlm_lock2handle(lock, &lockh);
459 rc = ldlm_cli_cancel(&lockh);
461 CERROR("ldlm_cli_cancel: %d\n", rc);
463 LDLM_DEBUG(lock, "Lock still has references, will be "
/*
 * mds_get_md(): read the LOV EA (striping metadata) for @inode into @md
 * via fsfilt_get_md(), then convert on-disk LOV EA formats in place with
 * mds_convert_lov_ea().  *size is used as the buffer capacity on input.
 * NOTE(review): most of this function (locking, size update, RETURN) is
 * elided from this listing — confirm against the full source.
 */
469 int mds_get_md(struct obd_device *obd, struct inode *inode, void *md,
477 rc = fsfilt_get_md(obd, inode, md, *size);
480 CERROR("Error %d reading eadata for ino %lu\n",
484 rc = mds_convert_lov_ea(obd, inode, md, lmm_size);
/*
 * mds_pack_md(): fill reply buffer @offset of @msg with the inode's LOV
 * EA and set the corresponding valid flags (FLDIREA for directories,
 * FLEASIZE for regular files) plus eadatasize in @body.  The reply
 * buffer length was reserved earlier by mds_getattr_pack_msg().
 */
500 /* Call with lock=1 if you want mds_pack_md to take the i_sem.
501 * Call with lock=0 if the caller has already taken the i_sem. */
502 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
503 struct mds_body *body, struct inode *inode, int lock)
505 struct mds_obd *mds = &obd->u.mds;
511 lmm = lustre_msg_buf(msg, offset, 0);
513 /* Some problem with getting eadata when I sized the reply
515 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
/* capacity is whatever was reserved in the reply message */
519 lmm_size = msg->buflens[offset];
521 /* I don't really like this, but it is a sanity check on the client
522 * MD request. However, if the client doesn't know how much space
523 * to reserve for the MD, it shouldn't be bad to have too much space.
525 if (lmm_size > mds->mds_max_mdsize) {
526 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
527 inode->i_ino, lmm_size, mds->mds_max_mdsize);
531 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock);
533 if (S_ISDIR(inode->i_mode))
534 body->valid |= OBD_MD_FLDIREA;
536 body->valid |= OBD_MD_FLEASIZE;
537 body->eadatasize = lmm_size;
/*
 * mds_getattr_internal(): pack the getattr reply body for @dentry at
 * reply offset @reply_off — fid, inode attributes, and (depending on
 * reqbody->valid) either the LOV EA (regular files/dirs) or the symlink
 * target.  When no EA exists, size/time attributes are declared valid
 * from the MDS inode itself (otherwise the OSTs own them).
 */
544 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
545 struct ptlrpc_request *req,
546 struct mds_body *reqbody, int reply_off)
548 struct mds_body *body;
549 struct inode *inode = dentry->d_inode;
556 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
557 LASSERT(body != NULL); /* caller prepped reply */
559 mds_pack_inode2fid(&body->fid1, inode);
560 mds_pack_inode2body(body, inode);
562 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
563 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
564 rc = mds_pack_md(obd, req->rq_repmsg, reply_off + 1, body,
567 /* If we have LOV EA data, the OST holds size, atime, mtime */
568 if (!(body->valid & OBD_MD_FLEASIZE) &&
569 !(body->valid & OBD_MD_FLDIREA))
570 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
571 OBD_MD_FLATIME | OBD_MD_FLMTIME);
572 } else if (S_ISLNK(inode->i_mode) &&
573 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
574 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off + 1,0);
577 LASSERT (symname != NULL); /* caller prepped reply */
/* readlink buffer length was sized to i_size+1 when reply was packed */
578 len = req->rq_repmsg->buflens[reply_off + 1];
580 rc = inode->i_op->readlink(dentry, symname, len);
582 CERROR("readlink failed: %d\n", rc);
583 } else if (rc != len - 1) {
584 CERROR ("Unexpected readlink rc %d: expecting %d\n",
588 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
589 body->valid |= OBD_MD_LINKNAME;
590 body->eadatasize = rc + 1;
591 symname[rc] = 0; /* NULL terminate */
/*
 * mds_getattr_pack_msg(): size and allocate the getattr reply message.
 * Buffer 0 is the mds_body; buffer 1 (when needed) is sized for the LOV
 * EA (probed with a NULL fsfilt_get_md) or the symlink target.
 * NOTE(review): several lines (bufcount++ increments, error RETURNs,
 * braces) are elided from this listing.
 */
599 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
602 struct mds_obd *mds = mds_req2mds(req);
603 struct mds_body *body;
604 int rc = 0, size[2] = {sizeof(*body)}, bufcount = 1;
607 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*body));
608 LASSERT(body != NULL); /* checked by caller */
609 LASSERT_REQSWABBED(req, offset); /* swabbed by caller */
611 if ((S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
612 (S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))) {
/* probe with a NULL buffer to learn the EA size */
615 ret = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0);
617 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
620 if (ret != -ENODATA) {
621 CERROR("error getting inode %lu MD: rc = %d\n",
623 /* should we return ret in req->rq_status? */
626 } else if (ret > mds->mds_max_mdsize) {
628 CERROR("MD size %d larger than maximum possible %u\n",
629 ret, mds->mds_max_mdsize);
631 size[bufcount] = ret;
634 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
/* client reserved eadatasize bytes; cap at what we need/have */
635 if (inode->i_size + 1 != body->eadatasize)
636 CERROR("symlink size: %Lu, reply space: %d\n",
637 inode->i_size + 1, body->eadatasize);
638 size[bufcount] = min_t(int, inode->i_size+1, body->eadatasize);
640 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
641 inode->i_size + 1, body->eadatasize);
644 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
645 CERROR("failed MDS_GETATTR_PACK test\n");
646 req->rq_status = -ENOMEM;
647 GOTO(out, rc = -ENOMEM);
650 rc = lustre_pack_reply(req, bufcount, size, NULL);
652 CERROR("lustre_pack_reply failed: rc %d\n", rc);
653 GOTO(out, req->rq_status = rc);
/*
 * mds_getattr_name(): getattr-by-name — look up @name under the parent
 * fid from the request body, taking PR locks on both parent and child
 * (or, for a RESENT request, reusing the already-granted child lock
 * found via @child_lockh), then pack the child's attributes.  Runs under
 * the caller's credentials (lvfs_ucred pushed below).  The child lock is
 * returned to the client on success.
 * NOTE(review): many lines (ENTRY, NULL checks, raw-lookup branch
 * plumbing, cleanup cases 1/0) are elided from this listing.
 */
661 static int mds_getattr_name(int offset, struct ptlrpc_request *req,
662 struct lustre_handle *child_lockh)
664 struct obd_device *obd = req->rq_export->exp_obd;
665 struct ldlm_reply *rep = NULL;
666 struct lvfs_run_ctxt saved;
667 struct mds_body *body;
668 struct dentry *dparent = NULL, *dchild = NULL;
669 struct lvfs_ucred uc;
670 struct lustre_handle parent_lockh;
672 int rc = 0, cleanup_phase = 0, resent_req = 0;
676 LASSERT(!strcmp(obd->obd_type->typ_name, "mds"));
678 /* Swab now, before anyone looks inside the request */
680 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
681 lustre_swab_mds_body);
683 CERROR("Can't swab mds_body\n");
684 GOTO(cleanup, rc = -EFAULT);
687 LASSERT_REQSWAB(req, offset + 1);
688 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
690 CERROR("Can't unpack name\n");
691 GOTO(cleanup, rc = -EFAULT);
693 namesize = req->rq_reqmsg->buflens[offset + 1];
695 LASSERT (offset == 0 || offset == 2);
696 /* if requests were at offset 2, the getattr reply goes back at 1 */
698 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*rep));
/* build the credential under which the lookup runs */
703 uc.luc_fsuid = req->rq_uid;
705 uc.luc_fsuid = body->fsuid;
707 uc.luc_fsgid = body->fsgid;
708 uc.luc_cap = body->capability;
709 uc.luc_suppgid1 = body->suppgid;
710 uc.luc_suppgid2 = -1;
711 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
712 cleanup_phase = 1; /* kernel context */
713 intent_set_disposition(rep, DISP_LOOKUP_EXECD);
715 /* FIXME: handle raw lookup */
717 if (body->valid == OBD_MD_FLID) {
718 struct mds_body *mds_reply;
719 int size = sizeof(*mds_reply);
721 // The user requested ONLY the inode number, so do a raw lookup
722 rc = lustre_pack_reply(req, 1, &size, NULL);
724 CERROR("out of memory\n");
728 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
730 mds_reply = lustre_msg_buf(req->rq_repmsg, offset,
732 mds_reply->fid1.id = inum;
733 mds_reply->valid = OBD_MD_FLID;
/* a non-zero cookie means the client resent with a granted lock */
738 if (child_lockh->cookie != 0) {
739 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
743 if (resent_req == 0) {
744 rc = mds_get_parent_child_locked(obd, &obd->u.mds, &body->fid1,
745 &parent_lockh, &dparent,
746 LCK_PR, name, namesize,
747 child_lockh, &dchild, LCK_PR);
751 struct ldlm_lock *granted_lock;
752 struct ll_fid child_fid;
753 struct ldlm_resource *res;
754 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
755 granted_lock = ldlm_handle2lock(child_lockh);
756 LASSERTF(granted_lock != NULL, LPU64"/%u lockh "LPX64"\n",
757 body->fid1.id, body->fid1.generation,
758 child_lockh->cookie);
/* recover the child fid from the granted lock's resource name */
761 res = granted_lock->l_resource;
762 child_fid.id = res->lr_name.name[0];
763 child_fid.generation = res->lr_name.name[1];
764 dchild = mds_fid2dentry(&obd->u.mds, &child_fid, NULL);
765 LASSERT(!IS_ERR(dchild));
766 LDLM_LOCK_PUT(granted_lock);
769 cleanup_phase = 2; /* dchild, dparent, locks */
771 if (dchild->d_inode == NULL) {
772 intent_set_disposition(rep, DISP_LOOKUP_NEG);
773 /* in the intent case, the policy clears this error:
774 the disposition is enough */
775 GOTO(cleanup, rc = -ENOENT);
777 intent_set_disposition(rep, DISP_LOOKUP_POS);
/* non-intent path: reply not packed yet, size it for this inode */
780 if (req->rq_repmsg == NULL) {
781 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
783 CERROR ("mds_getattr_pack_msg: %d\n", rc);
788 rc = mds_getattr_internal(obd, dchild, req, body, offset);
789 GOTO(cleanup, rc); /* returns the lock to the client */
792 switch (cleanup_phase) {
794 if (resent_req == 0) {
795 if (rc && dchild->d_inode)
796 ldlm_lock_decref(child_lockh, LCK_PR);
797 ldlm_lock_decref(&parent_lockh, LCK_PR);
802 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/*
 * mds_getattr(): getattr-by-fid.  Swabs the request body, resolves the
 * fid to a dentry under the caller's credentials, sizes/packs the reply,
 * and fills it via mds_getattr_internal().  No DLM lock is taken here.
 * NOTE(review): several lines (error RETURNs, dput, braces) are elided
 * from this listing.
 */
808 static int mds_getattr(int offset, struct ptlrpc_request *req)
810 struct mds_obd *mds = mds_req2mds(req);
811 struct obd_device *obd = req->rq_export->exp_obd;
812 struct lvfs_run_ctxt saved;
814 struct mds_body *body;
815 struct lvfs_ucred uc;
819 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
820 lustre_swab_mds_body);
822 CERROR("Can't unpack body\n");
/* run the lookup with the requesting user's identity */
827 uc.luc_fsuid = req->rq_uid;
829 uc.luc_fsuid = body->fsuid;
831 uc.luc_fsgid = body->fsgid;
832 uc.luc_cap = body->capability;
833 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
834 de = mds_fid2dentry(mds, &body->fid1, NULL);
836 rc = req->rq_status = PTR_ERR(de);
840 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
842 CERROR("mds_getattr_pack_msg: %d\n", rc);
846 req->rq_status = mds_getattr_internal(obd, de, req, body, 0);
851 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/*
 * mds_obd_statfs(): statfs the backing filesystem (cached up to @max_age
 * by fsfilt_statfs) and copy the obd's cached obd_osfs into @osfs under
 * obd_osfs_lock.
 */
856 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
857 unsigned long max_age)
861 spin_lock(&obd->obd_osfs_lock);
862 rc = fsfilt_statfs(obd, obd->u.mds.mds_sb, max_age);
864 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
865 spin_unlock(&obd->obd_osfs_lock);
/*
 * mds_statfs(): STATFS request handler — pack an obd_statfs reply and
 * fill it from mds_obd_statfs() (with ~1 jiffy of caching).
 */
870 static int mds_statfs(struct ptlrpc_request *req)
872 struct obd_device *obd = req->rq_export->exp_obd;
873 int rc, size = sizeof(struct obd_statfs);
876 /* This will trigger a watchdog timeout */
877 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
878 (MDS_SERVICE_WATCHDOG_TIMEOUT / 1000) + 1);
880 rc = lustre_pack_reply(req, 1, &size, NULL);
881 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
882 CERROR("mds: statfs lustre_pack_reply failed: rc = %d\n", rc);
886 /* We call this so that we can cache a bit - 1 jiffie worth */
887 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, 0, size),
890 CERROR("mds_obd_statfs failed: rc %d\n", rc);
/*
 * mds_sync(): SYNC request handler.  A zero fid means "sync the whole
 * backing filesystem"; otherwise fsync the single inode named by fid1
 * and return its updated attributes in the reply body.
 * NOTE(review): some lines (dput, braces, RETURN) are elided from this
 * listing.
 */
900 static int mds_sync(struct ptlrpc_request *req)
902 struct obd_device *obd = req->rq_export->exp_obd;
903 struct mds_obd *mds = &obd->u.mds;
904 struct mds_body *body;
905 int rc, size = sizeof(*body);
908 body = lustre_swab_reqbuf(req, 0, sizeof(*body), lustre_swab_mds_body);
910 GOTO(out, rc = -EFAULT);
912 rc = lustre_pack_reply(req, 1, &size, NULL);
913 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK)) {
914 CERROR("fsync lustre_pack_reply failed: rc = %d\n", rc);
918 if (body->fid1.id == 0) {
919 /* a fid of zero is taken to mean "sync whole filesystem" */
920 rc = fsfilt_sync(obd, mds->mds_sb);
925 de = mds_fid2dentry(mds, &body->fid1, NULL);
927 GOTO(out, rc = PTR_ERR(de));
929 /* The file parameter isn't used for anything */
930 if (de->d_inode->i_fop && de->d_inode->i_fop->fsync)
931 rc = de->d_inode->i_fop->fsync(NULL, de, 1);
/* report the post-sync attributes back to the client */
933 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*body));
934 mds_pack_inode2fid(&body->fid1, de->d_inode);
935 mds_pack_inode2body(body, de->d_inode);
/*
 * mds_readpage(): READPAGE handler — open the directory named by fid1
 * under the caller's credentials, validate that the requested offset
 * (carried in body->size) and length (carried in body->nlink) are
 * block-aligned, then stream the pages to the client via mds_sendpage().
 */
946 /* mds_readpage does not take a DLM lock on the inode, because the client must
947 * already have a PR lock.
949 * If we were to take another one here, a deadlock will result, if another
950 * thread is already waiting for a PW lock. */
951 static int mds_readpage(struct ptlrpc_request *req)
953 struct obd_device *obd = req->rq_export->exp_obd;
954 struct vfsmount *mnt;
957 struct mds_body *body, *repbody;
958 struct lvfs_run_ctxt saved;
959 int rc, size = sizeof(*repbody);
960 struct lvfs_ucred uc;
963 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK))
966 rc = lustre_pack_reply(req, 1, &size, NULL);
968 CERROR("mds: out of memory while packing readpage reply\n");
972 body = lustre_swab_reqbuf(req, 0, sizeof(*body), lustre_swab_mds_body);
974 GOTO (out, rc = -EFAULT);
/* operate with the requesting user's identity */
977 uc.luc_fsuid = req->rq_uid;
979 uc.luc_fsuid = body->fsuid;
981 uc.luc_fsgid = body->fsgid;
982 uc.luc_cap = body->capability;
983 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
984 de = mds_fid2dentry(&obd->u.mds, &body->fid1, &mnt);
986 GOTO(out_pop, rc = PTR_ERR(de));
988 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
990 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
991 /* note: in case of an error, dentry_open puts dentry */
993 GOTO(out_pop, rc = PTR_ERR(file));
995 /* body->size is actually the offset -eeb */
996 if ((body->size & (de->d_inode->i_blksize - 1)) != 0) {
997 CERROR("offset "LPU64" not on a block boundary of %lu\n",
998 body->size, de->d_inode->i_blksize);
999 GOTO(out_file, rc = -EFAULT);
1002 /* body->nlink is actually the #bytes to read -eeb */
1003 if (body->nlink & (de->d_inode->i_blksize - 1)) {
1004 CERROR("size %u is not multiple of blocksize %lu\n",
1005 body->nlink, de->d_inode->i_blksize);
1006 GOTO(out_file, rc = -EFAULT);
/* tell the client the current directory size */
1009 repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*repbody));
1010 repbody->size = file->f_dentry->d_inode->i_size;
1011 repbody->valid = OBD_MD_FLSIZE;
1013 /* to make this asynchronous make sure that the handling function
1014 doesn't send a reply when this function completes. Instead a
1015 callback function would send the reply */
1016 /* body->size is actually the offset -eeb */
1017 rc = mds_sendpage(req, file, body->size, body->nlink);
1020 filp_close(file, 0);
1022 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1024 req->rq_status = rc;
/*
 * mds_reint(): unpack a reintegration (metadata update) record from the
 * request into a heap-allocated mds_update_record — it is too large for
 * the stack — and dispatch it to mds_reint_rec().
 */
1028 int mds_reint(struct ptlrpc_request *req, int offset,
1029 struct lustre_handle *lockh)
1031 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1034 OBD_ALLOC(rec, sizeof(*rec));
1038 rc = mds_update_unpack(req, offset, rec);
1039 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1040 CERROR("invalid record\n");
1041 GOTO(out, req->rq_status = -EINVAL);
1043 /* rc will be used to interrupt a for loop over multiple records */
1044 rc = mds_reint_rec(rec, offset, req, lockh);
1046 OBD_FREE(rec, sizeof(*rec));
/*
 * mds_filter_recovery_request(): decide what to do with a request that
 * arrives while the MDS is recovering.  Connect/disconnect/sync pass
 * through; replayable requests are queued via
 * target_queue_recovery_request(); anything else is rejected with
 * -EAGAIN so the client retries after recovery.
 */
1050 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1051 struct obd_device *obd, int *process)
1053 switch (req->rq_reqmsg->opc) {
1054 case MDS_CONNECT: /* This will never get here, but for completeness. */
1055 case OST_CONNECT: /* This will never get here, but for completeness. */
1056 case MDS_DISCONNECT:
1057 case OST_DISCONNECT:
1062 case MDS_SYNC: /* used in unmounting */
1066 *process = target_queue_recovery_request(req, obd);
/* default: refuse non-recovery traffic until recovery completes */
1070 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1072 /* XXX what should we set rq_status to here? */
1073 req->rq_status = -EAGAIN;
1074 RETURN(ptlrpc_error(req));
/* Human-readable names for REINT_* opcodes, used in debug messages
 * (old GCC designated-initializer syntax: [index] "value"). */
1078 static char *reint_names[] = {
1079 [REINT_SETATTR] "setattr",
1080 [REINT_CREATE] "create",
1081 [REINT_LINK] "link",
1082 [REINT_UNLINK] "unlink",
1083 [REINT_RENAME] "rename",
1084 [REINT_OPEN] "open",
/*
 * mds_set_info(): SET_INFO handler.  Currently only understands the
 * "read-only" key, which sets or clears OBD_CONNECT_RDONLY on the
 * client's export according to the value buffer.
 * NOTE(review): memcmp(key, "read-only", keylen) reads keylen bytes of
 * the literal — if keylen can exceed strlen("read-only")+1 this over-
 * reads the constant; confirm keylen bounds against the full source.
 */
1087 static int mds_set_info(struct obd_export *exp, struct ptlrpc_request *req)
1094 key = lustre_msg_buf(req->rq_reqmsg, 0, 1);
1096 DEBUG_REQ(D_HA, req, "no set_info key");
1099 keylen = req->rq_reqmsg->buflens[0];
1101 val = lustre_msg_buf(req->rq_reqmsg, 1, sizeof(*val));
1103 DEBUG_REQ(D_HA, req, "no set_info val");
1107 rc = lustre_pack_reply(req, 0, NULL, NULL);
1110 req->rq_repmsg->status = 0;
1112 if (keylen < strlen("read-only") ||
1113 memcmp(key, "read-only", keylen) != 0)
/* non-zero value => mark export read-only, zero => clear */
1117 exp->exp_connect_flags |= OBD_CONNECT_RDONLY;
1119 exp->exp_connect_flags &= ~OBD_CONNECT_RDONLY;
/*
 * mds_handle(): top-level MDS request dispatcher.  Validates the export
 * (everything except MDS_CONNECT requires one), applies recovery
 * filtering, then switches on the opcode to the per-operation handlers.
 * NOTE(review): many case labels, braces, and the out/reply tail are
 * elided from this listing — confirm against the full source.
 */
1125 int mds_handle(struct ptlrpc_request *req)
1127 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
1129 struct mds_obd *mds = NULL; /* quell gcc overwarning */
1130 struct obd_device *obd = NULL;
1133 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
/* no journal handle may be held entering or leaving the handler */
1135 LASSERT(current->journal_info == NULL);
1136 /* XXX identical to OST */
1137 if (req->rq_reqmsg->opc != MDS_CONNECT) {
1138 struct mds_export_data *med;
1139 int recovering, abort_recovery;
1141 if (req->rq_export == NULL) {
1142 CERROR("lustre_mds: operation %d on unconnected MDS\n",
1143 req->rq_reqmsg->opc);
1144 req->rq_status = -ENOTCONN;
1145 GOTO(out, rc = -ENOTCONN);
1148 med = &req->rq_export->exp_mds_data;
1149 obd = req->rq_export->exp_obd;
1152 /* sanity check: if the xid matches, the request must
1153 * be marked as a resent or replayed */
1154 if (req->rq_xid == med->med_mcd->mcd_last_xid)
1155 LASSERTF(lustre_msg_get_flags(req->rq_reqmsg) &
1156 (MSG_RESENT | MSG_REPLAY),
1157 "rq_xid "LPU64" matches last_xid, "
1158 "expected RESENT flag\n",
1160 /* else: note the opposite is not always true; a
1161 * RESENT req after a failover will usually not match
1162 * the last_xid, since it was likely never
1163 * committed. A REPLAYed request will almost never
1164 * match the last xid, however it could for a
1165 * committed, but still retained, open. */
1167 /* Check for aborted recovery. */
1168 spin_lock_bh(&obd->obd_processing_task_lock);
1169 abort_recovery = obd->obd_abort_recovery;
1170 recovering = obd->obd_recovering;
1171 spin_unlock_bh(&obd->obd_processing_task_lock);
1172 if (abort_recovery) {
1173 target_abort_recovery(obd);
1174 } else if (recovering) {
1175 rc = mds_filter_recovery_request(req, obd,
1177 if (rc || !should_process)
/* ---- opcode dispatch ---- */
1182 switch (req->rq_reqmsg->opc) {
1184 DEBUG_REQ(D_INODE, req, "connect");
1185 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1186 rc = target_handle_connect(req, mds_handle);
1188 /* Now that we have an export, set mds. */
1189 obd = req->rq_export->exp_obd;
1190 mds = mds_req2mds(req);
1194 case MDS_DISCONNECT:
1195 DEBUG_REQ(D_INODE, req, "disconnect");
1196 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1197 rc = target_handle_disconnect(req);
1198 req->rq_status = rc; /* superfluous? */
1202 DEBUG_REQ(D_INODE, req, "getstatus");
1203 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1204 rc = mds_getstatus(req);
1208 DEBUG_REQ(D_INODE, req, "getattr");
1209 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1210 rc = mds_getattr(0, req);
1213 case MDS_GETATTR_NAME: {
1214 struct lustre_handle lockh;
1215 DEBUG_REQ(D_INODE, req, "getattr_name");
1216 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1218 /* If this request gets a reconstructed reply, we won't be
1219 * acquiring any new locks in mds_getattr_name, so we don't
1223 rc = mds_getattr_name(0, req, &lockh);
1224 /* this non-intent call (from an ioctl) is special */
1225 req->rq_status = rc;
1226 if (rc == 0 && lockh.cookie)
1227 ldlm_lock_decref(&lockh, LCK_PR);
1231 DEBUG_REQ(D_INODE, req, "statfs");
1232 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1233 rc = mds_statfs(req);
1237 DEBUG_REQ(D_INODE, req, "readpage");
1238 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1239 rc = mds_readpage(req);
1241 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
/* reint: peek at the opcode to size the reply buffers */
1248 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*opcp));
1250 int size[3] = {sizeof(struct mds_body), mds->mds_max_mdsize,
1251 mds->mds_max_cookiesize};
1254 /* NB only peek inside req now; mds_reint() will swab it */
1256 CERROR ("Can't inspect opcode\n");
1261 if (lustre_msg_swabbed (req->rq_reqmsg))
/* NOTE(review): the ternary below looks inverted relative to the
 * intent — an in-bounds, non-NULL entry should select
 * reint_names[opc]; as listed, the || condition would index the
 * table for a NULL entry.  Lines are elided here — verify against
 * the full source (upstream uses && with != NULL). */
1264 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1265 (opc < sizeof(reint_names) / sizeof(reint_names[0]) ||
1266 reint_names[opc] == NULL) ? reint_names[opc] :
1269 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
1271 if (opc == REINT_UNLINK || opc == REINT_RENAME)
1273 else if (opc == REINT_OPEN)
1278 rc = lustre_pack_reply(req, bufcount, size, NULL);
1282 rc = mds_reint(req, 0, NULL);
1283 fail = OBD_FAIL_MDS_REINT_NET_REP;
1288 DEBUG_REQ(D_INODE, req, "close");
1289 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1290 rc = mds_close(req);
1293 case MDS_DONE_WRITING:
1294 DEBUG_REQ(D_INODE, req, "done_writing");
1295 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
1296 rc = mds_done_writing(req);
1300 DEBUG_REQ(D_INODE, req, "pin");
1301 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
1306 DEBUG_REQ(D_INODE, req, "sync");
1307 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
1312 DEBUG_REQ(D_INODE, req, "set_info");
1313 rc = mds_set_info(req->rq_export, req);
1316 case MDS_QUOTACHECK:
1317 DEBUG_REQ(D_INODE, req, "quotacheck");
1318 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACHECK_NET, 0);
1319 rc = mds_quotacheck(req);
1323 DEBUG_REQ(D_INODE, req, "quotactl");
1324 OBD_FAIL_RETURN(OBD_FAIL_MDS_QUOTACTL_NET, 0);
1325 rc = mds_quotactl(req);
1329 DEBUG_REQ(D_INODE, req, "ping");
1330 rc = target_handle_ping(req);
1333 case OBD_LOG_CANCEL:
1334 CDEBUG(D_INODE, "log cancel\n");
1335 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1336 rc = -ENOTSUPP; /* la la la */
1340 DEBUG_REQ(D_INODE, req, "enqueue");
1341 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1342 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1343 ldlm_server_blocking_ast, NULL);
1344 fail = OBD_FAIL_LDLM_REPLY;
1347 DEBUG_REQ(D_INODE, req, "convert");
1348 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1349 rc = ldlm_handle_convert(req);
1351 case LDLM_BL_CALLBACK:
1352 case LDLM_CP_CALLBACK:
1353 DEBUG_REQ(D_INODE, req, "callback");
1354 CERROR("callbacks should not happen on MDS\n");
1356 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1358 case LLOG_ORIGIN_HANDLE_CREATE:
1359 DEBUG_REQ(D_INODE, req, "llog_init");
1360 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1361 rc = llog_origin_handle_create(req);
1363 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
1364 DEBUG_REQ(D_INODE, req, "llog next block");
1365 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1366 rc = llog_origin_handle_next_block(req);
1368 case LLOG_ORIGIN_HANDLE_READ_HEADER:
1369 DEBUG_REQ(D_INODE, req, "llog read header");
1370 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1371 rc = llog_origin_handle_read_header(req);
1373 case LLOG_ORIGIN_HANDLE_CLOSE:
1374 DEBUG_REQ(D_INODE, req, "llog close");
1375 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1376 rc = llog_origin_handle_close(req);
1379 DEBUG_REQ(D_INODE, req, "llog catinfo");
1380 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
1381 rc = llog_catinfo(req);
/* unknown opcode */
1384 req->rq_status = -ENOTSUPP;
1385 rc = ptlrpc_error(req);
1389 LASSERT(current->journal_info == NULL);
1391 /* If we're DISCONNECTing, the mds_export_data is already freed */
1392 if (!rc && req->rq_reqmsg->opc != MDS_DISCONNECT) {
1393 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1394 req->rq_repmsg->last_xid =
1395 le64_to_cpu(med->med_mcd->mcd_last_xid);
1397 target_committed_to_req(req);
1403 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LAST_REPLAY) {
1404 if (obd && obd->obd_recovering) {
1405 DEBUG_REQ(D_HA, req, "LAST_REPLAY, queuing reply");
1406 return target_queue_final_reply(req, rc);
1408 /* Lost a race with recovery; let the error path DTRT. */
1409 rc = req->rq_status = -ENOTCONN;
1412 target_send_reply(req, rc, fail);
1416 /* Update the server data on disk. This stores the new mount_count and
1417 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1418 * then the server last_rcvd value may be less than that of the clients.
1419 * This will alert us that we may need to do client recovery.
1421 * Also assumes for mds_last_transno that we are not modifying it (no locking).
1423 int mds_update_server_data(struct obd_device *obd, int force_sync)
1425 struct mds_obd *mds = &obd->u.mds;
1426 struct lr_server_data *lsd = mds->mds_server_data;
1427 struct file *filp = mds->mds_rcvd_filp;
1428 struct lvfs_run_ctxt saved;
/* Enter the MDS filesystem context so the write goes to the backing fs;
 * restored by pop_ctxt() below. */
1433 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1434 lsd->lsd_last_transno = cpu_to_le64(mds->mds_last_transno);
1436 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
1437 mds->mds_mount_count, mds->mds_last_transno);
/* force_sync != 0 requests a synchronous write of the last_rcvd record. */
1438 rc = fsfilt_write_record(obd, filp, lsd, sizeof(*lsd), &off,force_sync);
1440 CERROR("error writing MDS server data: rc = %d\n", rc);
1441 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1447 /* mount the file system (secretly) */
/* Set up an MDS obd device from a lustre_cfg:
 *   buf 1: device name, buf 2: fstype, buf 3: LOV profile name,
 *   buf 4: mount options (see the example in the comment below).
 * Returns 0 on success or a negative errno; on failure unwinds through the
 * err_* labels (some labels are not visible in this excerpt). */
1448 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
1450 struct lprocfs_static_vars lvars;
1451 struct lustre_cfg* lcfg = buf;
1452 char *options = NULL;
1453 struct mds_obd *mds = &obd->u.mds;
1454 struct lustre_mount_info *lmi;
1455 struct vfsmount *mnt;
1461 /* setup 1:/dev/loop/0 2:ext3 3:mdsA 4:errors=remount-ro,iopen_nopriv*/
/* Sanity-check the config record: need at least device name and fstype. */
1463 if (lcfg->lcfg_bufcount < 3)
1464 RETURN(rc = -EINVAL);
1466 if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
1467 RETURN(rc = -EINVAL);
/* Resolve the filesystem helper ops (e.g. ext3) for this backing fstype. */
1469 obd->obd_fsops = fsfilt_get_ops(lustre_cfg_string(lcfg, 2));
1470 if (IS_ERR(obd->obd_fsops))
1471 RETURN(rc = PTR_ERR(obd->obd_fsops));
1473 lmi = lustre_get_mount(obd->obd_name);
1475 /* We already mounted in lustre_fill_super */
1478 /* old path - used for llog writing from mkfs.lustre */
1479 CERROR("Using old MDS mount method\n");
/* Build the mount-option string in a freshly zeroed page. */
1480 page = __get_free_page(GFP_KERNEL);
1484 options = (char *)page;
1485 memset(options, 0, PAGE_SIZE);
1487 /* here we use "iopen_nopriv" hardcoded, because it affects MDS utility
1488 * and the rest of options are passed by mount options. Probably this
1489 * should be moved to somewhere else like startup scripts or lconf. */
1490 sprintf(options, "iopen_nopriv");
1492 if (LUSTRE_CFG_BUFLEN(lcfg, 4) > 0 && lustre_cfg_buf(lcfg, 4))
1493 sprintf(options + strlen(options), ",%s",
1494 lustre_cfg_string(lcfg, 4));
/* Mount the backing filesystem ourselves (old pre-mountconf path). */
1496 mnt = do_kern_mount(lustre_cfg_string(lcfg, 2), 0,
1497 lustre_cfg_string(lcfg, 1), (void *)options);
1501 CERROR("do_kern_mount failed: rc = %d\n", rc);
1506 CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
1508 LASSERT(!lvfs_check_rdonly(lvfs_sbdev(mnt->mnt_sb)));
/* Initialize per-MDS synchronization primitives and default EA sizes. */
1510 sema_init(&mds->mds_orphan_recovery_sem, 1);
1511 sema_init(&mds->mds_epoch_sem, 1);
1512 spin_lock_init(&mds->mds_transno_lock);
1513 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
1514 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
/* Create the per-device LDLM namespace and hook in the intent policy. */
1516 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
1517 obd->obd_namespace = ldlm_namespace_new(ns_name, LDLM_NAMESPACE_SERVER);
1518 if (obd->obd_namespace == NULL) {
1520 GOTO(err_put, rc = -ENOMEM);
1522 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
1524 rc = mds_fs_setup(obd, mnt);
1526 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
1531 rc = llog_start_commit_thread();
/* If a LOV profile name was supplied (buf 3), remember it and give the
 * MDS-side LOV a fresh random uuid. */
1535 if (lcfg->lcfg_bufcount >= 4 && LUSTRE_CFG_BUFLEN(lcfg, 3) > 0) {
1538 generate_random_uuid(uuid);
1539 class_uuid_unparse(uuid, &mds->mds_lov_uuid);
1541 OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
1542 if (mds->mds_profile == NULL)
1543 GOTO(err_fs, rc = -ENOMEM);
1545 strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
1546 LUSTRE_CFG_BUFLEN(lcfg, 3));
1550 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
1551 "mds_ldlm_client", &obd->obd_ldlm_client);
/* MDS participates in recovery/replay. */
1552 obd->obd_replayable = 1;
1554 mds_quota_setup(mds);
1556 rc = mds_postsetup(obd);
1560 lprocfs_init_vars(mds, &lvars);
1561 lprocfs_obd_setup(obd, lvars.obd_vars);
/* Tell the admin whether we came up in recovery or serving normally. */
1563 if (obd->obd_recovering) {
1564 LCONSOLE_WARN("MDT %s now serving %s, but will be in recovery "
1565 "until %d %s reconnect, or if no clients "
1566 "reconnect for %d:%.02d; during that time new "
1567 "clients will not be allowed to connect. "
1568 "Recovery progress can be monitored by watching "
1569 "/proc/fs/lustre/mds/%s/recovery_status.\n",
1571 lustre_cfg_string(lcfg, 1),
1572 obd->obd_recoverable_clients,
1573 (obd->obd_recoverable_clients == 1)
1574 ? "client" : "clients",
1575 (int)(OBD_RECOVERY_TIMEOUT / HZ) / 60,
1576 (int)(OBD_RECOVERY_TIMEOUT / HZ) % 60,
1579 LCONSOLE_INFO("MDT %s now serving %s with recovery %s.\n",
1581 lustre_cfg_string(lcfg, 1),
1582 obd->obd_replayable ? "enabled" : "disabled");
1586 ping_evictor_start();
/* Error unwind: undo setup in reverse order (labels elided in excerpt). */
1591 /* No extra cleanup needed for llog_init_commit_thread() */
1592 mds_fs_cleanup(obd);
1594 ldlm_namespace_free(obd->obd_namespace, 0);
1595 obd->obd_namespace = NULL;
1598 lustre_put_mount(obd->obd_name);
1602 mntput(mds->mds_vfsmnt);
1607 fsfilt_put_ops(obd->obd_fsops);
/* Second-stage setup, called from mds_setup(): bring up the config llog
 * context and, if a LOV profile was recorded, parse it and connect to the
 * LOV/OSCs. Returns 0 or a negative errno. */
1611 static int mds_postsetup(struct obd_device *obd)
1613 struct mds_obd *mds = &obd->u.mds;
1617 rc = llog_setup(obd, LLOG_CONFIG_ORIG_CTXT, obd, 0, NULL,
1622 if (mds->mds_profile) {
1623 struct lvfs_run_ctxt saved;
1624 struct lustre_profile *lprof;
1625 struct config_llog_instance cfg;
1627 cfg.cfg_instance = NULL;
1628 cfg.cfg_uuid = mds->mds_lov_uuid;
/* Parse the on-disk config llog for our profile inside the MDS fs
 * context. */
1629 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1630 rc = class_config_parse_llog(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT),
1631 mds->mds_profile, &cfg);
1632 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1637 LCONSOLE_ERROR("%s: the profile %s could not be read. "
1638 "If you recently installed a new "
1639 "version of Lustre, you may need to "
1640 "re-run 'lconf --write_conf "
1641 "<yourconfig>.xml' command line before "
1642 "restarting the MDS.\n",
1643 obd->obd_name, mds->mds_profile);
/* The parsed profile registers itself; look it up to find the OSC
 * (LOV) device name to connect to. */
1650 lprof = class_get_profile(mds->mds_profile);
1651 if (lprof == NULL) {
1652 CERROR("No profile found: %s\n", mds->mds_profile);
1653 GOTO(err_cleanup, rc = -ENOENT);
1655 rc = mds_lov_connect(obd, lprof->lp_osc);
1657 GOTO(err_cleanup, rc);
/* err_cleanup: tear down the config llog context on failure. */
1665 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
/* Called once client recovery has finished: resynchronize MDS<->OST state.
 * Sets the next object id on the LOV, cleans orphans from PENDING,
 * re-announces the MDS connection, reconnects the MDS->OST llogs and
 * clears orphaned precreated objects. Returns a negative errno on
 * failure, otherwise a non-negative value ('item', set in elided lines). */
1669 int mds_postrecov(struct obd_device *obd)
1671 struct mds_obd *mds = &obd->u.mds;
1678 LASSERT(!obd->obd_recovering);
1679 LASSERT(llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT) != NULL);
1681 /* set nextid first, so we are sure it happens */
1682 rc = mds_lov_set_nextid(obd);
1684 CERROR ("%s: mds_lov_set_nextid failed\n",
1689 /* clean PENDING dir */
1690 rc = mds_cleanup_orphans(obd);
1697 rc = obd_set_info(mds->mds_osc_exp, strlen("mds_conn"), "mds_conn",
/* Reconnect the MDS->OST origin llog handles for every LOV target. */
1702 rc = llog_connect(llog_get_context(obd, LLOG_MDS_OST_ORIG_CTXT),
1703 obd->u.mds.mds_lov_desc.ld_tgt_count,
1706 CERROR("%s: failed at llog_origin_connect: %d\n",
1711 /* remove the orphaned precreated objects */
1712 rc = mds_lov_clearorphans(mds, NULL /* all OSTs */);
1718 RETURN(rc < 0 ? rc : item);
/* Error path (label elided): tear down llogging for all targets. */
1721 /* cleanup all llogging subsystems */
1722 rc = obd_llog_finish(obd, mds->mds_lov_desc.ld_tgt_count);
1724 CERROR("%s: failed to cleanup llogging subsystems\n",
/* Tear down the LOV configuration created from mds_profile: parse the
 * generated "<profile>-clean" llog to undo the setup, then release the
 * saved profile name. Safe to call when no profile was recorded. */
1729 int mds_lov_clean(struct obd_device *obd)
1731 struct mds_obd *mds = &obd->u.mds;
1733 if (mds->mds_profile) {
1735 struct config_llog_instance cfg;
1736 struct lvfs_run_ctxt saved;
/* +1 beyond sizeof("-clean") keeps room for the NUL (sizeof already
 * includes one; the extra byte is harmless slack). */
1737 int len = strlen(mds->mds_profile) + sizeof("-clean") + 1;
1739 OBD_ALLOC(cln_prof, len);
1740 sprintf(cln_prof, "%s-clean", mds->mds_profile);
1742 cfg.cfg_instance = NULL;
1743 cfg.cfg_uuid = mds->mds_lov_uuid;
/* Replay the cleanup llog inside the MDS filesystem context. */
1745 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1746 class_config_parse_llog(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT),
1748 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
1750 OBD_FREE(cln_prof, len);
1751 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
1752 mds->mds_profile = NULL;
/* Staged pre-cleanup hook (obd_ops.o_precleanup): abort recovery,
 * disconnect from the LOV and finish the llog contexts before the final
 * mds_cleanup() runs. 'stage' selects which steps run (switch elided). */
1757 static int mds_precleanup(struct obd_device *obd, int stage)
1764 mds_lov_set_cleanup_flags(obd);
1765 target_cleanup_recovery(obd);
1768 mds_lov_disconnect(obd);
1770 llog_cleanup(llog_get_context(obd, LLOG_CONFIG_ORIG_CTXT));
1771 rc = obd_llog_finish(obd, 0);
/* Final teardown of the MDS device (obd_ops.o_cleanup): flush server data,
 * free LOV object-id state, unmount the backing filesystem, free the LDLM
 * namespace and cancel any recovery timer. Mirrors mds_setup() in reverse. */
1776 static int mds_cleanup(struct obd_device *obd)
1778 struct mds_obd *mds = &obd->u.mds;
1779 lvfs_sbdev_type save_dev;
1781 int must_relock = 0;
1784 ping_evictor_stop();
/* Nothing to do if setup never mounted the backing filesystem. */
1786 if (mds->mds_sb == NULL)
/* Remember the block device before the superblock goes away, so we can
 * clear its read-only marker at the end. */
1788 save_dev = lvfs_sbdev(mds->mds_sb);
1790 if (mds->mds_osc_exp)
1791 /* lov export was disconnected by mds_lov_clean;
1792 we just need to drop our ref */
1793 class_export_put(mds->mds_osc_exp);
1795 lprocfs_obd_cleanup(obd);
1797 mds_quota_cleanup(mds);
/* Force a synchronous last_rcvd update so the on-disk state is current. */
1799 mds_update_server_data(obd, 1);
1800 if (mds->mds_lov_objids != NULL) {
1801 OBD_FREE(mds->mds_lov_objids,
1802 mds->mds_lov_desc.ld_tgt_count * sizeof(obd_id));
1804 mds_fs_cleanup(obd);
1806 /* 2 seems normal on mds, (may_umount() also expects 2
1807 fwiw), but we only see 1 at this point in obdfilter. */
1808 if (atomic_read(&obd->u.mds.mds_vfsmnt->mnt_count) > 2)
1809 CERROR("%s: mount busy, mnt_count %d != 2\n", obd->obd_name,
1810 atomic_read(&obd->u.mds.mds_vfsmnt->mnt_count));
1812 must_put = lustre_put_mount(obd->obd_name);
1814 /* We can only unlock kernel if we are in the context of sys_ioctl,
1815 otherwise we never called lock_kernel */
1816 if (kernel_locked()) {
1822 /* In case we didn't mount with lustre_get_mount -- old method*/
1823 mntput(mds->mds_vfsmnt);
/* obd_force: drop the namespace even if locks are still granted. */
1826 ldlm_namespace_free(obd->obd_namespace, obd->obd_force);
/* Stop a recovery still in progress; the timer must not fire after we
 * are gone. */
1828 spin_lock_bh(&obd->obd_processing_task_lock);
1829 if (obd->obd_recovering) {
1830 target_cancel_recovery_timer(obd);
1831 obd->obd_recovering = 0;
1833 spin_unlock_bh(&obd->obd_processing_task_lock);
1835 lvfs_clear_rdonly(save_dev);
1840 fsfilt_put_ops(obd->obd_fsops);
1842 LCONSOLE_INFO("MDT %s has stopped.\n", obd->obd_name);
/* For a RESENT request, try to find the lock the client already holds
 * (matched by the client-side handle in the request) and hand back its
 * server-side cookie in *lockh, so we do not enqueue a duplicate lock.
 * If old_lock is non-NULL a reference on the found lock is returned there
 * (caller must LDLM_LOCK_PUT it). If no match is found and the xid does
 * not indicate a reply-reconstruction case, MSG_RESENT is cleared so the
 * request is processed as a fresh one. */
1847 static void fixup_handle_for_resent_req(struct ptlrpc_request *req,
1848 struct ldlm_lock *new_lock,
1849 struct ldlm_lock **old_lock,
1850 struct lustre_handle *lockh)
1852 struct obd_export *exp = req->rq_export;
1853 struct obd_device *obd = exp->exp_obd;
1854 struct ldlm_request *dlmreq =
1855 lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*dlmreq));
1856 struct lustre_handle remote_hdl = dlmreq->lock_handle1;
1857 struct list_head *iter;
/* Nothing to fix up unless the client marked this request RESENT. */
1859 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
/* Walk this export's granted locks looking for one whose remote handle
 * matches the handle the client resent. */
1862 l_lock(&obd->obd_namespace->ns_lock);
1863 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
1864 struct ldlm_lock *lock;
1865 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
1866 if (lock == new_lock)
1868 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
1869 lockh->cookie = lock->l_handle.h_cookie;
1870 LDLM_DEBUG(lock, "restoring lock cookie");
1871 DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64,
1874 *old_lock = LDLM_LOCK_GET(lock);
1875 l_unlock(&obd->obd_namespace->ns_lock);
1879 l_unlock(&obd->obd_namespace->ns_lock);
1881 /* If the xid matches, then we know this is a resent request,
1882 * and allow it. (It's probably an OPEN, for which we don't
1885 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_xid))
1888 /* This remote handle isn't enqueued, so we never received or
1889 * processed this request. Clear MSG_RESENT, because it can
1890 * be handled like any normal request now. */
1892 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
1894 DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64,
/* Test whether the disposition bit(s) in 'flag' are set in the intent
 * reply's lock_policy_res1. Returns non-zero if any requested bit is set. */
1898 int intent_disposition(struct ldlm_reply *rep, int flag)
1902 return (rep->lock_policy_res1 & flag);
/* Record a disposition bit (e.g. DISP_IT_EXECD) in the intent reply so the
 * client can tell how far intent execution progressed. */
1905 void intent_set_disposition(struct ldlm_reply *rep, int flag)
1909 rep->lock_policy_res1 |= flag;
/* LDLM intent policy for the MDS namespace (registered in mds_setup).
 * Executes the intent carried in an enqueue request (open/create, getattr/
 * lookup, unlink, ...) and, when the operation produced a server-side lock,
 * swaps that lock in for the one the client was enqueuing.
 * Returns ELDLM_LOCK_ABORTED when no lock should be granted,
 * ELDLM_LOCK_REPLACED when *lockp was replaced by the intent's lock, or an
 * error/status code on the early-exit paths. */
1912 static int mds_intent_policy(struct ldlm_namespace *ns,
1913 struct ldlm_lock **lockp, void *req_cookie,
1914 ldlm_mode_t mode, int flags, void *data)
1916 struct ptlrpc_request *req = req_cookie;
1917 struct ldlm_lock *lock = *lockp;
1918 struct ldlm_intent *it;
1919 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
1920 struct ldlm_reply *rep;
1921 struct lustre_handle lockh = { 0 };
1922 struct ldlm_lock *new_lock = NULL;
/* Intent body lives at request buffer offset 2; the reply carries up to
 * four buffers (ldlm_reply, mds_body, MD EA, llog cookies). */
1923 int rc, offset = 2, repsize[4] = {sizeof(struct ldlm_reply),
1924 sizeof(struct mds_body),
1925 mds->mds_max_mdsize,
1926 mds->mds_max_cookiesize};
1929 LASSERT(req != NULL);
/* Plain enqueue without an intent: just pack a bare ldlm_reply. */
1931 if (req->rq_reqmsg->bufcount <= 1) {
1932 /* No intent was provided */
1933 int size = sizeof(struct ldlm_reply);
1934 rc = lustre_pack_reply(req, 1, &size, NULL);
1939 it = lustre_swab_reqbuf(req, 1, sizeof(*it), lustre_swab_ldlm_intent);
1941 CERROR("Intent missing\n");
1942 RETURN(req->rq_status = -EFAULT);
1945 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
/* UNLINK replies additionally carry unlink llog cookies (4th buffer). */
1947 rc = lustre_pack_reply(req, it->opc == IT_UNLINK ? 4 : 3, repsize,
1950 RETURN(req->rq_status = rc);
1952 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*rep));
1953 intent_set_disposition(rep, DISP_IT_EXECD);
1956 /* execute policy */
1957 switch ((long)it->opc) {
1959 case IT_CREAT|IT_OPEN:
1960 fixup_handle_for_resent_req(req, lock, NULL, &lockh);
1961 /* XXX swab here to assert that an mds_open reint
1962 * packet is following */
1963 rep->lock_policy_res2 = mds_reint(req, offset, &lockh);
1965 /* We abort the lock if the lookup was negative and
1966 * we did not make it to the OPEN portion */
1967 if (!intent_disposition(rep, DISP_LOOKUP_EXECD))
1968 RETURN(ELDLM_LOCK_ABORTED);
1969 if (intent_disposition(rep, DISP_LOOKUP_NEG) &&
1970 !intent_disposition(rep, DISP_OPEN_OPEN))
1972 RETURN(ELDLM_LOCK_ABORTED);
/* Getattr/lookup-style intents (case labels elided in this excerpt). */
1977 fixup_handle_for_resent_req(req, lock, &new_lock, &lockh);
1978 rep->lock_policy_res2 = mds_getattr_name(offset, req, &lockh);
1979 /* FIXME: LDLM can set req->rq_status. MDS sets
1980 policy_res{1,2} with disposition and status.
1981 - replay: returns 0 & req->status is old status
1982 - otherwise: returns req->status */
1983 if (intent_disposition(rep, DISP_LOOKUP_NEG))
1984 rep->lock_policy_res2 = 0;
1985 if (!intent_disposition(rep, DISP_LOOKUP_POS) ||
1986 rep->lock_policy_res2)
1987 RETURN(ELDLM_LOCK_ABORTED);
1988 if (req->rq_status != 0) {
1990 rep->lock_policy_res2 = req->rq_status;
1991 RETURN(ELDLM_LOCK_ABORTED);
1995 CERROR("Unhandled intent "LPD64"\n", it->opc);
1999 /* By this point, whatever function we called above must have either
2000 * filled in 'lockh', been an intent replay, or returned an error. We
2001 * want to allow replayed RPCs to not get a lock, since we would just
2002 * drop it below anyways because lock replay is done separately by the
2003 * client afterwards. For regular RPCs we want to give the new lock to
2004 * the client instead of whatever lock it was about to get. */
2005 if (new_lock == NULL)
2006 new_lock = ldlm_handle2lock(&lockh);
2007 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
2010 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
2011 it->opc, lockh.cookie);
2013 /* If we've already given this lock to a client once, then we should
2014 * have no readers or writers. Otherwise, we should have one reader
2015 * _or_ writer ref (which will be zeroed below) before returning the
2016 * lock to a client. */
2017 if (new_lock->l_export == req->rq_export) {
2018 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
2020 LASSERT(new_lock->l_export == NULL);
2021 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
2026 if (new_lock->l_export == req->rq_export) {
2027 /* Already gave this to the client, which means that we
2028 * reconstructed a reply. */
2029 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
2031 RETURN(ELDLM_LOCK_REPLACED);
2034 /* Fixup the lock to be given to the client */
2035 l_lock(&new_lock->l_resource->lr_namespace->ns_lock);
2036 new_lock->l_readers = 0;
2037 new_lock->l_writers = 0;
/* Transfer ownership to the client's export and inherit the ASTs and
 * remote handle from the lock the client originally enqueued. */
2039 new_lock->l_export = class_export_get(req->rq_export);
2040 list_add(&new_lock->l_export_chain,
2041 &new_lock->l_export->exp_ldlm_data.led_held_locks);
2043 new_lock->l_blocking_ast = lock->l_blocking_ast;
2044 new_lock->l_completion_ast = lock->l_completion_ast;
2046 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
2047 sizeof(lock->l_remote_handle));
2049 new_lock->l_flags &= ~LDLM_FL_LOCAL;
2051 LDLM_LOCK_PUT(new_lock);
2052 l_unlock(&new_lock->l_resource->lr_namespace->ns_lock);
2054 RETURN(ELDLM_LOCK_REPLACED);
/* Set up the MDT network-facing device: register procfs entries and start
 * the three ptlrpc services (main request, setattr, readpage), each with
 * its own portal and thread pool, all dispatching into mds_handle().
 * On failure unwinds via err_thread*/err_lprocfs labels in reverse order. */
2057 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
2059 struct mds_obd *mds = &obd->u.mds;
2060 struct lprocfs_static_vars lvars;
2064 lprocfs_init_vars(mdt, &lvars);
2065 lprocfs_obd_setup(obd, lvars.obd_vars);
/* Main MDS request service on MDS_REQUEST_PORTAL. */
2068 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2069 MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
2070 MDS_SERVICE_WATCHDOG_TIMEOUT,
2071 mds_handle, "mds", obd->obd_proc_entry, NULL);
2073 if (!mds->mds_service) {
2074 CERROR("failed to start service\n");
2075 GOTO(err_lprocfs, rc = -ENOMEM);
2078 rc = ptlrpc_start_n_threads(obd, mds->mds_service, MDT_NUM_THREADS,
2081 GOTO(err_thread, rc);
/* Separate setattr service so attribute updates don't queue behind
 * other metadata traffic. */
2083 mds->mds_setattr_service =
2084 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2085 MDS_SETATTR_PORTAL, MDC_REPLY_PORTAL,
2086 MDS_SERVICE_WATCHDOG_TIMEOUT,
2087 mds_handle, "mds_setattr",
2088 obd->obd_proc_entry, NULL);
2089 if (!mds->mds_setattr_service) {
2090 CERROR("failed to start getattr service\n");
2091 GOTO(err_thread, rc = -ENOMEM);
2094 rc = ptlrpc_start_n_threads(obd, mds->mds_setattr_service,
2095 MDT_NUM_THREADS, "ll_mdt_attr");
2097 GOTO(err_thread2, rc);
/* Dedicated readpage (directory readdir) service. */
2099 mds->mds_readpage_service =
2100 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
2101 MDS_READPAGE_PORTAL, MDC_REPLY_PORTAL,
2102 MDS_SERVICE_WATCHDOG_TIMEOUT,
2103 mds_handle, "mds_readpage",
2104 obd->obd_proc_entry, NULL);
2105 if (!mds->mds_readpage_service) {
2106 CERROR("failed to start readpage service\n");
2107 GOTO(err_thread2, rc = -ENOMEM);
2110 rc = ptlrpc_start_n_threads(obd, mds->mds_readpage_service,
2111 MDT_NUM_THREADS, "ll_mdt_rdpg");
2114 GOTO(err_thread3, rc);
/* Error unwind: unregister services in reverse order of creation. */
2119 ptlrpc_unregister_service(mds->mds_readpage_service);
2121 ptlrpc_unregister_service(mds->mds_setattr_service);
2123 ptlrpc_unregister_service(mds->mds_service);
2125 lprocfs_obd_cleanup(obd);
/* Tear down the MDT device: stop the three ptlrpc services started in
 * mdt_setup() (reverse order) and remove the procfs entries. */
2129 static int mdt_cleanup(struct obd_device *obd)
2131 struct mds_obd *mds = &obd->u.mds;
2134 ptlrpc_unregister_service(mds->mds_readpage_service);
2135 ptlrpc_unregister_service(mds->mds_setattr_service);
2136 ptlrpc_unregister_service(mds->mds_service);
2138 lprocfs_obd_cleanup(obd);
/* lvfs callback: translate an (id, generation) fid into a dentry on the
 * MDS backing filesystem. 'data' is the obd_device registered with the
 * callback table below; 'gr' appears unused here. */
2143 static struct dentry *mds_lvfs_fid2dentry(__u64 id, __u32 gen, __u64 gr,
2146 struct obd_device *obd = data;
2149 fid.generation = gen;
2150 return mds_fid2dentry(&obd->u.mds, &fid, NULL);
/* Callback table handed to the lvfs layer so it can resolve fids via the
 * MDS (GNU-style designated initializer syntax). */
2153 struct lvfs_callback_ops mds_lvfs_ops = {
2154 l_fid2dentry: mds_lvfs_fid2dentry,
2157 /* use obd ops to offer management infrastructure */
/* Method table for the "mds" obd type registered in mds_init(). */
2158 static struct obd_ops mds_obd_ops = {
2159 .o_owner = THIS_MODULE,
2160 .o_connect = mds_connect,
2161 .o_init_export = mds_init_export,
2162 .o_destroy_export = mds_destroy_export,
2163 .o_disconnect = mds_disconnect,
2164 .o_setup = mds_setup,
2165 .o_precleanup = mds_precleanup,
2166 .o_cleanup = mds_cleanup,
2167 .o_postrecov = mds_postrecov,
2168 .o_statfs = mds_obd_statfs,
2169 .o_iocontrol = mds_iocontrol,
2170 .o_create = mds_obd_create,
2171 .o_destroy = mds_obd_destroy,
2172 .o_llog_init = mds_llog_init,
2173 .o_llog_finish = mds_llog_finish,
2174 .o_notify = mds_notify,
/* Method table for the network-facing "mdt" obd type: only device
 * setup/teardown; all request handling goes through its ptlrpc services. */
2177 static struct obd_ops mdt_obd_ops = {
2178 .o_owner = THIS_MODULE,
2179 .o_setup = mdt_setup,
2180 .o_cleanup = mdt_cleanup,
/* Module entry point: initialize quota support, then register the "mds"
 * and "mdt" obd types with their procfs variables. */
2183 static int __init mds_init(void)
2186 struct lprocfs_static_vars lvars;
2188 rc = lustre_dquot_init();
2192 lprocfs_init_vars(mds, &lvars);
2193 class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
2194 lprocfs_init_vars(mdt, &lvars);
2195 class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
/* Module exit: undo mds_init() — shut down quota support and unregister
 * both obd types. */
2200 static void /*__exit*/ mds_exit(void)
2202 lustre_dquot_exit();
2204 class_unregister_type(LUSTRE_MDS_NAME);
2205 class_unregister_type(LUSTRE_MDT_NAME);
/* Kernel module metadata and entry/exit registration. */
2208 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2209 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
2210 MODULE_LICENSE("GPL");
2212 module_init(mds_init);
2213 module_exit(mds_exit);