1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of Lustre, http://www.lustre.org.
15 * Lustre is free software; you can redistribute it and/or
16 * modify it under the terms of version 2 of the GNU General Public
17 * License as published by the Free Software Foundation.
19 * Lustre is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with Lustre; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 #define DEBUG_SUBSYSTEM S_MDS
32 #include <linux/module.h>
33 #include <linux/lustre_mds.h>
34 #include <linux/lustre_dlm.h>
35 #include <linux/init.h>
36 #include <linux/obd_class.h>
37 #include <linux/random.h>
39 #include <linux/jbd.h>
40 #include <linux/ext3_fs.h>
41 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
42 # include <linux/smp_lock.h>
43 # include <linux/buffer_head.h>
44 # include <linux/workqueue.h>
45 # include <linux/mount.h>
47 # include <linux/locks.h>
49 #include <linux/obd_lov.h>
50 #include <linux/lustre_mds.h>
51 #include <linux/lustre_fsfilt.h>
52 #include <linux/lprocfs_status.h>
53 #include <linux/lustre_commit_confd.h>
55 #include "mds_internal.h"
57 static int mds_cleanup(struct obd_device *obd, int flags);
/* Timeout callback armed by mds_sendpage()'s LWI_TIMEOUT: a bulk PUT to the
 * client did not complete in time, so evict the client (fail its export) and
 * abort the outstanding bulk descriptor. */
59 static int mds_bulk_timeout(void *data)
61 struct ptlrpc_bulk_desc *desc = data;
62 struct obd_export *exp = desc->bd_export;
64 CERROR("bulk send timed out: evicting %s@%s\n",
65 exp->exp_client_uuid.uuid,
66 exp->exp_connection->c_remote_uuid.uuid);
67 ptlrpc_fail_export(exp);
68 ptlrpc_abort_bulk (desc);
72 /* Assumes caller has already pushed into the kernel filesystem context */
/* Read one page of directory data from 'file' at page-aligned 'offset' and
 * bulk-PUT it to the client identified by the request.  Waits (with the
 * mds_bulk_timeout eviction callback) for bulk completion before freeing the
 * page and descriptor.
 * NOTE(review): several guard lines (NULL checks after the prep/alloc calls,
 * closing braces, ENTRY/RETURN) appear to be missing from this excerpt. */
73 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
74 __u64 offset, __u64 xid)
76 struct ptlrpc_bulk_desc *desc;
77 struct l_wait_info lwi;
/* offset must be page-aligned; the transfer unit is exactly one page */
82 LASSERT ((offset & (PAGE_CACHE_SIZE - 1)) == 0);
84 desc = ptlrpc_prep_bulk_exp (req, BULK_PUT_SOURCE, MDS_BULK_PORTAL);
86 GOTO(out, rc = -ENOMEM);
88 LASSERT (PAGE_SIZE == PAGE_CACHE_SIZE);
89 page = alloc_pages (GFP_KERNEL, 0);
91 GOTO(cleanup_bulk, rc = -ENOMEM);
93 rc = ptlrpc_prep_bulk_page(desc, page, 0, PAGE_CACHE_SIZE);
95 GOTO(cleanup_buf, rc);
97 CDEBUG(D_EXT2, "reading %lu@"LPU64" from dir %lu (size %llu)\n",
98 PAGE_CACHE_SIZE, offset, file->f_dentry->d_inode->i_ino,
99 file->f_dentry->d_inode->i_size);
/* fill the page from the backing filesystem; a short read is an error */
100 rc = fsfilt_readpage(req->rq_export->exp_obd, file, page_address (page),
101 PAGE_CACHE_SIZE, (loff_t *)&offset);
103 if (rc != PAGE_CACHE_SIZE)
104 GOTO(cleanup_buf, rc = -EIO);
106 rc = ptlrpc_bulk_put(desc);
108 GOTO(cleanup_buf, rc);
/* fault-injection hook: deliberately abort the bulk for testing */
110 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
111 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
112 OBD_FAIL_MDS_SENDPAGE, rc);
113 ptlrpc_abort_bulk(desc);
114 GOTO(cleanup_buf, rc);
/* wait for the client to pull the data; on timeout, evict the client */
117 lwi = LWI_TIMEOUT(obd_timeout * HZ, mds_bulk_timeout, desc);
118 rc = l_wait_event(desc->bd_waitq, ptlrpc_bulk_complete (desc), &lwi);
120 LASSERT (rc == -ETIMEDOUT);
121 GOTO(cleanup_buf, rc);
126 __free_pages (page, 0);
128 ptlrpc_free_bulk (desc);
133 /* only valid locked dentries or errors should be returned */
/* Translate an ll_fid to a dentry (via mds_fid2dentry) and take a PLAIN DLM
 * lock of 'lock_mode' on the inode's {ino, generation} resource, returning
 * the lock handle in *lockh.  Enqueue failure maps to ERR_PTR(-ENOLCK). */
134 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
135 struct vfsmount **mnt, int lock_mode,
136 struct lustre_handle *lockh)
138 struct mds_obd *mds = &obd->u.mds;
139 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
140 struct ldlm_res_id res_id = { .name = {0} };
/* resource name is the inode number plus generation to guard against reuse */
147 res_id.name[0] = de->d_inode->i_ino;
148 res_id.name[1] = de->d_inode->i_generation;
149 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, NULL,
150 res_id, LDLM_PLAIN, NULL, 0, lock_mode,
151 &flags, ldlm_completion_ast,
152 mds_blocking_ast, NULL, lockh)
153 if (rc != ELDLM_OK) {
155 retval = ERR_PTR(-ENOLCK); /* XXX translate ldlm code */
/* Compatibility: older kernels call this dcache flag DCACHE_NFSD_DISCONNECTED;
 * provide the newer name so the code below can use it unconditionally. */
161 #ifndef DCACHE_DISCONNECTED
162 #define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
166 /* Look up an entry by inode number. */
167 /* this function ONLY returns valid dget'd dentries with an initialized inode
/* Resolve an ll_fid {id, generation} to a dget'd dentry by looking up the
 * "0x<ino>" name under the MDS fid directory (mds->mds_fid_de).  A non-zero
 * generation that does not match the found inode's i_generation is treated
 * as a stale handle (-ENOENT).  If 'mnt' is non-NULL, the MDS vfsmount is
 * returned through it. */
169 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
170 struct vfsmount **mnt)
173 unsigned long ino = fid->id;
174 __u32 generation = fid->generation;
176 struct dentry *result;
179 RETURN(ERR_PTR(-ESTALE));
/* fid-directory entries are named by hex inode number */
181 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
183 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino %lu, gen %u, sb %p\n",
184 ino, generation, mds->mds_sb);
186 /* under ext3 this is neither supposed to return bad inodes
188 result = ll_lookup_one_len(fid_name, mds->mds_fid_de, strlen(fid_name));
192 inode = result->d_inode;
194 RETURN(ERR_PTR(-ENOENT));
/* generation 0 means "don't care"; otherwise it must match exactly */
196 if (generation && inode->i_generation != generation) {
197 /* we didn't find the right inode.. */
198 CERROR("bad inode %lu, link: %d ct: %d or generation %u/%u\n",
199 inode->i_ino, inode->i_nlink,
200 atomic_read(&inode->i_count), inode->i_generation,
203 RETURN(ERR_PTR(-ENOENT));
207 *mnt = mds->mds_vfsmnt;
215 /* Establish a connection to the MDS.
217 * This will set up an export structure for the client to hold state data
218 * about that client, like open files, the last operation number it did
219 * on the server, etc.
/* Returns 0 on success; on failure the allocated mds_client_data is freed
 * and the half-made connection torn down via class_disconnect(). */
221 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
222 struct obd_uuid *cluuid)
224 struct obd_export *exp;
225 struct mds_export_data *med;
226 struct mds_client_data *mcd;
227 int rc, abort_recovery;
230 if (!conn || !obd || !cluuid)
233 /* Check for aborted recovery. */
234 spin_lock_bh(&obd->obd_processing_task_lock);
235 abort_recovery = obd->obd_abort_recovery;
236 spin_unlock_bh(&obd->obd_processing_task_lock);
238 target_abort_recovery(obd);
240 /* XXX There is a small race between checking the list and adding a
241 * new connection for the same UUID, but the real threat (list
242 * corruption when multiple different clients connect) is solved.
244 * There is a second race between adding the export to the list,
245 * and filling in the client data below. Hence skipping the case
246 * of NULL mcd above. We should already be controlling multiple
247 * connects at the client, and we can't hold the spinlock over
248 * memory allocations without risk of deadlocking.
250 rc = class_connect(conn, obd, cluuid);
253 exp = class_conn2export(conn);
255 med = &exp->exp_mds_data;
/* drop the ref class_conn2export took; med stays valid via the export */
256 class_export_put(exp);
258 OBD_ALLOC(mcd, sizeof(*mcd));
260 CERROR("mds: out of memory for client data\n");
261 GOTO(out_export, rc = -ENOMEM);
264 memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
267 INIT_LIST_HEAD(&med->med_open_head);
268 spin_lock_init(&med->med_open_lock);
/* record this client in last_rcvd so it can be recovered after a crash */
270 rc = mds_client_add(obd, &obd->u.mds, med, -1);
277 OBD_FREE(mcd, sizeof(*mcd));
279 class_disconnect(conn, 0);
/* Handle-table addref callback: bump the mfd refcount when the handle is
 * looked up (registered via class_handle_hash in mds_mfd_new). */
284 static void mds_mfd_addref(void *mfdp)
286 struct mds_file_data *mfd = mfdp;
288 atomic_inc(&mfd->mfd_refcount);
289 CDEBUG(D_INFO, "GETting mfd %p : new refcount %d\n", mfd,
290 atomic_read(&mfd->mfd_refcount));
/* Allocate a new mds_file_data and hash its handle so clients can refer to
 * the open file by cookie.  Starts with refcount 2: one for the open-file
 * list, one for the caller. */
293 struct mds_file_data *mds_mfd_new(void)
295 struct mds_file_data *mfd;
297 OBD_ALLOC(mfd, sizeof *mfd);
299 CERROR("mds: out of memory\n");
303 atomic_set(&mfd->mfd_refcount, 2);
305 INIT_LIST_HEAD(&mfd->mfd_handle.h_link);
306 class_handle_hash(&mfd->mfd_handle, mds_mfd_addref);
/* Look up the mds_file_data for a client-supplied open-file handle cookie;
 * the handle-table addref callback takes a reference for the caller. */
311 static struct mds_file_data *mds_handle2mfd(struct lustre_handle *handle)
314 LASSERT(handle != NULL);
315 RETURN(class_handle2object(handle->cookie));
/* Drop one reference on an mfd, freeing it when the count hits zero.
 * The 0x5a5a upper-bound assert catches use of poisoned/freed memory. */
318 void mds_mfd_put(struct mds_file_data *mfd)
320 CDEBUG(D_INFO, "PUTting mfd %p : new refcount %d\n", mfd,
321 atomic_read(&mfd->mfd_refcount) - 1);
322 LASSERT(atomic_read(&mfd->mfd_refcount) > 0 &&
323 atomic_read(&mfd->mfd_refcount) < 0x5a5a);
324 if (atomic_dec_and_test(&mfd->mfd_refcount)) {
/* must already be unhashed (mds_mfd_destroy) before the final put */
325 LASSERT(list_empty(&mfd->mfd_handle.h_link));
326 OBD_FREE(mfd, sizeof *mfd);
/* Remove the mfd's handle from the handle table so no new lookups can find
 * it; the reference(s) are dropped elsewhere via mds_mfd_put(). */
330 void mds_mfd_destroy(struct mds_file_data *mfd)
332 class_handle_unhash(&mfd->mfd_handle);
336 /* Close a "file descriptor" and possibly unlink an orphan from the
339 * If we are being called from mds_disconnect() because the client has
340 * disappeared, then req == NULL and we do not update last_rcvd because
341 * there is nothing that could be recovered by the client at this stage
342 * (it will not even _have_ an entry in last_rcvd anymore).
/* If this was the last open of an orphan (unlinked-while-open) inode, the
 * PENDING-directory entry named by its fid is unlinked inside an fsfilt
 * transaction.  Returns 0 or a negative errno. */
344 static int mds_mfd_close(struct ptlrpc_request *req, struct obd_device *obd,
345 struct mds_file_data *mfd)
347 struct dentry *dparent = mfd->mfd_dentry->d_parent;
348 struct inode *child_inode = mfd->mfd_dentry->d_inode;
349 char fidname[LL_FID_NAMELEN];
350 int last_orphan, fidlen, rc = 0;
/* pin the parent across the dput of the child below */
354 LASSERT(atomic_read(&dparent->d_count) > 0);
355 dparent = dget(dparent);
358 fidlen = ll_fid2str(fidname, child_inode->i_ino,
359 child_inode->i_generation);
/* true only for the final close of an inode marked orphan */
361 last_orphan = mds_open_orphan_dec_test(child_inode) &&
362 mds_inode_is_orphan(child_inode);
364 /* this is the actual "close" */
365 l_dput(mfd->mfd_dentry);
366 mds_mfd_destroy(mfd);
372 struct mds_obd *mds = &obd->u.mds;
373 struct inode *pending_dir = mds->mds_pending_dir->d_inode;
374 struct dentry *pending_child = NULL;
377 CDEBUG(D_ERROR, "destroying orphan object %s\n", fidname);
379 /* Sadly, there is no easy way to save pending_child from
380 * mds_reint_unlink() into mfd, so we need to re-lookup,
381 * but normally it will still be in the dcache.
383 down(&pending_dir->i_sem);
384 pending_child = lookup_one_len(fidname, mds->mds_pending_dir,
386 if (IS_ERR(pending_child))
387 GOTO(out_lock, rc = PTR_ERR(pending_child));
388 LASSERT(pending_child->d_inode != NULL);
/* unlink the PENDING entry inside a journal transaction */
390 handle = fsfilt_start(obd, pending_dir, FSFILT_OP_UNLINK, NULL);
392 GOTO(out_dput, rc = PTR_ERR(handle));
393 rc = vfs_unlink(pending_dir, pending_child);
395 CERROR("error unlinking orphan %s: rc %d\n",fidname,rc);
/* record the transno for replay only when req != NULL (see above) */
398 rc = mds_finish_transno(mds, pending_dir, handle, req,
401 int err = fsfilt_commit(obd, pending_dir, handle, 0);
403 CERROR("error committing orphan unlink: %d\n",
412 up(&pending_dir->i_sem);
/* Tear down a client connection: force-close every file the client still has
 * open (dropping/retaking med_open_lock around each close, since the close
 * may sleep), cancel the client's DLM locks, and—unless this is a failover
 * disconnect—erase its last_rcvd slot. */
418 static int mds_disconnect(struct lustre_handle *conn, int flags)
420 struct obd_export *export = class_conn2export(conn);
421 struct mds_export_data *med = &export->exp_mds_data;
422 struct obd_device *obd = export->exp_obd;
423 struct obd_run_ctxt saved;
427 push_ctxt(&saved, &obd->u.mds.mds_ctxt, NULL);
428 /* Close any open files (which may also cause orphan unlinking). */
429 spin_lock(&med->med_open_lock);
430 while (!list_empty(&med->med_open_head)) {
431 struct list_head *tmp = med->med_open_head.next;
432 struct mds_file_data *mfd =
433 list_entry(tmp, struct mds_file_data, mfd_list);
434 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
435 /* bug 1579: fix force-closing for 2.5 */
436 struct dentry *dentry = mfd->mfd_dentry;
438 list_del(&mfd->mfd_list);
/* drop the spinlock: mds_mfd_close() can block (journal, dcache) */
439 spin_unlock(&med->med_open_lock);
441 CERROR("force closing client file handle for %*s (%s:%lu)\n",
442 dentry->d_name.len, dentry->d_name.name,
443 kdevname(dentry->d_inode->i_sb->s_dev),
444 dentry->d_inode->i_ino);
/* req == NULL: no last_rcvd update for a vanished client */
445 rc = mds_mfd_close(NULL, obd, mfd);
448 CDEBUG(D_INODE, "Error closing file: %d\n", rc);
449 spin_lock(&med->med_open_lock);
451 spin_unlock(&med->med_open_lock);
452 pop_ctxt(&saved, &obd->u.mds.mds_ctxt, NULL);
454 ldlm_cancel_locks_for_export(export);
/* on failover the client's recovery state must survive; otherwise free it */
455 if (!(flags & OBD_OPT_FAILOVER))
456 mds_client_free(export);
458 rc = class_disconnect(conn, flags);
459 class_export_put(export);
465 * XXX This is NOT guaranteed to flush all transactions to disk (even though
466 * it is equivalent to calling sync()) because it only _starts_ the flush
467 * and does not wait for completion. It's better than nothing though.
468 * What we really want is a mild form of fsync_dev_lockfs(), but it is
469 * non-standard, or enabling do_sync_supers in ext3, just for this call.
/* Best-effort superblock flush; dispatches to write_super on 2.4 kernels and
 * prefers sync_fs (falling back to write_super) on 2.5+. */
471 static void mds_fsync_super(struct super_block *sb)
475 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
476 if (sb->s_dirt && sb->s_op && sb->s_op->write_super)
477 sb->s_op->write_super(sb);
479 if (sb->s_dirt && sb->s_op) {
480 if (sb->s_op->sync_fs)
481 sb->s_op->sync_fs(sb, 1);
482 else if (sb->s_op->write_super)
483 sb->s_op->write_super(sb);
/* MDS_GETSTATUS handler: reply with the filesystem root fid.  Syncs the
 * backing filesystem first so the reply's last_committed lets the client
 * drop locally-held replayable requests. */
490 static int mds_getstatus(struct ptlrpc_request *req)
492 struct obd_device *obd = req->rq_export->exp_obd;
493 struct mds_obd *mds = mds_req2mds(req);
494 struct mds_body *body;
495 int rc, size = sizeof(*body);
498 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
499 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
500 CERROR("mds: out of memory for message: size=%d\n", size);
501 req->rq_status = -ENOMEM; /* superfluous? */
505 /* Flush any outstanding transactions to disk so the client will
506 * get the latest last_committed value and can drop their local
507 * requests if they have any. This would be fsync_super() if it
510 fsfilt_sync(obd, mds->mds_sb);
512 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
513 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
515 /* the last_committed and last_xid fields are filled in for all
516 * replies already - no need to do so here also.
/* MDS_GETLOVINFO handler: return the LOV descriptor (in server byte order;
 * the receiver swabs) plus the per-target UUID array, sized by the client's
 * requested reply-buffer length (streq->repbuf, capped at
 * LOV_MAX_UUID_BUFFER_SIZE). */
521 static int mds_getlovinfo(struct ptlrpc_request *req)
523 struct mds_obd *mds = mds_req2mds(req);
524 struct mds_status_req *streq;
525 struct lov_desc *desc;
526 struct obd_uuid *uuid0;
528 int rc, size[2] = {sizeof(*desc)};
531 streq = lustre_swab_reqbuf (req, 0, sizeof (*streq),
532 lustre_swab_mds_status_req);
534 CERROR ("Can't unpack mds_status_req\n");
/* sanity-cap the client-requested reply buffer size */
538 if (streq->repbuf > LOV_MAX_UUID_BUFFER_SIZE) {
539 CERROR ("Illegal request for uuid array > %d\n",
543 size[1] = streq->repbuf;
545 rc = lustre_pack_msg(2, size, NULL, &req->rq_replen, &req->rq_repmsg);
547 CERROR("mds: out of memory for message: size=%d\n", size[1]);
/* no LOV configured on this MDS yet */
551 if (!mds->mds_has_lov_desc) {
552 req->rq_status = -ENOENT;
556 /* XXX We're sending the lov_desc in my byte order.
557 * Receiver will swab... */
558 desc = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*desc));
559 memcpy(desc, &mds->mds_lov_desc, sizeof (*desc));
561 tgt_count = mds->mds_lov_desc.ld_tgt_count;
/* NULL here means the uuid array does not fit in the reply buffer */
562 uuid0 = lustre_msg_buf(req->rq_repmsg, 1, tgt_count * sizeof (*uuid0));
564 CERROR("too many targets, enlarge client buffers\n");
565 req->rq_status = -ENOSPC;
569 rc = mds_get_lovtgts(mds, tgt_count, uuid0);
571 CERROR("get_lovtgts error %d\n", rc);
575 memcpy(&mds->mds_osc_uuid, &mds->mds_lov_desc.ld_uuid,
576 sizeof(mds->mds_osc_uuid));
/* DLM blocking AST for MDS-held locks.  On a blocking callback, mark the
 * lock CBPENDING and cancel it immediately if it has no readers/writers;
 * on LDLM_CB_CANCELING there is nothing to do.  Guards against racing with
 * ldlm_intent_policy swapping the blocking AST out from under us. */
580 int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
581 void *data, int flag)
586 if (flag == LDLM_CB_CANCELING) {
587 /* Don't need to do anything here. */
591 /* XXX layering violation! -phil */
592 l_lock(&lock->l_resource->lr_namespace->ns_lock);
593 /* Get this: if mds_blocking_ast is racing with ldlm_intent_policy,
594 * such that mds_blocking_ast is called just before l_i_p takes the
595 * ns_lock, then by the time we get the lock, we might not be the
596 * correct blocking function anymore. So check, and return early, if
598 if (lock->l_blocking_ast != mds_blocking_ast) {
599 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
603 lock->l_flags |= LDLM_FL_CBPENDING;
/* only cancel now if nobody is actively using the lock */
604 do_ast = (!lock->l_readers && !lock->l_writers);
605 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
608 struct lustre_handle lockh;
611 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
612 ldlm_lock2handle(lock, &lockh);
613 rc = ldlm_cli_cancel(&lockh);
615 CERROR("ldlm_cli_cancel: %d\n", rc);
617 LDLM_DEBUG(lock, "Lock still has references, will be "
/* Read the inode's striping EA (LOV MD) into reply buffer 'offset' of 'msg'.
 * If the buffer is absent the MD is simply skipped; if the EA was read,
 * OBD_MD_FLEASIZE and the actual byte count are recorded in 'body'. */
623 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg,
624 int offset, struct mds_body *body, struct inode *inode)
626 struct mds_obd *mds = &obd->u.mds;
627 struct lov_mds_md *lmm;
632 lmm = lustre_msg_buf(msg, offset, 0);
634 /* Some problem with getting eadata when I sized the reply
636 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
640 lmm_size = msg->buflens[offset];
642 /* I don't really like this, but it is a sanity check on the client
643 * MD request. However, if the client doesn't know how much space
644 * to reserve for the MD, this shouldn't be fatal either...
646 if (lmm_size > mds->mds_max_mdsize) {
647 CERROR("Reading MD for inode %lu of %d bytes > max %d\n",
648 inode->i_ino, lmm_size, mds->mds_max_mdsize);
/* fsfilt_get_md returns the number of EA bytes read (or negative errno) */
652 rc = fsfilt_get_md(obd, inode, lmm, lmm_size);
654 CERROR("Error %d reading eadata for ino %lu\n",
657 body->valid |= OBD_MD_FLEASIZE;
658 body->eadatasize = rc;
/* Fill the getattr reply at 'reply_off' from dentry's inode: fid + inode
 * attributes always; for regular files the LOV EA if requested (when the
 * inode has no EA, size/times are valid from the MDS itself); for symlinks
 * the NUL-terminated link target if OBD_MD_LINKNAME was requested. */
665 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
666 struct ptlrpc_request *req,
667 struct mds_body *reqbody, int reply_off)
669 struct mds_body *body;
670 struct inode *inode = dentry->d_inode;
677 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
678 LASSERT(body != NULL); /* caller prepped reply */
680 mds_pack_inode2fid(&body->fid1, inode);
681 mds_pack_inode2body(body, inode);
683 if (S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE) != 0) {
684 rc = mds_pack_md(obd, req->rq_repmsg, reply_off+1, body, inode);
686 /* If we have LOV EA data, the OST holds size, atime, mtime */
687 if (!(body->valid & OBD_MD_FLEASIZE))
688 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
689 OBD_MD_FLATIME | OBD_MD_FLMTIME);
690 } else if (S_ISLNK(inode->i_mode) &&
691 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
692 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off + 1,0);
695 LASSERT (symname != NULL); /* caller prepped reply */
696 len = req->rq_repmsg->buflens[reply_off + 1];
/* readlink fills the reply buffer directly with the target path */
698 rc = inode->i_op->readlink(dentry, symname, len);
700 CERROR("readlink failed: %d\n", rc);
701 } else if (rc != len - 1) {
702 CERROR ("Unexpected readlink rc %d: expecting %d\n",
706 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
707 body->valid |= OBD_MD_LINKNAME;
708 body->eadatasize = rc + 1;
709 symname[rc] = 0; /* NULL terminate */
/* Size and allocate the getattr reply message: one mds_body buffer, plus an
 * optional second buffer sized for the inode's LOV EA (regular files) or the
 * symlink target (bounded by the client-supplied eadatasize). */
717 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
720 struct mds_obd *mds = mds_req2mds(req);
721 struct mds_body *body;
722 int rc = 0, size[2] = {sizeof(*body)}, bufcount = 1;
725 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*body));
726 LASSERT(body != NULL); /* checked by caller */
727 LASSERT_REQSWABBED(req, offset); /* swabbed by caller */
729 if (S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) {
/* probe call: NULL buffer asks fsfilt for the EA's size only */
730 int rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0);
731 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
735 CERROR("error getting inode %lu MD: rc = %d\n",
738 } else if (rc > mds->mds_max_mdsize) {
740 CERROR("MD size %d larger than maximum possible %u\n",
741 rc, mds->mds_max_mdsize);
746 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
/* +1 for the NUL terminator appended by mds_getattr_internal */
747 if (inode->i_size + 1 != body->eadatasize)
748 CERROR("symlink size: %Lu, reply space: %d\n",
749 inode->i_size + 1, body->eadatasize);
750 size[bufcount] = MIN(inode->i_size + 1, body->eadatasize);
752 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
753 inode->i_size + 1, body->eadatasize);
756 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
757 CERROR("failed MDS_GETATTR_PACK test\n");
758 req->rq_status = -ENOMEM;
759 GOTO(out, rc = -ENOMEM);
762 rc = lustre_pack_msg(bufcount, size, NULL, &req->rq_replen,
765 CERROR("out of memory\n");
766 GOTO(out, req->rq_status = rc);
774 /* This is more copy-and-paste from getattr_name than I'd like. */
/* Replay path for a RESENT getattr-by-name: restore the saved transno/status
 * from the client's last_rcvd slot, steal the ack locks from the still-
 * outstanding original reply, then redo the lookup and repack the reply.
 * The LASSERTs on parent/child assume the original request succeeded —
 * the lookups are expected to succeed identically on replay. */
775 static void reconstruct_getattr_name(int offset, struct ptlrpc_request *req,
776 struct lustre_handle *client_lockh)
778 struct mds_export_data *med = &req->rq_export->exp_mds_data;
779 struct mds_client_data *mcd = med->med_mcd;
780 struct obd_device *obd = req->rq_export->exp_obd;
781 struct mds_obd *mds = mds_req2mds(req);
782 struct dentry *parent, *child;
783 struct mds_body *body;
785 struct obd_run_ctxt saved;
/* replay the recorded outcome of the original execution */
790 req->rq_transno = mcd->mcd_last_transno;
791 req->rq_status = mcd->mcd_last_result;
793 LASSERT (req->rq_export->exp_outstanding_reply);
795 mds_steal_ack_locks(req->rq_export, req);
800 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*body));
801 LASSERT (body != NULL); /* checked by caller */
802 LASSERT_REQSWABBED (req, offset); /* swabbed by caller */
804 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
805 LASSERT (name != NULL); /* checked by caller */
806 LASSERT_REQSWABBED (req, offset + 1); /* swabbed by caller */
807 namelen = req->rq_reqmsg->buflens[offset + 1];
809 LASSERT (offset == 2 || offset == 0);
810 /* requests were at offset 2, replies go back at 1 */
/* run the lookups with the client's credentials */
814 uc.ouc_fsuid = body->fsuid;
815 uc.ouc_fsgid = body->fsgid;
816 uc.ouc_cap = body->capability;
817 uc.ouc_suppgid1 = body->suppgid;
818 uc.ouc_suppgid2 = -1;
820 push_ctxt(&saved, &mds->mds_ctxt, &uc);
821 parent = mds_fid2dentry(mds, &body->fid1, NULL);
822 LASSERT(!IS_ERR(parent));
823 dir = parent->d_inode;
825 child = ll_lookup_one_len(name, parent, namelen - 1);
826 LASSERT(!IS_ERR(child));
828 if (req->rq_repmsg == NULL) {
829 rc = mds_getattr_pack_msg(req, child->d_inode, offset);
830 /* XXX need to handle error here */
834 rc = mds_getattr_internal(obd, child, req, body, offset);
835 /* XXX need to handle error here */
/* Getattr-by-name (also used by intent lock): lock the parent (PR), look up
 * 'name' in it, lock the child (PR), and pack the child's attributes into
 * the reply.  On success the child lock is returned to the client in
 * *child_lockh and the parent lock is queued as an ack lock; on failure the
 * cleanup_phase ladder undoes whatever was acquired.  RESENT requests with
 * a matching outstanding reply are diverted to reconstruct_getattr_name(). */
841 static int mds_getattr_name(int offset, struct ptlrpc_request *req,
842 struct lustre_handle *child_lockh)
844 struct mds_obd *mds = mds_req2mds(req);
845 struct obd_device *obd = req->rq_export->exp_obd;
846 struct ldlm_reply *rep = NULL;
847 struct obd_run_ctxt saved;
848 struct mds_body *body;
849 struct dentry *de = NULL, *dchild = NULL;
852 struct ldlm_res_id child_res_id = { .name = {0} };
853 struct lustre_handle parent_lockh;
855 int flags = 0, rc = 0, cleanup_phase = 0;
859 LASSERT(!strcmp(obd->obd_type->typ_name, "mds"));
861 /* Swab now, before anyone looks inside the request */
863 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
864 lustre_swab_mds_body);
866 CERROR("Can't swab mds_body\n");
867 GOTO(cleanup, rc = -EFAULT);
870 LASSERT_REQSWAB(req, offset + 1);
871 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
873 CERROR("Can't unpack name\n");
874 GOTO(cleanup, rc = -EFAULT);
876 namesize = req->rq_reqmsg->buflens[offset + 1];
/* resend with the original reply still outstanding: replay, don't redo */
878 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT) {
879 struct obd_export *exp = req->rq_export;
880 if (exp->exp_outstanding_reply &&
881 exp->exp_outstanding_reply->rq_xid == req->rq_xid) {
882 reconstruct_getattr_name(offset, req, child_lockh);
885 DEBUG_REQ(D_HA, req, "no reply for RESENT req (have "LPD64")",
886 exp->exp_outstanding_reply ?
887 exp->exp_outstanding_reply->rq_xid : (u64)0);
890 LASSERT (offset == 0 || offset == 2);
891 /* if requests were at offset 2, the getattr reply goes back at 1 */
893 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*rep));
/* operate with the client's uid/gid/capability */
897 uc.ouc_fsuid = body->fsuid;
898 uc.ouc_fsgid = body->fsgid;
899 uc.ouc_cap = body->capability;
900 uc.ouc_suppgid1 = body->suppgid;
901 uc.ouc_suppgid2 = -1;
902 push_ctxt(&saved, &mds->mds_ctxt, &uc);
903 /* Step 1: Lookup/lock parent */
904 intent_set_disposition(rep, DISP_LOOKUP_EXECD);
905 de = mds_fid2locked_dentry(obd, &body->fid1, NULL, LCK_PR,
908 GOTO(cleanup, rc = PTR_ERR(de));
912 cleanup_phase = 1; /* parent dentry and lock */
914 CDEBUG(D_INODE, "parent ino %lu, name %s\n", dir->i_ino, name);
916 /* Step 2: Lookup child */
917 dchild = ll_lookup_one_len(name, de, namesize - 1);
918 if (IS_ERR(dchild)) {
919 CDEBUG(D_INODE, "child lookup error %ld\n", PTR_ERR(dchild));
920 GOTO(cleanup, rc = PTR_ERR(dchild));
923 cleanup_phase = 2; /* child dentry */
925 if (dchild->d_inode == NULL) {
926 intent_set_disposition(rep, DISP_LOOKUP_NEG);
927 GOTO(cleanup, rc = -ENOENT);
929 intent_set_disposition(rep, DISP_LOOKUP_POS);
932 /* Step 3: Lock child */
933 child_res_id.name[0] = dchild->d_inode->i_ino;
934 child_res_id.name[1] = dchild->d_inode->i_generation;
935 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, NULL,
936 child_res_id, LDLM_PLAIN, NULL, 0, LCK_PR,
937 &flags, ldlm_completion_ast, mds_blocking_ast,
939 if (rc != ELDLM_OK) {
940 CERROR("ldlm_cli_enqueue: %d\n", rc);
941 GOTO(cleanup, rc = -EIO);
944 cleanup_phase = 3; /* child lock */
946 if (req->rq_repmsg == NULL) {
947 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
949 CERROR ("mds_getattr_pack_msg: %d\n", rc);
954 rc = mds_getattr_internal(obd, dchild, req, body, offset);
955 GOTO(cleanup, rc); /* returns the lock to the client */
/* unwind in reverse acquisition order; fall-through by phase */
958 switch (cleanup_phase) {
961 ldlm_lock_decref(child_lockh, LCK_PR);
967 ldlm_lock_decref(&parent_lockh, LCK_PR);
/* on success, hold the parent lock until the client acks the reply */
969 memcpy(&req->rq_ack_locks[0].lock, &parent_lockh,
970 sizeof(parent_lockh));
971 req->rq_ack_locks[0].mode = LCK_PR;
977 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
/* Plain MDS_GETATTR by fid: resolve the fid to a dentry under the client's
 * credentials, size/allocate the reply, and pack the attributes. */
981 static int mds_getattr(int offset, struct ptlrpc_request *req)
983 struct mds_obd *mds = mds_req2mds(req);
984 struct obd_device *obd = req->rq_export->exp_obd;
985 struct obd_run_ctxt saved;
987 struct mds_body *body;
992 body = lustre_swab_reqbuf (req, offset, sizeof (*body),
993 lustre_swab_mds_body);
995 CERROR ("Can't unpack body\n");
999 uc.ouc_fsuid = body->fsuid;
1000 uc.ouc_fsgid = body->fsgid;
1001 uc.ouc_cap = body->capability;
1002 push_ctxt(&saved, &mds->mds_ctxt, &uc);
1003 de = mds_fid2dentry(mds, &body->fid1, NULL);
1005 rc = req->rq_status = -ENOENT;
/* NOTE(review): GOTO's second arg discards PTR_ERR(de); rc was already
 * forced to -ENOENT above — looks odd, verify against upstream history */
1006 GOTO(out_pop, PTR_ERR(de));
1009 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
1011 CERROR ("mds_getattr_pack_msg: %d\n", rc);
1015 req->rq_status = mds_getattr_internal(obd, de, req, body, 0);
1020 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
/* obd_statfs method: delegate to the backing filesystem's statfs.
 * 'max_age' (cache tolerance) is accepted but unused here. */
1025 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1026 unsigned long max_age)
1028 return fsfilt_statfs(obd, obd->u.mds.mds_sb, osfs);
/* MDS_STATFS handler: pack an obd_statfs reply filled via obd_statfs(),
 * allowing up to one jiffy of result caching. */
1031 static int mds_statfs(struct ptlrpc_request *req)
1033 struct obd_device *obd = req->rq_export->exp_obd;
1034 int rc, size = sizeof(struct obd_statfs);
1037 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
1038 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
1039 CERROR("mds: statfs lustre_pack_msg failed: rc = %d\n", rc);
1043 /* We call this so that we can cache a bit - 1 jiffie worth */
1044 rc = obd_statfs(obd, lustre_msg_buf(req->rq_repmsg,0,size),jiffies-HZ);
1046 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1052 req->rq_status = rc;
/* Replay path for a RESENT close: just restore the recorded transno and
 * result from the client's last_rcvd slot. */
1056 static void reconstruct_close(struct ptlrpc_request *req)
1058 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1059 struct mds_client_data *mcd = med->med_mcd;
1061 req->rq_transno = mcd->mcd_last_transno;
1062 req->rq_status = mcd->mcd_last_result;
1064 /* XXX When open-unlink is working, we'll need to steal ack locks as
1065 * XXX well, and make sure that we do the right unlinking after we
1066 * XXX get the ack back.
/* MDS_CLOSE handler: look up the mfd from the client's handle cookie, unlink
 * it from the export's open list, and close it (possibly unlinking an orphan)
 * inside the MDS filesystem context.  RESENT requests are reconstructed. */
1070 static int mds_close(struct ptlrpc_request *req)
1072 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1073 struct obd_device *obd = req->rq_export->exp_obd;
1074 struct mds_body *body;
1075 struct mds_file_data *mfd;
1076 struct obd_run_ctxt saved;
1080 MDS_CHECK_RESENT(req, reconstruct_close(req));
1082 body = lustre_swab_reqbuf(req, 0, sizeof (*body),
1083 lustre_swab_mds_body);
1085 CERROR ("Can't unpack body\n");
1089 mfd = mds_handle2mfd(&body->handle);
1091 DEBUG_REQ(D_ERROR, req, "no handle for file close "LPD64
1092 ": cookie "LPX64"\n", body->fid1.id,
1093 body->handle.cookie);
/* empty reply: close reports its outcome via rq_status only */
1097 rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
1099 CERROR("lustre_pack_msg: rc = %d\n", rc);
1100 req->rq_status = rc;
1103 spin_lock(&med->med_open_lock);
1104 list_del(&mfd->mfd_list);
1105 spin_unlock(&med->med_open_lock);
1107 push_ctxt(&saved, &obd->u.mds.mds_ctxt, NULL);
/* if packing the reply failed, close without a last_rcvd update */
1108 req->rq_status = mds_mfd_close(rc ? NULL : req, obd, mfd);
1109 pop_ctxt(&saved, &obd->u.mds.mds_ctxt, NULL);
1111 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_CLOSE_PACK)) {
1112 CERROR("test case OBD_FAIL_MDS_CLOSE_PACK\n");
1113 req->rq_status = -ENOMEM;
/* MDS_READPAGE handler: open the directory named by fid1, reply with its
 * size, and bulk-send one page of directory data via mds_sendpage().
 * Field reuse in mds_body (per the inline notes): size = offset,
 * nlink = byte count, blocks = xid. */
1122 static int mds_readpage(struct ptlrpc_request *req)
1124 struct mds_obd *mds = mds_req2mds(req);
1125 struct vfsmount *mnt;
1128 struct mds_body *body, *repbody;
1129 struct obd_run_ctxt saved;
1130 int rc, size = sizeof(*repbody);
1131 struct obd_ucred uc;
1134 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
1135 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) {
1136 CERROR("mds: out of memory\n");
1137 GOTO(out, rc = -ENOMEM);
1140 body = lustre_swab_reqbuf (req, 0, sizeof (*body),
1141 lustre_swab_mds_body);
1143 GOTO (out, rc = -EFAULT);
1145 /* body->size is actually the offset -eeb */
1146 if ((body->size & ~PAGE_MASK) != 0) {
1147 CERROR ("offset "LPU64"not on a page boundary\n", body->size);
1148 GOTO (out, rc = -EFAULT);
1151 /* body->nlink is actually the #bytes to read -eeb */
1152 if (body->nlink != PAGE_SIZE) {
1153 CERROR ("size %d is not PAGE_SIZE\n", body->nlink);
1154 GOTO (out, rc = -EFAULT);
/* run as the client's uid/gid/capability */
1157 uc.ouc_fsuid = body->fsuid;
1158 uc.ouc_fsgid = body->fsgid;
1159 uc.ouc_cap = body->capability;
1160 push_ctxt(&saved, &mds->mds_ctxt, &uc);
1161 de = mds_fid2dentry(mds, &body->fid1, &mnt);
1163 GOTO(out_pop, rc = PTR_ERR(de));
1165 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1167 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1168 /* note: in case of an error, dentry_open puts dentry */
1170 GOTO(out_pop, rc = PTR_ERR(file));
1172 repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*repbody));
1173 repbody->size = file->f_dentry->d_inode->i_size;
1174 repbody->valid = OBD_MD_FLSIZE;
1176 /* to make this asynchronous make sure that the handling function
1177 doesn't send a reply when this function completes. Instead a
1178 callback function would send the reply */
1179 /* body->blocks is actually the xid -phil */
1180 /* body->size is actually the offset -eeb */
1181 rc = mds_sendpage(req, file, body->size, body->blocks);
1183 filp_close(file, 0);
1185 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
1187 req->rq_status = rc;
/* Dispatch a reintegration (metadata-update) request: unpack the update
 * record from the message at 'offset' and hand it to mds_reint_rec().
 * The record is heap-allocated to keep it off the (small) kernel stack. */
1191 int mds_reint(struct ptlrpc_request *req, int offset,
1192 struct lustre_handle *lockh)
1194 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1197 OBD_ALLOC(rec, sizeof(*rec));
1201 rc = mds_update_unpack(req, offset, rec);
1202 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1203 CERROR("invalid record\n");
1204 GOTO(out, req->rq_status = -EINVAL);
1206 /* rc will be used to interrupt a for loop over multiple records */
1207 rc = mds_reint_rec(rec, offset, req, lockh);
1209 OBD_FREE(rec, sizeof(*rec));
/* Decide what to do with a request that arrives while the MDS is still in
 * recovery: connection/disconnection and GETSTATUS pass through, replayable
 * ops are queued for recovery, anything else is rejected with -EAGAIN.
 * *process tells the caller whether to handle the request now. */
1213 static int filter_recovery_request(struct ptlrpc_request *req,
1214 struct obd_device *obd, int *process)
1216 switch (req->rq_reqmsg->opc) {
1217 case MDS_CONNECT: /* This will never get here, but for completeness. */
1218 case OST_CONNECT: /* This will never get here, but for completeness. */
1219 case MDS_DISCONNECT:
1220 case OST_DISCONNECT:
1225 case MDS_GETSTATUS: /* used in unmounting */
1229 *process = target_queue_recovery_request(req, obd);
1233 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1235 /* XXX what should we set rq_status to here? */
1236 req->rq_status = -EAGAIN;
1237 RETURN(ptlrpc_error(req));
/* Human-readable names indexed by REINT_* opcode, for logging.
 * Uses the old GCC "[index] value" designated-initializer spelling. */
1241 static char *reint_names[] = {
1242 [REINT_SETATTR] "setattr",
1243 [REINT_CREATE] "create",
1244 [REINT_LINK] "link",
1245 [REINT_UNLINK] "unlink",
1246 [REINT_RENAME] "rename",
1247 [REINT_OPEN] "open",
/* Transfer the ack locks held by the export's still-outstanding original
 * reply to the resent request 'req', then mark the old reply resent and wake
 * its waiter so it can be torn down without dropping the locks. */
1250 void mds_steal_ack_locks(struct obd_export *exp,
1251 struct ptlrpc_request *req)
1253 unsigned long flags;
1255 struct ptlrpc_request *oldrep = exp->exp_outstanding_reply;
1256 memcpy(req->rq_ack_locks, oldrep->rq_ack_locks,
1257 sizeof req->rq_ack_locks);
/* NOTE(review): locks req->rq_lock while flagging oldrep — presumably
 * intentional (same export serialization); confirm against upstream */
1258 spin_lock_irqsave (&req->rq_lock, flags);
1259 oldrep->rq_resent = 1;
1260 wake_up(&oldrep->rq_wait_for_rep);
1261 spin_unlock_irqrestore (&req->rq_lock, flags);
1262 DEBUG_REQ(D_HA, oldrep, "stole locks from");
1263 DEBUG_REQ(D_HA, req, "stole locks for");
/* Top-level MDS request dispatcher, registered with the ptlrpc service.
 *
 * Flow: fault-injection / sanity checks; for any opcode except
 * MDS_CONNECT, require a valid export and gate the request through
 * recovery handling; then dispatch on opcode to the per-operation
 * handlers; finally fix up the reply (last_xid / last_committed) and
 * send it, queuing the reply instead if this was the client's final
 * replay during recovery.
 *
 * NOTE(review): many lines (case labels, breaks, GOTOs, declarations of
 * 'rc'/'should_process', the 'out:' label) are elided in this extract;
 * code left byte-identical. */
1266 int mds_handle(struct ptlrpc_request *req)
1270 struct mds_obd *mds = NULL; /* quell gcc overwarning */
1271 struct obd_device *obd = NULL;
1274 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
1276 LASSERT(!strcmp(req->rq_obd->obd_type->typ_name, LUSTRE_MDT_NAME));
1278 /* XXX identical to OST */
/* Everything except CONNECT itself needs an established export. */
1279 if (req->rq_reqmsg->opc != MDS_CONNECT) {
1280 struct mds_export_data *med;
1281 int recovering, abort_recovery;
1283 if (req->rq_export == NULL) {
1284 CERROR("lustre_mds: operation %d on unconnected MDS\n",
1285 req->rq_reqmsg->opc);
1286 req->rq_status = -ENOTCONN;
1287 GOTO(out, rc = -ENOTCONN);
1290 med = &req->rq_export->exp_mds_data;
1291 obd = req->rq_export->exp_obd;
1294 /* Check for aborted recovery. */
/* Snapshot the recovery flags under the processing-task lock. */
1295 spin_lock_bh(&obd->obd_processing_task_lock);
1296 abort_recovery = obd->obd_abort_recovery;
1297 recovering = obd->obd_recovering;
1298 spin_unlock_bh(&obd->obd_processing_task_lock);
1299 if (abort_recovery) {
1300 target_abort_recovery(obd);
1301 } else if (recovering) {
1302 rc = filter_recovery_request(req, obd, &should_process);
1303 if (rc || !should_process)
/* Per-opcode dispatch. */
1308 switch (req->rq_reqmsg->opc) {
1310 DEBUG_REQ(D_INODE, req, "connect");
1311 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1312 rc = target_handle_connect(req, mds_handle);
1313 /* Make sure that last_rcvd is correct. */
1315 /* Now that we have an export, set mds. */
1316 mds = mds_req2mds(req);
1317 mds_fsync_super(mds->mds_sb);
1321 case MDS_DISCONNECT:
1322 DEBUG_REQ(D_INODE, req, "disconnect");
1323 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1324 rc = target_handle_disconnect(req);
1325 /* Make sure that last_rcvd is correct. */
1327 mds_fsync_super(mds->mds_sb);
1328 req->rq_status = rc; /* superfluous? */
1332 DEBUG_REQ(D_INODE, req, "getstatus");
1333 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1334 rc = mds_getstatus(req);
1337 case MDS_GETLOVINFO:
1338 DEBUG_REQ(D_INODE, req, "getlovinfo");
1339 rc = mds_getlovinfo(req);
1343 DEBUG_REQ(D_INODE, req, "getattr");
1344 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1345 rc = mds_getattr(0, req);
1348 case MDS_GETATTR_NAME: {
1349 struct lustre_handle lockh;
1350 DEBUG_REQ(D_INODE, req, "getattr_name");
1351 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1353 /* If this request gets a reconstructed reply, we won't be
1354 * acquiring any new locks in mds_getattr_name, so we don't
1358 rc = mds_getattr_name(0, req, &lockh);
/* Drop the PR lock we only needed for the duration of the getattr. */
1359 if (rc == 0 && lockh.cookie)
1360 ldlm_lock_decref(&lockh, LCK_PR);
1364 DEBUG_REQ(D_INODE, req, "statfs");
1365 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1366 rc = mds_statfs(req);
1370 DEBUG_REQ(D_INODE, req, "readpage");
1371 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1372 rc = mds_readpage(req);
1374 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
/* MDS_REINT: peek at the reint opcode to size the reply buffers. */
1379 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*opcp));
1381 int size[3] = {sizeof(struct mds_body), mds->mds_max_mdsize,
1382 mds->mds_max_cookiesize};
1385 /* NB only peek inside req now; mds_reint() will swab it */
1387 CERROR ("Can't inspect opcode\n");
1392 if (lustre_msg_swabbed (req->rq_reqmsg))
1395 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1396 (opc < sizeof(reint_names) / sizeof(reint_names[0]) ||
1397 reint_names[opc] == NULL) ? reint_names[opc] :
1400 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
/* Unlink/open replies carry extra buffers (cookies / EA data). */
1402 if (opc == REINT_UNLINK)
1404 else if (opc == REINT_OPEN)
1409 rc = lustre_pack_msg(bufcount, size, NULL,
1410 &req->rq_replen, &req->rq_repmsg);
1414 rc = mds_reint(req, 0, NULL);
1415 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET_REP, 0);
1420 DEBUG_REQ(D_INODE, req, "close");
1421 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1422 rc = mds_close(req);
1426 DEBUG_REQ(D_INODE, req, "pin");
1427 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
1432 DEBUG_REQ(D_INODE, req, "ping");
1433 rc = target_handle_ping(req);
1436 case OBD_LOG_CANCEL:
1437 CDEBUG(D_INODE, "log cancel\n");
1438 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
1439 rc = -ENOTSUPP; /* la la la */
1443 DEBUG_REQ(D_INODE, req, "enqueue");
1444 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1445 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1446 ldlm_server_blocking_ast);
1449 DEBUG_REQ(D_INODE, req, "convert");
1450 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1451 rc = ldlm_handle_convert(req);
1453 case LDLM_BL_CALLBACK:
1454 case LDLM_CP_CALLBACK:
1455 DEBUG_REQ(D_INODE, req, "callback");
1456 CERROR("callbacks should not happen on MDS\n");
1458 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
/* Unknown opcode: report -ENOTSUPP to the client. */
1461 req->rq_status = -ENOTSUPP;
1462 rc = ptlrpc_error(req);
/* Reply fixup: publish last_xid / last_committed for the client. */
1468 /* If we're DISCONNECTing, the mds_export_data is already freed */
1469 if (!rc && req->rq_reqmsg->opc != MDS_DISCONNECT) {
1470 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1471 struct obd_device *obd = list_entry(mds, struct obd_device,
1473 req->rq_repmsg->last_xid =
1474 le64_to_cpu(med->med_mcd->mcd_last_xid);
1476 if (!obd->obd_no_transno) {
1477 req->rq_repmsg->last_committed =
1478 obd->obd_last_committed;
1480 DEBUG_REQ(D_IOCTL, req,
1481 "not sending last_committed update");
1483 CDEBUG(D_INFO, "last_transno "LPU64", last_committed "LPU64
1485 mds->mds_last_transno, obd->obd_last_committed,
/* The client's last replay gets its reply queued until recovery ends. */
1490 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LAST_REPLAY) {
1491 if (obd && obd->obd_recovering) {
1492 DEBUG_REQ(D_HA, req, "LAST_REPLAY, queuing reply");
1493 return target_queue_final_reply(req, rc);
1495 /* Lost a race with recovery; let the error path DTRT. */
1496 rc = req->rq_status = -ENOTCONN;
1499 target_send_reply(req, rc, OBD_FAIL_MDS_ALL_REPLY_NET);
1503 /* Update the server data on disk. This stores the new mount_count and
1504 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1505 * then the server last_rcvd value may be less than that of the clients.
1506 * This will alert us that we may need to do client recovery.
1508 * Also assumes for mds_last_transno that we are not modifying it (no locking).
1510 int mds_update_server_data(struct obd_device *obd)
1512 struct mds_obd *mds = &obd->u.mds;
1513 struct mds_server_data *msd = mds->mds_server_data;
1514 struct file *filp = mds->mds_rcvd_filp;
1515 struct obd_run_ctxt saved;
/* Switch to the MDS filesystem context for the write + fsync below. */
1519 push_ctxt(&saved, &mds->mds_ctxt, NULL);
1520 msd->msd_last_transno = cpu_to_le64(mds->mds_last_transno);
1521 msd->msd_mount_count = cpu_to_le64(mds->mds_mount_count);
1523 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
1524 mds->mds_mount_count, mds->mds_last_transno);
/* NOTE(review): success is detected by rc == sizeof(*msd), i.e.
 * fsfilt_write_record is expected to return the byte count here --
 * confirm against the fsfilt API, since a short write also lands in
 * the error branch. */
1525 rc = fsfilt_write_record(obd, filp, msd, sizeof(*msd), &off);
1526 if (rc != sizeof(*msd)) {
1527 CERROR("error writing MDS server data: rc = %d\n", rc);
/* Force the last_rcvd file to stable storage. */
1532 rc = file_fsync(filp, filp->f_dentry, 1);
1534 CERROR("error flushing MDS server data: rc = %d\n", rc);
1537 pop_ctxt(&saved, &mds->mds_ctxt, NULL);
1541 /* mount the file system (secretly) */
/* Device setup for the MDS obd: validates the ioctl parameters, loads the
 * fsfilt backend, kernel-mounts the backing filesystem with the
 * "iopen_nopriv" option, initialises the MDS filesystem state, the
 * (optional) orphan-log catalog, the server-side DLM namespace, and the
 * ldlm client used for callbacks.  Error paths unwind in reverse order
 * via the err_* labels (several of which are elided in this extract). */
1542 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
1544 struct obd_ioctl_data* data = buf;
1545 struct mds_obd *mds = &obd->u.mds;
1546 struct vfsmount *mnt;
1552 dev_clear_rdonly(2);
/* inlbuf1 = device name, inlbuf2 = fstype: both mandatory. */
1553 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2)
1554 RETURN(rc = -EINVAL);
1556 if (data->ioc_inlbuf4)
1557 obd_str2uuid(&mds->mds_osc_uuid, data->ioc_inlbuf4);
1559 obd->obd_fsops = fsfilt_get_ops(data->ioc_inlbuf2);
1560 if (IS_ERR(obd->obd_fsops))
1561 RETURN(rc = PTR_ERR(obd->obd_fsops));
1564 if (data->ioc_inllen3 > 0 && data->ioc_inlbuf3) {
1565 if (*data->ioc_inlbuf3 == '/') {
1566 CERROR("mds namespace mount: %s\n",
1568 // mds->mds_nspath = strdup(ioc->inlbuf4);
1570 CERROR("namespace mount must be absolute path: '%s'\n",
/* Mount options are passed via a zeroed page holding "iopen_nopriv". */
1575 if (!(page = __get_free_page(GFP_KERNEL)))
1578 memset((void *)page, 0, PAGE_SIZE);
1579 sprintf((char *)page, "iopen_nopriv");
1581 mnt = do_kern_mount(data->ioc_inlbuf2, 0,
1582 data->ioc_inlbuf1, (void *)page);
1586 CERROR("do_kern_mount failed: rc = %d\n", rc);
1590 CDEBUG(D_SUPER, "%s: mnt = %p\n", data->ioc_inlbuf1, mnt);
1591 mds->mds_sb = mnt->mnt_root->d_inode->i_sb;
1593 GOTO(err_put, rc = -ENODEV);
1595 spin_lock_init(&mds->mds_transno_lock);
1596 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
1597 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
1598 rc = mds_fs_setup(obd, mnt);
1600 CERROR("MDS filesystem method init failed: rc = %d\n", rc);
1604 #ifdef ENABLE_ORPHANS
1605 rc = llog_start_commit_thread();
1610 #ifdef ENABLE_ORPHANS
1611 mds->mds_catalog = mds_get_catalog(obd);
1612 if (IS_ERR(mds->mds_catalog))
1613 GOTO(err_fs, rc = PTR_ERR(mds->mds_catalog));
1616 obd->obd_namespace = ldlm_namespace_new("mds_server",
1617 LDLM_NAMESPACE_SERVER);
1618 if (obd->obd_namespace == NULL) {
1619 mds_cleanup(obd, 0);
1620 GOTO(err_log, rc = -ENOMEM);
1623 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
1624 "mds_ldlm_client", &obd->obd_ldlm_client);
1626 mds->mds_has_lov_desc = 0;
/* Recovery is supported on the MDS. */
1627 obd->obd_replayable = 1;
/* Error unwind: release catalog, fs state, mount, fsfilt ops. */
1632 #ifdef ENABLE_ORPHANS
1633 mds_put_catalog(mds->mds_catalog);
1634 /* No extra cleanup needed for llog_init_commit_thread() */
1637 mds_fs_cleanup(obd, 0);
1640 mntput(mds->mds_vfsmnt);
1644 fsfilt_put_ops(obd->obd_fsops);
/* Tear down the MDS obd: release the orphan catalog, disconnect from the
 * OSC, persist the server data, clean up filesystem state, drop the
 * mount, free the DLM namespace, and stop any pending recovery timer.
 * Inverse of mds_setup(). */
1648 static int mds_cleanup(struct obd_device *obd, int flags)
1650 struct mds_obd *mds = &obd->u.mds;
/* Nothing to do if setup never mounted the backing fs. */
1653 if (mds->mds_sb == NULL)
1656 #ifdef ENABLE_ORPHANS
1657 mds_put_catalog(mds->mds_catalog);
1659 if (mds->mds_osc_obd)
1660 obd_disconnect(&mds->mds_osc_conn, flags);
/* Write last_transno/mount_count so a restart can detect unclean state. */
1661 mds_update_server_data(obd);
1662 mds_fs_cleanup(obd, flags);
1666 /* 2 seems normal on mds, (may_umount() also expects 2
1667 fwiw), but we only see 1 at this point in obdfilter. */
1668 if (atomic_read(&obd->u.mds.mds_vfsmnt->mnt_count) > 2)
1669 CERROR("%s: mount point busy, mnt_count: %d\n", obd->obd_name,
1670 atomic_read(&obd->u.mds.mds_vfsmnt->mnt_count));
1672 mntput(mds->mds_vfsmnt);
1675 ldlm_namespace_free(obd->obd_namespace);
1677 if (obd->obd_recovering)
1678 target_cancel_recovery_timer(obd);
1680 dev_clear_rdonly(2);
1681 fsfilt_put_ops(obd->obd_fsops);
/* For a RESENT request, recover the server-side lock handle that was
 * granted to the original send.
 *
 * Walks the export's held-locks list under the namespace lock looking
 * for a lock whose remote handle matches the one in the resent ldlm
 * request; on a match, 'lockh' is pointed at the existing local lock so
 * the intent handlers reuse it instead of granting a second lock.
 * No-op for non-RESENT requests. */
1686 static void fixup_handle_for_resent_req(struct ptlrpc_request *req,
1687 struct ldlm_lock *new_lock,
1688 struct lustre_handle *lockh)
1690 struct obd_export *exp = req->rq_export;
1691 struct obd_device *obd = exp->exp_obd;
1692 struct ldlm_request *dlmreq =
1693 lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*dlmreq));
1694 struct lustre_handle remote_hdl = dlmreq->lock_handle1;
1695 struct list_head *iter;
1697 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
1700 l_lock(&obd->obd_namespace->ns_lock);
1701 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
1702 struct ldlm_lock *lock;
1703 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
/* Skip the freshly-created lock itself. */
1704 if (lock == new_lock)
1706 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
1707 lockh->cookie = lock->l_handle.h_cookie;
1708 DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64,
1710 l_unlock(&obd->obd_namespace->ns_lock);
1715 l_unlock(&obd->obd_namespace->ns_lock);
1716 DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64,
/* Test whether disposition bit(s) 'flag' are set in the intent reply. */
1720 int intent_disposition(struct ldlm_reply *rep, int flag)
1724 return (rep->lock_policy_res1 & flag);
/* Set disposition bit(s) 'flag' in the intent reply for the client. */
1727 void intent_set_disposition(struct ldlm_reply *rep, int flag)
1731 rep->lock_policy_res1 |= flag;
/* Intent-lock policy callback registered with the DLM.
 *
 * When the enqueue request carries an intent (bufcount > 1), this
 * executes the intended metadata operation (open/create/unlink/getattr
 * etc.) on the server side while the lock is being granted, then either
 * aborts the lock (ELDLM_LOCK_ABORTED -- the client gets only the
 * operation result) or replaces the about-to-be-granted lock with the
 * one the operation actually took (ELDLM_LOCK_REPLACED).  Without an
 * intent, it simply packs a plain ldlm reply.
 *
 * NOTE(review): case labels, GOTOs and several closing braces are elided
 * in this extract; code left byte-identical. */
1734 static int ldlm_intent_policy(struct ldlm_namespace *ns,
1735 struct ldlm_lock **lockp, void *req_cookie,
1736 ldlm_mode_t mode, int flags, void *data)
1738 struct ptlrpc_request *req = req_cookie;
1739 struct ldlm_lock *lock = *lockp;
1745 if (req->rq_reqmsg->bufcount > 1) {
1746 /* an intent needs to be considered */
1747 struct ldlm_intent *it;
1748 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
1749 struct ldlm_reply *rep;
1750 struct lustre_handle lockh;
1751 struct ldlm_lock *new_lock;
1752 int offset = 2, repsize[4] = {sizeof(struct ldlm_reply),
1753 sizeof(struct mds_body),
1754 mds->mds_max_mdsize,
1755 mds->mds_max_cookiesize};
1757 it = lustre_swab_reqbuf(req, 1, sizeof (*it),
1758 lustre_swab_ldlm_intent);
1760 CERROR ("Intent missing\n");
1761 req->rq_status = -EFAULT;
1762 RETURN(req->rq_status);
1765 LDLM_DEBUG(lock, "intent policy, opc: %s",
1766 ldlm_it2str(it->opc));
/* Unlink replies carry an extra (cookie) buffer: 4 vs 3. */
1768 req->rq_status = lustre_pack_msg(it->opc == IT_UNLINK ? 4 : 3,
1769 repsize, NULL, &req->rq_replen,
1772 RETURN(req->rq_status);
1774 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*rep));
1775 intent_set_disposition(rep, DISP_IT_EXECD);
/* Resent request: reuse the lock granted to the original send. */
1777 fixup_handle_for_resent_req(req, lock, &lockh);
1779 /* execute policy */
1780 switch ((long)it->opc) {
1782 case IT_CREAT|IT_OPEN:
1783 /* XXX swab here to assert that an mds_open reint
1784 * packet is following */
1785 rep->lock_policy_res2 = mds_reint(req, offset, &lockh);
1786 /* We abort the lock if the lookup was negative and
1787 * we did not make it to the OPEN portion */
1788 if (!intent_disposition(rep, DISP_LOOKUP_EXECD))
1789 RETURN(ELDLM_LOCK_ABORTED);
1790 if (intent_disposition(rep, DISP_LOOKUP_NEG) &&
1791 !intent_disposition(rep, DISP_OPEN_OPEN))
1792 RETURN(ELDLM_LOCK_ABORTED);
1797 rep->lock_policy_res2 = mds_getattr_name(offset, req,
1799 /* FIXME: we need to sit down and decide on who should
1800 * set req->rq_status, who should return negative and
1801 * positive return values, and what they all mean.
1802 * - replay: returns 0 & req->status is old status
1803 * - otherwise: returns req->status */
1804 if (!intent_disposition(rep, DISP_LOOKUP_POS) ||
1805 rep->lock_policy_res2)
1806 RETURN(ELDLM_LOCK_ABORTED);
1807 if (req->rq_status != 0) {
1808 rep->lock_policy_res2 = req->rq_status;
1809 RETURN(ELDLM_LOCK_ABORTED);
1813 CERROR("Unhandled intent "LPD64"\n", it->opc);
1817 /* By this point, whatever function we called above must have
1818 * either filled in 'lockh', been an intent replay, or returned
1819 * an error. We want to allow replayed RPCs to not get a lock,
1820 * since we would just drop it below anyways because lock replay
1821 * is done separately by the client afterwards. For regular
1822 * RPCs we want to give the new lock to the client instead of
1823 * whatever lock it was about to get.
1825 new_lock = ldlm_handle2lock(&lockh);
1826 if (flags & LDLM_FL_INTENT_ONLY && !new_lock)
1827 RETURN(ELDLM_LOCK_ABORTED);
1829 LASSERT(new_lock != NULL);
1831 /* If we've already given this lock to a client once, then we
1832 * should have no readers or writers. Otherwise, we should
1833 * have one reader _or_ writer ref (which will be zeroed below
1834 * before returning the lock to a client.
1836 if (new_lock->l_export == req->rq_export)
1837 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
1839 LASSERT(new_lock->l_export == NULL);
1840 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
1843 /* If we're running an intent only, we want to abort the new
1844 * lock, and let the client abort the original lock. */
1845 if (flags & LDLM_FL_INTENT_ONLY) {
1846 LDLM_DEBUG(lock, "INTENT_ONLY, aborting locks");
1847 l_lock(&new_lock->l_resource->lr_namespace->ns_lock);
1848 if (new_lock->l_readers)
1849 ldlm_lock_decref(&lockh, LCK_PR);
1851 ldlm_lock_decref(&lockh, LCK_PW);
1852 l_unlock(&new_lock->l_resource->lr_namespace->ns_lock);
1853 LDLM_LOCK_PUT(new_lock);
1854 RETURN(ELDLM_LOCK_ABORTED);
1859 rep->lock_policy_res2 = req->rq_status;
1861 if (new_lock->l_export == req->rq_export) {
1862 /* Already gave this to the client, which means that we
1863 * reconstructed a reply. */
1864 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
1866 RETURN(ELDLM_LOCK_REPLACED);
1869 /* Fixup the lock to be given to the client */
1870 l_lock(&new_lock->l_resource->lr_namespace->ns_lock);
1871 new_lock->l_readers = 0;
1872 new_lock->l_writers = 0;
1874 new_lock->l_export = class_export_get(req->rq_export);
1875 list_add(&new_lock->l_export_chain,
1876 &new_lock->l_export->exp_ldlm_data.led_held_locks);
1878 /* We don't need to worry about completion_ast (which isn't set
1879 * in 'lock' yet anyways), because this lock is already
1881 new_lock->l_blocking_ast = lock->l_blocking_ast;
1883 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
1884 sizeof(lock->l_remote_handle));
1886 new_lock->l_flags &= ~(LDLM_FL_LOCAL | LDLM_FL_AST_SENT |
1889 LDLM_LOCK_PUT(new_lock);
1890 l_unlock(&new_lock->l_resource->lr_namespace->ns_lock);
1892 RETURN(ELDLM_LOCK_REPLACED);
/* No intent: pack a bare ldlm reply. */
1894 int size = sizeof(struct ldlm_reply);
1895 if (lustre_pack_msg(1, &size, NULL, &req->rq_replen,
/* Attach /proc entries for the MDS obd type (index 0 of the multi vars). */
1904 int mds_attach(struct obd_device *dev, obd_count len, void *data)
1906 struct lprocfs_static_vars lvars;
1908 lprocfs_init_multi_vars(0, &lvars);
1909 return lprocfs_obd_attach(dev, lvars.obd_vars);
/* Remove the MDS obd's /proc entries; inverse of mds_attach(). */
1912 int mds_detach(struct obd_device *dev)
1914 return lprocfs_obd_detach(dev);
/* Attach /proc entries for the MDT obd type (index 1 of the multi vars). */
1917 int mdt_attach(struct obd_device *dev, obd_count len, void *data)
1919 struct lprocfs_static_vars lvars;
1921 lprocfs_init_multi_vars(1, &lvars);
1922 return lprocfs_obd_attach(dev, lvars.obd_vars);
/* Remove the MDT obd's /proc entries; inverse of mdt_attach(). */
1925 int mdt_detach(struct obd_device *dev)
1927 return lprocfs_obd_detach(dev);
/* Start the three MDS ptlrpc services (main, setattr, readpage) and
 * spawn MDT_NUM_THREADS worker threads for each.  All three services use
 * mds_handle as their request handler, differing only in request portal.
 * On failure, already-started services/threads are unwound via the
 * err_thread* labels (labels elided in this extract). */
1930 static int mdt_setup(struct obd_device *obddev, obd_count len, void *buf)
1932 struct mds_obd *mds = &obddev->u.mds;
1936 mds->mds_service = ptlrpc_init_svc(MDS_NEVENTS, MDS_NBUFS,
1937 MDS_BUFSIZE, MDS_MAXREQSIZE,
1938 MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
1939 mds_handle, "mds", obddev);
1941 if (!mds->mds_service) {
1942 CERROR("failed to start service\n");
1943 RETURN(rc = -ENOMEM);
1946 for (i = 0; i < MDT_NUM_THREADS; i++) {
1948 sprintf(name, "ll_mdt_%02d", i);
1949 rc = ptlrpc_start_thread(obddev, mds->mds_service, name);
1951 CERROR("cannot start MDT thread #%d: rc %d\n", i, rc);
1952 GOTO(err_thread, rc);
1956 mds->mds_setattr_service =
1957 ptlrpc_init_svc(MDS_NEVENTS, MDS_NBUFS,
1958 MDS_BUFSIZE, MDS_MAXREQSIZE,
1959 MDS_SETATTR_PORTAL, MDC_REPLY_PORTAL,
1960 mds_handle, "mds_setattr", obddev);
1961 if (!mds->mds_setattr_service) {
/* NOTE(review): message says "getattr" but this is the setattr service. */
1962 CERROR("failed to start getattr service\n");
1963 GOTO(err_thread, rc = -ENOMEM);
1966 for (i = 0; i < MDT_NUM_THREADS; i++) {
1968 sprintf(name, "ll_mdt_attr_%02d", i);
1969 rc = ptlrpc_start_thread(obddev, mds->mds_setattr_service,
1972 CERROR("cannot start MDT setattr thread #%d: rc %d\n",
1974 GOTO(err_thread2, rc);
1978 mds->mds_readpage_service =
1979 ptlrpc_init_svc(MDS_NEVENTS, MDS_NBUFS,
1980 MDS_BUFSIZE, MDS_MAXREQSIZE,
1981 MDS_READPAGE_PORTAL, MDC_REPLY_PORTAL,
1982 mds_handle, "mds_readpage", obddev);
1983 if (!mds->mds_readpage_service) {
1984 CERROR("failed to start readpage service\n");
1985 GOTO(err_thread2, rc = -ENOMEM);
1988 for (i = 0; i < MDT_NUM_THREADS; i++) {
1990 sprintf(name, "ll_mdt_rdpg_%02d", i);
1991 rc = ptlrpc_start_thread(obddev, mds->mds_readpage_service,
1994 CERROR("cannot start MDT readpage thread #%d: rc %d\n",
1996 GOTO(err_thread3, rc);
/* Error unwind: stop threads and unregister services in reverse order. */
2003 ptlrpc_stop_all_threads(mds->mds_readpage_service);
2004 ptlrpc_unregister_service(mds->mds_readpage_service);
2006 ptlrpc_stop_all_threads(mds->mds_setattr_service);
2007 ptlrpc_unregister_service(mds->mds_setattr_service);
2009 ptlrpc_stop_all_threads(mds->mds_service);
2010 ptlrpc_unregister_service(mds->mds_service);
/* Stop all MDS service threads and unregister the three ptlrpc services;
 * inverse of mdt_setup(). */
2015 static int mdt_cleanup(struct obd_device *obddev, int flags)
2017 struct mds_obd *mds = &obddev->u.mds;
2020 ptlrpc_stop_all_threads(mds->mds_readpage_service);
2021 ptlrpc_unregister_service(mds->mds_readpage_service);
2023 ptlrpc_stop_all_threads(mds->mds_setattr_service);
2024 ptlrpc_unregister_service(mds->mds_setattr_service);
2026 ptlrpc_stop_all_threads(mds->mds_service);
2027 ptlrpc_unregister_service(mds->mds_service);
2032 extern int mds_iocontrol(unsigned int cmd, struct lustre_handle *conn,
2033 int len, void *karg, void *uarg);
2035 /* use obd ops to offer management infrastructure */
/* Method table for the MDS obd type (device management side). */
2036 static struct obd_ops mds_obd_ops = {
2037 o_owner: THIS_MODULE,
2038 o_attach: mds_attach,
2039 o_detach: mds_detach,
2040 o_connect: mds_connect,
2041 o_disconnect: mds_disconnect,
2043 o_cleanup: mds_cleanup,
2044 o_statfs: mds_obd_statfs,
2045 o_iocontrol: mds_iocontrol
/* Method table for the MDT obd type (request-service side). */
2048 static struct obd_ops mdt_obd_ops = {
2049 o_owner: THIS_MODULE,
2050 o_attach: mdt_attach,
2051 o_detach: mdt_detach,
2053 o_cleanup: mdt_cleanup,
2057 static int __init mds_init(void)
2059 struct lprocfs_static_vars lvars;
2061 lprocfs_init_multi_vars(0, &lvars);
2062 class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
2063 lprocfs_init_multi_vars(1, &lvars);
2064 class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
2065 ldlm_register_intent(ldlm_intent_policy);
/* Module exit: unregister the intent policy and both obd types;
 * inverse of mds_init(). */
2070 static void /*__exit*/ mds_exit(void)
2072 ldlm_unregister_intent();
2073 class_unregister_type(LUSTRE_MDS_NAME);
2074 class_unregister_type(LUSTRE_MDT_NAME);
2077 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
2078 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
2079 MODULE_LICENSE("GPL");
2081 module_init(mds_init);
2082 module_exit(mds_exit);