1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of Lustre, http://www.lustre.org.
15 * Lustre is free software; you can redistribute it and/or
16 * modify it under the terms of version 2 of the GNU General Public
17 * License as published by the Free Software Foundation.
19 * Lustre is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with Lustre; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 #define DEBUG_SUBSYSTEM S_MDS
32 #include <linux/module.h>
33 #include <linux/lustre_mds.h>
34 #include <linux/lustre_dlm.h>
35 #include <linux/init.h>
36 #include <linux/obd_class.h>
37 #include <linux/random.h>
39 #include <linux/jbd.h>
40 #include <linux/ext3_fs.h>
41 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
42 # include <linux/smp_lock.h>
43 # include <linux/buffer_head.h>
44 # include <linux/workqueue.h>
45 # include <linux/mount.h>
47 # include <linux/locks.h>
49 #include <linux/obd_lov.h>
50 #include <linux/lustre_mds.h>
51 #include <linux/lustre_fsfilt.h>
52 #include <linux/lprocfs_status.h>
53 #include "mds_internal.h"
/* Prototypes for LOV target/descriptor helpers defined in other MDS files. */
55 extern int mds_get_lovtgts(struct mds_obd *obd, int tgt_count,
56 struct obd_uuid *uuidarray);
57 extern int mds_get_lovdesc(struct mds_obd *obd, struct lov_desc *desc);
/* Records the transno/result of a completed operation; defined elsewhere. */
58 int mds_finish_transno(struct mds_obd *mds, struct inode *i, void *handle,
59 struct ptlrpc_request *req, int rc, int disp);
/* Forward declaration so earlier code can reference the cleanup entry point. */
60 static int mds_cleanup(struct obd_device * obddev, int force, int failover);
/* Map an incoming request to its MDS device state via the request's export. */
62 inline struct mds_obd *mds_req2mds(struct ptlrpc_request *req)
64 return &req->rq_export->exp_obd->u.mds;
/* l_wait_event timeout callback for a stalled bulk send: evict the
 * unresponsive client and abort the outstanding bulk descriptor. */
67 static int mds_bulk_timeout(void *data)
69 struct ptlrpc_bulk_desc *desc = data;
70 struct obd_export *exp = desc->bd_export;
72 CERROR("bulk send timed out: evicting %s@%s\n",
73 exp->exp_client_uuid.uuid,
74 exp->exp_connection->c_remote_uuid.uuid);
/* Fail the export so the server drops all state for this client. */
75 ptlrpc_fail_export(exp);
76 ptlrpc_abort_bulk (desc);
80 /* Assumes caller has already pushed into the kernel filesystem context */
/* Read one PAGE_CACHE_SIZE chunk of a directory file at `offset` and bulk-PUT
 * it to the client identified by `req`/`xid`.  Returns 0 on success or a
 * negative errno; cleanup labels unwind the page and bulk descriptor. */
81 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
82 __u64 offset, __u64 xid)
84 struct ptlrpc_bulk_desc *desc;
85 struct l_wait_info lwi;
/* Readpage offsets must be page-aligned (checked again by the caller). */
90 LASSERT ((offset & (PAGE_CACHE_SIZE - 1)) == 0);
92 desc = ptlrpc_prep_bulk_exp (req, BULK_PUT_SOURCE, MDS_BULK_PORTAL);
94 GOTO(out, rc = -ENOMEM);
96 LASSERT (PAGE_SIZE == PAGE_CACHE_SIZE);
97 page = alloc_pages (GFP_KERNEL, 0);
99 GOTO(cleanup_bulk, rc = -ENOMEM);
101 rc = ptlrpc_prep_bulk_page(desc, page, 0, PAGE_CACHE_SIZE);
103 GOTO(cleanup_buf, rc);
105 CDEBUG(D_EXT2, "reading %lu@"LPU64" from dir %lu (size %llu)\n",
106 PAGE_CACHE_SIZE, offset, file->f_dentry->d_inode->i_ino,
107 file->f_dentry->d_inode->i_size);
/* Fill the page from the backing filesystem; must read a full page. */
108 rc = fsfilt_readpage(req->rq_export->exp_obd, file, page_address (page),
109 PAGE_CACHE_SIZE, (loff_t *)&offset);
111 if (rc != PAGE_CACHE_SIZE)
112 GOTO(cleanup_buf, rc = -EIO);
114 rc = ptlrpc_bulk_put(desc);
116 GOTO(cleanup_buf, rc);
/* Fault-injection hook: simulate a failed sendpage in testing. */
118 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
119 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
120 OBD_FAIL_MDS_SENDPAGE, rc);
121 ptlrpc_abort_bulk(desc);
122 GOTO(cleanup_buf, rc);
/* Wait for bulk completion; on timeout mds_bulk_timeout evicts the client. */
125 lwi = LWI_TIMEOUT(obd_timeout * HZ, mds_bulk_timeout, desc);
126 rc = l_wait_event(desc->bd_waitq, ptlrpc_bulk_complete (desc), &lwi);
128 LASSERT (rc == -ETIMEDOUT);
129 GOTO(cleanup_buf, rc);
134 __free_pages (page, 0);
136 ptlrpc_free_bulk (desc);
141 /* only valid locked dentries or errors should be returned */
/* Look up the dentry for `fid` and take a PLAIN DLM lock on it (resource name
 * is {ino, generation}).  On enqueue failure the dentry is dropped and an
 * ERR_PTR is returned; on success `lockh` holds the granted lock handle. */
142 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
143 struct vfsmount **mnt, int lock_mode,
144 struct lustre_handle *lockh)
146 struct mds_obd *mds = &obd->u.mds;
147 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
148 struct ldlm_res_id res_id = { .name = {0} };
155 res_id.name[0] = de->d_inode->i_ino;
156 res_id.name[1] = de->d_inode->i_generation;
157 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, NULL,
158 res_id, LDLM_PLAIN, NULL, 0, lock_mode,
159 &flags, ldlm_completion_ast,
160 mds_blocking_ast, NULL, lockh);
161 if (rc != ELDLM_OK) {
163 retval = ERR_PTR(-ENOLCK); /* XXX translate ldlm code */
/* Compatibility: older kernels spell this dcache flag DCACHE_NFSD_DISCONNECTED. */
169 #ifndef DCACHE_DISCONNECTED
170 #define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
174 /* Look up an entry by inode number. */
175 /* this function ONLY returns valid dget'd dentries with an initialized inode
/* Resolve a Lustre fid {ino, generation} to a dget'd dentry via a lookup of
 * "0x<ino>" under the special fid directory.  Returns ERR_PTR(-ESTALE) for a
 * bad fid and ERR_PTR(-ENOENT) for a missing inode or generation mismatch.
 * If `mnt` is non-NULL it is set to the MDS vfsmount. */
177 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
178 struct vfsmount **mnt)
181 unsigned long ino = fid->id;
182 __u32 generation = fid->generation;
184 struct dentry *result;
187 RETURN(ERR_PTR(-ESTALE));
/* Build the name used by the fid-directory lookup convention. */
189 snprintf(fid_name, sizeof(fid_name), "0x%lx", ino);
191 /* under ext3 this is neither supposed to return bad inodes
193 result = ll_lookup_one_len(fid_name, mds->mds_fid_de, strlen(fid_name));
197 inode = result->d_inode;
199 RETURN(ERR_PTR(-ENOENT));
201 CDEBUG(D_DENTRY, "--> mds_fid2dentry: ino %lu, gen %u, sb %p\n",
202 inode->i_ino, inode->i_generation, inode->i_sb);
/* generation == 0 means "don't care"; otherwise it must match exactly. */
204 if (generation && inode->i_generation != generation) {
205 /* we didn't find the right inode.. */
206 CERROR("bad inode %lu, link: %d ct: %d or generation %u/%u\n",
207 inode->i_ino, inode->i_nlink,
208 atomic_read(&inode->i_count), inode->i_generation,
211 RETURN(ERR_PTR(-ENOENT));
215 *mnt = mds->mds_vfsmnt;
223 /* Establish a connection to the MDS.
225 * This will set up an export structure for the client to hold state data
226 * about that client, like open files, the last operation number it did
227 * on the server, etc.
/* Returns 0 on success; on failure after class_connect, the client data is
 * freed and the connection is torn down via class_disconnect. */
229 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
230 struct obd_uuid *cluuid)
232 struct obd_export *exp;
233 struct mds_export_data *med;
234 struct mds_client_data *mcd;
235 int rc, abort_recovery;
/* Basic argument sanity; all three pointers are required. */
238 if (!conn || !obd || !cluuid)
241 /* Check for aborted recovery. */
242 spin_lock_bh(&obd->obd_processing_task_lock);
243 abort_recovery = obd->obd_abort_recovery;
244 spin_unlock_bh(&obd->obd_processing_task_lock);
246 target_abort_recovery(obd);
248 /* XXX There is a small race between checking the list and adding a
249 * new connection for the same UUID, but the real threat (list
250 * corruption when multiple different clients connect) is solved.
252 * There is a second race between adding the export to the list,
253 * and filling in the client data below. Hence skipping the case
254 * of NULL mcd above. We should already be controlling multiple
255 * connects at the client, and we can't hold the spinlock over
256 * memory allocations without risk of deadlocking.
258 rc = class_connect(conn, obd, cluuid);
261 exp = class_conn2export(conn);
263 med = &exp->exp_mds_data;
/* Drop the reference taken by class_conn2export; med stays valid via conn. */
264 class_export_put(exp);
266 OBD_ALLOC(mcd, sizeof(*mcd));
268 CERROR("mds: out of memory for client data\n");
269 GOTO(out_export, rc = -ENOMEM);
272 memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
/* Initialize the per-client open-file list and its lock. */
275 INIT_LIST_HEAD(&med->med_open_head);
276 spin_lock_init(&med->med_open_lock);
/* Register the client in the last_rcvd file (-1 = allocate a new slot). */
278 rc = mds_client_add(obd, &obd->u.mds, med, -1);
285 OBD_FREE(mcd, sizeof(*mcd));
287 class_disconnect(conn, 0);
/* Handle-table addref callback: bump the mfd refcount when the handle
 * is looked up (registered via class_handle_hash in mds_mfd_new). */
292 static void mds_mfd_addref(void *mfdp)
294 struct mds_file_data *mfd = mfdp;
296 atomic_inc(&mfd->mfd_refcount);
297 CDEBUG(D_INFO, "GETting mfd %p : new refcount %d\n", mfd,
298 atomic_read(&mfd->mfd_refcount));
/* Allocate a new open-file record and publish it in the handle table.
 * Initial refcount is 2: one for the caller, one for the handle entry. */
301 struct mds_file_data *mds_mfd_new(void)
303 struct mds_file_data *mfd;
305 OBD_ALLOC(mfd, sizeof *mfd);
307 CERROR("mds: out of memory\n");
311 atomic_set(&mfd->mfd_refcount, 2);
313 INIT_LIST_HEAD(&mfd->mfd_handle.h_link);
314 class_handle_hash(&mfd->mfd_handle, mds_mfd_addref);
/* Translate a client-supplied lustre_handle cookie back into its mfd
 * (takes a reference via the mds_mfd_addref callback). */
319 static struct mds_file_data *mds_handle2mfd(struct lustre_handle *handle)
322 LASSERT(handle != NULL);
323 RETURN(class_handle2object(handle->cookie));
/* Drop one reference on an mfd; free it when the count reaches zero.
 * The 0x5a5a upper bound catches use of poisoned/freed memory. */
326 void mds_mfd_put(struct mds_file_data *mfd)
328 CDEBUG(D_INFO, "PUTting mfd %p : new refcount %d\n", mfd,
329 atomic_read(&mfd->mfd_refcount) - 1);
330 LASSERT(atomic_read(&mfd->mfd_refcount) > 0 &&
331 atomic_read(&mfd->mfd_refcount) < 0x5a5a);
332 if (atomic_dec_and_test(&mfd->mfd_refcount)) {
/* Must already be unhashed (mds_mfd_destroy) before the last put. */
333 LASSERT(list_empty(&mfd->mfd_handle.h_link));
334 OBD_FREE(mfd, sizeof *mfd);
/* Remove the mfd from the handle table so no new lookups can find it. */
338 void mds_mfd_destroy(struct mds_file_data *mfd)
340 class_handle_unhash(&mfd->mfd_handle);
344 /* Call with med->med_open_lock held, please. */
/* Close one open-file record: unlink it from the export's open list, drop
 * the dentry reference (the actual "close"), and unhash the mfd. */
345 static int mds_close_mfd(struct mds_file_data *mfd, struct mds_export_data *med)
347 struct dentry *de = NULL;
350 LASSERT(spin_is_locked(&med->med_open_lock));
352 list_del(&mfd->mfd_list);
/* Pin the parent before dropping the child dentry below. */
354 if (mfd->mfd_dentry->d_parent) {
355 LASSERT(atomic_read(&mfd->mfd_dentry->d_parent->d_count));
356 de = dget(mfd->mfd_dentry->d_parent);
359 /* this is the actual "close" */
360 l_dput(mfd->mfd_dentry);
365 mds_mfd_destroy(mfd);
/* Disconnect a client: cancel all its DLM locks, record the failover flag
 * on the export, then tear down the connection. */
369 static int mds_disconnect(struct lustre_handle *conn, int failover)
371 struct obd_export *export = class_conn2export(conn);
376 ldlm_cancel_locks_for_export(export);
/* exp_failover controls whether client state is freed in destroy_export. */
378 spin_lock_irqsave(&export->exp_lock, flags);
379 export->exp_failover = failover;
380 spin_unlock_irqrestore(&export->exp_lock, flags);
382 rc = class_disconnect(conn, failover);
383 class_export_put(export);
/* Final teardown of a client export: force-close any files the client left
 * open, release a waiting outstanding reply, and (unless failing over)
 * free the client's last_rcvd slot. */
388 static void mds_destroy_export(struct obd_export *export)
390 struct mds_export_data *med = &export->exp_mds_data;
391 struct list_head *tmp, *n;
395 LASSERT(!strcmp(export->exp_obd->obd_type->typ_name,
399 * Close any open files.
401 spin_lock(&med->med_open_lock);
402 list_for_each_safe(tmp, n, &med->med_open_head) {
403 struct mds_file_data *mfd =
404 list_entry(tmp, struct mds_file_data, mfd_list);
405 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
406 struct dentry *dentry = mfd->mfd_dentry;
407 CERROR("force closing client file handle for %*s (%s:%lu)\n",
408 dentry->d_name.len, dentry->d_name.name,
409 kdevname(dentry->d_inode->i_sb->s_dev),
410 dentry->d_inode->i_ino);
412 rc = mds_close_mfd(mfd, med);
414 CDEBUG(D_INODE, "Error closing file: %d\n", rc);
416 spin_unlock(&med->med_open_lock);
/* If a reply is still waiting for its ACK, wake the waiter so it can
 * finish; the ACK-dependent lock cancellation is discussed below. */
418 if (export->exp_outstanding_reply) {
419 struct ptlrpc_request *req = export->exp_outstanding_reply;
422 /* Fake the ack, so the locks get cancelled. */
424 /* Actually we can't do this because it prevents us knowing
425 * if the ACK callback ran or not */
426 spin_lock_irqsave (&req->rq_lock, flags);
427 req->rq_want_ack = 0;
429 wake_up(&req->rq_wait_for_rep);
430 spin_unlock_irqrestore (&req->rq_lock, flags);
432 export->exp_outstanding_reply = NULL;
/* On failover the client data must survive for recovery; otherwise free it. */
435 if (!export->exp_failover)
436 mds_client_free(export);
441 * XXX This is NOT guaranteed to flush all transactions to disk (even though
442 * it is equivalent to calling sync()) because it only _starts_ the flush
443 * and does not wait for completion. It's better than nothing though.
444 * What we really want is a mild form of fsync_dev_lockfs(), but it is
445 * non-standard, or enabling do_sync_supers in ext3, just for this call.
/* Best-effort superblock flush: only writes if the sb is dirty and the
 * filesystem provides a write_super method. */
447 static void mds_fsync_super(struct super_block *sb)
451 if (sb->s_dirt && sb->s_op && sb->s_op->write_super)
452 sb->s_op->write_super(sb);
/* MDS_GETSTATUS handler: reply with the root fid of this MDS after flushing
 * pending transactions so the client sees an up-to-date last_committed. */
457 static int mds_getstatus(struct ptlrpc_request *req)
459 struct mds_obd *mds = mds_req2mds(req);
460 struct mds_body *body;
461 int rc, size = sizeof(*body);
464 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
465 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
466 CERROR("mds: out of memory for message: size=%d\n", size);
467 req->rq_status = -ENOMEM; /* superfluous? */
471 /* Flush any outstanding transactions to disk so the client will
472 * get the latest last_committed value and can drop their local
473 * requests if they have any. This would be fsync_super() if it
476 mds_fsync_super(mds->mds_sb);
478 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
479 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
481 /* the last_committed and last_xid fields are filled in for all
482 * replies already - no need to do so here also.
/* MDS_GETLOVINFO handler: return the LOV descriptor and the OST target UUID
 * array to the client, bounded by the client's declared reply buffer size. */
487 static int mds_getlovinfo(struct ptlrpc_request *req)
489 struct mds_obd *mds = mds_req2mds(req);
490 struct mds_status_req *streq;
491 struct lov_desc *desc;
492 struct obd_uuid *uuid0;
494 int rc, size[2] = {sizeof(*desc)};
497 streq = lustre_swab_reqbuf (req, 0, sizeof (*streq),
498 lustre_swab_mds_status_req);
500 CERROR ("Can't unpack mds_status_req\n");
/* Reject absurd client buffer sizes before allocating the reply. */
504 if (streq->repbuf > LOV_MAX_UUID_BUFFER_SIZE) {
505 CERROR ("Illegal request for uuid array > %d\n",
509 size[1] = streq->repbuf;
511 rc = lustre_pack_msg(2, size, NULL, &req->rq_replen, &req->rq_repmsg);
513 CERROR("mds: out of memory for message: size=%d\n", size[1]);
517 if (!mds->mds_has_lov_desc) {
518 req->rq_status = -ENOENT;
522 /* XXX We're sending the lov_desc in my byte order.
523 * Receiver will swab... */
524 desc = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*desc));
525 memcpy(desc, &mds->mds_lov_desc, sizeof (*desc));
527 tgt_count = mds->mds_lov_desc.ld_tgt_count;
/* The UUID array must fit in the client-sized second reply buffer. */
528 uuid0 = lustre_msg_buf (req->rq_repmsg, 1,
529 tgt_count * sizeof (*uuid0));
531 CERROR("too many targets, enlarge client buffers\n");
532 req->rq_status = -ENOSPC;
536 rc = mds_get_lovtgts(mds, tgt_count, uuid0);
538 CERROR("get_lovtgts error %d\n", rc);
/* DLM blocking AST for MDS-held locks: when another lock conflicts, mark the
 * lock CBPENDING and cancel it immediately if it has no readers/writers.
 * For LDLM_CB_CANCELING there is nothing to do. */
545 int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
546 void *data, int flag)
551 if (flag == LDLM_CB_CANCELING) {
552 /* Don't need to do anything here. */
556 /* XXX layering violation! -phil */
557 l_lock(&lock->l_resource->lr_namespace->ns_lock);
558 /* Get this: if mds_blocking_ast is racing with ldlm_intent_policy,
559 * such that mds_blocking_ast is called just before l_i_p takes the
560 * ns_lock, then by the time we get the lock, we might not be the
561 * correct blocking function anymore. So check, and return early, if
563 if (lock->l_blocking_ast != mds_blocking_ast) {
564 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
568 lock->l_flags |= LDLM_FL_CBPENDING;
/* Only cancel now if nobody holds references; otherwise the last
 * decref will notice CBPENDING and cancel it later. */
569 do_ast = (!lock->l_readers && !lock->l_writers);
570 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
573 struct lustre_handle lockh;
576 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
577 ldlm_lock2handle(lock, &lockh);
578 rc = ldlm_cli_cancel(&lockh);
580 CERROR("ldlm_cli_cancel: %d\n", rc);
582 LDLM_DEBUG(lock, "Lock still has references, will be "
/* Copy the striping EA (lov_mds_md) for `inode` into reply buffer `offset`
 * of `msg`, setting OBD_MD_FLEASIZE and eadatasize in `body` on success.
 * A missing/zero-length reply buffer is tolerated (no space was reserved). */
588 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg,
589 int offset, struct mds_body *body, struct inode *inode)
591 struct mds_obd *mds = &obd->u.mds;
592 struct lov_mds_md *lmm;
597 lmm = lustre_msg_buf(msg, offset, 0);
599 /* Some problem with getting eadata when I sized the reply
601 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
605 lmm_size = msg->buflens[offset];
607 /* I don't really like this, but it is a sanity check on the client
608 * MD request. However, if the client doesn't know how much space
609 * to reserve for the MD, this shouldn't be fatal either...
611 if (lmm_size > mds->mds_max_mdsize) {
612 CERROR("Reading MD for inode %lu of %d bytes > max %d\n",
613 inode->i_ino, lmm_size, mds->mds_max_mdsize);
/* fsfilt_get_md returns the number of EA bytes read (or negative errno). */
617 rc = fsfilt_get_md(obd, inode, lmm, lmm_size);
619 CERROR ("Error %d reading eadata for ino %lu\n",
622 body->valid |= OBD_MD_FLEASIZE;
623 body->eadatasize = rc;
/* Fill the getattr reply at `reply_off` from `dentry`'s inode: pack fid and
 * attributes, then (if requested) either the striping EA for regular files
 * or the symlink target for symlinks. */
630 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
631 struct ptlrpc_request *req,
632 struct mds_body *reqbody, int reply_off)
634 struct mds_body *body;
635 struct inode *inode = dentry->d_inode;
642 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof (*body));
643 LASSERT (body != NULL); /* caller prepped reply */
645 mds_pack_inode2fid(&body->fid1, inode);
646 mds_pack_inode2body(body, inode);
648 if (S_ISREG(inode->i_mode) &&
649 (reqbody->valid & OBD_MD_FLEASIZE) != 0) {
/* Striping EA goes in the buffer following the body. */
650 rc = mds_pack_md(obd, req->rq_repmsg, reply_off + 1,
652 } else if (S_ISLNK(inode->i_mode) &&
653 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
654 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off + 1, 0);
657 LASSERT (symname != NULL); /* caller prepped reply */
658 len = req->rq_repmsg->buflens[reply_off + 1];
660 rc = inode->i_op->readlink(dentry, symname, len);
662 CERROR("readlink failed: %d\n", rc);
/* Expect exactly len-1 bytes: the buffer was sized i_size + 1. */
663 } else if (rc != len - 1) {
664 CERROR ("Unexpected readlink rc %d: expecting %d\n",
668 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
669 body->valid |= OBD_MD_LINKNAME;
670 body->eadatasize = rc + 1;
671 symname[rc] = 0; /* NULL terminate */
/* Size and allocate the getattr reply message for `inode`: always one
 * mds_body buffer, plus an optional second buffer sized for the striping EA
 * (regular files with OBD_MD_FLEASIZE) or the symlink target
 * (symlinks with OBD_MD_LINKNAME).  Returns 0 or a negative errno. */
678 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct inode *inode,
681 struct mds_obd *mds = mds_req2mds(req);
682 struct mds_body *body;
683 int rc = 0, size[2] = {sizeof(*body)}, bufcount = 1;
686 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*body));
687 LASSERT (body != NULL); /* checked by caller */
688 LASSERT_REQSWABBED (req, offset); /* swabbed by caller */
690 if (S_ISREG(inode->i_mode) &&
691 (body->valid & OBD_MD_FLEASIZE) != 0) {
/* Probe the EA size without copying it (NULL buffer, 0 length). */
692 int rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0);
693 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
697 CERROR("error getting inode %lu MD: rc = %d\n",
700 } else if (rc > mds->mds_max_mdsize) {
702 CERROR("MD size %d larger than maximum possible %u\n",
703 rc, mds->mds_max_mdsize);
707 } else if (S_ISLNK (inode->i_mode) &&
708 (body->valid & OBD_MD_LINKNAME) != 0) {
/* The client reserves i_size + 1 bytes; warn on any mismatch but
 * size the reply to the smaller of the two to stay safe. */
709 if (inode->i_size + 1 != body->eadatasize)
710 CERROR ("symlink size: %Lu, reply space: %d\n",
711 inode->i_size + 1, body->eadatasize);
712 size[bufcount] = MIN(inode->i_size + 1, body->eadatasize);
714 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
715 inode->i_size + 1, body->eadatasize);
/* Fault-injection hook for testing the client's ENOMEM handling. */
718 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
719 CERROR("failed MDS_GETATTR_PACK test\n");
720 req->rq_status = -ENOMEM;
721 GOTO(out, rc = -ENOMEM);
724 rc = lustre_pack_msg(bufcount, size, NULL, &req->rq_replen,
/* Fixed typo in the error message ("out of memoryK"). */
727 CERROR("out of memory\n");
737 /* This is more copy-and-paste from getattr_name than I'd like. */
/* Replay path for a RESENT getattr-by-name: re-run the lookup and rebuild
 * the original reply (stealing ack locks from the outstanding reply if one
 * exists) without taking any new locks. */
738 static void reconstruct_getattr_name(int offset, struct ptlrpc_request *req,
739 struct lustre_handle *client_lockh)
741 struct obd_device *obd = req->rq_export->exp_obd;
742 struct mds_obd *mds = mds_req2mds(req);
743 struct dentry *parent, *child;
744 struct mds_body *body;
746 struct obd_run_ctxt saved;
751 if (req->rq_export->exp_outstanding_reply)
752 mds_steal_ack_locks(req->rq_export, req);
754 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*body));
755 LASSERT (body != NULL); /* checked by caller */
756 LASSERT_REQSWABBED (req, offset); /* swabbed by caller */
758 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
759 LASSERT (name != NULL); /* checked by caller */
760 LASSERT_REQSWABBED (req, offset + 1); /* swabbed by caller */
761 namelen = req->rq_reqmsg->buflens[offset + 1];
763 LASSERT (offset == 2 || offset == 0);
764 /* requests were at offset 2, replies go back at 1 */
/* Impersonate the client's credentials for the filesystem operations. */
768 uc.ouc_fsuid = body->fsuid;
769 uc.ouc_fsgid = body->fsgid;
770 uc.ouc_cap = body->capability;
771 uc.ouc_suppgid1 = body->suppgid;
772 uc.ouc_suppgid2 = -1;
773 push_ctxt(&saved, &mds->mds_ctxt, &uc);
/* These lookups succeeded the first time, so failure here would indicate
 * server-side corruption — hence LASSERT rather than error handling. */
774 parent = mds_fid2dentry(mds, &body->fid1, NULL);
775 LASSERT(!IS_ERR(parent));
776 dir = parent->d_inode;
778 child = ll_lookup_one_len(name, parent, namelen - 1);
779 LASSERT(!IS_ERR(child));
781 if (req->rq_repmsg == NULL) {
782 rc = mds_getattr_pack_msg(req, child->d_inode, offset);
783 /* XXX need to handle error here */
787 rc = mds_getattr_internal(obd, child, req, body, offset);
/* MDS_GETATTR_NAME handler: look up `name` under the parent identified by
 * fid1, taking a PR lock on the parent and then on the child, and return the
 * child's attributes.  The child lock is returned to the client via
 * `child_lockh`; RESENT requests are replayed via reconstruct_getattr_name.
 * Cleanup is staged by `cleanup_phase` (1 = parent, 2 = child dentry,
 * 3 = child lock). */
793 static int mds_getattr_name(int offset, struct ptlrpc_request *req,
794 struct lustre_handle *child_lockh)
796 struct mds_obd *mds = mds_req2mds(req);
797 struct obd_device *obd = req->rq_export->exp_obd;
798 struct obd_run_ctxt saved;
799 struct mds_body *body;
800 struct dentry *de = NULL, *dchild = NULL;
803 struct ldlm_res_id child_res_id = { .name = {0} };
804 struct lustre_handle parent_lockh;
806 int flags = 0, rc = 0, cleanup_phase = 0, req_was_resent;
810 LASSERT(!strcmp(obd->obd_type->typ_name, "mds"));
812 /* Swab now, before anyone looks inside the request */
814 body = lustre_swab_reqbuf (req, offset, sizeof (*body),
815 lustre_swab_mds_body);
817 CERROR ("Can't swab mds_body\n");
818 GOTO (cleanup, rc = -EFAULT);
821 LASSERT_REQSWAB (req, offset + 1);
822 name = lustre_msg_string (req->rq_reqmsg, offset + 1, 0);
824 CERROR ("Can't unpack name\n");
825 GOTO (cleanup, rc = -EFAULT);
827 namesize = req->rq_reqmsg->buflens[offset + 1];
/* A resent request whose lock cookie survived means the client already
 * holds our reply lock: reconstruct instead of re-executing. */
829 req_was_resent = lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT;
830 if (child_lockh->cookie) {
831 LASSERT(req_was_resent);
832 reconstruct_getattr_name(offset, req, child_lockh);
834 } else if (req_was_resent) {
835 DEBUG_REQ(D_HA, req, "no reply for RESENT req");
838 LASSERT (offset == 0 || offset == 2);
839 /* if requests were at offset 2, replies go back at 1 */
/* Run the lookups under the client's credentials. */
843 uc.ouc_fsuid = body->fsuid;
844 uc.ouc_fsgid = body->fsgid;
845 uc.ouc_cap = body->capability;
846 uc.ouc_suppgid1 = body->suppgid;
847 uc.ouc_suppgid2 = -1;
848 push_ctxt(&saved, &mds->mds_ctxt, &uc);
849 /* Step 1: Lookup/lock parent */
850 de = mds_fid2locked_dentry(obd, &body->fid1, NULL, LCK_PR,
853 GOTO(cleanup, rc = PTR_ERR(de));
857 cleanup_phase = 1; /* parent dentry and lock */
859 CDEBUG(D_INODE, "parent ino %lu, name %s\n", dir->i_ino, name);
861 /* Step 2: Lookup child */
862 dchild = ll_lookup_one_len(name, de, namesize - 1);
863 if (IS_ERR(dchild)) {
864 CDEBUG(D_INODE, "child lookup error %ld\n", PTR_ERR(dchild));
865 GOTO(cleanup, rc = PTR_ERR(dchild));
868 cleanup_phase = 2; /* child dentry */
870 if (dchild->d_inode == NULL) {
871 GOTO(cleanup, rc = -ENOENT);
874 /* Step 3: Lock child */
875 child_res_id.name[0] = dchild->d_inode->i_ino;
876 child_res_id.name[1] = dchild->d_inode->i_generation;
877 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, NULL,
878 child_res_id, LDLM_PLAIN, NULL, 0, LCK_PR,
879 &flags, ldlm_completion_ast, mds_blocking_ast,
881 if (rc != ELDLM_OK) {
882 CERROR("ldlm_cli_enqueue: %d\n", rc);
883 GOTO(cleanup, rc = -EIO);
886 cleanup_phase = 3; /* child lock */
888 if (req->rq_repmsg == NULL) {
889 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
891 CERROR ("mds_getattr_pack_msg: %d\n", rc);
896 rc = mds_getattr_internal(obd, dchild, req, body, offset);
897 GOTO(cleanup, rc); /* returns the lock to the client */
/* Unwind in reverse order of acquisition; on success the parent lock is
 * handed to the reply's ack-lock slot instead of being dropped here. */
900 switch (cleanup_phase) {
903 ldlm_lock_decref(child_lockh, LCK_PR);
909 ldlm_lock_decref(&parent_lockh, LCK_PR);
911 memcpy(&req->rq_ack_locks[0].lock, &parent_lockh,
912 sizeof(parent_lockh));
913 req->rq_ack_locks[0].mode = LCK_PR;
919 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
/* MDS_GETATTR handler: resolve fid1 to a dentry under the client's
 * credentials, size/allocate the reply, and fill it with the attributes. */
923 static int mds_getattr(int offset, struct ptlrpc_request *req)
925 struct mds_obd *mds = mds_req2mds(req);
926 struct obd_device *obd = req->rq_export->exp_obd;
927 struct obd_run_ctxt saved;
929 struct mds_body *body;
934 body = lustre_swab_reqbuf (req, offset, sizeof (*body),
935 lustre_swab_mds_body);
937 CERROR ("Can't unpack body\n");
941 uc.ouc_fsuid = body->fsuid;
942 uc.ouc_fsgid = body->fsgid;
943 uc.ouc_cap = body->capability;
944 push_ctxt(&saved, &mds->mds_ctxt, &uc);
945 de = mds_fid2dentry(mds, &body->fid1, NULL);
/* NOTE(review): the specific PTR_ERR(de) is only logged by the GOTO;
 * the client always sees -ENOENT here — confirm this is intentional. */
947 rc = req->rq_status = -ENOENT;
948 GOTO(out_pop, PTR_ERR(de));
951 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
953 CERROR ("mds_getattr_pack_msg: %d\n", rc);
957 req->rq_status = mds_getattr_internal(obd, de, req, body, 0);
962 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
/* MDS_STATFS handler: report backing-filesystem usage via fsfilt_statfs. */
966 static int mds_statfs(struct ptlrpc_request *req)
968 struct obd_device *obd = req->rq_export->exp_obd;
969 struct obd_statfs *osfs;
970 int rc, size = sizeof(*osfs);
973 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
974 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
975 CERROR("mds: statfs lustre_pack_msg failed: rc = %d\n", rc);
979 osfs = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*osfs));
980 rc = fsfilt_statfs(obd, obd->u.mds.mds_sb, osfs);
982 CERROR("mds: statfs failed: rc %d\n", rc);
/* Replay path for a RESENT close: restore the transno/result recorded in
 * the client's last_rcvd slot instead of re-executing the close. */
992 static void reconstruct_close(struct ptlrpc_request *req)
994 struct mds_export_data *med = &req->rq_export->exp_mds_data;
995 struct mds_client_data *mcd = med->med_mcd;
997 req->rq_transno = mcd->mcd_last_transno;
998 req->rq_status = mcd->mcd_last_result;
1000 /* XXX When open-unlink is working, we'll need to steal ack locks as
1001 * XXX well, and make sure that we do the right unlinking after we
1002 * XXX get the ack back.
/* MDS_CLOSE handler: look up the client's file handle and close it under
 * the export's open-list lock.  RESENT requests are short-circuited by
 * MDS_CHECK_RESENT/reconstruct_close. */
1006 static int mds_close(struct ptlrpc_request *req)
1008 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1009 struct mds_body *body;
1010 struct mds_file_data *mfd;
1014 MDS_CHECK_RESENT(req, reconstruct_close(req));
1016 body = lustre_swab_reqbuf(req, 0, sizeof (*body),
1017 lustre_swab_mds_body);
1019 CERROR ("Can't unpack body\n");
1023 mfd = mds_handle2mfd(&body->handle);
1025 DEBUG_REQ(D_ERROR, req, "no handle for file close "LPD64
1026 ": cookie "LPX64"\n", body->fid1.id,
1027 body->handle.cookie);
1031 spin_lock(&med->med_open_lock);
1032 req->rq_status = mds_close_mfd(mfd, med);
1033 spin_unlock(&med->med_open_lock);
/* Fault-injection hook for the close reply-pack path. */
1035 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_CLOSE_PACK)) {
1036 CERROR("test case OBD_FAIL_MDS_CLOSE_PACK\n");
1037 req->rq_status = -ENOMEM;
/* Close replies carry no buffers; pack an empty message. */
1042 rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
1044 CERROR("mds: lustre_pack_msg: rc = %d\n", rc);
1045 req->rq_status = rc;
/* MDS_READPAGE handler: open the directory identified by fid1 and bulk-send
 * one page of it at the requested offset via mds_sendpage.  NB several
 * mds_body fields are overloaded: size = offset, nlink = byte count,
 * blocks = xid (per the inline comments from the original authors). */
1052 static int mds_readpage(struct ptlrpc_request *req)
1054 struct mds_obd *mds = mds_req2mds(req);
1055 struct vfsmount *mnt;
1058 struct mds_body *body, *repbody;
1059 struct obd_run_ctxt saved;
1060 int rc, size = sizeof(*repbody);
1061 struct obd_ucred uc;
1064 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
1065 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) {
1066 CERROR("mds: out of memory\n");
1067 GOTO(out, rc = -ENOMEM);
1070 body = lustre_swab_reqbuf (req, 0, sizeof (*body),
1071 lustre_swab_mds_body);
1073 GOTO (out, rc = -EFAULT);
1075 /* body->size is actually the offset -eeb */
1076 if ((body->size & (PAGE_SIZE - 1)) != 0) {
1077 CERROR ("offset "LPU64"not on a page boundary\n", body->size);
1078 GOTO (out, rc = -EFAULT);
1081 /* body->nlink is actually the #bytes to read -eeb */
1082 if (body->nlink != PAGE_SIZE) {
1083 CERROR ("size %d is not PAGE_SIZE\n", body->nlink);
1084 GOTO (out, rc = -EFAULT);
/* Perform the open/read under the client's credentials. */
1087 uc.ouc_fsuid = body->fsuid;
1088 uc.ouc_fsgid = body->fsgid;
1089 uc.ouc_cap = body->capability;
1090 push_ctxt(&saved, &mds->mds_ctxt, &uc);
1091 de = mds_fid2dentry(mds, &body->fid1, &mnt);
1093 GOTO(out_pop, rc = PTR_ERR(de));
1095 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1097 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1098 /* note: in case of an error, dentry_open puts dentry */
1100 GOTO(out_pop, rc = PTR_ERR(file));
/* Report the current directory size so the client can iterate pages. */
1102 repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*repbody));
1103 repbody->size = file->f_dentry->d_inode->i_size;
1104 repbody->valid = OBD_MD_FLSIZE;
1106 /* to make this asynchronous make sure that the handling function
1107 doesn't send a reply when this function completes. Instead a
1108 callback function would send the reply */
1109 /* body->blocks is actually the xid -phil */
1110 /* body->size is actually the offset -eeb */
1111 rc = mds_sendpage(req, file, body->size, body->blocks);
1113 filp_close(file, 0);
1115 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
1117 req->rq_status = rc;
/* MDS_REINT dispatcher: unpack the update record from the request and hand
 * it to mds_reint_rec.  The record is heap-allocated to keep it off the
 * (small) kernel stack. */
1121 int mds_reint(struct ptlrpc_request *req, int offset,
1122 struct lustre_handle *lockh)
1124 struct mds_update_record *rec; /* 116 bytes on the stack? no sir! */
1127 OBD_ALLOC(rec, sizeof(*rec));
1131 rc = mds_update_unpack(req, offset, rec);
1132 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1133 CERROR("invalid record\n");
1134 GOTO(out, req->rq_status = -EINVAL);
1136 /* rc will be used to interrupt a for loop over multiple records */
1137 rc = mds_reint_rec(rec, offset, req, lockh);
1139 OBD_FREE(rec, sizeof(*rec));
/* Gate requests that arrive while the MDS is in recovery: connect/disconnect
 * and GETSTATUS pass through, replayable ops are queued for recovery, and
 * everything else is rejected with -EAGAIN. */
1143 static int filter_recovery_request(struct ptlrpc_request *req,
1144 struct obd_device *obd, int *process)
1146 switch (req->rq_reqmsg->opc) {
1147 case MDS_CONNECT: /* This will never get here, but for completeness. */
1148 case OST_CONNECT: /* This will never get here, but for completeness. */
1149 case MDS_DISCONNECT:
1150 case OST_DISCONNECT:
1155 case MDS_GETSTATUS: /* used in unmounting */
1159 *process = target_queue_recovery_request(req, obd);
1163 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1165 /* XXX what should we set rq_status to here? */
1166 req->rq_status = -EAGAIN;
1167 RETURN(ptlrpc_error(req));
/* Human-readable names for REINT sub-opcodes, indexed by opcode (uses the
 * old GCC designated-initializer extension syntax). */
1171 static char *reint_names[] = {
1172 [REINT_SETATTR] "setattr",
1173 [REINT_CREATE] "create",
1174 [REINT_LINK] "link",
1175 [REINT_UNLINK] "unlink",
1176 [REINT_RENAME] "rename",
1177 [REINT_OPEN] "open",
/* Transfer the ack-locks of the export's outstanding (un-acked) reply to a
 * resent request's reply, then mark the old reply resent and wake its
 * waiter so it can be discarded. */
1180 void mds_steal_ack_locks(struct obd_export *exp,
1181 struct ptlrpc_request *req)
1183 unsigned long flags;
1185 struct ptlrpc_request *oldrep = exp->exp_outstanding_reply;
1186 memcpy(req->rq_ack_locks, oldrep->rq_ack_locks,
1187 sizeof req->rq_ack_locks);
1188 spin_lock_irqsave (&req->rq_lock, flags);
1189 oldrep->rq_resent = 1;
1190 wake_up(&oldrep->rq_wait_for_rep);
1191 spin_unlock_irqrestore (&req->rq_lock, flags);
1192 DEBUG_REQ(D_HA, oldrep, "stole locks from");
1193 DEBUG_REQ(D_HA, req, "stole locks for");
1196 int mds_handle(struct ptlrpc_request *req)
1200 struct mds_obd *mds = NULL; /* quell gcc overwarning */
1201 struct obd_device *obd = NULL;
1204 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
1206 LASSERT(!strcmp(req->rq_obd->obd_type->typ_name, LUSTRE_MDT_NAME));
1208 /* XXX identical to OST */
1209 if (req->rq_reqmsg->opc != MDS_CONNECT) {
1210 struct mds_export_data *med;
1211 int recovering, abort_recovery;
1213 if (req->rq_export == NULL) {
1214 CERROR("lustre_mds: operation %d on unconnected MDS\n",
1215 req->rq_reqmsg->opc);
1216 req->rq_status = -ENOTCONN;
1217 GOTO(out, rc = -ENOTCONN);
1220 med = &req->rq_export->exp_mds_data;
1221 obd = req->rq_export->exp_obd;
1224 /* Check for aborted recovery. */
1225 spin_lock_bh(&obd->obd_processing_task_lock);
1226 abort_recovery = obd->obd_abort_recovery;
1227 recovering = obd->obd_recovering;
1228 spin_unlock_bh(&obd->obd_processing_task_lock);
1229 if (abort_recovery) {
1230 target_abort_recovery(obd);
1231 } else if (recovering) {
1232 rc = filter_recovery_request(req, obd, &should_process);
1233 if (rc || !should_process)
1238 switch (req->rq_reqmsg->opc) {
1240 DEBUG_REQ(D_INODE, req, "connect");
1241 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1242 rc = target_handle_connect(req, mds_handle);
1243 /* Make sure that last_rcvd is correct. */
1245 /* Now that we have an export, set mds. */
1246 mds = mds_req2mds(req);
1247 mds_fsync_super(mds->mds_sb);
1251 case MDS_DISCONNECT:
1252 DEBUG_REQ(D_INODE, req, "disconnect");
1253 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1254 rc = target_handle_disconnect(req);
1255 /* Make sure that last_rcvd is correct. */
1257 mds_fsync_super(mds->mds_sb);
1258 req->rq_status = rc; /* superfluous? */
1262 DEBUG_REQ(D_INODE, req, "getstatus");
1263 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1264 rc = mds_getstatus(req);
1267 case MDS_GETLOVINFO:
1268 DEBUG_REQ(D_INODE, req, "getlovinfo");
1269 rc = mds_getlovinfo(req);
1273 DEBUG_REQ(D_INODE, req, "getattr");
1274 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1275 rc = mds_getattr(0, req);
1278 case MDS_GETATTR_NAME: {
1279 struct lustre_handle lockh;
1280 DEBUG_REQ(D_INODE, req, "getattr_name");
1281 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NAME_NET, 0);
1283 /* If this request gets a reconstructed reply, we won't be
1284 * acquiring any new locks in mds_getattr_name, so we don't
1288 rc = mds_getattr_name(0, req, &lockh);
1289 if (rc == 0 && lockh.cookie)
1290 ldlm_lock_decref(&lockh, LCK_PR);
1294 DEBUG_REQ(D_INODE, req, "statfs");
1295 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1296 rc = mds_statfs(req);
1300 DEBUG_REQ(D_INODE, req, "readpage");
1301 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1302 rc = mds_readpage(req);
1304 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
1309 __u32 *opcp = lustre_msg_buf (req->rq_reqmsg, 0, sizeof (*opcp));
1311 int size[2] = {sizeof(struct mds_body), mds->mds_max_mdsize};
1314 /* NB only peek inside req now; mds_reint() will swab it */
1316 CERROR ("Can't inspect opcode\n");
1321 if (lustre_msg_swabbed (req->rq_reqmsg))
1324 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
1325 (opc < sizeof (reint_names) / sizeof (reint_names[0]) ||
1326 reint_names[opc] == NULL) ? reint_names[opc] : "unknown opcode");
1328 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
1330 if (opc == REINT_UNLINK)
1335 rc = lustre_pack_msg(bufcount, size, NULL,
1336 &req->rq_replen, &req->rq_repmsg);
1340 rc = mds_reint(req, 0, NULL);
1341 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET_REP, 0);
1346 DEBUG_REQ(D_INODE, req, "close");
1347 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1348 rc = mds_close(req);
1352 DEBUG_REQ(D_INODE, req, "ping");
1353 rc = target_handle_ping(req);
1357 DEBUG_REQ(D_INODE, req, "enqueue");
1358 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1359 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
1360 ldlm_server_blocking_ast);
1363 DEBUG_REQ(D_INODE, req, "convert");
1364 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1365 rc = ldlm_handle_convert(req);
1367 case LDLM_BL_CALLBACK:
1368 case LDLM_CP_CALLBACK:
1369 DEBUG_REQ(D_INODE, req, "callback");
1370 CERROR("callbacks should not happen on MDS\n");
1372 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1375 req->rq_status = -ENOTSUPP;
1376 rc = ptlrpc_error(req);
1382 /* If we're DISCONNECTing, the mds_export_data is already freed */
1383 if (!rc && req->rq_reqmsg->opc != MDS_DISCONNECT) {
1384 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1385 struct obd_device *obd = list_entry(mds, struct obd_device,
1387 req->rq_repmsg->last_xid =
1388 le64_to_cpu (med->med_mcd->mcd_last_xid);
1390 if (!obd->obd_no_transno) {
1391 req->rq_repmsg->last_committed =
1392 obd->obd_last_committed;
1394 DEBUG_REQ(D_IOCTL, req,
1395 "not sending last_committed update");
1397 CDEBUG(D_INFO, "last_transno "LPU64", last_committed "LPU64
1399 mds->mds_last_transno, obd->obd_last_committed,
1404 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LAST_REPLAY) {
1405 if (obd && obd->obd_recovering) {
1406 DEBUG_REQ(D_HA, req, "LAST_REPLAY, queuing reply");
1407 return target_queue_final_reply(req, rc);
1409 /* Lost a race with recovery; let the error path DTRT. */
1410 rc = req->rq_status = -ENOTCONN;
1413 target_send_reply(req, rc, OBD_FAIL_MDS_ALL_REPLY_NET);
1417 /* Update the server data on disk. This stores the new mount_count and
1418 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1419 * then the server last_rcvd value may be less than that of the clients.
1420 * This will alert us that we may need to do client recovery.
1422 * This also assumes that mds_last_transno is not being modified
 * concurrently while we write it out (no locking is taken here).
1424 int mds_update_server_data(struct mds_obd *mds)
/* Persist the in-core server state (mount count, last transno) into the
 * last_rcvd file and flush it to stable storage.
 * NOTE(review): several lines (opening brace, rc/off declarations, error
 * branches, RETURN) are elided in this view; comments cover visible code. */
1426 struct mds_server_data *msd = mds->mds_server_data;
1427 struct file *filp = mds->mds_rcvd_filp;
1428 struct obd_run_ctxt saved;
/* Enter the MDS filesystem context so the write targets the backing fs. */
1432 push_ctxt(&saved, &mds->mds_ctxt, NULL);
/* On-disk format is little-endian; convert before writing. */
1433 msd->msd_last_transno = cpu_to_le64(mds->mds_last_transno);
1434 msd->msd_mount_count = cpu_to_le64(mds->mds_mount_count);
1436 CDEBUG(D_SUPER, "MDS mount_count is %Lu, last_transno is %Lu\n",
1437 (unsigned long long)mds->mds_mount_count,
1438 (unsigned long long)mds->mds_last_transno);
/* lustre_fwrite() returns the number of bytes written; a short write
 * (anything other than sizeof(*msd)) is treated as an error. */
1439 rc = lustre_fwrite(filp, (char *)msd, sizeof(*msd), &off);
1440 if (rc != sizeof(*msd)) {
1441 CERROR("error writing MDS server data: rc = %d\n", rc);
/* 2.4 kernels have no file_fsync(); sync the whole backing device instead. */
1446 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1447 rc = fsync_dev(filp->f_dentry->d_inode->i_rdev);
1449 rc = file_fsync(filp, filp->f_dentry, 1);
1452 CERROR("error flushing MDS server data: rc = %d\n", rc);
/* Restore the caller's context before returning. */
1455 pop_ctxt(&saved, &mds->mds_ctxt, NULL);
/* mount the file system (secretly) */
/* Setup hook for the "mds" obd type: validate the ioctl buffers, resolve
 * the fsfilt backend, mount the backing filesystem with iopen_nopriv,
 * initialize per-MDS state, and create the server-side LDLM namespace.
 * Error paths unwind in reverse order via the err_* labels.
 * NOTE(review): many lines (opening brace, else branches, GOTO targets,
 * RETURNs) are elided in this view; comments cover visible code only. */
1460 static int mds_setup(struct obd_device *obddev, obd_count len, void *buf)
1462 struct obd_ioctl_data* data = buf;
1463 struct mds_obd *mds = &obddev->u.mds;
1464 struct vfsmount *mnt;
1470 #ifdef CONFIG_DEV_RDONLY
1471 dev_clear_rdonly(2);
/* inlbuf1 = device name, inlbuf2 = fstype; both are mandatory. */
1473 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2)
1474 RETURN(rc = -EINVAL);
1476 obddev->obd_fsops = fsfilt_get_ops(data->ioc_inlbuf2);
1477 if (IS_ERR(obddev->obd_fsops))
1478 RETURN(rc = PTR_ERR(obddev->obd_fsops));
/* Optional third buffer: namespace mount path (must be absolute). */
1481 if (data->ioc_inllen3 > 0 && data->ioc_inlbuf3) {
1482 if (*data->ioc_inlbuf3 == '/') {
1483 CERROR("mds namespace mount: %s\n",
1485 // mds->mds_nspath = strdup(ioc->inlbuf4);
1487 CERROR("namespace mount must be absolute path: '%s'\n",
1492 if (!(page = __get_free_page(GFP_KERNEL)))
1495 memset((void *)page, 0, PAGE_SIZE);
/* Mount option: iopen_nopriv enables open-by-inode for ext3. */
1496 sprintf((char *)page, "iopen_nopriv");
1498 mnt = do_kern_mount(data->ioc_inlbuf2, 0,
1499 data->ioc_inlbuf1, (void *)page);
1503 CERROR("do_kern_mount failed: rc = %d\n", rc);
1507 CDEBUG(D_SUPER, "%s: mnt = %p\n", data->ioc_inlbuf1, mnt);
1508 mds->mds_sb = mnt->mnt_root->d_inode->i_sb;
1510 GOTO(err_put, rc = -ENODEV);
1512 spin_lock_init(&mds->mds_transno_lock);
1513 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
1514 rc = mds_fs_setup(obddev, mnt);
1516 CERROR("MDS filesystem method init failed: rc = %d\n", rc);
1520 obddev->obd_namespace =
1521 ldlm_namespace_new("mds_server", LDLM_NAMESPACE_SERVER);
1522 if (obddev->obd_namespace == NULL) {
/* NOTE(review): calling full mds_cleanup() here and then also taking the
 * err_fs unwind path looks like it may double-free state — verify. */
1523 mds_cleanup(obddev, 0, 0);
1524 GOTO(err_fs, rc = -ENOMEM);
1527 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
1528 "mds_ldlm_client", &obddev->obd_ldlm_client);
1530 mds->mds_has_lov_desc = 0;
/* Error unwind: filesystem methods, then mount, then fsfilt ops. */
1535 mds_fs_cleanup(obddev, 0);
1538 mntput(mds->mds_vfsmnt);
1542 fsfilt_put_ops(obddev->obd_fsops);
/* Teardown hook for the "mds" obd type: flush server data, release the
 * filesystem methods and mount, free the LDLM namespace, and cancel any
 * in-progress recovery timer.  Mirrors mds_setup() in reverse.
 * NOTE(review): lines are elided in this view (opening brace, RETURNs). */
1546 static int mds_cleanup(struct obd_device *obddev, int force, int failover)
1548 struct super_block *sb;
1549 struct mds_obd *mds = &obddev->u.mds;
/* Write out last_rcvd before unmounting so recovery state is durable. */
1556 mds_update_server_data(mds);
1557 mds_fs_cleanup(obddev, failover);
1561 /* 2 seems normal on mds, (may_umount() also expects 2
1562 fwiw), but we only see 1 at this point in obdfilter. */
1563 if (atomic_read(&obddev->u.mds.mds_vfsmnt->mnt_count) > 2){
1564 CERROR("%s: mount point busy, mnt_count: %d\n",
1566 atomic_read(&obddev->u.mds.mds_vfsmnt->mnt_count));
1569 mntput(mds->mds_vfsmnt);
1572 ldlm_namespace_free(obddev->obd_namespace);
/* If clients never completed replay, stop the pending recovery timer. */
1574 if (obddev->obd_recovering)
1575 target_cancel_recovery_timer(obddev);
1577 #ifdef CONFIG_DEV_RDONLY
1578 dev_clear_rdonly(2);
1580 fsfilt_put_ops(obddev->obd_fsops);
/* For a RESENT request, recover the server-side lock handle: the client
 * retransmitted with its original (remote) handle, so scan this export's
 * held-locks list for a lock with a matching remote cookie and restore
 * its local handle into *lockh.  No-op for non-resent requests.
 * NOTE(review): some lines (return, closing braces) are elided here. */
1585 inline void fixup_handle_for_resent_req(struct ptlrpc_request *req,
1586 struct lustre_handle *lockh)
1588 struct obd_export *exp = req->rq_export;
1589 struct obd_device *obd = exp->exp_obd;
1590 struct ldlm_request *dlmreq =
1591 lustre_msg_buf(req->rq_reqmsg, 0, sizeof (*dlmreq));
1592 struct lustre_handle remote_hdl = dlmreq->lock_handle1;
1593 struct list_head *iter;
/* Only resent requests need their handle reconstructed. */
1595 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
/* Walk under the namespace lock so the export's lock list is stable. */
1598 l_lock(&obd->obd_namespace->ns_lock);
1599 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
1600 struct ldlm_lock *lock;
1601 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
/* Match the client's original handle against each held lock. */
1602 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
1603 lockh->cookie = lock->l_handle.h_cookie;
1604 DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64,
1606 l_unlock(&obd->obd_namespace->ns_lock);
/* Fall through: no matching lock found; leave *lockh untouched. */
1611 l_unlock(&obd->obd_namespace->ns_lock);
1612 DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64,
/* LDLM intent policy for the MDS: when an enqueue carries an intent
 * (bufcount > 1), execute the metadata operation (open/create/unlink/
 * getattr/lookup) under the enqueue, then either abort the client's
 * speculative lock (ELDLM_LOCK_ABORTED) or replace it with the lock the
 * operation actually took (ELDLM_LOCK_REPLACED).  Without an intent,
 * just pack a plain ldlm_reply.
 * NOTE(review): case labels, else branches and closing braces are elided
 * in this view; comments below annotate only the visible lines. */
1616 static int ldlm_intent_policy(struct ldlm_namespace *ns,
1617 struct ldlm_lock **lockp, void *req_cookie,
1618 ldlm_mode_t mode, int flags, void *data)
1620 struct ptlrpc_request *req = req_cookie;
1621 struct ldlm_lock *lock = *lockp;
/* More than one request buffer means an intent record follows. */
1628 if (req->rq_reqmsg->bufcount > 1) {
1629 /* an intent needs to be considered */
1630 struct ldlm_intent *it;
1631 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
1632 struct mds_body *mds_body;
1633 struct ldlm_reply *rep;
1634 struct lustre_handle lockh = { 0 };
1635 struct ldlm_lock *new_lock;
/* Reply layout: [0] ldlm_reply, [1] mds_body, [2] LOV EA (max size). */
1636 int rc, offset = 2, repsize[3] = {sizeof(struct ldlm_reply),
1637 sizeof(struct mds_body),
1638 mds->mds_max_mdsize};
/* Byte-swap the intent in place if the peer has opposite endianness. */
1640 it = lustre_swab_reqbuf (req, 1, sizeof (*it),
1641 lustre_swab_ldlm_intent);
1643 CERROR ("Intent missing\n");
1644 rc = req->rq_status = -EFAULT;
1648 LDLM_DEBUG(lock, "intent policy, opc: %s",
1649 ldlm_it2str(it->opc));
1651 rc = lustre_pack_msg(3, repsize, NULL, &req->rq_replen,
1654 rc = req->rq_status = -ENOMEM;
1658 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*rep));
1659 rep->lock_policy_res1 = IT_INTENT_EXEC;
/* Resent request?  Restore the server-side handle before executing. */
1661 fixup_handle_for_resent_req(req, &lockh);
1663 /* execute policy */
1664 switch ((long)it->opc) {
1666 case IT_CREAT|IT_OPEN:
1667 rc = mds_reint(req, offset, &lockh);
1668 /* We return a dentry to the client if IT_OPEN_POS is
1669 * set, or if we make it to the OPEN portion of the
1670 * programme (which implies that we created) */
1671 if (!(rep->lock_policy_res1 & IT_OPEN_POS ||
1672 rep->lock_policy_res1 & IT_OPEN_OPEN)) {
1673 rep->lock_policy_res2 = rc;
1674 RETURN(ELDLM_LOCK_ABORTED);
1678 rc = mds_reint(req, offset, &lockh);
1679 /* Don't return a lock if the unlink failed, or if we're
1680 * not sending back an EA */
1682 rep->lock_policy_res2 = rc;
1683 RETURN(ELDLM_LOCK_ABORTED);
1685 if (req->rq_status != 0) {
1686 rep->lock_policy_res2 = req->rq_status;
1687 RETURN(ELDLM_LOCK_ABORTED);
/* Only hand back a lock if the unlink reply carries striping EA data. */
1689 mds_body = lustre_msg_buf(req->rq_repmsg, 1, sizeof (*mds_body));
1690 if (!(mds_body->valid & OBD_MD_FLEASIZE)) {
1691 rep->lock_policy_res2 = rc;
1692 RETURN(ELDLM_LOCK_ABORTED);
1698 rc = mds_getattr_name(offset, req, &lockh);
1699 /* FIXME: we need to sit down and decide on who should
1700 * set req->rq_status, who should return negative and
1701 * positive return values, and what they all mean. */
1703 rep->lock_policy_res2 = rc;
1704 RETURN(ELDLM_LOCK_ABORTED);
1706 if (req->rq_status != 0) {
1707 rep->lock_policy_res2 = req->rq_status;
1708 RETURN(ELDLM_LOCK_ABORTED);
1712 CERROR("Unhandled intent "LPD64"\n", it->opc);
1716 /* By this point, whatever function we called above must have
1717 * filled in 'lockh' or returned an error. We want to give the
1718 * new lock to the client instead of whatever lock it was about
1720 new_lock = ldlm_handle2lock(&lockh);
1721 LASSERT(new_lock != NULL);
1723 /* If we've already given this lock to a client once, then we
1724 * should have no readers or writers. Otherwise, we should
1725 * have one reader _or_ writer ref (which will be zeroed below
1726 * before returning the lock to a client.
1728 if (new_lock->l_export == req->rq_export)
1729 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
1731 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
1733 /* If we're running an intent only, we want to abort the new
1734 * lock, and let the client abort the original lock. */
1735 if (flags & LDLM_FL_INTENT_ONLY) {
1736 LDLM_DEBUG(lock, "INTENT_ONLY, aborting locks");
1737 l_lock(&new_lock->l_resource->lr_namespace->ns_lock);
/* Drop whichever single reference the operation took (PR or PW). */
1738 if (new_lock->l_readers)
1739 ldlm_lock_decref(&lockh, LCK_PR);
1741 ldlm_lock_decref(&lockh, LCK_PW);
1742 l_unlock(&new_lock->l_resource->lr_namespace->ns_lock);
1743 LDLM_LOCK_PUT(new_lock);
1744 RETURN(ELDLM_LOCK_ABORTED);
1749 rep->lock_policy_res2 = req->rq_status;
1751 if (new_lock->l_export == req->rq_export) {
1752 /* Already gave this to the client, which means that we
1753 * reconstructed a reply. */
1754 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
1756 RETURN(ELDLM_LOCK_REPLACED);
1759 /* Fixup the lock to be given to the client */
1760 l_lock(&new_lock->l_resource->lr_namespace->ns_lock);
1761 new_lock->l_readers = 0;
1762 new_lock->l_writers = 0;
/* Rehome the lock onto the requesting export's held-locks list. */
1764 new_lock->l_export = req->rq_export;
1765 list_add(&new_lock->l_export_chain,
1766 &new_lock->l_export->exp_ldlm_data.led_held_locks);
1768 /* We don't need to worry about completion_ast (which isn't set
1769 * in 'lock' yet anyways), because this lock is already
1771 new_lock->l_blocking_ast = lock->l_blocking_ast;
/* The client identifies this lock by its original (remote) handle. */
1773 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
1774 sizeof(lock->l_remote_handle));
1776 new_lock->l_flags &= ~(LDLM_FL_LOCAL | LDLM_FL_AST_SENT |
1779 LDLM_LOCK_PUT(new_lock);
1780 l_unlock(&new_lock->l_resource->lr_namespace->ns_lock);
1782 RETURN(ELDLM_LOCK_REPLACED);
/* No intent: pack a minimal reply containing only the ldlm_reply. */
1784 int size = sizeof(struct ldlm_reply);
1785 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen,
/* Attach hook for the "mds" obd type: register its lprocfs variables.
 * (Braces elided in this view.) */
1795 int mds_attach(struct obd_device *dev, obd_count len, void *data)
1797 struct lprocfs_static_vars lvars;
/* 0 selects the MDS variable set; mdt_attach() passes 1 for MDT. */
1799 lprocfs_init_multi_vars(0, &lvars);
1800 return lprocfs_obd_attach(dev, lvars.obd_vars);
/* Detach hook for the "mds" obd type: tear down its lprocfs entries. */
1803 int mds_detach(struct obd_device *dev)
1805 return lprocfs_obd_detach(dev);
/* Attach hook for the "mdt" obd type: register its lprocfs variables.
 * (Braces elided in this view.) */
1808 int mdt_attach(struct obd_device *dev, obd_count len, void *data)
1810 struct lprocfs_static_vars lvars;
/* 1 selects the MDT variable set; mds_attach() passes 0 for MDS. */
1812 lprocfs_init_multi_vars(1, &lvars);
1813 return lprocfs_obd_attach(dev, lvars.obd_vars);
/* Detach hook for the "mdt" obd type: tear down its lprocfs entries. */
1816 int mdt_detach(struct obd_device *dev)
1818 return lprocfs_obd_detach(dev);
/* Setup hook for the "mdt" obd type: create the three ptlrpc services
 * (main, setattr, readpage) that all dispatch into mds_handle(), and
 * start MDT_NUM_THREADS worker threads for each.  On any failure the
 * err_thread* labels unwind the services started so far, newest first.
 * NOTE(review): braces, RETURNs and some declarations (rc, i, name) are
 * elided in this view. */
1821 static int mdt_setup(struct obd_device *obddev, obd_count len, void *buf)
1823 struct mds_obd *mds = &obddev->u.mds;
/* Main MDS request service. */
1827 mds->mds_service = ptlrpc_init_svc(MDS_NEVENTS, MDS_NBUFS,
1828 MDS_BUFSIZE, MDS_MAXREQSIZE,
1829 MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
1830 mds_handle, "mds", obddev);
1832 if (!mds->mds_service) {
1833 CERROR("failed to start service\n");
1834 RETURN(rc = -ENOMEM);
1837 for (i = 0; i < MDT_NUM_THREADS; i++) {
1839 sprintf(name, "ll_mdt_%02d", i);
1840 rc = ptlrpc_start_thread(obddev, mds->mds_service, name);
1842 CERROR("cannot start MDT thread #%d: rc %d\n", i, rc);
1843 GOTO(err_thread, rc);
/* Dedicated setattr service on its own portal. */
1847 mds->mds_setattr_service =
1848 ptlrpc_init_svc(MDS_NEVENTS, MDS_NBUFS,
1849 MDS_BUFSIZE, MDS_MAXREQSIZE,
1850 MDS_SETATTR_PORTAL, MDC_REPLY_PORTAL,
1851 mds_handle, "mds_setattr", obddev);
1852 if (!mds->mds_setattr_service) {
/* NOTE(review): message says "getattr" but this is the setattr service. */
1853 CERROR("failed to start getattr service\n");
1854 GOTO(err_thread, rc = -ENOMEM);
1857 for (i = 0; i < MDT_NUM_THREADS; i++) {
1859 sprintf(name, "ll_mdt_attr_%02d", i);
1860 rc = ptlrpc_start_thread(obddev, mds->mds_setattr_service,
1863 CERROR("cannot start MDT setattr thread #%d: rc %d\n",
1865 GOTO(err_thread2, rc);
/* Dedicated readpage (directory readdir) service on its own portal. */
1869 mds->mds_readpage_service =
1870 ptlrpc_init_svc(MDS_NEVENTS, MDS_NBUFS,
1871 MDS_BUFSIZE, MDS_MAXREQSIZE,
1872 MDS_READPAGE_PORTAL, MDC_REPLY_PORTAL,
1873 mds_handle, "mds_readpage", obddev);
1874 if (!mds->mds_readpage_service) {
1875 CERROR("failed to start readpage service\n");
1876 GOTO(err_thread2, rc = -ENOMEM);
1879 for (i = 0; i < MDT_NUM_THREADS; i++) {
1881 sprintf(name, "ll_mdt_rdpg_%02d", i);
1882 rc = ptlrpc_start_thread(obddev, mds->mds_readpage_service,
1885 CERROR("cannot start MDT readpage thread #%d: rc %d\n",
1887 GOTO(err_thread3, rc);
/* Unwind labels: stop and unregister services in reverse creation order. */
1894 ptlrpc_stop_all_threads(mds->mds_readpage_service);
1895 ptlrpc_unregister_service(mds->mds_readpage_service);
1897 ptlrpc_stop_all_threads(mds->mds_setattr_service);
1898 ptlrpc_unregister_service(mds->mds_setattr_service);
1900 ptlrpc_stop_all_threads(mds->mds_service);
1901 ptlrpc_unregister_service(mds->mds_service);
/* Teardown hook for the "mdt" obd type: stop all service threads and
 * unregister the three ptlrpc services, in reverse order of creation
 * (mirrors the error unwind in mdt_setup()). */
1906 static int mdt_cleanup(struct obd_device *obddev, int force, int failover)
1908 struct mds_obd *mds = &obddev->u.mds;
1911 ptlrpc_stop_all_threads(mds->mds_readpage_service);
1912 ptlrpc_unregister_service(mds->mds_readpage_service);
1914 ptlrpc_stop_all_threads(mds->mds_setattr_service);
1915 ptlrpc_unregister_service(mds->mds_setattr_service);
1917 ptlrpc_stop_all_threads(mds->mds_service);
1918 ptlrpc_unregister_service(mds->mds_service);
/* Defined in another file of this module. */
1923 extern int mds_iocontrol(unsigned int cmd, struct lustre_handle *conn,
1924 int len, void *karg, void *uarg);
1926 /* use obd ops to offer management infrastructure */
/* Method table for the "mds" obd type: lifecycle (attach/detach/cleanup),
 * client connect/disconnect, ioctls, and export destruction.
 * (Some initializer lines, e.g. o_setup, are elided in this view.) */
1927 static struct obd_ops mds_obd_ops = {
1928 o_owner: THIS_MODULE,
1929 o_attach: mds_attach,
1930 o_detach: mds_detach,
1931 o_connect: mds_connect,
1932 o_disconnect: mds_disconnect,
1934 o_cleanup: mds_cleanup,
1935 o_iocontrol: mds_iocontrol,
1936 o_destroy_export: mds_destroy_export
/* Method table for the "mdt" obd type (the service front-end): only
 * lifecycle hooks are needed here; request handling goes through the
 * ptlrpc services created in mdt_setup().
 * (Some initializer lines, e.g. o_setup, are elided in this view.) */
1939 static struct obd_ops mdt_obd_ops = {
1940 o_owner: THIS_MODULE,
1941 o_attach: mdt_attach,
1942 o_detach: mdt_detach,
1944 o_cleanup: mdt_cleanup,
/* Module init: register both the MDS and MDT obd types with the class
 * driver (each with its own lprocfs variable set) and install the MDS
 * intent policy into the LDLM.  (Return statement elided in this view.)
 * NOTE(review): the class_register_type() return values are not checked. */
1948 static int __init mds_init(void)
1950 struct lprocfs_static_vars lvars;
1952 lprocfs_init_multi_vars(0, &lvars);
1953 class_register_type(&mds_obd_ops, lvars.module_vars, LUSTRE_MDS_NAME);
1954 lprocfs_init_multi_vars(1, &lvars);
1955 class_register_type(&mdt_obd_ops, lvars.module_vars, LUSTRE_MDT_NAME);
1956 ldlm_register_intent(ldlm_intent_policy);
/* Module exit: remove the intent policy first so no new intents arrive,
 * then unregister both obd types (reverse of mds_init()). */
1961 static void __exit mds_exit(void)
1963 ldlm_unregister_intent();
1964 class_unregister_type(LUSTRE_MDS_NAME);
1965 class_unregister_type(LUSTRE_MDT_NAME);
1968 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
1969 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
1970 MODULE_LICENSE("GPL");
1972 module_init(mds_init);
1973 module_exit(mds_exit);