1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001, 2002 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of Lustre, http://www.lustre.org.
15 * Lustre is free software; you can redistribute it and/or
16 * modify it under the terms of version 2 of the GNU General Public
17 * License as published by the Free Software Foundation.
19 * Lustre is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with Lustre; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 #define DEBUG_SUBSYSTEM S_MDS
32 #include <linux/module.h>
33 #include <linux/lustre_mds.h>
34 #include <linux/lustre_dlm.h>
35 #include <linux/init.h>
36 #include <linux/obd_class.h>
37 #include <linux/random.h>
38 #include <linux/locks.h>
39 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
40 #include <linux/buffer_head.h>
42 #include <linux/obd_lov.h>
43 #include <linux/lustre_mds.h>
44 #include <linux/lustre_fsfilt.h>
45 #include <linux/lprocfs_status.h>
47 static kmem_cache_t *mds_file_cache;
49 extern int mds_get_lovtgts(struct mds_obd *obd, int tgt_count,
50 obd_uuid_t *uuidarray);
51 extern int mds_get_lovdesc(struct mds_obd *obd, struct lov_desc *desc);
52 extern void mds_start_transno(struct mds_obd *mds);
53 extern int mds_finish_transno(struct mds_obd *mds, void *handle,
54 struct ptlrpc_request *req, int rc);
55 static int mds_cleanup(struct obd_device * obddev);
57 extern struct lprocfs_vars status_var_nm_1[];
58 extern struct lprocfs_vars status_class_var[];
/*
 * Map an incoming request to the MDS device state it targets, via the
 * export the request arrived on.  NOTE(review): listing has gaps; the
 * opening/closing braces of this inline are not visible here.
 */
60 inline struct mds_obd *mds_req2mds(struct ptlrpc_request *req)
62 return &req->rq_export->exp_obd->u.mds;
/*
 * l_wait_event timeout callback for a stalled bulk transfer: flag the
 * peer connection as failed so recovery can take over.
 */
65 static int mds_bulk_timeout(void *data)
67 struct ptlrpc_bulk_desc *desc = data;
70 recovd_conn_fail(desc->bd_connection);
/*
 * Send one page of directory data back to the client over a bulk
 * descriptor: read the page from the backing filesystem, post the bulk
 * send, then wait (with timeout) for the SENT event.  Cleanup is via
 * the GOTO ladder: cleanup_buf frees the page, cleanup_bulk drops the
 * descriptor refcount.  NOTE(review): several lines (buf declaration,
 * error checks, labels, RETURN) are missing from this listing.
 */
74 /* Assumes caller has already pushed into the kernel filesystem context */
75 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
78 struct ptlrpc_bulk_desc *desc;
79 struct ptlrpc_bulk_page *bulk;
80 struct l_wait_info lwi;
85 desc = ptlrpc_prep_bulk(req->rq_connection);
87 GOTO(out, rc = -ENOMEM);
89 bulk = ptlrpc_prep_bulk_page(desc);
91 GOTO(cleanup_bulk, rc = -ENOMEM);
93 OBD_ALLOC(buf, PAGE_SIZE);
95 GOTO(cleanup_bulk, rc = -ENOMEM);
97 rc = fsfilt_readpage(req->rq_export->exp_obd, file, buf, PAGE_SIZE,
101 GOTO(cleanup_buf, rc = -EIO);
/* bulk xid must match the request xid so the client can match the reply */
103 bulk->bp_xid = req->rq_xid;
105 bulk->bp_buflen = PAGE_SIZE;
106 desc->bd_ptl_ev_hdlr = NULL;
107 desc->bd_portal = MDS_BULK_PORTAL;
109 rc = ptlrpc_send_bulk(desc);
111 GOTO(cleanup_buf, rc);
/* fault-injection hook: deliberately abort the bulk for testing */
113 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
114 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
115 OBD_FAIL_MDS_SENDPAGE, rc);
116 ptlrpc_abort_bulk(desc);
117 GOTO(cleanup_buf, rc);
/* wait for the bulk SENT flag; mds_bulk_timeout fails the connection */
120 lwi = LWI_TIMEOUT(obd_timeout * HZ, mds_bulk_timeout, desc);
121 rc = l_wait_event(desc->bd_waitq, desc->bd_flags & PTL_BULK_FL_SENT,
124 if (rc != -ETIMEDOUT)
126 GOTO(cleanup_buf, rc);
131 OBD_FREE(buf, PAGE_SIZE);
133 ptlrpc_bulk_decref(desc);
139 * Look up a named entry in a directory, and get an LDLM lock on it.
140 * 'dir' is a inode for which an LDLM lock has already been taken.
142 * If we do not need an exclusive or write lock on this entry (e.g.
143 * a read lock for attribute lookup only) then we do not hold the
144 * directory semaphore on return. It is up to the caller to know what
145 * type of lock it is getting, and clean up appropriately.
147 struct dentry *mds_name2locked_dentry(struct obd_device *obd,
148 struct dentry *dir, struct vfsmount **mnt,
149 char *name, int namelen, int lock_mode,
150 struct lustre_handle *lockh,
153 struct dentry *dchild;
/* LDLM resource id: [ino, generation, 0] identifies the child inode */
155 __u64 res_id[3] = {0};
/* hold i_sem across the lookup to serialize with directory changes */
158 down(&dir->d_inode->i_sem);
159 dchild = lookup_one_len(name, dir, namelen);
160 if (IS_ERR(dchild)) {
161 CERROR("child lookup error %ld\n", PTR_ERR(dchild));
162 up(&dir->d_inode->i_sem);
/* read-only callers: release the directory semaphore and parent lock now */
166 if (dir_lock_mode != LCK_EX && dir_lock_mode != LCK_PW) {
167 up(&dir->d_inode->i_sem);
168 ldlm_lock_decref(lockh, dir_lock_mode);
/* no child lock wanted, or negative dentry: nothing to enqueue */
171 if (lock_mode == 0 || !dchild->d_inode)
174 res_id[0] = dchild->d_inode->i_ino;
175 res_id[1] = dchild->d_inode->i_generation;
176 rc = ldlm_match_or_enqueue(NULL, NULL, obd->obd_namespace, NULL,
177 res_id, LDLM_PLAIN, NULL, 0, lock_mode,
178 &flags, ldlm_completion_ast,
179 mds_blocking_ast, NULL, 0, lockh);
180 if (rc != ELDLM_OK) {
182 up(&dir->d_inode->i_sem);
183 RETURN(ERR_PTR(-ENOLCK)); /* XXX translate ldlm code */
/*
 * Resolve a Lustre fid to a dentry (via mds_fid2dentry) and take a
 * PLAIN LDLM lock on the inode's [ino, generation] resource.  On lock
 * failure returns ERR_PTR(-ENOLCK); NOTE(review): the error path that
 * puts the dentry is not visible in this listing.
 */
189 struct dentry *mds_fid2locked_dentry(struct obd_device *obd, struct ll_fid *fid,
190 struct vfsmount **mnt, int lock_mode,
191 struct lustre_handle *lockh)
193 struct mds_obd *mds = &obd->u.mds;
194 struct dentry *de = mds_fid2dentry(mds, fid, mnt), *retval = de;
196 __u64 res_id[3] = {0};
202 res_id[0] = de->d_inode->i_ino;
203 res_id[1] = de->d_inode->i_generation;
204 rc = ldlm_match_or_enqueue(NULL, NULL, obd->obd_namespace, NULL,
205 res_id, LDLM_PLAIN, NULL, 0, lock_mode,
206 &flags, ldlm_completion_ast,
207 mds_blocking_ast, NULL, 0, lockh);
208 if (rc != ELDLM_OK) {
210 retval = ERR_PTR(-ENOLCK); /* XXX translate ldlm code */
216 #ifndef DCACHE_DISCONNECTED
217 #define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
220 /* Look up an entry by inode number. */
/*
 * NFS-style fid -> dentry resolution: iget the inode, validate its
 * generation, then prefer an existing connected dentry from i_dentry;
 * otherwise allocate a disconnected anonymous root dentry.
 */
221 struct dentry *mds_fid2dentry(struct mds_obd *mds, struct ll_fid *fid,
222 struct vfsmount **mnt)
224 /* stolen from NFS */
225 struct super_block *sb = mds->mds_sb;
226 unsigned long ino = fid->id;
227 __u32 generation = fid->generation;
229 struct list_head *lp;
230 struct dentry *result;
233 RETURN(ERR_PTR(-ESTALE));
235 inode = iget(sb, ino);
237 RETURN(ERR_PTR(-ENOMEM));
239 CDEBUG(D_DENTRY, "--> mds_fid2dentry: sb %p\n", inode->i_sb);
/* generation 0 means "don't check"; otherwise the fid is stale */
241 if (is_bad_inode(inode) ||
242 (generation && inode->i_generation != generation)) {
243 /* we didn't find the right inode.. */
244 CERROR("bad inode %lu, link: %d ct: %d or version %u/%u\n",
245 inode->i_ino, inode->i_nlink,
246 atomic_read(&inode->i_count), inode->i_generation,
249 RETURN(ERR_PTR(-ENOENT));
252 /* now to find a dentry. If possible, get a well-connected one */
254 *mnt = mds->mds_vfsmnt;
255 spin_lock(&dcache_lock);
256 list_for_each(lp, &inode->i_dentry) {
257 result = list_entry(lp, struct dentry, d_alias);
258 if (!(result->d_flags & DCACHE_DISCONNECTED)) {
260 result->d_vfs_flags |= DCACHE_REFERENCED;
261 spin_unlock(&dcache_lock);
268 spin_unlock(&dcache_lock);
/* no usable alias: fabricate a disconnected dentry for this inode */
269 result = d_alloc_root(inode);
270 if (result == NULL) {
272 return ERR_PTR(-ENOMEM);
276 result->d_flags |= DCACHE_DISCONNECTED;
280 /* Establish a connection to the MDS.
282 * This will set up an export structure for the client to hold state data
283 * about that client, like open files, the last operation number it did
284 * on the server, etc.
286 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
287 obd_uuid_t cluuid, struct recovd_obd *recovd,
288 ptlrpc_recovery_cb_t recover)
290 struct obd_export *exp;
291 struct mds_export_data *med;
292 struct mds_client_data *mcd;
297 if (!conn || !obd || !cluuid)
/* scan existing exports for this UUID: reconnect / duplicate detection */
302 spin_lock(&obd->obd_dev_lock);
303 list_for_each(p, &obd->obd_exports) {
304 exp = list_entry(p, struct obd_export, exp_obd_chain);
305 mcd = exp->exp_mds_data.med_mcd;
/* mcd may still be NULL while a racing connect fills it in (see below) */
307 CERROR("FYI: NULL mcd - simultaneous connects\n");
310 if (!memcmp(cluuid, mcd->mcd_uuid, sizeof mcd->mcd_uuid)) {
311 /* XXX make handle-found-export a subroutine */
312 LASSERT(exp->exp_obd == obd);
314 spin_unlock(&obd->obd_dev_lock);
315 if (exp->exp_connection) {
316 struct lustre_handle *hdl;
317 hdl = &exp->exp_ldlm_data.led_import.imp_handle;
318 /* Might be a re-connect after a partition. */
319 if (!memcmp(conn, hdl, sizeof *conn)) {
320 CERROR("%s reconnecting\n", cluuid);
321 conn->addr = (__u64) (unsigned long)exp;
322 conn->cookie = exp->exp_cookie;
/* same UUID but different handle: refuse, zero the handle out */
325 CERROR("%s reconnecting from %s, "
326 "handle mismatch (ours %Lx/%Lx, "
327 "theirs %Lx/%Lx)\n", cluuid,
328 exp->exp_connection->
329 c_remote_uuid, hdl->addr,
330 hdl->cookie, conn->addr,
332 /* XXX disconnect them here? */
333 memset(conn, 0, sizeof *conn);
/* export exists but no connection: reuse it for this client */
339 conn->addr = (__u64) (unsigned long)exp;
340 conn->cookie = exp->exp_cookie;
341 CDEBUG(D_INFO, "existing export for UUID '%s' at %p\n",
343 CDEBUG(D_IOCTL,"connect: addr %Lx cookie %Lx\n",
344 (long long)conn->addr, (long long)conn->cookie);
348 spin_unlock(&obd->obd_dev_lock);
/* new clients are refused while recovery of old clients is in progress */
350 if (obd->u.mds.mds_recoverable_clients != 0) {
351 CERROR("denying connection for new client %s: in recovery\n",
357 /* XXX There is a small race between checking the list and adding a
358 * new connection for the same UUID, but the real threat (list
359 * corruption when multiple different clients connect) is solved.
361 * There is a second race between adding the export to the list,
362 * and filling in the client data below. Hence skipping the case
363 * of NULL mcd above. We should already be controlling multiple
364 * connects at the client, and we can't hold the spinlock over
365 * memory allocations without risk of deadlocking.
367 rc = class_connect(conn, obd, cluuid);
370 exp = class_conn2export(conn);
372 med = &exp->exp_mds_data;
374 OBD_ALLOC(mcd, sizeof(*mcd));
376 CERROR("mds: out of memory for client data\n");
377 GOTO(out_export, rc = -ENOMEM);
380 memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
383 INIT_LIST_HEAD(&med->med_open_head);
384 spin_lock_init(&med->med_open_lock);
/* record this client in last_rcvd so it can be recovered after a crash */
386 rc = mds_client_add(&obd->u.mds, med, -1);
/* error unwind: free client data, then tear down the connection */
393 OBD_FREE(mcd, sizeof(*mcd));
395 class_disconnect(conn);
402 /* Call with med->med_open_lock held, please. */
/*
 * Tear down one open-file record: unlink it from the export's open
 * list, poison its cookie against stale handles, free the slab object
 * and close the underlying file.  Returns filp_close()'s result.
 */
403 inline int mds_close_mfd(struct mds_file_data *mfd, struct mds_export_data *med)
405 struct file *file = mfd->mfd_file;
406 LASSERT(file->private_data == mfd);
408 list_del(&mfd->mfd_list);
409 mfd->mfd_servercookie = DEAD_HANDLE_MAGIC;
410 kmem_cache_free(mds_file_cache, mfd);
412 return filp_close(file, 0);
/*
 * Disconnect a client: close every file it left open, cancel its LDLM
 * locks, free its last_rcvd slot, then drop the export.
 * NOTE(review): mds_close_mfd is called here under med_open_lock (a
 * spinlock) yet calls filp_close(), which may sleep — flagged for
 * confirmation against the full source.
 */
415 static int mds_disconnect(struct lustre_handle *conn)
417 struct obd_export *export = class_conn2export(conn);
418 struct list_head *tmp, *n;
419 struct mds_export_data *med = &export->exp_mds_data;
424 * Close any open files.
426 spin_lock(&med->med_open_lock);
427 list_for_each_safe(tmp, n, &med->med_open_head) {
428 struct mds_file_data *mfd =
429 list_entry(tmp, struct mds_file_data, mfd_list);
430 rc = mds_close_mfd(mfd, med);
432 /* XXX better diagnostics, with file path and stuff */
433 CDEBUG(D_INODE, "Error %d closing mfd %p\n", rc, mfd);
436 spin_unlock(&med->med_open_lock);
438 ldlm_cancel_locks_for_export(export);
439 mds_client_free(export);
441 rc = class_disconnect(conn);
449 * XXX This is NOT guaranteed to flush all transactions to disk (even though
450 * it is equivalent to calling sync()) because it only _starts_ the flush
451 * and does not wait for completion. It's better than nothing though.
452 * What we really want is a mild form of fsync_dev_lockfs(), but it is
453 * non-standard, or enabling do_sync_supers in ext3, just for this call.
455 static void mds_fsync_super(struct super_block *sb)
/* only bother if the superblock is dirty and the fs provides write_super */
459 if (sb->s_dirt && sb->s_op && sb->s_op->write_super)
460 sb->s_op->write_super(sb);
/*
 * MDS_GETSTATUS handler: reply with the root fid of this MDS.  Flushes
 * the superblock first so the reply carries a fresh last_committed.
 */
465 static int mds_getstatus(struct ptlrpc_request *req)
467 struct mds_obd *mds = mds_req2mds(req);
468 struct mds_body *body;
469 int rc, size = sizeof(*body);
472 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
473 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
474 CERROR("mds: out of memory for message: size=%d\n", size);
475 req->rq_status = -ENOMEM;
479 /* Flush any outstanding transactions to disk so the client will
480 * get the latest last_committed value and can drop their local
481 * requests if they have any. This would be fsync_super() if it
484 mds_fsync_super(mds->mds_sb);
486 body = lustre_msg_buf(req->rq_repmsg, 0);
487 memcpy(&body->fid1, &mds->mds_rootfid, sizeof(body->fid1));
489 /* the last_committed and last_xid fields are filled in for all
490 * replies already - no need to do so here also.
/*
 * MDS_GETLOVINFO handler: return the LOV descriptor plus the OST target
 * UUID array to the client.  The client tells us (streq->repbuf) how
 * much UUID space it reserved; we refuse with -ENOSPC if the target
 * count won't fit.
 */
495 static int mds_getlovinfo(struct ptlrpc_request *req)
497 struct mds_obd *mds = mds_req2mds(req);
498 struct mds_status_req *streq;
499 struct lov_desc *desc;
501 int rc, size[2] = {sizeof(*desc)};
/* request fields arrive in network byte order; convert in place */
504 streq = lustre_msg_buf(req->rq_reqmsg, 0);
505 streq->flags = NTOH__u32(streq->flags);
506 streq->repbuf = NTOH__u32(streq->repbuf);
507 size[1] = streq->repbuf;
509 rc = lustre_pack_msg(2, size, NULL, &req->rq_replen, &req->rq_repmsg);
511 CERROR("mds: out of memory for message: size=%d\n", size[1]);
512 req->rq_status = -ENOMEM;
516 desc = lustre_msg_buf(req->rq_repmsg, 0);
517 rc = mds_get_lovdesc(mds, desc);
523 tgt_count = le32_to_cpu(desc->ld_tgt_count);
524 if (tgt_count * sizeof(obd_uuid_t) > streq->repbuf) {
525 CERROR("too many targets, enlarge client buffers\n");
526 req->rq_status = -ENOSPC;
530 /* XXX the MDS should not really know about this */
531 mds->mds_max_mdsize = lov_mds_md_size(tgt_count);
532 rc = mds_get_lovtgts(mds, tgt_count,
533 lustre_msg_buf(req->rq_repmsg, 1));
535 CERROR("get_lovtgts error %d\n", rc);
/*
 * LDLM blocking callback for MDS-held locks: when another lock request
 * conflicts, mark this lock CBPENDING and cancel it immediately if it
 * has no readers or writers; otherwise the last decref will cancel it.
 */
542 int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
543 void *data, __u32 data_len, int flag)
548 if (flag == LDLM_CB_CANCELING) {
549 /* Don't need to do anything here. */
553 /* XXX layering violation! -phil */
554 l_lock(&lock->l_resource->lr_namespace->ns_lock);
555 lock->l_flags |= LDLM_FL_CBPENDING;
/* decide under the namespace lock whether we can cancel right away */
556 do_ast = (!lock->l_readers && !lock->l_writers);
557 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
560 struct lustre_handle lockh;
563 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
564 ldlm_lock2handle(lock, &lockh);
565 rc = ldlm_cli_cancel(&lockh);
567 CERROR("ldlm_cli_cancel: %d\n", rc);
569 LDLM_DEBUG(lock, "Lock still has references, will be"
/*
 * Copy an inode's striping metadata (LOV EA) into reply buffer `offset`
 * and set OBD_MD_FLEASIZE in the reply body on success.  The reply
 * buffer size was chosen by the client; a zero-size buffer just means
 * "no MD requested" and an oversized one is rejected as a sanity check.
 */
574 int mds_pack_md(struct mds_obd *mds, struct ptlrpc_request *req,
575 int offset, struct mds_body *body, struct inode *inode)
577 struct lov_mds_md *lmm;
578 int lmm_size = req->rq_repmsg->buflens[offset];
582 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n", inode->i_ino);
586 lmm = lustre_msg_buf(req->rq_repmsg, offset);
588 /* I don't really like this, but it is a sanity check on the client
589 * MD request. However, if the client doesn't know how much space
590 * to reserve for the MD, this shouldn't be fatal either...
592 if (lmm_size > mds->mds_max_mdsize) {
593 CERROR("Reading MD for inode %lu of %d bytes > max %d\n",
594 inode->i_ino, lmm_size, mds->mds_max_mdsize);
598 /* We don't need to store the reply size, because this buffer is
599 * discarded right after unpacking, and the LOV can figure out the
600 * size itself from the ost count.
602 if ((rc = fsfilt_get_md(req->rq_export->exp_obd, inode,
603 lmm, lmm_size)) < 0) {
604 CDEBUG(D_INFO, "No md for ino %lu: rc = %d\n", inode->i_ino,rc);
606 body->valid |= OBD_MD_FLEASIZE;
/*
 * Fill reply buffer `reply_off` with the dentry's attributes, plus
 * either the striping MD (regular files) or the link target (symlinks
 * with OBD_MD_LINKNAME requested) in the following buffer.
 */
613 static int mds_getattr_internal(struct mds_obd *mds, struct dentry *dentry,
614 struct ptlrpc_request *req,
615 struct mds_body *reqbody, int reply_off)
617 struct mds_body *body;
618 struct inode *inode = dentry->d_inode;
625 body = lustre_msg_buf(req->rq_repmsg, reply_off);
627 mds_pack_inode2fid(&body->fid1, inode);
628 mds_pack_inode2body(body, inode);
630 if (S_ISREG(inode->i_mode) /* && reqbody->valid & OBD_MD_FLEASIZE */) {
631 rc = mds_pack_md(mds, req, reply_off + 1, body, inode);
632 } else if (S_ISLNK(inode->i_mode) && reqbody->valid & OBD_MD_LINKNAME) {
633 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off + 1);
634 int len = req->rq_repmsg->buflens[reply_off + 1];
/* NOTE(review): readlink here copies into a kernel buffer — the
 * missing lines presumably handle the set_fs dance; confirm. */
636 rc = inode->i_op->readlink(dentry, symname, len);
638 CERROR("readlink failed: %d\n", rc);
640 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
641 body->valid |= OBD_MD_LINKNAME;
/*
 * getattr-by-name: under the caller's fs context, resolve the parent
 * fid, take (or match) a PLAIN lock on the parent directory, look up
 * the child by name, and pack its attributes into the reply.  Unwinds
 * through the out_create_* / out_pre_de labels (partly not visible in
 * this listing).
 */
648 static int mds_getattr_name(int offset, struct ptlrpc_request *req)
650 struct mds_obd *mds = mds_req2mds(req);
651 struct obd_device *obd = req->rq_export->exp_obd;
652 struct obd_run_ctxt saved;
653 struct mds_body *body;
654 struct dentry *de = NULL, *dchild = NULL;
656 struct lustre_handle lockh;
658 int namelen, flags = 0, lock_mode, rc = 0;
660 __u64 res_id[3] = {0, 0, 0};
663 LASSERT(!strcmp(obd->obd_type->typ_name, "mds"));
/* request must carry both the body and the name buffer */
665 if (req->rq_reqmsg->bufcount <= offset + 1) {
667 GOTO(out_pre_de, rc = -EINVAL);
670 body = lustre_msg_buf(req->rq_reqmsg, offset);
671 name = lustre_msg_buf(req->rq_reqmsg, offset + 1);
672 namelen = req->rq_reqmsg->buflens[offset + 1];
673 /* requests were at offset 2, replies go back at 1 */
/* impersonate the client's credentials for the fs operations */
677 uc.ouc_fsuid = body->fsuid;
678 uc.ouc_fsgid = body->fsgid;
679 uc.ouc_cap = body->capability;
680 push_ctxt(&saved, &mds->mds_ctxt, &uc);
681 de = mds_fid2dentry(mds, &body->fid1, NULL);
683 GOTO(out_pre_de, rc = -ENOENT);
687 CDEBUG(D_INODE, "parent ino %lu, name %*s\n", dir->i_ino,namelen,name);
690 res_id[0] = dir->i_ino;
691 res_id[1] = dir->i_generation;
/* try to reuse an already-held lock on the parent before enqueueing */
693 rc = ldlm_lock_match(obd->obd_namespace, res_id, LDLM_PLAIN,
694 NULL, 0, lock_mode, &lockh);
696 LDLM_DEBUG_NOLOCK("enqueue res "LPU64, res_id[0]);
697 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, NULL,
698 res_id, LDLM_PLAIN, NULL, 0, lock_mode,
699 &flags, ldlm_completion_ast,
700 mds_blocking_ast, NULL, 0, &lockh);
701 if (rc != ELDLM_OK) {
702 CERROR("lock enqueue: err: %d\n", rc);
703 GOTO(out_create_de, rc = -EIO);
706 ldlm_lock_dump((void *)(unsigned long)lockh.addr);
/* namelen includes the trailing NUL from the wire; hence "- 1" */
709 dchild = lookup_one_len(name, de, namelen - 1);
710 if (IS_ERR(dchild)) {
711 CDEBUG(D_INODE, "child lookup error %ld\n", PTR_ERR(dchild));
713 GOTO(out_create_dchild, rc = PTR_ERR(dchild));
716 rc = mds_getattr_internal(mds, dchild, req, body, offset);
722 ldlm_lock_decref(&lockh, lock_mode);
727 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
/*
 * getattr-by-fid: size the reply (second buffer sized by a trial
 * fsfilt_get_md for regular files, or link-target length for symlinks),
 * pack the reply message, then fill it via mds_getattr_internal.
 */
731 static int mds_getattr(int offset, struct ptlrpc_request *req)
733 struct mds_obd *mds = mds_req2mds(req);
734 struct obd_run_ctxt saved;
737 struct mds_body *body;
739 int rc = 0, size[2] = {sizeof(*body)}, bufcount = 1;
742 body = lustre_msg_buf(req->rq_reqmsg, offset);
/* run as the requesting client for permission purposes */
743 uc.ouc_fsuid = body->fsuid;
744 uc.ouc_fsgid = body->fsgid;
745 uc.ouc_cap = body->capability;
746 push_ctxt(&saved, &mds->mds_ctxt, &uc);
747 de = mds_fid2dentry(mds, &body->fid1, NULL);
749 rc = req->rq_status = -ENOENT;
750 GOTO(out_pop, PTR_ERR(de));
/* probe call: NULL buffer asks fsfilt for the EA size only */
754 if (S_ISREG(body->fid1.f_type)) {
755 int rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0);
756 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
760 CERROR("error getting inode %lu MD: rc = %d\n",
763 } else if (rc > mds->mds_max_mdsize) {
765 CERROR("MD size %d larger than maximum possible %u\n",
766 rc, mds->mds_max_mdsize);
770 } else if (body->valid & OBD_MD_LINKNAME) {
/* reserve room for the NUL-terminated symlink target, client-capped */
771 size[bufcount] = MIN(inode->i_size + 1, body->size);
773 CDEBUG(D_INODE, "symlink size: %d, reply space: %d\n",
774 inode->i_size + 1, body->size);
777 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
778 CERROR("failed MDS_GETATTR_PACK test\n");
779 req->rq_status = -ENOMEM;
780 GOTO(out, rc = -ENOMEM);
783 rc = lustre_pack_msg(bufcount, size, NULL, &req->rq_replen,
786 CERROR("out of memoryK\n");
791 req->rq_status = mds_getattr_internal(mds, de, req, body, 0);
797 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
/*
 * MDS_STATFS handler: query the backing filesystem and return an
 * obd_statfs packed (in place) for the wire.
 */
801 static int mds_statfs(struct ptlrpc_request *req)
803 struct obd_device *obd = req->rq_export->exp_obd;
804 struct obd_statfs *osfs;
805 int rc, size = sizeof(*osfs);
808 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
809 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
810 CERROR("mds: statfs lustre_pack_msg failed: rc = %d\n", rc);
814 osfs = lustre_msg_buf(req->rq_repmsg, 0);
815 rc = fsfilt_statfs(obd, obd->u.mds.mds_sb, osfs);
817 CERROR("mds: statfs failed: rc %d\n", rc);
/* in-place byte-order conversion of the statfs buffer for the wire */
820 obd_statfs_pack(osfs, osfs);
/*
 * Validate a wire file handle and turn it back into an mds_file_data:
 * the addr must be a live slab object in mds_file_cache and its random
 * server cookie must match, guarding against stale/forged handles.
 */
828 static struct mds_file_data *mds_handle2mfd(struct lustre_handle *handle)
830 struct mds_file_data *mfd = NULL;
833 if (!handle || !handle->addr)
836 mfd = (struct mds_file_data *)(unsigned long)(handle->addr);
837 if (!kmem_cache_validate(mds_file_cache, mfd))
840 if (mfd->mfd_servercookie != handle->cookie)
/*
 * Store client-supplied striping metadata (LOV EA) from request buffer
 * `offset` onto the inode, inside a journaled transaction bracketed by
 * mds_start_transno / mds_finish_transno for replay correctness.
 */
846 static int mds_store_md(struct mds_obd *mds, struct ptlrpc_request *req,
847 int offset, struct mds_body *body, struct inode *inode)
849 struct obd_device *obd = req->rq_export->exp_obd;
850 struct lov_mds_md *lmm = lustre_msg_buf(req->rq_reqmsg, offset);
851 int lmm_size = req->rq_reqmsg->buflens[offset];
852 struct obd_run_ctxt saved;
858 /* I don't really like this, but it is a sanity check on the client
861 if (lmm_size > mds->mds_max_mdsize) {
862 CERROR("Saving MD for inode %lu of %d bytes > max %d\n",
863 inode->i_ino, lmm_size, mds->mds_max_mdsize);
867 CDEBUG(D_INODE, "storing %d bytes MD for inode %lu\n",
868 lmm_size, inode->i_ino);
/* perform the write with the client's credentials */
869 uc.ouc_fsuid = body->fsuid;
870 uc.ouc_fsgid = body->fsgid;
871 uc.ouc_cap = body->capability;
872 push_ctxt(&saved, &mds->mds_ctxt, &uc);
873 mds_start_transno(mds);
874 handle = fsfilt_start(obd, inode,FSFILT_OP_SETATTR);
875 if (IS_ERR(handle)) {
876 rc = PTR_ERR(handle);
/* transno bookkeeping must be finished even on start failure */
877 mds_finish_transno(mds, handle, req, rc);
881 rc = fsfilt_set_md(obd, inode,handle,lmm,lmm_size);
882 rc = mds_finish_transno(mds, handle, req, rc);
884 rc2 = fsfilt_commit(obd, inode, handle);
888 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
/*
 * MDS_OPEN handler.  First scans the export's open list for a matching
 * client handle (a resent open after a lost reply); otherwise allocates
 * an mfd, resolves the fid, optionally stores a delayed-creation EA
 * (MDS_OPEN_HAS_EA), opens the file, and hands back a {addr, random
 * cookie} server handle.  NOTE(review): lines declaring file/flags and
 * some error paths are not visible in this listing.
 */
893 static int mds_open(struct ptlrpc_request *req)
895 struct mds_obd *mds = mds_req2mds(req);
896 struct mds_body *body;
897 struct mds_export_data *med;
898 struct mds_file_data *mfd;
901 struct vfsmount *mnt;
903 struct list_head *tmp;
904 int rc, size = sizeof(*body);
907 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_OPEN_PACK)) {
908 CERROR("test case OBD_FAIL_MDS_OPEN_PACK\n");
909 req->rq_status = -ENOMEM;
913 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
915 CERROR("mds: pack error: rc = %d\n", rc);
920 body = lustre_msg_buf(req->rq_reqmsg, 0);
922 /* was this animal open already and the client lost the reply? */
923 /* XXX need some way to detect a reopen, to avoid locked list walks */
924 med = &req->rq_export->exp_mds_data;
925 spin_lock(&med->med_open_lock);
926 list_for_each(tmp, &med->med_open_head) {
927 mfd = list_entry(tmp, typeof(*mfd), mfd_list);
928 if (!memcmp(&mfd->mfd_clienthandle, &body->handle,
929 sizeof(mfd->mfd_clienthandle)) &&
930 body->fid1.id == mfd->mfd_file->f_dentry->d_inode->i_ino) {
931 de = mfd->mfd_file->f_dentry;
932 spin_unlock(&med->med_open_lock);
933 CERROR("Re opening "LPD64"\n", body->fid1.id);
934 GOTO(out_pack, rc = 0);
937 spin_unlock(&med->med_open_lock);
/* allocate the open-file record before taking any references */
939 mfd = kmem_cache_alloc(mds_file_cache, GFP_KERNEL);
941 CERROR("mds: out of memory\n");
942 req->rq_status = -ENOMEM;
946 de = mds_fid2dentry(mds, &body->fid1, &mnt);
948 GOTO(out_free, rc = PTR_ERR(de));
950 /* check if this inode has seen a delayed object creation */
951 if (lustre_msg_get_op_flags(req->rq_reqmsg) & MDS_OPEN_HAS_EA) {
952 rc = mds_store_md(mds, req, 1, body, de->d_inode);
961 /* dentry_open does a dput(de) and mntput(mnt) on error */
962 file = dentry_open(de, mnt, flags & ~O_DIRECT);
968 file->private_data = mfd;
969 mfd->mfd_file = file;
970 memcpy(&mfd->mfd_clienthandle, &body->handle, sizeof(body->handle));
/* random cookie makes the wire handle unguessable/stale-proof */
971 get_random_bytes(&mfd->mfd_servercookie, sizeof(mfd->mfd_servercookie));
972 spin_lock(&med->med_open_lock);
973 list_add(&mfd->mfd_list, &med->med_open_head);
974 spin_unlock(&med->med_open_lock);
977 body = lustre_msg_buf(req->rq_repmsg, 0);
978 mds_pack_inode2fid(&body->fid1, de->d_inode);
979 mds_pack_inode2body(body, de->d_inode);
980 body->handle.addr = (__u64)(unsigned long)mfd;
981 body->handle.cookie = mfd->mfd_servercookie;
982 CDEBUG(D_INODE, "llite file "LPX64": addr %p, cookie "LPX64"\n",
983 mfd->mfd_clienthandle.addr, mfd, mfd->mfd_servercookie);
/* error path: poison and free the unused mfd */
987 mfd->mfd_servercookie = DEAD_HANDLE_MAGIC;
988 kmem_cache_free(mds_file_cache, mfd);
/*
 * MDS_CLOSE handler: validate the wire handle via mds_handle2mfd, close
 * the file record under med_open_lock, then pack an empty reply.
 */
993 static int mds_close(struct ptlrpc_request *req)
995 struct mds_export_data *med = &req->rq_export->exp_mds_data;
996 struct mds_body *body;
997 struct mds_file_data *mfd;
1001 body = lustre_msg_buf(req->rq_reqmsg, 0);
1003 mfd = mds_handle2mfd(&body->handle);
1005 CERROR("no handle for file close "LPD64
1006 ": addr "LPX64", cookie "LPX64"\n",
1007 body->fid1.id, body->handle.addr, body->handle.cookie);
1011 spin_lock(&med->med_open_lock);
1012 req->rq_status = mds_close_mfd(mfd, med);
1013 spin_unlock(&med->med_open_lock);
1015 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_CLOSE_PACK)) {
1016 CERROR("test case OBD_FAIL_MDS_CLOSE_PACK\n");
1017 req->rq_status = -ENOMEM;
1021 rc = lustre_pack_msg(0, NULL, NULL, &req->rq_replen, &req->rq_repmsg);
1023 CERROR("mds: lustre_pack_msg: rc = %d\n", rc);
1024 req->rq_status = rc;
/*
 * MDS_READPAGE handler: under the client's credentials, open the
 * directory fid read-only, report its size in the reply body, and bulk-
 * send one page of its contents via mds_sendpage.
 */
1030 static int mds_readpage(struct ptlrpc_request *req)
1032 struct mds_obd *mds = mds_req2mds(req);
1033 struct vfsmount *mnt;
1036 struct mds_body *body, *repbody;
1037 struct obd_run_ctxt saved;
1038 int rc, size = sizeof(*body);
1039 struct obd_ucred uc;
1042 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen, &req->rq_repmsg);
1043 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) {
1044 CERROR("mds: out of memory\n");
1045 GOTO(out, rc = -ENOMEM);
1048 body = lustre_msg_buf(req->rq_reqmsg, 0);
1049 uc.ouc_fsuid = body->fsuid;
1050 uc.ouc_fsgid = body->fsgid;
1051 uc.ouc_cap = body->capability;
1052 push_ctxt(&saved, &mds->mds_ctxt, &uc);
1053 de = mds_fid2dentry(mds, &body->fid1, &mnt);
1055 GOTO(out_pop, rc = PTR_ERR(de));
1057 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1059 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1060 /* note: in case of an error, dentry_open puts dentry */
1062 GOTO(out_pop, rc = PTR_ERR(file));
1064 repbody = lustre_msg_buf(req->rq_repmsg, 0);
1065 repbody->size = file->f_dentry->d_inode->i_size;
1066 repbody->valid = OBD_MD_FLSIZE;
1068 /* to make this asynchronous make sure that the handling function
1069 doesn't send a reply when this function completes. Instead a
1070 callback function would send the reply */
1071 /* note: in case of an error, dentry_open puts dentry */
1072 rc = mds_sendpage(req, file, body->size);
1074 filp_close(file, 0);
1076 pop_ctxt(&saved, &mds->mds_ctxt, &uc);
1078 req->rq_status = rc;
/*
 * MDS_REINT entry point: unpack the update record from buffer `offset`
 * and dispatch it to mds_reint_rec.  -EINVAL on a malformed record.
 */
1082 int mds_reint(struct ptlrpc_request *req, int offset)
1085 struct mds_update_record rec;
1087 rc = mds_update_unpack(req, offset, &rec);
1088 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1089 CERROR("invalid record\n");
1090 req->rq_status = -EINVAL;
1093 /* rc will be used to interrupt a for loop over multiple records */
1094 rc = mds_reint_rec(&rec, offset, req);
1098 /* forward declaration */
1099 int mds_handle(struct ptlrpc_request *req);
/*
 * Wait-condition helper for recovery: true when the head of the
 * recovery queue carries exactly the next expected transaction number.
 * NOTE(review): assumes the queue is non-empty when called — confirm
 * against the wait_event caller in process_recovery_queue.
 */
1101 static int check_for_next_transno(struct mds_obd *mds)
1103 struct ptlrpc_request *req;
1104 req = list_entry(mds->mds_recovery_queue.next,
1105 struct ptlrpc_request, rq_list);
1106 return req->rq_reqmsg->transno == mds->mds_next_recovery_transno;
/*
 * Drain the recovery queue in strict transno order: if the head request
 * is not the next expected transno, sleep until a queuing thread wakes
 * us; otherwise dequeue and replay it.  Loop structure (and the replay
 * call itself) is partly outside this listing.
 */
1109 static void process_recovery_queue(struct mds_obd *mds)
1111 struct ptlrpc_request *req;
1114 spin_lock(&mds->mds_processing_task_lock);
1115 req = list_entry(mds->mds_recovery_queue.next,
1116 struct ptlrpc_request, rq_list);
1118 if (req->rq_reqmsg->transno != mds->mds_next_recovery_transno) {
1119 spin_unlock(&mds->mds_processing_task_lock);
1120 wait_event(mds->mds_next_transno_waitq,
1121 check_for_next_transno(mds));
1124 list_del(&req->rq_list);
1125 spin_unlock(&mds->mds_processing_task_lock);
1127 DEBUG_REQ(D_HA, req, "");
1130 if (list_empty(&mds->mds_recovery_queue))
/*
 * Insert a replayed request into the recovery queue, kept sorted by
 * transno.  If another task owns the queue, just wake it when we added
 * the next-expected transno; otherwise claim ownership (by pid) and
 * process the queue ourselves.
 */
1135 static int queue_recovery_request(struct ptlrpc_request *req,
1136 struct mds_obd *mds)
1138 struct list_head *tmp;
1139 int inserted = 0, transno = req->rq_reqmsg->transno;
1142 DEBUG_REQ(D_HA, req, "not queueing");
1146 spin_lock(&mds->mds_processing_task_lock);
/* re-entrant call from the processing task itself: don't re-queue */
1148 if (mds->mds_processing_task == current->pid) {
1149 /* Processing the queue right now, don't re-add. */
1150 spin_unlock(&mds->mds_processing_task_lock);
/* sorted insert: before the first entry with a larger transno */
1155 list_for_each(tmp, &mds->mds_recovery_queue) {
1156 struct ptlrpc_request *reqiter =
1157 list_entry(tmp, struct ptlrpc_request, rq_list);
1158 if (reqiter->rq_reqmsg->transno > transno) {
1159 list_add_tail(&req->rq_list, &reqiter->rq_list);
1166 list_add_tail(&req->rq_list, &mds->mds_recovery_queue);
1168 if (mds->mds_processing_task != 0) {
1169 /* Someone else is processing this queue, we'll leave it to
1172 spin_unlock(&mds->mds_processing_task_lock);
1173 if (transno == mds->mds_next_recovery_transno)
1174 wake_up(&mds->mds_next_transno_waitq);
1178 /* Nobody is processing, and we know there's (at least) one to process
1179 * now, so we'll do the honours.
1181 mds->mds_processing_task = current->pid;
1182 spin_unlock(&mds->mds_processing_task_lock);
1184 process_recovery_queue(mds);
/*
 * Decide what to do with a request that arrives while recovery is in
 * progress: pass through connection-management ops, queue replayable
 * ops for ordered replay, and reject everything else with an error
 * reply.  *process tells the caller whether to handle the request now.
 */
1188 static int filter_recovery_request(struct ptlrpc_request *req,
1189 struct mds_obd *mds, int *process)
1191 switch (req->rq_reqmsg->opc) {
1193 case MDS_DISCONNECT:
1198 case MDS_GETSTATUS: /* used in unmounting */
1201 *process = queue_recovery_request(req, mds);
1205 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1207 RETURN(ptlrpc_error(req->rq_svc, req));
/*
 * Hold back the reply to a client's final (MSG_LAST_REPLAY) request
 * until every recoverable client has finished replay, then flush all
 * queued replies at once so no client resumes before recovery is done.
 */
1211 static int mds_queue_final_reply(struct ptlrpc_request *req, int rc)
1213 struct mds_obd *mds = mds_req2mds(req);
1216 /* Just like ptlrpc_error, but without the sending. */
1217 lustre_pack_msg(0, NULL, NULL, &req->rq_replen,
1219 req->rq_type = PTL_RPC_MSG_ERR;
1222 list_add(&req->rq_list, &mds->mds_delayed_reply_queue);
1223 if (--mds->mds_recoverable_clients == 0) {
1224 struct list_head *tmp, *n;
1227 "all clients recovered, sending delayed replies\n");
1228 list_for_each_safe(tmp, n, &mds->mds_delayed_reply_queue) {
1229 req = list_entry(tmp, struct ptlrpc_request, rq_list);
1230 DEBUG_REQ(D_HA, req, "delayed:");
1231 ptlrpc_reply(req->rq_svc, req);
1234 CDEBUG(D_HA, "%d recoverable clients remain\n",
1235 mds->mds_recoverable_clients);
/* Human-readable names for REINT sub-opcodes, indexed by opcode; used
 * only for DEBUG_REQ logging in mds_handle. */
1241 static char *reint_names[] = {
1242 [REINT_SETATTR] "setattr",
1243 [REINT_CREATE] "create",
1244 [REINT_LINK] "link",
1245 [REINT_UNLINK] "unlink",
1246 [REINT_RENAME] "rename"
/* Top-level request handler for the MDS/MDT service threads.
 *
 * Unpacks and sanity-checks the wire message, requires an established
 * export for every opcode except MDS_CONNECT, routes requests through
 * the recovery filter while recoverable clients remain, then
 * dispatches on the opcode.  After the handler runs, the reply is
 * stamped with last_xid/last_committed so the client can discard
 * acknowledged replay state.
 *
 * NOTE(review): several lines (case labels, braces, error paths) are
 * not visible in this excerpt; comments describe only the statements
 * shown. */
1249 int mds_handle(struct ptlrpc_request *req)
1253 struct mds_obd *mds = NULL; /* quell gcc overwarning */
/* Byte-swap/validate the request header; the OBD_FAIL hook lets tests
 * inject an unpack failure. */
1256 rc = lustre_unpack_msg(req->rq_reqmsg, req->rq_reqlen);
1257 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_HANDLE_UNPACK)) {
1258 CERROR("lustre_mds: Invalid request\n");
1262 LASSERT(!strcmp(req->rq_obd->obd_type->typ_name, LUSTRE_MDT_NAME));
/* Everything except CONNECT requires an existing export (connection). */
1264 if (req->rq_reqmsg->opc != MDS_CONNECT) {
1265 if (req->rq_export == NULL)
1266 GOTO(out, rc = -ENOTCONN);
1268 mds = mds_req2mds(req);
/* While clients are still replaying, requests may be filtered or
 * queued instead of being processed immediately. */
1269 if (mds->mds_recoverable_clients != 0) {
1270 rc = filter_recovery_request(req, mds, &should_process);
1271 if (rc || !should_process)
1276 switch (req->rq_reqmsg->opc) {
1278 DEBUG_REQ(D_INODE, req, "connect");
1279 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
1280 rc = target_handle_connect(req);
1281 /* Make sure that last_rcvd is correct. */
1283 /* Now that we have an export, set mds. */
1284 mds = mds_req2mds(req);
1285 mds_fsync_super(mds->mds_sb);
1289 case MDS_DISCONNECT:
1290 DEBUG_REQ(D_INODE, req, "disconnect");
1291 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
1292 rc = target_handle_disconnect(req);
1293 /* Make sure that last_rcvd is correct. */
1295 mds_fsync_super(mds->mds_sb);
1299 DEBUG_REQ(D_INODE, req, "getstatus");
1300 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
1301 rc = mds_getstatus(req);
1304 case MDS_GETLOVINFO:
1305 DEBUG_REQ(D_INODE, req, "getlovinfo");
1306 rc = mds_getlovinfo(req);
1310 DEBUG_REQ(D_INODE, req, "getattr");
1311 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
1312 rc = mds_getattr(0, req);
1316 DEBUG_REQ(D_INODE, req, "statfs");
1317 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
1318 rc = mds_statfs(req);
1322 DEBUG_REQ(D_INODE, req, "readpage\n");
1323 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
1324 rc = mds_readpage(req);
1326 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE))
/* MDS_REINT: the reint sub-opcode is in the first request buffer; the
 * high bits may carry a REPLAYING flag. */
1331 int size = sizeof(struct mds_body);
1332 int opc = *(u32 *)lustre_msg_buf(req->rq_reqmsg, 0),
1333 realopc = opc & REINT_OPCODE_MASK;
1335 DEBUG_REQ(D_INODE, req, "reint (%s%s)",
1336 reint_names[realopc],
1337 opc & REINT_REPLAYING ? "|REPLAYING" : "");
1339 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
/* Pre-pack the reply buffer so mds_reint() can fill in the mds_body. */
1341 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen,
1344 req->rq_status = rc;
1347 rc = mds_reint(req, 0);
1348 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET_REP, 0);
1353 DEBUG_REQ(D_INODE, req, "open");
1354 OBD_FAIL_RETURN(OBD_FAIL_MDS_OPEN_NET, 0);
1359 DEBUG_REQ(D_INODE, req, "close");
1360 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
1361 rc = mds_close(req);
/* DLM lock traffic is served by this same handler on the MDS. */
1365 DEBUG_REQ(D_INODE, req, "enqueue");
1366 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
1367 rc = ldlm_handle_enqueue(req);
1370 DEBUG_REQ(D_INODE, req, "convert");
1371 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
1372 rc = ldlm_handle_convert(req);
1374 case LDLM_BL_CALLBACK:
1375 case LDLM_CP_CALLBACK:
/* Lock callbacks flow server->client, never to the MDS itself. */
1376 DEBUG_REQ(D_INODE, req, "callback");
1377 CERROR("callbacks should not happen on MDS\n");
1379 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
1382 rc = ptlrpc_error(req->rq_svc, req);
/* Stamp the reply with the client's last-seen xid and the server's
 * last-committed transno so the client can purge its replay queue. */
1389 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1391 req->rq_repmsg->last_xid =
1392 HTON__u64(le64_to_cpu(med->med_mcd->mcd_last_xid));
1393 req->rq_repmsg->last_committed =
1394 HTON__u64(mds->mds_last_committed);
/* NOTE(review): passing cpu_to_le32(req->rq_xid) to a %d format looks
 * wrong on big-endian hosts -- debug-only output, but worth confirming
 * against the full source. */
1395 CDEBUG(D_INFO, "last_rcvd ~%Lu, last_committed %Lu, xid %d\n",
1396 (unsigned long long)mds->mds_last_rcvd,
1397 (unsigned long long)mds->mds_last_committed,
1398 cpu_to_le32(req->rq_xid));
/* A recovering client's final replay request is held back and only
 * answered once recovery of all clients completes. */
1402 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LAST_REPLAY) {
1403 struct mds_obd *mds = mds_req2mds(req);
1404 LASSERT(mds->mds_recoverable_clients);
1405 DEBUG_REQ(D_HA, req, "LAST_REPLAY, queuing reply");
1406 return mds_queue_final_reply(req, rc);
1409 /* MDS_CONNECT / EALREADY (note: not -EALREADY!) isn't an error */
1410 if (rc && (req->rq_reqmsg->opc != MDS_CONNECT ||
1412 CERROR("mds: processing error (opcode %d): %d\n",
1413 req->rq_reqmsg->opc, rc);
1414 ptlrpc_error(req->rq_svc, req);
1416 CDEBUG(D_NET, "sending reply\n");
1417 ptlrpc_reply(req->rq_svc, req);
1422 /* Update the server data on disk. This stores the new mount_count and
1423 * also the last_rcvd value to disk. If we don't have a clean shutdown,
1424 * then the server last_rcvd value may be less than that of the clients.
1425 * This will alert us that we may need to do client recovery.
1427 * Assumes we are already in the server filesystem context.
1429 * Also assumes for mds_last_rcvd that we are not modifying it (no locking).
/* Persist last_rcvd and mount_count from the in-memory mds_obd into
 * the on-disk server data file, then force the write to stable
 * storage.  Returns 0 on success or a negative error (see the comment
 * block above for the locking/context assumptions). */
1431 int mds_update_server_data(struct mds_obd *mds)
1433 struct mds_server_data *msd = mds->mds_server_data;
1434 struct file *filp = mds->mds_rcvd_filp;
/* On-disk fields are little-endian regardless of host byte order. */
1438 msd->msd_last_rcvd = cpu_to_le64(mds->mds_last_rcvd);
1439 msd->msd_mount_count = cpu_to_le64(mds->mds_mount_count);
1441 CDEBUG(D_SUPER, "MDS mount_count is %Lu, last_rcvd is %Lu\n",
1442 (unsigned long long)mds->mds_mount_count,
1443 (unsigned long long)mds->mds_last_rcvd);
/* lustre_fwrite returns the byte count; anything short is an error. */
1444 rc = lustre_fwrite(filp, (char *)msd, sizeof(*msd), &off);
1445 if (rc != sizeof(*msd)) {
1446 CERROR("error writing MDS server data: rc = %d\n", rc);
/* Flush: 2.4 kernels can only sync the whole device; 2.5+ can fsync
 * just this file. */
1451 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0))
1452 rc = fsync_dev(filp->f_dentry->d_inode->i_rdev);
1454 rc = file_fsync(filp, filp->f_dentry, 1);
1457 CERROR("error flushing MDS server data: rc = %d\n", rc);
1462 /* Do recovery actions for the MDS */
/* Called once no recoverable clients remain: bumps the mount count
 * and rewrites the on-disk server data inside the MDS filesystem
 * run context. */
1463 static int mds_recovery_complete(struct obd_device *obddev)
1465 struct mds_obd *mds = &obddev->u.mds;
1466 struct obd_run_ctxt saved;
1469 LASSERT(mds->mds_recoverable_clients == 0);
1471 /* This happens at the end when recovery is complete */
1472 ++mds->mds_mount_count;
/* push/pop the MDS run context so the write targets the MDS fs. */
1473 push_ctxt(&saved, &mds->mds_ctxt, NULL);
1474 rc = mds_update_server_data(mds);
1475 pop_ctxt(&saved, &mds->mds_ctxt, NULL);
1480 /* mount the file system (secretly) */
/* obd o_setup method for the "mds" device: mounts the backing
 * filesystem named in ioc_inlbuf1 (fstype in ioc_inlbuf2), sets up
 * the fsfilt methods, DLM server namespace, ldlm callback client and
 * the recovery queues.  Error labels unwind in reverse order of
 * acquisition. */
1481 static int mds_setup(struct obd_device *obddev, obd_count len, void *buf)
1483 struct obd_ioctl_data* data = buf;
1484 struct mds_obd *mds = &obddev->u.mds;
1485 struct vfsmount *mnt;
1490 #ifdef CONFIG_DEV_RDONLY
1491 dev_clear_rdonly(2);
/* Both the device name and the fstype must be supplied. */
1493 if (!data->ioc_inlbuf1 || !data->ioc_inlbuf2)
1494 GOTO(err_dec, rc = -EINVAL);
1496 obddev->obd_fsops = fsfilt_get_ops(data->ioc_inlbuf2);
1497 if (IS_ERR(obddev->obd_fsops))
1498 GOTO(err_dec, rc = PTR_ERR(obddev->obd_fsops));
1500 mnt = do_kern_mount(data->ioc_inlbuf2, 0, data->ioc_inlbuf1, NULL);
1503 CERROR("do_kern_mount failed: rc = %d\n", rc);
1507 CDEBUG(D_SUPER, "%s: mnt = %p\n", data->ioc_inlbuf1, mnt);
1508 mds->mds_sb = mnt->mnt_root->d_inode->i_sb;
1510 GOTO(err_put, rc = -ENODEV);
1512 init_MUTEX(&mds->mds_transno_sem);
1513 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
1514 rc = mds_fs_setup(obddev, mnt);
1516 CERROR("MDS filesystem method init failed: rc = %d\n", rc);
1520 obddev->obd_namespace =
1521 ldlm_namespace_new("mds_server", LDLM_NAMESPACE_SERVER);
1522 if (obddev->obd_namespace == NULL) {
/* NOTE(review): calling mds_cleanup() here and then also unwinding
 * via err_fs looks like it could double-free resources -- confirm
 * against the full source. */
1523 mds_cleanup(obddev);
1524 GOTO(err_fs, rc = -ENOMEM);
/* Client half used for sending DLM callbacks to lock holders. */
1527 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
1528 "mds_ldlm_client", &obddev->obd_ldlm_client);
1530 spin_lock_init(&mds->mds_processing_task_lock);
1531 mds->mds_processing_task = 0;
1532 INIT_LIST_HEAD(&mds->mds_recovery_queue);
1533 INIT_LIST_HEAD(&mds->mds_delayed_reply_queue);
/* error unwinding, reverse order of setup: */
1538 mds_fs_cleanup(obddev);
1541 mntput(mds->mds_vfsmnt);
1545 fsfilt_put_ops(obddev->obd_fsops);
/* obd o_cleanup method: flush the server data, close the last_rcvd
 * file, unmount the backing filesystem and release the DLM namespace
 * and fsfilt operations acquired in mds_setup(). */
1551 static int mds_cleanup(struct obd_device *obddev)
1553 struct super_block *sb;
1554 struct mds_obd *mds = &obddev->u.mds;
1555 struct obd_run_ctxt saved;
/* Writes must happen inside the MDS filesystem run context. */
1562 push_ctxt(&saved, &mds->mds_ctxt, NULL);
1563 mds_update_server_data(mds);
1565 if (mds->mds_rcvd_filp) {
1566 int rc = filp_close(mds->mds_rcvd_filp, 0);
1567 mds->mds_rcvd_filp = NULL;
1570 CERROR("last_rcvd file won't close, rc=%d\n", rc);
1572 pop_ctxt(&saved, &mds->mds_ctxt, NULL);
1575 mntput(mds->mds_vfsmnt);
1578 ldlm_namespace_free(obddev->obd_namespace);
1581 #ifdef CONFIG_DEV_RDONLY
1582 dev_clear_rdonly(2);
1584 mds_fs_cleanup(obddev);
1585 fsfilt_put_ops(obddev->obd_fsops);
/* DLM intent policy callback (registered in mds_init): when a lock
 * enqueue carries an intent buffer (open/create/getattr/lookup/...),
 * execute the metadata operation server-side, then either abort the
 * lock (result delivered in lock_policy_res2) or re-target the lock
 * onto the child object's resource.  Without an intent, just pack a
 * bare ldlm_reply.
 *
 * NOTE(review): some case labels and braces are not visible in this
 * excerpt; comments describe only the statements shown. */
1591 static int ldlm_intent_policy(struct ldlm_lock *lock, void *req_cookie,
1592 ldlm_mode_t mode, int flags, void *data)
1594 struct ptlrpc_request *req = req_cookie;
1601 if (req->rq_reqmsg->bufcount > 1) {
1602 /* an intent needs to be considered */
1603 struct ldlm_intent *it = lustre_msg_buf(req->rq_reqmsg, 1);
1604 struct mds_obd *mds= &req->rq_export->exp_obd->u.mds;
1605 struct mds_body *mds_rep;
1606 struct ldlm_reply *rep;
1607 __u64 new_resid[3] = {0, 0, 0}, old_res;
/* reply = ldlm_reply + mds_body + room for the LOV EA */
1608 int rc, size[3] = {sizeof(struct ldlm_reply),
1609 sizeof(struct mds_body),
1610 mds->mds_max_mdsize};
/* The intent opcode arrives in network byte order. */
1612 it->opc = NTOH__u64(it->opc);
1614 LDLM_DEBUG(lock, "intent policy, opc: %s",
1615 ldlm_it2str(it->opc));
1617 rc = lustre_pack_msg(3, size, NULL, &req->rq_replen,
1620 rc = req->rq_status = -ENOMEM;
1624 rep = lustre_msg_buf(req->rq_repmsg, 0);
1625 rep->lock_policy_res1 = 1;
1627 /* execute policy */
1628 switch ((long)it->opc) {
/* create/open runs through mds_reint(); EEXIST is tolerated so that
 * O_CREAT on an existing file still succeeds. */
1629 case IT_CREAT|IT_OPEN:
1630 rc = mds_reint(req, 2);
1631 if (rc || (req->rq_status != 0 &&
1632 req->rq_status != -EEXIST)) {
1633 rep->lock_policy_res2 = req->rq_status;
1634 RETURN(ELDLM_LOCK_ABORTED);
1645 rc = mds_reint(req, 2);
1646 if (rc || (req->rq_status != 0 &&
1647 req->rq_status != -EISDIR &&
1648 req->rq_status != -ENOTDIR)) {
1649 rep->lock_policy_res2 = req->rq_status;
1650 RETURN(ELDLM_LOCK_ABORTED);
/* lookup-style intents resolve the name to an inode via getattr */
1661 rc = mds_getattr_name(2, req);
1662 /* FIXME: we need to sit down and decide on who should
1663 * set req->rq_status, who should return negative and
1664 * positive return values, and what they all mean. */
1665 if (rc || req->rq_status != 0) {
1666 rep->lock_policy_res2 = req->rq_status;
1667 RETURN(ELDLM_LOCK_ABORTED);
1670 case IT_READDIR|IT_OPEN:
1674 CERROR("Unhandled intent "LPD64"\n", it->opc);
1678 /* We don't bother returning a lock to the client for a file
1679 * or directory we are removing.
1681 * As for link and rename, there is no reason for the client
1682 * to get a lock on the target at this point. If they are
1683 * going to modify the file/directory later they will get a
1684 * lock at that time.
1686 if (it->opc & (IT_UNLINK | IT_RMDIR | IT_LINK | IT_LINK2 |
1687 IT_RENAME | IT_RENAME2))
1688 RETURN(ELDLM_LOCK_ABORTED);
1690 rep->lock_policy_res2 = req->rq_status;
1691 mds_rep = lustre_msg_buf(req->rq_repmsg, 1);
1693 /* If the client is about to open a file that doesn't have an MD
1694 * stripe record, it's going to need a write lock. */
1695 if (it->opc & IT_OPEN && !(mds_rep->valid & OBD_MD_FLEASIZE)) {
1696 LDLM_DEBUG(lock, "open with no EA; returning PW lock");
1697 lock->l_req_mode = LCK_PW;
1700 if (flags & LDLM_FL_INTENT_ONLY) {
1701 LDLM_DEBUG(lock, "INTENT_ONLY, aborting lock");
1702 RETURN(ELDLM_LOCK_ABORTED);
1704 /* Give the client a lock on the child object, instead of the
1705 * parent that it requested. */
1706 new_resid[0] = NTOH__u32(mds_rep->ino);
1707 new_resid[1] = NTOH__u32(mds_rep->generation);
1708 if (new_resid[0] == 0)
1710 old_res = lock->l_resource->lr_name[0];
1712 ldlm_lock_change_resource(lock, new_resid);
1713 if (lock->l_resource == NULL) {
/* NOTE(review): old_res is __u64 but appears to be printed with %ld;
 * confirm the format against the full source. */
1717 LDLM_DEBUG(lock, "intent policy, old res %ld",
1719 RETURN(ELDLM_LOCK_CHANGED);
/* No intent buffer: just pack a bare ldlm_reply. */
1721 int size = sizeof(struct ldlm_reply);
1722 rc = lustre_pack_msg(1, &size, NULL, &req->rq_replen,
1732 int mds_attach(struct obd_device *dev, obd_count len, void *data)
1734 return lprocfs_reg_obd(dev, status_var_nm_1, dev);
/* obd o_detach method: tear down the lprocfs entries created by
 * mds_attach(). */
int mds_detach(struct obd_device *dev)
{
        return lprocfs_dereg_obd(dev);
}
/* obd o_setup method for the "mdt" device: creates the ptlrpc service
 * listening on MDS_REQUEST_PORTAL (dispatching to mds_handle) and
 * starts MDT_NUM_THREADS worker threads, unwinding the service if any
 * thread fails to start. */
1742 static int mdt_setup(struct obd_device *obddev, obd_count len, void *buf)
1745 // struct obd_ioctl_data* data = buf;
1746 struct mds_obd *mds = &obddev->u.mds;
1752 mds->mds_service = ptlrpc_init_svc(MDS_NEVENTS, MDS_NBUFS,
1753 MDS_BUFSIZE, MDS_MAXREQSIZE,
1754 MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
1755 "self", mds_handle, "mds");
1756 if (!mds->mds_service) {
1757 CERROR("failed to start service\n");
1758 GOTO(err_dec, rc = -ENOMEM);
/* one named worker per thread: ll_mdt_00, ll_mdt_01, ... */
1761 for (i = 0; i < MDT_NUM_THREADS; i++) {
1763 sprintf(name, "ll_mdt_%02d", i);
1764 rc = ptlrpc_start_thread(obddev, mds->mds_service, name);
1766 CERROR("cannot start MDT thread #%d: rc %d\n", i, rc);
1767 GOTO(err_thread, rc);
/* error path: stop any threads already running, then drop the service */
1774 ptlrpc_stop_all_threads(mds->mds_service);
1775 ptlrpc_unregister_service(mds->mds_service);
/* obd o_cleanup method for the "mdt" device: stop all handler threads
 * and unregister the ptlrpc service created by mdt_setup(). */
1782 static int mdt_cleanup(struct obd_device *obddev)
1784 struct mds_obd *mds = &obddev->u.mds;
1787 ptlrpc_stop_all_threads(mds->mds_service);
1788 ptlrpc_unregister_service(mds->mds_service);
1794 extern int mds_iocontrol(unsigned int cmd, struct lustre_handle *conn,
1795 int len, void *karg, void *uarg);
1797 /* use obd ops to offer management infrastructure */
/* Method table for the "mds" obd type (registered in mds_init);
 * GCC-style "label:" designated initializers. */
1798 static struct obd_ops mds_obd_ops = {
1799 o_attach: mds_attach,
1800 o_detach: mds_detach,
1801 o_connect: mds_connect,
1802 o_disconnect: mds_disconnect,
1804 o_cleanup: mds_cleanup,
1805 o_iocontrol: mds_iocontrol
/* Method table for the "mdt" (request-serving) obd type. */
1808 static struct obd_ops mdt_obd_ops = {
1810 o_cleanup: mdt_cleanup,
/* Module init: create the mds_file_data slab cache, register the
 * "mds" and "mdt" obd types, and install the DLM intent policy. */
1814 static int __init mds_init(void)
1816 mds_file_cache = kmem_cache_create("ll_mds_file_data",
1817 sizeof(struct mds_file_data),
1819 if (mds_file_cache == NULL)
1822 class_register_type(&mds_obd_ops, status_class_var, LUSTRE_MDS_NAME);
1823 class_register_type(&mdt_obd_ops, 0, LUSTRE_MDT_NAME);
1824 ldlm_register_intent(ldlm_intent_policy);
/* Module exit: undo mds_init() in reverse order and destroy the slab
 * cache (kmem_cache_destroy returns nonzero if objects are still
 * allocated, i.e. a leak). */
1829 static void __exit mds_exit(void)
1831 ldlm_unregister_intent();
1832 class_unregister_type(LUSTRE_MDS_NAME);
1833 class_unregister_type(LUSTRE_MDT_NAME);
1834 if (kmem_cache_destroy(mds_file_cache))
1835 CERROR("couldn't free MDS file cache\n");
/* Standard kernel-module metadata and entry points. */
1838 MODULE_AUTHOR("Cluster File Systems <info@clusterfs.com>");
1839 MODULE_DESCRIPTION("Lustre Metadata Server (MDS) v0.01");
1840 MODULE_LICENSE("GPL");
1842 module_init(mds_init);
1843 module_exit(mds_exit);