1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of Lustre, http://www.lustre.org.
15 * Lustre is free software; you can redistribute it and/or
16 * modify it under the terms of version 2 of the GNU General Public
17 * License as published by the Free Software Foundation.
19 * Lustre is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with Lustre; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_MDS
34 #include <linux/module.h>
35 #include <linux/lustre_mds.h>
36 #include <linux/lustre_dlm.h>
37 #include <linux/init.h>
38 #include <linux/obd_class.h>
39 #include <linux/random.h>
41 #include <linux/jbd.h>
42 #include <linux/ext3_fs.h>
43 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
44 # include <linux/smp_lock.h>
45 # include <linux/buffer_head.h>
46 # include <linux/workqueue.h>
47 # include <linux/mount.h>
49 # include <linux/locks.h>
51 #include <linux/obd_lov.h>
52 #include <linux/obd_ost.h>
53 #include <linux/lustre_mds.h>
54 #include <linux/lustre_fsfilt.h>
55 #include <linux/lprocfs_status.h>
56 #include <linux/lustre_commit_confd.h>
58 #include "mds_internal.h"
60 static int mds_intent_policy(struct ldlm_namespace *ns,
61 struct ldlm_lock **lockp, void *req_cookie,
62 ldlm_mode_t mode, int flags, void *data);
63 static int mds_postsetup(struct obd_device *obd);
64 static int mds_cleanup(struct obd_device *obd, int flags);
67 /* Assumes caller has already pushed into the kernel filesystem context */
/* Read 'count' bytes of directory data from 'file' starting at 'offset'
 * and ship them to the client as a ptlrpc bulk PUT.  Pages are allocated,
 * filled with fsfilt_readpage(), transferred, and freed on all exit paths.
 * On bulk timeout or network error the client export is evicted.
 * NOTE(review): several lines are elided in this listing (error checks
 * after OBD_ALLOC/ptlrpc_prep_bulk_exp, closing braces, labels). */
68 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
69 loff_t offset, int count)
71 struct ptlrpc_bulk_desc *desc;
72 struct l_wait_info lwi;
74 int rc = 0, npages, i, tmpcount, tmpsize = 0;
77 LASSERT((offset & (PAGE_SIZE - 1)) == 0); /* I'm dubious about this */
/* Round the byte count up to whole pages. */
79 npages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT;
80 OBD_ALLOC(pages, sizeof(*pages) * npages);
82 GOTO(out, rc = -ENOMEM);
84 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
87 GOTO(out_free, rc = -ENOMEM);
/* Allocate one page per chunk and attach each to the bulk descriptor. */
89 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
90 tmpsize = tmpcount > PAGE_SIZE ? PAGE_SIZE : tmpcount;
92 pages[i] = alloc_pages(GFP_KERNEL, 0);
94 GOTO(cleanup_buf, rc = -ENOMEM);
96 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* Fill the pages with directory data; fsfilt_readpage() advances 'offset'. */
99 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
100 tmpsize = tmpcount > PAGE_SIZE ? PAGE_SIZE : tmpcount;
101 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
102 tmpsize, offset, file->f_dentry->d_inode->i_ino,
103 file->f_dentry->d_inode->i_size);
105 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
106 kmap(pages[i]), tmpsize, &offset);
110 GOTO(cleanup_buf, rc = -EIO);
113 LASSERT(desc->bd_nob == count);
115 rc = ptlrpc_start_bulk_transfer(desc);
117 GOTO(cleanup_buf, rc);
/* Fault-injection point used by the recovery test suite. */
119 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
120 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
121 OBD_FAIL_MDS_SENDPAGE, rc = -EIO);
122 GOTO(abort_bulk, rc);
/* Bounded wait for bulk completion: obd_timeout/4 seconds. */
125 lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
126 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), &lwi);
127 LASSERT (rc == 0 || rc == -ETIMEDOUT);
130 if (desc->bd_success &&
131 desc->bd_nob_transferred == count)
132 GOTO(cleanup_buf, rc);
134 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
/* Bulk failed: log and evict the export so the client reconnects cleanly. */
137 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
138 (rc == -ETIMEDOUT) ? "timeout" : "network error",
139 desc->bd_nob_transferred, count,
140 req->rq_export->exp_client_uuid.uuid,
141 req->rq_export->exp_connection->c_remote_uuid.uuid);
143 ptlrpc_fail_export(req->rq_export);
147 ptlrpc_abort_bulk (desc);
/* Common cleanup: free every page, the descriptor, and the page array. */
149 for (i = 0; i < npages; i++)
151 __free_pages(pages[i], 0);
153 ptlrpc_free_bulk(desc);
155 OBD_FREE(pages, sizeof(*pages) * npages);
/* Decide which DLM lock mode should protect a whole directory (lock #2
 * in the comment below), given the caller's intended access 'mode'
 * (LCK_PR for lookup, LCK_PW for modification) and whether the directory
 * is expected to be split across MDSes.  Returns an LCK_* mode.
 * NOTE(review): several branches/returns are elided in this listing. */
160 int mds_lock_mode_for_dir(struct obd_device *obd,
161 struct dentry *dentry, int mode)
165 /* any dir access needs couple locks:
166 * 1) on part of dir we gonna lookup/modify in
167 * 2) on a whole dir to protect it from concurrent splitting
168 * and to flush client's cache for readdir()
169 * so, for a given mode and dentry this routine decides what
170 * lock mode to use for lock #2:
171 * 1) if caller's gonna lookup in dir then we need to protect
172 * dir from being splitted only - LCK_CR
173 * 2) if caller's gonna modify dir then we need to protect
174 * dir from being splitted and to flush cache - LCK_CW
175 * 3) if caller's gonna modify dir and that dir seems ready
176 * for splitting then we need to protect it from any
177 * type of access (lookup/modify/split) - LCK_EX -bzzz */
179 split = mds_splitting_expected(obd, dentry);
182 * it is important to check here only for MDS_NO_SPLITTABLE. The reason
183 * is that MDS_NO_SPLITTABLE means dir is not splittable in principle
184 * and another thread will not split it on the quiet. But if we have
185 * MDS_NO_SPLIT_EXPECTED, this means, that dir may be splitted anytime,
186 * but not now (for current thread) and we should consider that it can
187 * happen soon and go that branch which can yield LCK_EX to protect from
188 * possible splitting.
190 if (split == MDS_NO_SPLITTABLE) {
192 * this inode won't be splitted. so we need not to protect from
193 * just flush client's cache on modification.
202 if (mode == LCK_PR) {
204 } else if (mode == LCK_PW) {
206 * caller gonna modify directory.we use concurrent write
207 * lock here to retract client's cache for readdir.
210 if (split == MDS_EXPECT_SPLIT) {
212 * splitting possible. serialize any access the
213 * idea is that first one seen dir is splittable
214 * is given exclusive lock and split
215 * directory. caller passes lock mode to
216 * mds_try_to_split_dir() and splitting would be
217 * done with exclusive lock only -bzzz.
219 CDEBUG(D_OTHER, "%s: gonna split %lu/%lu\n",
221 (unsigned long)dentry->d_inode->i_ino,
222 (unsigned long)dentry->d_inode->i_generation);
230 /* only valid locked dentries or errors should be returned */
/* Resolve 'id' to a dentry and enqueue an IBITS lock covering 'lockpart'
 * on it with mode 'lock_mode', returning the handle in lockh[0].  For
 * PDIROPS directories a second whole-directory lock is taken first into
 * lockh[1], with its mode chosen by mds_lock_mode_for_dir() and returned
 * through *mode.  On enqueue failure an ERR_PTR is returned.
 * NOTE(review): the non-PDIROPS resource-id setup and several error
 * paths are elided in this listing. */
231 struct dentry *mds_id2locked_dentry(struct obd_device *obd, struct lustre_id *id,
232 struct vfsmount **mnt, int lock_mode,
233 struct lustre_handle *lockh, int *mode,
234 char *name, int namelen, __u64 lockpart)
236 struct dentry *de = mds_id2dentry(obd, id, mnt), *retval = de;
237 ldlm_policy_data_t policy = { .l_inodebits = { lockpart } };
238 struct ldlm_res_id res_id = { .name = {0} };
/* DLM resource is named by the object's fid/group. */
246 res_id.name[0] = id_fid(id);
247 res_id.name[1] = id_group(id);
/* PDIROPS: lock the whole directory (UPDATE bit) before the name lock. */
250 if (name && IS_PDIROPS(de->d_inode)) {
251 ldlm_policy_data_t cpolicy =
252 { .l_inodebits = { MDS_INODELOCK_UPDATE } };
253 LASSERT(mode != NULL);
254 *mode = mds_lock_mode_for_dir(obd, de, lock_mode);
256 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
258 &cpolicy, *mode, &flags,
260 ldlm_completion_ast, NULL, NULL,
261 NULL, 0, NULL, lockh + 1);
262 if (rc != ELDLM_OK) {
264 RETURN(ERR_PTR(-ENOLCK));
/* Sub-divide the resource by name hash for per-name locking. */
269 res_id.name[2] = full_name_hash(name, namelen);
271 CDEBUG(D_INFO, "take lock on "DLID4":"LPX64"\n",
272 OLID4(id), res_id.name[2]);
275 #warning "No PDIROPS support in the kernel"
277 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, res_id,
278 LDLM_IBITS, &policy, lock_mode, &flags,
279 mds_blocking_ast, ldlm_completion_ast,
280 NULL, NULL, NULL, 0, NULL, lockh);
281 if (rc != ELDLM_OK) {
283 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
/* Drop the PDIROPS directory lock taken above on failure.
 * NOTE(review): decref uses LCK_CW while the enqueue used *mode —
 * presumably guarded by elided code; verify against full source. */
286 ldlm_lock_decref(lockh + 1, LCK_CW);
293 #ifndef DCACHE_DISCONNECTED
294 #define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
298 /* Look up an entry by inode number. This function ONLY returns valid dget'd
299 * dentries with an initialized inode or errors */
/* Resolve 'id' (ino/generation) to a dentry via a lookup of "0x%lx" under
 * mds->mds_id_de.  Rejects bad inodes and generation mismatches with
 * -ENOENT; if 'mnt' is non-NULL it receives mds->mds_vfsmnt.
 * NOTE(review): the idname buffer declaration and some error branches
 * are elided in this listing. */
300 struct dentry *mds_id2dentry(struct obd_device *obd, struct lustre_id *id,
301 struct vfsmount **mnt)
303 unsigned long ino = (unsigned long)id_ino(id);
304 __u32 generation = (__u32)id_gen(id);
305 struct mds_obd *mds = &obd->u.mds;
306 struct dentry *result;
/* ino 0 can never be valid — treat as a stale handle. */
311 RETURN(ERR_PTR(-ESTALE));
313 snprintf(idname, sizeof(idname), "0x%lx", ino);
315 CDEBUG(D_DENTRY, "--> mds_id2dentry: ino/gen %lu/%u, sb %p\n",
316 ino, generation, mds->mds_sb);
318 /* under ext3 this is neither supposed to return bad inodes nor NULL
320 result = ll_lookup_one_len(idname, mds->mds_id_de,
325 inode = result->d_inode;
327 RETURN(ERR_PTR(-ENOENT));
329 if (is_bad_inode(inode)) {
330 CERROR("bad inode returned %lu/%u\n",
331 inode->i_ino, inode->i_generation);
333 RETURN(ERR_PTR(-ENOENT));
336 /* here we disabled generation check, as root inode i_generation
337 * of cache mds and real mds are different. */
338 if (inode->i_ino != id_ino(&mds->mds_rootid) && generation &&
339 inode->i_generation != generation) {
340 /* we didn't find the right inode.. */
341 CERROR("bad inode %lu, link: %lu, ct: %d, generation %u/%u\n",
342 inode->i_ino, (unsigned long)inode->i_nlink,
343 atomic_read(&inode->i_count), inode->i_generation,
346 RETURN(ERR_PTR(-ENOENT));
350 *mnt = mds->mds_vfsmnt;
358 /* Establish a connection to the MDS.
360 * This will set up an export structure for the client to hold state data about
361 * that client, like open files, the last operation number it did on the server,
/* Creates the export via class_connect(), allocates per-client data
 * (mcd), records the client UUID in it and registers the client in the
 * last_rcvd file via mds_client_add().  On any failure after
 * class_connect() the export is disconnected again.
 * NOTE(review): the success path and the error-label structure are
 * partially elided in this listing. */
364 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
365 struct obd_uuid *cluuid, unsigned long flags)
367 struct mds_export_data *med;
368 struct mds_client_data *mcd;
369 struct obd_export *exp;
/* Reject obviously invalid arguments before touching any state. */
373 if (!conn || !obd || !cluuid)
376 /* XXX There is a small race between checking the list and adding a new
377 * connection for the same UUID, but the real threat (list corruption
378 * when multiple different clients connect) is solved.
380 * There is a second race between adding the export to the list, and
381 * filling in the client data below. Hence skipping the case of NULL
382 * mcd above. We should already be controlling multiple connects at the
383 * client, and we can't hold the spinlock over memory allocations
384 * without risk of deadlocking.
386 rc = class_connect(conn, obd, cluuid);
389 exp = class_conn2export(conn);
391 LASSERT(exp != NULL);
392 med = &exp->exp_mds_data;
394 OBD_ALLOC(mcd, sizeof(*mcd));
396 CERROR("%s: out of memory for client data.\n",
398 GOTO(out, rc = -ENOMEM);
401 memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
/* Persist the client in the last_rcvd file (-1 = pick a free slot). */
404 rc = mds_client_add(obd, &obd->u.mds, med, -1);
/* Error path: free client data and tear the connection down again. */
411 OBD_FREE(mcd, sizeof(*mcd));
412 class_disconnect(exp, 0);
414 class_export_put(exp);
/* Post-connect processing: for a non-MDS (real client) connection, bump
 * mds_real_clients and mark the export; then, if an LMV name is
 * configured, (re)connect this MDS to the LMV. */
418 static int mds_connect_post(struct obd_export *exp, unsigned long flags)
420 struct obd_device *obd = exp->exp_obd;
421 struct mds_obd *mds = &obd->u.mds;
425 if (!(flags & OBD_OPT_MDS_CONNECTION)) {
426 if (!(exp->exp_flags & OBD_OPT_REAL_CLIENT)) {
/* First time we see this export as a real client: count it. */
427 atomic_inc(&mds->mds_real_clients);
428 CDEBUG(D_OTHER,"%s: peer from %s is real client (%d)\n",
429 obd->obd_name, exp->exp_client_uuid.uuid,
430 atomic_read(&mds->mds_real_clients));
431 exp->exp_flags |= OBD_OPT_REAL_CLIENT;
433 if (mds->mds_lmv_name)
434 rc = mds_lmv_connect(obd, mds->mds_lmv_name);
/* Initialize the per-export MDS state: the open-files list and the
 * spinlock that protects it. */
439 static int mds_init_export(struct obd_export *exp)
441 struct mds_export_data *med = &exp->exp_mds_data;
443 INIT_LIST_HEAD(&med->med_open_head);
444 spin_lock_init(&med->med_open_lock);
/* Tear down an export: force-close every file the client still had open
 * (which may trigger orphan unlinking) and free its last_rcvd slot.
 * Skips the file-close work for the MDS's own self-export. */
448 static int mds_destroy_export(struct obd_export *export)
450 struct mds_export_data *med;
451 struct obd_device *obd = export->exp_obd;
452 struct lvfs_run_ctxt saved;
456 med = &export->exp_mds_data;
457 target_destroy_export(export);
/* The self-export (UUID == obd UUID) has no client files to close. */
459 if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
462 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
464 /* Close any open files (which may also cause orphan unlinking). */
465 spin_lock(&med->med_open_lock);
466 while (!list_empty(&med->med_open_head)) {
467 struct list_head *tmp = med->med_open_head.next;
468 struct mds_file_data *mfd =
469 list_entry(tmp, struct mds_file_data, mfd_list);
470 BDEVNAME_DECLARE_STORAGE(btmp);
472 /* bug 1579: fix force-closing for 2.5 */
473 struct dentry *dentry = mfd->mfd_dentry;
/* Drop the lock around the close; mds_mfd_close may sleep. */
475 list_del(&mfd->mfd_list);
476 spin_unlock(&med->med_open_lock);
478 /* If you change this message, be sure to update
479 * replay_single:test_46 */
480 CERROR("force closing client file handle for %*s (%s:%lu)\n",
481 dentry->d_name.len, dentry->d_name.name,
482 ll_bdevname(dentry->d_inode->i_sb, btmp),
483 dentry->d_inode->i_ino);
484 /* child inode->i_alloc_sem protects orphan_dec_test and
485 * is_orphan race, mds_mfd_close drops it */
486 DOWN_WRITE_I_ALLOC_SEM(dentry->d_inode);
487 rc = mds_mfd_close(NULL, 0, obd, mfd,
488 !(export->exp_flags & OBD_OPT_FAILOVER));
490 CDEBUG(D_INODE, "Error closing file: %d\n", rc);
491 spin_lock(&med->med_open_lock);
493 spin_unlock(&med->med_open_lock);
494 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* Release the client's last_rcvd slot (unless this is a failover). */
498 mds_client_free(export, !(export->exp_flags & OBD_OPT_FAILOVER));
/* Disconnect a client export: possibly drop the LMV connection when the
 * last real client leaves, disconnect the export early so it cannot be
 * used, cancel its DLM locks, and complete any outstanding "difficult"
 * replies it still owns. */
502 static int mds_disconnect(struct obd_export *exp, unsigned long flags)
504 unsigned long irqflags;
505 struct obd_device *obd;
510 LASSERT(exp != NULL);
511 obd = class_exp2obd(exp);
513 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
514 exp->exp_handle.h_cookie);
520 * suppress any inter-mds requests during disconnecting lmv if this is
521 * detected --force mode. This is needed to avoid endless recovery.
523 if (atomic_read(&mds->mds_real_clients) > 0 &&
524 !(exp->exp_flags & OBD_OPT_REAL_CLIENT))
525 flags |= OBD_OPT_FORCE;
527 if (!(exp->exp_flags & OBD_OPT_REAL_CLIENT)
528 && !atomic_read(&mds->mds_real_clients)) {
529 /* there was no client at all */
530 mds_lmv_disconnect(obd, flags);
533 if ((exp->exp_flags & OBD_OPT_REAL_CLIENT)
534 && atomic_dec_and_test(&mds->mds_real_clients)) {
535 /* time to drop LMV connections */
536 CDEBUG(D_OTHER, "%s: last real client %s disconnected. "
537 "Disconnnect from LMV now\n",
538 obd->obd_name, exp->exp_client_uuid.uuid);
539 mds_lmv_disconnect(obd, flags);
542 spin_lock_irqsave(&exp->exp_lock, irqflags);
543 exp->exp_flags = flags;
544 spin_unlock_irqrestore(&exp->exp_lock, irqflags);
546 /* disconnect early so that clients can't keep using export */
547 rc = class_disconnect(exp, flags);
548 ldlm_cancel_locks_for_export(exp);
550 /* complete all outstanding replies */
551 spin_lock_irqsave(&exp->exp_lock, irqflags);
552 while (!list_empty(&exp->exp_outstanding_replies)) {
553 struct ptlrpc_reply_state *rs =
554 list_entry(exp->exp_outstanding_replies.next,
555 struct ptlrpc_reply_state, rs_exp_list);
556 struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;
/* Hand each reply state back to its service for scheduling. */
558 spin_lock(&svc->srv_lock);
559 list_del_init(&rs->rs_exp_list);
560 ptlrpc_schedule_difficult_reply(rs);
561 spin_unlock(&svc->srv_lock);
563 spin_unlock_irqrestore(&exp->exp_lock, irqflags);
/* MDS_GETSTATUS handler: reply with a single mds_body carrying the
 * filesystem root id (mds_rootid) with OBD_MD_FID set. */
567 static int mds_getstatus(struct ptlrpc_request *req)
569 struct mds_obd *mds = mds_req2mds(req);
570 struct mds_body *body;
574 size = sizeof(*body);
576 rc = lustre_pack_reply(req, 1, &size, NULL);
577 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
578 CERROR("mds: out of memory for message: size=%d\n", size);
579 req->rq_status = -ENOMEM; /* superfluous? */
583 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*body));
585 body->valid |= OBD_MD_FID;
586 memcpy(&body->id1, &mds->mds_rootid, sizeof(body->id1));
589 * the last_committed and last_xid fields are filled in for all replies
590 * already - no need to do so here also.
/* DLM blocking AST for MDS-held locks.  On a blocking callback, mark the
 * lock CBPENDING and cancel it immediately if it has no readers/writers;
 * on LDLM_CB_CANCELING there is nothing to do. */
595 int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
596 void *data, int flag)
601 if (flag == LDLM_CB_CANCELING) {
602 /* Don't need to do anything here. */
606 /* XXX layering violation! -phil */
607 l_lock(&lock->l_resource->lr_namespace->ns_lock);
610 * get this: if mds_blocking_ast is racing with mds_intent_policy, such
611 * that mds_blocking_ast is called just before l_i_p takes the ns_lock,
612 * then by the time we get the lock, we might not be the correct
613 * blocking function anymore. So check, and return early, if so.
615 if (lock->l_blocking_ast != mds_blocking_ast) {
616 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
620 lock->l_flags |= LDLM_FL_CBPENDING;
621 do_ast = (!lock->l_readers && !lock->l_writers);
622 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
625 struct lustre_handle lockh;
628 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
629 ldlm_lock2handle(lock, &lockh);
630 rc = ldlm_cli_cancel(&lockh);
632 CERROR("ldlm_cli_cancel: %d\n", rc);
634 LDLM_DEBUG(lock, "Lock still has references, will be "
/* Fetch the striping EA for 'inode' into 'md' via fsfilt_get_md(), then
 * convert it: LOV EA for regular files, MEA for directories.
 * NOTE(review): locking around the fetch, the *size update and the
 * return path are elided in this listing. */
640 int mds_get_md(struct obd_device *obd, struct inode *inode, void *md,
649 rc = fsfilt_get_md(obd, inode, md, *size);
654 CERROR("Error %d reading eadata for ino %lu\n",
/* Normalize the on-disk EA to the wire format for its file type. */
659 if (S_ISREG(inode->i_mode))
660 rc = mds_convert_lov_ea(obd, inode, md, lmm_size);
661 if (S_ISDIR(inode->i_mode))
662 rc = mds_convert_mea_ea(obd, inode, md, lmm_size);
676 /* Call with lock=1 if you want mds_pack_md to take the i_sem.
677 * Call with lock=0 if the caller has already taken the i_sem. */
/* Pack the inode's striping MD into reply buffer 'offset' of 'msg' and
 * set the matching valid flags (OBD_MD_FLDIREA for dirs, OBD_MD_FLEASIZE
 * otherwise) plus eadatasize in 'body'.  A missing/zero-length reply
 * buffer or an oversized EA is handled without packing. */
678 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
679 struct mds_body *body, struct inode *inode, int lock)
681 struct mds_obd *mds = &obd->u.mds;
687 lmm = lustre_msg_buf(msg, offset, 0);
689 /* Some problem with getting eadata when I sized the reply
691 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
695 lmm_size = msg->buflens[offset];
697 /* I don't really like this, but it is a sanity check on the client
698 * MD request. However, if the client doesn't know how much space
699 * to reserve for the MD, it shouldn't be bad to have too much space.
701 if (lmm_size > mds->mds_max_mdsize) {
702 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
703 inode->i_ino, lmm_size, mds->mds_max_mdsize);
707 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock);
709 if (S_ISDIR(inode->i_mode))
710 body->valid |= OBD_MD_FLDIREA;
712 body->valid |= OBD_MD_FLEASIZE;
713 body->eadatasize = lmm_size;
/* Root-squash: if squashing is configured and the request runs as root
 * (uid or fsuid 0) from a nid other than the configured no-squash nid,
 * rewrite the request's credentials to the squash uid/gid and strip the
 * filesystem capabilities. */
720 void mds_squash_root(struct mds_obd *mds, struct mds_req_sec_desc *rsd,
/* No squash uid configured, or the request is not root — nothing to do. */
723 if (!mds->mds_squash_uid ||
724 (rsd->rsd_uid && rsd->rsd_fsuid))
/* The nosquash nid is exempt (e.g. the administrator's node). */
727 if (*peernid == mds->mds_nosquash_nid)
730 CDEBUG(D_OTHER, "squash req from 0x%llx, (%d:%d/%x)=>(%d:%d/%x)\n",
731 *peernid, rsd->rsd_fsuid, rsd->rsd_fsgid, rsd->rsd_cap,
732 mds->mds_squash_uid, mds->mds_squash_gid,
733 (rsd->rsd_cap & ~CAP_FS_MASK));
735 rsd->rsd_uid = mds->mds_squash_uid;
736 rsd->rsd_fsuid = mds->mds_squash_uid;
737 rsd->rsd_fsgid = mds->mds_squash_gid;
739 /* XXX should we remove all capabilities? */
740 rsd->rsd_cap &= ~CAP_FS_MASK;
/* Fill the getattr reply at 'reply_off' for 'dentry': pack the inode (or
 * cross-ref dentry) attributes into the mds_body, then either the
 * striping EA (regular files/dirs that requested it) or the symlink
 * target into the following buffer.  The caller must already have packed
 * the reply message. */
743 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
744 struct ptlrpc_request *req, struct mds_body *reqbody,
747 struct inode *inode = dentry->d_inode;
748 struct mds_body *body;
752 if (inode == NULL && !(dentry->d_flags & DCACHE_CROSS_REF))
755 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
756 LASSERT(body != NULL); /* caller prepped reply */
/* Cross-MDS reference: only dentry-level attributes are available. */
758 if (dentry->d_flags & DCACHE_CROSS_REF) {
759 mds_pack_dentry2body(obd, body, dentry,
760 (reqbody->valid & OBD_MD_FID) ? 1 : 0);
761 CDEBUG(D_OTHER, "cross reference: "DLID4"\n",
766 mds_pack_inode2body(obd, body, inode,
767 (reqbody->valid & OBD_MD_FID) ? 1 : 0);
769 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
770 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
771 rc = mds_pack_md(obd, req->rq_repmsg, reply_off + 1, body,
774 /* if we have LOV EA data, the OST holds size, atime, mtime. */
775 if (!(body->valid & OBD_MD_FLEASIZE) &&
776 !(body->valid & OBD_MD_FLDIREA))
777 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
778 OBD_MD_FLATIME | OBD_MD_FLMTIME);
779 } else if (S_ISLNK(inode->i_mode) &&
780 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
/* Read the symlink target directly into the reply buffer. */
781 int len = req->rq_repmsg->buflens[reply_off + 1];
782 char *symname = lustre_msg_buf(req->rq_repmsg, reply_off + 1, 0);
784 LASSERT(symname != NULL); /* caller prepped reply */
786 if (!inode->i_op->readlink) {
789 rc = inode->i_op->readlink(dentry, symname, len);
791 CERROR("readlink failed: %d\n", rc);
792 } else if (rc != len - 1) {
793 CERROR("Unexpected readlink rc %d: expecting %d\n",
797 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
798 body->valid |= OBD_MD_LINKNAME;
/* eadatasize includes the NUL terminator. */
799 body->eadatasize = rc + 1;
/* Pack a minimal one-buffer (mds_body only) getattr reply for a
 * cross-ref dentry — no inode is available locally, so no EA or symlink
 * buffer is reserved. */
809 static int mds_getattr_pack_msg_cf(struct ptlrpc_request *req,
810 struct dentry *dentry,
813 int rc = 0, size[1] = {sizeof(struct mds_body)};
/* Fault-injection point used by the recovery test suite. */
816 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
817 CERROR("failed MDS_GETATTR_PACK test\n");
818 req->rq_status = -ENOMEM;
822 rc = lustre_pack_reply(req, 1, size, NULL);
824 CERROR("lustre_pack_reply failed: rc %d\n", rc);
825 GOTO(out, req->rq_status = rc);
/* Size and pack the getattr reply for 'inode': always an mds_body, plus
 * an EA buffer (sized by probing fsfilt_get_md with a NULL buffer) for
 * striped files/dirs, or a symlink-target buffer bounded by the client's
 * requested eadatasize. */
833 static int mds_getattr_pack_msg(struct ptlrpc_request *req,
837 struct mds_obd *mds = mds_req2mds(req);
838 struct mds_body *body;
839 int rc = 0, size[2] = {sizeof(*body)}, bufcount = 1;
842 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*body));
843 LASSERT(body != NULL); /* checked by caller */
844 LASSERT_REQSWABBED(req, offset); /* swabbed by caller */
846 if ((S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
847 (S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))) {
/* Probe call: NULL buffer returns the EA size without copying. */
850 rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0);
852 CDEBUG(D_INODE, "got %d bytes MD data for inode %lu\n",
856 CERROR("error getting inode %lu MD: rc = %d\n",
859 } else if (rc > mds->mds_max_mdsize) {
861 CERROR("MD size %d larger than maximum possible %u\n",
862 rc, mds->mds_max_mdsize);
867 } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
868 if (inode->i_size + 1 != body->eadatasize)
869 CERROR("symlink size: %Lu, reply space: %d\n",
870 inode->i_size + 1, body->eadatasize);
/* Never reserve more than the client asked for. */
871 size[bufcount] = min_t(int, inode->i_size+1, body->eadatasize);
873 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
874 inode->i_size + 1, body->eadatasize);
/* Fault-injection point used by the recovery test suite. */
877 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
878 CERROR("failed MDS_GETATTR_PACK test\n");
879 req->rq_status = -ENOMEM;
880 GOTO(out, rc = -ENOMEM);
883 rc = lustre_pack_reply(req, bufcount, size, NULL);
885 CERROR("out of memory\n");
886 GOTO(out, req->rq_status = rc);
/* For a split directory, check whether 'name' hashes to a stripe that
 * lives on this MDS.  Reads the MEA from the directory inode; if the
 * master index differs from the group of the stripe chosen by
 * mea_name2idx(), the name belongs to another MDS.
 * NOTE(review): the returned error value in that case is elided here. */
894 int mds_check_mds_num(struct obd_device *obd, struct inode *inode,
895 char *name, int namelen)
897 struct mea *mea = NULL;
898 int mea_size, rc = 0;
901 rc = mds_get_lmv_attr(obd, inode, &mea, &mea_size);
906 * dir is already splitted, check if requested filename should
907 * live at this MDS or at another one.
909 int i = mea_name2idx(mea, name, namelen - 1);
910 if (mea->mea_master != id_group(&mea->mea_ids[i])) {
912 "inapropriate MDS(%d) for %s. should be "
913 "%lu(%d)\n", mea->mea_master, name,
914 (unsigned long)id_group(&mea->mea_ids[i]), i);
920 OBD_FREE(mea, mea_size);
/* GETATTR_LOCK / intent-getattr handler: look up 'name' under the parent
 * in body->id1 (or the child directly when no name is given), take the
 * appropriate parent/child DLM locks, and pack the child's attributes
 * into the reply.  Handles three paths: raw inode-number lookup
 * (valid == OBD_MD_FLID), the normal locked lookup, and a RESENT request
 * that reuses the already-granted child lock.  The child lock is
 * returned to the client in child_lockh on success.
 * NOTE(review): several error branches, the resent_req setup and parts
 * of the cleanup switch are elided in this listing. */
924 static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
925 struct lustre_handle *child_lockh, int child_part)
927 struct obd_device *obd = req->rq_export->exp_obd;
928 struct mds_obd *mds = &obd->u.mds;
929 struct ldlm_reply *rep = NULL;
930 struct lvfs_run_ctxt saved;
931 struct mds_req_sec_desc *rsd;
932 struct mds_body *body;
933 struct dentry *dparent = NULL, *dchild = NULL;
934 struct lvfs_ucred uc;
935 struct lustre_handle parent_lockh[2] = {{0}, {0}};
936 unsigned int namesize;
937 int rc = 0, cleanup_phase = 0, resent_req = 0, update_mode, reply_offset;
941 LASSERT(!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME));
942 MD_COUNTER_INCREMENT(obd, getattr_lock);
944 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
946 CERROR("Can't unpack security desc\n");
949 mds_squash_root(mds, rsd, &req->rq_peer.peer_id.nid);
951 /* swab now, before anyone looks inside the request. */
952 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
953 lustre_swab_mds_body);
955 CERROR("Can't swab mds_body\n");
956 GOTO(cleanup, rc = -EFAULT);
959 LASSERT_REQSWAB(req, offset + 1);
960 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
962 CERROR("Can't unpack name\n");
963 GOTO(cleanup, rc = -EFAULT);
965 namesize = req->rq_reqmsg->buflens[offset + 1];
967 /* namesize less than 2 means we have empty name, probably came from
968 revalidate by cfid, so no point in having name to be set */
972 LASSERT (offset == 1 || offset == 3);
974 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*rep));
/* Switch to the request's credentials for all fs operations below. */
980 rc = mds_init_ucred(&uc, rsd);
982 CERROR("can't init ucred\n");
986 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
987 cleanup_phase = 1; /* kernel context */
988 intent_set_disposition(rep, DISP_LOOKUP_EXECD);
990 LASSERT(namesize > 0);
/* A non-zero child lock handle means the client resent the request. */
991 if (child_lockh->cookie != 0) {
992 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
/* Raw path: the client wants only the inode number of 'name'. */
997 if (body->valid == OBD_MD_FLID) {
998 struct mds_body *mds_reply;
999 int size = sizeof(*mds_reply);
1003 dparent = mds_id2dentry(obd, &body->id1, NULL);
1004 if (IS_ERR(dparent)) {
1005 rc = PTR_ERR(dparent);
1010 * the user requested ONLY the inode number, so do a raw lookup.
1012 rc = lustre_pack_reply(req, 1, &size, NULL);
1014 CERROR("out of memory\n");
1018 dir = dparent->d_inode;
1019 LASSERT(dir->i_op->lookup_raw != NULL);
1020 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
1022 mds_reply = lustre_msg_buf(req->rq_repmsg, 0,
1023 sizeof(*mds_reply));
1025 id_ino(&mds_reply->id1) = inum;
1026 mds_reply->valid = OBD_MD_FLID;
/* Normal path: take parent+child locks in one step. */
1031 if (resent_req == 0) {
1032 LASSERT(id_fid(&body->id1) != 0);
1034 rc = mds_get_parent_child_locked(obd, mds, &body->id1,
1035 parent_lockh, &dparent,
1037 MDS_INODELOCK_UPDATE,
1040 child_lockh, &dchild,
1041 LCK_PR, child_part);
1046 * let's make sure this name should leave on this mds
1049 rc = mds_check_mds_num(obd, dparent->d_inode, name, namesize);
1053 /* we have no dentry here, drop LOOKUP bit */
1054 /* FIXME: we need MDS_INODELOCK_LOOKUP or not. */
1055 child_part &= ~MDS_INODELOCK_LOOKUP;
1056 CDEBUG(D_OTHER, "%s: retrieve attrs for "DLID4"\n",
1057 obd->obd_name, OLID4(&body->id1));
1059 dchild = mds_id2locked_dentry(obd, &body->id1, NULL,
1060 LCK_PR, parent_lockh,
1063 MDS_INODELOCK_UPDATE);
1064 if (IS_ERR(dchild)) {
1065 CERROR("can't find inode with id "DLID4", err = %d\n",
1066 OLID4(&body->id1), (int)PTR_ERR(dchild));
1067 GOTO(cleanup, rc = PTR_ERR(dchild));
/* Hand the child lock back to the client; drop the PDIROPS lock. */
1069 memcpy(child_lockh, parent_lockh, sizeof(parent_lockh[0]));
1071 if (parent_lockh[1].cookie)
1072 ldlm_lock_decref(parent_lockh + 1, update_mode);
/* Resent path: the lock was already granted, just redo the lookup. */
1076 struct ldlm_lock *granted_lock;
1078 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
1079 granted_lock = ldlm_handle2lock(child_lockh);
1081 LASSERTF(granted_lock != NULL, LPU64"/%lu lockh "LPX64"\n",
1082 id_fid(&body->id1), (unsigned long)id_group(&body->id1),
1083 child_lockh->cookie);
1085 dparent = mds_id2dentry(obd, &body->id1, NULL);
1088 dchild = ll_lookup_one_len(name, dparent, namesize - 1);
1090 LDLM_LOCK_PUT(granted_lock);
1093 cleanup_phase = 2; /* dchild, dparent, locks */
1095 if (!DENTRY_VALID(dchild)) {
1096 intent_set_disposition(rep, DISP_LOOKUP_NEG);
1098 * in the intent case, the policy clears this error: the
1099 * disposition is enough.
1104 intent_set_disposition(rep, DISP_LOOKUP_POS);
/* Pack the reply lazily — intent requests may already have one. */
1107 if (req->rq_repmsg == NULL) {
1108 if (dchild->d_flags & DCACHE_CROSS_REF)
1109 rc = mds_getattr_pack_msg_cf(req, dchild, offset);
1111 rc = mds_getattr_pack_msg(req, dchild->d_inode, offset);
1113 CERROR ("mds_getattr_pack_msg: %d\n", rc);
1118 rc = mds_getattr_internal(obd, dchild, req, body, reply_offset);
1119 GOTO(cleanup, rc); /* returns the lock to the client */
/* Staged cleanup: phase 2 drops dentries/locks, phase 1 pops context. */
1122 switch (cleanup_phase) {
1124 if (resent_req == 0) {
1125 if (rc && DENTRY_VALID(dchild))
1126 ldlm_lock_decref(child_lockh, LCK_PR);
1128 ldlm_lock_decref(parent_lockh, LCK_PR);
1130 if (parent_lockh[1].cookie != 0)
1131 ldlm_lock_decref(parent_lockh + 1, update_mode);
1138 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1139 mds_exit_ucred(&uc);
/* Plain MDS_GETATTR handler (no name, no locking): resolve body->id1 to
 * a dentry under the request's credentials, size/pack the reply, and
 * fill it via mds_getattr_internal(). */
1144 static int mds_getattr(struct ptlrpc_request *req, int offset)
1146 struct obd_device *obd = req->rq_export->exp_obd;
1147 struct lvfs_run_ctxt saved;
1149 struct mds_req_sec_desc *rsd;
1150 struct mds_body *body;
1151 struct lvfs_ucred uc;
1155 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
1157 CERROR("Can't unpack security desc\n");
1161 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1162 lustre_swab_mds_body);
1164 CERROR ("Can't unpack body\n");
1168 MD_COUNTER_INCREMENT(obd, getattr);
/* Switch to the request's credentials for the lookup. */
1170 rc = mds_init_ucred(&uc, rsd);
1172 CERROR("can't init ucred\n");
1176 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1177 de = mds_id2dentry(obd, &body->id1, NULL);
1179 rc = req->rq_status = PTR_ERR(de);
1183 rc = mds_getattr_pack_msg(req, de->d_inode, offset);
1185 CERROR("mds_getattr_pack_msg: %d\n", rc);
1189 req->rq_status = mds_getattr_internal(obd, de, req, body, 0);
1195 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1196 mds_exit_ucred(&uc);
/* statfs the MDS backing filesystem (cached up to 'max_age' by fsfilt)
 * and copy the result into 'osfs' under obd_osfs_lock. */
1200 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1201 unsigned long max_age)
1206 spin_lock(&obd->obd_osfs_lock);
1207 rc = fsfilt_statfs(obd, obd->u.mds.mds_sb, max_age);
1209 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
1210 spin_unlock(&obd->obd_osfs_lock);
/* MDS_STATFS handler: pack an obd_statfs reply and fill it from the
 * (briefly cached) backing-filesystem statistics. */
1215 static int mds_statfs(struct ptlrpc_request *req)
1217 struct obd_device *obd = req->rq_export->exp_obd;
1218 int rc, size = sizeof(struct obd_statfs);
1221 rc = lustre_pack_reply(req, 1, &size, NULL);
1222 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
1223 CERROR("mds: statfs lustre_pack_reply failed: rc = %d\n", rc);
1227 OBD_COUNTER_INCREMENT(obd, statfs);
1229 /* We call this so that we can cache a bit - 1 jiffie worth */
1230 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, 0, size),
1233 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1239 req->rq_status = rc;
/* MDS_SYNC handler: an all-zero id means "sync the whole filesystem";
 * otherwise fsync the single inode named by body->id1 and return its
 * attributes in the reply. */
1243 static int mds_sync(struct ptlrpc_request *req, int offset)
1245 struct obd_device *obd = req->rq_export->exp_obd;
1246 struct mds_obd *mds = &obd->u.mds;
1247 struct mds_body *body;
1248 int rc, size = sizeof(*body);
1251 body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
1253 GOTO(out, rc = -EPROTO);
1255 rc = lustre_pack_reply(req, 1, &size, NULL);
1256 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK)) {
1257 CERROR("fsync lustre_pack_reply failed: rc = %d\n", rc);
1261 if (id_ino(&body->id1) == 0) {
1262 /* an id of zero is taken to mean "sync whole filesystem" */
1263 rc = fsfilt_sync(obd, mds->mds_sb);
1267 /* just any file to grab fsync method - "file" arg unused */
1268 struct file *file = mds->mds_rcvd_filp;
1271 de = mds_id2dentry(obd, &body->id1, NULL);
1273 GOTO(out, rc = PTR_ERR(de));
/* fsync(NULL file, dentry, datasync=1); the file arg is unused above. */
1275 rc = file->f_op->fsync(NULL, de, 1);
1279 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*body));
1280 mds_pack_inode2body(obd, body, de->d_inode, 0);
1286 req->rq_status = rc;
1290 /* mds_readpage does not take a DLM lock on the inode, because the client must
1291 * already have a PR lock.
1293 * If we were to take another one here, a deadlock will result, if another
1294 * thread is already waiting for a PW lock. */
/* MDS_READPAGE handler: open the directory named by body->id1 under the
 * request's credentials, validate that the requested offset (body->size)
 * and length (body->nlink) are block-aligned, and bulk-send the pages
 * via mds_sendpage().  The reply carries the directory's current size. */
1295 static int mds_readpage(struct ptlrpc_request *req, int offset)
1297 struct obd_device *obd = req->rq_export->exp_obd;
1298 struct mds_obd *mds = &obd->u.mds;
1299 struct vfsmount *mnt;
1302 struct mds_req_sec_desc *rsd;
1303 struct mds_body *body, *repbody;
1304 struct lvfs_run_ctxt saved;
1305 int rc, size = sizeof(*repbody);
1306 struct lvfs_ucred uc;
1309 rc = lustre_pack_reply(req, 1, &size, NULL);
1310 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) {
1311 CERROR("mds: out of memory\n");
1312 GOTO(out, rc = -ENOMEM);
1315 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
1317 CERROR("Can't unpack security desc\n");
1318 GOTO (out, rc = -EFAULT);
1320 mds_squash_root(mds, rsd, &req->rq_peer.peer_id.nid);
1322 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1323 lustre_swab_mds_body);
1325 CERROR("Can't unpack body\n");
1326 GOTO (out, rc = -EFAULT);
/* Switch to the request's credentials before touching the fs. */
1329 rc = mds_init_ucred(&uc, rsd);
1331 CERROR("can't init ucred\n");
1335 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1336 de = mds_id2dentry(obd, &body->id1, &mnt);
1338 GOTO(out_pop, rc = PTR_ERR(de));
1340 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1342 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1343 /* note: in case of an error, dentry_open puts dentry */
1345 GOTO(out_pop, rc = PTR_ERR(file));
1347 /* body->size is actually the offset -eeb */
1348 if ((body->size & (de->d_inode->i_blksize - 1)) != 0) {
1349 CERROR("offset "LPU64" not on a block boundary of %lu\n",
1350 body->size, de->d_inode->i_blksize);
1351 GOTO(out_file, rc = -EFAULT);
1354 /* body->nlink is actually the #bytes to read -eeb */
1355 if (body->nlink & (de->d_inode->i_blksize - 1)) {
1356 CERROR("size %u is not multiple of blocksize %lu\n",
1357 body->nlink, de->d_inode->i_blksize);
1358 GOTO(out_file, rc = -EFAULT);
1361 repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*repbody));
1362 repbody->size = file->f_dentry->d_inode->i_size;
1363 repbody->valid = OBD_MD_FLSIZE;
1365 /* to make this asynchronous make sure that the handling function
1366 doesn't send a reply when this function completes. Instead a
1367 callback function would send the reply */
1368 /* body->size is actually the offset -eeb */
1369 rc = mds_sendpage(req, file, body->size, body->nlink);
/* filp_close also drops the dentry reference taken by dentry_open. */
1373 filp_close(file, 0);
1375 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1376 mds_exit_ucred(&uc);
1378 req->rq_status = rc;
1382 /* update master MDS ID, which is stored in local inode EA. */
/*
 * Look up the inode named by @id, open a SETATTR transaction, and store the
 * master MDS id passed in @data into the inode's EA via
 * mds_update_inode_mid().  Exported for use by CMOBD (see mds_read_mid below).
 * NOTE(review): elided extraction — IS_ERR checks, the out/out_dentry/
 * out_commit labels and the final RETURN are not visible here.
 */
1383 int mds_update_mid(struct obd_device *obd, struct lustre_id *id,
1384 void *data, int data_len)
1386 struct mds_obd *mds = &obd->u.mds;
1387 struct dentry *dentry;
1395 dentry = mds_id2dentry(obd, id, NULL);
1397 GOTO(out, rc = PTR_ERR(dentry));
1399 if (!dentry->d_inode) {
1400 CERROR("Can't find object "DLID4".\n",
1402 GOTO(out_dentry, rc = -EINVAL);
/* EA update is journaled as a setattr-class operation */
1405 handle = fsfilt_start(obd, dentry->d_inode,
1406 FSFILT_OP_SETATTR, NULL);
1408 GOTO(out_dentry, rc = PTR_ERR(handle));
1410 rc = mds_update_inode_mid(obd, dentry->d_inode, handle,
1411 (struct lustre_id *)data);
1413 CERROR("Can't update inode "DLID4" master id, "
1414 "error = %d.\n", OLID4(id), rc);
1415 GOTO(out_commit, rc);
1420 fsfilt_commit(obd, mds->mds_sb, dentry->d_inode,
1427 EXPORT_SYMBOL(mds_update_mid);
1429 /* read master MDS ID, which is stored in local inode EA. */
/*
 * Counterpart of mds_update_mid(): look up the inode named by @id and read
 * the master MDS id from its EA into @data under i_sem.  Exported symbol.
 * NOTE(review): elided extraction — IS_ERR check, dput and RETURN lines
 * are not visible here.
 */
1430 int mds_read_mid(struct obd_device *obd, struct lustre_id *id,
1431 void *data, int data_len)
1433 struct dentry *dentry;
1440 dentry = mds_id2dentry(obd, id, NULL);
1442 GOTO(out, rc = PTR_ERR(dentry));
1444 if (!dentry->d_inode) {
1445 CERROR("Can't find object "DLID4".\n",
1447 GOTO(out_dentry, rc = -EINVAL);
/* serialize EA access against concurrent updates on this inode */
1450 down(&dentry->d_inode->i_sem);
1451 rc = mds_read_inode_mid(obd, dentry->d_inode,
1452 (struct lustre_id *)data);
1453 up(&dentry->d_inode->i_sem);
1455 CERROR("Can't read inode "DLID4" master id, "
1456 "error = %d.\n", OLID4(id), rc);
1457 GOTO(out_dentry, rc);
1466 EXPORT_SYMBOL(mds_read_mid);
/*
 * Common entry for all MDS_REINT operations (setattr/create/link/unlink/
 * rename/open).  Allocates an update record, unpacks the security descriptor
 * and the reintegration record, initializes client credentials, and
 * dispatches to mds_reint_rec().  @lockh may carry a lock handle from the
 * intent path.
 * NOTE(review): elided extraction — the OBD_ALLOC failure check and the
 * final RETURN are not visible here.
 */
1468 int mds_reint(struct ptlrpc_request *req, int offset,
1469 struct lustre_handle *lockh)
1471 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
1472 struct mds_update_record *rec;
1473 struct mds_req_sec_desc *rsd;
1477 OBD_ALLOC(rec, sizeof(*rec));
1481 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
1483 CERROR("Can't unpack security desc\n");
1484 GOTO(out, rc = -EFAULT);
/* apply root-squash policy based on the peer NID */
1486 mds_squash_root(mds, rsd, &req->rq_peer.peer_id.nid);
1488 rc = mds_update_unpack(req, offset, rec);
1489 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1490 CERROR("invalid record\n");
1491 GOTO(out, req->rq_status = -EINVAL);
1494 rc = mds_init_ucred(&rec->ur_uc, rsd);
1496 CERROR("can't init ucred\n");
1500 /* rc will be used to interrupt a for loop over multiple records */
1501 rc = mds_reint_rec(rec, offset, req, lockh);
1502 mds_exit_ucred(&rec->ur_uc);
1505 OBD_FREE(rec, sizeof(*rec));
/*
 * Decide how to treat @req while the MDS is in recovery: connect/disconnect
 * (and a few others, partially elided here) are processed immediately,
 * recoverable ops are queued via target_queue_recovery_request(), and
 * everything else is rejected with -EAGAIN.  *process reports the verdict
 * to the caller (mds_handle).
 */
1509 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1510 struct obd_device *obd, int *process)
1512 switch (req->rq_reqmsg->opc) {
1513 case MDS_CONNECT: /* This will never get here, but for completeness. */
1514 case OST_CONNECT: /* This will never get here, but for completeness. */
1515 case MDS_DISCONNECT:
1516 case OST_DISCONNECT:
1521 case MDS_SYNC: /* used in unmounting */
/* replayable metadata ops get queued for ordered replay */
1526 *process = target_queue_recovery_request(req, obd);
1530 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1532 /* XXX what should we set rq_status to here? */
1533 req->rq_status = -EAGAIN;
1534 RETURN(ptlrpc_error(req));
/* Human-readable names for REINT_* opcodes, used only in debug output
 * by the MDS_REINT case in mds_handle().  Indexed by opcode; uses the
 * (pre-C99 GNU) "[idx] value" designated-initializer spelling. */
1538 static char *reint_names[] = {
1539 [REINT_SETATTR] "setattr",
1540 [REINT_CREATE] "create",
1541 [REINT_LINK] "link",
1542 [REINT_UNLINK] "unlink",
1543 [REINT_RENAME] "rename",
1544 [REINT_OPEN] "open",
/* obdo attribute mask used when packing inode attributes into an ost_body
 * (obdo_from_inode) in reconstruct_create()/mdt_obj_create().
 * NOTE(review): the macro continuation past OBD_MD_FLCTIME is elided in
 * this extraction. */
1547 #define FILTER_VALID_FLAGS (OBD_MD_FLTYPE | OBD_MD_FLMODE | OBD_MD_FLGENER | \
1548 OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ| \
1549 OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME| \
/*
 * Reply reconstruction for a resent/replayed object-create (invoked through
 * MDS_CHECK_RESENT in mdt_obj_create): restore rc/transno/disposition from
 * the per-client mds_client_data, re-find the previously created inode by
 * the ino saved in mcd_last_data, and repack its attributes and self-id
 * into the reply as if the create had just run.
 * NOTE(review): elided extraction — early-return lines after the rq_status
 * and error checks are not visible here.
 */
1552 static void reconstruct_create(struct ptlrpc_request *req)
1554 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1555 struct mds_client_data *mcd = med->med_mcd;
1556 struct dentry *dentry;
1557 struct ost_body *body;
1558 struct lustre_id id;
1562 /* copy rc, transno and disp; steal locks */
1563 mds_req_from_mcd(req, mcd);
1564 if (req->rq_status) {
/* mcd_last_data recorded the inum of the object created last time */
1572 id_ino(&id) = mcd->mcd_last_data;
1573 LASSERT(id_ino(&id) != 0);
1575 dentry = mds_id2dentry(req2obd(req), &id, NULL);
1576 if (IS_ERR(dentry)) {
1577 CERROR("can't find inode "LPU64"\n", id_ino(&id));
1578 req->rq_status = PTR_ERR(dentry);
1583 CWARN("reconstruct reply for x"LPU64" (remote ino) "LPU64" -> %lu/%u\n",
1584 req->rq_xid, id_ino(&id), dentry->d_inode->i_ino,
1585 dentry->d_inode->i_generation);
1587 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
1588 obdo_from_inode(&body->oa, dentry->d_inode, FILTER_VALID_FLAGS);
1589 body->oa.o_id = dentry->d_inode->i_ino;
1590 body->oa.o_generation = dentry->d_inode->i_generation;
1591 body->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER;
/* read the stored self-id (fid/group) back out of the inode EA */
1593 down(&dentry->d_inode->i_sem);
1594 rc = mds_read_inode_sid(req2obd(req), dentry->d_inode, &id);
1595 up(&dentry->d_inode->i_sem);
1597 CERROR("Can't read inode self id, inode %lu, "
1598 "rc %d\n", dentry->d_inode->i_ino, rc);
1602 body->oa.o_fid = id_fid(&id);
1603 body->oa.o_mds = id_group(&id);
/*
 * OST_CREATE handler on the MDS: create a remote directory object under the
 * "unnamed" directory for another MDS (used for cross-MDS mkdir / dir
 * splitting).  Flow:
 *   1. unpack ost_body, pack reply, handle RESENT via reconstruct_create();
 *   2. REPLAY/RECREATE with a known ino/gen: if the object already exists,
 *      just repack its attributes and self-id and return;
 *   3. otherwise start an FSFILT_OP_MKDIR transaction under parent i_sem,
 *      vfs_mkdir() a temp-named dir, restore or allocate its self-id (sid),
 *      then unlink the temporary name (object stays reachable by ino only);
 *   4. optionally mark the new dir non-splittable (OBD_MD_FLID set) or
 *      split it right away (o_easize);
 *   5. cleanup switch finishes the transno and pops the fs context.
 * NOTE(review): elided extraction — many guard/else/brace lines and the
 * `cleanup:` label itself are not visible here.
 */
1609 static int mdt_obj_create(struct ptlrpc_request *req)
1611 struct obd_device *obd = req->rq_export->exp_obd;
1612 struct mds_obd *mds = &obd->u.mds;
1613 struct ost_body *body, *repbody;
1614 char idname[LL_ID_NAMELEN];
1615 int size = sizeof(*repbody);
1616 struct inode *parent_inode;
1617 struct lvfs_run_ctxt saved;
1618 int rc, cleanup_phase = 0;
1619 struct dentry *new = NULL;
1620 struct dentry_params dp;
1621 int mealen, flags = 0;
1622 struct lvfs_ucred uc;
1623 struct lustre_id id;
1625 void *handle = NULL;
1626 unsigned long cr_inum = 0;
1629 DEBUG_REQ(D_HA, req, "create remote object");
1630 parent_inode = mds->mds_unnamed_dir->d_inode;
1632 body = lustre_swab_reqbuf(req, 0, sizeof(*body),
1633 lustre_swab_ost_body);
1637 rc = lustre_pack_reply(req, 1, &size, NULL);
/* resent request: rebuild the previous reply instead of re-executing */
1641 MDS_CHECK_RESENT(req, reconstruct_create(req));
1644 * this only serve to inter-mds request, don't need check group database
1647 uc.luc_ghash = NULL;
1648 uc.luc_ginfo = NULL;
1649 uc.luc_uid = body->oa.o_uid;
1650 uc.luc_fsuid = body->oa.o_uid;
1651 uc.luc_fsgid = body->oa.o_gid;
1653 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1654 repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*repbody));
1656 /* in REPLAY case inum should be given (client or other MDS fills it) */
1657 if (body->oa.o_id && ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
1658 (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))) {
1660 * this is re-create request from MDS holding directory name.
1661 * we have to lookup given ino/gen first. if it exists (good
1662 * case) then there is nothing to do. if it does not then we
1663 * have to recreate it.
1665 id_ino(&id) = body->oa.o_id;
1666 id_gen(&id) = body->oa.o_generation;
1668 new = mds_id2dentry(obd, &id, NULL);
1669 if (!IS_ERR(new) && new->d_inode) {
1670 struct lustre_id sid;
1672 CWARN("mkdir() repairing is on its way: %lu/%lu\n",
1673 (unsigned long)id_ino(&id), (unsigned long)id_gen(&id));
/* object already exists: reply with its current attributes */
1675 obdo_from_inode(&repbody->oa, new->d_inode,
1676 FILTER_VALID_FLAGS);
1678 repbody->oa.o_id = new->d_inode->i_ino;
1679 repbody->oa.o_generation = new->d_inode->i_generation;
1680 repbody->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER;
1683 down(&new->d_inode->i_sem);
1684 rc = mds_read_inode_sid(obd, new->d_inode, &sid);
1685 up(&new->d_inode->i_sem);
1687 CERROR("Can't read inode self id "
1688 "inode %lu, rc %d.\n",
1689 new->d_inode->i_ino, rc);
1693 repbody->oa.o_fid = id_fid(&sid);
1694 repbody->oa.o_mds = id_group(&sid);
1695 LASSERT(id_fid(&sid) != 0);
1698 * here we could use fid passed in body->oa.o_fid and
1699 * thus avoid mds_read_inode_sid().
1701 cr_inum = new->d_inode->i_ino;
1702 GOTO(cleanup, rc = 0);
/* create path: whole mkdir happens under the parent's i_sem */
1706 down(&parent_inode->i_sem);
1707 handle = fsfilt_start(obd, parent_inode, FSFILT_OP_MKDIR, NULL);
1708 if (IS_ERR(handle)) {
1709 up(&parent_inode->i_sem);
1710 CERROR("fsfilt_start() failed, rc = %d\n",
1711 (int)PTR_ERR(handle));
1712 GOTO(cleanup, rc = PTR_ERR(handle));
1714 cleanup_phase = 1; /* transaction */
/* temporary unique name; the entry is removed again further below */
1717 rc = sprintf(idname, "%u.%u", ll_insecure_random_int(), current->pid);
1718 new = lookup_one_len(idname, mds->mds_unnamed_dir, rc);
1720 CERROR("%s: can't lookup new inode (%s) for mkdir: %d\n",
1721 obd->obd_name, idname, (int) PTR_ERR(new));
1722 fsfilt_commit(obd, mds->mds_sb, new->d_inode, handle, 0);
1723 up(&parent_inode->i_sem);
1724 RETURN(PTR_ERR(new));
1725 } else if (new->d_inode) {
1726 CERROR("%s: name exists. repeat\n", obd->obd_name);
/* dentry_params lets the fs layer pin a specific inum on replay */
1730 new->d_fsdata = (void *)&dp;
1734 if ((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) ||
1735 (body->oa.o_flags & OBD_FL_RECREATE_OBJS)) {
1736 LASSERT(body->oa.o_id != 0);
1737 dp.p_inum = body->oa.o_id;
1738 DEBUG_REQ(D_HA, req, "replay create obj %lu/%lu",
1739 (unsigned long)body->oa.o_id,
1740 (unsigned long)body->oa.o_generation);
1743 rc = vfs_mkdir(parent_inode, new, body->oa.o_mode);
1745 if ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
1746 lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
/* force the generation back to the replayed value */
1747 new->d_inode->i_generation = body->oa.o_generation;
1748 mark_inode_dirty(new->d_inode);
1751 * avoiding asserts in cache flush case, as
1752 * @body->oa.o_id should be zero.
1754 if (body->oa.o_id) {
1755 LASSERTF(body->oa.o_id == new->d_inode->i_ino,
1756 "BUG 3550: failed to recreate obj "
1757 LPU64" -> %lu\n", body->oa.o_id,
1758 new->d_inode->i_ino);
1760 LASSERTF(body->oa.o_generation ==
1761 new->d_inode->i_generation,
1762 "BUG 3550: failed to recreate obj/gen "
1763 LPU64"/%u -> %lu/%u\n", body->oa.o_id,
1764 body->oa.o_generation,
1765 new->d_inode->i_ino,
1766 new->d_inode->i_generation);
1770 obdo_from_inode(&repbody->oa, new->d_inode, FILTER_VALID_FLAGS);
1771 repbody->oa.o_id = new->d_inode->i_ino;
1772 repbody->oa.o_generation = new->d_inode->i_generation;
1773 repbody->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER | OBD_MD_FID;
1775 if ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
1776 lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
/* replay: reuse the fid supplied by the requester */
1777 id_group(&id) = mds->mds_num;
1779 LASSERT(body->oa.o_fid != 0);
1780 id_fid(&id) = body->oa.o_fid;
1782 LASSERT(body->oa.o_id != 0);
1783 id_ino(&id) = repbody->oa.o_id;
1784 id_gen(&id) = repbody->oa.o_generation;
1786 down(&new->d_inode->i_sem);
1787 rc = mds_update_inode_sid(obd, new->d_inode, handle, &id);
1788 up(&new->d_inode->i_sem);
1791 * make sure, that fid is up-to-date.
1793 mds_set_last_fid(obd, id_fid(&id));
1796 * allocate new sid, as object is created from scratch
1797 * and this is not replay.
1799 down(&new->d_inode->i_sem);
1800 rc = mds_alloc_inode_sid(obd, new->d_inode, handle, &id);
1801 up(&new->d_inode->i_sem);
1804 CERROR("Can't update lustre ID for inode %lu, "
1805 "error = %d\n", new->d_inode->i_ino, rc);
1809 /* initializing o_fid after it is allocated. */
1810 repbody->oa.o_fid = id_fid(&id);
1811 repbody->oa.o_mds = id_group(&id);
/* drop the temporary name: object remains addressed by ino/gen only */
1813 rc = fsfilt_del_dir_entry(obd, new);
1814 up(&parent_inode->i_sem);
1816 CERROR("can't remove name for object: %d\n", rc);
1820 cleanup_phase = 2; /* created directory object */
1822 CDEBUG(D_OTHER, "created dirobj: %lu/%lu mode %o\n",
1823 (unsigned long)new->d_inode->i_ino,
1824 (unsigned long)new->d_inode->i_generation,
1825 (unsigned)new->d_inode->i_mode);
1826 cr_inum = new->d_inode->i_ino;
1828 up(&parent_inode->i_sem);
1829 CERROR("%s: can't create dirobj: %d\n", obd->obd_name, rc);
1833 if (body->oa.o_valid & OBD_MD_FLID) {
1834 /* this is new object for splitted dir. We have to prevent
1835 * recursive splitting on it -bzzz */
1836 mealen = obd_size_diskmd(mds->mds_lmv_exp, NULL);
1838 OBD_ALLOC(mea, mealen);
1840 GOTO(cleanup, rc = -ENOMEM);
1842 mea->mea_magic = MEA_MAGIC_ALL_CHARS;
1843 mea->mea_master = 0;
1846 down(&new->d_inode->i_sem);
1847 rc = fsfilt_set_md(obd, new->d_inode, handle, mea, mealen);
1848 up(&new->d_inode->i_sem);
1850 CERROR("fsfilt_set_md() failed, rc = %d\n", rc);
1852 OBD_FREE(mea, mealen);
1853 CDEBUG(D_OTHER, "%s: mark non-splittable %lu/%u - %d\n",
1854 obd->obd_name, new->d_inode->i_ino,
1855 new->d_inode->i_generation, flags);
1856 } else if (body->oa.o_easize) {
1857 /* we pass LCK_EX to split routine to signal that we have
1858 * exclusive access to the directory. simple because nobody
1859 * knows it already exists -bzzz */
1860 rc = mds_try_to_split_dir(obd, new, NULL,
1861 body->oa.o_easize, LCK_EX);
1863 CERROR("Can't split directory %lu, error = %d.\n",
1864 new->d_inode->i_ino, rc);
/* staged cleanup: case 2 deliberately falls through to case 1 */
1872 switch (cleanup_phase) {
1873 case 2: /* object has been created, but we'll may want to replay it later */
1875 ptlrpc_require_repack(req);
1876 case 1: /* transaction */
1877 rc = mds_finish_transno(mds, parent_inode, handle,
1882 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1883 mds_put_group_entry(mds, uc.luc_ghash);
/*
 * OST_GET_INFO handler: validate that the request key is one of
 * "mdsize" / "mdsnum" / "rootid", pack a reply of the appropriate size
 * ("rootid" returns a struct lustre_id, the others an int — see the
 * partially elided else-branch), and fill it via obd_get_info().
 */
1887 static int mdt_get_info(struct ptlrpc_request *req)
1889 struct obd_export *exp = req->rq_export;
/* key is a string buffer; length is checked against the known keys below */
1894 key = lustre_msg_buf(req->rq_reqmsg, 0, 1);
1896 DEBUG_REQ(D_HA, req, "no get_info key");
1899 keylen = req->rq_reqmsg->buflens[0];
1901 if ((keylen < strlen("mdsize") || strcmp(key, "mdsize") != 0) &&
1902 (keylen < strlen("mdsnum") || strcmp(key, "mdsnum") != 0) &&
1903 (keylen < strlen("rootid") || strcmp(key, "rootid") != 0))
1906 if (keylen >= strlen("rootid") && !strcmp(key, "rootid")) {
1907 struct lustre_id *reply;
1908 int size = sizeof(*reply);
1910 rc = lustre_pack_reply(req, 1, &size, NULL);
1914 reply = lustre_msg_buf(req->rq_repmsg, 0, size);
1915 rc = obd_get_info(exp, keylen, key, &size, reply);
1918 int size = sizeof(*reply);
1920 rc = lustre_pack_reply(req, 1, &size, NULL);
1924 reply = lustre_msg_buf(req->rq_repmsg, 0, size);
1925 rc = obd_get_info(exp, keylen, key, &size, reply);
1928 req->rq_repmsg->status = 0;
/*
 * obd_set_info backend for the MDS.  Currently only understands the
 * "mds_type" key: records the MDS type, derives the corresponding OST
 * filter group (FILTER_GROUP_FIRST_MDS + type) and pushes it to the
 * attached LOV export as "mds_conn" so the obdfilter side follows suit.
 * Unknown keys log "invalid key".
 */
1932 static int mds_set_info(struct obd_export *exp, __u32 keylen,
1933 void *key, __u32 vallen, void *val)
1935 struct obd_device *obd;
1936 struct mds_obd *mds;
1940 obd = class_exp2obd(exp);
1942 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
1943 exp->exp_handle.h_cookie);
1948 if (keylen >= strlen("mds_type") &&
1949 memcmp(key, "mds_type", keylen) == 0) {
1953 CDEBUG(D_IOCTL, "set mds type to %x\n", *(int*)val);
1955 mds->mds_obd_type = *(int*)val;
1956 group = FILTER_GROUP_FIRST_MDS + mds->mds_obd_type;
1957 valsize = sizeof(group);
1959 /* mds number has been changed, so the corresponding obdfilter
1960 * exp need to be changed too. */
1961 rc = obd_set_info(mds->mds_lov_exp, strlen("mds_conn"),
1962 "mds_conn", valsize, &group);
1965 CDEBUG(D_IOCTL, "invalid key\n");
/*
 * OST_SET_INFO handler: thin RPC wrapper that unpacks key/value buffers
 * from the request and forwards "mds_type" to obd_set_info() (which lands
 * in mds_set_info above).  Any other key is rejected with "invalid key".
 */
1969 static int mdt_set_info(struct ptlrpc_request *req)
1972 struct obd_export *exp = req->rq_export;
1973 int keylen, rc = 0, vallen;
1976 key = lustre_msg_buf(req->rq_reqmsg, 0, 1);
1978 DEBUG_REQ(D_HA, req, "no set_info key");
1981 keylen = req->rq_reqmsg->buflens[0];
1983 if (keylen == strlen("mds_type") &&
1984 memcmp(key, "mds_type", keylen) == 0) {
/* empty reply: only the status field is meaningful to the caller */
1985 rc = lustre_pack_reply(req, 0, NULL, NULL);
1989 val = lustre_msg_buf(req->rq_reqmsg, 1, 0);
1990 vallen = req->rq_reqmsg->buflens[1];
1992 rc = obd_set_info(exp, keylen, key, vallen, val);
1993 req->rq_repmsg->status = 0;
1996 CDEBUG(D_IOCTL, "invalid key\n");
/*
 * Verify the wire protocol version of an incoming message against the
 * version family its opcode belongs to: OBD ops -> LUSTRE_OBD_VERSION,
 * MDS ops -> LUSTRE_MDS_VERSION, DLM callbacks -> LUSTRE_DLM_VERSION,
 * llog ops -> LUSTRE_LOG_VERSION.  Unknown opcodes are reported.
 * NOTE(review): elided extraction — several `case` labels and the break/
 * return lines between the groups are not visible here.
 */
2000 static int mds_msg_check_version(struct lustre_msg *msg)
2006 case MDS_DISCONNECT:
2008 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2010 CERROR("bad opc %u version %08x, expecting %08x\n",
2011 msg->opc, msg->version, LUSTRE_OBD_VERSION);
2016 case MDS_GETATTR_LOCK:
2020 case MDS_DONE_WRITING:
2023 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2025 CERROR("bad opc %u version %08x, expecting %08x\n",
2026 msg->opc, msg->version, LUSTRE_MDS_VERSION);
2030 case LDLM_BL_CALLBACK:
2031 case LDLM_CP_CALLBACK:
2032 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2034 CERROR("bad opc %u version %08x, expecting %08x\n",
2035 msg->opc, msg->version, LUSTRE_DLM_VERSION);
2037 case OBD_LOG_CANCEL:
2038 case LLOG_ORIGIN_HANDLE_OPEN:
2039 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2040 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2041 case LLOG_ORIGIN_HANDLE_READ_HEADER:
2042 case LLOG_ORIGIN_HANDLE_CLOSE:
2044 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2046 CERROR("bad opc %u version %08x, expecting %08x\n",
2047 msg->opc, msg->version, LUSTRE_LOG_VERSION);
2053 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2055 CERROR("bad opc %u version %08x, expecting %08x\n",
2056 msg->opc, msg->version, LUSTRE_OBD_VERSION);
2059 CERROR("MDS unknown opcode %d\n", msg->opc);
/* Scratch buffer for ptlrpc_peernid2str() in mds_handle() error paths.
 * File-scope and unsynchronized — fine only for debug/error formatting. */
2067 static char str[PTL_NALFMT_SIZE];
/*
 * Top-level MDS request dispatcher (the ptlrpc service handler).
 * Sequence: check protocol version; for non-CONNECT ops require a valid
 * export and, while the target is recovering, filter/queue the request via
 * mds_filter_recovery_request(); then dispatch on opcode to the individual
 * handlers.  On the way out, fill last_xid/last_committed into the reply,
 * queue the reply if this is the final replay during recovery, and send it
 * with target_send_reply().
 * NOTE(review): elided extraction — many `case` labels, `break`s, braces,
 * the `out:` label and RETURN lines are not visible here.
 */
2069 int mds_handle(struct ptlrpc_request *req)
2071 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
2073 struct mds_obd *mds = NULL; /* quell gcc overwarning */
2074 struct obd_device *obd = NULL;
2077 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
2079 rc = mds_msg_check_version(req->rq_reqmsg);
2081 CERROR("MDS drop mal-formed request\n");
2085 LASSERT(current->journal_info == NULL);
2086 /* XXX identical to OST */
2087 if (req->rq_reqmsg->opc != MDS_CONNECT) {
2088 struct mds_export_data *med;
2091 if (req->rq_export == NULL) {
2092 CERROR("operation %d on unconnected MDS from NID %s\n",
2093 req->rq_reqmsg->opc,
2094 ptlrpc_peernid2str(&req->rq_peer, str));
2095 req->rq_status = -ENOTCONN;
2096 GOTO(out, rc = -ENOTCONN);
2099 med = &req->rq_export->exp_mds_data;
2100 obd = req->rq_export->exp_obd;
2103 /* sanity check: if the xid matches, the request must
2104 * be marked as a resent or replayed */
2105 if (req->rq_xid == med->med_mcd->mcd_last_xid) {
2106 LASSERTF(lustre_msg_get_flags(req->rq_reqmsg) &
2107 (MSG_RESENT | MSG_REPLAY),
2108 "rq_xid "LPU64" matches last_xid, "
2109 "expected RESENT flag\n",
2112 /* else: note the opposite is not always true; a
2113 * RESENT req after a failover will usually not match
2114 * the last_xid, since it was likely never
2115 * committed. A REPLAYed request will almost never
2116 * match the last xid, however it could for a
2117 * committed, but still retained, open. */
/* snapshot the recovering flag under the processing-task lock */
2119 spin_lock_bh(&obd->obd_processing_task_lock);
2120 recovering = obd->obd_recovering;
2121 spin_unlock_bh(&obd->obd_processing_task_lock);
2123 rc = mds_filter_recovery_request(req, obd,
2125 if (rc || should_process == 0) {
2127 } else if (should_process < 0) {
2128 req->rq_status = should_process;
2129 rc = ptlrpc_error(req);
/* ---- opcode dispatch ---- */
2135 switch (req->rq_reqmsg->opc) {
2137 DEBUG_REQ(D_INODE, req, "connect");
2138 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
2139 rc = target_handle_connect(req);
2141 /* Now that we have an export, set mds. */
2142 mds = mds_req2mds(req);
2145 case MDS_DISCONNECT:
2146 DEBUG_REQ(D_INODE, req, "disconnect");
2147 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
2148 rc = target_handle_disconnect(req);
2149 req->rq_status = rc; /* superfluous? */
2153 DEBUG_REQ(D_INODE, req, "getstatus");
2154 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
2155 rc = mds_getstatus(req);
2159 DEBUG_REQ(D_INODE, req, "getattr");
2160 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
2161 rc = mds_getattr(req, MDS_REQ_REC_OFF);
2164 case MDS_GETATTR_LOCK: {
2165 struct lustre_handle lockh;
2166 DEBUG_REQ(D_INODE, req, "getattr_lock");
2167 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_LOCK_NET, 0);
2169 /* If this request gets a reconstructed reply, we won't be
2170 * acquiring any new locks in mds_getattr_lock, so we don't
2174 rc = mds_getattr_lock(req, MDS_REQ_REC_OFF, &lockh,
2175 MDS_INODELOCK_UPDATE);
2176 /* this non-intent call (from an ioctl) is special */
2177 req->rq_status = rc;
2178 if (rc == 0 && lockh.cookie)
2179 ldlm_lock_decref(&lockh, LCK_PR);
2183 DEBUG_REQ(D_INODE, req, "statfs");
2184 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
2185 rc = mds_statfs(req);
2189 DEBUG_REQ(D_INODE, req, "readpage");
2190 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
2191 rc = mds_readpage(req, MDS_REQ_REC_OFF);
2193 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
2194 if (req->rq_reply_state) {
2195 lustre_free_reply_state (req->rq_reply_state);
2196 req->rq_reply_state = NULL;
/* ---- MDS_REINT: peek at the sub-opcode to size the reply ---- */
2203 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, MDS_REQ_REC_OFF,
2206 int size[3] = {sizeof(struct mds_body), mds->mds_max_mdsize,
2207 mds->mds_max_cookiesize};
2210 /* NB only peek inside req now; mds_reint() will swab it */
2212 CERROR ("Can't inspect opcode\n");
2217 if (lustre_msg_swabbed (req->rq_reqmsg))
2220 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
2221 (opc < sizeof(reint_names) / sizeof(reint_names[0]) ||
2222 reint_names[opc] == NULL) ? reint_names[opc] :
2225 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
2227 if (opc == REINT_UNLINK || opc == REINT_RENAME)
2229 else if (opc == REINT_OPEN)
2234 rc = lustre_pack_reply(req, bufcount, size, NULL);
2238 rc = mds_reint(req, MDS_REQ_REC_OFF, NULL);
2239 fail = OBD_FAIL_MDS_REINT_NET_REP;
2244 DEBUG_REQ(D_INODE, req, "close");
2245 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
2246 rc = mds_close(req, MDS_REQ_REC_OFF);
2249 case MDS_DONE_WRITING:
2250 DEBUG_REQ(D_INODE, req, "done_writing");
2251 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
2252 rc = mds_done_writing(req, MDS_REQ_REC_OFF);
2256 DEBUG_REQ(D_INODE, req, "pin");
2257 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
2258 rc = mds_pin(req, MDS_REQ_REC_OFF);
2262 DEBUG_REQ(D_INODE, req, "sync");
2263 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
2264 rc = mds_sync(req, MDS_REQ_REC_OFF);
2268 DEBUG_REQ(D_INODE, req, "ping");
2269 rc = target_handle_ping(req);
2272 case OBD_LOG_CANCEL:
2273 CDEBUG(D_INODE, "log cancel\n");
2274 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
2275 rc = -ENOTSUPP; /* la la la */
2279 DEBUG_REQ(D_INODE, req, "enqueue");
2280 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
2281 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
2282 ldlm_server_blocking_ast, NULL);
2283 fail = OBD_FAIL_LDLM_REPLY;
2286 DEBUG_REQ(D_INODE, req, "convert");
2287 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
2288 rc = ldlm_handle_convert(req);
2290 case LDLM_BL_CALLBACK:
2291 case LDLM_CP_CALLBACK:
2292 DEBUG_REQ(D_INODE, req, "callback");
2293 CERROR("callbacks should not happen on MDS\n");
2295 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
2297 case LLOG_ORIGIN_HANDLE_OPEN:
2298 DEBUG_REQ(D_INODE, req, "llog_init");
2299 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2300 rc = llog_origin_handle_open(req);
2302 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2303 DEBUG_REQ(D_INODE, req, "llog next block");
2304 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2305 rc = llog_origin_handle_next_block(req);
2307 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2308 DEBUG_REQ(D_INODE, req, "llog prev block");
2309 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2310 rc = llog_origin_handle_prev_block(req);
2312 case LLOG_ORIGIN_HANDLE_READ_HEADER:
2313 DEBUG_REQ(D_INODE, req, "llog read header");
2314 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2315 rc = llog_origin_handle_read_header(req);
2317 case LLOG_ORIGIN_HANDLE_CLOSE:
2318 DEBUG_REQ(D_INODE, req, "llog close");
2319 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2320 rc = llog_origin_handle_close(req);
2323 DEBUG_REQ(D_INODE, req, "ost_create");
2324 rc = mdt_obj_create(req);
2327 DEBUG_REQ(D_INODE, req, "get_info");
2328 rc = mdt_get_info(req);
2331 DEBUG_REQ(D_INODE, req, "set_info");
2332 rc = mdt_set_info(req);
2335 CDEBUG(D_INODE, "write\n");
2336 OBD_FAIL_RETURN(OBD_FAIL_OST_BRW_NET, 0);
2337 rc = ost_brw_write(req, NULL);
2338 LASSERT(current->journal_info == NULL);
2339 /* mdt_brw sends its own replies */
2343 DEBUG_REQ(D_INODE, req, "llog catinfo");
2344 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2345 rc = llog_catinfo(req);
2348 req->rq_status = -ENOTSUPP;
2349 rc = ptlrpc_error(req);
2353 LASSERT(current->journal_info == NULL);
/* ---- reply finalization ---- */
2357 /* If we're DISCONNECTing, the mds_export_data is already freed */
2358 if (!rc && req->rq_reqmsg->opc != MDS_DISCONNECT) {
2359 struct mds_export_data *med = &req->rq_export->exp_mds_data;
2360 struct obd_device *obd = list_entry(mds, struct obd_device,
2362 req->rq_repmsg->last_xid =
2363 le64_to_cpu(med->med_mcd->mcd_last_xid);
2365 if (!obd->obd_no_transno) {
2366 req->rq_repmsg->last_committed =
2367 obd->obd_last_committed;
2369 DEBUG_REQ(D_IOCTL, req,
2370 "not sending last_committed update");
2372 CDEBUG(D_INFO, "last_transno "LPU64", last_committed "LPU64
2374 mds->mds_last_transno, obd->obd_last_committed,
/* final replay request: reply is queued until recovery completes */
2379 if (lustre_msg_get_flags(req->rq_reqmsg) & MSG_LAST_REPLAY) {
2380 if (obd && obd->obd_recovering) {
2381 DEBUG_REQ(D_HA, req, "LAST_REPLAY, queuing reply");
2382 return target_queue_final_reply(req, rc);
2384 /* Lost a race with recovery; let the error path DTRT. */
2385 rc = req->rq_status = -ENOTCONN;
2388 target_send_reply(req, rc, fail);
2392 /* Update the server data on disk. This stores the new mount_count and also the
2393 * last_rcvd value to disk. If we don't have a clean shutdown, then the server
2394 * last_rcvd value may be less than that of the clients. This will alert us
2395 * that we may need to do client recovery.
2397 * Also assumes for mds_last_transno that we are not modifying it (no locking.)
 *
 * Writes the in-memory mds_server_data to the last_rcvd file at offset 0
 * under the MDS filesystem context; @force_sync requests a synchronous
 * write.
 */
2399 int mds_update_server_data(struct obd_device *obd, int force_sync)
2401 struct mds_obd *mds = &obd->u.mds;
2402 struct mds_server_data *msd = mds->mds_server_data;
2403 struct file *filp = mds->mds_rcvd_filp;
2404 struct lvfs_run_ctxt saved;
2409 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2410 msd->msd_last_transno = cpu_to_le64(mds->mds_last_transno);
2412 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
2413 mds->mds_mount_count, mds->mds_last_transno);
2414 rc = fsfilt_write_record(obd, filp, msd, sizeof(*msd), &off, force_sync);
2416 CERROR("error writing MDS server data: rc = %d\n", rc);
2417 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2422 /* saves last allocated fid counter to file. */
/*
 * Persist mds->mds_last_fid to the fid file if it changed since the last
 * write (mds_last_fid_changed flag), all under mds_last_fid_sem.  When a
 * journal @handle is supplied, a commit callback (mds_commit_last_fid_cb)
 * is registered instead of forcing the write synchronously.
 * NOTE(review): elided extraction — the `if (handle)` split and the final
 * RETURN are not visible here.
 */
2423 int mds_update_last_fid(struct obd_device *obd, void *handle,
2426 struct mds_obd *mds = &obd->u.mds;
2427 struct file *filp = mds->mds_fid_filp;
2428 struct lvfs_run_ctxt saved;
2433 down(&mds->mds_last_fid_sem);
2434 if (mds->mds_last_fid_changed) {
2435 CDEBUG(D_SUPER, "MDS last_fid is #"LPU64"\n",
2439 fsfilt_add_journal_cb(obd, mds->mds_sb,
2440 mds->mds_last_fid, handle,
2441 mds_commit_last_fid_cb, NULL);
2444 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2445 rc = fsfilt_write_record(obd, filp, &mds->mds_last_fid,
2446 sizeof(mds->mds_last_fid),
2449 CERROR("error writing MDS last_fid #"LPU64
2450 ", err = %d\n", mds->mds_last_fid, rc);
2452 mds->mds_last_fid_changed = 0;
2455 CDEBUG(D_SUPER, "wrote fid #"LPU64" at idx "
2456 "%llu: err = %d\n", mds->mds_last_fid,
2458 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2460 up(&mds->mds_last_fid_sem);
/*
 * Raise the last-allocated-fid watermark to @fid (monotonic: only if
 * greater) and mark it dirty so mds_update_last_fid() will persist it.
 * Serialized by mds_last_fid_sem.
 */
2465 void mds_set_last_fid(struct obd_device *obd, __u64 fid)
2467 struct mds_obd *mds = &obd->u.mds;
2469 down(&mds->mds_last_fid_sem);
2470 if (fid > mds->mds_last_fid) {
2471 mds->mds_last_fid = fid;
2472 mds->mds_last_fid_changed = 1;
2474 up(&mds->mds_last_fid_sem);
/* Journal-commit callback: forward the committed transno to the generic
 * obd_transno_commit_cb() notifier. */
2477 void mds_commit_last_transno_cb(struct obd_device *obd,
2478 __u64 transno, void *data,
2481 obd_transno_commit_cb(obd, transno, error);
/* Journal-commit callback for the last_fid record (registered in
 * mds_update_last_fid): logs success or failure of the commit. */
2484 void mds_commit_last_fid_cb(struct obd_device *obd,
2485 __u64 fid, void *data,
2489 CERROR("%s: fid "LPD64" commit error: %d\n",
2490 obd->obd_name, fid, error);
2494 CDEBUG(D_HA, "%s: fid "LPD64" committed\n",
2495 obd->obd_name, fid);
/*
2499 * allocates new lustre_id on passed @inode and saves it to inode EA.
 *
 * Fills @id with group = this MDS number, a freshly incremented fid
 * (under mds_last_fid_sem, marking the counter dirty), and the inode's
 * ino/gen/type, then writes it into the inode EA via
 * mds_update_inode_sid() inside the caller's transaction @handle.
 */
2501 int mds_alloc_inode_sid(struct obd_device *obd, struct inode *inode,
2502 void *handle, struct lustre_id *id)
2504 struct mds_obd *mds = &obd->u.mds;
2508 LASSERT(id != NULL);
2509 LASSERT(obd != NULL);
2511 id_group(id) = mds->mds_num;
2513 down(&mds->mds_last_fid_sem);
2514 mds->mds_last_fid_changed = 1;
2515 id_fid(id) = ++mds->mds_last_fid;
2516 up(&mds->mds_last_fid_sem);
2518 id_ino(id) = inode->i_ino;
2519 id_gen(id) = inode->i_generation;
2520 id_type(id) = (S_IFMT & inode->i_mode);
2522 rc = mds_update_inode_sid(obd, inode, handle, id);
2524 CERROR("Can't update inode FID EA, "
/*
2532 * reads inode self id from inode EA. Probably later this should be replaced by
2533 * caching inode self id to avoid raeding it every time it is needed.
 *
 * Reads only the li_fid portion of @id via fsfilt_get_sid().
 * Callers serialize on inode->i_sem (see reconstruct_create /
 * mdt_obj_create).
 */
2535 int mds_read_inode_sid(struct obd_device *obd, struct inode *inode,
2536 struct lustre_id *id)
2541 LASSERT(id != NULL);
2542 LASSERT(obd != NULL);
2543 LASSERT(inode != NULL);
2545 rc = fsfilt_get_sid(obd, inode, &id->li_fid,
2546 sizeof(id->li_fid));
2548 CERROR("fsfilt_get_sid() failed, "
2561 /* updates inode self id in EA. */
/* Writes the li_fid portion of @id into the inode EA via fsfilt_set_sid()
 * inside the caller's transaction @handle.  Counterpart of
 * mds_read_inode_sid(). */
2562 int mds_update_inode_sid(struct obd_device *obd, struct inode *inode,
2563 void *handle, struct lustre_id *id)
2568 LASSERT(id != NULL);
2569 LASSERT(obd != NULL);
2570 LASSERT(inode != NULL);
2572 rc = fsfilt_set_sid(obd, inode, handle, &id->li_fid,
2573 sizeof(id->li_fid));
2575 CERROR("fsfilt_set_sid() failed, rc = %d\n", rc);
/*
2583 * reads inode id on master MDS. This is usualy done by CMOBD to update requests
2584 * to master MDS by correct store cookie, needed to find inode on master MDS
 *
 * Reads the full lustre_id (master id) from the inode EA via
 * fsfilt_get_mid().  Callers serialize on inode->i_sem (see mds_read_mid).
 */
2587 int mds_read_inode_mid(struct obd_device *obd, struct inode *inode,
2588 struct lustre_id *id)
2593 LASSERT(id != NULL);
2594 LASSERT(obd != NULL);
2595 LASSERT(inode != NULL);
2597 rc = fsfilt_get_mid(obd, inode, id, sizeof(*id));
2599 CERROR("fsfilt_get_mid() failed, "
/*
2613 * updates master inode id. Usualy this is done by CMOBD after an inode is
2614 * created and relationship between cache MDS and master one should be
 *
 * Writes the full lustre_id (master id) into the inode EA via
 * fsfilt_set_mid() inside the caller's transaction @handle.  Counterpart
 * of mds_read_inode_mid().
 */
2617 int mds_update_inode_mid(struct obd_device *obd, struct inode *inode,
2618 void *handle, struct lustre_id *id)
2623 LASSERT(id != NULL);
2624 LASSERT(obd != NULL);
2625 LASSERT(inode != NULL);
2627 rc = fsfilt_set_mid(obd, inode, handle, id, sizeof(*id));
2629 CERROR("fsfilt_set_mid() failed, rc = %d\n", rc);
2636 /* mount the file system (secretly) */
/*
 * mds_setup(): obd setup method for the MDS device.
 *
 * Parses the lustre_cfg in @buf (inlbuf1 = device, inlbuf2 = fstype,
 * inlbuf3 = profile, inlbuf4 = extra mount options, inlbuf5 = LMV name,
 * inlbuf6 = master/cache role), mounts the backing filesystem, creates
 * the LDLM namespace, initializes the fs methods and finishes with
 * mds_postsetup().
 *
 * NOTE(review): many original lines (declarations such as @page/@ns_name,
 * error checks, GOTO targets and the final RETURN) are elided from this
 * excerpt, so the error paths below are only partially visible.
 */
2637 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
2639 struct lustre_cfg* lcfg = buf;
2640 struct mds_obd *mds = &obd->u.mds;
2641 char *options = NULL;
2642 struct vfsmount *mnt;
/* clear any leftover read-only flag from a previous failover test */
2648 dev_clear_rdonly(2);
/* both the device name (inlbuf1) and fs type (inlbuf2) are mandatory */
2650 if (!lcfg->lcfg_inlbuf1 || !lcfg->lcfg_inlbuf2)
2651 RETURN(rc = -EINVAL);
2653 obd->obd_fsops = fsfilt_get_ops(lcfg->lcfg_inlbuf2);
2654 if (IS_ERR(obd->obd_fsops))
2655 RETURN(rc = PTR_ERR(obd->obd_fsops));
2657 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
/* one zeroed page is used as the scratch buffer for mount options */
2659 page = __get_free_page(GFP_KERNEL);
2663 options = (char *)page;
2664 memset(options, 0, PAGE_SIZE);
2667  * here we use "iopen_nopriv" hardcoded, because it affects MDS utility
2668  * and the rest of options are passed by mount options. Probably this
2669  * should be moved to somewhere else like startup scripts or lconf.
2671 sprintf(options, "iopen_nopriv");
/* append any caller-supplied mount options (inlbuf4) after a comma */
2673 if (lcfg->lcfg_inllen4 > 0 && lcfg->lcfg_inlbuf4)
2674 sprintf(options + strlen(options), ",%s",
2675 lcfg->lcfg_inlbuf4);
2677 /* we have to know mdsnum before touching underlying fs -bzzz */
2678 sema_init(&mds->mds_lmv_sem, 1);
2679 mds->mds_lmv_connected = 0;
/* "dumb" is a placeholder meaning "no LMV configured" */
2680 if (lcfg->lcfg_inllen5 > 0 && lcfg->lcfg_inlbuf5 &&
2681 strcmp(lcfg->lcfg_inlbuf5, "dumb")) {
2684 CDEBUG(D_OTHER, "MDS: %s is master for %s\n",
2685 obd->obd_name, lcfg->lcfg_inlbuf5);
/* generate a fresh uuid for this MDS's LMV connection */
2687 generate_random_uuid(uuid);
2688 class_uuid_unparse(uuid, &mds->mds_lmv_uuid);
2690 OBD_ALLOC(mds->mds_lmv_name, lcfg->lcfg_inllen5);
2691 if (mds->mds_lmv_name == NULL)
2692 RETURN(rc = -ENOMEM);
2694 memcpy(mds->mds_lmv_name, lcfg->lcfg_inlbuf5,
2695 lcfg->lcfg_inllen5);
2697 rc = mds_lmv_connect(obd, mds->mds_lmv_name);
/* on connect failure free the name we just allocated */
2699 OBD_FREE(mds->mds_lmv_name, lcfg->lcfg_inllen5);
/* default role is master unless inlbuf6 says otherwise */
2704 mds->mds_obd_type = MDS_MASTER_OBD;
2706 if (lcfg->lcfg_inllen6 > 0 && lcfg->lcfg_inlbuf6 &&
2707 strcmp(lcfg->lcfg_inlbuf6, "dumb")) {
2708 if (!memcmp(lcfg->lcfg_inlbuf6, "master", strlen("master"))) {
2709 mds->mds_obd_type = MDS_MASTER_OBD;
2710 } else if (!memcmp(lcfg->lcfg_inlbuf6, "cache", strlen("cache"))) {
2711 mds->mds_obd_type = MDS_CACHE_OBD;
/* actually mount the backing filesystem with the options built above */
2715 mnt = do_kern_mount(lcfg->lcfg_inlbuf2, 0, lcfg->lcfg_inlbuf1, options);
2720 CERROR("do_kern_mount failed: rc = %d\n", rc);
2724 CDEBUG(D_SUPER, "%s: mnt = %p\n", lcfg->lcfg_inlbuf1, mnt);
/* initialize per-MDS state: fid tracking, semaphores, counters */
2726 mds->mds_last_fid_changed = 0;
2727 sema_init(&mds->mds_epoch_sem, 1);
2728 sema_init(&mds->mds_last_fid_sem, 1);
2729 atomic_set(&mds->mds_real_clients, 0);
2730 spin_lock_init(&mds->mds_transno_lock);
2731 sema_init(&mds->mds_orphan_recovery_sem, 1);
2732 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
/* per-device LDLM namespace, named after this obd's uuid */
2734 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
2735 obd->obd_namespace = ldlm_namespace_new(ns_name, LDLM_NAMESPACE_SERVER);
2737 if (obd->obd_namespace == NULL) {
2738 mds_cleanup(obd, 0);
2739 GOTO(err_put, rc = -ENOMEM);
/* route intent locks through mds_intent_policy() */
2741 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
2743 rc = mds_fs_setup(obd, mnt);
2745 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
2750 rc = llog_start_commit_thread();
2755  * this check for @dumb string is needed to handle mounting MDS with
2756  * smfs. Read lconf:MDSDEV.write_conf() for more details.
2758 if (lcfg->lcfg_inllen3 > 0 && lcfg->lcfg_inlbuf3 &&
2759 strcmp(lcfg->lcfg_inlbuf3, "dumb")) {
2762 generate_random_uuid(uuid);
2763 class_uuid_unparse(uuid, &mds->mds_lov_uuid);
/* remember the LOV profile name; consumed later in mds_postsetup() */
2765 OBD_ALLOC(mds->mds_profile, lcfg->lcfg_inllen3);
2766 if (mds->mds_profile == NULL)
2767 GOTO(err_fs, rc = -ENOMEM);
2769 memcpy(mds->mds_profile, lcfg->lcfg_inlbuf3,
2770 lcfg->lcfg_inllen3);
2773  * setup root id in the case this is not clients write
2774  * setup. This is important, as in the case of LMV we need
2775  * mds->mds_num to be already assigned to form correct root fid.
2777 rc = mds_fs_setup_rootid(obd);
2781 /* setup lustre id for ID directory. */
2782 rc = mds_fs_setup_virtid(obd);
/* client used for sending ldlm callbacks back to clients */
2787 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
2788 "mds_ldlm_client", &obd->obd_ldlm_client);
2789 obd->obd_replayable = 1;
2791 rc = mds_postsetup(obd);
/* ---- error unwind labels (partially elided in this excerpt) ---- */
2798 /* No extra cleanup needed for llog_init_commit_thread() */
2799 mds_fs_cleanup(obd, 0);
2801 ldlm_namespace_free(obd->obd_namespace, 0);
2802 obd->obd_namespace = NULL;
2805 mntput(mds->mds_vfsmnt);
2809 fsfilt_put_ops(obd->obd_fsops);
/*
 * mds_postsetup(): second-stage MDS setup, run at the end of mds_setup().
 *
 * Creates the config llog context and, if a LOV profile was recorded in
 * mds_setup(), replays the profile's config llog, connects to the LOV it
 * names, and finishes LMV post-setup.
 *
 * NOTE(review): several error-check lines and the function's RETURNs are
 * elided in this excerpt; err_llog/err_cleanup unwinding is only partially
 * visible.
 */
2813 static int mds_postsetup(struct obd_device *obd)
2815 struct mds_obd *mds = &obd->u.mds;
/* create the original config llog context for this obd */
2819 rc = obd_llog_setup(obd, &obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT,
2820 obd, 0, NULL, &llog_lvfs_ops);
2824 if (mds->mds_profile) {
2825 struct llog_ctxt *lgctxt;
2826 struct lvfs_run_ctxt saved;
2827 struct lustre_profile *lprof;
2828 struct config_llog_instance cfg;
2830 cfg.cfg_instance = NULL;
2831 cfg.cfg_uuid = mds->mds_lov_uuid;
/* switch into the obd's lvfs context while reading the config llog */
2832 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2834 lgctxt = llog_get_context(&obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT);
/* (elided check) bail out if the context could not be obtained */
2836 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2837 GOTO(err_llog, rc = -EINVAL);
/* replay the recorded profile configuration */
2840 rc = class_config_process_llog(lgctxt, mds->mds_profile, &cfg);
2841 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2846 lprof = class_get_profile(mds->mds_profile);
2847 if (lprof == NULL) {
2848 CERROR("No profile found: %s\n", mds->mds_profile);
2849 GOTO(err_cleanup, rc = -ENOENT);
/* connect to the LOV named by the profile */
2851 rc = mds_lov_connect(obd, lprof->lp_lov);
2853 GOTO(err_cleanup, rc);
2855 rc = mds_lmv_postsetup(obd);
2857 GOTO(err_cleanup, rc);
/* error unwind: tear down the config llog context created above */
2864 obd_llog_cleanup(llog_get_context(&obd->obd_llogs,
2865 LLOG_CONFIG_ORIG_CTXT));
/*
 * mds_postrecov(): run after client recovery completes.
 *
 * Pushes the next object ids to the LOV, cleans the PENDING (orphan)
 * directory, tells the OSTs this MDS is connected (obd_set_info
 * "mds_conn"), reconnects the unlink llog and clears orphaned precreated
 * objects.  Returns the number of items processed, or a negative errno.
 *
 * NOTE(review): the out/err labels and some intermediate checks are
 * elided in this excerpt.
 */
2869 int mds_postrecov(struct obd_device *obd)
2871 struct mds_obd *mds = &obd->u.mds;
2872 struct llog_ctxt *ctxt;
2873 int rc, item = 0, valsize;
/* must only run once recovery has fully finished */
2877 LASSERT(!obd->obd_recovering);
2878 ctxt = llog_get_context(&obd->obd_llogs, LLOG_UNLINK_ORIG_CTXT);
2879 LASSERT(ctxt != NULL);
2881 /* set nextid first, so we are sure it happens */
2882 rc = mds_lov_set_nextid(obd);
2884 CERROR("%s: mds_lov_set_nextid failed\n", obd->obd_name);
2888 /* clean PENDING dir */
2889 rc = mds_cleanup_orphans(obd);
/* group id identifying this MDS towards the OSTs */
2894 group = FILTER_GROUP_FIRST_MDS + mds->mds_num;
2895 valsize = sizeof(group);
2896 rc = obd_set_info(mds->mds_lov_exp, strlen("mds_conn"), "mds_conn",
/* reconnect the unlink llog for every LOV target */
2901 rc = llog_connect(ctxt, obd->u.mds.mds_lov_desc.ld_tgt_count,
2904 CERROR("%s: failed at llog_origin_connect: %d\n",
2909 /* remove the orphaned precreated objects */
2910 rc = mds_lov_clearorphans(mds, NULL /* all OSTs */);
2915 RETURN(rc < 0 ? rc : item);
2918 /* cleanup all llogging subsystems */
2919 rc = obd_llog_finish(obd, &obd->obd_llogs,
2920 mds->mds_lov_desc.ld_tgt_count);
2922 CERROR("%s: failed to cleanup llogging subsystems\n",
/*
 * mds_lov_clean(): replay the "<profile>-clean" config llog to undo the
 * LOV configuration applied in mds_postsetup(), then free the saved
 * profile name.
 *
 * NOTE(review): OBD_ALLOC(cln_prof) is used by sprintf() with no visible
 * NULL check between them — confirm the check is not in an elided line;
 * otherwise this can oops on allocation failure.
 */
2927 int mds_lov_clean(struct obd_device *obd)
2929 struct mds_obd *mds = &obd->u.mds;
2932 if (mds->mds_profile) {
2934 struct llog_ctxt *llctx;
2935 struct lvfs_run_ctxt saved;
2936 struct config_llog_instance cfg;
/* sizeof("-clean") already counts the NUL, so the +1 is one spare byte */
2937 int len = strlen(mds->mds_profile) + sizeof("-clean") + 1;
2939 OBD_ALLOC(cln_prof, len);
2940 sprintf(cln_prof, "%s-clean", mds->mds_profile);
2942 cfg.cfg_instance = NULL;
2943 cfg.cfg_uuid = mds->mds_lov_uuid;
/* replay the cleanup profile inside the obd's lvfs context */
2945 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2946 llctx = llog_get_context(&obd->obd_llogs,
2947 LLOG_CONFIG_ORIG_CTXT);
2948 class_config_process_llog(llctx, cln_prof, &cfg);
2949 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
2951 OBD_FREE(cln_prof, len);
/* release the profile name saved by mds_setup() */
2952 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
2953 mds->mds_profile = NULL;
/* mds_lmv_clean(): free the LMV name saved by mds_setup(), if any. */
2958 int mds_lmv_clean(struct obd_device *obd)
2960 struct mds_obd *mds = &obd->u.mds;
2963 if (mds->mds_lmv_name) {
2964 OBD_FREE(mds->mds_lmv_name, strlen(mds->mds_lmv_name) + 1);
2965 mds->mds_lmv_name = NULL;
/* mds_precleanup(): obd precleanup method — disconnect from the LOV and
 * tear down the config llog context before mds_cleanup() runs. */
2970 static int mds_precleanup(struct obd_device *obd, int flags)
2976 mds_lov_disconnect(obd, flags);
2978 obd_llog_cleanup(llog_get_context(&obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT));
/*
 * mds_cleanup(): obd cleanup method — flush server data, free per-device
 * state, unmount the backing filesystem, free the LDLM namespace and stop
 * any in-flight recovery.
 *
 * NOTE(review): some lines (early RETURN when mds_sb is NULL, the element
 * size in the objids computation, final RETURN) are elided here.
 */
2982 static int mds_cleanup(struct obd_device *obd, int flags)
2984 struct mds_obd *mds = &obd->u.mds;
/* nothing mounted — nothing to clean up */
2987 if (mds->mds_sb == NULL)
/* persist last-committed state before tearing anything down */
2990 mds_update_server_data(obd, 1);
2991 mds_update_last_fid(obd, NULL, 1);
2993 if (mds->mds_lov_objids != NULL) {
2994 int size = mds->mds_lov_desc.ld_tgt_count *
2996 OBD_FREE(mds->mds_lov_objids, size);
2998 mds_fs_cleanup(obd, flags);
3003  * 2 seems normal on mds, (may_umount() also expects 2 fwiw), but we
3004  * only see 1 at this point in obdfilter.
3006 if (atomic_read(&obd->u.mds.mds_vfsmnt->mnt_count) > 2)
3007 CERROR("%s: mount busy, mnt_count %d != 2\n", obd->obd_name,
3008 atomic_read(&obd->u.mds.mds_vfsmnt->mnt_count));
/* drop the mount taken in mds_setup() */
3010 mntput(mds->mds_vfsmnt);
3013 ldlm_namespace_free(obd->obd_namespace, flags & OBD_OPT_FORCE);
/* abort a recovery that is still in progress */
3015 spin_lock_bh(&obd->obd_processing_task_lock);
3016 if (obd->obd_recovering) {
3017 target_cancel_recovery_timer(obd);
3018 obd->obd_recovering = 0;
3020 spin_unlock_bh(&obd->obd_processing_task_lock);
3023 dev_clear_rdonly(2);
3024 fsfilt_put_ops(obd->obd_fsops);
/*
 * fixup_handle_for_resent_req(): for a MSG_RESENT request, try to find the
 * lock the client is re-asking for among the export's already-held locks
 * (matched by the client's remote handle) and restore its local cookie
 * into @lockh.  If no such lock exists and the xid does not match the last
 * seen one, clear MSG_RESENT so the request is handled as a fresh one.
 *
 * On match, *old_lock gets a referenced pointer to the found lock
 * (caller must LDLM_LOCK_PUT it).
 */
3029 static void fixup_handle_for_resent_req(struct ptlrpc_request *req,
3031 struct ldlm_lock *new_lock,
3032 struct ldlm_lock **old_lock,
3033 struct lustre_handle *lockh)
3035 struct obd_export *exp = req->rq_export;
3036 struct obd_device *obd = exp->exp_obd;
3037 struct ldlm_request *dlmreq =
3038 lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*dlmreq));
3039 struct lustre_handle remote_hdl = dlmreq->lock_handle1;
3040 struct list_head *iter;
/* only resent requests need fixing up */
3042 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
/* walk this export's held locks under the namespace lock */
3045 l_lock(&obd->obd_namespace->ns_lock);
3046 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
3047 struct ldlm_lock *lock;
3048 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
/* skip the lock being created for this very request */
3049 if (lock == new_lock)
3051 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
3052 lockh->cookie = lock->l_handle.h_cookie;
3053 LDLM_DEBUG(lock, "restoring lock cookie");
3054 DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64,
/* hand a referenced copy back to the caller */
3057 *old_lock = LDLM_LOCK_GET(lock);
3058 l_unlock(&obd->obd_namespace->ns_lock);
3062 l_unlock(&obd->obd_namespace->ns_lock);
3064 /* If the xid matches, then we know this is a resent request,
3065  * and allow it. (It's probably an OPEN, for which we don't
/* (elided) ...send a reply containing a lock handle.) */
3067 if (req->rq_xid == exp->exp_mds_data.med_mcd->mcd_last_xid)
3070 /* This remote handle isn't enqueued, so we never received or
3071  * processed this request. Clear MSG_RESENT, because it can
3072  * be handled like any normal request now. */
3074 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3076 DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64,
/* intent_disposition(): test whether @flag is set in the intent reply's
 * lock_policy_res1 disposition mask.  (NULL guard elided in excerpt.) */
3080 int intent_disposition(struct ldlm_reply *rep, int flag)
3084 return (rep->lock_policy_res1 & flag);
/* intent_set_disposition(): set @flag in the intent reply's
 * lock_policy_res1 disposition mask.  (NULL guard elided in excerpt.) */
3087 void intent_set_disposition(struct ldlm_reply *rep, int flag)
3091 rep->lock_policy_res1 |= flag;
/*
 * mds_intent_policy(): LDLM intent handler registered for the MDS
 * namespace in mds_setup().
 *
 * Unpacks the ldlm_intent from the request, dispatches on the intent
 * opcode (open/create, getattr/lookup variants, unlink-check), executes
 * the operation, and either aborts the lock (ELDLM_LOCK_ABORTED) or
 * replaces the about-to-be-granted lock with one created by the intent
 * execution (ELDLM_LOCK_REPLACED), fixing the replacement lock up so it
 * can be returned to the client.
 *
 * NOTE(review): numerous lines (case labels, GOTOs, RETURNs, braces) are
 * elided in this excerpt; the switch structure is only partially visible.
 */
3094 static int mds_intent_policy(struct ldlm_namespace *ns,
3095 struct ldlm_lock **lockp, void *req_cookie,
3096 ldlm_mode_t mode, int flags, void *data)
3098 struct ptlrpc_request *req = req_cookie;
3099 struct ldlm_lock *lock = *lockp;
3100 struct ldlm_intent *it;
3101 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
3102 struct ldlm_reply *rep;
3103 struct lustre_handle lockh[2] = {{0}, {0}};
3104 struct ldlm_lock *new_lock = NULL;
3105 int getattr_part = MDS_INODELOCK_UPDATE;
3106 int rc, repsize[4] = { sizeof(struct ldlm_reply),
3107 sizeof(struct mds_body),
3108 mds->mds_max_mdsize,
3109 mds->mds_max_cookiesize };
3110 int offset = MDS_REQ_INTENT_REC_OFF;
3113 LASSERT(req != NULL);
3114 MD_COUNTER_INCREMENT(req->rq_export->exp_obd, intent_lock);
/* a request with no intent buffer gets a plain ldlm reply */
3116 if (req->rq_reqmsg->bufcount <= MDS_REQ_INTENT_IT_OFF) {
3117 /* No intent was provided */
3118 int size = sizeof(struct ldlm_reply);
3119 rc = lustre_pack_reply(req, 1, &size, NULL);
/* swab and validate the intent record */
3124 it = lustre_swab_reqbuf(req, MDS_REQ_INTENT_IT_OFF, sizeof(*it),
3125 lustre_swab_ldlm_intent);
3127 CERROR("Intent missing\n");
3128 RETURN(req->rq_status = -EFAULT);
3131 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
3133 rc = lustre_pack_reply(req, 3, repsize, NULL);
3135 RETURN(req->rq_status = rc);
3137 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
3138 LASSERT(rep != NULL);
/* mark the intent as executed before dispatching */
3140 intent_set_disposition(rep, DISP_IT_EXECD);
3142 /* execute policy */
3143 switch ((long)it->opc) {
3145 case IT_CREAT|IT_OPEN:
3146 /* XXX swab here to assert that an mds_open reint
3147  * packet is following */
3148 fixup_handle_for_resent_req(req, MDS_REQ_INTENT_LOCKREQ_OFF,
3150 rep->lock_policy_res2 = mds_reint(req, offset, lockh);
3152 /* We abort the lock if the lookup was negative and
3153  * we did not make it to the OPEN portion */
3154 if (!intent_disposition(rep, DISP_LOOKUP_EXECD))
3155 RETURN(ELDLM_LOCK_ABORTED);
3156 if (intent_disposition(rep, DISP_LOOKUP_NEG) &&
3157 !intent_disposition(rep, DISP_OPEN_OPEN))
3159 /* IT_OPEN may return lock on cross-node dentry
3160  * that we want to hold during attr retrieval -bzzz */
3161 if (rc != 0 || lockh[0].cookie == 0)
3162 RETURN(ELDLM_LOCK_ABORTED);
/* (elided case labels) getattr-style intents */
3165 getattr_part = MDS_INODELOCK_LOOKUP;
3168 getattr_part |= MDS_INODELOCK_LOOKUP;
3170 fixup_handle_for_resent_req(req, MDS_REQ_INTENT_LOCKREQ_OFF,
3171 lock, &new_lock, lockh);
3172 rep->lock_policy_res2 = mds_getattr_lock(req, offset, lockh,
3174 /* FIXME: LDLM can set req->rq_status. MDS sets
3175  * policy_res{1,2} with disposition and status.
3176  * - replay: returns 0 & req->status is old status
3177  * - otherwise: returns req->status */
3178 if (intent_disposition(rep, DISP_LOOKUP_NEG))
3179 rep->lock_policy_res2 = 0;
3180 if (!intent_disposition(rep, DISP_LOOKUP_POS) ||
3181 rep->lock_policy_res2)
3182 RETURN(ELDLM_LOCK_ABORTED);
3183 if (req->rq_status != 0) {
3185 rep->lock_policy_res2 = req->rq_status;
3186 RETURN(ELDLM_LOCK_ABORTED);
/* (elided case label) unlink slave check */
3190 rc = mds_lock_and_check_slave(offset, req, lockh);
3191 if ((rep->lock_policy_res2 = rc)) {
3193 rep->lock_policy_res2 = 0;
3194 RETURN(ELDLM_LOCK_ABORTED);
3198 CERROR("Unhandled intent "LPD64"\n", it->opc);
3202 /* By this point, whatever function we called above must have either
3203  * filled in 'lockh', been an intent replay, or returned an error. We
3204  * want to allow replayed RPCs to not get a lock, since we would just
3205  * drop it below anyways because lock replay is done separately by the
3206  * client afterwards. For regular RPCs we want to give the new lock to
3207  * the client instead of whatever lock it was about to get. */
3208 if (new_lock == NULL)
3209 new_lock = ldlm_handle2lock(&lockh[0]);
3210 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
3213 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
3214 it->opc, lockh[0].cookie);
3216 /* If we've already given this lock to a client once, then we should
3217  * have no readers or writers. Otherwise, we should have one reader
3218  * _or_ writer ref (which will be zeroed below) before returning the
3219  * lock to a client. */
3220 if (new_lock->l_export == req->rq_export) {
3221 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3223 LASSERT(new_lock->l_export == NULL);
3224 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3229 if (new_lock->l_export == req->rq_export) {
3230 /* Already gave this to the client, which means that we
3231  * reconstructed a reply. */
3232 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3234 RETURN(ELDLM_LOCK_REPLACED);
3237 /* Fixup the lock to be given to the client */
3238 l_lock(&new_lock->l_resource->lr_namespace->ns_lock);
3239 new_lock->l_readers = 0;
3240 new_lock->l_writers = 0;
/* transfer ownership of the lock to the requesting export */
3242 new_lock->l_export = class_export_get(req->rq_export);
3243 list_add(&new_lock->l_export_chain,
3244 &new_lock->l_export->exp_ldlm_data.led_held_locks);
/* inherit the ASTs and remote handle from the original lock */
3246 new_lock->l_blocking_ast = lock->l_blocking_ast;
3247 new_lock->l_completion_ast = lock->l_completion_ast;
3249 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
3250 sizeof(lock->l_remote_handle));
3252 new_lock->l_flags &= ~LDLM_FL_LOCAL;
3254 LDLM_LOCK_PUT(new_lock);
3255 l_unlock(&new_lock->l_resource->lr_namespace->ns_lock);
3257 RETURN(ELDLM_LOCK_REPLACED);
/* mds_attach(): obd attach method — register /proc entries (MDS variable
 * set, index 0) and allocate the metadata stats counters. */
3260 int mds_attach(struct obd_device *dev, obd_count len, void *data)
3262 struct lprocfs_static_vars lvars;
3265 lprocfs_init_multi_vars(0, &lvars);
3267 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
3271 return lprocfs_alloc_md_stats(dev, 0);
/* mds_detach(): obd detach method — undo mds_attach(): free md stats and
 * remove the /proc entries. */
3274 int mds_detach(struct obd_device *dev)
3276 lprocfs_free_md_stats(dev);
3277 return lprocfs_obd_detach(dev);
/* mdt_attach(): obd attach method for the MDT (service) device — register
 * /proc entries using the MDT variable set (index 1). */
3280 int mdt_attach(struct obd_device *dev, obd_count len, void *data)
3282 struct lprocfs_static_vars lvars;
3284 lprocfs_init_multi_vars(1, &lvars);
3285 return lprocfs_obd_attach(dev, lvars.obd_vars);
/* mdt_detach(): obd detach method for the MDT device. */
3288 int mdt_detach(struct obd_device *dev)
3290 return lprocfs_obd_detach(dev);
/*
 * mdt_setup(): obd setup method for the MDT service device.
 *
 * Starts three ptlrpc services — the main request service, a setattr
 * service and a readpage service — each with MDT_NUM_THREADS handler
 * threads, unwinding via err_thread* labels on failure.
 *
 * NOTE(review): some lines (the assignment of mds->mds_service, early
 * checks, RETURN) are elided in this excerpt.  Also note the error
 * message for the setattr service says "getattr service" — presumably a
 * copy-paste slip; confirm against the full source before changing.
 */
3293 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
3295 struct mds_obd *mds = &obd->u.mds;
/* main MDS request service */
3300 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
3301 MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
3302 mds_handle, "mds", obd->obd_proc_entry);
3304 if (!mds->mds_service) {
3305 CERROR("failed to start service\n");
3309 rc = ptlrpc_start_n_threads(obd, mds->mds_service, MDT_NUM_THREADS,
3312 GOTO(err_thread, rc);
/* dedicated service for setattr requests */
3314 mds->mds_setattr_service =
3315 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
3316 MDS_SETATTR_PORTAL, MDC_REPLY_PORTAL,
3317 mds_handle, "mds_setattr",
3318 obd->obd_proc_entry);
3319 if (!mds->mds_setattr_service) {
3320 CERROR("failed to start getattr service\n");
3321 GOTO(err_thread, rc = -ENOMEM);
3324 rc = ptlrpc_start_n_threads(obd, mds->mds_setattr_service,
3325 MDT_NUM_THREADS, "ll_mdt_attr");
3327 GOTO(err_thread2, rc);
/* dedicated service for readdir/readpage requests */
3329 mds->mds_readpage_service =
3330 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
3331 MDS_READPAGE_PORTAL, MDC_REPLY_PORTAL,
3332 mds_handle, "mds_readpage",
3333 obd->obd_proc_entry);
3334 if (!mds->mds_readpage_service) {
3335 CERROR("failed to start readpage service\n");
3336 GOTO(err_thread2, rc = -ENOMEM);
3339 rc = ptlrpc_start_n_threads(obd, mds->mds_readpage_service,
3340 MDT_NUM_THREADS, "ll_mdt_rdpg");
3343 GOTO(err_thread3, rc);
/* error unwind: unregister services in reverse order of creation */
3348 ptlrpc_unregister_service(mds->mds_readpage_service);
3350 ptlrpc_unregister_service(mds->mds_setattr_service);
3352 ptlrpc_unregister_service(mds->mds_service);
/* mdt_cleanup(): stop all handler threads and unregister the three ptlrpc
 * services started in mdt_setup(), in reverse order of creation. */
3356 static int mdt_cleanup(struct obd_device *obd, int flags)
3358 struct mds_obd *mds = &obd->u.mds;
3361 ptlrpc_stop_all_threads(mds->mds_readpage_service);
3362 ptlrpc_unregister_service(mds->mds_readpage_service);
3364 ptlrpc_stop_all_threads(mds->mds_setattr_service);
3365 ptlrpc_unregister_service(mds->mds_setattr_service);
3367 ptlrpc_stop_all_threads(mds->mds_service);
3368 ptlrpc_unregister_service(mds->mds_service);
/* mds_lvfs_id2dentry(): lvfs callback — build a lustre_id from the raw
 * (ino, generation) pair and resolve it to a dentry via mds_id2dentry().
 * (The id-composition lines are elided in this excerpt.) */
3373 static struct dentry *mds_lvfs_id2dentry(__u64 ino, __u32 gen,
3374 __u64 gr, void *data)
3376 struct lustre_id id;
3377 struct obd_device *obd = data;
3381 return mds_id2dentry(obd, &id, NULL);
/*
 * mds_get_info(): obd get_info method — look up a named key and copy the
 * corresponding value and its size into *val / *valsize.
 *
 * Supported keys: "reint_log", "cache_sb", "mdsize", "mdsnum", "rootid".
 *
 * NOTE(review): key matching is inconsistent — most keys compare a fixed
 * prefix length, but "mdsize" uses memcmp(key, "mdsize", keylen) with the
 * caller's keylen, and "mdsnum"/"rootid" use strcmp (requires the caller's
 * key to be NUL-terminated).  Confirm callers before normalizing.
 */
3384 static int mds_get_info(struct obd_export *exp, __u32 keylen,
3385 void *key, __u32 *valsize, void *val)
3387 struct obd_device *obd;
3388 struct mds_obd *mds;
3391 obd = class_exp2obd(exp);
/* (elided check) reject exports with no backing obd */
3395 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
3396 exp->exp_handle.h_cookie);
3400 if (keylen >= strlen("reint_log") && memcmp(key, "reint_log", 9) == 0) {
3401 /* get log_context handle. */
3402 unsigned long *llh_handle = val;
3403 *valsize = sizeof(unsigned long);
3404 *llh_handle = (unsigned long)obd->obd_llog_ctxt[LLOG_REINT_ORIG_CTXT];
3407 if (keylen >= strlen("cache_sb") && memcmp(key, "cache_sb", 8) == 0) {
/* return the backing superblock pointer as an opaque handle */
3409 unsigned long *sb = val;
3410 *valsize = sizeof(unsigned long);
3411 *sb = (unsigned long)obd->u.mds.mds_sb;
3415 if (keylen >= strlen("mdsize") && memcmp(key, "mdsize", keylen) == 0) {
3416 __u32 *mdsize = val;
3417 *valsize = sizeof(*mdsize);
3418 *mdsize = mds->mds_max_mdsize;
3422 if (keylen >= strlen("mdsnum") && strcmp(key, "mdsnum") == 0) {
3423 __u32 *mdsnum = val;
3424 *valsize = sizeof(*mdsnum);
3425 *mdsnum = mds->mds_num;
3429 if (keylen >= strlen("rootid") && strcmp(key, "rootid") == 0) {
3430 struct lustre_id *rootid = val;
3431 *valsize = sizeof(struct lustre_id);
3432 *rootid = mds->mds_rootid;
/* unknown key — fall through to the error return (elided) */
3436 CDEBUG(D_IOCTL, "invalid key\n");
/* lvfs callbacks exported by the MDS: id -> dentry resolution. */
3440 struct lvfs_callback_ops mds_lvfs_ops = {
3441 l_id2dentry: mds_lvfs_id2dentry,
/* Forward declarations for the bulk-I/O methods wired into mds_obd_ops
 * below; their definitions live elsewhere in the MDS sources. */
3444 int mds_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
3445 int objcount, struct obd_ioobj *obj,
3446 int niocount, struct niobuf_remote *nb,
3447 struct niobuf_local *res,
3448 struct obd_trans_info *oti);
3450 int mds_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
3451 int objcount, struct obd_ioobj *obj, int niocount,
3452 struct niobuf_local *res, struct obd_trans_info *oti,
3455 /* use obd ops to offer management infrastructure */
/* obd method table for the "mds" device type (registered in mds_init). */
3456 static struct obd_ops mds_obd_ops = {
3457 .o_owner = THIS_MODULE,
3458 .o_attach = mds_attach,
3459 .o_detach = mds_detach,
3460 .o_connect = mds_connect,
3461 .o_connect_post = mds_connect_post,
3462 .o_init_export = mds_init_export,
3463 .o_destroy_export = mds_destroy_export,
3464 .o_disconnect = mds_disconnect,
3465 .o_setup = mds_setup,
3466 .o_precleanup = mds_precleanup,
3467 .o_cleanup = mds_cleanup,
3468 .o_postrecov = mds_postrecov,
3469 .o_statfs = mds_obd_statfs,
3470 .o_iocontrol = mds_iocontrol,
3471 .o_create = mds_obd_create,
3472 .o_destroy = mds_obd_destroy,
3473 .o_llog_init = mds_llog_init,
3474 .o_llog_finish = mds_llog_finish,
3475 .o_notify = mds_notify,
3476 .o_get_info = mds_get_info,
3477 .o_set_info = mds_set_info,
3478 .o_preprw = mds_preprw,
3479 .o_commitrw = mds_commitrw,
/* obd method table for the "mdt" service device type: it only manages the
 * ptlrpc services, so just attach/detach/setup/cleanup are provided. */
3482 static struct obd_ops mdt_obd_ops = {
3483 .o_owner = THIS_MODULE,
3484 .o_attach = mdt_attach,
3485 .o_detach = mdt_detach,
3486 .o_setup = mdt_setup,
3487 .o_cleanup = mdt_cleanup,
/* mds_init(): module init — set up the uid/gid group hash and register
 * both the MDS and MDT obd device types with their /proc variables.
 * (Registration arguments after module_vars are elided in this excerpt.) */
3490 static int __init mds_init(void)
3492 struct lprocfs_static_vars lvars;
3494 mds_group_hash_init();
3496 lprocfs_init_multi_vars(0, &lvars);
3497 class_register_type(&mds_obd_ops, NULL, lvars.module_vars,
3499 lprocfs_init_multi_vars(1, &lvars);
3500 class_register_type(&mdt_obd_ops, NULL, lvars.module_vars,
/* mds_exit(): module exit — tear down the group hash and unregister both
 * device types registered in mds_init(). */
3506 static void /*__exit*/ mds_exit(void)
3508 mds_group_hash_cleanup();
3510 class_unregister_type(LUSTRE_MDS_NAME);
3511 class_unregister_type(LUSTRE_MDT_NAME);
/* Standard kernel module metadata and entry/exit point registration. */
3514 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
3515 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
3516 MODULE_LICENSE("GPL");
3518 module_init(mds_init);
3519 module_exit(mds_exit);