1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of Lustre, http://www.lustre.org.
15 * Lustre is free software; you can redistribute it and/or
16 * modify it under the terms of version 2 of the GNU General Public
17 * License as published by the Free Software Foundation.
19 * Lustre is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with Lustre; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_MDS
34 #include <linux/module.h>
35 #include <linux/lustre_mds.h>
36 #include <linux/lustre_dlm.h>
37 #include <linux/init.h>
38 #include <linux/obd_class.h>
39 #include <linux/random.h>
41 #include <linux/jbd.h>
42 #include <linux/namei.h>
43 #include <linux/ext3_fs.h>
44 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
45 # include <linux/smp_lock.h>
46 # include <linux/buffer_head.h>
47 # include <linux/workqueue.h>
48 # include <linux/mount.h>
50 # include <linux/locks.h>
52 #include <linux/obd_lov.h>
53 #include <linux/obd_ost.h>
54 #include <linux/lustre_mds.h>
55 #include <linux/lustre_fsfilt.h>
56 #include <linux/lprocfs_status.h>
57 #include <linux/lustre_commit_confd.h>
58 #include <linux/lustre_acl.h>
59 #include "mds_internal.h"
60 #include <linux/lustre_sec.h>
62 static int mds_intent_policy(struct ldlm_namespace *ns,
63 struct ldlm_lock **lockp, void *req_cookie,
64 ldlm_mode_t mode, int flags, void *data);
65 static int mds_postsetup(struct obd_device *obd);
66 static int mds_cleanup(struct obd_device *obd, int flags);
69 /* Assumes caller has already pushed into the kernel filesystem context */
/*
 * mds_sendpage(): read 'count' bytes of directory data from 'file' starting
 * at 'offset' (via fsfilt_readpage) and push them to the client with a bulk
 * PUT (ptlrpc_prep_bulk_exp / ptlrpc_start_bulk_transfer).  On bulk failure
 * or timeout the client export is evicted (ptlrpc_fail_export).
 *
 * NOTE(review): this chunk is an incomplete extraction -- the embedded
 * original line numbers show gaps, so several if-checks, braces and RETURN
 * statements are elided here (e.g. the NULL check that must precede the
 * GOTO(out, -ENOMEM) after OBD_ALLOC).  Do not reason about control flow
 * from this view alone.
 */
70 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
71                         loff_t offset, int count)
73         struct ptlrpc_bulk_desc *desc;
74         struct l_wait_info lwi;
76         int rc = 0, npages, i, tmpcount, tmpsize = 0;
/* offset must be page-aligned -- readpage below advances it page by page */
79         LASSERT((offset & (PAGE_SIZE - 1)) == 0); /* I'm dubious about this */
81         npages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT;
82         OBD_ALLOC(pages, sizeof(*pages) * npages);
84                 GOTO(out, rc = -ENOMEM);
86         desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
89                 GOTO(out_free, rc = -ENOMEM);
/* first pass: allocate one page per bulk fragment and register it */
91         for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
92                 tmpsize = tmpcount > PAGE_SIZE ? PAGE_SIZE : tmpcount;
94                 pages[i] = alloc_pages(GFP_KERNEL, 0);
96                         GOTO(cleanup_buf, rc = -ENOMEM);
98                 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* second pass: fill each page with directory data from the filesystem */
101         for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
102                 tmpsize = tmpcount > PAGE_SIZE ? PAGE_SIZE : tmpcount;
103                 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
104                        tmpsize, offset, file->f_dentry->d_inode->i_ino,
105                        file->f_dentry->d_inode->i_size);
107                 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
108                                      kmap(pages[i]), tmpsize, &offset);
112                         GOTO(cleanup_buf, rc = -EIO);
115         LASSERT(desc->bd_nob == count);
117         rc = ptlrpc_start_bulk_transfer(desc);
119                 GOTO(cleanup_buf, rc);
/* fault-injection hook for testing the bulk-failure/eviction path */
121         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
122                 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
123                        OBD_FAIL_MDS_SENDPAGE, rc = -EIO);
124                 GOTO(abort_bulk, rc);
/* wait a quarter of obd_timeout for the bulk to complete */
127         lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
128         rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), &lwi);
129         LASSERT (rc == 0 || rc == -ETIMEDOUT);
132         if (desc->bd_success &&
133             desc->bd_nob_transferred == count)
134                 GOTO(cleanup_buf, rc);
136         rc = -ETIMEDOUT; /* XXX should this be a different errno? */
139         DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
140                   (rc == -ETIMEDOUT) ? "timeout" : "network error",
141                   desc->bd_nob_transferred, count,
142                   req->rq_export->exp_client_uuid.uuid,
143                   req->rq_export->exp_connection->c_remote_uuid.uuid);
/* client failed to fully receive the bulk -- evict it so it reconnects */
145         ptlrpc_fail_export(req->rq_export);
149         ptlrpc_abort_bulk (desc);
151         for (i = 0; i < npages; i++)
153                 __free_pages(pages[i], 0);
155         ptlrpc_free_bulk(desc);
157         OBD_FREE(pages, sizeof(*pages) * npages);
162 extern char *ldlm_lockname[];
/*
 * mds_lock_mode_for_dir(): choose the DLM mode for the "whole directory"
 * lock (lock #2 in the scheme described below) given the caller's intended
 * access mode and whether the directory is expected to be split across MDSes.
 * Returns the chosen mode in ret_mode (0 meaning no extra lock needed --
 * TODO confirm; the returning code is elided from this view).
 *
 * NOTE(review): incomplete extraction -- several branch bodies and the
 * closing return are missing here.
 */
164 int mds_lock_mode_for_dir(struct obd_device *obd,
165                           struct dentry *dentry, int mode)
167         int ret_mode = 0, split;
169         /* any dir access needs couple locks:
170          * 1) on part of dir we gonna lookup/modify in
171          * 2) on a whole dir to protect it from concurrent splitting
172          *    and to flush client's cache for readdir()
173          * so, for a given mode and dentry this routine decides what
174          * lock mode to use for lock #2:
175          * 1) if caller's gonna lookup in dir then we need to protect
176          *    dir from being splitted only - LCK_CR
177          * 2) if caller's gonna modify dir then we need to protect
178          *    dir from being splitted and to flush cache - LCK_CW
179          * 3) if caller's gonna modify dir and that dir seems ready
180          *    for splitting then we need to protect it from any
181          *    type of access (lookup/modify/split) - LCK_EX -bzzz */
183         split = mds_splitting_expected(obd, dentry);
186          * it is important to check here only for MDS_NO_SPLITTABLE. The reason
187          * is that MDS_NO_SPLITTABLE means dir is not splittable in principle
188          * and another thread will not split it on the quiet. But if we have
189          * MDS_NO_SPLIT_EXPECTED, this means, that dir may be splitted anytime,
190          * but not now (for current thread) and we should consider that it can
191          * happen soon and go that branch which can yield LCK_EX to protect from
192          * possible splitting.
194         if (split == MDS_NO_SPLITTABLE) {
196                  * this inode won't be splitted. so we need not to protect from
197                  * just flush client's cache on modification.
204         if (mode == LCK_EX) {
206         } else if (mode == LCK_PR) {
208         } else if (mode == LCK_PW) {
210                  * caller gonna modify directory. We use concurrent
211                  * write lock here to retract client's cache for
214                 if (split == MDS_EXPECT_SPLIT) {
216                          * splitting possible. serialize any access the
217                          * idea is that first one seen dir is splittable
218                          * is given exclusive lock and split
219                          * directory. caller passes lock mode to
220                          * mds_try_to_split_dir() and splitting would be
221                          * done with exclusive lock only -bzzz.
223                         CDEBUG(D_OTHER, "%s: gonna split %lu/%lu\n",
225                                (unsigned long)dentry->d_inode->i_ino,
226                                (unsigned long)dentry->d_inode->i_generation);
237 /* only valid locked dentries or errors should be returned */
/*
 * mds_id2locked_dentry(): look up a dentry by lustre_id (mds_id2dentry)
 * and enqueue an LDLM inodebits lock on it (bits chosen by 'lockpart',
 * mode 'lock_mode', handle stored in lockh[0]).  When 'name' is given and
 * the directory uses PDIROPS, a second per-directory lock is taken first
 * (handle in lockh[1], mode decided by mds_lock_mode_for_dir() and
 * reported via *mode), and the per-name resource is derived from
 * full_name_hash(name).
 *
 * Returns the dget'd dentry on success, or ERR_PTR() on failure; on error
 * any already-acquired PDIROPS lock is dropped.  An existing-but-unlinked
 * inode (i_nlink == 0) is treated as -ENOENT.
 *
 * NOTE(review): incomplete extraction -- the early dentry/IS_ERR check,
 * 'flags'/'rc' declarations and several braces are elided from this view.
 */
238 struct dentry *mds_id2locked_dentry(struct obd_device *obd, struct lustre_id *id,
239                                     struct vfsmount **mnt, int lock_mode,
240                                     struct lustre_handle *lockh, int *mode,
241                                     char *name, int namelen, __u64 lockpart)
243         struct dentry *de = mds_id2dentry(obd, id, mnt), *retval = de;
244         ldlm_policy_data_t policy = { .l_inodebits = { lockpart } };
245         struct ldlm_res_id res_id = { .name = {0} };
/* lock resource is named by the fid/group pair of the target inode */
253         res_id.name[0] = id_fid(id);
254         res_id.name[1] = id_group(id);
/* PDIROPS: take the whole-dir UPDATE lock before the per-name lock */
257         if (name && IS_PDIROPS(de->d_inode)) {
258                 ldlm_policy_data_t cpolicy =
259                         { .l_inodebits = { MDS_INODELOCK_UPDATE } };
260                 LASSERT(mode != NULL);
261                 *mode = mds_lock_mode_for_dir(obd, de, lock_mode);
263                 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
265                                       &cpolicy, *mode, &flags,
267                                       ldlm_completion_ast, NULL, NULL,
268                                       NULL, 0, NULL, lockh + 1);
269                 if (rc != ELDLM_OK) {
271                         RETURN(ERR_PTR(-ENOLCK));
/* narrow the main lock to the hash bucket of this particular name */
276                 res_id.name[2] = full_name_hash((unsigned char *)name, namelen);
278                 CDEBUG(D_INFO, "take lock on "DLID4":"LPX64"\n",
279                        OLID4(id), res_id.name[2]);
282 #warning "No PDIROPS support in the kernel"
284         rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, res_id,
285                               LDLM_IBITS, &policy, lock_mode, &flags,
286                               mds_blocking_ast, ldlm_completion_ast,
287                               NULL, NULL, NULL, 0, NULL, lockh);
288         if (rc != ELDLM_OK) {
290                 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
/* drop the PDIROPS dir lock taken above on enqueue failure */
293                 ldlm_lock_decref(lockh + 1, *mode);
295         } else if (de->d_inode && de->d_inode->i_nlink == 0) {
296                 /* as sometimes we lookup inode by ino/generation through
297                    iopen mechanism, it's possible to find already unlinked
298                    inode with nlink == 0. let's interpretate the case as
300                 CWARN("found already unlinked inode %lu/%u\n",
301                       de->d_inode->i_ino, de->d_inode->i_generation);
303                 retval = ERR_PTR(-ENOENT);
304                 ldlm_lock_decref(lockh, lock_mode);
307                 ldlm_lock_decref(lockh + 1, *mode);
314 #ifndef DCACHE_DISCONNECTED
315 #define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
318 /* Look up an entry by inode number. This function ONLY returns valid dget'd
319  * dentries with an initialized inode or errors */
/*
 * mds_id2dentry(): resolve a lustre_id (ino + generation) to a dget'd
 * dentry using the iopen pseudo-directory (ll_lookup_one_len on a
 * "0x<ino>" name under mds->mds_id_de).  Validates that the found inode is
 * not bad and that its generation matches (generation check is skipped for
 * the root inode, see comment below).  Optionally returns the vfsmount.
 *
 * NOTE(review): incomplete extraction -- the ino==0 check, idname/inode
 * declarations, IS_ERR(result) handling and dput() calls on the error
 * paths are elided from this view.
 */
320 struct dentry *mds_id2dentry(struct obd_device *obd, struct lustre_id *id,
321                              struct vfsmount **mnt)
323         unsigned long ino = (unsigned long)id_ino(id);
324         __u32 generation = (__u32)id_gen(id);
325         struct mds_obd *mds = &obd->u.mds;
326         struct dentry *result;
331                 RETURN(ERR_PTR(-ESTALE));
333         snprintf(idname, sizeof(idname), "0x%lx", ino);
335         CDEBUG(D_DENTRY, "--> mds_id2dentry: ino/gen %lu/%u, sb %p\n",
336                ino, generation, mds->mds_sb);
338         /* under ext3 this is neither supposed to return bad inodes nor NULL
340         result = ll_lookup_one_len(idname, mds->mds_id_de,
345         inode = result->d_inode;
347                 RETURN(ERR_PTR(-ENOENT));
349         if (is_bad_inode(inode)) {
350                 CERROR("bad inode returned %lu/%u\n",
351                        inode->i_ino, inode->i_generation);
353                 RETURN(ERR_PTR(-ENOENT));
356         /* here we disabled generation check, as root inode i_generation
357          * of cache mds and real mds are different. */
358         if (inode->i_ino != id_ino(&mds->mds_rootid) && generation &&
359             inode->i_generation != generation) {
360                 /* we didn't find the right inode.. */
/* a group mismatch means the client asked the wrong MDS entirely */
361                 if (id_group(id) != mds->mds_num) {
362                         CERROR("bad inode %lu found, link: %lu, ct: %d, generation "
363                                "%u != %u, mds %u != %u, request to wrong MDS?\n",
364                                inode->i_ino, (unsigned long)inode->i_nlink,
365                                atomic_read(&inode->i_count), inode->i_generation,
366                                generation, mds->mds_num, (unsigned)id_group(id));
368                         CERROR("bad inode %lu found, link: %lu, ct: %d, generation "
369                                "%u != %u, inode is recreated while request handled?\n",
370                                inode->i_ino, (unsigned long)inode->i_nlink,
371                                atomic_read(&inode->i_count), inode->i_generation,
375                 RETURN(ERR_PTR(-ENOENT));
379                 *mnt = mds->mds_vfsmnt;
/*
 * mds_req_add_idmapping(): for a remote client, record the mapping between
 * the client's (uid,gid) from the request security descriptor and the
 * locally mapped (uid,gid) from the LSD cache.  No-op for local clients.
 *
 * NOTE(review): incomplete extraction -- RETURN statements, NULL checks
 * after lustre_swab_mds_secdesc()/mds_get_lsd() and the mds_put_lsd()
 * release (presumably -- verify) are elided from this view.
 */
387 int mds_req_add_idmapping(struct ptlrpc_request *req,
388                           struct mds_export_data *med)
390         struct mds_req_sec_desc *rsd;
391         struct lustre_sec_desc  *lsd;
394         if (!med->med_remote)
397         /* maybe we should do it more completely: invalidate the gss ctxt? */
398         if (req->rq_mapped_uid == MDS_IDMAP_NOTFOUND) {
399                 CWARN("didn't find mapped uid\n");
403         rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
405                 CERROR("Can't unpack security desc\n");
409         lsd = mds_get_lsd(req->rq_mapped_uid);
411                 CERROR("can't get LSD(%u), no mapping added\n",
416         rc = mds_idmap_add(med->med_idmap, rsd->rsd_uid, lsd->lsd_uid,
417                            rsd->rsd_gid, lsd->lsd_gid);
/*
 * mds_req_del_idmapping(): inverse of mds_req_add_idmapping() -- remove the
 * uid/gid idmap entry for a remote client based on the request security
 * descriptor and the LSD cache.  No-op for local clients.
 *
 * NOTE(review): incomplete extraction -- error RETURNs and NULL checks are
 * elided from this view, mirroring mds_req_add_idmapping() above.
 */
423 int mds_req_del_idmapping(struct ptlrpc_request *req,
424                           struct mds_export_data *med)
426         struct mds_req_sec_desc *rsd;
427         struct lustre_sec_desc  *lsd;
430         if (!med->med_remote)
433         rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
435                 CERROR("Can't unpack security desc\n");
439         LASSERT(req->rq_mapped_uid != -1);
440         lsd = mds_get_lsd(req->rq_mapped_uid);
442                 CERROR("can't get LSD(%u), no idmapping deleted\n",
447         rc = mds_idmap_del(med->med_idmap, rsd->rsd_uid, lsd->lsd_uid,
448                            rsd->rsd_gid, lsd->lsd_gid);
/*
 * mds_init_export_data(): on (re)connect, decide whether the client export
 * is "remote" or "local" based on the client's requested connect flags
 * (OBD_CONNECT_REMOTE/LOCAL), the authenticated realm of the request, and
 * the security flavor (null sec forces local).  Remote clients get an
 * idmap allocated and the decision is echoed back in the reply's
 * ocd_connect_flags.  Idempotent across reconnects via med_initialized.
 *
 * NOTE(review): incomplete extraction -- the ask_remote branch header,
 * med_remote assignments inside the branches, and the final RETURN are
 * elided from this view.
 */
453 static int mds_init_export_data(struct ptlrpc_request *req,
454                                 struct mds_export_data *med)
456         struct obd_connect_data *data, *reply;
457         int ask_remote, ask_local;
460         data = lustre_msg_buf(req->rq_reqmsg, 5, sizeof(*data));
461         reply = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*data));
462         LASSERT(data && reply);
464         if (med->med_initialized) {
465                 CDEBUG(D_SEC, "med already initialized, reconnect?\n");
469         ask_remote = data->ocd_connect_flags & OBD_CONNECT_REMOTE;
470         ask_local = data->ocd_connect_flags & OBD_CONNECT_LOCAL;
472         /* currently the policy is simple: satisfy client as possible
/* req->rq_auth_uid == -1 means no authentication (null sec flavor) */
475         if (req->rq_auth_uid == -1) {
477                 CWARN("null sec is used, force to be local\n");
481                 if (!req->rq_remote_realm)
482                         CWARN("local realm asked to be remote\n");
484         } else if (ask_local) {
485                 if (req->rq_remote_realm)
486                         CWARN("remote realm asked to be local\n");
/* client expressed no preference: realm membership decides */
489                 med->med_remote = (req->rq_remote_realm != 0);
/* nllu/nllg: uid/gid to present for non-local users -- TODO confirm */
492         med->med_nllu = data->ocd_nllu[0];
493         med->med_nllg = data->ocd_nllu[1];
495         med->med_initialized = 1;
/* report the final decision back to the client */
497         reply->ocd_connect_flags &= ~(OBD_CONNECT_REMOTE | OBD_CONNECT_LOCAL);
498         if (med->med_remote) {
500                 med->med_idmap = mds_idmap_alloc();
503                         CERROR("Failed to alloc idmap, following request from "
504                                "this client will be refused\n");
506                 reply->ocd_connect_flags |= OBD_CONNECT_REMOTE;
507                 CDEBUG(D_SEC, "set client as remote\n");
509                 reply->ocd_connect_flags |= OBD_CONNECT_LOCAL;
510                 CDEBUG(D_SEC, "set client as local\n");
/*
 * mds_free_export_data(): release the per-export idmap, if any.
 * NOTE(review): the guard around this body (presumably a med_idmap NULL
 * check) is elided from this view -- the LASSERT implies it only runs for
 * remote clients that actually had an idmap.
 */
516 static void mds_free_export_data(struct mds_export_data *med)
521         LASSERT(med->med_remote);
522         mds_idmap_free(med->med_idmap);
523         med->med_idmap = NULL;
526 /* Establish a connection to the MDS.
528  * This will set up an export structure for the client to hold state data about
529  * that client, like open files, the last operation number it did on the server,
/*
 * mds_connect(): class_connect() the client, allocate its mds_client_data
 * (mcd, keyed by client UUID) and register it via mds_client_add().  On
 * failure the mcd is freed and the export disconnected.  Returns 0 or a
 * negative errno.
 *
 * NOTE(review): incomplete extraction -- the initial -EINVAL RETURN, rc
 * check after class_connect(), med->med_mcd assignment and final RETURN
 * are elided from this view.
 */
532 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
533                        struct obd_uuid *cluuid, struct obd_connect_data *data,
536         struct mds_export_data *med;
537         struct mds_client_data *mcd;
538         struct obd_export *exp;
542         if (!conn || !obd || !cluuid)
545         /* XXX There is a small race between checking the list and adding a new
546          * connection for the same UUID, but the real threat (list corruption
547          * when multiple different clients connect) is solved.
549          * There is a second race between adding the export to the list, and
550          * filling in the client data below. Hence skipping the case of NULL
551          * mcd above. We should already be controlling multiple connects at the
552          * client, and we can't hold the spinlock over memory allocations
553          * without risk of deadlocking.
555         rc = class_connect(conn, obd, cluuid);
558         exp = class_conn2export(conn);
560         LASSERT(exp != NULL);
561         med = &exp->exp_mds_data;
563         OBD_ALLOC(mcd, sizeof(*mcd));
565                 CERROR("%s: out of memory for client data.\n",
567                 GOTO(out, rc = -ENOMEM);
570         memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
/* -1: append a new client record to the last_rcvd file */
573         rc = mds_client_add(obd, &obd->u.mds, med, -1);
/* error path: undo the allocation and the class-level connection */
581                 OBD_FREE(mcd, sizeof(*mcd));
582                 class_disconnect(exp, 0);
584         class_export_put(exp);
/*
 * mds_connect_post(): post-connect fixups.  On an initial (re)connect to a
 * pre-existing export, reset the stale last_rcvd fields (bug 6102).  For
 * real clients (not inter-MDS connections), bump mds_real_clients and
 * trigger the MD (LMV) connection if one is configured.
 *
 * NOTE(review): incomplete extraction -- the 'initial' guard around the
 * reset block, med->med_mcd assignment, and final RETURN are elided from
 * this view.
 */
589 static int mds_connect_post(struct obd_export *exp, unsigned initial,
592         struct obd_device *obd = exp->exp_obd;
593         struct mds_obd *mds = &obd->u.mds;
594         struct mds_export_data *med;
595         struct mds_client_data *mcd;
599         med = &exp->exp_mds_data;
603                 /* some one reconnect initially, we have to reset
604                  * data existing export can have. bug 6102 */
605                 if (mcd->mcd_last_xid != 0)
606                         CDEBUG(D_HA, "initial reconnect to existing export\n");
607                 mcd->mcd_last_transno = 0;
608                 mcd->mcd_last_xid = 0;
609                 mcd->mcd_last_result = 0;
610                 mcd->mcd_last_data = 0;
/* OBD_OPT_MDS_CONNECTION marks inter-MDS connects; skip client accounting */
613         if (!(flags & OBD_OPT_MDS_CONNECTION)) {
614                 if (!(exp->exp_flags & OBD_OPT_REAL_CLIENT)) {
615                         atomic_inc(&mds->mds_real_clients);
616                         CDEBUG(D_OTHER,"%s: peer from %s is real client (%d)\n",
617                                obd->obd_name, exp->exp_client_uuid.uuid,
618                                atomic_read(&mds->mds_real_clients));
619                         exp->exp_flags |= OBD_OPT_REAL_CLIENT;
621                 if (mds->mds_md_name)
622                         rc = mds_md_connect(obd, mds->mds_md_name);
/*
 * mds_init_export(): initialize the per-export MDS state -- the list of
 * open file handles (med_open_head) and the spinlock protecting it.
 */
627 static int mds_init_export(struct obd_export *exp)
629         struct mds_export_data *med = &exp->exp_mds_data;
631         INIT_LIST_HEAD(&med->med_open_head);
632         spin_lock_init(&med->med_open_lock);
/*
 * mds_destroy_export(): tear down a client export.  Frees the idmap,
 * destroys target-level state, then (for real client exports, not the
 * MDS's own self-export) force-closes every open file handle on the
 * export's med_open_head list -- which may also unlink orphans -- and
 * finally frees the client's last_rcvd slot via mds_client_free().
 *
 * NOTE(review): incomplete extraction -- the self-export early-exit body,
 * per-file rc checks and the final RETURN are elided from this view.
 */
636 static int mds_destroy_export(struct obd_export *export)
638         struct obd_device *obd = export->exp_obd;
639         struct mds_export_data *med = &export->exp_mds_data;
640         struct lvfs_run_ctxt saved;
644         mds_free_export_data(med);
645         target_destroy_export(export);
/* the MDS connects to itself with its own UUID; nothing to close there */
647         if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
650         push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
652         /* Close any open files (which may also cause orphan unlinking). */
653         spin_lock(&med->med_open_lock);
654         while (!list_empty(&med->med_open_head)) {
655                 struct list_head *tmp = med->med_open_head.next;
656                 struct mds_file_data *mfd =
657                         list_entry(tmp, struct mds_file_data, mfd_list);
658                 struct lustre_id sid;
660                 BDEVNAME_DECLARE_STORAGE(btmp);
662                 /* bug 1579: fix force-closing for 2.5 */
663                 struct dentry *dentry = mfd->mfd_dentry;
665                 list_del(&mfd->mfd_list);
/* drop the lock across the (sleeping) close; retaken before next item */
666                 spin_unlock(&med->med_open_lock);
668                 down(&dentry->d_inode->i_sem);
669                 rc = mds_read_inode_sid(obd, dentry->d_inode, &sid);
670                 up(&dentry->d_inode->i_sem);
672                         CERROR("Can't read inode self id, inode %lu, "
673                                "rc %d\n", dentry->d_inode->i_ino, rc);
674                         memset(&sid, 0, sizeof(sid));
677                 /* If you change this message, be sure to update
678                  * replay_single:test_46 */
679                 CERROR("force closing client file handle for %.*s (%s:"
680                        DLID4")\n", dentry->d_name.len, dentry->d_name.name,
681                        ll_bdevname(dentry->d_inode->i_sb, btmp),
684                 /* child inode->i_alloc_sem protects orphan_dec_test and
685                  * is_orphan race, mds_mfd_close drops it */
686                 DOWN_WRITE_I_ALLOC_SEM(dentry->d_inode);
687                 rc = mds_mfd_close(NULL, 0, obd, mfd,
688                                    !(export->exp_flags & OBD_OPT_FAILOVER));
690                         CDEBUG(D_INODE, "Error closing file: %d\n", rc);
691                 spin_lock(&med->med_open_lock);
693         spin_unlock(&med->med_open_lock);
694         pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* keep the last_rcvd slot on failover so the client can recover */
698         mds_client_free(export, !(export->exp_flags & OBD_OPT_FAILOVER));
/*
 * mds_disconnect(): disconnect a client export.  Handles LMV teardown when
 * the last real client leaves (or when a forced disconnect arrives while
 * clients remain), then class_disconnect()s the export, cancels its DLM
 * locks, and schedules completion of any outstanding difficult replies.
 *
 * NOTE(review): incomplete extraction -- the 'mds' local declaration, the
 * invalid-cookie RETURN body, and the final RETURN(rc) are elided from
 * this view.
 */
702 static int mds_disconnect(struct obd_export *exp, unsigned long flags)
704         unsigned long irqflags;
705         struct obd_device *obd;
710         LASSERT(exp != NULL);
711         obd = class_exp2obd(exp);
713                 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
714                        exp->exp_handle.h_cookie);
720          * suppress any inter-mds requests durring disconnecting lmv if this is
721          * detected --force mode. This is needed to avoid endless recovery.
723         if (atomic_read(&mds->mds_real_clients) > 0 &&
724             !(exp->exp_flags & OBD_OPT_REAL_CLIENT))
725                 flags |= OBD_OPT_FORCE;
727         if (!(exp->exp_flags & OBD_OPT_REAL_CLIENT)
728             && !atomic_read(&mds->mds_real_clients)) {
729                 /* there was no client at all */
730                 mds_md_disconnect(obd, flags);
733         if ((exp->exp_flags & OBD_OPT_REAL_CLIENT)
734             && atomic_dec_and_test(&mds->mds_real_clients)) {
735                 /* time to drop LMV connections */
736                 CDEBUG(D_OTHER, "%s: last real client %s disconnected. "
737                        "Disconnnect from LMV now\n",
738                        obd->obd_name, exp->exp_client_uuid.uuid);
739                 mds_md_disconnect(obd, flags);
742         spin_lock_irqsave(&exp->exp_lock, irqflags);
743         exp->exp_flags = flags;
744         spin_unlock_irqrestore(&exp->exp_lock, irqflags);
746         /* disconnect early so that clients can't keep using export */
747         rc = class_disconnect(exp, flags);
748         ldlm_cancel_locks_for_export(exp);
750         /* complete all outstanding replies */
751         spin_lock_irqsave(&exp->exp_lock, irqflags);
752         while (!list_empty(&exp->exp_outstanding_replies)) {
753                 struct ptlrpc_reply_state *rs =
754                         list_entry(exp->exp_outstanding_replies.next,
755                                    struct ptlrpc_reply_state, rs_exp_list);
756                 struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;
/* nested lock order: exp_lock outer, srv_lock inner */
758                 spin_lock(&svc->srv_lock);
759                 list_del_init(&rs->rs_exp_list);
760                 ptlrpc_schedule_difficult_reply(rs);
761                 spin_unlock(&svc->srv_lock);
763         spin_unlock_irqrestore(&exp->exp_lock, irqflags);
/*
 * mds_getstatus(): MDS_GETSTATUS handler -- pack a reply containing the
 * root lustre_id of this MDS (mds_rootid) with OBD_MD_FID set in 'valid'.
 *
 * NOTE(review): incomplete extraction -- 'size'/'rc' declarations and the
 * RETURN statements are elided from this view.
 */
767 static int mds_getstatus(struct ptlrpc_request *req)
769         struct mds_obd *mds = mds_req2mds(req);
770         struct mds_body *body;
774         size = sizeof(*body);
776         rc = lustre_pack_reply(req, 1, &size, NULL);
777         if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
778                 CERROR("mds: out of memory for message: size=%d\n", size);
779                 req->rq_status = -ENOMEM; /* superfluous? */
783         body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*body));
784         body->valid |= OBD_MD_FID;
786         memcpy(&body->id1, &mds->mds_rootid, sizeof(body->id1));
789          * the last_committed and last_xid fields are filled in for all replies
790          * already - no need to do so here also.
/*
 * mds_blocking_ast(): LDLM blocking callback for MDS-held locks.  On a
 * blocking notification it marks the lock CBPENDING and, if there are no
 * remaining readers/writers, cancels it immediately via ldlm_cli_cancel();
 * on LDLM_CB_CANCELING it does nothing.  Guards against the race where the
 * lock's blocking AST pointer was swapped by mds_intent_policy before we
 * acquired the namespace lock (see comment below).
 *
 * NOTE(review): incomplete extraction -- 'do_ast'/'rc' declarations and
 * the RETURN statements are elided from this view.
 */
795 int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
796                      void *data, int flag)
801         if (flag == LDLM_CB_CANCELING) {
802                 /* Don't need to do anything here. */
806         /* XXX layering violation!  -phil */
807         l_lock(&lock->l_resource->lr_namespace->ns_lock);
810          * get this: if mds_blocking_ast is racing with mds_intent_policy, such
811          * that mds_blocking_ast is called just before l_i_p takes the ns_lock,
812          * then by the time we get the lock, we might not be the correct
813          * blocking function anymore.  So check, and return early, if so.
815         if (lock->l_blocking_ast != mds_blocking_ast) {
816                 l_unlock(&lock->l_resource->lr_namespace->ns_lock);
820         lock->l_flags |= LDLM_FL_CBPENDING;
821         do_ast = (!lock->l_readers && !lock->l_writers);
822         l_unlock(&lock->l_resource->lr_namespace->ns_lock);
825                 struct lustre_handle lockh;
828                 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
829                 ldlm_lock2handle(lock, &lockh);
830                 rc = ldlm_cli_cancel(&lockh);
832                         CERROR("ldlm_cli_cancel: %d\n", rc);
834                 LDLM_DEBUG(lock, "Lock still has references, will be "
/*
 * mds_convert_md(): normalize an on-disk EA blob to the current format.
 * Regular files always hold LOV EAs; directories hold either a MEA (when
 * 'mea' is set) or a default-striping LOV EA.  Anything else is an invalid
 * EA format and logged as an error.
 *
 * NOTE(review): incomplete extraction -- 'rc' declaration, the final else
 * branch header and RETURN are elided from this view.
 */
840 static int mds_convert_md(struct obd_device *obd, struct inode *inode,
841                           void *md, int size, int mea)
845         if (S_ISREG(inode->i_mode)) {
846                 rc = mds_convert_lov_ea(obd, inode, md, size);
847         } else if (S_ISDIR(inode->i_mode)) {
849                         rc = mds_convert_mea_ea(obd, inode, md, size);
851                         rc = mds_convert_lov_ea(obd, inode, md, size);
854                 CERROR("Invalid EA format (nor LOV or MEA) "
855                        "is detected. Inode %lu/%u\n",
856                        inode->i_ino, inode->i_generation);
/*
 * mds_get_md(): read the striping EA (LOV or MEA, per 'mea') of 'inode'
 * into 'md' via fsfilt_get_md(), then convert it with mds_convert_md().
 * '*size' is the buffer size on entry; presumably updated to the actual EA
 * size on return -- elided from this view, verify.  'lock' controls i_sem
 * acquisition (see mds_pack_md() comment below this function).
 *
 * NOTE(review): incomplete extraction -- the locking, rc checks and RETURN
 * are elided from this view.
 */
862 int mds_get_md(struct obd_device *obd, struct inode *inode,
863                void *md, int *size, int lock, int mea)
872         rc = fsfilt_get_md(obd, inode, md, *size,
873                            (mea ? EA_MEA : EA_LOV));
875                 CERROR("Error %d reading eadata for ino %lu\n",
879                 rc = mds_convert_md(obd, inode, md,
894 /* Call with lock=1 if you want mds_pack_md to take the i_sem.
895  * Call with lock=0 if the caller has already taken the i_sem. */
/*
 * mds_pack_md(): pack the inode's striping MD (LOV EA for files, LOV/MEA
 * for directories) into reply buffer 'offset' of 'msg', setting the
 * corresponding valid bits (OBD_MD_FLEASIZE / OBD_MD_FLDIREA, plus
 * OBD_MD_MEA for split dirs) and body->eadatasize.  A zero-length reply
 * buffer means the client reserved no MD space, which is tolerated.
 *
 * NOTE(review): incomplete extraction -- 'lmm'/'lmm_size'/'rc'
 * declarations and several rc/branch checks are elided from this view.
 */
896 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
897                 struct mds_body *body, struct inode *inode, int lock, int mea)
899         struct mds_obd *mds = &obd->u.mds;
904         lmm = lustre_msg_buf(msg, offset, 0);
906                 /* Some problem with getting eadata when I sized the reply
908                 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
912         lmm_size = msg->buflens[offset];
914         /* I don't really like this, but it is a sanity check on the client
915          * MD request.  However, if the client doesn't know how much space
916          * to reserve for the MD, it shouldn't be bad to have too much space.
918         if (lmm_size > mds->mds_max_mdsize) {
919                 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
920                       inode->i_ino, lmm_size, mds->mds_max_mdsize);
924         rc = mds_get_md(obd, inode, lmm, &lmm_size, lock, mea);
926                 body->valid |= S_ISDIR(inode->i_mode) ?
927                         OBD_MD_FLDIREA : OBD_MD_FLEASIZE;
930                         body->valid |= OBD_MD_MEA;
932                 body->eadatasize = lmm_size;
/*
 * mds_pack_link(): read the symlink target of 'dentry' directly into the
 * pre-sized reply buffer at reply_off + 1 via i_op->readlink, NUL-
 * terminate it, and set OBD_MD_LINKNAME / eadatasize in the reply body.
 * A readlink result shorter than the reserved buffer (len - 1) is logged
 * as unexpected.
 *
 * NOTE(review): incomplete extraction -- 'symname'/'len'/'rc' declarations
 * and the RETURN are elided from this view.
 */
939 int mds_pack_link(struct dentry *dentry, struct ptlrpc_request *req,
940                   struct mds_body *repbody, int reply_off)
942         struct inode *inode = dentry->d_inode;
947         symname = lustre_msg_buf(req->rq_repmsg, reply_off + 1,0);
948         LASSERT(symname != NULL);
949         len = req->rq_repmsg->buflens[reply_off + 1];
951         rc = inode->i_op->readlink(dentry, symname, len);
953                 CERROR("readlink failed: %d\n", rc);
954         } else if (rc != len - 1) {
955                 CERROR ("Unexpected readlink rc %d: expecting %d\n",
959                 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
960                 repbody->valid |= OBD_MD_LINKNAME;
961                 repbody->eadatasize = rc + 1;
962                 symname[rc] = 0;        /* NULL terminate */
/*
 * mds_pack_ea(): fetch a single named extended attribute (name taken from
 * request buffer req_off + 1) into the reply buffer at reply_off + 1 via
 * i_op->getxattr, setting OBD_MD_FLEA / eadatasize on success.  -ENODATA
 * and -EOPNOTSUPP are expected outcomes and not logged as errors.
 *
 * NOTE(review): incomplete extraction -- 'ea_name'/'value'/'len'/'rc'
 * declarations, rc<0 branch header and the RETURN are elided from this
 * view.
 */
969 int mds_pack_ea(struct dentry *dentry, struct ptlrpc_request *req,
970                 struct mds_body *repbody, int req_off, int reply_off)
972         struct inode *inode = dentry->d_inode;
978         ea_name = lustre_msg_string(req->rq_reqmsg, req_off + 1, 0);
979         len = req->rq_repmsg->buflens[reply_off + 1];
981                 value = lustre_msg_buf(req->rq_repmsg, reply_off + 1, len);
984         if (inode->i_op && inode->i_op->getxattr)
985                 rc = inode->i_op->getxattr(dentry, ea_name, value, len);
988                 if (rc != -ENODATA && rc != -EOPNOTSUPP)
989                         CERROR("getxattr failed: %d", rc);
991                 repbody->valid |= OBD_MD_FLEA;
992                 repbody->eadatasize = rc;
/*
 * mds_pack_ealist(): list all extended-attribute names of the inode into
 * the reply buffer at reply_off + 1 via i_op->listxattr, setting
 * OBD_MD_FLEALIST / eadatasize on success.
 *
 * NOTE(review): incomplete extraction -- local declarations, the rc<0
 * branch header and the RETURN are elided from this view; structure
 * mirrors mds_pack_ea() above.
 */
999 int mds_pack_ealist(struct dentry *dentry, struct ptlrpc_request *req,
1000                     struct mds_body *repbody, int reply_off)
1002         struct inode *inode = dentry->d_inode;
1007         len = req->rq_repmsg->buflens[reply_off + 1];
1009                 value = lustre_msg_buf(req->rq_repmsg, reply_off + 1, len);
1012         if (inode->i_op && inode->i_op->getxattr)
1013                 rc = inode->i_op->listxattr(dentry, value, len);
1016                 CERROR("listxattr failed: %d", rc);
1018                 repbody->valid |= OBD_MD_FLEALIST;
1019                 repbody->eadatasize = rc;
/*
 * mds_pack_acl(): fetch the POSIX access ACL xattr of 'inode' into reply
 * buffer offset + 1 (size written little-endian into the 4-byte buffer at
 * 'offset'), and set OBD_MD_FLACL_ACCESS in body->valid.  Uses a stack
 * dentry because getxattr wants one.  Missing ACL (-ENODATA/-EOPNOTSUPP)
 * is not an error.
 *
 * NOTE(review): incomplete extraction -- 'buf'/'size' declarations, the
 * i_op NULL guard, error handling after getxattr and the RETURNs are
 * elided from this view.
 */
1025 int mds_pack_acl(struct obd_device *obd, struct lustre_msg *repmsg, int offset,
1026                  struct mds_body *body, struct inode *inode)
1028         struct dentry de = { .d_inode = inode };
1029         __u32 buflen, *sizep;
1034         if (!inode->i_op->getxattr)
1037         buflen = repmsg->buflens[offset + 1];
1038         buf = lustre_msg_buf(repmsg, offset + 1, buflen);
1040         size = inode->i_op->getxattr(&de, XATTR_NAME_ACL_ACCESS, buf, buflen);
1041         if (size == -ENODATA || size == -EOPNOTSUPP)
1047         sizep = lustre_msg_buf(repmsg, offset, 4);
1049                 CERROR("can't locate returned acl size buf\n");
1053         *sizep = cpu_to_le32(size);
1054         body->valid |= OBD_MD_FLACL_ACCESS;
1060  * here we take simple rule: once uid/fsuid is root, we also squash
1061  * the gid/fsgid, don't care setuid/setgid attributes.
/*
 * mds_squash_root(): apply root-squash to the request's security
 * descriptor.  If squashing is configured (mds_squash_uid != 0) and the
 * peer is not the exempt nid (mds_nosquash_nid), any root uid/fsuid (and
 * the paired gid/fsgid, per the rule above) is replaced by the configured
 * squash ids and filesystem capabilities are masked off.
 *
 * NOTE(review): incomplete extraction -- the early RETURN bodies and the
 * final RETURN are elided from this view; '*peernid' is the peer's nid.
 */
1063 int mds_squash_root(struct mds_obd *mds, struct mds_req_sec_desc *rsd,
/* squashing disabled, or peer is on the exempt nid: nothing to do */
1066         if (!mds->mds_squash_uid || *peernid == mds->mds_nosquash_nid)
/* neither uid nor fsuid is root: nothing to squash */
1069         if (rsd->rsd_uid && rsd->rsd_fsuid)
1072         CDEBUG(D_SEC, "squash req from "LPX64":"
1073                "(%u:%u-%u:%u/%x)=>(%u:%u-%u:%u/%x)\n", *peernid,
1074                rsd->rsd_uid, rsd->rsd_gid,
1075                rsd->rsd_fsuid, rsd->rsd_fsgid, rsd->rsd_cap,
1076                rsd->rsd_uid ? rsd->rsd_uid : mds->mds_squash_uid,
1077                rsd->rsd_uid ? rsd->rsd_gid : mds->mds_squash_gid,
1078                rsd->rsd_fsuid ? rsd->rsd_fsuid : mds->mds_squash_uid,
1079                rsd->rsd_fsuid ? rsd->rsd_fsgid : mds->mds_squash_gid,
1080                rsd->rsd_cap & ~CAP_FS_MASK);
1082         if (rsd->rsd_uid == 0) {
1083                 rsd->rsd_uid = mds->mds_squash_uid;
1084                 rsd->rsd_gid = mds->mds_squash_gid;
1086         if (rsd->rsd_fsuid == 0) {
1087                 rsd->rsd_fsuid = mds->mds_squash_uid;
1088                 rsd->rsd_fsgid = mds->mds_squash_gid;
1090         rsd->rsd_cap &= ~CAP_FS_MASK;
/*
 * mds_getattr_internal(): fill the getattr reply body for 'dentry'.
 * Cross-ref dentries (DCACHE_CROSS_REF, inode lives on another MDS) are
 * packed from the dentry alone; otherwise the inode attributes are packed
 * and, depending on reqbody->valid, the striping MD (LOV/MEA), symlink
 * target, named EA, EA list and/or access ACL are appended to subsequent
 * reply buffers.  Finally ids are reverse-mapped for remote clients.
 *
 * NOTE(review): incomplete extraction -- 'rc' declaration, the inode==NULL
 * RETURN, the cross-ref early-return tail and the final RETURN are elided
 * from this view.
 */
1095 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
1096                                 struct ptlrpc_request *req, int req_off,
1097                                 struct mds_body *reqbody, int reply_off)
1099         struct mds_export_data *med = &req->rq_export->u.eu_mds_data;
1100         struct inode *inode = dentry->d_inode;
1101         struct mds_body *body;
1105         if (inode == NULL && !(dentry->d_flags & DCACHE_CROSS_REF))
1108         body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
1109         LASSERT(body != NULL);                 /* caller prepped reply */
1111         if (dentry->d_flags & DCACHE_CROSS_REF) {
1112                 mds_pack_dentry2body(obd, body, dentry,
1113                                      (reqbody->valid & OBD_MD_FID) ? 1 : 0);
1114                 CDEBUG(D_OTHER, "cross reference: "DLID4"\n",
1119         mds_pack_inode2body(obd, body, inode,
1120                             (reqbody->valid & OBD_MD_FID) ? 1 : 0);
1122         if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
1123             (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
1125                 /* guessing what kind og attribute do we need. */
1126                 int is_mea = (S_ISDIR(inode->i_mode) &&
1127                               (reqbody->valid & OBD_MD_MEA) != 0);
1129                 rc = mds_pack_md(obd, req->rq_repmsg, reply_off + 1,
1130                                  body, inode, 1, is_mea);
1132                 /* if we have LOV EA data, the OST holds size, atime, mtime. */
1133                 if (!(body->valid & OBD_MD_FLEASIZE) &&
1134                     !(body->valid & OBD_MD_FLDIREA))
1135                         body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1136                                         OBD_MD_FLATIME | OBD_MD_FLMTIME);
1137         } else if (S_ISLNK(inode->i_mode) &&
1138                    (reqbody->valid & OBD_MD_LINKNAME) != 0) {
1139                 rc = mds_pack_link(dentry, req, body, reply_off);
1140         } else if (reqbody->valid & OBD_MD_FLEA) {
1141                 rc = mds_pack_ea(dentry, req, body, req_off, reply_off);
1142         } else if (reqbody->valid & OBD_MD_FLEALIST) {
1143                 rc = mds_pack_ealist(dentry, req, body, reply_off);
1146         if (reqbody->valid & OBD_MD_FLACL_ACCESS) {
/* ACL buffers sit after the EA buffer when one was reserved */
1147                 int inc = (reqbody->valid & OBD_MD_FLEASIZE) ? 2 : 1;
1148                 rc = mds_pack_acl(obd, req->rq_repmsg, reply_off + inc,
1153         mds_body_do_reverse_map(med, body);
/*
 * mds_getattr_pack_msg_cf(): size and pack the getattr reply for a
 * cross-ref dentry -- only a single mds_body buffer is needed since the
 * real inode lives on another MDS.  Honors the MDS_GETATTR_PACK failure
 * injection point.
 *
 * NOTE(review): incomplete extraction -- the fail-check RETURN and the
 * function tail are elided from this view.
 */
1158 static int mds_getattr_pack_msg_cf(struct ptlrpc_request *req,
1159                                    struct dentry *dentry,
1162         int rc = 0, size[1] = {sizeof(struct mds_body)};
1165         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
1166                 CERROR("failed MDS_GETATTR_PACK test\n");
1167                 req->rq_status = -ENOMEM;
1171         rc = lustre_pack_reply(req, 1, size, NULL);
1173                 CERROR("lustre_pack_reply failed: rc %d\n", rc);
1174                 GOTO(out, req->rq_status = rc);
/*
 * mds_getattr_pack_msg(): size the getattr reply buffers based on the
 * request's 'valid' bits before packing: buffer 0 is always the mds_body;
 * subsequent buffers are sized for the striping MD (probed with a NULL
 * fsfilt_get_md call), the symlink target, a named EA, or the EA list,
 * plus two extra buffers (size word + ACL data) when OBD_MD_FLACL_ACCESS
 * is requested.  Then lustre_pack_reply() allocates the reply.
 *
 * NOTE(review): incomplete extraction -- several bufcount++ lines, rc<0
 * branch headers, GOTOs and the function tail are elided from this view.
 */
1182 static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct dentry *de,
1185         struct inode *inode = de->d_inode;
1186         struct mds_obd *mds = mds_req2mds(req);
1187         struct mds_body *body;
1188         int rc = 0, size[4] = {sizeof(*body)}, bufcount = 1;
1191         body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
1192         LASSERT(body != NULL);                 /* checked by caller */
1193         LASSERT_REQSWABBED(req, offset);       /* swabbed by caller */
1195         if ((S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
1196             (S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))) {
/* probe the EA size with a NULL buffer; rc is the size on success */
1199                 down(&inode->i_sem);
1200                 rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0,
1201                                    ((body->valid & OBD_MD_MEA) ? EA_MEA : EA_LOV));
1204                         if (rc != -ENODATA && rc != -EOPNOTSUPP)
1205                                 CERROR("error getting inode %lu MD: rc = %d\n",
1208                 } else if (rc > mds->mds_max_mdsize) {
1210                         CERROR("MD size %d larger than maximum possible %u\n",
1211                                rc, mds->mds_max_mdsize);
1213                         size[bufcount] = rc;
1216         } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
1217                 if (inode->i_size + 1 != body->eadatasize)
1218                         CERROR("symlink size: %Lu, reply space: %d\n",
1219                                inode->i_size + 1, body->eadatasize);
1220                 size[bufcount] = min_t(int, inode->i_size+1, body->eadatasize);
1222                 CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
1223                        inode->i_size + 1, body->eadatasize);
1224         } else if ((body->valid & OBD_MD_FLEA)) {
1225                 char *ea_name = lustre_msg_string(req->rq_reqmsg,
1228                 if (inode->i_op && inode->i_op->getxattr)
1229                         rc = inode->i_op->getxattr(de, ea_name, NULL, 0);
1232                         if (rc != -ENODATA && rc != -EOPNOTSUPP)
1233                                 CERROR("error getting inode %lu EA: rc = %d\n",
1237                         size[bufcount] = min_t(int, body->eadatasize, rc);
1240         } else if (body->valid & OBD_MD_FLEALIST) {
1242                 if (inode->i_op && inode->i_op->getxattr)
1243                         rc = inode->i_op->listxattr(de, NULL, 0);
1246                         if (rc != -ENODATA && rc != -EOPNOTSUPP)
1247                                 CERROR("error getting inode %lu EA: rc = %d\n",
1251                         size[bufcount] = min_t(int, body->eadatasize, rc);
1256         /* may co-exist with OBD_MD_FLEASIZE */
1257         if (body->valid & OBD_MD_FLACL_ACCESS) {
1258                 size[bufcount++] = 4;
1259                 size[bufcount++] = xattr_acl_size(LL_ACL_MAX_ENTRIES);
1262         if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
1263                 CERROR("failed MDS_GETATTR_PACK test\n");
1264                 req->rq_status = -ENOMEM;
1265                 GOTO(out, rc = -ENOMEM);
1268         rc = lustre_pack_reply(req, bufcount, size, NULL);
1270                 CERROR("out of memory\n");
1271                 GOTO(out, req->rq_status = rc);
/*
 * mds_check_mds_num(): for a directory that has been split across MDSes
 * (has a MEA), check whether 'name' hashes to a stripe served by this MDS
 * (mea_master).  Presumably returns an error directing the client to the
 * correct MDS when it does not -- the branch body is elided, verify.
 * Frees the MEA buffer before returning.
 *
 * NOTE(review): incomplete extraction -- the mea!=NULL guard, the error
 * assignment inside the mismatch branch and the RETURN are elided from
 * this view.
 */
1279 int mds_check_mds_num(struct obd_device *obd, struct inode *inode,
1280                       char *name, int namelen)
1282         struct mea *mea = NULL;
1283         int mea_size, rc = 0;
1286         rc = mds_md_get_attr(obd, inode, &mea, &mea_size);
1291                  * dir is already splitted, check if requested filename should
1292                  * live at this MDS or at another one.
/* namelen - 1: drop the trailing NUL before hashing -- TODO confirm */
1294                 int i = mea_name2idx(mea, name, namelen - 1);
1295                 if (mea->mea_master != id_group(&mea->mea_ids[i])) {
1297                                "inapropriate MDS(%d) for %s. should be "
1298                                "%lu(%d)\n", mea->mea_master, name,
1299                                (unsigned long)id_group(&mea->mea_ids[i]), i);
1305                 OBD_FREE(mea, mea_size);
/*
 * mds_getattr_lock(): handle MDS_GETATTR_LOCK — look up a child by
 * (parent id, name) or by id alone, take DLM locks on parent/child,
 * pack the attributes into the reply, and return with the child lock
 * still held (it is handed back to the client).
 *
 * @offset:      request buffer offset of the mds_body (1 or 3; 3 for
 *               intent requests).
 * @child_lockh: in/out child lock handle; non-zero cookie on entry
 *               means this is a RESENT request reusing granted locks.
 * @child_part:  inodebits requested on the child.
 *
 * Cleanup is staged via cleanup_phase (1 = ucred/ctxt pushed,
 * 2 = dentries + locks held).  NOTE(review): lines are elided in this
 * view; the exact error paths between phases could not be verified.
 */
1309 static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
1310 struct lustre_handle *child_lockh, int child_part)
1312 struct obd_device *obd = req->rq_export->exp_obd;
1313 struct mds_obd *mds = &obd->u.mds;
1314 struct ldlm_reply *rep = NULL;
1315 struct lvfs_run_ctxt saved;
1316 struct mds_req_sec_desc *rsd;
1317 struct mds_body *body;
1318 struct dentry *dparent = NULL, *dchild = NULL;
1319 struct lvfs_ucred uc = {NULL, NULL,};
/* [0] = child/parent lock, [1] = optional update-mode lock */
1320 struct lustre_handle parent_lockh[2] = {{0}, {0}};
1321 unsigned int namesize;
1322 int rc = 0, cleanup_phase = 0, resent_req = 0, update_mode, reply_offset;
1326 LASSERT(!strcmp(obd->obd_type->typ_name, LUSTRE_MDS_NAME));
1327 MD_COUNTER_INCREMENT(obd, getattr_lock);
/* unpack + swab the per-request security descriptor first */
1329 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
1331 CERROR("Can't unpack security desc\n");
1335 /* swab now, before anyone looks inside the request. */
1336 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1337 lustre_swab_mds_body);
1339 CERROR("Can't swab mds_body\n");
1340 GOTO(cleanup, rc = -EFAULT);
1343 LASSERT_REQSWAB(req, offset + 1);
1344 name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
1346 CERROR("Can't unpack name\n");
1347 GOTO(cleanup, rc = -EFAULT);
1349 namesize = req->rq_reqmsg->buflens[offset + 1];
1351 /* namesize less than 2 means we have empty name, probably came from
1352 revalidate by cfid, so no point in having name to be set */
1356 LASSERT (offset == 1 || offset == 3);
1358 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*rep));
/* switch into the client's credentials for the VFS operations below */
1364 rc = mds_init_ucred(&uc, req, rsd);
1369 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1370 cleanup_phase = 1; /* kernel context */
1371 intent_set_disposition(rep, DISP_LOOKUP_EXECD);
1373 LASSERT(namesize > 0);
/* a pre-filled child lock handle is only legal on a RESENT request */
1374 if (child_lockh->cookie != 0) {
1375 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
/* OBD_MD_FLID alone: client wants only the inode number -> raw lookup,
 * no locks taken */
1379 if (body->valid == OBD_MD_FLID) {
1380 struct mds_body *mds_reply;
1381 int size = sizeof(*mds_reply);
1385 dparent = mds_id2dentry(obd, &body->id1, NULL);
1386 if (IS_ERR(dparent)) {
1387 rc = PTR_ERR(dparent);
1391 * the user requested ONLY the inode number, so do a raw lookup.
1393 rc = lustre_pack_reply(req, 1, &size, NULL);
1395 CERROR("out of memory\n");
1399 dir = dparent->d_inode;
1400 LASSERT(dir->i_op->lookup_raw != NULL);
/* namesize - 1: strip the trailing NUL for the raw lookup */
1401 rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
1403 mds_reply = lustre_msg_buf(req->rq_repmsg, 0,
1404 sizeof(*mds_reply));
1406 id_ino(&mds_reply->id1) = inum;
1407 mds_reply->valid = OBD_MD_FLID;
/* normal (non-resent) path: take parent + child locks together to
 * avoid lock-ordering deadlocks */
1411 if (resent_req == 0) {
1412 LASSERT(id_fid(&body->id1) != 0);
1414 rc = mds_get_parent_child_locked(obd, mds, &body->id1,
1415 parent_lockh, &dparent,
1417 MDS_INODELOCK_UPDATE,
1420 child_lockh, &dchild,
1421 LCK_PR, child_part);
1425 cleanup_phase = 2; /* dchild, dparent, locks */
1428 * let's make sure this name should leave on this mds
1431 rc = mds_check_mds_num(obd, dparent->d_inode, name, namesize);
1435 /* we have no dentry here, drop LOOKUP bit */
1436 /* FIXME: we need MDS_INODELOCK_LOOKUP or not. */
1437 child_part &= ~MDS_INODELOCK_LOOKUP;
1438 CDEBUG(D_OTHER, "%s: retrieve attrs for "DLID4"\n",
1439 obd->obd_name, OLID4(&body->id1));
1441 dchild = mds_id2locked_dentry(obd, &body->id1, NULL,
1442 LCK_PR, parent_lockh,
1445 MDS_INODELOCK_UPDATE);
1446 if (IS_ERR(dchild)) {
1447 CERROR("can't find inode with id "DLID4", err = %d\n",
1448 OLID4(&body->id1), (int)PTR_ERR(dchild));
1449 GOTO(cleanup, rc = PTR_ERR(dchild));
/* by-id path: lock was taken in parent_lockh[0]; hand it back as the
 * child lock */
1451 memcpy(child_lockh, parent_lockh, sizeof(parent_lockh[0]));
/* RESENT path: the lock was granted on a previous attempt — verify it
 * still exists and redo only the lookups, not the enqueues */
1454 struct ldlm_lock *granted_lock;
1456 DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
1457 granted_lock = ldlm_handle2lock(child_lockh);
1459 LASSERTF(granted_lock != NULL, LPU64"/%lu lockh "LPX64"\n",
1460 id_fid(&body->id1), (unsigned long)id_group(&body->id1),
1461 child_lockh->cookie);
1464 /* usual named request */
1465 dparent = mds_id2dentry(obd, &body->id1, NULL);
1466 LASSERT(!IS_ERR(dparent));
1467 dchild = ll_lookup_one_len(name, dparent, namesize - 1);
1468 LASSERT(!IS_ERR(dchild));
1470 /* client wants to get attr. by id */
1471 dchild = mds_id2dentry(obd, &body->id1, NULL);
1472 LASSERT(!IS_ERR(dchild));
1474 LDLM_LOCK_PUT(granted_lock);
1477 cleanup_phase = 2; /* dchild, dparent, locks */
1479 if (!DENTRY_VALID(dchild)) {
1480 intent_set_disposition(rep, DISP_LOOKUP_NEG);
1482 * in the intent case, the policy clears this error: the
1483 * disposition is enough.
1488 intent_set_disposition(rep, DISP_LOOKUP_POS);
/* reply may not have been packed yet (non-intent case) */
1491 if (req->rq_repmsg == NULL) {
1492 if (dchild->d_flags & DCACHE_CROSS_REF)
1493 rc = mds_getattr_pack_msg_cf(req, dchild, offset);
1495 rc = mds_getattr_pack_msg(req, dchild, offset);
1497 CERROR ("mds_getattr_pack_msg: %d\n", rc);
1502 rc = mds_getattr_internal(obd, dchild, req, offset, body, reply_offset);
1503 GOTO(cleanup, rc); /* returns the lock to the client */
1506 switch (cleanup_phase) {
1508 if (resent_req == 0) {
/* on error drop the child lock we would otherwise return */
1509 if (rc && DENTRY_VALID(dchild))
1510 ldlm_lock_decref(child_lockh, LCK_PR);
1512 ldlm_lock_decref(parent_lockh, LCK_PR);
1514 if (parent_lockh[1].cookie != 0)
1515 ldlm_lock_decref(parent_lockh + 1, update_mode);
1522 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1524 mds_exit_ucred(&uc);
/*
 * mds_getattr(): handle MDS_GETATTR — fetch attributes of the inode
 * named by body->id1, with no DLM locking (the plain-getattr path).
 * Unpacks the security descriptor and mds_body, switches to the
 * client's credentials, resolves the id to a dentry, packs the reply
 * and fills it via mds_getattr_internal().
 * NOTE(review): elided lines hide the error-path GOTOs between the
 * visible statements.
 */
1529 static int mds_getattr(struct ptlrpc_request *req, int offset)
1531 struct obd_device *obd = req->rq_export->exp_obd;
1532 struct lvfs_run_ctxt saved;
1534 struct mds_req_sec_desc *rsd;
1535 struct mds_body *body;
1536 struct lvfs_ucred uc = {NULL, NULL,};
1540 MD_COUNTER_INCREMENT(obd, getattr);
1542 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
1544 CERROR("Can't unpack security desc\n");
1548 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1549 lustre_swab_mds_body);
1551 CERROR ("Can't unpack body\n");
/* on ucred failure the partially initialized ucred is torn down here */
1555 rc = mds_init_ucred(&uc, req, rsd);
1557 mds_exit_ucred(&uc);
1561 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1562 de = mds_id2dentry(obd, &body->id1, NULL);
1564 rc = req->rq_status = PTR_ERR(de);
/* size the reply buffers according to the inode's EA/symlink needs */
1568 rc = mds_getattr_pack_msg(req, de, offset);
1570 CERROR("mds_getattr_pack_msg: %d\n", rc);
1574 req->rq_status = mds_getattr_internal(obd, de, req, offset, body, 0);
1579 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1580 mds_exit_ucred(&uc);
/*
 * mds_obd_statfs(): fill @osfs with filesystem statistics from the MDS
 * backing superblock.  fsfilt_statfs() caches results up to @max_age
 * jiffies old; obd_osfs_lock serializes access to the cached copy in
 * obd->obd_osfs.
 */
1584 static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
1585 unsigned long max_age)
1590 spin_lock(&obd->obd_osfs_lock);
1591 rc = fsfilt_statfs(obd, obd->u.mds.mds_sb, max_age);
1593 memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
1594 spin_unlock(&obd->obd_osfs_lock);
/*
 * mds_statfs(): handle MDS_STATFS — pack a single obd_statfs reply
 * buffer and fill it from the (briefly cached) backend statistics.
 * OBD_FAIL_MDS_STATFS_LCW_SLEEP deliberately stalls long enough to
 * trip the service watchdog, for failure testing.
 */
1599 static int mds_statfs(struct ptlrpc_request *req)
1601 struct obd_device *obd = req->rq_export->exp_obd;
1602 int rc, size = sizeof(struct obd_statfs);
1605 /* This will trigger a watchdog timeout */
1606 OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
1607 (MDS_SERVICE_WATCHDOG_TIMEOUT / 1000) + 1);
1609 rc = lustre_pack_reply(req, 1, &size, NULL);
1610 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
1611 CERROR("mds: statfs lustre_pack_reply failed: rc = %d\n", rc);
1615 OBD_COUNTER_INCREMENT(obd, statfs);
1617 /* We call this so that we can cache a bit - 1 jiffie worth */
1618 rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, 0, size),
1621 CERROR("mds_obd_statfs failed: rc %d\n", rc);
1627 req->rq_status = rc;
/*
 * mds_sync(): handle MDS_SYNC — sync either the whole backing
 * filesystem (body->id1 ino == 0) or a single inode, returning the
 * inode's post-sync attributes in the reply body in the latter case.
 */
1631 static int mds_sync(struct ptlrpc_request *req, int offset)
1633 struct obd_device *obd = req->rq_export->exp_obd;
1634 struct mds_obd *mds = &obd->u.mds;
1635 struct mds_body *body;
1636 int rc, size = sizeof(*body);
1639 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1640 lustre_swab_mds_body);
1642 GOTO(out, rc = -EPROTO);
1644 rc = lustre_pack_reply(req, 1, &size, NULL);
1645 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK)) {
1646 CERROR("fsync lustre_pack_reply failed: rc = %d\n", rc);
1650 if (id_ino(&body->id1) == 0) {
1651 /* an id of zero is taken to mean "sync whole filesystem" */
1652 rc = fsfilt_sync(obd, mds->mds_sb);
1656 /* just any file to grab fsync method - "file" arg unused */
1657 struct file *file = mds->mds_rcvd_filp;
1658 struct mds_body *rep_body;
1661 de = mds_id2dentry(obd, &body->id1, NULL);
1663 GOTO(out, rc = PTR_ERR(de));
/* fsync(file=NULL, dentry, datasync=1): backend fsync ignores the
 * file argument — borrowed method pointer from mds_rcvd_filp */
1665 rc = file->f_op->fsync(NULL, de, 1);
1669 rep_body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep_body));
/* OBD_MD_FID set => also pack the fid portion of the id */
1670 mds_pack_inode2body(obd, rep_body, de->d_inode,
1671 (body->valid & OBD_MD_FID) ? 1 : 0);
1677 req->rq_status = rc;
1681 /* mds_readpage does not take a DLM lock on the inode, because the client must
1682 * already have a PR lock.
1684 * If we were to take another one here, a deadlock will result, if another
1685 * thread is already waiting for a PW lock. */
/*
 * mds_readpage(): handle MDS_READPAGE — bulk-read one directory block
 * back to the client.  Request field reuse (historical protocol quirk):
 * body->size carries the OFFSET and body->nlink carries the BYTE COUNT;
 * both must be block-aligned.  Replies with the current inode size and
 * streams the data via mds_sendpage().
 */
1686 static int mds_readpage(struct ptlrpc_request *req, int offset)
1688 struct obd_device *obd = req->rq_export->exp_obd;
1689 struct vfsmount *mnt;
1692 struct mds_req_sec_desc *rsd;
1693 struct mds_body *body, *repbody;
1694 struct lvfs_run_ctxt saved;
1695 int rc, size = sizeof(*repbody);
1696 struct lvfs_ucred uc = {NULL, NULL,};
1699 rc = lustre_pack_reply(req, 1, &size, NULL);
1700 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) {
1701 CERROR("mds: out of memory\n");
1702 GOTO(out, rc = -ENOMEM);
1705 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
1707 CERROR("Can't unpack security desc\n");
1708 GOTO (out, rc = -EFAULT);
1711 body = lustre_swab_reqbuf(req, offset, sizeof(*body),
1712 lustre_swab_mds_body);
1714 CERROR("Can't unpack body\n");
1715 GOTO (out, rc = -EFAULT);
1718 rc = mds_init_ucred(&uc, req, rsd);
1723 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/* need the vfsmount too so we can dentry_open() below */
1724 de = mds_id2dentry(obd, &body->id1, &mnt);
1726 GOTO(out_pop, rc = PTR_ERR(de));
1728 CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);
1730 file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
1731 /* note: in case of an error, dentry_open puts dentry */
1733 GOTO(out_pop, rc = PTR_ERR(file));
1735 /* body->size is actually the offset -eeb */
1736 if ((body->size & (de->d_inode->i_blksize - 1)) != 0) {
1737 CERROR("offset "LPU64" not on a block boundary of %lu\n",
1738 body->size, de->d_inode->i_blksize);
1739 GOTO(out_file, rc = -EFAULT);
1742 /* body->nlink is actually the #bytes to read -eeb */
1743 if (body->nlink & (de->d_inode->i_blksize - 1)) {
1744 CERROR("size %u is not multiple of blocksize %lu\n",
1745 body->nlink, de->d_inode->i_blksize);
1746 GOTO(out_file, rc = -EFAULT);
1749 repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*repbody));
1750 repbody->size = file->f_dentry->d_inode->i_size;
1751 repbody->valid = OBD_MD_FLSIZE;
1753 /* to make this asynchronous make sure that the handling function
1754 doesn't send a reply when this function completes. Instead a
1755 callback function would send the reply */
1756 /* body->size is actually the offset -eeb */
1757 rc = mds_sendpage(req, file, body->size, body->nlink);
1761 filp_close(file, 0);
1763 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
1765 mds_exit_ucred(&uc);
1766 req->rq_status = rc;
1770 /* update master MDS ID, which is stored in local inode EA. */
/*
 * mds_update_mid(): write a new master lustre_id (@data, cast to
 * struct lustre_id) into the EA of the object named by @id, inside a
 * SETATTR journal transaction.  @data_len is currently unused in the
 * visible code.  Exported for use by other MDS modules.
 */
1771 int mds_update_mid(struct obd_device *obd, struct lustre_id *id,
1772 void *data, int data_len)
1774 struct mds_obd *mds = &obd->u.mds;
1775 struct dentry *dentry;
1783 dentry = mds_id2dentry(obd, id, NULL);
1785 GOTO(out, rc = PTR_ERR(dentry));
/* negative dentry: the object vanished between lookup and here */
1787 if (!dentry->d_inode) {
1788 CERROR("Can't find object "DLID4".\n",
1790 GOTO(out_dentry, rc = -EINVAL);
1793 handle = fsfilt_start(obd, dentry->d_inode,
1794 FSFILT_OP_SETATTR, NULL);
1796 GOTO(out_dentry, rc = PTR_ERR(handle));
1798 rc = mds_update_inode_mid(obd, dentry->d_inode, handle,
1799 (struct lustre_id *)data);
1801 CERROR("Can't update inode "DLID4" master id, "
1802 "error = %d.\n", OLID4(id), rc);
1803 GOTO(out_commit, rc);
/* commit the transaction opened by fsfilt_start() above */
1808 fsfilt_commit(obd, mds->mds_sb, dentry->d_inode,
1815 EXPORT_SYMBOL(mds_update_mid);
1817 /* read master MDS ID, which is stored in local inode EA. */
/*
 * mds_read_mid(): read the master lustre_id from the EA of the object
 * named by @id into @data (cast to struct lustre_id).  Read-only —
 * no transaction needed, only i_sem around the EA read.  @data_len is
 * unused in the visible code.  Exported symbol.
 */
1818 int mds_read_mid(struct obd_device *obd, struct lustre_id *id,
1819 void *data, int data_len)
1821 struct dentry *dentry;
1828 dentry = mds_id2dentry(obd, id, NULL);
1830 GOTO(out, rc = PTR_ERR(dentry));
1832 if (!dentry->d_inode) {
1833 CERROR("Can't find object "DLID4".\n",
1835 GOTO(out_dentry, rc = -EINVAL);
/* i_sem serializes against concurrent EA updates */
1838 down(&dentry->d_inode->i_sem);
1839 rc = mds_read_inode_mid(obd, dentry->d_inode,
1840 (struct lustre_id *)data);
1841 up(&dentry->d_inode->i_sem);
1843 CERROR("Can't read inode "DLID4" master id, "
1844 "error = %d.\n", OLID4(id), rc);
1845 GOTO(out_dentry, rc);
1854 EXPORT_SYMBOL(mds_read_mid);
/*
 * mds_read_md(): read the striping/MEA metadata EA of the object named
 * by @id into a freshly allocated buffer returned via *data; *datalen
 * is sized by obd_packmd() (MD export for directories, DT export for
 * files).  Caller owns the returned buffer.  Exported symbol.
 */
1856 int mds_read_md(struct obd_device *obd, struct lustre_id *id,
1857 char **data, int *datalen)
1859 struct dentry *dentry;
1860 struct mds_obd *mds = &obd->u.mds;
1861 int rc = 0, mea = 0;
1868 dentry = mds_id2dentry(obd, id, NULL);
1870 GOTO(out, rc = PTR_ERR(dentry));
1872 if (!dentry->d_inode) {
1873 CERROR("Can't find object "DLID4".\n",
1875 GOTO(out_dentry, rc = -EINVAL);
/* directories carry a MEA (dir-split EA); files carry LOV striping */
1877 if (S_ISDIR(dentry->d_inode->i_mode)) {
1878 *datalen = obd_packmd(mds->mds_md_exp, NULL, NULL);
1881 *datalen = obd_packmd(mds->mds_dt_exp, NULL, NULL);
1884 OBD_ALLOC(ea, *datalen);
/* NOTE(review): on OBD_ALLOC failure this GOTO uses PTR_ERR(dentry),
 * which is not an errno here — looks like it should be -ENOMEM.
 * Elided lines prevent confirming; verify against full source. */
1887 GOTO(out_dentry, rc = PTR_ERR(dentry));
1890 down(&dentry->d_inode->i_sem);
1891 rc = fsfilt_get_md(obd, dentry->d_inode, *data, *datalen,
1892 (mea ? EA_MEA : EA_LOV));
1893 up(&dentry->d_inode->i_sem);
1896 CERROR("Error %d reading eadata for ino %lu\n",
1897 rc, dentry->d_inode->i_ino);
1903 EXPORT_SYMBOL(mds_read_md);
/*
 * mds_reint(): handle MDS_REINT — unpack the security descriptor and
 * the update record for a metadata-modifying operation (setattr,
 * create, link, unlink, rename, open) and dispatch to mds_reint_rec().
 * The update record is heap-allocated because it is large.
 */
1905 int mds_reint(struct ptlrpc_request *req, int offset,
1906 struct lustre_handle *lockh)
1908 struct mds_update_record *rec;
1909 struct mds_req_sec_desc *rsd;
1913 OBD_ALLOC(rec, sizeof(*rec));
1917 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
1919 CERROR("Can't unpack security desc\n");
1920 GOTO(out, rc = -EFAULT);
1923 rc = mds_update_unpack(req, offset, rec);
1924 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
1925 CERROR("invalid record\n");
1926 GOTO(out, req->rq_status = -EINVAL);
1929 rc = mds_init_ucred(&rec->ur_uc, req, rsd);
1934 /* rc will be used to interrupt a for loop over multiple records */
1935 rc = mds_reint_rec(rec, offset, req, lockh);
1938 mds_exit_ucred(&rec->ur_uc);
1939 OBD_FREE(rec, sizeof(*rec));
/*
 * mds_filter_recovery_request(): decide what to do with @req while the
 * MDS is in recovery.  Connect/disconnect (and MDS_SYNC, needed during
 * unmount) are allowed through; replayable requests are queued via
 * target_queue_recovery_request(); anything else is bounced back to
 * the client with -EAGAIN so it retries after recovery completes.
 * *process: 1 = handle now, 0 = queued/dropped.
 */
1943 static int mds_filter_recovery_request(struct ptlrpc_request *req,
1944 struct obd_device *obd, int *process)
1946 switch (req->rq_reqmsg->opc) {
1947 case MDS_CONNECT: /* This will never get here, but for completeness. */
1948 case OST_CONNECT: /* This will never get here, but for completeness. */
1949 case MDS_DISCONNECT:
1950 case OST_DISCONNECT:
1955 case MDS_SYNC: /* used in unmounting */
1960 *process = target_queue_recovery_request(req, obd);
1964 DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
1966 /* XXX what should we set rq_status to here? */
1967 req->rq_status = -EAGAIN;
1968 RETURN(ptlrpc_error(req));
/* Human-readable names for REINT_* opcodes, indexed by opcode
 * (GNU-style designated array initializers), used in debug output. */
1972 static char *reint_names[] = {
1973 [REINT_SETATTR] "setattr",
1974 [REINT_CREATE] "create",
1975 [REINT_LINK] "link",
1976 [REINT_UNLINK] "unlink",
1977 [REINT_RENAME] "rename",
1978 [REINT_OPEN] "open",
/* Attribute-validity mask used when packing an obdo from an inode in
 * the object-create paths below (type/mode/generation, size/blocks/
 * blksize, timestamps, ...; continuation elided in this view). */
1981 #define FILTER_VALID_FLAGS (OBD_MD_FLTYPE | OBD_MD_FLMODE | OBD_MD_FLGENER | \
1982 OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ| \
1983 OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME| \
/*
 * reconstruct_create(): rebuild the reply for a RESENT mdt_obj_create
 * whose transaction already committed.  Restores rc/transno/disposition
 * from the client's mds_client_data, re-looks-up the created inode by
 * the ino saved in mcd_last_data, and re-packs the ost_body reply
 * (ino/generation plus the self-id read back from the inode EA).
 */
1986 static void reconstruct_create(struct ptlrpc_request *req)
1988 struct mds_export_data *med = &req->rq_export->exp_mds_data;
1989 struct mds_client_data *mcd = med->med_mcd;
1990 struct dentry *dentry;
1991 struct ost_body *body;
1992 struct lustre_id id;
1996 /* copy rc, transno and disp; steal locks */
1997 mds_req_from_mcd(req, mcd);
/* original request failed: nothing more to reconstruct */
1998 if (req->rq_status) {
2006 id_ino(&id) = mcd->mcd_last_data;
2007 LASSERT(id_ino(&id) != 0);
2009 dentry = mds_id2dentry(req2obd(req), &id, NULL);
2010 if (IS_ERR(dentry)) {
2011 CERROR("can't find inode "LPU64"\n", id_ino(&id));
2012 req->rq_status = PTR_ERR(dentry);
2017 CWARN("reconstruct reply for x"LPU64" (remote ino) "LPU64" -> %lu/%u\n",
2018 req->rq_xid, id_ino(&id), dentry->d_inode->i_ino,
2019 dentry->d_inode->i_generation);
2021 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
2022 obdo_from_inode(&body->oa, dentry->d_inode, FILTER_VALID_FLAGS);
2023 body->oa.o_id = dentry->d_inode->i_ino;
2024 body->oa.o_generation = dentry->d_inode->i_generation;
2025 body->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER;
/* read the self-id EA back so the reply carries fid + mds group */
2027 down(&dentry->d_inode->i_sem);
2028 rc = mds_read_inode_sid(req2obd(req), dentry->d_inode, &id);
2029 up(&dentry->d_inode->i_sem);
2031 CERROR("Can't read inode self id, inode %lu, "
2032 "rc %d\n", dentry->d_inode->i_ino, rc);
2036 body->oa.o_fid = id_fid(&id);
2037 body->oa.o_mds = id_group(&id);
/*
 * mds_inode_init_acl(): initialize the ACLs of a newly created inode
 * from the parent's default ACL (@xattr/@xattr_size).  For directories
 * the raw default ACL is stored as the new dir's default ACL too; then
 * the access ACL is derived with posix_acl_create_masq(), the inode
 * mode updated if masking changed it, and the result stored as the
 * access-ACL xattr.
 *
 * NOTE(review) — two suspicious spots, unconfirmable from this elided
 * view: (1) when posix_acl_from_xattr() returns NULL the function
 * returns PTR_ERR(NULL) == 0, i.e. success on bad input; (2) the
 * CERROR in the posix_acl_valid() branch prints `rc` before any
 * visible assignment.  Verify against the full source.
 */
2043 static int mds_inode_init_acl(struct obd_device *obd, void *handle,
2044 struct dentry *de, void *xattr, int xattr_size)
2046 struct inode *inode = de->d_inode;
2047 struct posix_acl *acl;
2054 LASSERT(xattr_size > 0);
2056 if (!inode->i_op->getxattr || !inode->i_op->setxattr) {
2057 CERROR("backend fs dosen't support xattr\n");
2061 /* set default acl */
2062 if (S_ISDIR(inode->i_mode)) {
2063 rc = inode->i_op->setxattr(de, XATTR_NAME_ACL_DEFAULT,
2064 xattr, xattr_size, 0);
2066 CERROR("set default acl err: %d\n", rc);
2071 /* set access acl */
2072 acl = posix_acl_from_xattr(xattr, xattr_size);
2073 if (acl == NULL || IS_ERR(acl)) {
2074 CERROR("insane attr data\n");
2075 return PTR_ERR(acl);
/* posix_acl_valid() returns 0 for a valid ACL, nonzero otherwise */
2078 if (posix_acl_valid(acl)) {
2079 CERROR("default acl not valid: %d\n", rc);
2084 mode = inode->i_mode;
/* apply the create mask: may clear group/other bits in @mode */
2085 rc = posix_acl_create_masq(acl, &mode);
2087 CERROR("create masq err %d\n", rc);
2091 if (inode->i_mode != mode) {
2092 struct iattr iattr = { .ia_valid = ATTR_MODE,
2096 rc2 = fsfilt_setattr(obd, de, handle, &iattr, 0);
2098 CERROR("setattr mode err: %d\n", rc2);
2105 /* we didn't change acl except mode bits of some
2106 * entries, so should be fit into original size.
2108 rc = posix_acl_to_xattr(acl, xattr, xattr_size);
2111 rc = inode->i_op->setxattr(de, XATTR_NAME_ACL_ACCESS,
2112 xattr, xattr_size, 0);
2114 CERROR("set access acl err: %d\n", rc);
2117 posix_acl_release(acl);
/*
 * mdt_obj_create(): create a remote directory object on this MDS under
 * the hidden "unnamed" directory, on behalf of another MDS (cross-MDS
 * mkdir / dir-split).  Main phases:
 *   1. unpack ost_body (+ transparently packed default-ACL buffer);
 *   2. RESENT handling via reconstruct_create();
 *   3. replay/recreate short-circuit: if the requested ino/gen already
 *      exists, just re-pack its attributes and self-id;
 *   4. otherwise start a MKDIR transaction, mkdir under a random
 *      temporary name, init ACLs, restore ino/gen on replay, write or
 *      allocate the inode self-id EA, then unlink the temporary name
 *      (the object stays alive, referenced only by id);
 *   5. mark the new dir non-splittable (MEA) or split it immediately
 *      if the caller passed an EA size.
 * Cleanup phases: 1 = transaction open, 2 = object created (transno
 * finished, repack required for replay).
 * NOTE(review): many error-path lines are elided in this view.
 */
2121 static int mdt_obj_create(struct ptlrpc_request *req)
2123 struct obd_device *obd = req->rq_export->exp_obd;
2124 struct mds_obd *mds = &obd->u.mds;
2125 struct ost_body *body, *repbody;
2128 char idname[LL_ID_NAMELEN];
2129 int size = sizeof(*repbody);
2130 struct inode *parent_inode;
2131 struct lvfs_run_ctxt saved;
2132 int rc, cleanup_phase = 0;
2133 struct dentry *new = NULL;
2134 struct dentry_params dp;
2135 int mealen, flags = 0;
2136 struct lvfs_ucred uc;
2137 struct lustre_id id;
2139 void *handle = NULL;
2140 unsigned long cr_inum = 0;
2143 DEBUG_REQ(D_HA, req, "create remote object");
2144 parent_inode = mds->mds_unnamed_dir->d_inode;
2146 body = lustre_swab_reqbuf(req, 0, sizeof(*body),
2147 lustre_swab_ost_body);
2151 /* acl data is packed transparently, no swab here */
2152 LASSERT(req->rq_reqmsg->bufcount >= 2);
2153 acl_size = req->rq_reqmsg->buflens[1];
2155 acl = lustre_msg_buf(req->rq_reqmsg, 1, acl_size);
2157 CERROR("No default acl buf?\n");
2162 rc = lustre_pack_reply(req, 1, &size, NULL);
2166 MDS_CHECK_RESENT(req, reconstruct_create(req));
/* build credentials from the obdo's uid/gid (no group info) */
2169 uc.luc_ginfo = NULL;
2170 uc.luc_uid = body->oa.o_uid;
2171 uc.luc_gid = body->oa.o_gid;
2172 uc.luc_fsuid = body->oa.o_uid;
2173 uc.luc_fsgid = body->oa.o_gid;
2175 push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
2176 repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*repbody));
2178 /* in REPLAY case inum should be given (client or other MDS fills it) */
2179 if (body->oa.o_id && ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
2180 (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))) {
2182 * this is re-create request from MDS holding directory name.
2183 * we have to lookup given ino/gen first. if it exists (good
2184 * case) then there is nothing to do. if it does not then we
2185 * have to recreate it.
2187 id_ino(&id) = body->oa.o_id;
2188 id_gen(&id) = body->oa.o_generation;
2190 new = mds_id2dentry(obd, &id, NULL);
2191 if (!IS_ERR(new) && new->d_inode) {
2192 struct lustre_id sid;
2194 CWARN("mkdir() repairing is on its way: %lu/%lu\n",
2195 (unsigned long)id_ino(&id), (unsigned long)id_gen(&id));
2197 obdo_from_inode(&repbody->oa, new->d_inode,
2198 FILTER_VALID_FLAGS);
2200 repbody->oa.o_id = new->d_inode->i_ino;
2201 repbody->oa.o_generation = new->d_inode->i_generation;
2202 repbody->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER;
2205 down(&new->d_inode->i_sem);
2206 rc = mds_read_inode_sid(obd, new->d_inode, &sid);
2207 up(&new->d_inode->i_sem);
2209 CERROR("Can't read inode self id "
2210 "inode %lu, rc %d.\n",
2211 new->d_inode->i_ino, rc);
2215 repbody->oa.o_fid = id_fid(&sid);
2216 repbody->oa.o_mds = id_group(&sid);
2217 LASSERT(id_fid(&sid) != 0);
2220 * here we could use fid passed in body->oa.o_fid and
2221 * thus avoid mds_read_inode_sid().
2223 cr_inum = new->d_inode->i_ino;
2224 GOTO(cleanup, rc = 0);
/* normal create: hold the unnamed-dir i_sem across the whole
 * mkdir + EA-update + unlink sequence */
2228 down(&parent_inode->i_sem);
2229 handle = fsfilt_start(obd, parent_inode, FSFILT_OP_MKDIR, NULL);
2230 if (IS_ERR(handle)) {
2231 up(&parent_inode->i_sem);
2232 CERROR("fsfilt_start() failed, rc = %d\n",
2233 (int)PTR_ERR(handle));
2234 GOTO(cleanup, rc = PTR_ERR(handle));
2236 cleanup_phase = 1; /* transaction */
/* random temporary name; the entry is removed again after create */
2239 rc = sprintf(idname, "%u.%u", ll_insecure_random_int(), current->pid);
2240 new = lookup_one_len(idname, mds->mds_unnamed_dir, rc);
2242 CERROR("%s: can't lookup new inode (%s) for mkdir: %d\n",
2243 obd->obd_name, idname, (int) PTR_ERR(new));
2244 fsfilt_commit(obd, mds->mds_sb, new->d_inode, handle, 0);
2245 up(&parent_inode->i_sem);
2246 RETURN(PTR_ERR(new));
2247 } else if (new->d_inode) {
2248 CERROR("%s: name exists. repeat\n", obd->obd_name);
/* dentry_params let the fs layer pin the replayed inode number */
2252 new->d_fsdata = (void *)&dp;
2256 if ((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) ||
2257 (body->oa.o_flags & OBD_FL_RECREATE_OBJS)) {
2258 LASSERT(body->oa.o_id != 0);
2259 dp.p_inum = body->oa.o_id;
2260 DEBUG_REQ(D_HA, req, "replay create obj %lu/%lu",
2261 (unsigned long)body->oa.o_id,
2262 (unsigned long)body->oa.o_generation);
2265 rc = vfs_mkdir(parent_inode, new, body->oa.o_mode);
2268 rc = mds_inode_init_acl(obd, handle, new,
2271 up(&parent_inode->i_sem);
2275 if ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
2276 lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
/* restore the original generation on replay/recreate */
2277 new->d_inode->i_generation = body->oa.o_generation;
2278 mark_inode_dirty(new->d_inode);
2281 * avoiding asserts in cache flush case, as
2282 * @body->oa.o_id should be zero.
2284 if (body->oa.o_id) {
2285 LASSERTF(body->oa.o_id == new->d_inode->i_ino,
2286 "BUG 3550: failed to recreate obj "
2287 LPU64" -> %lu\n", body->oa.o_id,
2288 new->d_inode->i_ino);
2290 LASSERTF(body->oa.o_generation ==
2291 new->d_inode->i_generation,
2292 "BUG 3550: failed to recreate obj/gen "
2293 LPU64"/%u -> %lu/%u\n", body->oa.o_id,
2294 body->oa.o_generation,
2295 new->d_inode->i_ino,
2296 new->d_inode->i_generation);
2300 obdo_from_inode(&repbody->oa, new->d_inode, FILTER_VALID_FLAGS);
2301 repbody->oa.o_id = new->d_inode->i_ino;
2302 repbody->oa.o_generation = new->d_inode->i_generation;
2303 repbody->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER | OBD_MD_FID;
2305 if ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
2306 lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
/* replay: reuse the fid the caller supplied rather than allocating */
2307 id_group(&id) = mds->mds_num;
2309 LASSERT(body->oa.o_fid != 0);
2310 id_fid(&id) = body->oa.o_fid;
2312 LASSERT(body->oa.o_id != 0);
2313 id_ino(&id) = repbody->oa.o_id;
2314 id_gen(&id) = repbody->oa.o_generation;
2316 down(&new->d_inode->i_sem);
2317 rc = mds_update_inode_sid(obd, new->d_inode, handle, &id);
2318 up(&new->d_inode->i_sem);
2321 * make sure, that fid is up-to-date.
2323 mds_set_last_fid(obd, id_fid(&id));
2326 * allocate new sid, as object is created from scratch
2327 * and this is not replay.
2329 down(&new->d_inode->i_sem);
2330 rc = mds_alloc_inode_sid(obd, new->d_inode, handle, &id);
2331 up(&new->d_inode->i_sem);
2334 CERROR("Can't update lustre ID for inode %lu, "
2335 "error = %d\n", new->d_inode->i_ino, rc);
2339 /* initializing o_fid after it is allocated. */
2340 repbody->oa.o_fid = id_fid(&id);
2341 repbody->oa.o_mds = id_group(&id);
/* drop the temporary name: the object survives, referenced by id */
2343 rc = fsfilt_del_dir_entry(obd, new);
2344 up(&parent_inode->i_sem);
2346 CERROR("can't remove name for object: %d\n", rc);
2350 cleanup_phase = 2; /* created directory object */
2352 CDEBUG(D_OTHER, "created dirobj: %lu/%lu mode %o\n",
2353 (unsigned long)new->d_inode->i_ino,
2354 (unsigned long)new->d_inode->i_generation,
2355 (unsigned)new->d_inode->i_mode);
2356 cr_inum = new->d_inode->i_ino;
2358 up(&parent_inode->i_sem);
2359 CERROR("%s: can't create dirobj: %d\n", obd->obd_name, rc);
2363 if (body->oa.o_valid & OBD_MD_FLID) {
2364 /* this is new object for splitted dir. We have to prevent
2365 * recursive splitting on it -bzzz */
2366 mealen = obd_size_diskmd(mds->mds_md_exp, NULL);
2368 OBD_ALLOC(mea, mealen);
2370 GOTO(cleanup, rc = -ENOMEM);
/* MEA_MAGIC_ALL_CHARS with master 0 marks "do not split further" */
2372 mea->mea_magic = MEA_MAGIC_ALL_CHARS;
2373 mea->mea_master = 0;
2376 down(&new->d_inode->i_sem);
2377 rc = fsfilt_set_md(obd, new->d_inode, handle,
2378 mea, mealen, EA_MEA);
2379 up(&new->d_inode->i_sem);
2381 CERROR("fsfilt_set_md() failed, "
2384 OBD_FREE(mea, mealen);
2386 CDEBUG(D_OTHER, "%s: mark non-splittable %lu/%u - %d\n",
2387 obd->obd_name, new->d_inode->i_ino,
2388 new->d_inode->i_generation, flags);
2389 } else if (body->oa.o_easize) {
2390 /* we pass LCK_EX to split routine to signal that we have
2391 * exclusive access to the directory. simple because nobody
2392 * knows it already exists -bzzz */
2393 rc = mds_try_to_split_dir(obd, new, NULL,
2394 body->oa.o_easize, LCK_EX);
2396 CERROR("Can't split directory %lu, error = %d.\n",
2397 new->d_inode->i_ino, rc);
2405 switch (cleanup_phase) {
2406 case 2: /* object has been created, but we'll may want to replay it later */
2408 ptlrpc_require_repack(req);
2409 case 1: /* transaction */
/* record transno + commit (deliberate fallthrough from case 2) */
2410 rc = mds_finish_transno(mds, parent_inode, handle,
2415 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/*
 * mdt_get_info(): handle MDS_GET_INFO for the keys "mdsize", "mdsnum"
 * and "rootid".  Validates the key string, packs a reply buffer of the
 * appropriate size and delegates the actual lookup to obd_get_info().
 */
2419 static int mdt_get_info(struct ptlrpc_request *req)
2421 struct obd_export *exp = req->rq_export;
2426 key = lustre_msg_buf(req->rq_reqmsg, 0, 1);
2428 DEBUG_REQ(D_HA, req, "no get_info key");
2431 keylen = req->rq_reqmsg->buflens[0];
/* reject anything other than the three supported keys */
2433 if ((keylen < strlen("mdsize") || strcmp(key, "mdsize") != 0) &&
2434 (keylen < strlen("mdsnum") || strcmp(key, "mdsnum") != 0) &&
2435 (keylen < strlen("rootid") || strcmp(key, "rootid") != 0))
2438 if (keylen >= strlen("rootid") && !strcmp(key, "rootid")) {
2439 struct lustre_id *reply;
2440 int size = sizeof(*reply);
2442 rc = lustre_pack_reply(req, 1, &size, NULL);
2446 reply = lustre_msg_buf(req->rq_repmsg, 0, size);
2447 rc = obd_get_info(exp, keylen, key, (__u32 *)&size, reply);
/* mdsize/mdsnum path (reply type declared in elided lines) */
2450 int size = sizeof(*reply);
2452 rc = lustre_pack_reply(req, 1, &size, NULL);
2456 reply = lustre_msg_buf(req->rq_repmsg, 0, size);
2457 rc = obd_get_info(exp, keylen, key, (__u32 *)&size, reply);
2460 req->rq_repmsg->status = 0;
/*
 * mds_set_info(): obd_set_info() handler for the MDS.  Currently only
 * the "mds_type" key is supported: records the MDS type, derives the
 * filter group (FILTER_GROUP_FIRST_MDS + type) and propagates it to
 * the data (obdfilter) export via a nested "mds_conn" set_info.
 */
2464 static int mds_set_info(struct obd_export *exp, __u32 keylen,
2465 void *key, __u32 vallen, void *val)
2467 struct obd_device *obd;
2468 struct mds_obd *mds;
2472 obd = class_exp2obd(exp);
2474 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
2475 exp->exp_handle.h_cookie);
2480 if (keylen >= strlen("mds_type") &&
2481 memcmp(key, "mds_type", keylen) == 0) {
2485 CDEBUG(D_IOCTL, "set mds type to %x\n", *(int*)val);
2487 mds->mds_obd_type = *(int*)val;
2488 group = FILTER_GROUP_FIRST_MDS + mds->mds_obd_type;
2489 valsize = sizeof(group);
2491 /* mds number has been changed, so the corresponding obdfilter
2492 * exp need to be changed too. */
2493 rc = obd_set_info(mds->mds_dt_exp, strlen("mds_conn"),
2494 "mds_conn", valsize, &group);
2497 CDEBUG(D_IOCTL, "invalid key\n");
/*
 * mdt_set_info(): handle MDS_SET_INFO over the wire — unpack key and
 * value buffers and forward "mds_type" to obd_set_info() (which lands
 * in mds_set_info() above).  Other keys are rejected.
 */
2501 static int mdt_set_info(struct ptlrpc_request *req)
2504 struct obd_export *exp = req->rq_export;
2505 int keylen, rc = 0, vallen;
2508 key = lustre_msg_buf(req->rq_reqmsg, 0, 1);
2510 DEBUG_REQ(D_HA, req, "no set_info key");
2513 keylen = req->rq_reqmsg->buflens[0];
2515 if (keylen == strlen("mds_type") &&
2516 memcmp(key, "mds_type", keylen) == 0) {
/* empty reply: set_info has no payload to return */
2517 rc = lustre_pack_reply(req, 0, NULL, NULL);
2521 val = lustre_msg_buf(req->rq_reqmsg, 1, 0);
2522 vallen = req->rq_reqmsg->buflens[1];
2524 rc = obd_set_info(exp, keylen, key, vallen, val);
2525 req->rq_repmsg->status = 0;
2528 CDEBUG(D_IOCTL, "invalid key\n");
/*
 * mds_revoke_export_locks(): send blocking ASTs for every granted
 * IBITS/PLAIN lock held by a *remote* export (med_remote), pretending
 * an EX conflict so clients drop their metadata locks — used around
 * security (re)negotiation.  Skips locks that are not yet granted or
 * already have an AST in flight (LDLM_FL_AST_SENT).
 */
2532 static void mds_revoke_export_locks(struct obd_export *exp)
2534 struct ldlm_namespace *ns = exp->exp_obd->obd_namespace;
2535 struct list_head *locklist = &exp->exp_ldlm_data.led_held_locks;
2536 struct ldlm_lock, *next;
2537 struct ldlm_lock_desc desc;
2539 if (!exp->u.eu_mds_data.med_remote)
2543 l_lock(&ns->ns_lock);
2544 list_for_each_entry_safe(lock, next, locklist, l_export_chain) {
2545 if (lock->l_req_mode != lock->l_granted_mode)
2548 LASSERT(lock->l_resource);
2549 if (lock->l_resource->lr_type != LDLM_IBITS &&
2550 lock->l_resource->lr_type != LDLM_PLAIN)
2553 if (lock->l_flags & LDLM_FL_AST_SENT)
2556 lock->l_flags |= LDLM_FL_AST_SENT;
2558 /* the desc just pretend to exclusive */
2559 ldlm_lock2desc(lock, &desc);
2560 desc.l_req_mode = LCK_EX;
2561 desc.l_granted_mode = 0;
2563 lock->l_blocking_ast(lock, &desc, NULL, LDLM_CB_BLOCKING);
2565 l_unlock(&ns->ns_lock);
/*
 * mds_msg_check_version(): verify the protocol version stamped in an
 * incoming message against the version family its opcode belongs to:
 * OBD (connect/disconnect, obd commands), MDS (metadata ops), DLM
 * (lock callbacks), LOG (llog ops).  SEC_INIT* opcodes are accepted
 * without a version check; unknown opcodes are rejected.
 */
2569 static int mds_msg_check_version(struct lustre_msg *msg)
2575 case MDS_DISCONNECT:
2577 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2579 CERROR("bad opc %u version %08x, expecting %08x\n",
2580 msg->opc, msg->version, LUSTRE_OBD_VERSION);
2585 case MDS_GETATTR_LOCK:
2589 case MDS_DONE_WRITING:
2592 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2594 CERROR("bad opc %u version %08x, expecting %08x\n",
2595 msg->opc, msg->version, LUSTRE_MDS_VERSION);
2599 case LDLM_BL_CALLBACK:
2600 case LDLM_CP_CALLBACK:
2601 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2603 CERROR("bad opc %u version %08x, expecting %08x\n",
2604 msg->opc, msg->version, LUSTRE_DLM_VERSION);
2606 case OBD_LOG_CANCEL:
2607 case LLOG_ORIGIN_HANDLE_OPEN:
2608 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2609 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2610 case LLOG_ORIGIN_HANDLE_READ_HEADER:
2611 case LLOG_ORIGIN_HANDLE_CLOSE:
2613 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2615 CERROR("bad opc %u version %08x, expecting %08x\n",
2616 msg->opc, msg->version, LUSTRE_LOG_VERSION);
2622 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2624 CERROR("bad opc %u version %08x, expecting %08x\n",
2625 msg->opc, msg->version, LUSTRE_OBD_VERSION);
2628 case SEC_INIT_CONTINUE:
2633 CERROR("MDS unknown opcode %d\n", msg->opc);
/*
 * mds_handle() - top-level dispatcher for all RPCs arriving at the MDS
 * service. Validates the message version, performs recovery filtering,
 * then dispatches on req->rq_reqmsg->opc to the per-opcode handler,
 * and finally stamps last_xid/last_committed into the reply.
 *
 * NOTE(review): this excerpt appears to have interior lines elided
 * (several case labels, break statements and closing braces are not
 * visible); comments below describe only what the visible lines show.
 */
2641 int mds_handle(struct ptlrpc_request *req)
2643 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
2644 struct obd_device *obd = NULL;
2645 struct mds_obd *mds = NULL; /* quell gcc overwarning */
2649 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
/* Reject requests whose lustre_msg version does not match the opcode class. */
2651 rc = mds_msg_check_version(req->rq_reqmsg);
2653 CERROR("MDS drop mal-formed request\n");
2657 /* Security opc should NOT trigger any recovery events */
2658 if (req->rq_reqmsg->opc == SEC_INIT ||
2659 req->rq_reqmsg->opc == SEC_INIT_CONTINUE) {
2660 if (req->rq_export) {
/* Re-establish the uid/gid mapping and drop stale locks on (re)init. */
2661 mds_req_add_idmapping(req,
2662 &req->rq_export->exp_mds_data);
2663 mds_revoke_export_locks(req->rq_export);
2666 } else if (req->rq_reqmsg->opc == SEC_FINI) {
2667 if (req->rq_export) {
2668 mds_req_del_idmapping(req,
2669 &req->rq_export->exp_mds_data);
2670 mds_revoke_export_locks(req->rq_export);
2675 LASSERT(current->journal_info == NULL);
2676 /* XXX identical to OST */
2677 if (req->rq_reqmsg->opc != MDS_CONNECT) {
2678 struct mds_export_data *med;
/* Every opcode other than CONNECT requires an already-established export. */
2681 if (req->rq_export == NULL) {
2682 CERROR("operation %d on unconnected MDS from %s\n",
2683 req->rq_reqmsg->opc,
2685 req->rq_status = -ENOTCONN;
2686 GOTO(out, rc = -ENOTCONN);
2689 med = &req->rq_export->exp_mds_data;
2690 obd = req->rq_export->exp_obd;
2693 /* sanity check: if the xid matches, the request must
2694 * be marked as a resent or replayed */
2695 if (req->rq_xid == le64_to_cpu(med->med_mcd->mcd_last_xid) ||
2696 req->rq_xid == le64_to_cpu(med->med_mcd->mcd_last_close_xid)) {
2697 LASSERTF(lustre_msg_get_flags(req->rq_reqmsg) &
2698 (MSG_RESENT | MSG_REPLAY),
2699 "rq_xid "LPU64" matches last_xid, "
2700 "expected RESENT flag\n",
2703 /* else: note the opposite is not always true; a
2704 * RESENT req after a failover will usually not match
2705 * the last_xid, since it was likely never
2706 * committed. A REPLAYed request will almost never
2707 * match the last xid, however it could for a
2708 * committed, but still retained, open. */
/* Snapshot recovery state under the BH lock before filtering. */
2710 spin_lock_bh(&obd->obd_processing_task_lock);
2711 recovering = obd->obd_recovering;
2712 spin_unlock_bh(&obd->obd_processing_task_lock);
2714 rc = mds_filter_recovery_request(req, obd,
2716 if (rc || should_process == 0) {
2718 } else if (should_process < 0) {
/* Negative should_process is an error code to return to the client. */
2719 req->rq_status = should_process;
2720 rc = ptlrpc_error(req);
2726 switch (req->rq_reqmsg->opc) {
2728 DEBUG_REQ(D_INODE, req, "connect");
2729 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
2730 rc = target_handle_connect(req);
2732 struct mds_export_data *med;
2734 LASSERT(req->rq_export);
2735 med = &req->rq_export->u.eu_mds_data;
2736 mds_init_export_data(req, med);
2737 mds_req_add_idmapping(req, med);
2739 /* Now that we have an export, set mds. */
2740 obd = req->rq_export->exp_obd;
2741 mds = mds_req2mds(req);
2745 case MDS_DISCONNECT:
2746 DEBUG_REQ(D_INODE, req, "disconnect");
2747 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
2748 rc = target_handle_disconnect(req);
2749 req->rq_status = rc; /* superfluous? */
2753 DEBUG_REQ(D_INODE, req, "getstatus");
2754 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
2755 rc = mds_getstatus(req);
2759 DEBUG_REQ(D_INODE, req, "getattr");
2760 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
2761 rc = mds_getattr(req, MDS_REQ_REC_OFF);
2764 case MDS_GETATTR_LOCK: {
2765 struct lustre_handle lockh;
2766 DEBUG_REQ(D_INODE, req, "getattr_lock");
2767 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_LOCK_NET, 0);
2769 /* If this request gets a reconstructed reply, we won't be
2770 * acquiring any new locks in mds_getattr_lock, so we don't
2774 rc = mds_getattr_lock(req, MDS_REQ_REC_OFF, &lockh,
2775 MDS_INODELOCK_UPDATE);
2776 /* this non-intent call (from an ioctl) is special */
2777 req->rq_status = rc;
/* Lock was only needed for the duration of the getattr; drop it now. */
2778 if (rc == 0 && lockh.cookie)
2779 ldlm_lock_decref(&lockh, LCK_PR);
2783 DEBUG_REQ(D_INODE, req, "statfs");
2784 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
2785 rc = mds_statfs(req);
2789 DEBUG_REQ(D_INODE, req, "readpage");
2790 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
2791 rc = mds_readpage(req, MDS_REQ_REC_OFF);
2793 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
/* Fault-injection path: drop the prepared reply to simulate send failure. */
2794 if (req->rq_reply_state) {
2795 lustre_free_reply_state (req->rq_reply_state);
2796 req->rq_reply_state = NULL;
2803 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, MDS_REQ_REC_OFF,
2806 int size[3] = {sizeof(struct mds_body), mds->mds_max_mdsize,
2807 mds->mds_max_cookiesize};
2810 /* NB only peek inside req now; mds_reint() will swab it */
2812 CERROR ("Can't inspect opcode\n");
2817 if (lustre_msg_swabbed (req->rq_reqmsg))
/* NOTE(review): the condition below looks inverted — as written it
 * selects reint_names[opc] precisely when opc is in range OR the
 * entry is NULL, which would print a NULL string; presumably the
 * intent is in-range AND non-NULL. Confirm against upstream. */
2820 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
2821 (opc < sizeof(reint_names) / sizeof(reint_names[0]) ||
2822 reint_names[opc] == NULL) ? reint_names[opc] :
2825 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
/* Unlink/rename need the cookie buffer; open needs extra reply space. */
2827 if (opc == REINT_UNLINK || opc == REINT_RENAME)
2829 else if (opc == REINT_OPEN)
2834 rc = lustre_pack_reply(req, bufcount, size, NULL);
2838 rc = mds_reint(req, MDS_REQ_REC_OFF, NULL);
2839 fail = OBD_FAIL_MDS_REINT_NET_REP;
2844 DEBUG_REQ(D_INODE, req, "close");
2845 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
2846 rc = mds_close(req, MDS_REQ_REC_OFF);
2849 case MDS_DONE_WRITING:
2850 DEBUG_REQ(D_INODE, req, "done_writing");
2851 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
2852 rc = mds_done_writing(req, MDS_REQ_REC_OFF);
2856 DEBUG_REQ(D_INODE, req, "pin");
2857 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
2858 rc = mds_pin(req, MDS_REQ_REC_OFF);
2862 DEBUG_REQ(D_INODE, req, "sync");
2863 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
2864 rc = mds_sync(req, MDS_REQ_REC_OFF);
2868 DEBUG_REQ(D_INODE, req, "ping");
2869 rc = target_handle_ping(req);
2872 case OBD_LOG_CANCEL:
2873 CDEBUG(D_INODE, "log cancel\n");
2874 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
2875 rc = -ENOTSUPP; /* la la la */
2879 DEBUG_REQ(D_INODE, req, "enqueue");
2880 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
2881 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
2882 ldlm_server_blocking_ast, NULL);
2883 fail = OBD_FAIL_LDLM_REPLY;
2886 DEBUG_REQ(D_INODE, req, "convert");
2887 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
2888 rc = ldlm_handle_convert(req);
2890 case LDLM_BL_CALLBACK:
2891 case LDLM_CP_CALLBACK:
2892 DEBUG_REQ(D_INODE, req, "callback");
/* The MDS is a lock server; AST callbacks are client-side only. */
2893 CERROR("callbacks should not happen on MDS\n");
2895 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
2897 case LLOG_ORIGIN_HANDLE_OPEN:
2898 DEBUG_REQ(D_INODE, req, "llog_init");
2899 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2900 rc = llog_origin_handle_open(req);
2902 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2903 DEBUG_REQ(D_INODE, req, "llog next block");
2904 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2905 rc = llog_origin_handle_next_block(req);
2907 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2908 DEBUG_REQ(D_INODE, req, "llog prev block");
2909 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2910 rc = llog_origin_handle_prev_block(req);
2912 case LLOG_ORIGIN_HANDLE_READ_HEADER:
2913 DEBUG_REQ(D_INODE, req, "llog read header");
2914 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2915 rc = llog_origin_handle_read_header(req);
2917 case LLOG_ORIGIN_HANDLE_CLOSE:
2918 DEBUG_REQ(D_INODE, req, "llog close");
2919 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2920 rc = llog_origin_handle_close(req);
2923 DEBUG_REQ(D_INODE, req, "ost_create");
2924 rc = mdt_obj_create(req);
2927 DEBUG_REQ(D_INODE, req, "get_info");
2928 rc = mdt_get_info(req);
2931 DEBUG_REQ(D_INODE, req, "set_info");
2932 rc = mdt_set_info(req);
2935 CDEBUG(D_INODE, "write\n");
2936 OBD_FAIL_RETURN(OBD_FAIL_OST_BRW_NET, 0);
2937 rc = ost_brw_write(req, NULL);
2938 LASSERT(current->journal_info == NULL);
2939 /* mdt_brw sends its own replies */
2943 DEBUG_REQ(D_INODE, req, "llog catinfo");
2944 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
2945 rc = llog_catinfo(req);
2948 req->rq_status = -ENOTSUPP;
2949 rc = ptlrpc_error(req);
/* No handler may leave a journal handle open across the dispatch. */
2953 LASSERT(current->journal_info == NULL);
2957 /* If we're DISCONNECTing, the mds_export_data is already freed */
2958 if (!rc && req->rq_reqmsg->opc != MDS_DISCONNECT) {
2959 struct mds_export_data *med = &req->rq_export->exp_mds_data;
/* NOTE(review): recovering obd via list_entry()/container_of on `mds`
 * assumes mds points into an obd_device's u.mds member — verify the
 * member offset argument on the elided continuation line. */
2960 struct obd_device *obd = list_entry(mds, struct obd_device,
2962 req->rq_repmsg->last_xid =
2963 le64_to_cpu(med->med_mcd->mcd_last_xid);
2965 if (!obd->obd_no_transno) {
2966 req->rq_repmsg->last_committed =
2967 obd->obd_last_committed;
2969 DEBUG_REQ(D_IOCTL, req,
2970 "not sending last_committed update");
2972 CDEBUG(D_INFO, "last_transno "LPU64", last_committed "LPU64
2974 mds->mds_last_transno, obd->obd_last_committed,
2980 target_send_reply(req, rc, fail);
2984 /* Update the server data on disk. This stores the new mount_count and also the
2985 * last_rcvd value to disk. If we don't have a clean shutdown, then the server
2986 * last_rcvd value may be less than that of the clients. This will alert us
2987 * that we may need to do client recovery.
2989 * Also assumes for mds_last_transno that we are not modifying it (no locking).
2991 int mds_update_server_data(struct obd_device *obd, int force_sync)
2993 struct mds_obd *mds = &obd->u.mds;
2994 struct mds_server_data *msd = mds->mds_server_data;
2995 struct file *filp = mds->mds_rcvd_filp;
2996 struct lvfs_run_ctxt saved;
/* Writes happen in the MDS's own fs context; push/pop brackets the I/O. */
3001 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3002 msd->msd_last_transno = cpu_to_le64(mds->mds_last_transno);
3004 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
3005 mds->mds_mount_count, mds->mds_last_transno);
/* force_sync is passed through so callers can require a synchronous commit. */
3006 rc = fsfilt_write_record(obd, filp, msd, sizeof(*msd), &off, force_sync);
3008 CERROR("error writing MDS server data: rc = %d\n", rc);
3009 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3014 /* saves last allocated fid counter to file. */
3015 int mds_update_last_fid(struct obd_device *obd, void *handle,
3018 struct mds_obd *mds = &obd->u.mds;
3019 struct file *filp = mds->mds_fid_filp;
3020 struct lvfs_run_ctxt saved;
/* Snapshot the counter under its spinlock so the on-disk value is coherent. */
3026 spin_lock(&mds->mds_last_fid_lock);
3027 last_fid = mds->mds_last_fid;
3028 spin_unlock(&mds->mds_last_fid_lock);
3030 CDEBUG(D_SUPER, "MDS last_fid is #"LPU64"\n",
/* When called inside a transaction, register a commit callback instead of
 * (or in addition to) the synchronous write path below. */
3034 fsfilt_add_journal_cb(obd, mds->mds_sb, last_fid,
3035 handle, mds_commit_last_fid_cb,
3039 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3040 rc = fsfilt_write_record(obd, filp, &last_fid, sizeof(last_fid),
3042 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3045 CERROR("error writing MDS last_fid #"LPU64
3046 ", err = %d\n", last_fid, rc);
3050 CDEBUG(D_SUPER, "wrote fid #"LPU64" at idx "
3051 "%llu: err = %d\n", last_fid, off, rc);
/* Advance the last-allocated-fid watermark; never moves it backwards. */
3056 void mds_set_last_fid(struct obd_device *obd, __u64 fid)
3058 struct mds_obd *mds = &obd->u.mds;
3060 spin_lock(&mds->mds_last_fid_lock);
3061 if (fid > mds->mds_last_fid)
3062 mds->mds_last_fid = fid;
3063 spin_unlock(&mds->mds_last_fid_lock);
/* Journal-commit callback: forwards the committed transno to the generic
 * obd commit notification. */
3066 void mds_commit_last_transno_cb(struct obd_device *obd,
3067 __u64 transno, void *data,
3070 obd_transno_commit_cb(obd, transno, error);
/* Journal-commit callback for the last_fid record: logs success or failure. */
3073 void mds_commit_last_fid_cb(struct obd_device *obd,
3074 __u64 fid, void *data,
3078 CERROR("%s: fid "LPD64" commit error: %d\n",
3079 obd->obd_name, fid, error);
3083 CDEBUG(D_HA, "%s: fid "LPD64" committed\n",
3084 obd->obd_name, fid);
/* Allocate the next fid by atomically incrementing the per-MDS counter. */
3087 __u64 mds_alloc_fid(struct obd_device *obd)
3089 struct mds_obd *mds = &obd->u.mds;
3092 spin_lock(&mds->mds_last_fid_lock);
3093 fid = ++mds->mds_last_fid;
3094 spin_unlock(&mds->mds_last_fid_lock);
3100 * allocates new lustre_id on passed @inode and saves it to inode EA.
3102 int mds_alloc_inode_sid(struct obd_device *obd, struct inode *inode,
3103 void *handle, struct lustre_id *id)
3105 struct mds_obd *mds = &obd->u.mds;
3106 int alloc = 0, rc = 0;
3109 LASSERT(obd != NULL);
3110 LASSERT(inode != NULL);
/* NOTE(review): this OBD_ALLOC overwrites the caller's `id` pointer; the
 * `alloc` flag above suggests an elided `if (id == NULL)` guard so the
 * allocation only happens when no output buffer was supplied — confirm. */
3113 OBD_ALLOC(id, sizeof(*id));
/* Build the new id: this MDS's group, a fresh fid, and the inode's
 * ino/generation/type as the store cookie. */
3119 id_group(id) = mds->mds_num;
3120 id_fid(id) = mds_alloc_fid(obd);
3121 id_ino(id) = inode->i_ino;
3122 id_gen(id) = inode->i_generation;
3123 id_type(id) = (S_IFMT & inode->i_mode);
3125 rc = mds_update_inode_sid(obd, inode, handle, id);
3127 CERROR("Can't update inode FID EA, "
/* Free only the locally-allocated id (alloc path). */
3132 OBD_FREE(id, sizeof(*id));
3137 * reads inode self id from inode EA. Probably later this should be replaced by
3138 * caching inode self id to avoid raeding it every time it is needed.
3140 int mds_read_inode_sid(struct obd_device *obd, struct inode *inode,
3141 struct lustre_id *id)
3146 LASSERT(id != NULL);
3147 LASSERT(obd != NULL);
3148 LASSERT(inode != NULL);
/* Only the li_fid portion is stored in the EA_SID extended attribute. */
3150 rc = fsfilt_get_md(obd, inode, &id->li_fid,
3151 sizeof(id->li_fid), EA_SID);
3153 CERROR("fsfilt_get_md() failed, "
3166 /* updates inode self id in EA. */
3167 int mds_update_inode_sid(struct obd_device *obd, struct inode *inode,
3168 void *handle, struct lustre_id *id)
3173 LASSERT(id != NULL);
3174 LASSERT(obd != NULL);
3175 LASSERT(inode != NULL);
/* Mirror of mds_read_inode_sid(): writes only li_fid into EA_SID, inside
 * the caller-provided transaction handle. */
3177 rc = fsfilt_set_md(obd, inode, handle, &id->li_fid,
3178 sizeof(id->li_fid), EA_SID);
3180 CERROR("fsfilt_set_md() failed, rc = %d\n", rc);
3188 * reads inode id on master MDS. This is usualy done by CMOBD to update requests
3189 * to master MDS by correct store cookie, needed to find inode on master MDS
3192 int mds_read_inode_mid(struct obd_device *obd, struct inode *inode,
3193 struct lustre_id *id)
3198 LASSERT(id != NULL);
3199 LASSERT(obd != NULL);
3200 LASSERT(inode != NULL);
/* Unlike the SID, the whole lustre_id is stored in the EA_MID attribute. */
3202 rc = fsfilt_get_md(obd, inode, id, sizeof(*id), EA_MID);
3204 CERROR("fsfilt_get_md() failed, rc = %d\n", rc);
3217 * updates master inode id. Usualy this is done by CMOBD after an inode is
3218 * created and relationship between cache MDS and master one should be
3221 int mds_update_inode_mid(struct obd_device *obd, struct inode *inode,
3222 void *handle, struct lustre_id *id)
3227 LASSERT(id != NULL);
3228 LASSERT(obd != NULL);
3229 LASSERT(inode != NULL);
/* Store the full lustre_id in EA_MID under the caller's transaction. */
3231 rc = fsfilt_set_md(obd, inode, handle, id,
3232 sizeof(*id), EA_MID);
3234 CERROR("fsfilt_set_md() failed, "
3242 /* mount the file system (secretly) */
/*
 * mds_setup() - OBD setup method for the MDS. Parses the lustre_cfg
 * buffers (1: device, 2: fstype, 3: profile, 4: mount options,
 * 5: MD/LMV name, 6: master/cache role), mounts the backing filesystem
 * via lvfs, creates the LDLM namespace, and initializes on-disk state.
 * Error paths unwind in reverse order via the labels at the bottom.
 * NOTE(review): interior lines (ENTRY/RETURN, some braces) are elided
 * in this excerpt.
 */
3243 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
3245 struct lustre_cfg* lcfg = buf;
3246 struct mds_obd *mds = &obd->u.mds;
3247 struct lvfs_obd_ctxt *lvfs_ctxt = NULL;
3248 char *options = NULL;
3249 struct vfsmount *mnt;
/* Device name (buf 1) and fstype (buf 2) are mandatory. */
3255 if (lcfg->lcfg_bufcount < 3)
3256 RETURN(rc = -EINVAL);
3258 if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
3259 RETURN(rc = -EINVAL);
3261 obd->obd_fsops = fsfilt_get_ops(lustre_cfg_string(lcfg, 2));
3262 if (IS_ERR(obd->obd_fsops))
3263 RETURN(rc = PTR_ERR(obd->obd_fsops));
3265 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
/* Mount-option string is built in a single zeroed page. */
3267 page = get_zeroed_page(GFP_KERNEL);
3271 options = (char *)page;
3274 * here we use "iopen_nopriv" hardcoded, because it affects MDS utility
3275 * and the rest of options are passed by mount options. Probably this
3276 * should be moved to somewhere else like startup scripts or lconf. */
3277 sprintf(options, "iopen_nopriv");
3279 if (LUSTRE_CFG_BUFLEN(lcfg, 4) > 0 && lustre_cfg_buf(lcfg, 4))
3280 sprintf(options + strlen(options), ",%s",
3281 lustre_cfg_string(lcfg, 4));
3283 /* we have to know mdsnum before touching underlying fs -bzzz */
3284 atomic_set(&mds->mds_open_count, 0);
3285 sema_init(&mds->mds_md_sem, 1);
3286 sema_init(&mds->mds_create_sem, 1);
3287 mds->mds_md_connected = 0;
3288 mds->mds_md_name = NULL;
/* Optional buf 5: name of the MD (LMV) target; "dumb" means none. */
3290 if (LUSTRE_CFG_BUFLEN(lcfg, 5) > 0 && lustre_cfg_buf(lcfg, 5) &&
3291 strncmp(lustre_cfg_string(lcfg, 5), "dumb", LUSTRE_CFG_BUFLEN(lcfg, 5))) {
3294 generate_random_uuid(uuid);
3295 class_uuid_unparse(uuid, &mds->mds_md_uuid);
3297 OBD_ALLOC(mds->mds_md_name, LUSTRE_CFG_BUFLEN(lcfg, 5));
3298 if (mds->mds_md_name == NULL)
3299 RETURN(rc = -ENOMEM);
3301 memcpy(mds->mds_md_name, lustre_cfg_buf(lcfg, 5),
3302 LUSTRE_CFG_BUFLEN(lcfg, 5));
3304 CDEBUG(D_OTHER, "MDS: %s is master for %s\n",
3305 obd->obd_name, mds->mds_md_name);
3307 rc = mds_md_connect(obd, mds->mds_md_name);
3309 OBD_FREE(mds->mds_md_name, LUSTRE_CFG_BUFLEN(lcfg, 5));
/* Optional buf 6 selects the CMD role: "master" (default) or "cache". */
3314 mds->mds_obd_type = MDS_MASTER_OBD;
3316 if (LUSTRE_CFG_BUFLEN(lcfg, 6) > 0 && lustre_cfg_buf(lcfg, 6) &&
3317 strncmp(lustre_cfg_string(lcfg, 6), "dumb",
3318 LUSTRE_CFG_BUFLEN(lcfg, 6))) {
3319 if (!memcmp(lustre_cfg_string(lcfg, 6), "master",
3320 strlen("master"))) {
3321 mds->mds_obd_type = MDS_MASTER_OBD;
3322 } else if (!memcmp(lustre_cfg_string(lcfg, 6), "cache",
3324 mds->mds_obd_type = MDS_CACHE_OBD;
/* Mount the backing device with the options assembled above. */
3328 rc = lvfs_mount_fs(lustre_cfg_string(lcfg, 1),
3329 lustre_cfg_string(lcfg, 2),
3330 options, 0, &lvfs_ctxt);
3334 if (rc || !lvfs_ctxt) {
3335 CERROR("lvfs_mount_fs failed: rc = %d\n", rc);
3339 mnt = lvfs_ctxt->loc_mnt;
3340 mds->mds_lvfs_ctxt = lvfs_ctxt;
3341 ll_clear_rdonly(ll_sbdev(mnt->mnt_sb));
3343 CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
3345 sema_init(&mds->mds_epoch_sem, 1);
3346 atomic_set(&mds->mds_real_clients, 0);
3347 spin_lock_init(&mds->mds_transno_lock);
3348 spin_lock_init(&mds->mds_last_fid_lock);
3349 sema_init(&mds->mds_orphan_recovery_sem, 1);
3350 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
/* Per-device LDLM namespace, named after the obd uuid. */
3352 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
3353 obd->obd_namespace = ldlm_namespace_new(ns_name, LDLM_NAMESPACE_SERVER);
3355 if (obd->obd_namespace == NULL) {
3356 mds_cleanup(obd, 0);
3357 GOTO(err_put, rc = -ENOMEM);
3359 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
3361 rc = mds_fs_setup(obd, mnt);
3363 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
3368 rc = llog_start_commit_thread();
/* Optional buf 3: the LOV profile to process in mds_postsetup(). */
3374 if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0 && lustre_cfg_buf(lcfg, 3) &&
3375 strncmp(lustre_cfg_string(lcfg, 3), "dumb",
3376 LUSTRE_CFG_BUFLEN(lcfg, 3))) {
3379 generate_random_uuid(uuid);
3380 class_uuid_unparse(uuid, &mds->mds_dt_uuid);
3382 OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
3383 if (mds->mds_profile == NULL)
3384 GOTO(err_fs, rc = -ENOMEM);
3386 strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
3387 LUSTRE_CFG_BUFLEN(lcfg, 3));
3391 * setup root dir and files ID dir if lmv already connected, or there is
3394 if (mds->mds_md_exp || (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0 &&
3395 lustre_cfg_buf(lcfg, 3) &&
3396 strncmp(lustre_cfg_string(lcfg, 3), "dumb",
3397 LUSTRE_CFG_BUFLEN(lcfg, 3)))) {
3398 rc = mds_fs_setup_rootid(obd);
3402 rc = mds_fs_setup_virtid(obd);
3407 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
3408 "mds_ldlm_client", &obd->obd_ldlm_client);
3409 obd->obd_replayable = 1;
3411 rc = mds_postsetup(obd);
/* ---- error unwinding, innermost first ---- */
3418 /* No extra cleanup needed for llog_init_commit_thread() */
3419 mds_fs_cleanup(obd, 0);
3421 ldlm_namespace_free(obd->obd_namespace, 0);
3422 obd->obd_namespace = NULL;
3425 lvfs_umount_fs(mds->mds_lvfs_ctxt);
3429 fsfilt_put_ops(obd->obd_fsops);
/*
 * mds_postsetup() - second-stage setup: create the config llog context
 * and, if a profile was recorded by mds_setup(), replay it to connect
 * this MDS to its LOV (data) and LMV (metadata) targets.
 */
3433 static int mds_postsetup(struct obd_device *obd)
3435 struct mds_obd *mds = &obd->u.mds;
3439 rc = obd_llog_setup(obd, &obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT,
3440 obd, 0, NULL, &llog_lvfs_ops);
3444 if (mds->mds_profile) {
3445 struct llog_ctxt *lgctxt;
3446 struct lvfs_run_ctxt saved;
3447 struct lustre_profile *lprof;
3448 struct config_llog_instance cfg;
3450 cfg.cfg_instance = NULL;
3451 cfg.cfg_uuid = mds->mds_dt_uuid;
3452 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3454 lgctxt = llog_get_context(&obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT);
/* Failure path: restore fs context before bailing out. */
3456 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3457 GOTO(err_llog, rc = -EINVAL);
/* Replay the recorded configuration log for this profile. */
3460 rc = class_config_process_llog(lgctxt, mds->mds_profile, &cfg);
3461 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3466 lprof = class_get_profile(mds->mds_profile);
3467 if (lprof == NULL) {
3468 CERROR("No profile found: %s\n", mds->mds_profile);
3469 GOTO(err_cleanup, rc = -ENOENT);
3471 rc = mds_dt_connect(obd, lprof->lp_lov);
3473 GOTO(err_cleanup, rc);
3475 rc = mds_md_postsetup(obd);
3477 GOTO(err_cleanup, rc);
/* Error path: tear down the config llog context created above. */
3484 obd_llog_cleanup(llog_get_context(&obd->obd_llogs,
3485 LLOG_CONFIG_ORIG_CTXT));
/*
 * mds_postrecov_common() - work performed after client recovery
 * completes: clean the PENDING (orphan) directory, re-identify this MDS
 * to the OSTs, reconnect the unlink llog, and clear orphaned
 * precreated objects. Returns a count (item) on success, negative on error.
 */
3489 int mds_postrecov_common(struct obd_device *obd)
3491 struct mds_obd *mds = &obd->u.mds;
3492 struct llog_ctxt *ctxt;
3493 int rc, item = 0, valsize;
/* Must only run once recovery has fully finished. */
3497 LASSERT(!obd->obd_recovering);
3498 ctxt = llog_get_context(&obd->obd_llogs, LLOG_UNLINK_ORIG_CTXT);
3499 LASSERT(ctxt != NULL);
3501 /* clean PENDING dir */
3502 rc = mds_cleanup_orphans(obd);
/* Tell the OSTs which MDS group this server writes for. */
3507 group = FILTER_GROUP_FIRST_MDS + mds->mds_num;
3508 valsize = sizeof(group);
3509 rc = obd_set_info(mds->mds_dt_exp, strlen("mds_conn"),
3510 "mds_conn", valsize, &group);
3514 rc = llog_connect(ctxt, obd->u.mds.mds_dt_desc.ld_tgt_count,
3517 CERROR("%s: failed at llog_origin_connect: %d\n",
3522 /* remove the orphaned precreated objects */
3523 rc = mds_dt_clear_orphans(mds, NULL /* all OSTs */);
3528 RETURN(rc < 0 ? rc : item);
3531 /* cleanup all llogging subsystems */
3532 rc = obd_llog_finish(obd, &obd->obd_llogs,
3533 mds->mds_dt_desc.ld_tgt_count);
3535 CERROR("%s: failed to cleanup llogging subsystems\n",
/* Post-recovery hook: run the common path, then reconnect the MD (LMV)
 * layer. */
3540 int mds_postrecov(struct obd_device *obd)
3544 rc = mds_postrecov_common(obd);
3546 rc = mds_md_reconnect(obd);
/*
 * mds_dt_clean() - undo the profile-driven LOV setup: process the
 * matching "<profile>-clean" config llog, then free the saved profile
 * name.
 */
3550 int mds_dt_clean(struct obd_device *obd)
3552 struct mds_obd *mds = &obd->u.mds;
3555 if (mds->mds_profile) {
3557 struct llog_ctxt *llctx;
3558 struct lvfs_run_ctxt saved;
3559 struct config_llog_instance cfg;
/* "-clean" suffix plus NUL; sized for the sprintf below. */
3560 int len = strlen(mds->mds_profile) + sizeof("-clean") + 1;
3562 OBD_ALLOC(cln_prof, len);
3563 sprintf(cln_prof, "%s-clean", mds->mds_profile);
3565 cfg.cfg_instance = NULL;
3566 cfg.cfg_uuid = mds->mds_dt_uuid;
3568 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3569 llctx = llog_get_context(&obd->obd_llogs,
3570 LLOG_CONFIG_ORIG_CTXT);
3571 class_config_process_llog(llctx, cln_prof, &cfg);
3572 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3574 OBD_FREE(cln_prof, len);
3575 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
3576 mds->mds_profile = NULL;
/* Release the saved MD (LMV) target name, if any. */
3581 int mds_md_clean(struct obd_device *obd)
3583 struct mds_obd *mds = &obd->u.mds;
3586 if (mds->mds_md_name) {
3587 OBD_FREE(mds->mds_md_name, strlen(mds->mds_md_name) + 1);
3588 mds->mds_md_name = NULL;
/* Pre-cleanup stage: disconnect from the data (LOV) targets and drop the
 * config llog context before the main mds_cleanup() runs. */
3593 static int mds_precleanup(struct obd_device *obd, int flags)
3599 mds_dt_disconnect(obd, flags);
3601 obd_llog_cleanup(llog_get_context(&obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT));
3605 extern void lgss_svc_cache_purge_all(void);
/*
 * mds_cleanup() - OBD cleanup method: flush server data and last_fid to
 * disk, free per-OST object id arrays, tear down fs methods, unmount the
 * backing fs, free the LDLM namespace, cancel any in-flight recovery,
 * purge the GSS service cache, and empty the security deny-list.
 */
3606 static int mds_cleanup(struct obd_device *obd, int flags)
3608 struct mds_obd *mds = &obd->u.mds;
/* Nothing was ever set up (no superblock) — nothing to clean. */
3611 if (mds->mds_sb == NULL)
/* Final synchronous flush of last_rcvd and last_fid state. */
3614 mds_update_server_data(obd, 1);
3615 mds_update_last_fid(obd, NULL, 1);
3617 if (mds->mds_dt_objids != NULL) {
3618 int size = mds->mds_dt_desc.ld_tgt_count *
3620 OBD_FREE(mds->mds_dt_objids, size);
3622 mds_fs_cleanup(obd, flags);
3626 /* 2 seems normal on mds, (may_umount() also expects 2
3627 fwiw), but we only see 1 at this point in obdfilter. */
3628 lvfs_umount_fs(mds->mds_lvfs_ctxt);
3632 ldlm_namespace_free(obd->obd_namespace, flags & OBD_OPT_FORCE);
/* Stop the recovery timer if we are torn down mid-recovery. */
3634 spin_lock_bh(&obd->obd_processing_task_lock);
3635 if (obd->obd_recovering) {
3636 target_cancel_recovery_timer(obd);
3637 obd->obd_recovering = 0;
3639 spin_unlock_bh(&obd->obd_processing_task_lock);
3642 fsfilt_put_ops(obd->obd_fsops);
3646 lgss_svc_cache_purge_all();
/* Drain and free the deny-security list under its lock. */
3649 spin_lock(&mds->mds_denylist_lock);
3650 while (!list_empty( &mds->mds_denylist ) ) {
3651 deny_sec_t *p_deny_sec = list_entry(mds->mds_denylist.next,
3653 list_del(&p_deny_sec->list);
3654 OBD_FREE(p_deny_sec, sizeof(*p_deny_sec));
3656 spin_unlock(&mds->mds_denylist_lock);
/* Map a security flavor name ("null", "krb5i", "krb5p") to the internal
 * setting stored via *sec; unknown names are rejected with an error.
 * NOTE(review): the assignment lines for *sec are elided in this excerpt. */
3661 static int set_security(const char *value, char **sec)
3663 if (!strcmp(value, "null"))
3665 else if (!strcmp(value, "krb5i"))
3667 else if (!strcmp(value, "krb5p"))
3670 CERROR("Unrecognized security flavor %s\n", value);
/*
 * mds_process_config() - OBD config-command handler. Currently supports
 * LCFG_SET_SECURITY with keys "mds_sec", "oss_sec" (flavor selection via
 * set_security()) and "deny_sec" (append to the deny-list).
 */
3677 static int mds_process_config(struct obd_device *obd, obd_count len, void *buf)
3679 struct lustre_cfg *lcfg = buf;
3680 struct mds_obd *mds = &obd->u.mds;
3684 switch(lcfg->lcfg_command) {
3685 case LCFG_SET_SECURITY: {
/* Both key (buf 1) and value (buf 2) must be present. */
3686 if ((LUSTRE_CFG_BUFLEN(lcfg, 1) == 0) ||
3687 (LUSTRE_CFG_BUFLEN(lcfg, 2) == 0))
3688 GOTO(out, rc = -EINVAL);
3690 if (!strcmp(lustre_cfg_string(lcfg, 1), "mds_sec"))
3691 rc = set_security(lustre_cfg_string(lcfg, 2),
3693 else if (!strcmp(lustre_cfg_string(lcfg, 1), "oss_sec"))
3694 rc = set_security(lustre_cfg_string(lcfg, 2),
3696 else if (!strcmp(lustre_cfg_string(lcfg, 1), "deny_sec")){
3697 spin_lock(&mds->mds_denylist_lock);
3698 rc = add_deny_security(lustre_cfg_string(lcfg, 2),
3699 &mds->mds_denylist);
3700 spin_unlock(&mds->mds_denylist_lock);
3702 CERROR("Unrecognized key\n");
3708 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
3709 GOTO(out, rc = -EINVAL);
/*
 * fixup_handle_for_resent_req() - for a MSG_RESENT enqueue, look up the
 * lock the server already granted for the same client-side (remote)
 * handle and reuse it, so the resent RPC gets the original lock instead
 * of a duplicate. If nothing matches and the xid is not a known resend,
 * the RESENT flag is cleared and the request proceeds as a fresh one.
 * On a match, *old_lock is returned with a reference (LDLM_LOCK_GET).
 */
3715 static void fixup_handle_for_resent_req(struct ptlrpc_request *req,
3717 struct ldlm_lock *new_lock,
3718 struct ldlm_lock **old_lock,
3719 struct lustre_handle *lockh)
3721 struct obd_export *exp = req->rq_export;
3722 struct obd_device *obd = exp->exp_obd;
3723 struct ldlm_request *dlmreq =
3724 lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*dlmreq));
3725 struct lustre_handle remote_hdl = dlmreq->lock_handle1;
3726 struct list_head *iter;
/* Only resent requests need this treatment. */
3728 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
/* Walk this export's granted locks under the namespace lock. */
3731 l_lock(&obd->obd_namespace->ns_lock);
3732 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
3733 struct ldlm_lock *lock;
3734 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
3735 if (lock == new_lock)
3737 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
3738 lockh->cookie = lock->l_handle.h_cookie;
3739 LDLM_DEBUG(lock, "restoring lock cookie");
3740 DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64,
3743 *old_lock = LDLM_LOCK_GET(lock);
3744 l_unlock(&obd->obd_namespace->ns_lock);
3748 l_unlock(&obd->obd_namespace->ns_lock);
3750 /* If the xid matches, then we know this is a resent request,
3751 * and allow it. (It's probably an OPEN, for which we don't
3754 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_xid))
3758 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_close_xid))
3761 /* This remote handle isn't enqueued, so we never received or
3762 * processed this request. Clear MSG_RESENT, because it can
3763 * be handled like any normal request now. */
3765 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
3767 DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64,
/* Test whether a disposition flag is set in the intent reply. */
3771 int intent_disposition(struct ldlm_reply *rep, int flag)
3775 return (rep->lock_policy_res1 & flag);
/* Set a disposition flag in the intent reply. */
3778 void intent_set_disposition(struct ldlm_reply *rep, int flag)
3782 rep->lock_policy_res1 |= flag;
/*
 * mds_intent_policy() - LDLM intent callback registered in mds_setup().
 * Executes the metadata operation embedded in an enqueue (open/create,
 * getattr/lookup, unlink check), then either aborts the lock
 * (ELDLM_LOCK_ABORTED) or replaces the client's speculative lock with
 * the one the operation actually took (ELDLM_LOCK_REPLACED).
 * NOTE(review): interior lines (case labels, ENTRY/RETURN, some braces)
 * are elided in this excerpt.
 */
3785 static int mds_intent_policy(struct ldlm_namespace *ns,
3786 struct ldlm_lock **lockp, void *req_cookie,
3787 ldlm_mode_t mode, int flags, void *data)
3789 struct ptlrpc_request *req = req_cookie;
3790 struct ldlm_lock *lock = *lockp;
3791 struct ldlm_intent *it;
3792 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
3793 struct ldlm_reply *rep;
3794 struct lustre_handle lockh[2] = {{0}, {0}};
3795 struct ldlm_lock *new_lock = NULL;
3796 int getattr_part = MDS_INODELOCK_UPDATE;
3797 int rc, reply_buffers;
3798 int repsize[5] = {sizeof(struct ldlm_reply),
3799 sizeof(struct mds_body),
3800 mds->mds_max_mdsize};
3802 int offset = MDS_REQ_INTENT_REC_OFF;
3805 LASSERT(req != NULL);
3806 MD_COUNTER_INCREMENT(req->rq_export->exp_obd, intent_lock);
/* Plain enqueue without an intent: reply with just the ldlm_reply. */
3808 if (req->rq_reqmsg->bufcount <= MDS_REQ_INTENT_IT_OFF) {
3809 /* No intent was provided */
3810 int size = sizeof(struct ldlm_reply);
3811 rc = lustre_pack_reply(req, 1, &size, NULL);
3816 it = lustre_swab_reqbuf(req, MDS_REQ_INTENT_IT_OFF, sizeof(*it),
3817 lustre_swab_ldlm_intent);
3819 CERROR("Intent missing\n");
3820 RETURN(req->rq_status = -EFAULT);
3823 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
/* Lookup-class intents may also return an ACL buffer in the reply. */
3826 if (it->opc & ( IT_OPEN | IT_GETATTR | IT_LOOKUP | IT_CHDIR )) {
3829 repsize[4] = xattr_acl_size(LL_ACL_MAX_ENTRIES);
3832 rc = lustre_pack_reply(req, reply_buffers, repsize, NULL);
3834 RETURN(req->rq_status = rc);
3836 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
3837 LASSERT(rep != NULL);
/* Tell the client the intent was executed regardless of its outcome. */
3839 intent_set_disposition(rep, DISP_IT_EXECD);
3841 /* execute policy */
3842 switch ((long)it->opc) {
3844 case IT_CREAT|IT_OPEN:
3845 fixup_handle_for_resent_req(req, MDS_REQ_INTENT_LOCKREQ_OFF,
3847 /* XXX swab here to assert that an mds_open reint
3848 * packet is following */
3849 fixup_handle_for_resent_req(req, MDS_REQ_INTENT_LOCKREQ_OFF,
3851 rep->lock_policy_res2 = mds_reint(req, offset, lockh);
3853 if (rep->lock_policy_res2) {
3855 * mds_open() returns ENOLCK where it should return
3856 * zero, but it has no lock to return.
3858 if (rep->lock_policy_res2 == ENOLCK)
3859 rep->lock_policy_res2 = 0;
3861 RETURN(ELDLM_LOCK_ABORTED);
3865 * IT_OPEN may return lock on cross-node dentry that we want to
3866 * hold during attr retrival -bzzz
3868 if (lockh[0].cookie == 0)
3869 RETURN(ELDLM_LOCK_ABORTED);
3873 getattr_part = MDS_INODELOCK_LOOKUP;
3876 getattr_part |= MDS_INODELOCK_LOOKUP;
3878 fixup_handle_for_resent_req(req, MDS_REQ_INTENT_LOCKREQ_OFF,
3879 lock, &new_lock, lockh);
3880 rep->lock_policy_res2 = mds_getattr_lock(req, offset, lockh,
3882 /* FIXME: LDLM can set req->rq_status. MDS sets
3883 policy_res{1,2} with disposition and status.
3884 - replay: returns 0 & req->status is old status
3885 - otherwise: returns req->status */
3886 if (intent_disposition(rep, DISP_LOOKUP_NEG))
3887 rep->lock_policy_res2 = 0;
3888 if (!intent_disposition(rep, DISP_LOOKUP_POS) ||
3889 rep->lock_policy_res2)
3890 RETURN(ELDLM_LOCK_ABORTED);
3891 if (req->rq_status != 0) {
3893 rep->lock_policy_res2 = req->rq_status;
3894 RETURN(ELDLM_LOCK_ABORTED);
3898 rc = mds_lock_and_check_slave(offset, req, lockh);
3899 if ((rep->lock_policy_res2 = rc)) {
3901 rep->lock_policy_res2 = 0;
3902 RETURN(ELDLM_LOCK_ABORTED);
3906 CERROR("Unhandled intent "LPD64"\n", it->opc);
3910 /* By this point, whatever function we called above must have either
3911 * filled in 'lockh', been an intent replay, or returned an error. We
3912 * want to allow replayed RPCs to not get a lock, since we would just
3913 * drop it below anyways because lock replay is done separately by the
3914 * client afterwards. For regular RPCs we want to give the new lock to
3915 * the client instead of whatever lock it was about to get. */
3916 if (new_lock == NULL)
3917 new_lock = ldlm_handle2lock(&lockh[0]);
3918 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
3921 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
3922 it->opc, lockh[0].cookie);
3924 /* If we've already given this lock to a client once, then we should
3925 * have no readers or writers. Otherwise, we should have one reader
3926 * _or_ writer ref (which will be zeroed below) before returning the
3927 * lock to a client. */
3928 if (new_lock->l_export == req->rq_export) {
3929 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
3931 LASSERT(new_lock->l_export == NULL);
3932 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
3937 if (new_lock->l_export == req->rq_export) {
3938 /* Already gave this to the client, which means that we
3939 * reconstructed a reply. */
3940 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
3942 RETURN(ELDLM_LOCK_REPLACED);
3945 /* Fixup the lock to be given to the client */
3946 l_lock(&new_lock->l_resource->lr_namespace->ns_lock);
3947 new_lock->l_readers = 0;
3948 new_lock->l_writers = 0;
3950 new_lock->l_export = class_export_get(req->rq_export);
3951 list_add(&new_lock->l_export_chain,
3952 &new_lock->l_export->exp_ldlm_data.led_held_locks);
/* Inherit the client-visible callbacks and remote handle from the
 * speculative lock being replaced. */
3954 new_lock->l_blocking_ast = lock->l_blocking_ast;
3955 new_lock->l_completion_ast = lock->l_completion_ast;
3957 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
3958 sizeof(lock->l_remote_handle));
3960 new_lock->l_flags &= ~LDLM_FL_LOCAL;
3962 LDLM_LOCK_PUT(new_lock);
3963 l_unlock(&new_lock->l_resource->lr_namespace->ns_lock);
3965 RETURN(ELDLM_LOCK_REPLACED);
/* OBD attach method for the "mds" device type: initialize the deny-list,
 * register /proc entries, and allocate MD stats. */
3968 int mds_attach(struct obd_device *dev, obd_count len, void *data)
3970 struct lprocfs_static_vars lvars;
3972 struct mds_obd *mds = &dev->u.mds;
3974 spin_lock_init(&mds->mds_denylist_lock);
3975 INIT_LIST_HEAD(&mds->mds_denylist);
3977 lprocfs_init_multi_vars(0, &lvars);
3979 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
3983 return lprocfs_alloc_md_stats(dev, 0);
/* OBD detach method: release MD stats and /proc entries. */
3986 int mds_detach(struct obd_device *dev)
3988 lprocfs_free_md_stats(dev);
3989 return lprocfs_obd_detach(dev);
/* Attach method for the "mdt" (service) device type: /proc only. */
3992 int mdt_attach(struct obd_device *dev, obd_count len, void *data)
3994 struct lprocfs_static_vars lvars;
3996 lprocfs_init_multi_vars(1, &lvars);
3997 return lprocfs_obd_attach(dev, lvars.obd_vars);
/* Detach method for the "mdt" device type. */
4000 int mdt_detach(struct obd_device *dev)
4002 return lprocfs_obd_detach(dev);
/*
 * mdt_setup() - start the three ptlrpc services that feed mds_handle():
 * the main "mds" service, the "mds_setattr" service, and the
 * "mds_readpage" service, each with MDT_NUM_THREADS worker threads.
 * On failure, services already started are unregistered in reverse order.
 */
4005 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
4007 struct mds_obd *mds = &obd->u.mds;
4012 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
4013 MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
4014 MDS_SERVICE_WATCHDOG_TIMEOUT,
4015 mds_handle, "mds", obd->obd_proc_entry);
4017 if (!mds->mds_service) {
4018 CERROR("failed to start service\n");
4022 rc = ptlrpc_start_n_threads(obd, mds->mds_service, MDT_NUM_THREADS,
4025 GOTO(err_thread, rc);
/* Separate portal for setattr so it cannot starve other requests. */
4027 mds->mds_setattr_service =
4028 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
4029 MDS_SETATTR_PORTAL, MDC_REPLY_PORTAL,
4030 MDS_SERVICE_WATCHDOG_TIMEOUT,
4031 mds_handle, "mds_setattr",
4032 obd->obd_proc_entry);
4033 if (!mds->mds_setattr_service) {
4034 CERROR("failed to start getattr service\n");
4035 GOTO(err_thread, rc = -ENOMEM);
4038 rc = ptlrpc_start_n_threads(obd, mds->mds_setattr_service,
4039 MDT_NUM_THREADS, "ll_mdt_attr");
4041 GOTO(err_thread2, rc);
/* Separate portal for readdir/readpage traffic. */
4043 mds->mds_readpage_service =
4044 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
4045 MDS_READPAGE_PORTAL, MDC_REPLY_PORTAL,
4046 MDS_SERVICE_WATCHDOG_TIMEOUT,
4047 mds_handle, "mds_readpage",
4048 obd->obd_proc_entry);
4049 if (!mds->mds_readpage_service) {
4050 CERROR("failed to start readpage service\n");
4051 GOTO(err_thread2, rc = -ENOMEM);
4054 rc = ptlrpc_start_n_threads(obd, mds->mds_readpage_service,
4055 MDT_NUM_THREADS, "ll_mdt_rdpg");
4058 GOTO(err_thread3, rc);
/* ---- error unwinding ---- */
4063 ptlrpc_unregister_service(mds->mds_readpage_service);
4065 ptlrpc_unregister_service(mds->mds_setattr_service);
4067 ptlrpc_unregister_service(mds->mds_service);
/*
 * Cleanup hook for "mdt": for each service started in mdt_setup(), stop
 * its worker threads and then unregister it, in reverse setup order
 * (readpage, setattr, main request service).
 */
4071 static int mdt_cleanup(struct obd_device *obd, int flags)
4073         struct mds_obd *mds = &obd->u.mds;
4076         ptlrpc_stop_all_threads(mds->mds_readpage_service);
4077         ptlrpc_unregister_service(mds->mds_readpage_service);
4079         ptlrpc_stop_all_threads(mds->mds_setattr_service);
4080         ptlrpc_unregister_service(mds->mds_setattr_service);
4082         ptlrpc_stop_all_threads(mds->mds_service);
4083         ptlrpc_unregister_service(mds->mds_service);
/*
 * lvfs callback: resolve an (ino, generation) pair to a dentry via
 * mds_id2dentry().  @data is the obd_device registered with lvfs.
 * NOTE(review): the lines filling @id from ino/gen are elided in this
 * excerpt -- confirm against the full source.
 */
4088 static struct dentry *mds_lvfs_id2dentry(__u64 ino, __u32 gen,
4089                                          __u64 gr, void *data)
4091         struct lustre_id id;
4092         struct obd_device *obd = data;
4096         return mds_id2dentry(obd, &id, NULL);
/*
 * o_get_info handler: answer string-keyed queries about this MDS.
 * Supported keys: "reint_log" (llog reint context), "cache_sb"
 * (superblock pointer), "mdsize" (max MD size), "mdsnum" (MDS number),
 * "rootid" (root lustre_id).  For each key the value and *valsize are
 * filled in; unknown keys fall through to the "invalid key" debug path.
 */
4099 static int mds_get_info(struct obd_export *exp, __u32 keylen,
4100                         void *key, __u32 *valsize, void *val)
4102         struct obd_device *obd;
4103         struct mds_obd *mds;
4106         obd = class_exp2obd(exp);
/* export did not map to a device -- reject the stale client handle */
4110                 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
4111                        exp->exp_handle.h_cookie);
4115         if (keylen >= strlen("reint_log") && memcmp(key, "reint_log", 9) == 0) {
4116                 /* get log_context handle. */
4117                 unsigned long *llh_handle = val;
4118                 *valsize = sizeof(unsigned long);
4119                 *llh_handle = (unsigned long)obd->obd_llog_ctxt[LLOG_REINT_ORIG_CTXT];
4122         if (keylen >= strlen("cache_sb") && memcmp(key, "cache_sb", 8) == 0) {
4123                 /* get log_context handle. */
4124                 unsigned long *sb = val;
4125                 *valsize = sizeof(unsigned long);
4126                 *sb = (unsigned long)obd->u.mds.mds_sb;
/* NOTE(review): this branch passes keylen (not 6) to memcmp, unlike the
 * fixed-length compares above and the strcmp()s below; if keylen > 7 it
 * reads past the "mdsize" literal.  Looks like it should be
 * memcmp(key, "mdsize", 6) or strcmp -- verify against callers. */
4130         if (keylen >= strlen("mdsize") && memcmp(key, "mdsize", keylen) == 0) {
4131                 __u32 *mdsize = val;
4132                 *valsize = sizeof(*mdsize);
4133                 *mdsize = mds->mds_max_mdsize;
4137         if (keylen >= strlen("mdsnum") && strcmp(key, "mdsnum") == 0) {
4138                 __u32 *mdsnum = val;
4139                 *valsize = sizeof(*mdsnum);
4140                 *mdsnum = mds->mds_num;
4144         if (keylen >= strlen("rootid") && strcmp(key, "rootid") == 0) {
4145                 struct lustre_id *rootid = val;
4146                 *valsize = sizeof(struct lustre_id);
4147                 *rootid = mds->mds_rootid;
4151         CDEBUG(D_IOCTL, "invalid key\n");
/* lvfs callback table: lets the generic lvfs layer resolve ids to dentries. */
4155 struct lvfs_callback_ops mds_lvfs_ops = {
4156         l_id2dentry:     mds_lvfs_id2dentry,
/* Forward declarations for the bulk-I/O prep/commit hooks referenced by
 * mds_obd_ops below; their definitions live elsewhere in the MDS code. */
4159 int mds_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
4160                int objcount, struct obd_ioobj *obj,
4161                int niocount, struct niobuf_remote *nb,
4162                struct niobuf_local *res,
4163                struct obd_trans_info *oti);
4165 int mds_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
4166                  int objcount, struct obd_ioobj *obj, int niocount,
4167                  struct niobuf_local *res, struct obd_trans_info *oti,
4170 /* use obd ops to offer management infrastructure */
/* Method table for the "mds" obd type: wires the MDS entry points into
 * the generic obd class (attach/setup/connect/llog/IO/etc.). */
4171 static struct obd_ops mds_obd_ops = {
4172         .o_owner           = THIS_MODULE,
4173         .o_attach          = mds_attach,
4174         .o_detach          = mds_detach,
4175         .o_connect         = mds_connect,
4176         .o_connect_post    = mds_connect_post,
4177         .o_init_export     = mds_init_export,
4178         .o_destroy_export  = mds_destroy_export,
4179         .o_disconnect      = mds_disconnect,
4180         .o_setup           = mds_setup,
4181         .o_precleanup      = mds_precleanup,
4182         .o_cleanup         = mds_cleanup,
4183         .o_process_config  = mds_process_config,
4184         .o_postrecov       = mds_postrecov,
4185         .o_statfs          = mds_obd_statfs,
4186         .o_iocontrol       = mds_iocontrol,
4187         .o_create          = mds_obd_create,
4188         .o_destroy         = mds_obd_destroy,
4189         .o_llog_init       = mds_llog_init,
4190         .o_llog_finish     = mds_llog_finish,
4191         .o_notify          = mds_notify,
4192         .o_get_info        = mds_get_info,
4193         .o_set_info        = mds_set_info,
4194         .o_preprw          = mds_preprw,
4195         .o_commitrw        = mds_commitrw,
/* Method table for the "mdt" obd type: only lifecycle hooks; request
 * handling is provided by the services started in mdt_setup(). */
4198 static struct obd_ops mdt_obd_ops = {
4199         .o_owner           = THIS_MODULE,
4200         .o_attach          = mdt_attach,
4201         .o_detach          = mdt_detach,
4202         .o_setup           = mdt_setup,
4203         .o_cleanup         = mdt_cleanup,
/*
 * Module init: set up the LSD (lustre security descriptor) cache and
 * register both obd types this module provides -- "mds" (variable set 0)
 * and "mdt" (variable set 1) -- with the obd class layer.
 * NOTE(review): return-value handling of class_register_type() is not
 * visible in this excerpt.
 */
4206 static int __init mds_init(void)
4208         struct lprocfs_static_vars lvars;
4210         mds_init_lsd_cache();
4212         lprocfs_init_multi_vars(0, &lvars);
4213         class_register_type(&mds_obd_ops, NULL, lvars.module_vars,
4215         lprocfs_init_multi_vars(1, &lvars);
4216         class_register_type(&mdt_obd_ops, NULL, lvars.module_vars,
/* Module exit: drop the LSD cache and unregister both obd types
 * registered in mds_init(). */
4222 static void /*__exit*/ mds_exit(void)
4224         mds_cleanup_lsd_cache();
4226         class_unregister_type(LUSTRE_MDS_NAME);
4227         class_unregister_type(LUSTRE_MDT_NAME);
/* Kernel module metadata and entry/exit point registration. */
4230 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
4231 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
4232 MODULE_LICENSE("GPL");
4234 module_init(mds_init);
4235 module_exit(mds_exit);