1 /* -*- mode: c; c-basic-offset: 8; indent-tabs-mode: nil; -*-
2 * vim:expandtab:shiftwidth=8:tabstop=8:
5 * Lustre Metadata Server (mds) request handler
7 * Copyright (c) 2001-2003 Cluster File Systems, Inc.
8 * Author: Peter Braam <braam@clusterfs.com>
9 * Author: Andreas Dilger <adilger@clusterfs.com>
10 * Author: Phil Schwan <phil@clusterfs.com>
11 * Author: Mike Shaver <shaver@clusterfs.com>
13 * This file is part of Lustre, http://www.lustre.org.
15 * Lustre is free software; you can redistribute it and/or
16 * modify it under the terms of version 2 of the GNU General Public
17 * License as published by the Free Software Foundation.
19 * Lustre is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
24 * You should have received a copy of the GNU General Public License
25 * along with Lustre; if not, write to the Free Software
26 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
30 # define EXPORT_SYMTAB
32 #define DEBUG_SUBSYSTEM S_MDS
34 #include <linux/module.h>
35 #include <linux/lustre_mds.h>
36 #include <linux/lustre_dlm.h>
37 #include <linux/init.h>
38 #include <linux/obd_class.h>
39 #include <linux/random.h>
41 #include <linux/jbd.h>
42 #include <linux/namei.h>
43 #include <linux/ext3_fs.h>
44 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0))
45 # include <linux/smp_lock.h>
46 # include <linux/buffer_head.h>
47 # include <linux/workqueue.h>
48 # include <linux/mount.h>
50 # include <linux/locks.h>
52 #include <linux/obd_lov.h>
53 #include <linux/obd_ost.h>
54 #include <linux/lustre_mds.h>
55 #include <linux/lustre_fsfilt.h>
56 #include <linux/lprocfs_status.h>
57 #include <linux/lustre_commit_confd.h>
58 #include <linux/lustre_acl.h>
59 #include <linux/lustre_sec.h>
60 #include <linux/lustre_gs.h>
61 #include "mds_internal.h"
63 static int mds_intent_policy(struct ldlm_namespace *ns,
64 struct ldlm_lock **lockp, void *req_cookie,
65 ldlm_mode_t mode, int flags, void *data);
66 static int mds_postsetup(struct obd_device *obd);
67 static int mds_cleanup(struct obd_device *obd, int flags);
70 /* Assumes caller has already pushed into the kernel filesystem context */
/*
 * mds_sendpage(): read 'count' bytes of directory data from 'file',
 * starting at page-aligned 'offset', into freshly allocated pages and
 * bulk-PUT them to the client that issued 'req'.  On bulk failure or
 * timeout the client export is evicted (ptlrpc_fail_export).
 * Returns 0 on success or a negative errno.
 * NOTE(review): this dump is missing several original lines (the
 * 'pages' declaration, braces, and the out/out_free/cleanup_buf/
 * abort_bulk labels) — verify against the full file.
 */
71 static int mds_sendpage(struct ptlrpc_request *req, struct file *file,
72 loff_t offset, int count)
74 struct ptlrpc_bulk_desc *desc;
75 struct l_wait_info lwi;
77 int rc = 0, npages, i, tmpcount, tmpsize = 0;
80 LASSERT((offset & (PAGE_SIZE - 1)) == 0); /* I'm dubious about this */
/* round the byte count up to whole pages */
82 npages = (count + PAGE_SIZE - 1) >> PAGE_SHIFT;
83 OBD_ALLOC(pages, sizeof(*pages) * npages);
85 GOTO(out, rc = -ENOMEM);
87 desc = ptlrpc_prep_bulk_exp(req, npages, BULK_PUT_SOURCE,
90 GOTO(out_free, rc = -ENOMEM);
/* first pass: allocate one page per chunk and attach it to the bulk
 * descriptor; 'tmpsize' is the number of valid bytes in each page */
92 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
93 tmpsize = tmpcount > PAGE_SIZE ? PAGE_SIZE : tmpcount;
95 pages[i] = alloc_pages(GFP_KERNEL, 0);
97 GOTO(cleanup_buf, rc = -ENOMEM);
99 ptlrpc_prep_bulk_page(desc, pages[i], 0, tmpsize);
/* second pass: fill the pages with directory data; fsfilt_readpage()
 * advances 'offset' as it reads */
102 for (i = 0, tmpcount = count; i < npages; i++, tmpcount -= tmpsize) {
103 tmpsize = tmpcount > PAGE_SIZE ? PAGE_SIZE : tmpcount;
104 CDEBUG(D_EXT2, "reading %u@%llu from dir %lu (size %llu)\n",
105 tmpsize, offset, file->f_dentry->d_inode->i_ino,
106 file->f_dentry->d_inode->i_size);
108 rc = fsfilt_readpage(req->rq_export->exp_obd, file,
109 kmap(pages[i]), tmpsize, &offset);
113 GOTO(cleanup_buf, rc = -EIO);
116 LASSERT(desc->bd_nob == count);
118 rc = ptlrpc_start_bulk_transfer(desc);
120 GOTO(cleanup_buf, rc);
/* fault-injection hook used by recovery tests */
122 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_SENDPAGE)) {
123 CERROR("obd_fail_loc=%x, fail operation rc=%d\n",
124 OBD_FAIL_MDS_SENDPAGE, rc = -EIO);
125 GOTO(abort_bulk, rc);
/* wait (bounded by a fraction of obd_timeout) for the bulk to drain */
128 lwi = LWI_TIMEOUT(obd_timeout * HZ / 4, NULL, NULL);
129 rc = l_wait_event(desc->bd_waitq, !ptlrpc_bulk_active(desc), &lwi);
130 LASSERT (rc == 0 || rc == -ETIMEDOUT);
133 if (desc->bd_success &&
134 desc->bd_nob_transferred == count)
135 GOTO(cleanup_buf, rc);
137 rc = -ETIMEDOUT; /* XXX should this be a different errno? */
/* bulk did not complete: log, then evict the client export so it
 * reconnects and replays rather than hanging on a dead transfer */
140 DEBUG_REQ(D_ERROR, req, "bulk failed: %s %d(%d), evicting %s@%s\n",
141 (rc == -ETIMEDOUT) ? "timeout" : "network error",
142 desc->bd_nob_transferred, count,
143 req->rq_export->exp_client_uuid.uuid,
144 req->rq_export->exp_connection->c_remote_uuid.uuid);
146 ptlrpc_fail_export(req->rq_export);
150 ptlrpc_abort_bulk (desc);
152 for (i = 0; i < npages; i++)
154 __free_pages(pages[i], 0);
156 ptlrpc_free_bulk(desc);
158 OBD_FREE(pages, sizeof(*pages) * npages);
163 extern char *ldlm_lockname[];
/*
 * mds_lock_mode_for_dir(): given the caller's intended lock 'mode' on a
 * directory dentry, decide the mode for the second, whole-directory lock
 * that guards against concurrent splitting (see the big comment below).
 * Returns the chosen ldlm lock mode in 'ret_mode' (0 means no extra
 * lock).  NOTE(review): the tail of this function is not visible in this
 * dump — the return path must be checked in the full file.
 */
165 int mds_lock_mode_for_dir(struct obd_device *obd,
166 struct dentry *dentry, int mode)
168 int ret_mode = 0, split;
170 /* any dir access needs couple locks:
171 * 1) on part of dir we gonna lookup/modify in
172 * 2) on a whole dir to protect it from concurrent splitting
173 * and to flush client's cache for readdir()
174 * so, for a given mode and dentry this routine decides what
175 * lock mode to use for lock #2:
176 * 1) if caller's gonna lookup in dir then we need to protect
177 * dir from being splitted only - LCK_CR
178 * 2) if caller's gonna modify dir then we need to protect
179 * dir from being splitted and to flush cache - LCK_CW
180 * 3) if caller's gonna modify dir and that dir seems ready
181 * for splitting then we need to protect it from any
182 * type of access (lookup/modify/split) - LCK_EX -bzzz */
184 split = mds_splitting_expected(obd, dentry);
187 * it is important to check here only for MDS_NO_SPLITTABLE. The reason
188 * is that MDS_NO_SPLITTABLE means dir is not splittable in principle
189 * and another thread will not split it on the quiet. But if we have
190 * MDS_NO_SPLIT_EXPECTED, this means, that dir may be splitted anytime,
191 * but not now (for current thread) and we should consider that it can
192 * happen soon and go that branch which can yield LCK_EX to protect from
193 * possible splitting.
195 if (split == MDS_NO_SPLITTABLE) {
197 * this inode won't be splitted. so we need not to protect from
198 * just flush client's cache on modification.
/* directory can potentially split: pick mode by the caller's intent */
205 if (mode == LCK_EX) {
207 } else if (mode == LCK_PR) {
209 } else if (mode == LCK_PW) {
211 * caller gonna modify directory. We use concurrent
212 * write lock here to retract client's cache for
215 if (split == MDS_EXPECT_SPLIT) {
217 * splitting possible. serialize any access the
218 * idea is that first one seen dir is splittable
219 * is given exclusive lock and split
220 * directory. caller passes lock mode to
221 * mds_try_to_split_dir() and splitting would be
222 * done with exclusive lock only -bzzz.
224 CDEBUG(D_OTHER, "%s: gonna split %lu/%lu\n",
226 (unsigned long)dentry->d_inode->i_ino,
227 (unsigned long)dentry->d_inode->i_generation);
238 /* only valid locked dentries or errors should be returned */
/*
 * mds_id2locked_dentry(): resolve 'id' to a dentry (via mds_id2dentry)
 * and take an inodebits DLM lock ('lockpart') on it in 'lock_mode',
 * returning the lock handle(s) in 'lockh'.  For PDIROPS directories a
 * second, whole-directory lock is taken first (handle in lockh[1]) with
 * the mode chosen by mds_lock_mode_for_dir(), and the per-name lock
 * resource is keyed by full_name_hash(name).
 * Returns the locked dentry, or ERR_PTR() on failure (-ENOLCK/-EIO on
 * enqueue errors, -ENOENT for an already-unlinked inode).
 */
239 struct dentry *mds_id2locked_dentry(struct obd_device *obd, struct lustre_id *id,
240 struct vfsmount **mnt, int lock_mode,
241 struct lustre_handle *lockh, int *mode,
242 char *name, int namelen, __u64 lockpart)
244 struct dentry *de = mds_id2dentry(obd, id, mnt), *retval = de;
245 ldlm_policy_data_t policy = { .l_inodebits = { lockpart } };
246 struct ldlm_res_id res_id = { .name = {0} };
247 int flags = LDLM_FL_ATOMIC_CB, rc;
/* lock resource is named by the fid/group of the object */
254 res_id.name[0] = id_fid(id);
255 res_id.name[1] = id_group(id);
/* PDIROPS: take the whole-dir UPDATE lock before the per-name lock */
258 if (name && IS_PDIROPS(de->d_inode)) {
259 ldlm_policy_data_t cpolicy =
260 { .l_inodebits = { MDS_INODELOCK_UPDATE } };
261 LASSERT(mode != NULL);
262 *mode = mds_lock_mode_for_dir(obd, de, lock_mode);
264 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace,
266 &cpolicy, *mode, &flags,
268 ldlm_completion_ast, NULL, NULL,
269 NULL, 0, NULL, lockh + 1);
270 if (rc != ELDLM_OK) {
272 RETURN(ERR_PTR(-ENOLCK));
275 flags = LDLM_FL_ATOMIC_CB;
/* name hash distinguishes per-entry locks within the directory */
277 res_id.name[2] = full_name_hash((unsigned char *)name, namelen);
279 CDEBUG(D_INFO, "take lock on "DLID4":"LPX64"\n",
280 OLID4(id), res_id.name[2]);
283 #warning "No PDIROPS support in the kernel"
285 rc = ldlm_cli_enqueue(NULL, NULL, obd->obd_namespace, res_id,
286 LDLM_IBITS, &policy, lock_mode, &flags,
287 mds_blocking_ast, ldlm_completion_ast,
288 NULL, NULL, NULL, 0, NULL, lockh);
289 if (rc != ELDLM_OK) {
291 retval = ERR_PTR(-EIO); /* XXX translate ldlm code */
/* drop the whole-dir lock taken above on failure */
294 ldlm_lock_decref(lockh + 1, *mode);
296 } else if (de->d_inode && de->d_inode->i_nlink == 0) {
297 /* as sometimes we lookup inode by ino/generation through
298 iopen mechanism, it's possible to find already unlinked
299 inode with nlink == 0. let's interpret the case as
301 CWARN("found already unlinked inode %lu/%u\n",
302 de->d_inode->i_ino, de->d_inode->i_generation);
304 retval = ERR_PTR(-ENOENT);
305 ldlm_lock_decref(lockh, lock_mode);
308 ldlm_lock_decref(lockh + 1, *mode);
315 #ifndef DCACHE_DISCONNECTED
316 #define DCACHE_DISCONNECTED DCACHE_NFSD_DISCONNECTED
319 /* Look up an entry by inode number. This function ONLY returns valid dget'd
320 * dentries with an initialized inode or errors */
/*
 * mds_id2dentry(): map a lustre_id to a local dget'd dentry.  The local
 * ino/generation come either from the id itself or, when only a fid is
 * present (reint case), from the fid map.  The inode generation is
 * verified unless it is 0 or the id is the root id; on mismatch the
 * error message distinguishes "wrong MDS" from "inode recreated".
 * Returns the dentry or ERR_PTR(-ESTALE/-ENOENT).
 */
321 struct dentry *mds_id2dentry(struct obd_device *obd, struct lustre_id *id,
322 struct vfsmount **mnt)
324 struct mds_obd *mds = &obd->u.mds;
325 struct dentry *result;
327 unsigned long ino = 0;
328 __u32 generation = 0;
/* reint case: no local ino yet, resolve through the fid map */
331 if (!id_ino(id) && id_fid(id)) {
332 struct lustre_id *lid;
334 /* if this is reint case we should use fidmap for resolving
335 * correct local store cookie. */
336 lid = mds_fidmap_lookup(obd, id);
338 RETURN(ERR_PTR(-ENOENT));
340 ino = (unsigned long)id_ino(lid);
341 generation = (__u32)id_gen(lid);
343 CDEBUG(D_DENTRY, "fidmap resolved "DLID4"->"DLID4"\n",
344 OLID4(id), OLID4(lid));
346 } else if (id_ino(id)) {
347 ino = (unsigned long)id_ino(id);
348 generation = (__u32)id_gen(id);
350 CERROR("invalid id for lookup "
351 DLID4"\n", OLID4(id));
356 RETURN(ERR_PTR(-ESTALE));
/* lookup through the iopen-style "0x<ino>" pseudo name */
358 snprintf(idname, sizeof(idname), "0x%lx", ino);
360 CDEBUG(D_DENTRY, "--> mds_id2dentry: ino/gen %lu/%u, sb %p\n",
361 ino, generation, mds->mds_sb);
363 /* under ext3 this is neither supposed to return bad inodes nor NULL
365 result = ll_lookup_one_len(idname, mds->mds_id_de,
370 inode = result->d_inode;
372 RETURN(ERR_PTR(-ENOENT));
374 if (is_bad_inode(inode)) {
375 CERROR("bad inode returned %lu/%u\n",
376 inode->i_ino, inode->i_generation);
378 RETURN(ERR_PTR(-ENOENT));
381 /* here we disabled generation check, as root inode i_generation
382 * of cache mds and real mds are different. */
383 if (id_fid(id) != id_fid(&mds->mds_rootid) && generation != 0 &&
384 inode->i_generation != generation) {
385 /* we didn't find the right inode.. */
386 if (id_group(id) != mds->mds_num) {
387 CERROR("bad inode %lu found, link: %lu, ct: %d, generation "
388 "%u != %u, mds %u != %u, request to wrong MDS?\n",
389 inode->i_ino, (unsigned long)inode->i_nlink,
390 atomic_read(&inode->i_count), inode->i_generation,
391 generation, mds->mds_num, (unsigned)id_group(id));
393 CERROR("bad inode %lu found, link: %lu, ct: %d, generation "
394 "%u != %u, inode is recreated while request handled?\n",
395 inode->i_ino, (unsigned long)inode->i_nlink,
396 atomic_read(&inode->i_count), inode->i_generation,
400 RETURN(ERR_PTR(-ENOENT));
404 *mnt = mds->mds_vfsmnt;
/*
 * mds_req_add_idmapping(): for a remote-realm client, record the
 * (remote uid/gid -> local uid/gid) mapping derived from the request's
 * security descriptor and the LSD of the mapped uid.  No-op for local
 * clients.  Returns 0 or a negative errno (exact values of the error
 * paths are outside this view).
 */
412 int mds_req_add_idmapping(struct ptlrpc_request *req,
413 struct mds_export_data *med)
415 struct mds_req_sec_desc *rsd;
416 struct lustre_sec_desc *lsd;
/* only remote-realm clients carry an id mapping */
419 if (!med->med_remote)
422 /* maybe we should do it more completely: invalidate the gss ctxt? */
423 if (req->rq_mapped_uid == MDS_IDMAP_NOTFOUND) {
424 CWARN("didn't find mapped uid\n");
428 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
430 CERROR("Can't unpack security desc\n");
434 lsd = mds_get_lsd(req->rq_mapped_uid);
436 CERROR("can't get LSD(%u), no mapping added\n",
441 rc = mds_idmap_add(med->med_idmap, rsd->rsd_uid, lsd->lsd_uid,
442 rsd->rsd_gid, lsd->lsd_gid);
/*
 * mds_req_del_idmapping(): inverse of mds_req_add_idmapping() — remove
 * the uid/gid mapping for a remote-realm client based on the request's
 * security descriptor and its mapped-uid LSD.  No-op for local clients.
 */
448 int mds_req_del_idmapping(struct ptlrpc_request *req,
449 struct mds_export_data *med)
451 struct mds_req_sec_desc *rsd;
452 struct lustre_sec_desc *lsd;
455 if (!med->med_remote)
458 rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
460 CERROR("Can't unpack security desc\n");
/* caller guarantees the uid was mapped at add time */
464 LASSERT(req->rq_mapped_uid != -1);
465 lsd = mds_get_lsd(req->rq_mapped_uid);
467 CERROR("can't get LSD(%u), no idmapping deleted\n",
472 rc = mds_idmap_del(med->med_idmap, rsd->rsd_uid, lsd->lsd_uid,
473 rsd->rsd_gid, lsd->lsd_gid);
/*
 * mds_init_export_data(): decide at connect time whether the client is
 * treated as "remote" (needs uid/gid mapping) or "local", based on the
 * client's requested OBD_CONNECT_REMOTE/LOCAL flags and whether the
 * request actually came from a remote realm.  Records the decision and
 * the client's nllu/nllg in 'med', allocates the idmap for remote
 * clients, and reflects the final choice back in the reply's connect
 * flags.  Idempotent across reconnects (med_initialized guard).
 */
478 static int mds_init_export_data(struct ptlrpc_request *req,
479 struct mds_export_data *med)
481 struct obd_connect_data *data, *reply;
482 int ask_remote, ask_local;
485 data = lustre_msg_buf(req->rq_reqmsg, 5, sizeof(*data));
486 reply = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*data));
487 LASSERT(data && reply);
489 if (med->med_initialized) {
490 CDEBUG(D_SEC, "med already initialized, reconnect?\n");
494 ask_remote = data->ocd_connect_flags & OBD_CONNECT_REMOTE;
495 ask_local = data->ocd_connect_flags & OBD_CONNECT_LOCAL;
497 /* currently the policy is simple: satisfy client as possible
/* unauthenticated (null sec) clients are always treated as local */
500 if (req->rq_auth_uid == -1) {
502 CWARN("null sec is used, force to be local\n");
506 if (!req->rq_remote_realm)
507 CWARN("local realm asked to be remote\n");
509 } else if (ask_local) {
510 if (req->rq_remote_realm)
511 CWARN("remote realm asked to be local\n");
/* no explicit preference: follow the realm the request came from */
514 med->med_remote = (req->rq_remote_realm != 0);
517 med->med_nllu = data->ocd_nllu[0];
518 med->med_nllg = data->ocd_nllu[1];
520 med->med_initialized = 1;
/* tell the client which mode was actually granted */
522 reply->ocd_connect_flags &= ~(OBD_CONNECT_REMOTE | OBD_CONNECT_LOCAL);
523 if (med->med_remote) {
525 med->med_idmap = mds_idmap_alloc();
528 CERROR("Failed to alloc idmap, following request from "
529 "this client will be refused\n");
531 reply->ocd_connect_flags |= OBD_CONNECT_REMOTE;
532 CDEBUG(D_SEC, "set client as remote\n");
534 reply->ocd_connect_flags |= OBD_CONNECT_LOCAL;
535 CDEBUG(D_SEC, "set client as local\n");
/*
 * mds_free_export_data(): release the per-export idmap of a remote
 * client and clear the pointer so it cannot be freed twice.
 */
541 static void mds_free_export_data(struct mds_export_data *med)
546 LASSERT(med->med_remote);
547 mds_idmap_free(med->med_idmap);
548 med->med_idmap = NULL;
551 /* Establish a connection to the MDS.
553 * This will set up an export structure for the client to hold state data about
554 * that client, like open files, the last operation number it did on the server,
/*
 * mds_connect(): handle a client connect — create the export via
 * class_connect(), allocate and attach the per-client mds_client_data
 * (mcd), and register the client in the last_rcvd file through
 * mds_client_add().  On failure the mcd is freed and the export
 * disconnected.  Returns 0 or a negative errno (-ENOMEM on allocation
 * failure; other paths are outside this view).
 */
557 static int mds_connect(struct lustre_handle *conn, struct obd_device *obd,
558 struct obd_uuid *cluuid, struct obd_connect_data *data,
561 struct mds_export_data *med;
562 struct mds_client_data *mcd;
563 struct obd_export *exp;
567 if (!conn || !obd || !cluuid)
570 /* XXX There is a small race between checking the list and adding a new
571 * connection for the same UUID, but the real threat (list corruption
572 * when multiple different clients connect) is solved.
574 * There is a second race between adding the export to the list, and
575 * filling in the client data below. Hence skipping the case of NULL
576 * mcd above. We should already be controlling multiple connects at the
577 * client, and we can't hold the spinlock over memory allocations
578 * without risk of deadlocking.
580 rc = class_connect(conn, obd, cluuid);
583 exp = class_conn2export(conn);
585 LASSERT(exp != NULL);
586 med = &exp->exp_mds_data;
588 OBD_ALLOC(mcd, sizeof(*mcd));
590 CERROR("%s: out of memory for client data.\n",
592 GOTO(out, rc = -ENOMEM);
595 memcpy(mcd->mcd_uuid, cluuid, sizeof(mcd->mcd_uuid));
/* persist this client in the last_rcvd file (-1 = pick a free slot) */
598 rc = mds_client_add(obd, &obd->u.mds, med, -1);
/* error path: undo the allocation and the class-level connect */
606 OBD_FREE(mcd, sizeof(*mcd));
607 class_disconnect(exp, 0);
609 class_export_put(exp);
/*
 * mds_connect_post(): post-connect fixups.  On an initial connect to an
 * already-existing export, reset the replay-related mcd counters
 * (bug 6102).  For real (non-MDS) clients, bump mds_real_clients and,
 * if an MD (LMV) target name is configured, connect to it.
 */
614 static int mds_connect_post(struct obd_export *exp, unsigned initial,
617 struct obd_device *obd = exp->exp_obd;
618 struct mds_obd *mds = &obd->u.mds;
619 struct mds_export_data *med;
620 struct mds_client_data *mcd;
624 med = &exp->exp_mds_data;
628 /* some one reconnect initially, we have to reset
629 * data existing export can have. bug 6102 */
630 if (mcd->mcd_last_xid != 0)
631 CDEBUG(D_HA, "initial reconnect to existing export\n");
632 mcd->mcd_last_transno = 0;
633 mcd->mcd_last_xid = 0;
634 mcd->mcd_last_close_xid = 0;
635 mcd->mcd_last_result = 0;
636 mcd->mcd_last_data = 0;
/* count real clients (not inter-MDS connections) exactly once */
639 if (!(flags & OBD_OPT_MDS_CONNECTION)) {
640 if (!(exp->exp_flags & OBD_OPT_REAL_CLIENT)) {
641 atomic_inc(&mds->mds_real_clients);
642 CDEBUG(D_OTHER,"%s: peer from %s is real client (%d)\n",
643 obd->obd_name, exp->exp_client_uuid.uuid,
644 atomic_read(&mds->mds_real_clients));
645 exp->exp_flags |= OBD_OPT_REAL_CLIENT;
647 if (mds->mds_md_name)
648 rc = mds_md_connect(obd, mds->mds_md_name);
/*
 * mds_init_export(): initialize per-export MDS state — the open-file
 * list and the spinlock protecting it.
 */
653 static int mds_init_export(struct obd_export *exp)
655 struct mds_export_data *med = &exp->exp_mds_data;
657 INIT_LIST_HEAD(&med->med_open_head);
658 spin_lock_init(&med->med_open_lock);
/*
 * mds_destroy_export(): tear down a client export.  Frees the export
 * data, then (for real clients) walks the export's open-file list and
 * force-closes every mds_file_data — which may trigger orphan unlink —
 * under the server's lvfs context, and finally releases the client's
 * last_rcvd slot.  Closing is skipped when the export belongs to the
 * server itself (self-export).
 */
662 static int mds_destroy_export(struct obd_export *export)
664 struct obd_device *obd = export->exp_obd;
665 struct mds_export_data *med = &export->exp_mds_data;
666 struct lvfs_run_ctxt saved;
670 mds_free_export_data(med);
671 target_destroy_export(export);
/* self-export: nothing of the client-cleanup below applies */
673 if (obd_uuid_equals(&export->exp_client_uuid, &obd->obd_uuid))
676 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
678 /* Close any open files (which may also cause orphan unlinking). */
679 spin_lock(&med->med_open_lock);
680 while (!list_empty(&med->med_open_head)) {
681 struct list_head *tmp = med->med_open_head.next;
682 struct mds_file_data *mfd =
683 list_entry(tmp, struct mds_file_data, mfd_list);
684 struct lustre_id sid;
686 BDEVNAME_DECLARE_STORAGE(btmp);
688 /* bug 1579: fix force-closing for 2.5 */
689 struct dentry *dentry = mfd->mfd_dentry;
/* drop the list lock while doing the (sleeping) close work */
691 list_del(&mfd->mfd_list);
692 spin_unlock(&med->med_open_lock);
694 down(&dentry->d_inode->i_sem);
695 rc = mds_read_inode_sid(obd, dentry->d_inode, &sid);
696 up(&dentry->d_inode->i_sem);
698 CERROR("Can't read inode self id, inode %lu, "
699 "rc %d\n", dentry->d_inode->i_ino, rc);
700 memset(&sid, 0, sizeof(sid));
703 /* If you change this message, be sure to update
704 * replay_single:test_46 */
705 CERROR("force closing client file handle for %.*s (%s:"
706 DLID4")\n", dentry->d_name.len, dentry->d_name.name,
707 ll_bdevname(dentry->d_inode->i_sb, btmp),
710 /* child inode->i_alloc_sem protects orphan_dec_test and
711 * is_orphan race, mds_mfd_close drops it */
712 DOWN_WRITE_I_ALLOC_SEM(dentry->d_inode);
713 rc = mds_mfd_close(NULL, 0, obd, mfd,
714 !(export->exp_flags & OBD_OPT_FAILOVER));
716 CDEBUG(D_INODE, "Error closing file: %d\n", rc);
717 spin_lock(&med->med_open_lock);
719 spin_unlock(&med->med_open_lock);
720 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
/* unless failing over, drop the client's last_rcvd slot */
724 mds_client_free(export, !(export->exp_flags & OBD_OPT_FAILOVER));
/*
 * mds_disconnect(): disconnect a client export.  Handles the inter-MDS
 * (LMV) teardown when the last real client leaves, records the
 * disconnect flags on the export, disconnects it at the class layer,
 * cancels its DLM locks, and completes (schedules) every outstanding
 * difficult reply so nothing keeps referencing the export.
 */
728 static int mds_disconnect(struct obd_export *exp, unsigned long flags)
730 unsigned long irqflags;
731 struct obd_device *obd;
736 LASSERT(exp != NULL);
737 obd = class_exp2obd(exp);
739 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
740 exp->exp_handle.h_cookie);
746 * suppress any inter-mds requests during disconnecting lmv if this is
747 * detected --force mode. This is needed to avoid endless recovery.
749 if (atomic_read(&mds->mds_real_clients) > 0 &&
750 !(exp->exp_flags & OBD_OPT_REAL_CLIENT))
751 flags |= OBD_OPT_FORCE;
/* no real client ever connected: still tear down the LMV link */
753 if (!(exp->exp_flags & OBD_OPT_REAL_CLIENT)
754 && !atomic_read(&mds->mds_real_clients)) {
755 /* there was no client at all */
756 mds_md_disconnect(obd, flags);
759 if ((exp->exp_flags & OBD_OPT_REAL_CLIENT)
760 && atomic_dec_and_test(&mds->mds_real_clients)) {
761 /* time to drop LMV connections */
762 CDEBUG(D_OTHER, "%s: last real client %s disconnected. "
763 "Disconnect from LMV now\n",
764 obd->obd_name, exp->exp_client_uuid.uuid);
765 mds_md_disconnect(obd, flags);
768 spin_lock_irqsave(&exp->exp_lock, irqflags);
769 exp->exp_flags = flags;
770 spin_unlock_irqrestore(&exp->exp_lock, irqflags);
772 /* disconnect early so that clients can't keep using export */
773 rc = class_disconnect(exp, flags);
774 ldlm_cancel_locks_for_export(exp);
776 /* complete all outstanding replies */
777 spin_lock_irqsave(&exp->exp_lock, irqflags);
778 while (!list_empty(&exp->exp_outstanding_replies)) {
779 struct ptlrpc_reply_state *rs =
780 list_entry(exp->exp_outstanding_replies.next,
781 struct ptlrpc_reply_state, rs_exp_list);
782 struct ptlrpc_service *svc = rs->rs_srv_ni->sni_service;
784 spin_lock(&svc->srv_lock);
785 list_del_init(&rs->rs_exp_list);
786 ptlrpc_schedule_difficult_reply(rs);
787 spin_unlock(&svc->srv_lock);
789 spin_unlock_irqrestore(&exp->exp_lock, irqflags);
/*
 * mds_getstatus(): MDS_GETSTATUS handler — reply with a single mds_body
 * carrying the root object id (mds_rootid) and OBD_MD_FID in 'valid'.
 */
793 static int mds_getstatus(struct ptlrpc_request *req)
795 struct mds_obd *mds = mds_req2mds(req);
796 struct mds_body *body;
800 size = sizeof(*body);
802 rc = lustre_pack_reply(req, 1, &size, NULL);
803 if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_GETSTATUS_PACK)) {
804 CERROR("mds: out of memory for message: size=%d\n", size);
805 req->rq_status = -ENOMEM; /* superfluous? */
809 body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*body));
810 body->valid |= OBD_MD_FID;
812 memcpy(&body->id1, &mds->mds_rootid, sizeof(body->id1));
815 * the last_committed and last_xid fields are filled in for all replies
816 * already - no need to do so here also.
/*
 * mds_blocking_ast(): DLM blocking callback for MDS-held locks.  On the
 * blocking event it marks the lock CBPENDING and, if the lock has no
 * readers/writers left, cancels it immediately via ldlm_cli_cancel();
 * otherwise the last unlock will trigger the cancel.  Cancel-phase
 * callbacks are a no-op.
 */
821 int mds_blocking_ast(struct ldlm_lock *lock, struct ldlm_lock_desc *desc,
822 void *data, int flag)
827 if (flag == LDLM_CB_CANCELING) {
828 /* Don't need to do anything here. */
832 /* XXX layering violation! -phil */
833 lock_res_and_lock(lock);
836 * get this: if mds_blocking_ast is racing with mds_intent_policy, such
837 * that mds_blocking_ast is called just before l_i_p takes the ns_lock,
838 * then by the time we get the lock, we might not be the correct
839 * blocking function anymore. So check, and return early, if so.
841 if (lock->l_blocking_ast != mds_blocking_ast) {
842 unlock_res_and_lock(lock);
846 lock->l_flags |= LDLM_FL_CBPENDING;
847 do_ast = (!lock->l_readers && !lock->l_writers);
848 unlock_res_and_lock(lock);
851 struct lustre_handle lockh;
854 LDLM_DEBUG(lock, "already unused, calling ldlm_cli_cancel");
855 ldlm_lock2handle(lock, &lockh);
856 rc = ldlm_cli_cancel(&lockh);
858 CERROR("ldlm_cli_cancel: %d\n", rc);
860 LDLM_DEBUG(lock, "Lock still has references, will be "
/*
 * mds_convert_md(): normalize an on-disk EA blob for the wire.  Regular
 * files always hold LOV EAs; directories hold either a MEA (split dir,
 * 'mea' != 0) or a LOV default-striping EA.  Anything else is reported
 * as an invalid EA format.
 */
866 static int mds_convert_md(struct obd_device *obd, struct inode *inode,
867 void *md, int size, int mea)
871 if (S_ISREG(inode->i_mode)) {
872 rc = mds_convert_lov_ea(obd, inode, md, size);
873 } else if (S_ISDIR(inode->i_mode)) {
875 rc = mds_convert_mea_ea(obd, inode, md, size);
877 rc = mds_convert_lov_ea(obd, inode, md, size);
880 CERROR("Invalid EA format (nor LOV or MEA) "
881 "is detected. Inode %lu/%u\n",
882 inode->i_ino, inode->i_generation);
/*
 * mds_get_md(): read the striping EA (MEA for 'mea' != 0, else LOV)
 * of 'inode' into 'md' (capacity in *size) via fsfilt, then convert it
 * with mds_convert_md().  NOTE(review): most of this function's body
 * (locking controlled by 'lock', *size updates, return value) is not
 * visible in this dump — consult the full file before relying on the
 * exact contract.
 */
888 int mds_get_md(struct obd_device *obd, struct inode *inode,
889 void *md, int *size, int lock, int mea)
898 rc = fsfilt_get_md(obd, inode, md, *size,
899 (mea ? EA_MEA : EA_LOV));
901 CERROR("Error %d reading eadata for ino %lu\n",
905 rc = mds_convert_md(obd, inode, md,
920 /* Call with lock=1 if you want mds_pack_md to take the i_sem.
921 * Call with lock=0 if the caller has already taken the i_sem. */
/*
 * mds_pack_md(): fetch the inode's striping EA into the reply buffer at
 * 'offset' and set the matching bits in the reply body: OBD_MD_FLDIREA
 * for directories / OBD_MD_FLEASIZE for files, plus OBD_MD_MEA for
 * split dirs, and record eadatasize.  The buffer must have been sized
 * by the caller; a size above mds_max_mdsize only warns.
 */
922 int mds_pack_md(struct obd_device *obd, struct lustre_msg *msg, int offset,
923 struct mds_body *body, struct inode *inode, int lock, int mea)
925 struct mds_obd *mds = &obd->u.mds;
930 lmm = lustre_msg_buf(msg, offset, 0);
932 /* Some problem with getting eadata when I sized the reply
934 CDEBUG(D_INFO, "no space reserved for inode %lu MD\n",
938 lmm_size = msg->buflens[offset];
940 /* I don't really like this, but it is a sanity check on the client
941 * MD request. However, if the client doesn't know how much space
942 * to reserve for the MD, it shouldn't be bad to have too much space.
944 if (lmm_size > mds->mds_max_mdsize) {
945 CWARN("Reading MD for inode %lu of %d bytes > max %d\n",
946 inode->i_ino, lmm_size, mds->mds_max_mdsize);
950 rc = mds_get_md(obd, inode, lmm, &lmm_size, lock, mea);
952 body->valid |= S_ISDIR(inode->i_mode) ?
953 OBD_MD_FLDIREA : OBD_MD_FLEASIZE;
956 body->valid |= OBD_MD_MEA;
958 body->eadatasize = lmm_size;
/*
 * mds_pack_link(): read a symlink's target via ->readlink() into the
 * reply buffer at reply_off + 1, NUL-terminate it, and set
 * OBD_MD_LINKNAME/eadatasize in the reply body.  A readlink result that
 * does not match the reserved length (len - 1) is logged as unexpected.
 */
965 int mds_pack_link(struct dentry *dentry, struct ptlrpc_request *req,
966 struct mds_body *repbody, int reply_off)
968 struct inode *inode = dentry->d_inode;
973 symname = lustre_msg_buf(req->rq_repmsg, reply_off + 1,0);
974 LASSERT(symname != NULL);
975 len = req->rq_repmsg->buflens[reply_off + 1];
977 rc = inode->i_op->readlink(dentry, symname, len);
979 CERROR("readlink failed: %d\n", rc);
980 } else if (rc != len - 1) {
981 CERROR ("Unexpected readlink rc %d: expecting %d\n",
985 CDEBUG(D_INODE, "read symlink dest %s\n", symname);
986 repbody->valid |= OBD_MD_LINKNAME;
987 repbody->eadatasize = rc + 1;
988 symname[rc] = 0; /* NULL terminate */
/*
 * mds_pack_xattr(): fetch one extended attribute (named in the request
 * at req_off + 1) into the reply at reply_off + 1.  The special name
 * XATTR_NAME_LUSTRE_ACL is handled through the remote-ACL userspace
 * upcall instead of ->getxattr(); its execution status travels in the
 * (repurposed) mds_body 'flags' field.  On success OBD_MD_FLXATTR and
 * eadatasize are set in the reply body.
 */
995 int mds_pack_xattr(struct dentry *dentry, struct ptlrpc_request *req,
996 struct mds_body *repbody, int req_off, int reply_off)
998 struct inode *inode = dentry->d_inode;
1004 ea_name = lustre_msg_string(req->rq_reqmsg, req_off + 1, 0);
1005 len = req->rq_repmsg->buflens[reply_off + 1];
1007 value = lustre_msg_buf(req->rq_repmsg, reply_off + 1, len);
/* remote ACL: route through the userspace upcall, not getxattr */
1011 if (!strcmp(ea_name, XATTR_NAME_LUSTRE_ACL)) {
1012 struct rmtacl_upcall_desc desc;
1014 if (len != LUSTRE_ACL_SIZE_MAX || !value) {
1015 CERROR("no reply buffer prepared\n");
1019 memset(&desc, 0, sizeof(desc));
1021 desc.cmd = lustre_msg_string(req->rq_reqmsg, req_off + 2, 0);
1022 desc.cmdlen = req->rq_reqmsg->buflens[req_off + 2];
1023 desc.res = (char *) value;
1024 desc.reslen = LUSTRE_ACL_SIZE_MAX;
1026 mds_do_remote_acl_upcall(&desc);
1028 if (desc.upcall_status)
1029 RETURN(desc.upcall_status);
/* downcall must not claim more data than the buffer holds */
1031 if (desc.reslen > LUSTRE_ACL_SIZE_MAX) {
1032 CERROR("downcall claim reslen %u\n", desc.reslen);
1035 /* like remote setfacl, steal "flags" in mds_body as the
1036 * execution status
1038 repbody->flags = desc.status;
1039 repbody->valid |= OBD_MD_FLXATTR;
1040 repbody->eadatasize = desc.reslen;
/* ordinary xattr: go through the filesystem's getxattr method */
1045 if (inode->i_op && inode->i_op->getxattr)
1046 rc = inode->i_op->getxattr(dentry, ea_name, value, len);
1049 if (rc != -ENODATA && rc != -EOPNOTSUPP)
1050 CERROR("getxattr failed: %d", rc);
1052 repbody->valid |= OBD_MD_FLXATTR;
1053 repbody->eadatasize = rc;
/*
 * mds_pack_xattr_list(): fill the reply buffer at reply_off + 1 with
 * the inode's xattr name list via ->listxattr() and set
 * OBD_MD_FLXATTRLIST/eadatasize in the reply body.
 */
1060 int mds_pack_xattr_list(struct dentry *dentry, struct ptlrpc_request *req,
1061 struct mds_body *repbody, int reply_off)
1063 struct inode *inode = dentry->d_inode;
1068 len = req->rq_repmsg->buflens[reply_off + 1];
1070 value = lustre_msg_buf(req->rq_repmsg, reply_off + 1, len);
/* note: guarded on ->getxattr but calls ->listxattr, as in original */
1073 if (inode->i_op && inode->i_op->getxattr)
1074 rc = inode->i_op->listxattr(dentry, value, len);
1077 CERROR("listxattr failed: %d", rc);
1079 repbody->valid |= OBD_MD_FLXATTRLIST;
1080 repbody->eadatasize = rc;
/*
 * mds_pack_posix_acl(): read the inode's POSIX access ACL
 * (XATTR_NAME_ACL_ACCESS) through a stack dentry into the reply buffer
 * at offset + 1, store its little-endian size at 'offset', and set
 * OBD_MD_FLACL.  -ENODATA / -EOPNOTSUPP from getxattr are tolerated
 * (no ACL present / not supported).
 */
1087 int mds_pack_posix_acl(struct lustre_msg *repmsg, int offset,
1088 struct mds_body *body, struct inode *inode)
/* temporary dentry on the stack: getxattr only needs d_inode here */
1090 struct dentry de = { .d_inode = inode };
1091 __u32 buflen, *sizep;
1096 if (!inode->i_op->getxattr)
1099 sizep = lustre_msg_buf(repmsg, offset, 4);
1101 CERROR("can't locate returned acl size buf\n");
1105 buflen = repmsg->buflens[offset + 1];
1106 buf = lustre_msg_buf(repmsg, offset + 1, buflen);
1108 size = inode->i_op->getxattr(&de, XATTR_NAME_ACL_ACCESS, buf, buflen);
1109 if (size == -ENODATA || size == -EOPNOTSUPP)
/* size is sent on the wire: fix the byte order */
1115 *sizep = cpu_to_le32(size);
1116 body->valid |= OBD_MD_FLACL;
/*
 * mds_pack_remote_perm(): for a remote-realm client, fill the
 * mds_remote_perm reply buffer with the authenticated uid/gid from the
 * caller's LSD, the setuid/setgid allowances from the LSD NID-based
 * permission mask, and the effective MAY_READ/WRITE/EXEC bits computed
 * by calling ->permission() once per bit.  ids are reverse-mapped into
 * the client's namespace before the permission probes.
 */
1121 int mds_pack_remote_perm(struct ptlrpc_request *req, int reply_off,
1122 struct mds_body *body, struct inode *inode)
1124 struct mds_export_data *med = &req->rq_export->u.eu_mds_data;
1125 struct lustre_sec_desc *lsd;
1126 struct mds_remote_perm *perm;
1129 LASSERT(inode->i_op);
1130 LASSERT(inode->i_op->permission);
1131 LASSERT(req->rq_export->exp_mds_data.med_remote);
1133 perm = (struct mds_remote_perm *)
1134 lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(perm));
1136 CERROR("no remote perm buf at offset %d\n", reply_off);
1140 memset(perm, 0, sizeof(*perm));
1142 /* obtain authenticated uid/gid and LSD permissions, which
1143 * might be different from current process context, from LSD
1145 lsd = mds_get_lsd(current->uid);
1147 CWARN("can't LSD of uid %u\n", current->uid);
1151 perm->mrp_auth_uid = lsd->lsd_uid;
1152 perm->mrp_auth_gid = lsd->lsd_gid;
/* per-NID policy decides whether setuid/setgid are honored */
1154 lsd_perms = mds_lsd_get_perms(lsd, 1, 0, req->rq_peer.peer_id.nid);
1155 if (lsd_perms & LSD_PERM_SETUID)
1156 perm->mrp_allow_setuid = 1;
1157 if (lsd_perms & LSD_PERM_SETGID)
1158 perm->mrp_allow_setgid = 1;
1162 if (mds_remote_perm_do_reverse_map(med, perm))
1165 /* permission bits of current user
1166 * XXX this is low efficient, could we do it in one blow?
1168 if (inode->i_op->permission(inode, MAY_EXEC, NULL) == 0)
1169 perm->mrp_perm |= MAY_EXEC;
1170 if (inode->i_op->permission(inode, MAY_WRITE, NULL) == 0)
1171 perm->mrp_perm |= MAY_WRITE;
1172 if (inode->i_op->permission(inode, MAY_READ, NULL) == 0)
1173 perm->mrp_perm |= MAY_READ;
1175 body->valid |= (OBD_MD_FLACL | OBD_MD_FLRMTACL);
/*
 * mds_pack_acl(): dispatch ACL packing by client type — POSIX ACL
 * xattr for local clients, remote-permission struct (at the next reply
 * offset) for remote-realm clients.
 */
1180 int mds_pack_acl(struct ptlrpc_request *req, int reply_off,
1181 struct mds_body *body, struct inode *inode)
1185 if (!req->rq_export->exp_mds_data.med_remote)
1186 rc = mds_pack_posix_acl(req->rq_repmsg, reply_off, body, inode);
1188 rc = mds_pack_remote_perm(req, reply_off + 1, body, inode);
/*
 * mds_getattr_internal(): fill the getattr reply body for 'dentry'
 * according to reqbody->valid: basic attributes (or cross-ref packing
 * for DCACHE_CROSS_REF dentries), then optionally striping EA / MEA,
 * symlink target, a named xattr or the xattr list, ACL, gskey, audit
 * data and a capability.  Finally ids are reverse-mapped for remote
 * clients.  The reply buffers must already be sized by
 * mds_getattr_pack_msg().
 */
1193 static int mds_getattr_internal(struct obd_device *obd, struct dentry *dentry,
1194 struct ptlrpc_request *req, int req_off,
1195 struct mds_body *reqbody, int reply_off,
1196 struct mds_req_sec_desc *rsd)
1198 struct mds_export_data *med = &req->rq_export->u.eu_mds_data;
1199 struct inode *inode = dentry->d_inode;
1200 struct mds_body *body;
1201 int rc = 0, offset = 0;
1204 if (inode == NULL && !(dentry->d_flags & DCACHE_CROSS_REF))
1207 body = lustre_msg_buf(req->rq_repmsg, reply_off, sizeof(*body));
1208 LASSERT(body != NULL); /* caller prepped reply */
/* cross-MDS reference: pack from the dentry, there is no local inode */
1210 if (dentry->d_flags & DCACHE_CROSS_REF) {
1211 mds_pack_dentry2body(obd, body, dentry,
1212 (reqbody->valid & OBD_MD_FID) ? 1 : 0);
1213 CDEBUG(D_OTHER, "cross reference: "DLID4"\n",
1218 mds_pack_inode2body(obd, body, inode,
1219 (reqbody->valid & OBD_MD_FID) ? 1 : 0);
1221 if ((S_ISREG(inode->i_mode) && (reqbody->valid & OBD_MD_FLEASIZE)) ||
1222 (S_ISDIR(inode->i_mode) && (reqbody->valid & OBD_MD_FLDIREA))) {
1224 /* guessing what kind of attribute do we need. */
1225 int is_mea = (S_ISDIR(inode->i_mode) &&
1226 (reqbody->valid & OBD_MD_MEA) != 0);
1228 rc = mds_pack_md(obd, req->rq_repmsg, reply_off + 1,
1229 body, inode, 1, is_mea);
1231 /* if we have LOV EA data, the OST holds size, atime, mtime. */
1232 if (!(body->valid & OBD_MD_FLEASIZE) &&
1233 !(body->valid & OBD_MD_FLDIREA))
1234 body->valid |= (OBD_MD_FLSIZE | OBD_MD_FLBLOCKS |
1235 OBD_MD_FLATIME | OBD_MD_FLMTIME);
1236 } else if (S_ISLNK(inode->i_mode) &&
1237 (reqbody->valid & OBD_MD_LINKNAME) != 0) {
1238 rc = mds_pack_link(dentry, req, body, reply_off);
1239 } else if (reqbody->valid & OBD_MD_FLXATTR) {
1240 rc = mds_pack_xattr(dentry, req, body, req_off, reply_off);
1241 } else if (reqbody->valid & OBD_MD_FLXATTRLIST) {
1242 rc = mds_pack_xattr_list(dentry, req, body, reply_off);
/* ACL buffer follows the EA buffer when one was reserved */
1245 offset = reply_off + ((reqbody->valid & OBD_MD_FLEASIZE) ? 2 : 1);
1246 if (reqbody->valid & OBD_MD_FLACL) {
1247 rc = mds_pack_acl(req, offset, body, inode);
1251 if (reqbody->valid & OBD_MD_FLKEY) {
1252 rc = mds_pack_gskey(obd, req->rq_repmsg, &offset,
1256 mds_pack_audit(obd, inode, body);
/* capability request: OBD_MD_CAPA must be the only bit set */
1258 if (reqbody->valid & OBD_MD_CAPA) {
1259 struct lustre_capa *req_capa;
1261 LASSERT(!(reqbody->valid & ~OBD_MD_CAPA));
1262 LASSERT(S_ISREG(inode->i_mode));
1264 req_capa = lustre_swab_reqbuf(req, req_off + 1,
1266 lustre_swab_lustre_capa);
1267 if (req_capa == NULL) {
1268 CERROR("Can't unpack capa\n");
1272 offset = reply_off + 1;
1273 rc = mds_pack_capa(obd, med, reqbody, req_capa, req,
/* translate server-side ids back into the remote client's namespace */
1278 mds_body_do_reverse_map(med, body);
/*
 * mds_getattr_pack_msg_cf(): reply-sizing for the cross-ref (cf) case —
 * only a bare mds_body is needed, since the inode lives on another MDS.
 * Honors the OBD_FAIL_MDS_GETATTR_PACK fault-injection point.
 */
1283 static int mds_getattr_pack_msg_cf(struct ptlrpc_request *req,
1284 struct dentry *dentry,
1287 int rc = 0, size[1] = {sizeof(struct mds_body)};
1290 if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
1291 CERROR("failed MDS_GETATTR_PACK test\n");
1292 req->rq_status = -ENOMEM;
1296 rc = lustre_pack_reply(req, 1, size, NULL);
1298 CERROR("lustre_pack_reply failed: rc %d\n", rc);
1299 GOTO(out, req->rq_status = rc);
/*
 * Compute the reply-buffer layout for a getattr on @de and allocate the reply.
 * Buffer 0 is always the mds_body; depending on what the client asked for
 * (body->valid bits from the request), extra buffers are sized for LOV/MEA
 * EAs, symlink targets, named xattrs, xattr lists, ACLs, crypto keys and
 * capabilities.  On failure req->rq_status is set for the caller.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mds_getattr_pack_msg(struct ptlrpc_request *req, struct dentry *de,
        struct inode *inode = de->d_inode;
        struct mds_obd *mds = mds_req2mds(req);
        struct mds_body *body;
        /* up to 4 reply buffers can be needed; bufcount tracks how many */
        int rc = 0, size[4] = {sizeof(*body)}, bufcount = 1;

        body = lustre_msg_buf(req->rq_reqmsg, offset, sizeof(*body));
        LASSERT(body != NULL);                 /* checked by caller */
        LASSERT_REQSWABBED(req, offset);       /* swabbed by caller */

        /* striping EA requested: probe its size (fsfilt_get_md with NULL
         * buffer returns the EA length) so the reply buffer can be sized */
        if ((S_ISREG(inode->i_mode) && (body->valid & OBD_MD_FLEASIZE)) ||
            (S_ISDIR(inode->i_mode) && (body->valid & OBD_MD_FLDIREA))) {
                down(&inode->i_sem);
                rc = fsfilt_get_md(req->rq_export->exp_obd, inode, NULL, 0,
                                   ((body->valid & OBD_MD_MEA) ? EA_MEA : EA_LOV));
                        /* no EA / unsupported are expected, not errors */
                        if (rc != -ENODATA && rc != -EOPNOTSUPP)
                                CERROR("error getting inode %lu MD: rc = %d\n",
                } else if (rc > mds->mds_max_mdsize) {
                        CERROR("MD size %d larger than maximum possible %u\n",
                               rc, mds->mds_max_mdsize);
                        size[bufcount] = rc;
        } else if (S_ISLNK(inode->i_mode) && (body->valid & OBD_MD_LINKNAME)) {
                /* symlink target: client told us its buffer size in
                 * eadatasize; warn on mismatch, pack at most that much */
                if (inode->i_size + 1 != body->eadatasize)
                        CERROR("symlink size: %Lu, reply space: %d\n",
                               inode->i_size + 1, body->eadatasize);
                size[bufcount] = min_t(int, inode->i_size+1, body->eadatasize);
                CDEBUG(D_INODE, "symlink size: %Lu, reply space: %d\n",
                       inode->i_size + 1, body->eadatasize);
        } else if ((body->valid & OBD_MD_FLXATTR)) {
                /* single named xattr: size the buffer by probing getxattr */
                char *ea_name = lustre_msg_string(req->rq_reqmsg,
                if (!strcmp(ea_name, XATTR_NAME_LUSTRE_ACL)) {
                        size[bufcount] = LUSTRE_ACL_SIZE_MAX;
                        if (inode->i_op && inode->i_op->getxattr)
                                rc = inode->i_op->getxattr(de, ea_name,
                                if (rc != -ENODATA && rc != -EOPNOTSUPP)
                                        CERROR("error get inode %lu EA: %d\n",
                                size[bufcount] = min_t(int,
                                                       body->eadatasize, rc);
        } else if (body->valid & OBD_MD_FLXATTRLIST) {
                /* NOTE(review): condition tests ->getxattr but the call is
                 * ->listxattr — looks like it should test ->listxattr;
                 * confirm against upstream before changing */
                if (inode->i_op && inode->i_op->getxattr)
                        rc = inode->i_op->listxattr(de, NULL, 0);
                        if (rc != -ENODATA && rc != -EOPNOTSUPP)
                                CERROR("error getting inode %lu EA: rc = %d\n",
                size[bufcount] = min_t(int, body->eadatasize, rc);

        /* may co-exist with OBD_MD_FLEASIZE */
        if (body->valid & OBD_MD_FLACL) {
                /* remote clients get a translated permission struct instead
                 * of the raw posix ACL xattr */
                if (req->rq_export->exp_mds_data.med_remote) {
                        size[bufcount++] = sizeof(int);
                        size[bufcount++] = sizeof(struct mds_remote_perm);
                        size[bufcount++] = sizeof(int);
                        size[bufcount++] = xattr_acl_size(LL_ACL_MAX_ENTRIES);

        if (body->valid & OBD_MD_FLKEY) {
                size[bufcount++] = sizeof(int);
                size[bufcount++] = sizeof(struct crypto_key);

        if (body->valid & OBD_MD_CAPA)
                size[bufcount++] = sizeof(struct lustre_capa);

        /* fault-injection point used by recovery tests */
        if (OBD_FAIL_CHECK(OBD_FAIL_MDS_GETATTR_PACK)) {
                CERROR("failed MDS_GETATTR_PACK test\n");
                req->rq_status = -ENOMEM;
                GOTO(out, rc = -ENOMEM);

        rc = lustre_pack_reply(req, bufcount, size, NULL);
                CERROR("out of memory\n");
                GOTO(out, req->rq_status = rc);
/*
 * For a split directory, verify that @name hashes to this MDS.  Reads the
 * directory's MEA (if any); if the name's bucket belongs to another MDS the
 * caller must redirect the operation there.  The MEA buffer allocated by
 * mds_md_get_attr() is freed here before returning.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
int mds_check_mds_num(struct obd_device *obd, struct inode *inode,
                      char *name, int namelen)
        struct mea *mea = NULL;
        int mea_size, rc = 0;

        rc = mds_md_get_attr(obd, inode, &mea, &mea_size);
        if (mea != NULL && mea->mea_count) {
                /*
                 * dir is already split; check if the requested filename
                 * should live at this MDS or at another one.
                 */
                int i = mea_name2idx(mea, name, namelen - 1);
                if (mea->mea_master != id_group(&mea->mea_ids[i])) {
                        "inapropriate MDS(%d) for %s. should be "
                        "%lu(%d)\n", mea->mea_master, name,
                        (unsigned long)id_group(&mea->mea_ids[i]), i);

        OBD_FREE(mea, mea_size);
/*
 * Decide whether the MDS can authoritatively report size/blocks for a
 * regular file.  The OSTs normally own size; the MDS only claims it when
 * it is not recovering and no client holds the file open for write.
 * Clears OBD_MD_FLSIZE/FLBLOCKS in body->valid first, then re-sets them
 * only if the cached values are trustworthy.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
int mds_getattr_size(struct obd_device *obd, struct dentry *dentry,
                     struct ptlrpc_request *req, struct mds_body *body)
        struct inode *inode = dentry->d_inode;

        LASSERT(body != NULL);

        /* only regular files with a backing inode are considered */
        if (dentry->d_inode == NULL || !S_ISREG(inode->i_mode))

        /* XXX: quite a ugly hack, need to check old code
         * drop FLSIZE/FLBLOCKS prior any checking to */
        body->valid &= ~(OBD_MD_FLSIZE | OBD_MD_FLBLOCKS);

        if (obd->obd_recovering) {
                /* recovery may still replay size-changing operations */
                CDEBUG(D_INODE, "size for "DLID4" is unknown yet (recovering)\n",

        if (atomic_read(&inode->i_writecount)) {
                /* some one has opened the file for write.
                 * mds doesn't know actual size */
                CDEBUG(D_INODE, "MDS doesn't know actual size for "DLID4"\n",

        CDEBUG(D_INODE, "MDS returns "LPD64"/"LPD64" for"DLID4"\n",
               body->size, body->blocks, OLID4(&body->id1));
        body->valid |= OBD_MD_FLSIZE | OBD_MD_FLBLOCKS;
/*
 * Getattr-by-name with DLM locking (also the backend of intent getattr).
 * Looks up @name under the parent in body->id1, takes PR locks on parent
 * and child (or reuses already-granted locks for a RESENT request), packs
 * the attributes into the reply, and cleans up via a phase counter so that
 * partially acquired locks/dentries are released on every error path.
 * On success the child lock in @child_lockh is handed back to the client.
 * NOTE: this function's flow is lock-ordering sensitive and several of its
 * lines are not visible in this view; code left byte-for-byte as found.
 */
static int mds_getattr_lock(struct ptlrpc_request *req, int offset,
                            struct lustre_handle *child_lockh, int child_part)
        struct obd_device *obd = req->rq_export->exp_obd;
        struct mds_obd *mds = &obd->u.mds;
        struct ldlm_reply *rep = NULL;
        struct lvfs_run_ctxt saved;
        struct mds_req_sec_desc *rsd;
        struct mds_body *body;
        struct dentry *dparent = NULL, *dchild = NULL;
        struct lvfs_ucred uc = {NULL, NULL,};
        /* [0] = parent inodebits lock, [1] = optional update-mode lock */
        struct lustre_handle parent_lockh[2] = {{0}, {0}};
        unsigned int namesize = 0;
        int rc = 0, cleanup_phase = 0, resent_req = 0, update_mode, reply_offset;

        LASSERT(!strcmp(obd->obd_type->typ_name, OBD_MDS_DEVICENAME));
        MD_COUNTER_INCREMENT(obd, getattr_lock);

        rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
                CERROR("Can't unpack security desc\n");

        /* swab now, before anyone looks inside the request. */
        body = lustre_swab_reqbuf(req, offset, sizeof(*body),
                                  lustre_swab_mds_body);
                CERROR("Can't swab mds_body\n");
                GOTO(cleanup, rc = -EFAULT);

        LASSERT_REQSWAB(req, offset + 1);
        name = lustre_msg_string(req->rq_reqmsg, offset + 1, 0);
                CERROR("Can't unpack name\n");
                GOTO(cleanup, rc = -EFAULT);
        namesize = req->rq_reqmsg->buflens[offset + 1];

        /* namesize less than 2 means we have empty name, probably came from
           revalidate by cfid, so no point in having name to be set */

        /* offset 1 = plain getattr_lock request, 3 = embedded in an intent */
        LASSERT (offset == 1 || offset == 3);
        rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*rep));

        rc = mds_init_ucred(&uc, req, rsd);
        if (child_lockh->cookie == 0)
                mds_audit_auth(req, &uc, AUDIT_STAT, &body->id1,
                               name, namesize - 1);

        push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
        cleanup_phase = 1; /* kernel context */
        intent_set_disposition(rep, DISP_LOOKUP_EXECD);
        LASSERT(namesize > 0);
        /* a non-zero cookie means the client resent: locks already granted */
        if (child_lockh->cookie != 0) {
                LASSERT(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT);
        if (body->valid == OBD_MD_FLID) {
                struct mds_body *mds_reply;
                int size = sizeof(*mds_reply);
                dparent = mds_id2dentry(obd, &body->id1, NULL);
                if (IS_ERR(dparent)) {
                        rc = PTR_ERR(dparent);
                 * the user requested ONLY the inode number, so do a raw lookup.
                rc = lustre_pack_reply(req, 1, &size, NULL);
                        CERROR("out of memory\n");
                dir = dparent->d_inode;
                LASSERT(dir->i_op->lookup_raw != NULL);
                rc = dir->i_op->lookup_raw(dir, name, namesize - 1, &inum);
                mds_reply = lustre_msg_buf(req->rq_repmsg, 0,
                                           sizeof(*mds_reply));
                id_ino(&mds_reply->id1) = inum;
                mds_reply->valid = OBD_MD_FLID;
        if (resent_req == 0) {
                LASSERT(id_fid(&body->id1) != 0);
                /* take parent UPDATE + child locks atomically to avoid
                 * racing with rename/unlink */
                rc = mds_get_parent_child_locked(obd, mds, &body->id1,
                                                 parent_lockh, &dparent,
                                                 MDS_INODELOCK_UPDATE,
                                                 child_lockh, &dchild,
                                                 LCK_PR, child_part);
                cleanup_phase = 2; /* dchild, dparent, locks */
                 * let's make sure this name should leave on this mds
                rc = mds_check_mds_num(obd, dparent->d_inode, name, namesize);
                /* we have no dentry here, drop LOOKUP bit */
                /* FIXME: we need MDS_INODELOCK_LOOKUP or not. */
                child_part &= ~MDS_INODELOCK_LOOKUP;
                CDEBUG(D_OTHER, "%s: retrieve attrs for "DLID4"\n",
                       obd->obd_name, OLID4(&body->id1));
                dchild = mds_id2locked_dentry(obd, &body->id1, NULL,
                                              LCK_PR, parent_lockh,
                                              &update_mode, NULL, 0,
                                              MDS_INODELOCK_UPDATE);
                if (IS_ERR(dchild)) {
                        CERROR("can't find inode with id "DLID4", err = %d\n",
                               OLID4(&body->id1), (int)PTR_ERR(dchild));
                        rc = PTR_ERR(dchild);
                /* by-id case: the "parent" lock is really the child lock */
                memcpy(child_lockh, parent_lockh, sizeof(parent_lockh[0]));
                struct ldlm_lock *granted_lock;

                DEBUG_REQ(D_DLMTRACE, req, "resent, not enqueuing new locks");
                granted_lock = ldlm_handle2lock(child_lockh);
                LASSERTF(granted_lock != NULL, LPU64"/%lu lockh "LPX64"\n",
                         id_fid(&body->id1), (unsigned long)id_group(&body->id1),
                         child_lockh->cookie);
                /* usual named request */
                dparent = mds_id2dentry(obd, &body->id1, NULL);
                LASSERT(!IS_ERR(dparent));
                dchild = ll_lookup_one_len(name, dparent, namesize - 1);
                LASSERT(!IS_ERR(dchild));
                /* client wants to get attr. by id */
                dchild = mds_id2dentry(obd, &body->id1, NULL);
                LASSERT(!IS_ERR(dchild));
                LDLM_LOCK_PUT(granted_lock);

        cleanup_phase = 2; /* dchild, dparent, locks */
        if (!DENTRY_VALID(dchild)) {
                intent_set_disposition(rep, DISP_LOOKUP_NEG);
                 * in the intent case, the policy clears this error: the
                 * disposition is enough.
        intent_set_disposition(rep, DISP_LOOKUP_POS);
        /* reply may not be packed yet for intent requests */
        if (req->rq_repmsg == NULL) {
                if (dchild->d_flags & DCACHE_CROSS_REF)
                        rc = mds_getattr_pack_msg_cf(req, dchild, offset);
                        rc = mds_getattr_pack_msg(req, dchild, offset);
                        CERROR ("mds_getattr_pack_msg: %d\n", rc);

        rc = mds_getattr_internal(obd, dchild, req, offset, body,
                GOTO(cleanup, rc); /* returns the lock to the client */

        /* probably MDS knows actual size? */
        body = lustre_msg_buf(req->rq_repmsg, reply_offset, sizeof(*body));
        LASSERT(body != NULL);
        mds_getattr_size(obd, dchild, req, body);

        /* audit stuff for getattr */
        if (resent_req == 0 && (dparent || dchild)) {
                struct inode * au_inode = NULL;

                if (dchild && dchild->d_inode) {
                        au_inode = dchild->d_inode;
                        mds_audit_stat(req, &body->id1, au_inode,
                        au_inode = dparent->d_inode;
                        mds_audit_stat(req, &body->id1, au_inode,
                                       name, namesize - 1, rc);

        /* phase-based teardown: fall through releases earlier resources */
        switch (cleanup_phase) {
                if (resent_req == 0) {
                        /* on error the child lock is not handed back */
                        if (rc && DENTRY_VALID(dchild))
                                ldlm_lock_decref(child_lockh, LCK_PR);
                        ldlm_lock_decref(parent_lockh, LCK_PR);
                        if (parent_lockh[1].cookie != 0)
                                ldlm_lock_decref(parent_lockh + 1, update_mode);
        pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
        mds_exit_ucred(&uc);
/*
 * Lockless getattr by id: unpack the security descriptor and mds_body,
 * resolve body->id1 to a dentry, size/allocate the reply and pack the
 * attributes.  No DLM locks are taken (compare mds_getattr_lock()).
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mds_getattr(struct ptlrpc_request *req, int offset)
        struct obd_device *obd = req->rq_export->exp_obd;
        struct lvfs_run_ctxt saved;
        struct mds_req_sec_desc *rsd;
        struct mds_body *body;
        struct lvfs_ucred uc = {NULL, NULL,};

        MD_COUNTER_INCREMENT(obd, getattr);

        rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
                CERROR("Can't unpack security desc\n");

        body = lustre_swab_reqbuf(req, offset, sizeof(*body),
                                  lustre_swab_mds_body);
                CERROR ("Can't unpack body\n");

        rc = mds_init_ucred(&uc, req, rsd);
                mds_exit_ucred(&uc);

        /* switch to the MDS filesystem context with caller credentials */
        push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
        de = mds_id2dentry(obd, &body->id1, NULL);
                rc = req->rq_status = PTR_ERR(de);

        rc = mds_getattr_pack_msg(req, de, offset);
                CERROR("mds_getattr_pack_msg: %d\n", rc);

        req->rq_status = mds_getattr_internal(obd, de, req, offset, body,

        pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
        mds_exit_ucred(&uc);
/*
 * Permission check for remote (cross-realm) clients: resolves body->id1 and
 * replies with an mds_body plus a translated mds_remote_perm.  Rejected with
 * an error for local clients, which do their own permission checks.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mds_access_check(struct ptlrpc_request *req, int offset)
        struct obd_device *obd = req->rq_export->exp_obd;
        struct lvfs_run_ctxt saved;
        struct mds_req_sec_desc *rsd;
        struct mds_body *body;
        struct lvfs_ucred uc;
        /* reply: buffer 0 = mds_body, buffer 1 = remote permission struct */
        int rep_size[2] = {sizeof(*body),
                           sizeof(struct mds_remote_perm)};

        /* only meaningful for remote clients */
        if (!req->rq_export->exp_mds_data.med_remote) {
                CERROR("from local client "LPU64"\n", req->rq_peer.peer_id.nid);

        rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
                CERROR("Can't unpack security desc\n");

        body = lustre_swab_reqbuf(req, offset, sizeof(*body),
                                  lustre_swab_mds_body);
                CERROR ("Can't unpack body\n");

        MD_COUNTER_INCREMENT(obd, access_check);

        rc = mds_init_ucred(&uc, req, rsd);
                CERROR("init ucred error: %d\n", rc);
        push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);

        de = mds_id2dentry(obd, &body->id1, NULL);
                CERROR("grab ino "LPU64": err %ld\n",
                       body->id1.li_stc.u.e3s.l3s_ino, PTR_ERR(de));
                GOTO(out_pop, rc = PTR_ERR(de));

        rc = lustre_pack_reply(req, 2, rep_size, NULL);
                CERROR("pack reply error: %d\n", rc);
                GOTO(out_dput, rc = -EINVAL);

        body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*body));

        rc = mds_pack_remote_perm(req, 1, body, de->d_inode);

        pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
        mds_exit_ucred(&uc);
/*
 * statfs on the MDS backing filesystem with caching: fsfilt_statfs() only
 * refreshes obd->obd_osfs if it is older than @max_age; the cached copy is
 * returned under obd_osfs_lock so readers see a consistent snapshot.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mds_obd_statfs(struct obd_device *obd, struct obd_statfs *osfs,
                          unsigned long max_age)
        spin_lock(&obd->obd_osfs_lock);
        rc = fsfilt_statfs(obd, obd->u.mds.mds_sb, max_age);
        memcpy(osfs, &obd->obd_osfs, sizeof(*osfs));
        spin_unlock(&obd->obd_osfs_lock);
/*
 * MDS_STATFS handler: pack a single obd_statfs reply buffer and fill it
 * from the (briefly cached) backing-filesystem statistics.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mds_statfs(struct ptlrpc_request *req)
        struct obd_device *obd = req->rq_export->exp_obd;
        int rc, size = sizeof(struct obd_statfs);

        /* This will trigger a watchdog timeout */
        OBD_FAIL_TIMEOUT(OBD_FAIL_MDS_STATFS_LCW_SLEEP,
                         (MDS_SERVICE_WATCHDOG_TIMEOUT / 1000) + 1);

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_STATFS_PACK)) {
                CERROR("mds: statfs lustre_pack_reply failed: rc = %d\n", rc);

        OBD_COUNTER_INCREMENT(obd, statfs);

        /* We call this so that we can cache a bit - 1 jiffie worth */
        rc = mds_obd_statfs(obd, lustre_msg_buf(req->rq_repmsg, 0, size),
                CERROR("mds_obd_statfs failed: rc %d\n", rc);

        req->rq_status = rc;
/*
 * MDS_SYNC handler: with a zero inode id, sync the whole backing
 * filesystem; otherwise fsync the single inode named by body->id1 and
 * pack its attributes into the reply.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mds_sync(struct ptlrpc_request *req, int offset)
        struct obd_device *obd = req->rq_export->exp_obd;
        struct mds_obd *mds = &obd->u.mds;
        struct mds_body *body;
        int rc, size = sizeof(*body);

        body = lustre_swab_reqbuf(req, offset, sizeof(*body),
                                  lustre_swab_mds_body);
                GOTO(out, rc = -EPROTO);

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_SYNC_PACK)) {
                CERROR("fsync lustre_pack_reply failed: rc = %d\n", rc);

        if (id_ino(&body->id1) == 0) {
                /* an id of zero is taken to mean "sync whole filesystem" */
                rc = fsfilt_sync(obd, mds->mds_sb);
                /* just any file to grab fsync method - "file" arg unused */
                struct file *file = mds->mds_rcvd_filp;
                struct mds_body *rep_body;

                de = mds_id2dentry(obd, &body->id1, NULL);
                        GOTO(out, rc = PTR_ERR(de));

                /* NOTE(review): fsync is invoked with a NULL file pointer;
                 * the comment above says the file arg is unused by this
                 * backend's fsync — confirm that holds for the fs in use */
                rc = file->f_op->fsync(NULL, de, 1);

                rep_body = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep_body));
                mds_pack_inode2body(obd, rep_body, de->d_inode,
                                    (body->valid & OBD_MD_FID) ? 1 : 0);

        req->rq_status = rc;
/* mds_readpage does not take a DLM lock on the inode, because the client must
 * already have a PR lock.
 *
 * If we were to take another one here, a deadlock will result, if another
 * thread is already waiting for a PW lock. */
static int mds_readpage(struct ptlrpc_request *req, int offset)
        struct obd_device *obd = req->rq_export->exp_obd;
        struct vfsmount *mnt;
        struct mds_req_sec_desc *rsd;
        struct mds_body *body, *repbody;
        struct lvfs_run_ctxt saved;
        int rc, size = sizeof(*repbody);
        struct lvfs_ucred uc = {NULL, NULL,};

        rc = lustre_pack_reply(req, 1, &size, NULL);
        if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_READPAGE_PACK)) {
                CERROR("mds: out of memory\n");
                GOTO(out, rc = -ENOMEM);

        rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
                CERROR("Can't unpack security desc\n");
                GOTO (out, rc = -EFAULT);

        body = lustre_swab_reqbuf(req, offset, sizeof(*body),
                                  lustre_swab_mds_body);
                CERROR("Can't unpack body\n");
                GOTO (out, rc = -EFAULT);

        rc = mds_init_ucred(&uc, req, rsd);
                mds_audit_auth(req, &uc, AUDIT_READDIR, &body->id1,

        push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
        de = mds_id2dentry(obd, &body->id1, &mnt);
                GOTO(out_pop, rc = PTR_ERR(de));

        CDEBUG(D_INODE, "ino %lu\n", de->d_inode->i_ino);

        file = dentry_open(de, mnt, O_RDONLY | O_LARGEFILE);
        /* note: in case of an error, dentry_open puts dentry */
                GOTO(out_pop, rc = PTR_ERR(file));

        /* body->size is actually the offset -eeb */
        if ((body->size & (de->d_inode->i_blksize - 1)) != 0) {
                CERROR("offset "LPU64" not on a block boundary of %lu\n",
                       body->size, de->d_inode->i_blksize);
                GOTO(out_file, rc = -EFAULT);

        /* body->nlink is actually the #bytes to read -eeb */
        if (body->nlink & (de->d_inode->i_blksize - 1)) {
                CERROR("size %u is not multiple of blocksize %lu\n",
                       body->nlink, de->d_inode->i_blksize);
                GOTO(out_file, rc = -EFAULT);

        repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*repbody));
        repbody->size = file->f_dentry->d_inode->i_size;
        repbody->valid = OBD_MD_FLSIZE;

        /* to make this asynchronous make sure that the handling function
           doesn't send a reply when this function completes. Instead a
           callback function would send the reply */
        /* body->size is actually the offset -eeb */
        rc = mds_sendpage(req, file, body->size, body->nlink);

        filp_close(file, 0);
        pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
        mds_exit_ucred(&uc);
        req->rq_status = rc;
/*
 * Read the striping/MEA EA of the object named by @id into a freshly
 * allocated buffer.  For directories the MD export sizes the buffer (MEA),
 * for files the DT export does (LOV EA).  On success *data/*datalen are
 * owned by the caller, who must OBD_FREE them.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
int mds_read_md(struct obd_device *obd, struct lustre_id *id,
                char **data, int *datalen)
        struct dentry *dentry;
        struct mds_obd *mds = &obd->u.mds;
        int rc = 0, mea = 0;

        dentry = mds_id2dentry(obd, id, NULL);
                GOTO(out, rc = PTR_ERR(dentry));
        if (!dentry->d_inode) {
                CERROR("Can't find object "DLID4".\n",
                GOTO(out_dentry, rc = -EINVAL);

        /* pick the maximum EA size from the matching export */
        if (S_ISDIR(dentry->d_inode->i_mode)) {
                *datalen = obd_packmd(mds->mds_md_exp, NULL, NULL);
                *datalen = obd_packmd(mds->mds_dt_exp, NULL, NULL);

        OBD_ALLOC(ea, *datalen);
                GOTO(out_dentry, rc = PTR_ERR(dentry));

        /* i_sem serializes against concurrent EA updates */
        down(&dentry->d_inode->i_sem);
        rc = fsfilt_get_md(obd, dentry->d_inode, *data, *datalen,
                           (mea ? EA_MEA : EA_LOV));
        up(&dentry->d_inode->i_sem);
                CERROR("Error %d reading eadata for ino %lu\n",
                       rc, dentry->d_inode->i_ino);

EXPORT_SYMBOL(mds_read_md);
/*
 * MDS_REINT entry point: unpack the security descriptor and the update
 * record, set up caller credentials, then dispatch to mds_reint_rec().
 * The heap-allocated record avoids a large stack frame in the service
 * thread and is always freed before returning.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
int mds_reint(struct ptlrpc_request *req, int offset,
              struct lustre_handle *lockh)
        struct mds_update_record *rec;
        struct mds_req_sec_desc *rsd;

        OBD_ALLOC(rec, sizeof(*rec));

        rsd = lustre_swab_mds_secdesc(req, MDS_REQ_SECDESC_OFF);
                CERROR("Can't unpack security desc\n");
                GOTO(out, rc = -EFAULT);

        rc = mds_update_unpack(req, offset, rec);
        if (rc || OBD_FAIL_CHECK(OBD_FAIL_MDS_REINT_UNPACK)) {
                CERROR("invalid record\n");
                GOTO(out, req->rq_status = -EINVAL);

        rc = mds_init_ucred(&rec->ur_uc, req, rsd);
        mds_audit_reint(req, rec);

        /* rc will be used to interrupt a for loop over multiple records */
        rc = mds_reint_rec(rec, offset, req, lockh);

        mds_exit_ucred(&rec->ur_uc);
        OBD_FREE(rec, sizeof(*rec));
/*
 * While the MDS is recovering, decide per-opcode whether a request may be
 * processed now, must be queued behind recovery, or is rejected with
 * -EAGAIN so the client retries after recovery completes.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mds_filter_recovery_request(struct ptlrpc_request *req,
                                       struct obd_device *obd, int *process)
        switch (req->rq_reqmsg->opc) {
        case MDS_CONNECT: /* This will never get here, but for completeness. */
        case OST_CONNECT: /* This will never get here, but for completeness. */
        case MDS_DISCONNECT:
        case OST_DISCONNECT:

        case MDS_SYNC: /* used in unmounting */
                /* replayable ops are queued for ordered recovery replay */
                *process = target_queue_recovery_request(req, obd);

                DEBUG_REQ(D_ERROR, req, "not permitted during recovery");
                /* XXX what should we set rq_status to here? */
                req->rq_status = -EAGAIN;
                RETURN(ptlrpc_error(req));
/* Human-readable names for REINT sub-opcodes, indexed by opcode, used in
 * log/debug messages. */
static char *reint_names[] = {
        [REINT_SETATTR] "setattr",
        [REINT_CREATE]  "create",
        [REINT_LINK]    "link",
        [REINT_UNLINK]  "unlink",
        [REINT_RENAME]  "rename",
        [REINT_OPEN]    "open",
2173 #define FILTER_VALID_FLAGS (OBD_MD_FLTYPE | OBD_MD_FLMODE | OBD_MD_FLGENER | \
2174 OBD_MD_FLSIZE | OBD_MD_FLBLOCKS | OBD_MD_FLBLKSZ| \
2175 OBD_MD_FLATIME | OBD_MD_FLMTIME | OBD_MD_FLCTIME| \
/*
 * Replay-safe reply reconstruction for a resent mdt_obj_create: restore
 * rc/transno/disposition from the per-client mds_client_data, re-resolve
 * the inode recorded in mcd_last_data, and repack the ost_body reply so the
 * client observes the same result as the original execution.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static void reconstruct_create(struct ptlrpc_request *req)
        struct mds_export_data *med = &req->rq_export->exp_mds_data;
        struct mds_client_data *mcd = med->med_mcd;
        struct dentry *dentry;
        struct ost_body *body;
        struct lustre_id id;

        /* copy rc, transno and disp; steal locks */
        mds_req_from_mcd(req, mcd);
        if (req->rq_status) {

        /* the original execution recorded the created inode number here */
        id_ino(&id) = mcd->mcd_last_data;
        LASSERT(id_ino(&id) != 0);

        dentry = mds_id2dentry(req2obd(req), &id, NULL);
        if (IS_ERR(dentry)) {
                CERROR("can't find inode "LPU64"\n", id_ino(&id));
                req->rq_status = PTR_ERR(dentry);

        CWARN("reconstruct reply for x"LPU64" (remote ino) "LPU64" -> %lu/%u\n",
              req->rq_xid, id_ino(&id), dentry->d_inode->i_ino,
              dentry->d_inode->i_generation);

        body = lustre_msg_buf(req->rq_repmsg, 0, sizeof (*body));
        obdo_from_inode(&body->oa, dentry->d_inode, FILTER_VALID_FLAGS);
        body->oa.o_id = dentry->d_inode->i_ino;
        body->oa.o_generation = dentry->d_inode->i_generation;
        body->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER;

        /* i_sem guards the self-id EA read */
        down(&dentry->d_inode->i_sem);
        rc = mds_read_inode_sid(req2obd(req), dentry->d_inode, &id);
        up(&dentry->d_inode->i_sem);
                CERROR("Can't read inode self id, inode %lu, "
                       "rc %d\n", dentry->d_inode->i_ino, rc);

        body->oa.o_fid = id_fid(&id);
        body->oa.o_mds = id_group(&id);
/*
 * Initialize the ACLs of a newly created inode from a parent's default ACL
 * xattr: store it as the default ACL on directories, then derive the access
 * ACL by masking it with the create mode (posix_acl_create_masq) and store
 * that too, updating i_mode if masking changed the permission bits.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mds_inode_init_acl(struct obd_device *obd, void *handle,
                              struct dentry *de, void *xattr, int xattr_size)
        struct inode *inode = de->d_inode;
        struct posix_acl *acl;

        LASSERT(xattr_size > 0);

        if (!inode->i_op->getxattr || !inode->i_op->setxattr) {
                CERROR("backend fs dosen't support xattr\n");

        /* set default acl */
        if (S_ISDIR(inode->i_mode)) {
                rc = inode->i_op->setxattr(de, XATTR_NAME_ACL_DEFAULT,
                                           xattr, xattr_size, 0);
                        CERROR("set default acl err: %d\n", rc);

        /* set access acl */
        acl = posix_acl_from_xattr(xattr, xattr_size);
        if (acl == NULL || IS_ERR(acl)) {
                CERROR("insane attr data\n");
                /* NOTE(review): when acl == NULL, PTR_ERR(acl) is 0, so this
                 * returns success on a NULL acl — verify intent */
                return PTR_ERR(acl);

        /* NOTE(review): posix_acl_valid() returns 0 for a valid ACL, so a
         * non-zero result means invalid; also rc is printed here before the
         * visible code assigns it — confirm against the elided lines */
        if (posix_acl_valid(acl)) {
                CERROR("default acl not valid: %d\n", rc);

        mode = inode->i_mode;
        rc = posix_acl_create_masq(acl, &mode);
                CERROR("create masq err %d\n", rc);

        /* masking changed the permission bits: persist the new mode */
        if (inode->i_mode != mode) {
                struct iattr iattr = { .ia_valid = ATTR_MODE,

                rc2 = fsfilt_setattr(obd, de, handle, &iattr, 0);
                        CERROR("setattr mode err: %d\n", rc2);

        /* we didn't change acl except mode bits of some
         * entries, so should be fit into original size.
         */
        rc = posix_acl_to_xattr(acl, xattr, xattr_size);

        rc = inode->i_op->setxattr(de, XATTR_NAME_ACL_ACCESS,
                                   xattr, xattr_size, 0);
                CERROR("set access acl err: %d\n", rc);

        posix_acl_release(acl);
/*
 * Create a remote directory object on this MDS on behalf of another MDS
 * (used for split directories and cross-MDS mkdir).  Handles three flavors:
 * replay/recreate with a known ino/gen (repair path returns the existing
 * object if found), normal creation under the "unnamed" directory with a
 * random name that is immediately unlinked (object kept by inode only),
 * and post-create EA setup (MEA marking or directory split).  Cleanup is
 * phase-based; phase 1 commits the transaction, phase 2 additionally marks
 * the reply for repack so the create can be replayed.
 * NOTE: many lines of this function are elided in this view; the phase
 * logic and lock pairing (parent_inode->i_sem vs fsfilt handle) must be
 * verified against the full source before any change.
 */
static int mdt_obj_create(struct ptlrpc_request *req)
        struct obd_device *obd = req->rq_export->exp_obd;
        struct mds_obd *mds = &obd->u.mds;
        struct ost_body *body, *repbody;
        char idname[LL_ID_NAMELEN];
        int size = sizeof(*repbody);
        struct inode *parent_inode;
        struct lvfs_run_ctxt saved;
        int rc, cleanup_phase = 0;
        struct dentry *new = NULL;
        struct dentry_params dp;
        int mealen, flags = 0;
        struct lvfs_ucred uc;
        struct lustre_id id;
        void *handle = NULL;
        unsigned long cr_inum = 0;

        DEBUG_REQ(D_HA, req, "create remote object");
        parent_inode = mds->mds_unnamed_dir->d_inode;

        body = lustre_swab_reqbuf(req, 0, sizeof(*body),
                                  lustre_swab_ost_body);

        /* acl data is packed transparently, no swab here */
        LASSERT(req->rq_reqmsg->bufcount >= 2);
        acl_size = req->rq_reqmsg->buflens[1];
        acl = lustre_msg_buf(req->rq_reqmsg, 1, acl_size);
                CERROR("No default acl buf?\n");

        rc = lustre_pack_reply(req, 1, &size, NULL);

        /* resent request: rebuild the original reply instead of re-creating */
        MDS_CHECK_RESENT(req, reconstruct_create(req));

        /* run as the creating user, not as root */
        uc.luc_ginfo = NULL;
        uc.luc_uid = body->oa.o_uid;
        uc.luc_gid = body->oa.o_gid;
        uc.luc_fsuid = body->oa.o_uid;
        uc.luc_fsgid = body->oa.o_gid;

        push_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
        repbody = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*repbody));

        /* in REPLAY case inum should be given (client or other MDS fills it) */
        if (body->oa.o_id && ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
            (lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY))) {
                 * this is re-create request from MDS holding directory name.
                 * we have to lookup given ino/gen first. if it exists (good
                 * case) then there is nothing to do. if it does not then we
                 * have to recreate it.
                id_ino(&id) = body->oa.o_id;
                id_gen(&id) = body->oa.o_generation;

                new = mds_id2dentry(obd, &id, NULL);
                if (!IS_ERR(new) && new->d_inode) {
                        struct lustre_id sid;

                        CDEBUG(D_OTHER, "mkdir repairing %lu/%lu\n",
                               (unsigned long)id_ino(&id),
                               (unsigned long)id_gen(&id));

                        obdo_from_inode(&repbody->oa, new->d_inode,
                                        FILTER_VALID_FLAGS);

                        repbody->oa.o_id = new->d_inode->i_ino;
                        repbody->oa.o_generation = new->d_inode->i_generation;
                        repbody->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER;

                        down(&new->d_inode->i_sem);
                        rc = mds_read_inode_sid(obd, new->d_inode, &sid);
                        up(&new->d_inode->i_sem);
                                CERROR("Can't read inode self id "
                                       "inode %lu, rc %d.\n",
                                       new->d_inode->i_ino, rc);

                        repbody->oa.o_fid = id_fid(&sid);
                        repbody->oa.o_mds = id_group(&sid);
                        LASSERT(id_fid(&sid) != 0);
                         * here we could use fid passed in body->oa.o_fid and
                         * thus avoid mds_read_inode_sid().
                        cr_inum = new->d_inode->i_ino;
                        GOTO(cleanup, rc = 0);

        /* normal create path: open a mkdir transaction under the parent */
        down(&parent_inode->i_sem);
        handle = fsfilt_start(obd, parent_inode, FSFILT_OP_MKDIR, NULL);
        if (IS_ERR(handle)) {
                up(&parent_inode->i_sem);
                CERROR("fsfilt_start() failed, rc = %d\n",
                       (int)PTR_ERR(handle));
                GOTO(cleanup, rc = PTR_ERR(handle));
        cleanup_phase = 1; /* transaction */

        /* temporary random name; the dentry is removed again below */
        rc = sprintf(idname, "%u.%u", ll_insecure_random_int(), current->pid);
        new = lookup_one_len(idname, mds->mds_unnamed_dir, rc);
                CERROR("%s: can't lookup new inode (%s) for mkdir: %d\n",
                       obd->obd_name, idname, (int) PTR_ERR(new));
                fsfilt_commit(obd, mds->mds_sb, new->d_inode, handle, 0);
                up(&parent_inode->i_sem);
                RETURN(PTR_ERR(new));
        } else if (new->d_inode) {
                CERROR("%s: name exists. repeat\n", obd->obd_name);

        /* replay/recreate reuses the original fid, fresh creates allocate */
        if ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
            lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
                fid = body->oa.o_fid;
                fid = mds_alloc_fid(obd);

        /* dentry_params pin the desired inum/group for the allocator */
        new->d_fsdata = (void *)&dp;
        dp.p_group = mds->mds_num;

        if ((lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) ||
            (body->oa.o_flags & OBD_FL_RECREATE_OBJS)) {
                LASSERT(body->oa.o_id != 0);
                dp.p_inum = body->oa.o_id;
                DEBUG_REQ(D_HA, req, "replay create obj %lu/%lu",
                          (unsigned long)body->oa.o_id,
                          (unsigned long)body->oa.o_generation);

        rc = vfs_mkdir(parent_inode, new, body->oa.o_mode);
                rc = mds_inode_init_acl(obd, handle, new,
                up(&parent_inode->i_sem);

        if ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
            lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
                /* force the replayed generation onto the new inode */
                new->d_inode->i_generation = body->oa.o_generation;
                mark_inode_dirty(new->d_inode);
                 * avoiding asserts in cache flush case, as
                 * @body->oa.o_id should be zero.
                if (body->oa.o_id) {
                        LASSERTF(body->oa.o_id == new->d_inode->i_ino,
                                 "BUG 3550: failed to recreate obj "
                                 LPU64" -> %lu\n", body->oa.o_id,
                                 new->d_inode->i_ino);

                        LASSERTF(body->oa.o_generation ==
                                 new->d_inode->i_generation,
                                 "BUG 3550: failed to recreate obj/gen "
                                 LPU64"/%u -> %lu/%u\n", body->oa.o_id,
                                 body->oa.o_generation,
                                 new->d_inode->i_ino,
                                 new->d_inode->i_generation);

        obdo_from_inode(&repbody->oa, new->d_inode, FILTER_VALID_FLAGS);
        repbody->oa.o_id = new->d_inode->i_ino;
        repbody->oa.o_generation = new->d_inode->i_generation;
        repbody->oa.o_valid |= OBD_MD_FLID | OBD_MD_FLGENER | OBD_MD_FID;

        if ((body->oa.o_flags & OBD_FL_RECREATE_OBJS) ||
            lustre_msg_get_flags(req->rq_reqmsg) & MSG_REPLAY) {
                LASSERT(body->oa.o_id != 0);
                LASSERT(body->oa.o_fid != 0);

        mds_inode2id(obd, &id, new->d_inode, fid);
        mds_update_inode_ids(obd, new->d_inode, handle, &id,
                             body->oa.o_valid & OBD_MD_FLID ?
                             NULL : obdo_id(&body->oa));

        /* initializing o_fid after it is allocated. */
        repbody->oa.o_fid = id_fid(&id);
        repbody->oa.o_mds = id_group(&id);

        /* drop the temporary name; the object lives on by inode only */
        rc = fsfilt_del_dir_entry(obd, new);
        up(&parent_inode->i_sem);
                CERROR("can't remove name for object: %d\n", rc);

        cleanup_phase = 2; /* created directory object */

        CDEBUG(D_OTHER, "created dirobj: %lu/%lu mode %o\n",
               (unsigned long)new->d_inode->i_ino,
               (unsigned long)new->d_inode->i_generation,
               (unsigned)new->d_inode->i_mode);
        cr_inum = new->d_inode->i_ino;
        up(&parent_inode->i_sem);
        CERROR("%s: can't create dirobj: %d\n", obd->obd_name, rc);

        if (body->oa.o_valid & OBD_MD_FLID) {
                /* this is new object for splitted dir. We have to prevent
                 * recursive splitting on it -bzzz */
                mealen = obd_size_diskmd(mds->mds_md_exp, NULL);

                OBD_ALLOC(mea, mealen);
                        GOTO(cleanup, rc = -ENOMEM);

                mea->mea_magic = MEA_MAGIC_ALL_CHARS;
                mea->mea_master = body->oa.o_mds; /* master mds num */
                obdo2id(&mea->mea_ids[body->oa.o_mds], &body->oa);

                down(&new->d_inode->i_sem);
                rc = fsfilt_set_md(obd, new->d_inode, handle,
                                   mea, mealen, EA_MEA);
                up(&new->d_inode->i_sem);
                        CERROR("fsfilt_set_md() failed, "

                OBD_FREE(mea, mealen);
                CDEBUG(D_OTHER, "%s: mark non-splittable %lu/%u - %d\n",
                       obd->obd_name, new->d_inode->i_ino,
                       new->d_inode->i_generation, flags);
        } else if (body->oa.o_easize) {
                /* we pass LCK_EX to split routine to signal that we have
                 * exclusive access to the directory. simple because nobody
                 * knows it already exists -bzzz */
                rc = mds_try_to_split_dir(obd, new, NULL,
                                          body->oa.o_easize, LCK_EX);
                        CERROR("Can't split directory %lu, error = %d.\n",
                               new->d_inode->i_ino, rc);

        if (rc == 0 && (body->oa.o_flags & OBD_FL_REINT)) {
                rc = mds_fidmap_add(obd, &id);
                        CERROR("can't create fid->ino mapping, "

        switch (cleanup_phase) {
        case 2: /* object has been created, but we'll may want to replay it later */
                ptlrpc_require_repack(req);
        case 1: /* transaction */
                rc = mds_finish_transno(mds, parent_inode, handle,

        pop_ctxt(&saved, &obd->obd_lvfs_ctxt, &uc);
/*
 * MDT get_info handler: validates the key string sent in request buffer 0
 * against the known set (mdsize/mdsnum/lovdesc/getext/rootid), packs a
 * reply buffer sized for the matching reply type, and delegates the value
 * lookup to obd_get_info() on the export.
 * NOTE: intermediate lines of this function are not visible in this view.
 */
static int mdt_get_info(struct ptlrpc_request *req)
        struct obd_export *exp = req->rq_export;

        key = lustre_msg_buf(req->rq_reqmsg, 0, 1);
                DEBUG_REQ(D_HA, req, "no get_info key");
        keylen = req->rq_reqmsg->buflens[0];

        /* reject any key outside the supported set */
        if ((keylen < strlen("mdsize") || strcmp(key, "mdsize") != 0) &&
            (keylen < strlen("mdsnum") || strcmp(key, "mdsnum") != 0) &&
            (keylen < strlen("lovdesc") || strcmp(key, "lovdesc") != 0) &&
            (keylen < strlen("getext") || strcmp(key, "getext") != 0) &&
            (keylen < strlen("rootid") || strcmp(key, "rootid") != 0))

        if (keylen >= strlen("rootid") && !strcmp(key, "rootid")) {
                struct lustre_id *reply;
                int size = sizeof(*reply);

                rc = lustre_pack_reply(req, 1, &size, NULL);

                reply = lustre_msg_buf(req->rq_repmsg, 0, size);
                rc = obd_get_info(exp, keylen, key, (__u32 *)&size, reply);
        } else if (keylen >= strlen("lovdesc") && !strcmp(key, "lovdesc")) {
                struct lov_desc *reply;
                int size = sizeof(*reply);

                rc = lustre_pack_reply(req, 1, &size, NULL);

                reply = lustre_msg_buf(req->rq_repmsg, 0, size);
                rc = obd_get_info(exp, keylen, key, (__u32 *)&size, reply);
        } else if (keylen >= strlen("getext") && !strcmp(key, "getext")) {
                struct fid_extent *reply;
                int size = sizeof(*reply);

                rc = lustre_pack_reply(req, 1, &size, NULL);

                reply = lustre_msg_buf(req->rq_repmsg, 0, size);
                rc = obd_get_info(exp, keylen, key, (__u32 *)&size, reply);
                /* mdsize/mdsnum: plain integer reply */
                int size = sizeof(*reply);

                rc = lustre_pack_reply(req, 1, &size, NULL);

                reply = lustre_msg_buf(req->rq_repmsg, 0, size);
                rc = obd_get_info(exp, keylen, key, (__u32 *)&size, reply);

        req->rq_repmsg->status = 0;
2677 static int mds_set_info(struct obd_export *exp, __u32 keylen,
2678 void *key, __u32 vallen, void *val)
2680 struct obd_device *obd;
2681 struct mds_obd *mds;
2685 obd = class_exp2obd(exp);
2687 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
2688 exp->exp_handle.h_cookie);
2693 if (keylen == 5 && memcmp(key, "audit", 5) == 0) {
2694 rc = mds_set_audit(obd, val);
2696 } else if (keylen >= strlen("ids") && memcmp(key, "ids", keylen) == 0) {
2697 struct lustre_id *ids = (struct lustre_id *)val;
2699 struct inode *inode;
2703 de = mds_id2dentry(obd, ids, NULL);
2706 CERROR("lookup by an id error rc=%d\n ", rc);
2709 inode = de->d_inode;
2711 GOTO(out_put, rc = -ENOENT);
2713 down(&inode->i_sem);
2714 handle = fsfilt_start(obd, inode, FSFILT_OP_SETATTR, NULL);
2715 if (IS_ERR(handle)) {
2717 GOTO(out_put, rc = PTR_ERR(handle));
2720 rc = mds_update_inode_ids(obd, inode, handle, NULL, ids + 1);
2722 err = fsfilt_commit(obd, mds->mds_sb, inode, handle,
2725 CERROR("error committing transaction: %d\n", err);
2734 if (keylen >= strlen("crypto_type") &&
2735 memcmp(key, "crypto_type", keylen) == 0) {
2736 rc = mds_set_crypto_type(obd, val, vallen);
2740 if (keylen >= strlen("setext") && !memcmp(key, "setext", keylen)) {
2741 struct fid_extent *ext = val;
2743 CDEBUG(D_IOCTL, "set last fid to extent ["LPD64"-"LPD64"]\n",
2744 ext->fe_start, ext->fe_width);
2746 /* set lastfid into fid extent start. All next object creates
2747 * will use that fid. */
2748 mds_set_last_fid(obd, ext->fe_start);
2750 /* setting the same extent to OSC to avoid ids intersecting in
2751 * object ids, as all cache MDSs have the same group 0. */
2752 rc = obd_set_info(mds->mds_dt_exp, strlen("setext"),
2753 "setext", sizeof(*ext), ext);
2755 CERROR("can't set extent ["LPD64"-"LPD64"] to %s, "
2756 "err %d\n", ext->fe_start, ext->fe_width,
2757 mds->mds_dt_exp->exp_obd->obd_name, rc);
2761 CDEBUG(D_IOCTL, "invalid key\n");
2765 static int mdt_set_info(struct ptlrpc_request *req)
2768 struct obd_export *exp = req->rq_export;
2769 int keylen, rc = 0, vallen = 0;
2772 key = lustre_msg_buf(req->rq_reqmsg, 0, 1);
2774 DEBUG_REQ(D_HA, req, "no set_info key");
2777 keylen = req->rq_reqmsg->buflens[0];
2779 if ((keylen == strlen("crypto_type") &&
2780 memcmp(key, "crypto_type", keylen) == 0)) {
2781 rc = lustre_pack_reply(req, 0, NULL, NULL);
2785 val = lustre_msg_buf(req->rq_reqmsg, 1, 0);
2786 vallen = req->rq_reqmsg->buflens[1];
2788 rc = obd_set_info(exp, keylen, key, vallen, val);
2789 req->rq_repmsg->status = 0;
2791 } else if (keylen == 5 && memcmp(key, "audit", 5) == 0) {
2792 struct audit_attr_msg msg, *p;
2795 rc = lustre_pack_reply(req, 0, NULL, NULL);
2799 p = lustre_swab_reqbuf(req, 1, sizeof(msg),
2800 lustre_swab_audit_attr);
2803 CDEBUG(D_INFO, "Get new audit setting 0x%x\n", (__u32)msg.attr);
2804 rc = obd_set_info(exp, keylen, key, sizeof(msg), &msg);
2806 req->rq_repmsg->status = rc;
2808 } else if (keylen == strlen("ids") &&
2809 memcmp(key, "ids", keylen) == 0) {
2810 struct lustre_id *id, ids[2];
2812 rc = lustre_pack_reply(req, 0, NULL, NULL);
2815 id = lustre_swab_reqbuf(req, 1, sizeof(struct lustre_id),
2816 lustre_swab_lustre_id);
2818 id = lustre_swab_reqbuf(req, 2, sizeof(struct lustre_id),
2819 lustre_swab_lustre_id);
2822 rc = obd_set_info(exp, keylen, key, vallen, ids);
2823 req->rq_repmsg->status = rc;
2825 } else if (keylen == strlen("setext") &&
2826 memcmp(key, "setext", keylen) == 0) {
2827 rc = lustre_pack_reply(req, 0, NULL, NULL);
2831 val = lustre_msg_buf(req->rq_reqmsg, 1, 0);
2832 vallen = req->rq_reqmsg->buflens[1];
2834 rc = obd_set_info(exp, keylen, key, vallen, val);
2835 req->rq_repmsg->status = 0;
2839 CDEBUG(D_IOCTL, "invalid key\n");
2843 static void mds_revoke_export_locks(struct obd_export *exp)
2845 struct list_head *locklist = &exp->exp_ldlm_data.led_held_locks;
2846 struct list_head rpc_list;
2847 struct ldlm_lock *lock, *next;
2848 struct ldlm_lock_desc desc;
2850 /* don't do this for local client */
2851 if (!exp->u.eu_mds_data.med_remote)
2854 /* don't revoke locks during recovery */
2855 if (exp->exp_obd->obd_recovering)
2859 INIT_LIST_HEAD(&rpc_list);
2861 spin_lock(&exp->exp_ldlm_data.led_lock);
2862 list_for_each_entry_safe(lock, next, locklist, l_export_chain) {
2864 lock_res_and_lock(lock);
2865 if (lock->l_req_mode != lock->l_granted_mode) {
2866 unlock_res_and_lock(lock);
2870 LASSERT(lock->l_resource);
2871 if (lock->l_resource->lr_type != LDLM_IBITS &&
2872 lock->l_resource->lr_type != LDLM_PLAIN) {
2873 unlock_res_and_lock(lock);
2877 if (lock->l_flags & LDLM_FL_AST_SENT) {
2878 unlock_res_and_lock(lock);
2882 LASSERT(lock->l_blocking_ast);
2883 LASSERT(!lock->l_blocking_lock);
2885 lock->l_flags |= LDLM_FL_AST_SENT;
2886 unlock_res_and_lock(lock);
2888 list_move(&lock->l_export_chain, &rpc_list);
2890 spin_unlock(&exp->exp_ldlm_data.led_lock);
2892 while (!list_empty(&rpc_list)) {
2893 lock = list_entry(rpc_list.next, struct ldlm_lock,
2895 list_del_init(&lock->l_export_chain);
2897 /* the desc just pretend to exclusive */
2898 ldlm_lock2desc(lock, &desc);
2899 desc.l_req_mode = LCK_EX;
2900 desc.l_granted_mode = 0;
2902 lock->l_blocking_ast(lock, &desc, NULL, LDLM_CB_BLOCKING);
2907 static int mds_msg_check_version(struct lustre_msg *msg)
2913 case MDS_DISCONNECT:
2915 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2917 CERROR("bad opc %u version %08x, expecting %08x\n",
2918 msg->opc, msg->version, LUSTRE_OBD_VERSION);
2923 case MDS_GETATTR_LOCK:
2924 case MDS_ACCESS_CHECK:
2928 case MDS_DONE_WRITING:
2932 rc = lustre_msg_check_version(msg, LUSTRE_MDS_VERSION);
2934 CERROR("bad opc %u version %08x, expecting %08x\n",
2935 msg->opc, msg->version, LUSTRE_MDS_VERSION);
2939 case LDLM_BL_CALLBACK:
2940 case LDLM_CP_CALLBACK:
2941 rc = lustre_msg_check_version(msg, LUSTRE_DLM_VERSION);
2943 CERROR("bad opc %u version %08x, expecting %08x\n",
2944 msg->opc, msg->version, LUSTRE_DLM_VERSION);
2946 case OBD_LOG_CANCEL:
2947 case LLOG_ORIGIN_HANDLE_OPEN:
2948 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
2949 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
2950 case LLOG_ORIGIN_HANDLE_READ_HEADER:
2951 case LLOG_ORIGIN_HANDLE_CLOSE:
2953 rc = lustre_msg_check_version(msg, LUSTRE_LOG_VERSION);
2955 CERROR("bad opc %u version %08x, expecting %08x\n",
2956 msg->opc, msg->version, LUSTRE_LOG_VERSION);
2962 rc = lustre_msg_check_version(msg, LUSTRE_OBD_VERSION);
2964 CERROR("bad opc %u version %08x, expecting %08x\n",
2965 msg->opc, msg->version, LUSTRE_OBD_VERSION);
2968 case SEC_INIT_CONTINUE:
2973 CERROR("MDS unknown opcode %d\n", msg->opc);
2981 int mds_handle(struct ptlrpc_request *req)
2983 int should_process, fail = OBD_FAIL_MDS_ALL_REPLY_NET;
2984 struct obd_device *obd = NULL;
2985 struct mds_obd *mds = NULL; /* quell gcc overwarning */
2989 OBD_FAIL_RETURN(OBD_FAIL_MDS_ALL_REQUEST_NET | OBD_FAIL_ONCE, 0);
2991 rc = mds_msg_check_version(req->rq_reqmsg);
2993 CERROR("MDS drop mal-formed request\n");
2997 /* Security opc should NOT trigger any recovery events */
2998 if (req->rq_reqmsg->opc == SEC_INIT ||
2999 req->rq_reqmsg->opc == SEC_INIT_CONTINUE) {
3000 if (req->rq_export) {
3001 mds_req_add_idmapping(req,
3002 &req->rq_export->exp_mds_data);
3003 mds_revoke_export_locks(req->rq_export);
3006 } else if (req->rq_reqmsg->opc == SEC_FINI) {
3007 if (req->rq_export) {
3008 mds_req_del_idmapping(req,
3009 &req->rq_export->exp_mds_data);
3010 mds_revoke_export_locks(req->rq_export);
3015 LASSERT(current->journal_info == NULL);
3016 /* XXX identical to OST */
3017 if (req->rq_reqmsg->opc != MDS_CONNECT) {
3018 struct mds_export_data *med;
3021 if (req->rq_export == NULL) {
3022 CERROR("operation %d on unconnected MDS from %s\n",
3023 req->rq_reqmsg->opc,
3025 req->rq_status = -ENOTCONN;
3026 GOTO(out, rc = -ENOTCONN);
3029 med = &req->rq_export->exp_mds_data;
3030 obd = req->rq_export->exp_obd;
3033 /* sanity check: if the xid matches, the request must
3034 * be marked as a resent or replayed */
3035 if (req->rq_xid == le64_to_cpu(med->med_mcd->mcd_last_xid) ||
3036 req->rq_xid == le64_to_cpu(med->med_mcd->mcd_last_close_xid)) {
3037 LASSERTF(lustre_msg_get_flags(req->rq_reqmsg) &
3038 (MSG_RESENT | MSG_REPLAY),
3039 "rq_xid "LPU64" matches last_xid, "
3040 "expected RESENT flag\n",
3043 /* else: note the opposite is not always true; a
3044 * RESENT req after a failover will usually not match
3045 * the last_xid, since it was likely never
3046 * committed. A REPLAYed request will almost never
3047 * match the last xid, however it could for a
3048 * committed, but still retained, open. */
3050 spin_lock_bh(&obd->obd_processing_task_lock);
3051 recovering = obd->obd_recovering;
3052 spin_unlock_bh(&obd->obd_processing_task_lock);
3054 rc = mds_filter_recovery_request(req, obd,
3056 if (rc || should_process == 0) {
3058 } else if (should_process < 0) {
3059 req->rq_status = should_process;
3060 rc = ptlrpc_error(req);
3066 switch (req->rq_reqmsg->opc) {
3068 DEBUG_REQ(D_INODE, req, "connect");
3069 OBD_FAIL_RETURN(OBD_FAIL_MDS_CONNECT_NET, 0);
3070 rc = target_handle_connect(req);
3072 struct mds_export_data *med;
3074 LASSERT(req->rq_export);
3075 med = &req->rq_export->u.eu_mds_data;
3076 mds_init_export_data(req, med);
3077 mds_req_add_idmapping(req, med);
3079 /* Now that we have an export, set mds. */
3080 obd = req->rq_export->exp_obd;
3081 mds = mds_req2mds(req);
3085 case MDS_DISCONNECT:
3086 DEBUG_REQ(D_INODE, req, "disconnect");
3087 OBD_FAIL_RETURN(OBD_FAIL_MDS_DISCONNECT_NET, 0);
3088 rc = target_handle_disconnect(req);
3089 req->rq_status = rc; /* superfluous? */
3093 DEBUG_REQ(D_INODE, req, "getstatus");
3094 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETSTATUS_NET, 0);
3095 rc = mds_getstatus(req);
3099 DEBUG_REQ(D_INODE, req, "getattr");
3100 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_NET, 0);
3101 rc = mds_getattr(req, MDS_REQ_REC_OFF);
3104 case MDS_ACCESS_CHECK:
3105 DEBUG_REQ(D_INODE, req, "access_check");
3106 OBD_FAIL_RETURN(OBD_FAIL_MDS_ACCESS_CHECK_NET, 0);
3107 rc = mds_access_check(req, MDS_REQ_REC_OFF);
3110 case MDS_GETATTR_LOCK: {
3111 struct lustre_handle lockh;
3112 DEBUG_REQ(D_INODE, req, "getattr_lock");
3113 OBD_FAIL_RETURN(OBD_FAIL_MDS_GETATTR_LOCK_NET, 0);
3115 /* If this request gets a reconstructed reply, we won't be
3116 * acquiring any new locks in mds_getattr_lock, so we don't
3120 rc = mds_getattr_lock(req, MDS_REQ_REC_OFF, &lockh,
3121 MDS_INODELOCK_UPDATE);
3122 /* this non-intent call (from an ioctl) is special */
3123 req->rq_status = rc;
3124 if (rc == 0 && lockh.cookie)
3125 ldlm_lock_decref(&lockh, LCK_PR);
3129 DEBUG_REQ(D_INODE, req, "statfs");
3130 OBD_FAIL_RETURN(OBD_FAIL_MDS_STATFS_NET, 0);
3131 rc = mds_statfs(req);
3135 DEBUG_REQ(D_INODE, req, "readpage");
3136 OBD_FAIL_RETURN(OBD_FAIL_MDS_READPAGE_NET, 0);
3137 rc = mds_readpage(req, MDS_REQ_REC_OFF);
3139 if (OBD_FAIL_CHECK_ONCE(OBD_FAIL_MDS_SENDPAGE)) {
3140 if (req->rq_reply_state) {
3141 lustre_free_reply_state (req->rq_reply_state);
3142 req->rq_reply_state = NULL;
3149 __u32 *opcp = lustre_msg_buf(req->rq_reqmsg, MDS_REQ_REC_OFF,
3152 int size[3] = {sizeof(struct mds_body), mds->mds_max_mdsize,
3153 mds->mds_max_cookiesize};
3156 /* NB only peek inside req now; mds_reint() will swab it */
3158 CERROR ("Can't inspect opcode\n");
3163 if (lustre_msg_swabbed (req->rq_reqmsg))
3166 DEBUG_REQ(D_INODE, req, "reint %d (%s)", opc,
3167 (opc < sizeof(reint_names) / sizeof(reint_names[0]) ||
3168 reint_names[opc] == NULL) ? reint_names[opc] :
3171 OBD_FAIL_RETURN(OBD_FAIL_MDS_REINT_NET, 0);
3173 if (opc == REINT_UNLINK || opc == REINT_RENAME)
3175 else if (opc == REINT_OPEN)
3180 /* for SETATTR: I have different reply setting for
3181 * remote setfacl, so delay the reply buffer allocation.
3183 if (opc != REINT_SETATTR) {
3184 rc = lustre_pack_reply(req, bufcount, size, NULL);
3189 rc = mds_reint(req, MDS_REQ_REC_OFF, NULL);
3190 fail = OBD_FAIL_MDS_REINT_NET_REP;
3195 DEBUG_REQ(D_INODE, req, "close");
3196 OBD_FAIL_RETURN(OBD_FAIL_MDS_CLOSE_NET, 0);
3197 rc = mds_close(req, MDS_REQ_REC_OFF);
3200 case MDS_DONE_WRITING:
3201 DEBUG_REQ(D_INODE, req, "done_writing");
3202 OBD_FAIL_RETURN(OBD_FAIL_MDS_DONE_WRITING_NET, 0);
3203 rc = mds_done_writing(req, MDS_REQ_REC_OFF);
3207 DEBUG_REQ(D_INODE, req, "pin");
3208 OBD_FAIL_RETURN(OBD_FAIL_MDS_PIN_NET, 0);
3209 rc = mds_pin(req, MDS_REQ_REC_OFF);
3213 DEBUG_REQ(D_INODE, req, "sync");
3214 OBD_FAIL_RETURN(OBD_FAIL_MDS_SYNC_NET, 0);
3215 rc = mds_sync(req, MDS_REQ_REC_OFF);
3218 DEBUG_REQ(D_INODE, req, "parseid");
3219 rc = mds_parse_id(req);
3222 DEBUG_REQ(D_INODE, req, "ping");
3223 rc = target_handle_ping(req);
3226 case OBD_LOG_CANCEL:
3227 CDEBUG(D_INODE, "log cancel\n");
3228 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOG_CANCEL_NET, 0);
3229 rc = -ENOTSUPP; /* la la la */
3233 DEBUG_REQ(D_INODE, req, "enqueue");
3234 OBD_FAIL_RETURN(OBD_FAIL_LDLM_ENQUEUE, 0);
3235 rc = ldlm_handle_enqueue(req, ldlm_server_completion_ast,
3236 ldlm_server_blocking_ast, NULL);
3237 fail = OBD_FAIL_LDLM_REPLY;
3240 DEBUG_REQ(D_INODE, req, "convert");
3241 OBD_FAIL_RETURN(OBD_FAIL_LDLM_CONVERT, 0);
3242 rc = ldlm_handle_convert(req);
3244 case LDLM_BL_CALLBACK:
3245 case LDLM_CP_CALLBACK:
3246 DEBUG_REQ(D_INODE, req, "callback");
3247 CERROR("callbacks should not happen on MDS\n");
3249 OBD_FAIL_RETURN(OBD_FAIL_LDLM_BL_CALLBACK, 0);
3251 case LLOG_ORIGIN_HANDLE_OPEN:
3252 DEBUG_REQ(D_INODE, req, "llog_init");
3253 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
3254 rc = llog_origin_handle_open(req);
3256 case LLOG_ORIGIN_HANDLE_NEXT_BLOCK:
3257 DEBUG_REQ(D_INODE, req, "llog next block");
3258 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
3259 rc = llog_origin_handle_next_block(req);
3261 case LLOG_ORIGIN_HANDLE_PREV_BLOCK:
3262 DEBUG_REQ(D_INODE, req, "llog prev block");
3263 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
3264 rc = llog_origin_handle_prev_block(req);
3266 case LLOG_ORIGIN_HANDLE_READ_HEADER:
3267 DEBUG_REQ(D_INODE, req, "llog read header");
3268 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
3269 rc = llog_origin_handle_read_header(req);
3271 case LLOG_ORIGIN_HANDLE_CLOSE:
3272 DEBUG_REQ(D_INODE, req, "llog close");
3273 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
3274 rc = llog_origin_handle_close(req);
3277 DEBUG_REQ(D_INODE, req, "ost_create");
3278 rc = mdt_obj_create(req);
3281 DEBUG_REQ(D_INODE, req, "get_info");
3282 rc = mdt_get_info(req);
3285 DEBUG_REQ(D_INODE, req, "set_info");
3286 rc = mdt_set_info(req);
3289 CDEBUG(D_INODE, "write\n");
3290 OBD_FAIL_RETURN(OBD_FAIL_OST_BRW_NET, 0);
3291 rc = ost_brw_write(req, NULL);
3292 LASSERT(current->journal_info == NULL);
3293 /* mdt_brw sends its own replies */
3297 DEBUG_REQ(D_INODE, req, "llog catinfo");
3298 OBD_FAIL_RETURN(OBD_FAIL_OBD_LOGD_NET, 0);
3299 rc = llog_catinfo(req);
3302 req->rq_status = -ENOTSUPP;
3303 rc = ptlrpc_error(req);
3307 LASSERT(current->journal_info == NULL);
3309 /* If we're DISCONNECTing, the mds_export_data is already freed */
3310 if (!rc && req->rq_reqmsg->opc != MDS_DISCONNECT) {
3311 struct mds_export_data *med = &req->rq_export->exp_mds_data;
3312 struct obd_device *obd = list_entry(mds, struct obd_device,
3314 req->rq_repmsg->last_xid =
3315 le64_to_cpu(med->med_mcd->mcd_last_xid);
3317 if (!obd->obd_no_transno) {
3318 req->rq_repmsg->last_committed =
3319 obd->obd_last_committed;
3321 DEBUG_REQ(D_IOCTL, req,
3322 "not sending last_committed update");
3324 CDEBUG(D_INFO, "last_transno "LPU64", last_committed "LPU64
3326 mds->mds_last_transno, obd->obd_last_committed,
3332 target_send_reply(req, rc, fail);
3336 /* Update the server data on disk. This stores the new mount_count and also the
3337 * last_rcvd value to disk. If we don't have a clean shutdown, then the server
3338 * last_rcvd value may be less than that of the clients. This will alert us
3339 * that we may need to do client recovery.
3341 * Also assumes for mds_last_transno that we are not modifying it (no locking).
3343 int mds_update_server_data(struct obd_device *obd, int force_sync)
3345 struct mds_obd *mds = &obd->u.mds;
3346 struct mds_server_data *msd = mds->mds_server_data;
3347 struct file *filp = mds->mds_rcvd_filp;
3348 struct lvfs_run_ctxt saved;
3353 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3354 msd->msd_last_transno = cpu_to_le64(mds->mds_last_transno);
3356 CDEBUG(D_SUPER, "MDS mount_count is "LPU64", last_transno is "LPU64"\n",
3357 mds->mds_mount_count, mds->mds_last_transno);
3358 rc = fsfilt_write_record(obd, filp, msd, sizeof(*msd), &off, force_sync);
3360 CERROR("error writing MDS server data: rc = %d\n", rc);
3361 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3366 /* saves last allocated fid counter to file. */
3367 int mds_update_last_fid(struct obd_device *obd, void *handle,
3370 struct mds_obd *mds = &obd->u.mds;
3371 struct file *filp = mds->mds_fid_filp;
3372 struct lvfs_run_ctxt saved;
3378 spin_lock(&mds->mds_last_fid_lock);
3379 last_fid = mds->mds_last_fid;
3380 spin_unlock(&mds->mds_last_fid_lock);
3382 CDEBUG(D_SUPER, "MDS last_fid is #"LPU64"\n",
3386 fsfilt_add_journal_cb(obd, mds->mds_sb, last_fid,
3387 handle, mds_commit_last_fid_cb,
3391 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3392 rc = fsfilt_write_record(obd, filp, &last_fid, sizeof(last_fid),
3394 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3397 CERROR("error writing MDS last_fid #"LPU64
3398 ", err = %d\n", last_fid, rc);
3402 CDEBUG(D_SUPER, "wrote fid #"LPU64" at idx "
3403 "%llu: err = %d\n", last_fid, off, rc);
3408 void mds_set_last_fid(struct obd_device *obd, __u64 fid)
3410 struct mds_obd *mds = &obd->u.mds;
3412 spin_lock(&mds->mds_last_fid_lock);
3413 if (fid > mds->mds_last_fid)
3414 mds->mds_last_fid = fid;
3415 spin_unlock(&mds->mds_last_fid_lock);
3418 void mds_commit_last_transno_cb(struct obd_device *obd,
3419 __u64 transno, void *data,
3422 obd_transno_commit_cb(obd, transno, error);
3425 void mds_commit_last_fid_cb(struct obd_device *obd,
3426 __u64 fid, void *data,
3430 CERROR("%s: fid "LPD64" commit error: %d\n",
3431 obd->obd_name, fid, error);
3435 CDEBUG(D_HA, "%s: fid "LPD64" committed\n",
3436 obd->obd_name, fid);
3439 __u64 mds_alloc_fid(struct obd_device *obd)
3441 struct mds_obd *mds = &obd->u.mds;
3444 spin_lock(&mds->mds_last_fid_lock);
3445 fid = ++mds->mds_last_fid;
3446 spin_unlock(&mds->mds_last_fid_lock);
3452 * reads inode self id from inode EA. Probably later this should be replaced by
3453 * caching inode self id to avoid raeding it every time it is needed.
3455 int mds_read_inode_sid(struct obd_device *obd, struct inode *inode,
3456 struct lustre_id *id)
3461 LASSERT(id != NULL);
3462 LASSERT(obd != NULL);
3463 LASSERT(inode != NULL);
3465 rc = fsfilt_get_md(obd, inode, &id->li_fid,
3466 sizeof(id->li_fid), EA_SID);
3468 CERROR("fsfilt_get_md() failed, "
3477 id_ino(id) = inode->i_ino;
3478 id_gen(id) = inode->i_generation;
3479 id_type(id) = S_IFMT & inode->i_mode;
3484 int mds_read_inode_pid(struct obd_device *obd, struct inode *inode,
3485 struct lustre_id *id)
3490 LASSERT(inode && id);
3492 rc = fsfilt_get_md(obd, inode, id, sizeof(*id), EA_PID);
3494 CERROR("get parent id from EA failed, rc=%d\n", rc);
3503 /* updates inode self id in EA. */
3504 int mds_update_inode_ids(struct obd_device *obd, struct inode *inode,
3505 void *handle, struct lustre_id *id,
3506 struct lustre_id *pid)
3512 LASSERT(id == NULL || id_fid(id) != 0);
3513 LASSERT(pid == NULL || id_fid(pid) != 0);
3514 LASSERT(obd != NULL);
3515 LASSERT(inode != NULL);
3518 mds_set_last_fid(obd, id_fid(id));
3519 rc = fsfilt_set_md(obd, inode, handle, &id->li_fid,
3520 sizeof(id->li_fid), EA_SID);
3521 LASSERTF(rc == 0, "failed to update fid: %d\n", rc);
3524 rc = fsfilt_set_md(obd, inode, handle, pid,
3525 sizeof(*pid), EA_PID);
3526 LASSERTF(rc == 0, "failed to update parent fid: %d\n", rc);
3532 /* mount the file system (secretly) */
3533 static int mds_setup(struct obd_device *obd, obd_count len, void *buf)
3535 struct lustre_cfg* lcfg = buf;
3536 struct mds_obd *mds = &obd->u.mds;
3537 struct lvfs_obd_ctxt *lvfs_ctxt = NULL;
3538 char *options = NULL;
3539 struct vfsmount *mnt;
3542 struct crypto_tfm *tfm = NULL;
3546 if (lcfg->lcfg_bufcount < 3)
3547 RETURN(rc = -EINVAL);
3549 if (LUSTRE_CFG_BUFLEN(lcfg, 1) == 0 || LUSTRE_CFG_BUFLEN(lcfg, 2) == 0)
3550 RETURN(rc = -EINVAL);
3552 obd->obd_fsops = fsfilt_get_ops(lustre_cfg_string(lcfg, 2));
3553 if (IS_ERR(obd->obd_fsops))
3554 RETURN(rc = PTR_ERR(obd->obd_fsops));
3556 mds->mds_max_mdsize = sizeof(struct lov_mds_md);
3558 page = get_zeroed_page(GFP_KERNEL);
3562 options = (char *)page;
3565 * here we use "iopen_nopriv" hardcoded, because it affects MDS utility
3566 * and the rest of options are passed by mount options. Probably this
3567 * should be moved to somewhere else like startup scripts or lconf. */
3568 sprintf(options, "iopen_nopriv");
3570 if (LUSTRE_CFG_BUFLEN(lcfg, 4) > 0 && lustre_cfg_buf(lcfg, 4))
3571 sprintf(options + strlen(options), ",%s",
3572 lustre_cfg_string(lcfg, 4));
3574 /* we have to know mdsnum before touching underlying fs -bzzz */
3575 atomic_set(&mds->mds_open_count, 0);
3576 sema_init(&mds->mds_md_sem, 1);
3577 mds->mds_md_connected = 0;
3578 mds->mds_md_name = NULL;
3580 if (LUSTRE_CFG_BUFLEN(lcfg, 5) > 0 && lustre_cfg_buf(lcfg, 5) &&
3581 strncmp(lustre_cfg_string(lcfg, 5), "dumb", LUSTRE_CFG_BUFLEN(lcfg, 5))) {
3584 generate_random_uuid(uuid);
3585 class_uuid_unparse(uuid, &mds->mds_md_uuid);
3587 OBD_ALLOC(mds->mds_md_name, LUSTRE_CFG_BUFLEN(lcfg, 5));
3588 if (mds->mds_md_name == NULL)
3589 RETURN(rc = -ENOMEM);
3591 memcpy(mds->mds_md_name, lustre_cfg_buf(lcfg, 5),
3592 LUSTRE_CFG_BUFLEN(lcfg, 5));
3594 CDEBUG(D_OTHER, "MDS: %s is master for %s\n",
3595 obd->obd_name, mds->mds_md_name);
3597 rc = mds_md_connect(obd, mds->mds_md_name);
3599 OBD_FREE(mds->mds_md_name, LUSTRE_CFG_BUFLEN(lcfg, 5));
3604 mds->mds_obd_type = MDS_MASTER_OBD;
3606 if (LUSTRE_CFG_BUFLEN(lcfg, 6) > 0 && lustre_cfg_buf(lcfg, 6) &&
3607 strncmp(lustre_cfg_string(lcfg, 6), "dumb",
3608 LUSTRE_CFG_BUFLEN(lcfg, 6))) {
3609 if (!memcmp(lustre_cfg_string(lcfg, 6), "master",
3610 strlen("master"))) {
3611 mds->mds_obd_type = MDS_MASTER_OBD;
3612 } else if (!memcmp(lustre_cfg_string(lcfg, 6), "cache",
3614 mds->mds_obd_type = MDS_CACHE_OBD;
3618 rc = lvfs_mount_fs(lustre_cfg_string(lcfg, 1),
3619 lustre_cfg_string(lcfg, 2),
3620 options, 0, &lvfs_ctxt);
3624 if (rc || !lvfs_ctxt) {
3625 CERROR("lvfs_mount_fs failed: rc = %d\n", rc);
3629 mnt = lvfs_ctxt->loc_mnt;
3630 mds->mds_lvfs_ctxt = lvfs_ctxt;
3631 ll_clear_rdonly(ll_sbdev(mnt->mnt_sb));
3633 CDEBUG(D_SUPER, "%s: mnt = %p\n", lustre_cfg_string(lcfg, 1), mnt);
3635 mds->mds_fidext_thumb = 0;
3636 sema_init(&mds->mds_epoch_sem, 1);
3637 atomic_set(&mds->mds_real_clients, 0);
3638 spin_lock_init(&mds->mds_fidext_lock);
3639 spin_lock_init(&mds->mds_fidmap_lock);
3640 spin_lock_init(&mds->mds_transno_lock);
3641 spin_lock_init(&mds->mds_last_fid_lock);
3642 sema_init(&mds->mds_orphan_recovery_sem, 1);
3643 mds->mds_max_cookiesize = sizeof(struct llog_cookie);
3645 sprintf(ns_name, "mds-%s", obd->obd_uuid.uuid);
3646 obd->obd_namespace = ldlm_namespace_new(ns_name, LDLM_NAMESPACE_SERVER);
3648 if (obd->obd_namespace == NULL) {
3649 mds_cleanup(obd, 0);
3650 GOTO(err_put, rc = -ENOMEM);
3652 ldlm_register_intent(obd->obd_namespace, mds_intent_policy);
3654 tfm = crypto_alloc_tfm(CAPA_HMAC_ALG, 0);
3656 GOTO(err_ns, rc = -ENOSYS);
3658 mds->mds_capa_hmac = tfm;
3659 mds->mds_capa_timeout = CAPA_TIMEOUT;
3660 mds->mds_capa_key_timeout = CAPA_KEY_TIMEOUT;
3662 rc = mds_fs_setup(obd, mnt);
3664 CERROR("%s: MDS filesystem method init failed: rc = %d\n",
3669 rc = llog_start_commit_thread();
3674 if (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0 && lustre_cfg_buf(lcfg, 3) &&
3675 strncmp(lustre_cfg_string(lcfg, 3), "dumb",
3676 LUSTRE_CFG_BUFLEN(lcfg, 3))) {
3679 generate_random_uuid(uuid);
3680 class_uuid_unparse(uuid, &mds->mds_dt_uuid);
3682 OBD_ALLOC(mds->mds_profile, LUSTRE_CFG_BUFLEN(lcfg, 3));
3683 if (mds->mds_profile == NULL)
3684 GOTO(err_fs, rc = -ENOMEM);
3686 strncpy(mds->mds_profile, lustre_cfg_string(lcfg, 3),
3687 LUSTRE_CFG_BUFLEN(lcfg, 3));
3691 * setup root dir and files ID dir if lmv already connected, or there is
3694 if (mds->mds_md_exp || (LUSTRE_CFG_BUFLEN(lcfg, 3) > 0 &&
3695 lustre_cfg_buf(lcfg, 3) &&
3696 strncmp(lustre_cfg_string(lcfg, 3), "dumb",
3697 LUSTRE_CFG_BUFLEN(lcfg, 3)))) {
3698 rc = mds_fs_setup_rootid(obd);
3702 rc = mds_fs_setup_virtid(obd);
3708 ptlrpc_init_client(LDLM_CB_REQUEST_PORTAL, LDLM_CB_REPLY_PORTAL,
3709 "mds_ldlm_client", &obd->obd_ldlm_client);
3710 obd->obd_replayable = 1;
3712 mds->mds_crypto_type = NO_CRYPTO;
3714 rc = mds_postsetup(obd);
3721 /* No extra cleanup needed for llog_init_commit_thread() */
3722 mds_fs_cleanup(obd, 0);
3724 crypto_free_tfm(mds->mds_capa_hmac);
3726 ldlm_namespace_free(obd->obd_namespace, 0);
3727 obd->obd_namespace = NULL;
3730 lvfs_umount_fs(mds->mds_lvfs_ctxt);
3734 fsfilt_put_ops(obd->obd_fsops);
3738 static int mds_fs_post_setup(struct obd_device *obd)
3740 struct mds_obd *mds = &obd->u.mds;
3741 struct dentry *dentry;
3745 dentry = mds_id2dentry(obd, &mds->mds_rootid, NULL);
3746 if (IS_ERR(dentry)) {
3747 CERROR("Can't find ROOT, err = %d\n",
3748 (int)PTR_ERR(dentry));
3749 RETURN(PTR_ERR(dentry));
3751 rc = fsfilt_post_setup(obd, dentry);
3752 //set id2name function handler
3753 fsfilt_set_info(obd, mds->mds_sb, NULL, 7, "id2name",
3754 sizeof(mds_audit_id2name), mds_audit_id2name);
3760 static int mds_postsetup(struct obd_device *obd)
3762 struct mds_obd *mds = &obd->u.mds;
3766 rc = obd_llog_setup(obd, &obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT,
3767 obd, 0, NULL, &llog_lvfs_ops);
3771 if (mds->mds_profile) {
3772 struct llog_ctxt *lgctxt;
3773 struct lvfs_run_ctxt saved;
3774 struct lustre_profile *lprof;
3775 struct config_llog_instance cfg;
3777 cfg.cfg_instance = NULL;
3778 cfg.cfg_uuid = mds->mds_dt_uuid;
3779 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3781 lgctxt = llog_get_context(&obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT);
3783 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3784 GOTO(err_llog, rc = -EINVAL);
3787 rc = class_config_process_llog(lgctxt, mds->mds_profile, &cfg);
3788 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3793 lprof = class_get_profile(mds->mds_profile);
3794 if (lprof == NULL) {
3795 CERROR("No profile found: %s\n", mds->mds_profile);
3796 GOTO(err_cleanup, rc = -ENOENT);
3798 rc = mds_dt_connect(obd, lprof->lp_lov);
3800 GOTO(err_cleanup, rc);
3802 rc = mds_md_postsetup(obd);
3804 GOTO(err_cleanup, rc);
3806 rc = mds_fs_post_setup(obd);
3808 CERROR("can not post setup fsfilt\n");
3813 obd_llog_cleanup(llog_get_context(&obd->obd_llogs,
3814 LLOG_CONFIG_ORIG_CTXT));
3818 int mds_postrecov_common(struct obd_device *obd)
3820 struct mds_obd *mds = &obd->u.mds;
3821 struct llog_ctxt *ctxt;
3822 int rc, item = 0, valsize;
3826 LASSERT(!obd->obd_recovering);
3827 ctxt = llog_get_context(&obd->obd_llogs, LLOG_UNLINK_ORIG_CTXT);
3828 LASSERT(ctxt != NULL);
3830 /* clean PENDING dir */
3831 rc = mds_cleanup_orphans(obd);
3836 group = FILTER_GROUP_FIRST_MDS + mds->mds_num;
3837 valsize = sizeof(group);
3838 rc = obd_set_info(mds->mds_dt_exp, strlen("mds_conn"),
3839 "mds_conn", valsize, &group);
3843 rc = llog_connect(ctxt, obd->u.mds.mds_dt_desc.ld_tgt_count,
3846 CERROR("%s: failed at llog_origin_connect: %d\n",
3851 /* remove the orphaned precreated objects */
3852 rc = mds_dt_clear_orphans(mds, NULL /* all OSTs */);
3857 RETURN(rc < 0 ? rc : item);
3860 /* cleanup all llogging subsystems */
3861 rc = obd_llog_finish(obd, &obd->obd_llogs,
3862 mds->mds_dt_desc.ld_tgt_count);
3864 CERROR("%s: failed to cleanup llogging subsystems\n",
3869 int mds_postrecov(struct obd_device *obd)
3873 rc = mds_postrecov_common(obd);
3875 rc = mds_md_reconnect(obd);
3879 int mds_dt_clean(struct obd_device *obd)
3881 struct mds_obd *mds = &obd->u.mds;
3884 if (mds->mds_profile) {
3886 struct llog_ctxt *llctx;
3887 struct lvfs_run_ctxt saved;
3888 struct config_llog_instance cfg;
3889 int len = strlen(mds->mds_profile) + sizeof("-clean") + 1;
3891 OBD_ALLOC(cln_prof, len);
3893 CERROR("can't allocate memory, processing cleanup "
3894 "profile is skipped\n");
3898 sprintf(cln_prof, "%s-clean", mds->mds_profile);
3900 cfg.cfg_instance = NULL;
3901 cfg.cfg_uuid = mds->mds_dt_uuid;
3903 push_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3904 llctx = llog_get_context(&obd->obd_llogs,
3905 LLOG_CONFIG_ORIG_CTXT);
3906 class_config_process_llog(llctx, cln_prof, &cfg);
3907 pop_ctxt(&saved, &obd->obd_lvfs_ctxt, NULL);
3909 OBD_FREE(cln_prof, len);
3911 OBD_FREE(mds->mds_profile, strlen(mds->mds_profile) + 1);
3912 mds->mds_profile = NULL;
3917 int mds_md_clean(struct obd_device *obd)
3919 struct mds_obd *mds = &obd->u.mds;
3922 if (mds->mds_md_name) {
3923 OBD_FREE(mds->mds_md_name, strlen(mds->mds_md_name) + 1);
3924 mds->mds_md_name = NULL;
3929 static int mds_precleanup(struct obd_device *obd, int flags)
3935 mds_dt_disconnect(obd, flags);
3937 obd_llog_cleanup(llog_get_context(&obd->obd_llogs, LLOG_CONFIG_ORIG_CTXT));
3941 extern void lgss_svc_cache_purge_all(void);
3942 static int mds_cleanup(struct obd_device *obd, int flags)
3944 struct mds_obd *mds = &obd->u.mds;
3947 if (mds->mds_sb == NULL)
3950 mds_update_server_data(obd, 1);
3951 mds_update_last_fid(obd, NULL, 1);
3953 if (mds->mds_dt_objids != NULL) {
3954 int size = mds->mds_dt_desc.ld_tgt_count *
3956 OBD_FREE(mds->mds_dt_objids, size);
3958 mds_fs_cleanup(obd, flags);
3962 /* 2 seems normal on mds, (may_umount() also expects 2
3963 fwiw), but we only see 1 at this point in obdfilter. */
3964 lvfs_umount_fs(mds->mds_lvfs_ctxt);
3968 ldlm_namespace_free(obd->obd_namespace, flags & OBD_OPT_FORCE);
3970 spin_lock_bh(&obd->obd_processing_task_lock);
3971 if (obd->obd_recovering) {
3972 target_cancel_recovery_timer(obd);
3973 obd->obd_recovering = 0;
3975 spin_unlock_bh(&obd->obd_processing_task_lock);
3978 fsfilt_put_ops(obd->obd_fsops);
3982 lgss_svc_cache_purge_all();
3985 spin_lock(&mds->mds_denylist_lock);
3986 while (!list_empty( &mds->mds_denylist ) ) {
3987 deny_sec_t *p_deny_sec = list_entry(mds->mds_denylist.next,
3989 list_del(&p_deny_sec->list);
3990 OBD_FREE(p_deny_sec, sizeof(*p_deny_sec));
3992 spin_unlock(&mds->mds_denylist_lock);
3994 mds_capa_keys_cleanup(obd);
3996 if (mds->mds_capa_hmac)
3997 crypto_free_tfm(mds->mds_capa_hmac);
4001 static int set_security(const char *value, char **sec)
4003 if (!strcmp(value, "null"))
4005 else if (!strcmp(value, "krb5i"))
4007 else if (!strcmp(value, "krb5p"))
4010 CERROR("Unrecognized security flavor %s\n", value);
4017 static int mds_process_config(struct obd_device *obd, obd_count len, void *buf)
4019 struct lustre_cfg *lcfg = buf;
4020 struct mds_obd *mds = &obd->u.mds;
4024 switch(lcfg->lcfg_command) {
4025 case LCFG_SET_SECURITY: {
4026 if ((LUSTRE_CFG_BUFLEN(lcfg, 1) == 0) ||
4027 (LUSTRE_CFG_BUFLEN(lcfg, 2) == 0))
4028 GOTO(out, rc = -EINVAL);
4030 if (!strcmp(lustre_cfg_string(lcfg, 1), "mds_sec"))
4031 rc = set_security(lustre_cfg_string(lcfg, 2),
4033 else if (!strcmp(lustre_cfg_string(lcfg, 1), "oss_sec"))
4034 rc = set_security(lustre_cfg_string(lcfg, 2),
4036 else if (!strcmp(lustre_cfg_string(lcfg, 1), "deny_sec")){
4037 spin_lock(&mds->mds_denylist_lock);
4038 rc = add_deny_security(lustre_cfg_string(lcfg, 2),
4039 &mds->mds_denylist);
4040 spin_unlock(&mds->mds_denylist_lock);
4042 CERROR("Unrecognized key\n");
4048 CERROR("Unknown command: %d\n", lcfg->lcfg_command);
4049 GOTO(out, rc = -EINVAL);
/*
 * fixup_handle_for_resent_req(): for a request flagged MSG_RESENT,
 * try to locate the lock that the original (lost-reply) request
 * already enqueued, by matching the client's remote lock handle
 * against every lock held by this export.  On a match the local
 * cookie is restored into *lockh and the existing lock is returned
 * via *old_lock (with a reference taken by LDLM_LOCK_GET).  If no
 * match is found but the xid matches the last (or last-close) xid,
 * the resend is still allowed; otherwise MSG_RESENT is cleared so
 * the request is processed as a brand-new one.
 * NOTE(review): extraction dropped several lines (early return,
 * the xid comparisons' left-hand sides, EXIT paths) -- the control
 * flow notes above are inferred from what remains; confirm.
 */
4055 static void fixup_handle_for_resent_req(struct ptlrpc_request *req,
4057 struct ldlm_lock *new_lock,
4058 struct ldlm_lock **old_lock,
4059 struct lustre_handle *lockh)
4061 struct obd_export *exp = req->rq_export;
4062 struct obd_device *obd = exp->exp_obd;
4063 struct ldlm_request *dlmreq =
4064 lustre_msg_buf(req->rq_reqmsg, offset, sizeof (*dlmreq));
4065 struct lustre_handle remote_hdl = dlmreq->lock_handle1;
4066 struct list_head *iter;
/* Nothing to do unless the client marked this request as a resend. */
4068 if (!(lustre_msg_get_flags(req->rq_reqmsg) & MSG_RESENT))
/* Walk the export's held-lock list under the namespace hash lock. */
4071 spin_lock(&obd->obd_namespace->ns_hash_lock);
4072 list_for_each(iter, &exp->exp_ldlm_data.led_held_locks) {
4073 struct ldlm_lock *lock;
4074 lock = list_entry(iter, struct ldlm_lock, l_export_chain);
/* Skip the lock being granted for this very request. */
4075 if (lock == new_lock)
4077 if (lock->l_remote_handle.cookie == remote_hdl.cookie) {
4078 lockh->cookie = lock->l_handle.h_cookie;
4079 LDLM_DEBUG(lock, "restoring lock cookie");
4080 DEBUG_REQ(D_HA, req, "restoring lock cookie "LPX64,
4083 *old_lock = LDLM_LOCK_GET(lock);
4084 spin_unlock(&obd->obd_namespace->ns_hash_lock);
4088 spin_unlock(&obd->obd_namespace->ns_hash_lock);
4090 /* If the xid matches, then we know this is a resent request,
4091 * and allow it. (It's probably an OPEN, for which we don't
4094 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_xid))
4098 le64_to_cpu(exp->exp_mds_data.med_mcd->mcd_last_close_xid))
4101 /* This remote handle isn't enqueued, so we never received or
4102 * processed this request. Clear MSG_RESENT, because it can
4103 * be handled like any normal request now. */
4105 lustre_msg_clear_flags(req->rq_reqmsg, MSG_RESENT);
4107 DEBUG_REQ(D_HA, req, "no existing lock with rhandle "LPX64,
/*
 * intent_disposition(): test whether the given disposition flag is
 * set in the reply's lock_policy_res1 bitmask.
 * NOTE(review): extraction dropped the braces and (presumably) a
 * NULL-rep guard -- confirm against full source.
 */
4111 int intent_disposition(struct ldlm_reply *rep, int flag)
4115 return (rep->lock_policy_res1 & flag);
/*
 * intent_set_disposition(): OR a disposition flag into the reply's
 * lock_policy_res1 bitmask.  NOTE(review): braces and the likely
 * NULL-rep guard were dropped by extraction.
 */
4118 void intent_set_disposition(struct ldlm_reply *rep, int flag)
4122 rep->lock_policy_res1 |= flag;
/*
 * mds_intent_prepare_reply_buffers(): size and pack the reply buffers
 * for an intent lock request.  Buffer 0 is always the ldlm_reply,
 * buffer 1 the mds_body, buffer 2 the (max-sized) MD EA; for
 * OPEN/GETATTR/LOOKUP/CHDIR intents further fixed-position buffers
 * follow: lsm, acl, crypto ea, capa -- see the in-code comment.
 * NOTE(review): extraction dropped the reply_buffers initializer and
 * the conditions guarding the ACL and crypto-key buffers (lines
 * 4146/4149) -- the exact gating predicates must be checked against
 * the full source.
 */
4125 static int mds_intent_prepare_reply_buffers(struct ptlrpc_request *req,
4126 struct ldlm_intent *it)
4128 struct mds_obd *mds = &req->rq_export->exp_obd->u.mds;
4129 struct mds_export_data *med = &req->rq_export->u.eu_mds_data;
4130 int rc, reply_buffers;
4131 int repsize[8] = {sizeof(struct ldlm_reply),
4132 sizeof(struct mds_body),
4133 mds->mds_max_mdsize};
4137 if (it->opc & ( IT_OPEN | IT_GETATTR | IT_LOOKUP | IT_CHDIR )) {
4138 /*Ugly here, Actually, we should prepare the reply buffer
4139 *after we know whether these stuff exist or not, which should
4140 * be fixed in future, Now each item is in the fix position,
4141 * the sequence is lsm, acl, crypto ea, capa.*/
4142 repsize[reply_buffers++] = sizeof(int);
/* Remote (cross-realm) clients additionally get a remote-perm buffer. */
4143 if (med->med_remote)
4144 repsize[reply_buffers++] =
4145 sizeof(struct mds_remote_perm);
4147 repsize[reply_buffers++] =
4148 xattr_acl_size(LL_ACL_MAX_ENTRIES);
4150 repsize[reply_buffers++] = sizeof(int);
4151 repsize[reply_buffers++] = sizeof(struct crypto_key);
4153 /* XXX: if new buffer is to be added, capability reply
4154 * buffer should always been reserved. */
4155 if (it->opc & IT_OPEN)
4156 repsize[reply_buffers++] = sizeof(struct lustre_capa);
4159 rc = lustre_pack_reply(req, reply_buffers, repsize, NULL);
/*
 * mds_intent_policy(): ldlm intent-policy callback for the MDS
 * namespace.  Decodes the ldlm_intent from the request, packs the
 * intent reply buffers, executes the intent (open/create via
 * mds_reint(), getattr/lookup via mds_getattr_lock(), unlink/slave
 * check via mds_lock_and_check_slave()), and then -- unless the
 * intent aborted -- replaces the to-be-granted lock *lockp with the
 * lock the intent handler actually took, fixing up its export,
 * remote handle and ASTs so it can be returned to the client
 * (ELDLM_LOCK_REPLACED).
 * NOTE(review): extraction dropped many lines (case labels, EXIT/
 * RETURN paths, several closing braces); the notes below describe
 * only what the surviving lines show.
 */
4164 static int mds_intent_policy(struct ldlm_namespace *ns,
4165 struct ldlm_lock **lockp, void *req_cookie,
4166 ldlm_mode_t mode, int flags, void *data)
4168 struct ptlrpc_request *req = req_cookie;
4169 struct ldlm_lock *lock = *lockp;
4170 struct ldlm_intent *it;
4171 struct ldlm_reply *rep;
4172 struct lustre_handle lockh[2] = {{0}, {0}};
4173 struct ldlm_lock *new_lock = NULL;
4174 int getattr_part = MDS_INODELOCK_UPDATE;
4177 int offset = MDS_REQ_INTENT_REC_OFF;
4180 LASSERT(req != NULL);
4181 MD_COUNTER_INCREMENT(req->rq_export->exp_obd, intent_lock);
/* Plain enqueue with no intent record: just pack a bare ldlm_reply. */
4183 if (req->rq_reqmsg->bufcount <= MDS_REQ_INTENT_IT_OFF) {
4184 /* No intent was provided */
4185 int size = sizeof(struct ldlm_reply);
4186 rc = lustre_pack_reply(req, 1, &size, NULL);
/* Swab the intent in place; NULL means the buffer was absent/bad. */
4191 it = lustre_swab_reqbuf(req, MDS_REQ_INTENT_IT_OFF, sizeof(*it),
4192 lustre_swab_ldlm_intent);
4194 CERROR("Intent missing\n");
4195 RETURN(req->rq_status = -EFAULT);
4198 LDLM_DEBUG(lock, "intent policy, opc: %s", ldlm_it2str(it->opc));
4200 rc = mds_intent_prepare_reply_buffers(req, it);
4203 RETURN(req->rq_status = rc);
4205 rep = lustre_msg_buf(req->rq_repmsg, 0, sizeof(*rep));
4206 LASSERT(rep != NULL);
/* Tell the client the intent was executed regardless of outcome. */
4208 intent_set_disposition(rep, DISP_IT_EXECD);
4210 /* execute policy */
4211 switch ((long)it->opc) {
4213 case IT_CREAT|IT_OPEN:
/* NOTE(review): fixup_handle_for_resent_req appears twice below
 * (orig lines 4214 and 4218); confirm in the full source whether
 * these belong to two different case labels or one is dead code. */
4214 fixup_handle_for_resent_req(req, MDS_REQ_INTENT_LOCKREQ_OFF,
4216 /* XXX swab here to assert that an mds_open reint
4217 * packet is following */
4218 fixup_handle_for_resent_req(req, MDS_REQ_INTENT_LOCKREQ_OFF,
4220 rep->lock_policy_res2 = mds_reint(req, offset, lockh);
4222 if (rep->lock_policy_res2) {
4224 * mds_open() returns ENOLCK where it should return
4225 * zero, but it has no lock to return.
4227 if (rep->lock_policy_res2 == ENOLCK)
4228 rep->lock_policy_res2 = 0;
4230 RETURN(ELDLM_LOCK_ABORTED);
4234 * IT_OPEN may return lock on cross-node dentry that we want to
4235 * hold during attr retrival -bzzz
4237 if (lockh[0].cookie == 0)
4238 RETURN(ELDLM_LOCK_ABORTED);
/* Lookup-flavored intents take the LOOKUP bit (alone or OR-ed in). */
4242 getattr_part = MDS_INODELOCK_LOOKUP;
4245 getattr_part |= MDS_INODELOCK_LOOKUP;
4247 fixup_handle_for_resent_req(req, MDS_REQ_INTENT_LOCKREQ_OFF,
4248 lock, &new_lock, lockh);
4249 rep->lock_policy_res2 = mds_getattr_lock(req, offset, lockh,
4251 /* FIXME: LDLM can set req->rq_status. MDS sets
4252 policy_res{1,2} with disposition and status.
4253 - replay: returns 0 & req->status is old status
4254 - otherwise: returns req->status */
4255 if (intent_disposition(rep, DISP_LOOKUP_NEG))
4256 rep->lock_policy_res2 = 0;
4257 if (!intent_disposition(rep, DISP_LOOKUP_POS) ||
4258 rep->lock_policy_res2)
4259 RETURN(ELDLM_LOCK_ABORTED);
4260 if (req->rq_status != 0) {
4262 rep->lock_policy_res2 = req->rq_status;
4263 RETURN(ELDLM_LOCK_ABORTED);
/* Unlink/slave path: take locks and verify the slave object. */
4267 rc = mds_lock_and_check_slave(offset, req, lockh);
4268 if ((rep->lock_policy_res2 = rc)) {
4270 rep->lock_policy_res2 = 0;
4271 RETURN(ELDLM_LOCK_ABORTED);
4275 CERROR("Unhandled intent "LPD64"\n", it->opc);
4279 /* By this point, whatever function we called above must have either
4280 * filled in 'lockh', been an intent replay, or returned an error. We
4281 * want to allow replayed RPCs to not get a lock, since we would just
4282 * drop it below anyways because lock replay is done separately by the
4283 * client afterwards. For regular RPCs we want to give the new lock to
4284 * the client instead of whatever lock it was about to get. */
4285 if (new_lock == NULL)
4286 new_lock = ldlm_handle2lock(&lockh[0]);
4287 if (new_lock == NULL && (flags & LDLM_FL_INTENT_ONLY))
4290 LASSERTF(new_lock != NULL, "op "LPX64" lockh "LPX64"\n",
4291 it->opc, lockh[0].cookie);
4293 /* If we've already given this lock to a client once, then we should
4294 * have no readers or writers. Otherwise, we should have one reader
4295 * _or_ writer ref (which will be zeroed below) before returning the
4296 * lock to a client. */
4297 if (new_lock->l_export == req->rq_export) {
4298 LASSERT(new_lock->l_readers + new_lock->l_writers == 0);
4300 LASSERT(new_lock->l_export == NULL);
4301 LASSERT(new_lock->l_readers + new_lock->l_writers == 1);
4306 if (new_lock->l_export == req->rq_export) {
4307 /* Already gave this to the client, which means that we
4308 * reconstructed a reply. */
4309 LASSERT(lustre_msg_get_flags(req->rq_reqmsg) &
4311 RETURN(ELDLM_LOCK_REPLACED);
4314 /* Fixup the lock to be given to the client */
4315 lock_res_and_lock(new_lock);
4316 new_lock->l_readers = 0;
4317 new_lock->l_writers = 0;
4319 new_lock->l_export = class_export_get(req->rq_export);
/* Link the lock onto the export's held-lock list under its lock. */
4321 spin_lock(&new_lock->l_export->exp_ldlm_data.led_lock);
4322 list_add(&new_lock->l_export_chain,
4323 &new_lock->l_export->exp_ldlm_data.led_held_locks);
4324 spin_unlock(&new_lock->l_export->exp_ldlm_data.led_lock);
/* Inherit the ASTs and the client's handle from the original lock. */
4326 new_lock->l_blocking_ast = lock->l_blocking_ast;
4327 new_lock->l_completion_ast = lock->l_completion_ast;
4329 memcpy(&new_lock->l_remote_handle, &lock->l_remote_handle,
4330 sizeof(lock->l_remote_handle));
4332 new_lock->l_flags &= ~LDLM_FL_LOCAL;
4334 unlock_res_and_lock(new_lock);
4335 LDLM_LOCK_PUT(new_lock);
4337 RETURN(ELDLM_LOCK_REPLACED);
/*
 * mds_attach(): obd_ops .o_attach for the MDS device.  Initializes
 * the security deny list and its spinlock, attaches the lprocfs
 * variables for the MDS flavor (index 0), and allocates the MD
 * stats counters.  NOTE(review): the rc error check after
 * lprocfs_obd_attach() was dropped by extraction.
 */
4340 int mds_attach(struct obd_device *dev, obd_count len, void *data)
4342 struct lprocfs_static_vars lvars;
4344 struct mds_obd *mds = &dev->u.mds;
4346 spin_lock_init(&mds->mds_denylist_lock);
4347 INIT_LIST_HEAD(&mds->mds_denylist);
4349 lprocfs_init_multi_vars(0, &lvars);
4351 rc = lprocfs_obd_attach(dev, lvars.obd_vars);
4355 return lprocfs_alloc_md_stats(dev, 0);
/*
 * mds_detach(): obd_ops .o_detach -- free the MD stats and detach
 * the lprocfs entries; returns the lprocfs_obd_detach() status.
 */
4358 int mds_detach(struct obd_device *dev)
4360 lprocfs_free_md_stats(dev);
4361 return lprocfs_obd_detach(dev);
/*
 * mdt_attach(): obd_ops .o_attach for the MDT (service) device --
 * attach lprocfs variables for flavor index 1.  No stats allocation,
 * unlike mds_attach().
 */
4364 int mdt_attach(struct obd_device *dev, obd_count len, void *data)
4366 struct lprocfs_static_vars lvars;
4368 lprocfs_init_multi_vars(1, &lvars);
4369 return lprocfs_obd_attach(dev, lvars.obd_vars);
/* mdt_detach(): obd_ops .o_detach -- just drop the lprocfs entries. */
4372 int mdt_detach(struct obd_device *dev)
4374 return lprocfs_obd_detach(dev);
/*
 * mdt_setup(): start the four ptlrpc services that make up the MDT:
 * the main "mds" request service, "mds_setattr", "mds_readpage" and
 * "mds_close", each on its own portal and each with MDT_NUM_THREADS
 * handler threads.  On any failure the already-started services are
 * unwound in reverse order (err_thread4..err_thread labels; the
 * label lines themselves were dropped by extraction but the
 * unregister calls at orig lines 4451-4457 are the unwind path).
 */
4377 static int mdt_setup(struct obd_device *obd, obd_count len, void *buf)
4379 struct mds_obd *mds = &obd->u.mds;
/* Main MDS request service.  NOTE(review): the assignment target
 * (mds->mds_service =) on orig line ~4383 was dropped by extraction. */
4384 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
4385 MDS_REQUEST_PORTAL, MDC_REPLY_PORTAL,
4386 MDS_SERVICE_WATCHDOG_TIMEOUT,
4387 mds_handle, "mds", obd->obd_proc_entry);
4389 if (!mds->mds_service) {
4390 CERROR("failed to start service\n");
4394 rc = ptlrpc_start_n_threads(obd, mds->mds_service, MDT_NUM_THREADS,
4397 GOTO(err_thread, rc);
/* Setattr service on its own portal, to avoid head-of-line blocking. */
4399 mds->mds_setattr_service =
4400 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
4401 MDS_SETATTR_PORTAL, MDC_REPLY_PORTAL,
4402 MDS_SERVICE_WATCHDOG_TIMEOUT,
4403 mds_handle, "mds_setattr",
4404 obd->obd_proc_entry);
4405 if (!mds->mds_setattr_service) {
4406 CERROR("failed to start getattr service\n");
4407 GOTO(err_thread, rc = -ENOMEM);
4410 rc = ptlrpc_start_n_threads(obd, mds->mds_setattr_service,
4411 MDT_NUM_THREADS, "ll_mdt_attr");
4413 GOTO(err_thread2, rc);
/* Readpage (readdir) service. */
4415 mds->mds_readpage_service =
4416 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
4417 MDS_READPAGE_PORTAL, MDC_REPLY_PORTAL,
4418 MDS_SERVICE_WATCHDOG_TIMEOUT,
4419 mds_handle, "mds_readpage",
4420 obd->obd_proc_entry);
4421 if (!mds->mds_readpage_service) {
4422 CERROR("failed to start readpage service\n");
4423 GOTO(err_thread2, rc = -ENOMEM);
4426 rc = ptlrpc_start_n_threads(obd, mds->mds_readpage_service,
4427 MDT_NUM_THREADS, "ll_mdt_rdpg");
4430 GOTO(err_thread3, rc);
/* Close service, so close RPCs cannot be starved by other traffic. */
4432 mds->mds_close_service =
4433 ptlrpc_init_svc(MDS_NBUFS, MDS_BUFSIZE, MDS_MAXREQSIZE,
4434 MDS_CLOSE_PORTAL, MDC_REPLY_PORTAL,
4435 MDS_SERVICE_WATCHDOG_TIMEOUT,
4436 mds_handle, "mds_close",
4437 obd->obd_proc_entry);
4438 if (!mds->mds_close_service) {
4439 CERROR("failed to start close service\n");
4440 GOTO(err_thread3, rc = -ENOMEM);
4443 rc = ptlrpc_start_n_threads(obd, mds->mds_close_service,
4444 MDT_NUM_THREADS, "ll_mdt_clos");
4447 GOTO(err_thread4, rc);
/* Error unwind: unregister in reverse order of creation. */
4451 ptlrpc_unregister_service(mds->mds_close_service);
4453 ptlrpc_unregister_service(mds->mds_readpage_service);
4455 ptlrpc_unregister_service(mds->mds_setattr_service);
4457 ptlrpc_unregister_service(mds->mds_service);
/*
 * mdt_cleanup(): stop all handler threads and unregister each of the
 * four services started by mdt_setup(), in reverse order of their
 * creation (close, readpage, setattr, main).
 */
4461 static int mdt_cleanup(struct obd_device *obd, int flags)
4463 struct mds_obd *mds = &obd->u.mds;
4466 ptlrpc_stop_all_threads(mds->mds_close_service);
4467 ptlrpc_unregister_service(mds->mds_close_service);
4469 ptlrpc_stop_all_threads(mds->mds_readpage_service);
4470 ptlrpc_unregister_service(mds->mds_readpage_service);
4472 ptlrpc_stop_all_threads(mds->mds_setattr_service);
4473 ptlrpc_unregister_service(mds->mds_setattr_service);
4475 ptlrpc_stop_all_threads(mds->mds_service);
4476 ptlrpc_unregister_service(mds->mds_service);
/*
 * mds_lvfs_id2dentry(): lvfs callback translating an (ino, gen)
 * pair into a dentry via mds_id2dentry().  NOTE(review): the lines
 * populating 'id' from ino/gen were dropped by extraction.
 */
4481 static struct dentry *mds_lvfs_id2dentry(__u64 ino, __u32 gen,
4482 __u64 gr, void *data)
4484 struct lustre_id id;
4485 struct obd_device *obd = data;
4489 return mds_id2dentry(obd, &id, NULL);
/*
 * mds_get_info(): obd_ops .o_get_info -- keyed queries against the
 * MDS.  Recognized keys (each fills *val and *valsize and returns):
 *   "reint_log"  - reint llog context handle
 *   "cache_sb"   - the MDS superblock pointer
 *   "mdsize"     - mds_max_mdsize
 *   "mdsnum"     - this MDS's number
 *   "rootid"     - root lustre_id
 *   "lovdesc"    - the LOV descriptor
 *   "getext"     - allocate the next fid extent (under mds_fidext_lock)
 * Unknown keys fall through to fsfilt_get_info().
 * NOTE(review): the keylen/memcmp mix is inconsistent ("mdsize" uses
 * memcmp with keylen while others use strcmp) -- behavior for
 * prefixed keys differs; confirm intent against full source.
 */
4492 static int mds_get_info(struct obd_export *exp, __u32 keylen,
4493 void *key, __u32 *valsize, void *val)
4495 struct obd_device *obd;
4496 struct mds_obd *mds;
4500 obd = class_exp2obd(exp);
/* Export no longer maps to a device: reject with a debug message. */
4504 CDEBUG(D_IOCTL, "invalid client cookie "LPX64"\n",
4505 exp->exp_handle.h_cookie);
4509 if (keylen >= strlen("reint_log") && memcmp(key, "reint_log", 9) == 0) {
4510 /* get log_context handle. */
4511 struct llog_ctxt *ctxt;
4512 unsigned long *llh_handle = val;
4513 *valsize = sizeof(unsigned long);
4514 ctxt = llog_get_context(&obd->obd_llogs, LLOG_REINT_ORIG_CTXT);
4516 CERROR("Cannot get REINT llog context\n");
4519 *llh_handle = (unsigned long)ctxt;
4522 if (keylen >= strlen("cache_sb") && memcmp(key, "cache_sb", 8) == 0) {
4523 /* get log_context handle. */
4524 unsigned long *sb = val;
4525 *valsize = sizeof(unsigned long);
4526 *sb = (unsigned long)obd->u.mds.mds_sb;
4530 if (keylen >= strlen("mdsize") && memcmp(key, "mdsize", keylen) == 0) {
4531 __u32 *mdsize = val;
4532 *valsize = sizeof(*mdsize);
4533 *mdsize = mds->mds_max_mdsize;
4537 if (keylen >= strlen("mdsnum") && strcmp(key, "mdsnum") == 0) {
4538 __u32 *mdsnum = val;
4539 *valsize = sizeof(*mdsnum);
4540 *mdsnum = mds->mds_num;
4544 if (keylen >= strlen("rootid") && strcmp(key, "rootid") == 0) {
4545 struct lustre_id *rootid = val;
4546 *valsize = sizeof(*rootid);
4547 *rootid = mds->mds_rootid;
4551 if (keylen >= strlen("lovdesc") && strcmp(key, "lovdesc") == 0) {
4552 struct lov_desc *desc = val;
4553 *valsize = sizeof(*desc);
4554 *desc = mds->mds_dt_desc;
4558 if (keylen >= strlen("getext") && strcmp(key, "getext") == 0) {
4559 struct fid_extent *ext = val;
4560 *valsize = sizeof(*ext);
/* Hand out the next MDS_FIDEXT_SIZE-wide fid extent atomically. */
4562 spin_lock(&mds->mds_fidext_lock);
4563 ext->fe_width = MDS_FIDEXT_SIZE;
4564 ext->fe_start = mds->mds_fidext_thumb + 1;
4565 mds->mds_fidext_thumb += MDS_FIDEXT_SIZE;
4566 spin_unlock(&mds->mds_fidext_lock);
/* No MDS-level key matched: delegate to the underlying fsfilt layer. */
4571 rc = fsfilt_get_info(obd, mds->mds_sb, NULL, keylen, key, valsize, val);
4573 CDEBUG(D_IOCTL, "invalid key\n");
/* lvfs callback table for the MDS: id -> dentry resolution. */
4578 struct lvfs_callback_ops mds_lvfs_ops = {
4579 l_id2dentry: mds_lvfs_id2dentry,
/* Forward declarations for the bulk-I/O prep/commit handlers wired
 * into mds_obd_ops below; definitions live elsewhere in the MDS. */
4582 int mds_preprw(int cmd, struct obd_export *exp, struct obdo *oa,
4583 int objcount, struct obd_ioobj *obj,
4584 int niocount, struct niobuf_remote *nb,
4585 struct niobuf_local *res,
4586 struct obd_trans_info *oti, struct lustre_capa *capa);
4588 int mds_commitrw(int cmd, struct obd_export *exp, struct obdo *oa,
4589 int objcount, struct obd_ioobj *obj, int niocount,
4590 struct niobuf_local *res, struct obd_trans_info *oti,
4593 /* use obd ops to offer management infrastructure */
/* Method table for the "mds" obd type: lifecycle (attach/setup/
 * cleanup), client connection handling, config, llog, and the
 * get/set_info and prep/commit bulk hooks declared above. */
4594 static struct obd_ops mds_obd_ops = {
4595 .o_owner = THIS_MODULE,
4596 .o_attach = mds_attach,
4597 .o_detach = mds_detach,
4598 .o_connect = mds_connect,
4599 .o_connect_post = mds_connect_post,
4600 .o_init_export = mds_init_export,
4601 .o_destroy_export = mds_destroy_export,
4602 .o_disconnect = mds_disconnect,
4603 .o_setup = mds_setup,
4604 .o_precleanup = mds_precleanup,
4605 .o_cleanup = mds_cleanup,
4606 .o_process_config = mds_process_config,
4607 .o_postrecov = mds_postrecov,
4608 .o_statfs = mds_obd_statfs,
4609 .o_iocontrol = mds_iocontrol,
4610 .o_create = mds_obd_create,
4611 .o_destroy = mds_obd_destroy,
4612 .o_llog_init = mds_llog_init,
4613 .o_llog_finish = mds_llog_finish,
4614 .o_notify = mds_notify,
4615 .o_get_info = mds_get_info,
4616 .o_set_info = mds_set_info,
4617 .o_preprw = mds_preprw,
4618 .o_commitrw = mds_commitrw,
/* Method table for the "mdt" (service-side) obd type: only the
 * attach/detach/setup/cleanup lifecycle is needed here. */
4621 static struct obd_ops mdt_obd_ops = {
4622 .o_owner = THIS_MODULE,
4623 .o_attach = mdt_attach,
4624 .o_detach = mdt_detach,
4625 .o_setup = mdt_setup,
4626 .o_cleanup = mdt_cleanup,
/*
 * mds_init(): module init -- set up the LSD and remote-ACL upcall
 * caches, register both the MDS and MDT obd types with their lprocfs
 * vars, start the capability-key thread (unwinding registrations on
 * failure), and arm the capa-key expiry timer.
 * NOTE(review): mds_eck_timer fields are assigned before
 * init_timer() is called on it -- init_timer() typically only
 * initializes the list linkage so this works, but the conventional
 * order is init_timer() first; confirm against full source/era API.
 */
4629 static int __init mds_init(void)
4631 struct lprocfs_static_vars lvars;
4635 mds_init_lsd_cache();
4636 mds_init_rmtacl_upcall_cache();
4638 lprocfs_init_multi_vars(0, &lvars);
4639 class_register_type(&mds_obd_ops, NULL, lvars.module_vars,
4640 OBD_MDS_DEVICENAME);
4641 lprocfs_init_multi_vars(1, &lvars);
4642 class_register_type(&mdt_obd_ops, NULL, lvars.module_vars,
4643 OBD_MDT_DEVICENAME);
4645 rc = mds_capa_key_start_thread();
/* Failure path: unregister both types and tear the LSD cache down.
 * NOTE(review): mds_cleanup_rmtacl_upcall_cache() is not visible on
 * this path -- possible leak or a dropped line; verify. */
4647 class_unregister_type(OBD_MDT_DEVICENAME);
4648 class_unregister_type(OBD_MDS_DEVICENAME);
4649 mds_cleanup_lsd_cache();
4652 mds_eck_timer.function = mds_capa_key_timer_callback;
4653 mds_eck_timer.data = 0;
4654 init_timer(&mds_eck_timer);
/*
 * mds_exit(): module teardown -- stop the capa-key thread, drop the
 * upcall/LSD caches, and unregister both obd types (mirror of
 * mds_init()).
 */
4659 static void /*__exit*/ mds_exit(void)
4661 mds_capa_key_stop_thread();
4662 mds_cleanup_rmtacl_upcall_cache();
4663 mds_cleanup_lsd_cache();
4665 class_unregister_type(OBD_MDS_DEVICENAME);
4666 class_unregister_type(OBD_MDT_DEVICENAME);
/* Standard kernel module metadata and entry points. */
4669 MODULE_AUTHOR("Cluster File Systems, Inc. <info@clusterfs.com>");
4670 MODULE_DESCRIPTION("Lustre Metadata Server (MDS)");
4671 MODULE_LICENSE("GPL");
4673 module_init(mds_init);
4674 module_exit(mds_exit);